code (string, 31 to 1.05M chars) | apis (list) | extract_api (string, 97 to 1.91M chars)
---|---|---|
from dataclasses import dataclass
from typing import List, NamedTuple
import numpy as np
from generic_search import bfsCave, nodeToPath
wall = "#"
emptySpace = "."
class GridLocation(NamedTuple):
column: int
row: int
def __lt__(self, other):
return self.row < other.row or \
self.row == other.row and self.column < other.column
def openLocations(cave, location: GridLocation) -> List[GridLocation]:
"""
Return a list of the open locations around the given location. The locations are
in reading order.
"""
available = []
row = cave[location.row]
if location.row > 0 and cave[location.row - 1, location.column] == ".":
available.append(GridLocation(location.column, location.row - 1))
if location.column > 0 and row[location.column - 1] == ".":
available.append(GridLocation(location.column - 1, location.row))
if location.column + 1 < len(row) and row[location.column + 1] == ".":
available.append(GridLocation(location.column + 1, location.row))
if location.row + 1 < len(cave) and cave[location.row + 1, location.column] == ".":
available.append(GridLocation(location.column, location.row + 1))
return sorted(available)
def reachedLocation(currentLocation, goalLocation):
return abs(currentLocation.row - goalLocation.row) + abs(currentLocation.column - goalLocation.column) == 1
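# A hedged illustrative sketch (not part of the original solution): GridLocation sorts in
# "reading order" (top to bottom, then left to right), which is what openLocations relies on.
# With numpy available, a quick check on a tiny hand-made cave:
# demoCave = np.array([list("#####"), list("#...#"), list("#####")])
# print(openLocations(demoCave, GridLocation(2, 1)))
# # -> [GridLocation(column=1, row=1), GridLocation(column=3, row=1)]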
@dataclass
class Unit:
x: int
y: int
race: str
hitPoints: int = 200
attackDamage: int = 3
def __str__(self):
return f"{self.race}({self.hitPoints})"
def __lt__(self, other):
if self.y != other.y:
return self.y < other.y
return self.x < other.x
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def location(self):
return GridLocation(self.x, self.y)
def sameLocation(self, other):
"""
Return True if this unit is at the same location as other
"""
return self.x == other.x and self.y == other.y
def atLocation(self, x, y):
"""
Return True if this unit is at this x,y location
"""
return self.x == x and self.y == y
def distanceTo(self, other):
"""
Return the Manhattan distance between this unit and other
Keyword arguments:
other -- The other unit.
"""
return abs(self.x - other.x) + abs(self.y - other.y)
def canAttack(self, units):
"""
Return True if there is an enemy available to attack.
Keyword arguments:
units -- A list of all units. Does not need to be sorted.
"""
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race and self.distanceTo(unit) == 1:
return True
return False
def enemyExists(self, units):
"""
Return True if an enemy exists. The enemy does not need to be available for attack.
Keyword arguments:
units -- A list of all units. Does not need to be sorted.
"""
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race:
return True
return False
def availableEnemies(self, cave, units):
"""
Return a list of available enemies in the list
Keyword arguments:
units -- A list of all units. Does not need to be sorted.
cave -- The array representing the cave
"""
availableList = []
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race and openLocations(cave, unit.location()):
availableList.append(unit)
return availableList
def move(self, cave, units) -> None:
targetLocation: GridLocation = None
shortestPath = None
enemies = self.availableEnemies(cave, units)
for enemy in enemies:
solution = bfsCave(self.location(), enemy.location(), reachedLocation, cave, openLocations)
if solution:
path = nodeToPath(solution)
# We found a path. Now see if it's a better candidate than one already found
pathEnd = path[-1]
if shortestPath is None or len(path) < len(shortestPath) or \
len(path) == len(shortestPath) and (pathEnd < targetLocation):
targetLocation = pathEnd
shortestPath = path
if shortestPath:
cave[self.y, self.x] = '.'
# The first step in the path is the current location so go to the second step
nextLocation: GridLocation = shortestPath[1]
self.x = nextLocation.column
self.y = nextLocation.row
cave[self.y, self.x] = self.race
def attack(self, cave, units):
"""
Attack an available enemy.
units -- A list of all units. Does not need to be sorted.
"""
target = None
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race and self.distanceTo(unit) == 1:
if target is None or unit.hitPoints < target.hitPoints or \
unit.hitPoints == target.hitPoints and unit < target:
target = unit
if target is not None:
target.hitPoints -= self.attackDamage
if target.hitPoints <= 0:
cave[target.y, target.x] = "."
def printCave(cave, units, showScores=False):
for rowNumber, row in enumerate(cave):
scores = " "
for columnNumber, cell in enumerate(row):
print(cell, end='')
if showScores and cell in ["E", "G"]:
unit = next(unit for unit in units if unit.hitPoints > 0 and unit.atLocation(columnNumber, rowNumber))
scores += str(unit) + " "
if len(scores.strip()):
print(scores, end='')
print()
def loadPuzzle(puzzleName, elfAttackPower):
# Get the dimensions of the puzzle.
with open(puzzleName, "r") as infile:
puzzleHeight = 0
puzzleWidth = 0
for line in infile:
puzzleHeight += 1
puzzleWidth = max(puzzleWidth, len(line.rstrip()))
# Create the cave with the determined puzzle dimensions.
cave = np.full((puzzleHeight, puzzleWidth), '.', dtype=str)
units = []
# Populate the cave and the list of units.
with open(puzzleName, "r") as infile:
for rowNumber, line in enumerate(infile):
for columnNumber, cell in enumerate(line.rstrip()):
if cell in ['E', 'G']:
units.append(Unit(columnNumber, rowNumber, cell, attackDamage=3 if cell == 'G' else elfAttackPower))
cave[rowNumber, columnNumber] = cell
return cave, units
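# A hedged usage sketch (not part of the original solution): load the starting grid and
# print it with per-unit hit points; assumes the puzzle input "15.txt" used below exists.
# demoCave, demoUnits = loadPuzzle("15.txt", 3)
# printCave(demoCave, demoUnits, showScores=True)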
if __name__ == "15a":  # part 1 of the puzzle; only runs if this module is imported under the name "15a"
cave, units = loadPuzzle("15.txt", 3)
finished = False
playRound = 0
while not finished:
for unit in units:
if unit.hitPoints <= 0:
continue
if not unit.enemyExists(units):
finished = True
break
if not unit.canAttack(units):
unit.move(cave, units)
unit.attack(cave, units)
if not finished:
playRound += 1
print(playRound)
livingUnits = [unit for unit in units if unit.hitPoints > 0]
units = sorted(livingUnits)
if __name__ == "__main__":
goblinsWin = True
elfAttackPower = 3
originalElfCount = 0
survivingElfCount = 0
while goblinsWin or survivingElfCount < originalElfCount:
elfAttackPower += 1
cave, units = loadPuzzle("15.txt", elfAttackPower)
originalElfCount = len([unit for unit in units if unit.race == "E"])
finished = False
playRound = 0
while not finished:
for unit in units:
if unit.hitPoints <= 0:
continue
if not unit.enemyExists(units):
finished = True
break
if not unit.canAttack(units):
unit.move(cave, units)
unit.attack(cave, units)
survivingElfCount = len([unit for unit in units if unit.race == "E" and unit.hitPoints > 0])
if survivingElfCount < originalElfCount:
finished = True
break
if not finished:
playRound += 1
print(playRound)
livingUnits = [unit for unit in units if unit.hitPoints > 0]
units = sorted(livingUnits)
goblinsWin = units[0].race == "G"
printCave(cave, units, showScores=True)
print(f"Combat ends after {playRound} full rounds")
hitPoints = sum([unit.hitPoints for unit in units])
survivingRace = "Goblins" if units[0].race == "G" else "Elves"
print(f"{survivingRace} win with {hitPoints} total hit points left")
print(f"Outcome: {playRound} * {hitPoints} = {playRound * hitPoints}")
print(f"Elf attack power: {elfAttackPower}")
|
[
"numpy.full",
"generic_search.nodeToPath"
] |
[((6315, 6367), 'numpy.full', 'np.full', (['(puzzleHeight, puzzleWidth)', '"""."""'], {'dtype': 'str'}), "((puzzleHeight, puzzleWidth), '.', dtype=str)\n", (6322, 6367), True, 'import numpy as np\n'), ((4063, 4083), 'generic_search.nodeToPath', 'nodeToPath', (['solution'], {}), '(solution)\n', (4073, 4083), False, 'from generic_search import bfsCave, nodeToPath\n')]
|
# -*- coding: utf-8 -*-
import cv2
import sys
import numpy as np
import argparse
imagePath = "img.png"
sx = sy = None
previewImage = None
if len(sys.argv) < 3:
print("""
Usage:
python mouseInteractive -i img.png
""")
sys.exit(-1)
if sys.argv[1]=="-i":
imagePath = sys.argv[2]
def createBlankImage(width, height, color=(255,255,255)):
img = np.zeros((height, width, 3), np.uint8)
img[:] = color
return img
def mouseCallback(event,x,y,flags,param):
global sx,sy,previewImage
if (event == cv2.EVENT_LBUTTONDOWN):
print(event,x,y,flags,param)
bgrColor = frame[y][x]
previewImage = createBlankImage(200,200,bgrColor)
hsvColor = cv2.cvtColor(bgrColor.reshape(1,1,3),cv2.COLOR_BGR2HSV)
print("bgr->hsv:{}->{}".format(bgrColor,hsvColor.tolist()[0][0]))
cv2.circle(frame,(x,y),6, (0,0,255),-1)
if (sx != None):
cv2.line(frame,(sx,sy),(x,y),(0,0,255),3)
sx = x
sy = y
cv2.imshow('demo', frame)
cv2.imshow('preview', previewImage)
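# A hedged illustrative sketch (not part of the original script): cv2.cvtColor expects an
# image-shaped array, which is why the clicked BGR pixel above is reshaped to (1, 1, 3)
# before conversion. A standalone check:
# pixel = np.uint8([[[255, 0, 0]]])               # one pure-blue pixel in BGR
# print(cv2.cvtColor(pixel, cv2.COLOR_BGR2HSV))   # -> [[[120 255 255]]]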
frame = cv2.imread(imagePath)
cv2.namedWindow("demo")
cv2.namedWindow("preview")
cv2.moveWindow("demo", 1500, 300)
cv2.moveWindow("preview", 1500, 80)
cv2.imshow('demo', frame)
cv2.setMouseCallback('demo', mouseCallback)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.line",
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.imread",
"cv2.setMouseCallback",
"sys.exit",
"cv2.moveWindow",
"cv2.imshow",
"cv2.namedWindow"
] |
[((1089, 1110), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (1099, 1110), False, 'import cv2\n'), ((1112, 1135), 'cv2.namedWindow', 'cv2.namedWindow', (['"""demo"""'], {}), "('demo')\n", (1127, 1135), False, 'import cv2\n'), ((1136, 1162), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview"""'], {}), "('preview')\n", (1151, 1162), False, 'import cv2\n'), ((1163, 1196), 'cv2.moveWindow', 'cv2.moveWindow', (['"""demo"""', '(1500)', '(300)'], {}), "('demo', 1500, 300)\n", (1177, 1196), False, 'import cv2\n'), ((1197, 1232), 'cv2.moveWindow', 'cv2.moveWindow', (['"""preview"""', '(1500)', '(80)'], {}), "('preview', 1500, 80)\n", (1211, 1232), False, 'import cv2\n'), ((1233, 1258), 'cv2.imshow', 'cv2.imshow', (['"""demo"""', 'frame'], {}), "('demo', frame)\n", (1243, 1258), False, 'import cv2\n'), ((1259, 1302), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""demo"""', 'mouseCallback'], {}), "('demo', mouseCallback)\n", (1279, 1302), False, 'import cv2\n'), ((1304, 1318), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1315, 1318), False, 'import cv2\n'), ((1319, 1342), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1340, 1342), False, 'import cv2\n'), ((248, 260), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (256, 260), False, 'import sys\n'), ((381, 419), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (389, 419), True, 'import numpy as np\n'), ((853, 898), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', '(6)', '(0, 0, 255)', '(-1)'], {}), '(frame, (x, y), 6, (0, 0, 255), -1)\n', (863, 898), False, 'import cv2\n'), ((1010, 1035), 'cv2.imshow', 'cv2.imshow', (['"""demo"""', 'frame'], {}), "('demo', frame)\n", (1020, 1035), False, 'import cv2\n'), ((1044, 1079), 'cv2.imshow', 'cv2.imshow', (['"""preview"""', 'previewImage'], {}), "('preview', previewImage)\n", (1054, 1079), False, 'import cv2\n'), ((930, 979), 'cv2.line', 'cv2.line', (['frame', '(sx, sy)', '(x, y)', '(0, 0, 255)', '(3)'], {}), '(frame, (sx, sy), (x, y), (0, 0, 255), 3)\n', (938, 979), False, 'import cv2\n')]
|
import numpy
class channel_noise_simulator:
"""Class to hold usefull funktions to simulate noise in a channel"""
def __init__(self):
return
# _____________create bits___________________
def create_random_bits_list(self, len):
"""create a random len bits long bitstring """
bits = []
for i in range(len):
bits.append(numpy.random.randint(0, 2))
return bits
def create_random_bits_string(self, len):
"""create a random len bits long string """
bits = ""
for i in range(len):
bits += str(numpy.random.randint(0, 2))
return bits
# _____________Randoise bits______________________
def randomise_bits_list(self, bits, probability):
"""A function to simply flip bits with the given probability
ARGS: a list of bits, the probability for an error[0-1]
RETURN: a list of bits
"""
new_bits = []
for b in bits:
if probability > numpy.random.random(): # roll random numbers
new_bits.append((b + 1) % 2) # turn 0 to 1 and 1 to 0
else:
new_bits.append(b)
return new_bits
def randomise_bits_string(self, bits, probability):
"""A function to simply flip bits with the given probability
ARGS: a list of bits, the probability for an error[0-1]
Return: a string full of bits
"""
new_bits = ""
for b in bits:
if probability > numpy.random.random(): # roll random numbers
new_bits += str((int(b) + 1) % 2) # turn 0 to 1 and 1 to 0
else:
new_bits += b
return new_bits
def randomise_bits_string_list(self, bits, probability):
"""A function to simply flip bits with the given probability
ARGS: a list of bits, the probability for an error[0-1]
RETURN: a list of bits
"""
new_bits = []
for b in bits:
new_bit = ""
for i in range(len(b)):
if probability > numpy.random.random(): # roll random numbers
new_bit += str((int(b[i]) + 1) % 2) # turn 0 to 1 and 1 to 0
else:
new_bit += str(b[i])
new_bits.append(new_bit)
return new_bits
def randomise_bits_burst_string_list(
self, bits, burst_probability, error_rate_in_burst=0.9,
):
"""A function to simply flip bits with the given probability
ARGS: a String of bits, the probability for an error[0-1], the probability to leave the bursterror[0-1]
Return: String of bits with added burst error
"""
new_bits = []
currently_bursting = False
for b in bits:
i = 0
new_bits.append("")
while i < len(b):
if burst_probability > numpy.random.random(): # roll random numbers
currently_bursting = True
while currently_bursting and i < len(
b
): # stop when bitstream ends (simulate one bursterror and adjust i)
if error_rate_in_burst > numpy.random.random():
new_bits[len(new_bits) - 1] += str(
((int(b[i]) + 1) % 2)
) # turn 0 to 1 and 1 to 0 randomly
else:
new_bits[len(new_bits) - 1] += str(b[i])
currently_bursting = False
i += 1
else:
new_bits[len(new_bits) - 1] += str(b[i])
i += 1
return new_bits
def randomise_bits_burst_list(
self, bits, burst_probability, error_rate_in_burst=0.9
):
"""A function to simply flip bits with the given probability
ARGS: a list of bits, the probability for an error[0-1], the probability to leave the bursterror[0-1]
Return: list of bits with added burst erorrs
"""
new_bits = []
i = 0
while i < len(bits):
if burst_probability > numpy.random.random(): # roll random numbers
currently_bursting = True
while currently_bursting and i < len(
bits
): # stop when bitstream ends (simulate one bursterror and adjust i)
if error_rate_in_burst > numpy.random.random():
new_bits.append(
(bits[i] + 1) % 2
) # turn 0 to 1 and 1 to 0 randomly
else:
new_bits.append(bits[i])
currently_bursting = False
i += 1
else:
new_bits.append(bits[i])
i += 1
return new_bits
def randomise_bits_burst_string(
self, bits, burst_probability, error_rate_in_burst=0.9,
):
"""A function to simply flip bits with the given probability
ARGS: a String of bits, the probability for an error[0-1], the probability to leave the bursterror[0-1]
Return: String of bits with added burst erorrs
"""
new_bits = ""
i = 0
while i < len(bits):
if burst_probability > numpy.random.random(): # roll random numbers
currently_bursting = True
while currently_bursting and i < len(
bits
): # stop when bitstream ends (simulate one bursterror and adjust i)
if error_rate_in_burst > numpy.random.random():
new_bits += str(
((int(bits[i]) + 1) % 2)
) # turn 0 to 1 and 1 to 0 randomly
else:
new_bits += str(bits[i])
currently_bursting = False
i += 1
else:
new_bits += str(bits[i])
i += 1
return new_bits
# ______________compare bits__________________________
def compare_and_highlight_differences(self, bits1, bits2):
"""compare two bitlists and higlight the differences"""
differences = []
if len(bits1) != len(bits2):
print("waning, different lengths detected. may result in higher errorrate")
min_length = min(len(bits1), len(bits2))
for i in range(min_length):
differences.append(1 if bits1[i] != bits2[i] else 0)
print("Differences found: " + str(differences.count(True)))
return differences
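# A hedged sanity-check sketch (added alongside the author's commented examples below): the
# empirical flip rate of randomise_bits_list should approach the requested probability.
# sim = channel_noise_simulator()
# clean = sim.create_random_bits_list(10000)
# noisy = sim.randomise_bits_list(clean, 0.1)
# diff = sim.compare_and_highlight_differences(clean, noisy)
# print(sum(diff) / len(diff))  # expected to be close to 0.1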
# c=channel_noise_simulator()
# print (c.randomise_bits_list([1,1,1,1,0,0,0,0,1],0.5))
# print (c.randomise_bits_string("1101110",0.5))
# print (c.compare_and_highlight_differences([1,1,1,0,0,1,1,0,0,1,0,1,1,1],[0,1,1,0,0,1,1,1,1,1,0,1,0,1]))
# print (c.create_random_bits_list(200))
# rb= c.create_random_bits_string(200)
# rr = c.randomise_bits_burst_string(rb,0.01,.9)
# print (c.compare_and_highlight_differences(rb,rr))
# """
|
[
"numpy.random.randint",
"numpy.random.random"
] |
[((381, 407), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (401, 407), False, 'import numpy\n'), ((601, 627), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (621, 627), False, 'import numpy\n'), ((1019, 1040), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (1038, 1040), False, 'import numpy\n'), ((1537, 1558), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (1556, 1558), False, 'import numpy\n'), ((4238, 4259), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (4257, 4259), False, 'import numpy\n'), ((5434, 5455), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (5453, 5455), False, 'import numpy\n'), ((2118, 2139), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (2137, 2139), False, 'import numpy\n'), ((2937, 2958), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (2956, 2958), False, 'import numpy\n'), ((4537, 4558), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (4556, 4558), False, 'import numpy\n'), ((5733, 5754), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (5752, 5754), False, 'import numpy\n'), ((3253, 3274), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (3272, 3274), False, 'import numpy\n')]
|
import torch, add_path
import numpy as np
from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, \
GMM, DeepIV, AGMM
import os
import tensorflow
from MMR_IVs.util import ROOT_PATH, load_data
import random
random.seed(527)
def eval_model(model, test):
g_pred_test = model.predict(test.x)
mse = float(((g_pred_test - test.g) ** 2).mean())
return mse
def save_model(model, save_path, test):
g_pred = model.predict(test.x)
np.savez(save_path, x=test.w, y=test.y, g_true=test.g, g_hat=g_pred)
def run_experiment(scenario_name,mid,repid, num_reps=10, seed=527,training=False):
# set random seed
torch.manual_seed(seed)
np.random.seed(seed)
tensorflow.set_random_seed(seed)
train, dev, test = load_data(ROOT_PATH + "/data/mendelian/" + scenario_name+'.npz')
# result folder
folder = ROOT_PATH + "/results/mendelian/"+scenario_name+"/"
os.makedirs(folder, exist_ok=True)
means = []
times = []
for rep in range(num_reps):
# Not all methods are applicable in all scenarios
methods = []
# baseline methods
methods += [("DirectNN", DirectNN())]
methods += [("Vanilla2SLS", Vanilla2SLS())]
methods += [("Poly2SLS", Poly2SLS())]
methods += [("GMM", GMM(g_model="2-layer", n_steps=20))]
methods += [("AGMM", AGMM())]
methods += [("DeepIV", DeepIV())]
if training:
if rep < repid:
continue
elif rep >repid:
break
else:
pass
for method_name, method in methods[mid:mid+1]:
print("Running " + method_name +" " + str(rep))
file_name = "%s_%d.npz" % (method_name, rep)
save_path = os.path.join(folder, file_name)
model, time = method.fit(train.x, train.y, train.z, None)
np.save(folder+"%s_%d_time.npy" % (method_name, rep),time)
save_model(model, save_path, test)
test_mse = eval_model(model, test)
model_type_name = type(model).__name__
print("Test MSE of %s: %f" % (model_type_name, test_mse))
else:
means2 = []
times2 = []
for method_name, method in methods:
# print("Running " + method_name +" " + str(rep))
file_name = "%s_%d.npz" % (method_name, rep)
save_path = os.path.join(folder, file_name)
if os.path.exists(save_path):
res = np.load(save_path)
mse = float(((res['g_hat'] - res['g_true']) ** 2).mean())
# print('mse: {}'.format(mse))
means2 += [mse]
else:
print(save_path, ' not exists')
time_path = folder+"%s_%d_time.npy" % (method_name, rep)
if os.path.exists(time_path):
res = np.load(time_path)
times2 += [res]
else:
print(time_path, ' not exists')
if len(means2) == len(methods):
means += [means2]
if len(times2) == len(methods):
times += [times2]
#print('means',np.mean(np.array(means),axis=0))
#print('std',np.std(np.array(means),axis=0))
return means,times
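# A hedged usage sketch (not part of the original script): run a single method/repetition,
# e.g. DirectNN (mid=0) on the first scenario with repetition 0:
# run_experiment("mendelian_8_1_1", mid=0, repid=0, training=True)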
if __name__ == "__main__":
scenarios = ["mendelian_{}_{}_{}".format(s, i, j) for s in [8,16,32] for i,j in [[1,1]]]
scenarios += ["mendelian_{}_{}_{}".format(16, i, j) for i, j in [[1, 0.5],[1, 2]]]
scenarios += ["mendelian_{}_{}_{}".format(16, i, j)for i, j in [[0.5, 1],[2, 1]]]
for sce in scenarios:
for mid in range(6):
for repid in range(10):
run_experiment(sce, mid, repid, training=True)
rows = []
for i in range(len(scenarios)):
s = scenarios[i]
means,times = run_experiment(s,0,0,training=False)
mean = np.mean(means,axis=0)
std = np.std(means,axis=0)
rows += [["({},{:.4f}) +- ({:.3f},{:.3f})".format(s,mean[j],std[j],std[j]) for j in range(len(mean))]]
print('time: ',np.mean(times,axis=0),np.std(times,axis=0))
# methods = np.array(["DirectNN","Vanilla2SLS","Poly2SLS","GMM","AGMM","DeepIV"])[:,None]
rows = np.array(rows)
#rows = np.vstack((methods,rows))
print('addplot+[mark=*,error bars/.cd, y dir=both,y explicit] coordinates'.join(['{'+'\n'.join(e)+'};\n' for e in rows.T]))
print('Tabulate Table:')
# print(tabulate(np.vstack((np.append([""],scenarios),rows)), headers='firstrow',tablefmt='latex'))
|
[
"numpy.load",
"numpy.random.seed",
"numpy.mean",
"MMR_IVs.util.load_data",
"os.path.join",
"numpy.std",
"os.path.exists",
"baselines.all_baselines.DeepIV",
"tensorflow.set_random_seed",
"random.seed",
"baselines.all_baselines.DirectNN",
"numpy.save",
"torch.manual_seed",
"baselines.all_baselines.Poly2SLS",
"baselines.all_baselines.GMM",
"baselines.all_baselines.Vanilla2SLS",
"numpy.savez",
"baselines.all_baselines.AGMM",
"os.makedirs",
"numpy.array"
] |
[((223, 239), 'random.seed', 'random.seed', (['(527)'], {}), '(527)\n', (234, 239), False, 'import random\n'), ((460, 528), 'numpy.savez', 'np.savez', (['save_path'], {'x': 'test.w', 'y': 'test.y', 'g_true': 'test.g', 'g_hat': 'g_pred'}), '(save_path, x=test.w, y=test.y, g_true=test.g, g_hat=g_pred)\n', (468, 528), True, 'import numpy as np\n'), ((640, 663), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (657, 663), False, 'import torch, add_path\n'), ((668, 688), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (682, 688), True, 'import numpy as np\n'), ((693, 725), 'tensorflow.set_random_seed', 'tensorflow.set_random_seed', (['seed'], {}), '(seed)\n', (719, 725), False, 'import tensorflow\n'), ((750, 816), 'MMR_IVs.util.load_data', 'load_data', (["(ROOT_PATH + '/data/mendelian/' + scenario_name + '.npz')"], {}), "(ROOT_PATH + '/data/mendelian/' + scenario_name + '.npz')\n", (759, 816), False, 'from MMR_IVs.util import ROOT_PATH, load_data\n'), ((905, 939), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (916, 939), False, 'import os\n'), ((4325, 4339), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (4333, 4339), True, 'import numpy as np\n'), ((3984, 4006), 'numpy.mean', 'np.mean', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (3991, 4006), True, 'import numpy as np\n'), ((4020, 4041), 'numpy.std', 'np.std', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (4026, 4041), True, 'import numpy as np\n'), ((4175, 4197), 'numpy.mean', 'np.mean', (['times'], {'axis': '(0)'}), '(times, axis=0)\n', (4182, 4197), True, 'import numpy as np\n'), ((4197, 4218), 'numpy.std', 'np.std', (['times'], {'axis': '(0)'}), '(times, axis=0)\n', (4203, 4218), True, 'import numpy as np\n'), ((1142, 1152), 'baselines.all_baselines.DirectNN', 'DirectNN', ([], {}), '()\n', (1150, 1152), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1191, 1204), 'baselines.all_baselines.Vanilla2SLS', 'Vanilla2SLS', ([], {}), '()\n', (1202, 1204), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1240, 1250), 'baselines.all_baselines.Poly2SLS', 'Poly2SLS', ([], {}), '()\n', (1248, 1250), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1281, 1315), 'baselines.all_baselines.GMM', 'GMM', ([], {'g_model': '"""2-layer"""', 'n_steps': '(20)'}), "(g_model='2-layer', n_steps=20)\n", (1284, 1315), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1347, 1353), 'baselines.all_baselines.AGMM', 'AGMM', ([], {}), '()\n', (1351, 1353), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1387, 1395), 'baselines.all_baselines.DeepIV', 'DeepIV', ([], {}), '()\n', (1393, 1395), False, 'from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, GMM, DeepIV, AGMM\n'), ((1775, 1806), 'os.path.join', 'os.path.join', (['folder', 'file_name'], {}), '(folder, file_name)\n', (1787, 1806), False, 'import os\n'), ((1914, 1975), 'numpy.save', 'np.save', (["(folder + '%s_%d_time.npy' % (method_name, rep))", 'time'], {}), "(folder + '%s_%d_time.npy' % (method_name, rep), time)\n", (1921, 1975), True, 'import numpy as np\n'), ((2469, 2500), 'os.path.join', 'os.path.join', (['folder', 'file_name'], {}), '(folder, file_name)\n', (2481, 2500), False, 'import os\n'), ((2520, 2545), 'os.path.exists', 
'os.path.exists', (['save_path'], {}), '(save_path)\n', (2534, 2545), False, 'import os\n'), ((2922, 2947), 'os.path.exists', 'os.path.exists', (['time_path'], {}), '(time_path)\n', (2936, 2947), False, 'import os\n'), ((2573, 2591), 'numpy.load', 'np.load', (['save_path'], {}), '(save_path)\n', (2580, 2591), True, 'import numpy as np\n'), ((2975, 2993), 'numpy.load', 'np.load', (['time_path'], {}), '(time_path)\n', (2982, 2993), True, 'import numpy as np\n')]
|
# This code demonstrates a dynamic step size: the step t shrinks as c0 approaches 200.
#Author: <NAME>, Senior Research Fellow, University of Delhi
#Date: 5-07-2021
from math import *
import numpy as np
c0=50.0
for x in np.arange(c0,580,10):
t=10*(abs(200.1-c0)/200.1)*abs(np.log(0.3/abs(c0-200.1)))
y=1.0/(c0-200.0**2)**2
print(str(c0)+" "+str(y))
c0+=t
if c0> 198 and c0<202:
c0+=1
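# A hedged illustrative check (not part of the original script): the step t shrinks as c0
# approaches 200.1, which keeps the sweep dense near c0 = 200, e.g.
# for c in (50.0, 150.0, 190.0, 199.0):
#     print(c, 10*(abs(200.1-c)/200.1)*abs(np.log(0.3/abs(c-200.1))))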
|
[
"numpy.arange"
] |
[((239, 261), 'numpy.arange', 'np.arange', (['c0', '(580)', '(10)'], {}), '(c0, 580, 10)\n', (248, 261), True, 'import numpy as np\n')]
|
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import time
import coord
import warnings
import treecorr
from test_helper import get_from_wiki, CaptureLog, assert_raises, profile, timer
@timer
def test_dessv():
try:
import fitsio
except ImportError:
print('Skipping dessv test, since fitsio is not installed')
return
#treecorr.set_omp_threads(1);
get_from_wiki('des_sv.fits')
file_name = os.path.join('data','des_sv.fits')
cat = treecorr.Catalog(file_name, ra_col='ra', dec_col='dec', ra_units='deg', dec_units='deg')
# Use an odd number to make sure we force some of the shuffle bits in InitializeCenters
# to happen.
npatch = 43
field = cat.getNField(max_top=5)
t0 = time.time()
patches, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(patches))
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([xyz[patches==i].mean(axis=0) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=1.e-3)
# KMeans minimizes the total inertia.
# Check this value and the rms size, which should also be quite small.
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually < 0.2 * mean
assert np.std(sizes) < 0.1 * np.mean(sizes) # sizes have even less spread usually.
# Should all have similar number of points. Nothing is required here though.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
patches, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
assert np.std(sizes) < 0.1 * np.mean(sizes) # This is only a little bit smaller.
# This doesn't keep the counts as equal as the standard algorithm.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
patches, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(patches) == cat.ntot
assert min(patches) == 0
assert max(patches) == npatch-1
inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
sizes *= 180. / np.pi * 60. # convert to arcmin
counts = np.array([np.sum(patches==i) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
print('mean size = ',np.mean(sizes))
print('rms size = ',np.std(sizes))
assert np.sum(inertia) < 210.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
assert np.std(sizes) < 0.15 * np.mean(sizes)
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
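# A hedged illustrative sketch (not part of the test suite): the "inertia" checked throughout
# these tests is the usual k-means objective, the (optionally weighted) sum of squared
# distances from each point to its patch center, computed per patch exactly as in the arrays
# above, e.g.
# def total_inertia(xyz, patches, cen, w=None):
#     w = np.ones(len(xyz)) if w is None else w
#     return sum(np.sum(w[patches == i, None] * (xyz[patches == i] - c)**2)
#                for i, c in enumerate(cen))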
@timer
def test_radec():
# Very similar to the above, but with a random set of points, so it will run even
# if the user doesn't have fitsio installed.
# In addition, we add weights to make sure that works.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
npatch = 111
field = cat.getNField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually small mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 210.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_3d():
# Like the above, but using x,y,z positions.
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal) + 1
cat = treecorr.Catalog(x=x, y=y, z=z, w=w)
npatch = 111
field = cat.getNField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
xyz = np.array([x, y, z]).T
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually small mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
    # Should be the same thing with ra, dec, r
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
r = (x**2 + y**2 + z**2)**0.5
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', r=r, w=w)
field = cat2.getNField()
t0 = time.time()
p2, cen = field.run_kmeans(npatch)
t1 = time.time()
inertia = np.array([np.sum(w[p2==i][:,None] * (xyz[p2==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p2==i]) for i in range(npatch)])
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually small mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.1 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getNField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 33000.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_2d():
# Like the above, but using x,y positions.
# An additional check here is that this works with other fields besides NField, even though
    # in practice NField will almost always be the kind of Field used.
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal) + 1
g1 = rng.normal(0,s, (ngal,) )
g2 = rng.normal(0,s, (ngal,) )
k = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, k=k)
npatch = 111
field = cat.getGField()
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
xy = np.array([x, y]).T
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually small mean
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
t0 = time.time()
p, cen = field.run_kmeans(npatch, alt=True)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.1 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Finally, use a field with lots of top level cells to check the other branch in
# InitializeCenters.
field = cat.getKField(min_top=10)
t0 = time.time()
p, cen = field.run_kmeans(npatch)
t1 = time.time()
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
# This doesn't give as good an initialization, so these are a bit worse usually.
print('With min_top=10:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 5300.
assert np.std(inertia) < 0.4 * np.mean(inertia) # I've seen over 0.3 x mean here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
@timer
def test_init_random():
# Test the init=random option
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, z=z)
xyz = np.array([x, y, z]).T
# Skip the refine_centers step.
print('3d with init=random')
npatch = 10
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
# Use higher max_iter, since random isn't a great initialization.
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Use a field with lots of top level cells
print('3d with init=random, min_top=10')
field = cat.getNField(min_top=10)
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in 2d
print('2d with init=random')
cat = treecorr.Catalog(x=x, y=y)
xy = np.array([x, y]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 2)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in spherical
print('spher with init=random')
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
xyz = np.array([cat.x, cat.y, cat.z]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'random')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
with assert_raises(ValueError):
field.run_kmeans(npatch, init='invalid')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch, init='invalid')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal*2, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal+1, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=0, init='random')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=-100, init='random')
# Should be valid to give npatch = 1, although not particularly useful.
cen_1 = field.kmeans_initialize_centers(npatch=1, init='random')
p_1 = field.kmeans_assign_patches(cen_1)
np.testing.assert_equal(p_1, np.zeros(ngal))
# If same number of patches as galaxies, each galaxy gets a patch.
# (This is stupid of course, but check that it doesn't fail.)
# Do this with fewer points though, since it's not particularly fast with N=10^5.
n = 100
cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
field = cat.getNField()
cen_n = field.kmeans_initialize_centers(npatch=n, init='random')
p_n = field.kmeans_assign_patches(cen_n)
np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_init_kmpp():
    # Test the init=kmeans++ option
ngal = 100000
s = 1.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
z = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y, z=z)
xyz = np.array([x, y, z]).T
# Skip the refine_centers step.
print('3d with init=kmeans++')
npatch = 10
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
# Use higher max_iter, since random isn't a great initialization.
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Use a field with lots of top level cells
print('3d with init=kmeans++, min_top=10')
field = cat.getNField(min_top=10)
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in 2d
print('2d with init=kmeans++')
cat = treecorr.Catalog(x=x, y=y)
xy = np.array([x, y]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 2)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
# Repeat in spherical
print('spher with init=kmeans++')
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
xyz = np.array([cat.x, cat.y, cat.z]).T
field = cat.getNField()
cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
assert cen1.shape == (npatch, 3)
p1 = field.kmeans_assign_patches(cen1)
print('patches = ',np.unique(p1))
assert len(p1) == cat.ntot
assert min(p1) == 0
assert max(p1) == npatch-1
inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
print('counts = ',counts1)
print('rms counts = ',np.std(counts1))
print('total inertia = ',np.sum(inertia1))
# Now run the normal way
p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000)
inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
print('rms counts => ',np.std(counts2))
print('total inertia => ',np.sum(inertia2))
assert np.sum(inertia2) < np.sum(inertia1)
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal*2, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=ngal+1, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=0, init='kmeans++')
with assert_raises(ValueError):
field.kmeans_initialize_centers(npatch=-100, init='kmeans++')
# Should be valid to give npatch = 1, although not particularly useful.
cen_1 = field.kmeans_initialize_centers(npatch=1, init='kmeans++')
p_1 = field.kmeans_assign_patches(cen_1)
np.testing.assert_equal(p_1, np.zeros(ngal))
# If same number of patches as galaxies, each galaxy gets a patch.
# (This is stupid of course, but check that it doesn't fail.)
# Do this with fewer points though, since it's not particularly fast with N=10^5.
n = 100
cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
field = cat.getNField()
cen_n = field.kmeans_initialize_centers(npatch=n, init='kmeans++')
p_n = field.kmeans_assign_patches(cen_n)
np.testing.assert_equal(sorted(p_n), list(range(n)))
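# A hedged illustrative sketch (not part of the test suite): "kmeans++"-style initialization
# chooses each new center with probability proportional to the squared distance from the
# centers picked so far (pure numpy, for intuition only):
# def kmpp_init(xyz, npatch, rng):
#     cen = [xyz[rng.randint(len(xyz))]]
#     for _ in range(npatch - 1):
#         d2 = np.min([np.sum((xyz - c)**2, axis=1) for c in cen], axis=0)
#         cen.append(xyz[rng.choice(len(xyz), p=d2 / d2.sum())])
#     return np.array(cen)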
@timer
def test_zero_weight():
# Based on test_ra_dec, but where many galaxies have w=0.
# There used to be a bug where w=0 objects were not assigned to any patch.
ngal = 10000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = np.zeros(ngal)
w[np.random.choice(range(ngal), ngal//10, replace=False)] = 1.0
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
keep_zero_weight=True)
treecorr.set_omp_threads(1)
npatch = 16
field = cat.getNField()
t0 = time.time()
p, c = field.run_kmeans(npatch)
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
print('w>0 patches = ',np.unique(p[w>0]))
print('w==0 patches = ',np.unique(p[w==0]))
assert set(p[w>0]) == set(p[w==0])
@timer
def test_catalog_sphere():
# This follows the same path as test_radec, but using the Catalog API to run kmeans.
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
npatch = 111
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w, npatch=npatch)
t0 = time.time()
p = cat.patch
cen = cat.patch_centers
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
# Check the returned center to a direct calculation.
xyz = np.array([cat.x, cat.y, cat.z]).T
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
assert np.std(inertia) < 0.3 * np.mean(inertia) # rms is usually small mean
# With weights, these aren't actually all that similar. The range is more than a
# factor of 10. I think because it varies whether high weight points happen to be near the
# edges or middles of patches, so the total weight varies when you target having the
# inertias be relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
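    # (The alternate algorithm, selected with kmeans_alt=True, appears to trade a little
    # total inertia for a more uniform inertia across patches, hence the tighter rms check.)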
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
npatch=npatch, kmeans_alt=True)
t0 = time.time()
p = cat2.patch
cen = cat2.patch_centers
t1 = time.time()
assert len(p) == cat2.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check using patch_centers from (ra,dec) -> (ra,dec,r)
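    # Since the patches were defined on the sky, adding r shouldn't change the assignments.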
cat3 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
patch_centers=cat2.patch_centers)
np.testing.assert_array_equal(cat2.patch, cat3.patch)
np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)

@timer
def test_catalog_3d():
    # With ra, dec, r, the Catalog API should make patches using only RA, Dec,
    # not the full 3D positions.
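    # The patch centers should then be unit vectors on the sphere, which is why the direct
    # check below normalizes the positions by r before comparing.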
ngal = 100000
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
print('minra = ',np.min(ra) * coord.radians / coord.degrees)
print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
npatch = 111
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
npatch=npatch)
t0 = time.time()
p = cat.patch
cen = cat.patch_centers
t1 = time.time()
print('patches = ',np.unique(p))
assert len(p) == cat.ntot
assert min(p) == 0
assert max(p) == npatch-1
    # Check the returned centers against a direct calculation.
xyz = np.array([cat.x/cat.r, cat.y/cat.r, cat.z/cat.r]).T
print('cen = ',cen)
print('xyz = ',xyz)
direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With standard algorithm:')
print('time = ',t1-t0)
print('inertia = ',inertia)
print('counts = ',counts)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # This is specific to this particular field and npatch.
    assert np.std(inertia) < 0.3 * np.mean(inertia)  # rms is usually small compared to the mean
    # With weights, the counts aren't actually all that similar.  The range is more than a
    # factor of 10.  This is probably because it matters whether high-weight points happen to
    # fall near the edges or the middles of patches, so the total weight per patch varies when
    # the algorithm targets making the inertias relatively similar.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check the alternate algorithm. rms inertia should be lower.
cat2 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
npatch=npatch, kmeans_alt=True)
t0 = time.time()
p = cat2.patch
cen = cat2.patch_centers
t1 = time.time()
assert len(p) == cat2.ntot
assert min(p) == 0
assert max(p) == npatch-1
inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
print('With alternate algorithm:')
print('time = ',t1-t0)
print('total inertia = ',np.sum(inertia))
print('mean inertia = ',np.mean(inertia))
print('rms inertia = ',np.std(inertia))
assert np.sum(inertia) < 200. # Total shouldn't increase much. (And often decreases.)
assert np.std(inertia) < 0.15 * np.mean(inertia) # rms should be even smaller here.
print('mean counts = ',np.mean(counts))
print('min counts = ',np.min(counts))
print('max counts = ',np.max(counts))
# Check using patch_centers from (ra,dec,r) -> (ra,dec)
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
patch_centers=cat2.patch_centers)
np.testing.assert_array_equal(cat2.patch, cat3.patch)
np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)

if __name__ == '__main__':
test_dessv()
test_radec()
test_3d()
test_2d()
test_init_random()
test_init_kmpp()
test_zero_weight()
test_catalog_sphere()
test_catalog_3d()
|
from .. import Explanation
from ..utils import OpChain
from . import colors
import matplotlib.pyplot as pl  # needed by convert_color() below
import numpy as np
def convert_color(color):
try:
color = pl.get_cmap(color)
except:
pass
if color == "shap_red":
color = colors.red_rgb
elif color == "shap_blue":
color = colors.blue_rgb
return color
def convert_ordering(ordering, shap_values):
if issubclass(type(ordering), OpChain):
ordering = ordering.apply(Explanation(shap_values))
if issubclass(type(ordering), Explanation):
if "argsort" in [op["name"] for op in ordering.op_history]:
ordering = ordering.values
else:
ordering = ordering.argsort.flip.values
return ordering
def get_sort_order(dist, clust_order, cluster_threshold, feature_order):
""" Returns a sorted order of the values where we respect the clustering order when dist[i,j] < cluster_threshold
"""
#feature_imp = np.abs(values)
# if partition_tree is not None:
# new_tree = fill_internal_max_values(partition_tree, shap_values)
# clust_order = sort_inds(new_tree, np.abs(shap_values))
clust_inds = np.argsort(clust_order)
feature_order = feature_order.copy()#order.apply(Explanation(shap_values))
# print("feature_order", feature_order)
for i in range(len(feature_order)-1):
ind1 = feature_order[i]
next_ind = feature_order[i+1]
next_ind_pos = i + 1
for j in range(i+1,len(feature_order)):
ind2 = feature_order[j]
#if feature_imp[ind] >
# if ind1 == 2:
# print(ind1, ind2, dist[ind1,ind2])
if dist[ind1,ind2] <= cluster_threshold:
# if ind1 == 2:
# print(clust_inds)
# print(ind1, ind2, next_ind, dist[ind1,ind2], clust_inds[ind2], clust_inds[next_ind])
if dist[ind1,next_ind] > cluster_threshold or clust_inds[ind2] < clust_inds[next_ind]:
next_ind = ind2
next_ind_pos = j
# print("next_ind", next_ind)
# print("next_ind_pos", next_ind_pos)
# insert the next_ind next
for j in range(next_ind_pos, i+1, -1):
#print("j", j)
feature_order[j] = feature_order[j-1]
feature_order[i+1] = next_ind
#print(feature_order)
return feature_order
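# Illustrative sketch (not part of the original module), showing the intent of
# get_sort_order on made-up numbers: features 0 and 2 are the only pair closer than the
# threshold, so an importance order of [0, 1, 2] is rearranged to keep them adjacent.
#
#   dist = np.array([[0.0, 0.9, 0.1],
#                    [0.9, 0.0, 0.8],
#                    [0.1, 0.8, 0.0]])
#   clust_order = [0, 2, 1]                 # leaf order, e.g. from sort_inds below
#   feature_order = np.array([0, 1, 2])     # e.g. features sorted by mean |SHAP| value
#   get_sort_order(dist, clust_order, 0.5, feature_order)   # -> array([0, 2, 1])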
def merge_nodes(values, partition_tree):
""" This merges the two clustered leaf nodes with the smallest total value.
"""
M = partition_tree.shape[0] + 1
ptind = 0
min_val = np.inf
for i in range(partition_tree.shape[0]):
ind1 = int(partition_tree[i,0])
ind2 = int(partition_tree[i,1])
if ind1 < M and ind2 < M:
val = np.abs(values[ind1]) + np.abs(values[ind2])
if val < min_val:
min_val = val
ptind = i
#print("ptind", ptind, min_val)
ind1 = int(partition_tree[ptind,0])
ind2 = int(partition_tree[ptind,1])
if ind1 > ind2:
tmp = ind1
ind1 = ind2
ind2 = tmp
partition_tree_new = partition_tree.copy()
for i in range(partition_tree_new.shape[0]):
i0 = int(partition_tree_new[i,0])
i1 = int(partition_tree_new[i,1])
if i0 == ind2:
partition_tree_new[i,0] = ind1
elif i0 > ind2:
partition_tree_new[i,0] -= 1
if i0 == ptind + M:
partition_tree_new[i,0] = ind1
elif i0 > ptind + M:
partition_tree_new[i,0] -= 1
if i1 == ind2:
partition_tree_new[i,1] = ind1
elif i1 > ind2:
partition_tree_new[i,1] -= 1
if i1 == ptind + M:
partition_tree_new[i,1] = ind1
elif i1 > ptind + M:
partition_tree_new[i,1] -= 1
partition_tree_new = np.delete(partition_tree_new, ptind, axis=0)
# update the counts to be correct
fill_counts(partition_tree_new)
return partition_tree_new, ind1, ind2
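# Illustrative sketch (not part of the original module): on a tiny, made-up scipy-style
# linkage matrix, merge_nodes collapses the pair of sibling leaves with the smallest
# combined |value| (here leaves 0 and 1) and returns the reduced tree together with the
# two merged leaf indices.
#
#   values = np.array([1.0, 0.2, 3.0])
#   partition_tree = np.array([[0., 1., 0.5, 2.],
#                              [2., 3., 1.0, 3.]])
#   new_tree, kept, dropped = merge_nodes(values, partition_tree)   # kept=0, dropped=1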
def dendrogram_coords(leaf_positions, partition_tree):
""" Returns the x and y coords of the lines of a dendrogram where the leaf order is given.
Note that scipy can compute these coords as well, but it does not allow you to easily specify
a specific leaf order, hence this reimplementation.
"""
xout = []
yout = []
_dendrogram_coords_rec(partition_tree.shape[0]-1, leaf_positions, partition_tree, xout, yout)
return np.array(xout), np.array(yout)
def _dendrogram_coords_rec(pos, leaf_positions, partition_tree, xout, yout):
M = partition_tree.shape[0] + 1
if pos < 0:
return leaf_positions[pos + M], 0
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
x_left, y_left = _dendrogram_coords_rec(left, leaf_positions, partition_tree, xout, yout)
x_right, y_right = _dendrogram_coords_rec(right, leaf_positions, partition_tree, xout, yout)
y_curr = partition_tree[pos, 2]
xout.append([x_left, x_left, x_right, x_right])
yout.append([y_left, y_curr, y_curr, y_right])
return (x_left + x_right) / 2, y_curr
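# Illustrative sketch (not part of the original module): for a two-merge linkage with the
# leaves drawn at x = 0, 1, 2, dendrogram_coords returns one bracket-shaped polyline per
# merge, ready to be drawn with matplotlib.
#
#   partition_tree = np.array([[0., 1., 0.5, 2.],
#                              [2., 3., 1.0, 3.]])
#   xlines, ylines = dendrogram_coords([0, 1, 2], partition_tree)
#   # for xs, ys in zip(xlines, ylines):
#   #     pl.plot(xs, ys, color="black")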
def fill_internal_max_values(partition_tree, leaf_values):
""" This fills the forth column of the partition tree matrix with the max leaf value in that cluster.
"""
M = partition_tree.shape[0] + 1
new_tree = partition_tree.copy()
for i in range(new_tree.shape[0]):
val = 0
if new_tree[i,0] < M:
ind = int(new_tree[i,0])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,0])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
if new_tree[i,1] < M:
ind = int(new_tree[i,1])
val = max(val, np.abs(leaf_values[ind]))
else:
ind = int(new_tree[i,1])-M
val = max(val, np.abs(new_tree[ind,3])) # / partition_tree[ind,2])
new_tree[i,3] = val
return new_tree
def fill_counts(partition_tree):
""" This updates the
"""
M = partition_tree.shape[0] + 1
for i in range(partition_tree.shape[0]):
val = 0
if partition_tree[i,0] < M:
ind = int(partition_tree[i,0])
val += 1
else:
ind = int(partition_tree[i,0])-M
val += partition_tree[ind,3]
if partition_tree[i,1] < M:
ind = int(partition_tree[i,1])
val += 1
else:
ind = int(partition_tree[i,1])-M
val += partition_tree[ind,3]
partition_tree[i,3] = val
def sort_inds(partition_tree, leaf_values, pos=None, inds=None):
if inds is None:
inds = []
if pos is None:
partition_tree = fill_internal_max_values(partition_tree, leaf_values)
pos = partition_tree.shape[0]-1
M = partition_tree.shape[0] + 1
if pos < 0:
inds.append(pos + M)
return
left = int(partition_tree[pos, 0]) - M
right = int(partition_tree[pos, 1]) - M
left_val = partition_tree[left,3] if left >= 0 else leaf_values[left + M]
right_val = partition_tree[right,3] if right >= 0 else leaf_values[right + M]
if left_val < right_val:
tmp = right
right = left
left = tmp
sort_inds(partition_tree, leaf_values, left, inds)
sort_inds(partition_tree, leaf_values, right, inds)
return inds
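# Illustrative sketch (not part of the original module): sort_inds walks the tree and
# puts the cluster holding the largest |value| first. Made-up numbers:
#
#   partition_tree = np.array([[0., 1., 0.5, 2.],
#                              [2., 3., 1.0, 3.]])
#   sort_inds(partition_tree, np.abs(np.array([1.0, 0.2, 3.0])))   # -> [2, 0, 1]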
|
[
"numpy.argsort",
"numpy.abs",
"numpy.array",
"numpy.delete"
] |
[((1167, 1190), 'numpy.argsort', 'np.argsort', (['clust_order'], {}), '(clust_order)\n', (1177, 1190), True, 'import numpy as np\n'), ((3993, 4037), 'numpy.delete', 'np.delete', (['partition_tree_new', 'ptind'], {'axis': '(0)'}), '(partition_tree_new, ptind, axis=0)\n', (4002, 4037), True, 'import numpy as np\n'), ((4621, 4635), 'numpy.array', 'np.array', (['xout'], {}), '(xout)\n', (4629, 4635), True, 'import numpy as np\n'), ((4637, 4651), 'numpy.array', 'np.array', (['yout'], {}), '(yout)\n', (4645, 4651), True, 'import numpy as np\n'), ((2856, 2876), 'numpy.abs', 'np.abs', (['values[ind1]'], {}), '(values[ind1])\n', (2862, 2876), True, 'import numpy as np\n'), ((2879, 2899), 'numpy.abs', 'np.abs', (['values[ind2]'], {}), '(values[ind2])\n', (2885, 2899), True, 'import numpy as np\n'), ((5708, 5732), 'numpy.abs', 'np.abs', (['leaf_values[ind]'], {}), '(leaf_values[ind])\n', (5714, 5732), True, 'import numpy as np\n'), ((5814, 5838), 'numpy.abs', 'np.abs', (['new_tree[ind, 3]'], {}), '(new_tree[ind, 3])\n', (5820, 5838), True, 'import numpy as np\n'), ((5960, 5984), 'numpy.abs', 'np.abs', (['leaf_values[ind]'], {}), '(leaf_values[ind])\n', (5966, 5984), True, 'import numpy as np\n'), ((6066, 6090), 'numpy.abs', 'np.abs', (['new_tree[ind, 3]'], {}), '(new_tree[ind, 3])\n', (6072, 6090), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
from read_data import get_X_y
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pickle
class NN():
def __init__(self, batch_size = 300, graph = tf.get_default_graph(),test_size = 0.1, steps_back=8, num_TCL=30):
self.num_TCL = num_TCL
with graph.as_default():
# Training Parameters
self.learning_rate = 0.1
self.num_steps = 100000
self.steps_back = steps_back
self.batch_size = batch_size
if batch_size==1:
self.test_proportion = 0
else:
self.test_proportion = test_size
self.batch_tr_size = int(self.batch_size * (1 - self.test_proportion))
self.test_size = int(self.test_proportion*self.batch_size)
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') # dropout (keep probability)
# display_step = 10
# Network Parameters
            self.cnn_num_input = num_TCL # width of the CNN input: one value per TCL at each time step
            self.fc_num_input = 4
            self.num_output = 1 # single regression output
self.dropout = 0.85 # Dropout, probability to keep units
# Placeholders
self.Xb = tf.placeholder(tf.float32, [self.batch_tr_size, self.steps_back, self.cnn_num_input],name='Xb')
self.Xe = tf.placeholder(tf.float32, [self.batch_tr_size, 1, 4], name='Xe')
self.Y = tf.placeholder(tf.float32, [self.batch_tr_size, self.num_output], name='Y')
if self.test_proportion != 0:
# Test Placeholders
self.Xb_test = tf.placeholder(tf.float32, [self.test_size, self.steps_back, self.cnn_num_input],name='Xb_test')
self.Xe_test = tf.placeholder(tf.float32, [self.test_size, 1, 4], name='Xe_test')
self.Y_test = tf.placeholder(tf.float32, [self.test_size, self.num_output], name='Y_test')
# Store layers weight & bias
self.weights = {
# 5x5 conv
'wc1': tf.Variable(tf.random_normal([2, 8, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([2, 8, 32, 64])),
# fully connected for cnn
'wd1': tf.Variable(tf.random_normal([self.steps_back*self.cnn_num_input*64//4, 1024])),
'wd11': tf.Variable(tf.random_normal([1024, 20])),
# fully connected for fl_net,
'wd2': tf.Variable(tf.random_normal([4, 20])),
# 1024+10 inputs, 1 output (class prediction)
'out': tf.Variable(tf.random_normal([20+20, 50])),
# second fuly connected layer 100 inputs and 1 output
'out2': tf.Variable(tf.random_normal([50, self.num_output]))
}
self.biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([1024])),
'bd11': tf.Variable(tf.random_normal([20])),
'bd2': tf.Variable(tf.random_normal([20])),
'out': tf.Variable(tf.random_normal([50])),
'out2': tf.Variable(tf.random_normal([self.num_output]))
}
# Create some wrappers for simplicity
def conv2d(self, x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(self, x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(self,xb):
xb = tf.reshape(xb, shape=[-1, self.steps_back, self.num_TCL, 1])
# Convolution Layer
conv1 = self.conv2d(xb, self.weights['wc1'],self.biases['bc1'])
# Max Pooling (down-sampling)
conv1 = self.maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = self.conv2d(conv1, self.weights['wc2'], self.biases['bc2'])
# Max Pooling (down-sampling)
# conv2 = self.maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
conv2_reshaped = tf.reshape(conv2, [-1, self.weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(conv2_reshaped, self.weights['wd1']), self.biases['bd1'])
fc1_relued = tf.nn.relu(fc1)
fc11 = tf.add(tf.matmul(fc1_relued, self.weights['wd11']), self.biases['bd11'])
fc11_relued = tf.nn.relu(fc11)
## Apply Dropout
return tf.nn.dropout(fc11_relued, self.keep_prob)
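    # Note (added for clarity): with the single 2x2 max-pool above, the feature map is
    # halved in both spatial dimensions, so conv2 flattens to
    # steps_back * cnn_num_input * 64 // 4 values per sample, matching the first
    # dimension of weights['wd1'].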
def fc_net(self,xe):
xe = tf.reshape(xe, shape=[-1, self.weights['wd2'].get_shape().as_list()[0]])
fc2 = tf.add(tf.matmul(xe, self.weights['wd2']), self.biases['bd2'])
return tf.nn.relu(fc2)
def combined_net(self, graph = tf.get_default_graph()):
with graph.as_default():
conv_component = self.conv_net(self.Xb)
fc_component = self.fc_net(self.Xe)
            # concatenate the two components
fc = tf.concat([conv_component,fc_component], axis=1)
# another fc net with sigmoid
fc3 = tf.add(tf.matmul(fc, self.weights['out']), self.biases['out'])
fc3_sigmoided = tf.nn.sigmoid(fc3)
#linear fc
prediction = tf.add(tf.matmul(fc3_sigmoided, self.weights['out2']), self.biases['out2'], name="prediction")
# Define loss and optimizer
loss_op = tf.losses.mean_squared_error(predictions = prediction ,labels = self.Y)
optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
train_op = optimizer.minimize(loss_op,name="train_op")
if self.test_proportion != 0:
# Test graph
conv_component_test = self.conv_net(graph.get_tensor_by_name("Xb_test:0"))
fc_component_test = self.fc_net(graph.get_tensor_by_name("Xe_test:0"))
                # concatenate the two components
fc_test = tf.concat([conv_component_test, fc_component_test], axis=1)
# another fc net with sigmoid
fc3_test = tf.add(tf.matmul(fc_test, self.weights['out']), self.biases['out'])
fc3_sigmoided_test = tf.nn.sigmoid(fc3_test)
# linear fc
prediction_test = tf.add(tf.matmul(fc3_sigmoided_test, self.weights['out2']), self.biases['out2'], name="prediction_test")
loss_op_test = tf.losses.mean_squared_error(predictions=prediction_test, labels=self.Y_test)
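            # Note (added for clarity): run_sess() below fetches these ops purely by name
            # ("train_op", "mean_squared_error/value:0" for the training loss and
            # "mean_squared_error_1/value:0" for the validation loss), so the two
            # tf.losses.mean_squared_error calls above must be created in this order.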
def run_sess(self, sess, batch_xb, batch_xe, batch_y, saver, name):
graph = sess.graph
batch_xe = np.reshape(batch_xe,[-1,1,self.fc_num_input])
batch_xb = np.reshape(batch_xb, [-1, self.steps_back, self.cnn_num_input])
batch_y = np.reshape(batch_y,[-1,self.num_output])
batch_tr_xe = batch_xe[:self.batch_tr_size]
batch_test_xe = batch_xe[self.batch_tr_size:]
batch_tr_xb = batch_xb[:self.batch_tr_size]
batch_test_xb = batch_xb[self.batch_tr_size:]
batch_tr_y = batch_y[:self.batch_tr_size]
batch_test_y = batch_y[self.batch_tr_size:]
overfitting=0
for step in range(1, self.num_steps + 1):
# Run optimization op (backprop)
sess.run("train_op", feed_dict={graph.get_tensor_by_name("Xb:0"): batch_tr_xb,
graph.get_tensor_by_name("Xe:0"): batch_tr_xe,
graph.get_tensor_by_name("Y:0"): batch_tr_y,
graph.get_tensor_by_name("keep_prob:0"): self.dropout})
# Calculate batch loss
training_l = sess.run("mean_squared_error/value:0",
feed_dict={graph.get_tensor_by_name("Xb:0"): batch_tr_xb,
graph.get_tensor_by_name("Xe:0"): batch_tr_xe,
graph.get_tensor_by_name("Y:0"): batch_tr_y,
graph.get_tensor_by_name("keep_prob:0"): 1.0})
test_l = sess.run("mean_squared_error_1/value:0",
feed_dict={graph.get_tensor_by_name("Xb_test:0"): batch_test_xb,
graph.get_tensor_by_name("Xe_test:0"): batch_test_xe,
graph.get_tensor_by_name("Y_test:0"): batch_test_y,
graph.get_tensor_by_name("keep_prob:0"): 1.0})
if step % 10 == 0 or step == 1:
print("Step " + str(step) + ", Minibatch training Loss= " + str(training_l))
print("Step " + str(step) + ", Minibatch validation Loss= " + str(test_l))
if test_l - training_l> 0.015:
overfitting += 1
else: overfitting = 0
if overfitting >= 30 and training_l <= 0.01 :
print("condition satisfied")
break
if test_l < 0.009 and training_l < 0.009 :
print("condition satisfied")
break
# self.training_loss.append(training_l)
# self.validation_loss.append(test_l)
print("Optimization Finished!")
# Save the variables to disk.
save_path = saver.save(sess, name)
print("Model saved in path: %s" % save_path)
def train(self,xb, xe, y, name = "./model0.ckpt", graph = tf.get_default_graph() ):
self.training_loss = []
self.validation_loss = []
with tf.Session(graph=graph) as sess:
saver = tf.train.Saver()
try:
saver.restore(sess, name)
except:
sess.run(tf.global_variables_initializer())
for i in range(xb.shape[0]//self.batch_size):
# Run the initializer
index = i*self.batch_size
self.run_sess(sess, xb[index:index+self.batch_size],xe[index:index+self.batch_size],y[index:index+self.batch_size], saver, name= name)
# plt.plot(range(len(self.training_loss)), self.training_loss, label='Training')
# plt.plot(range(len(self.validation_loss)), self.validation_loss, label='Validation')
# plt.xlabel('Steps')
# # plt.ylabel('Loss')
#
# plt.title("Loss function")
#
# plt.legend()
#
# plt.show()
# def retrain(self,xb, xe, y,sess):
# saver.restore(sess, "./model.ckpt")
# self.run_sess(sess,xb,xe,y)
def predict(self, xb, xe, sess):
# tf Graph input
graph = sess.graph
xb = np.reshape(xb, [-1, self.steps_back, self.cnn_num_input])
xe = np.reshape(xe, [-1, 1, self.fc_num_input])
p = sess.run("prediction:0", feed_dict={graph.get_tensor_by_name("Xb:0"): xb, graph.get_tensor_by_name("Xe:0"): xe, graph.get_tensor_by_name("keep_prob:0"): 1.0})
return p
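# Illustrative usage sketch (not part of the original script; xb_new and xe_new are
# hypothetical arrays): restoring a previously trained checkpoint and predicting a single
# sample. The placeholder batch dimension is fixed at construction, so batch_size must
# match the number of samples fed to predict().
#
#   net = NN(batch_size=1)
#   net.combined_net()
#   with tf.Session() as sess:
#       tf.train.Saver().restore(sess, "./model0.ckpt")
#       q_hat = net.predict(xb_new, xe_new, sess)   # xb_new: (1, 8, 30), xe_new: (1, 1, 4)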
if __name__ == '__main__':
xb, xe, y = get_X_y(steps_back=7, filename="Q_data0.csv")
neural_net = NN(batch_size = 100, steps_back=8)
scaler1 = {}
for i in range(xb.shape[1]):
scaler1[i] = MinMaxScaler(feature_range=(0,1), copy=True)
xb[:,i,:] = scaler1[i].fit_transform(xb[:,i,:])
scaler2 = MinMaxScaler(feature_range=(0,1), copy=True).fit(xe)
scaler3 = MinMaxScaler(feature_range=(0, 1), copy=True).fit(y.reshape(-1,1))
xe= scaler2.transform(xe)
y= scaler3.transform(y.reshape(-1,1))
# graph = tf.Graph()
neural_net.combined_net()
# saver = tf.train.Saver()
# keep_prob = neural_net.keep_prob
# init = tf.global_variables_initializer()
# graph = tf.get_default_graph()
neural_net.train(xb, xe, y)
|
[
"tensorflow.reshape",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.get_default_graph",
"tensorflow.nn.relu",
"tensorflow.concat",
"tensorflow.placeholder",
"numpy.reshape",
"tensorflow.nn.bias_add",
"tensorflow.losses.mean_squared_error",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.nn.max_pool",
"tensorflow.random_normal",
"read_data.get_X_y",
"tensorflow.nn.sigmoid",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout"
] |
[((11564, 11609), 'read_data.get_X_y', 'get_X_y', ([], {'steps_back': '(7)', 'filename': '"""Q_data0.csv"""'}), "(steps_back=7, filename='Q_data0.csv')\n", (11571, 11609), False, 'from read_data import get_X_y\n'), ((239, 261), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (259, 261), True, 'import tensorflow as tf\n'), ((3593, 3661), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, strides, strides, 1], padding='SAME')\n", (3605, 3661), True, 'import tensorflow as tf\n'), ((3675, 3695), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {}), '(x, b)\n', (3689, 3695), True, 'import tensorflow as tf\n'), ((3712, 3725), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3722, 3725), True, 'import tensorflow as tf\n'), ((3807, 3882), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n", (3821, 3882), True, 'import tensorflow as tf\n'), ((3978, 4038), 'tensorflow.reshape', 'tf.reshape', (['xb'], {'shape': '[-1, self.steps_back, self.num_TCL, 1]'}), '(xb, shape=[-1, self.steps_back, self.num_TCL, 1])\n', (3988, 4038), True, 'import tensorflow as tf\n'), ((4723, 4738), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc1'], {}), '(fc1)\n', (4733, 4738), True, 'import tensorflow as tf\n'), ((4851, 4867), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc11'], {}), '(fc11)\n', (4861, 4867), True, 'import tensorflow as tf\n'), ((4910, 4952), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc11_relued', 'self.keep_prob'], {}), '(fc11_relued, self.keep_prob)\n', (4923, 4952), True, 'import tensorflow as tf\n'), ((5164, 5179), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc2'], {}), '(fc2)\n', (5174, 5179), True, 'import tensorflow as tf\n'), ((5220, 5242), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5240, 5242), True, 'import tensorflow as tf\n'), ((7101, 7149), 'numpy.reshape', 'np.reshape', (['batch_xe', '[-1, 1, self.fc_num_input]'], {}), '(batch_xe, [-1, 1, self.fc_num_input])\n', (7111, 7149), True, 'import numpy as np\n'), ((7167, 7230), 'numpy.reshape', 'np.reshape', (['batch_xb', '[-1, self.steps_back, self.cnn_num_input]'], {}), '(batch_xb, [-1, self.steps_back, self.cnn_num_input])\n', (7177, 7230), True, 'import numpy as np\n'), ((7250, 7292), 'numpy.reshape', 'np.reshape', (['batch_y', '[-1, self.num_output]'], {}), '(batch_y, [-1, self.num_output])\n', (7260, 7292), True, 'import numpy as np\n'), ((9992, 10014), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10012, 10014), True, 'import tensorflow as tf\n'), ((11212, 11269), 'numpy.reshape', 'np.reshape', (['xb', '[-1, self.steps_back, self.cnn_num_input]'], {}), '(xb, [-1, self.steps_back, self.cnn_num_input])\n', (11222, 11269), True, 'import numpy as np\n'), ((11284, 11326), 'numpy.reshape', 'np.reshape', (['xe', '[-1, 1, self.fc_num_input]'], {}), '(xe, [-1, 1, self.fc_num_input])\n', (11294, 11326), True, 'import numpy as np\n'), ((11737, 11782), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (11749, 11782), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((898, 942), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (912, 942), True, 'import 
tensorflow as tf\n'), ((1329, 1430), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_tr_size, self.steps_back, self.cnn_num_input]'], {'name': '"""Xb"""'}), "(tf.float32, [self.batch_tr_size, self.steps_back, self.\n cnn_num_input], name='Xb')\n", (1343, 1430), True, 'import tensorflow as tf\n'), ((1448, 1513), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_tr_size, 1, 4]'], {'name': '"""Xe"""'}), "(tf.float32, [self.batch_tr_size, 1, 4], name='Xe')\n", (1462, 1513), True, 'import tensorflow as tf\n'), ((1536, 1611), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_tr_size, self.num_output]'], {'name': '"""Y"""'}), "(tf.float32, [self.batch_tr_size, self.num_output], name='Y')\n", (1550, 1611), True, 'import tensorflow as tf\n'), ((4633, 4679), 'tensorflow.matmul', 'tf.matmul', (['conv2_reshaped', "self.weights['wd1']"], {}), "(conv2_reshaped, self.weights['wd1'])\n", (4642, 4679), True, 'import tensorflow as tf\n'), ((4762, 4805), 'tensorflow.matmul', 'tf.matmul', (['fc1_relued', "self.weights['wd11']"], {}), "(fc1_relued, self.weights['wd11'])\n", (4771, 4805), True, 'import tensorflow as tf\n'), ((5092, 5126), 'tensorflow.matmul', 'tf.matmul', (['xe', "self.weights['wd2']"], {}), "(xe, self.weights['wd2'])\n", (5101, 5126), True, 'import tensorflow as tf\n'), ((5444, 5493), 'tensorflow.concat', 'tf.concat', (['[conv_component, fc_component]'], {'axis': '(1)'}), '([conv_component, fc_component], axis=1)\n', (5453, 5493), True, 'import tensorflow as tf\n'), ((5647, 5665), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['fc3'], {}), '(fc3)\n', (5660, 5665), True, 'import tensorflow as tf\n'), ((5875, 5942), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'predictions': 'prediction', 'labels': 'self.Y'}), '(predictions=prediction, labels=self.Y)\n', (5903, 5942), True, 'import tensorflow as tf\n'), ((5972, 6028), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (5994, 6028), True, 'import tensorflow as tf\n'), ((10102, 10125), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10112, 10125), True, 'import tensorflow as tf\n'), ((10156, 10172), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10170, 10172), True, 'import tensorflow as tf\n'), ((11856, 11901), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (11868, 11901), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((11924, 11969), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (11936, 11969), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1726, 1828), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.test_size, self.steps_back, self.cnn_num_input]'], {'name': '"""Xb_test"""'}), "(tf.float32, [self.test_size, self.steps_back, self.\n cnn_num_input], name='Xb_test')\n", (1740, 1828), True, 'import tensorflow as tf\n'), ((1855, 1921), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.test_size, 1, 4]'], {'name': '"""Xe_test"""'}), "(tf.float32, [self.test_size, 1, 4], name='Xe_test')\n", (1869, 1921), True, 'import tensorflow as tf\n'), ((1953, 2029), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.test_size, self.num_output]'], 
{'name': '"""Y_test"""'}), "(tf.float32, [self.test_size, self.num_output], name='Y_test')\n", (1967, 2029), True, 'import tensorflow as tf\n'), ((5562, 5596), 'tensorflow.matmul', 'tf.matmul', (['fc', "self.weights['out']"], {}), "(fc, self.weights['out'])\n", (5571, 5596), True, 'import tensorflow as tf\n'), ((5723, 5769), 'tensorflow.matmul', 'tf.matmul', (['fc3_sigmoided', "self.weights['out2']"], {}), "(fc3_sigmoided, self.weights['out2'])\n", (5732, 5769), True, 'import tensorflow as tf\n'), ((6430, 6489), 'tensorflow.concat', 'tf.concat', (['[conv_component_test, fc_component_test]'], {'axis': '(1)'}), '([conv_component_test, fc_component_test], axis=1)\n', (6439, 6489), True, 'import tensorflow as tf\n'), ((6671, 6694), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['fc3_test'], {}), '(fc3_test)\n', (6684, 6694), True, 'import tensorflow as tf\n'), ((6896, 6973), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'predictions': 'prediction_test', 'labels': 'self.Y_test'}), '(predictions=prediction_test, labels=self.Y_test)\n', (6924, 6973), True, 'import tensorflow as tf\n'), ((2168, 2199), 'tensorflow.random_normal', 'tf.random_normal', (['[2, 8, 1, 32]'], {}), '([2, 8, 1, 32])\n', (2184, 2199), True, 'import tensorflow as tf\n'), ((2289, 2321), 'tensorflow.random_normal', 'tf.random_normal', (['[2, 8, 32, 64]'], {}), '([2, 8, 32, 64])\n', (2305, 2321), True, 'import tensorflow as tf\n'), ((2403, 2475), 'tensorflow.random_normal', 'tf.random_normal', (['[self.steps_back * self.cnn_num_input * 64 // 4, 1024]'], {}), '([self.steps_back * self.cnn_num_input * 64 // 4, 1024])\n', (2419, 2475), True, 'import tensorflow as tf\n'), ((2509, 2537), 'tensorflow.random_normal', 'tf.random_normal', (['[1024, 20]'], {}), '([1024, 20])\n', (2525, 2537), True, 'import tensorflow as tf\n'), ((2623, 2648), 'tensorflow.random_normal', 'tf.random_normal', (['[4, 20]'], {}), '([4, 20])\n', (2639, 2648), True, 'import tensorflow as tf\n'), ((2750, 2781), 'tensorflow.random_normal', 'tf.random_normal', (['[20 + 20, 50]'], {}), '([20 + 20, 50])\n', (2766, 2781), True, 'import tensorflow as tf\n'), ((2890, 2929), 'tensorflow.random_normal', 'tf.random_normal', (['[50, self.num_output]'], {}), '([50, self.num_output])\n', (2906, 2929), True, 'import tensorflow as tf\n'), ((3013, 3035), 'tensorflow.random_normal', 'tf.random_normal', (['[32]'], {}), '([32])\n', (3029, 3035), True, 'import tensorflow as tf\n'), ((3074, 3096), 'tensorflow.random_normal', 'tf.random_normal', (['[64]'], {}), '([64])\n', (3090, 3096), True, 'import tensorflow as tf\n'), ((3135, 3159), 'tensorflow.random_normal', 'tf.random_normal', (['[1024]'], {}), '([1024])\n', (3151, 3159), True, 'import tensorflow as tf\n'), ((3199, 3221), 'tensorflow.random_normal', 'tf.random_normal', (['[20]'], {}), '([20])\n', (3215, 3221), True, 'import tensorflow as tf\n'), ((3260, 3282), 'tensorflow.random_normal', 'tf.random_normal', (['[20]'], {}), '([20])\n', (3276, 3282), True, 'import tensorflow as tf\n'), ((3321, 3343), 'tensorflow.random_normal', 'tf.random_normal', (['[50]'], {}), '([50])\n', (3337, 3343), True, 'import tensorflow as tf\n'), ((3383, 3418), 'tensorflow.random_normal', 'tf.random_normal', (['[self.num_output]'], {}), '([self.num_output])\n', (3399, 3418), True, 'import tensorflow as tf\n'), ((6572, 6611), 'tensorflow.matmul', 'tf.matmul', (['fc_test', "self.weights['out']"], {}), "(fc_test, self.weights['out'])\n", (6581, 6611), True, 'import tensorflow as tf\n'), ((6766, 6817), 'tensorflow.matmul', 
'tf.matmul', (['fc3_sigmoided_test', "self.weights['out2']"], {}), "(fc3_sigmoided_test, self.weights['out2'])\n", (6775, 6817), True, 'import tensorflow as tf\n'), ((10281, 10314), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10312, 10314), True, 'import tensorflow as tf\n')]
|
'''
Compare the data where they overlap in the uv plane.
No offset correction is needed.
'''
from spectral_cube import SpectralCube
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import os
import scipy.ndimage as nd
from uvcombine.scale_factor import find_scale_factor
from cube_analysis.feather_cubes import feather_compare_cube
from paths import (seventeenB_HI_data_02kms_path,
seventeenB_HI_data_1kms_path,
data_path, allfigs_path)
from constants import hi_freq
from plotting_styles import onecolumn_figure
# Compare with the 1 km/s cube. Higher S/N
# vla_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.image.pbcor.fits"))
vla_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits"))
# pb_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.pb.fits"))
pb_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.pb.fits"))
# PB minimally changes over the frequency range. So just grab one plane
pb_plane = pb_cube[0]
# We need to define a tapered weighting function to ignore emission outside
# of the VLA mosaic
def taper_weights(mask, sigma, nsig_cut=3):
dist = nd.distance_transform_edt(mask)
gauss_dists = np.where(np.logical_and(dist < nsig_cut * sigma, dist > 0.))
flat_dists = np.where(dist >= nsig_cut * sigma)
weight_arr = np.zeros_like(mask, dtype=float)
weight_arr[gauss_dists] = \
np.exp(- (dist[gauss_dists] - nsig_cut * sigma)**2 / (2 * sigma**2))
weight_arr[flat_dists] = 1.
return weight_arr
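# Illustrative sketch (not part of the original script): on a toy rectangular footprint
# the taper is 0 outside the mask, rises smoothly over the first nsig_cut * sigma pixels
# inside the edge, and is exactly 1 deeper in.
#
#   toy_mask = np.zeros((80, 80), dtype=bool)
#   toy_mask[10:-10, 10:-10] = True          # stand-in for the finite-PB footprint
#   toy_weight = taper_weights(toy_mask, 5, nsig_cut=3)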
weight = taper_weights(np.isfinite(pb_plane), 30, nsig_cut=5)
gbt_path = os.path.join(data_path, "GBT")
# gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_02kms.fits"))
gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits"))
beam_fwhm = lambda diam: ((1.18 * hi_freq.to(u.cm, u.spectral())) / diam.to(u.cm)) * u.rad
# Already determined from the 14B HI analysis. Lowered spatial resolution
# due to lack of overlap in the GBT fields centered at M33. So the data were
# gridded with a Gaussian kernel, rather than a jinc function
gbt_eff_beam = beam_fwhm(87.5 * u.m)
# The shortest baseline in the 14B-088 data is ~44 m.
las = (hi_freq.to(u.cm, u.spectral()) / (44 * u.m)).to(u.arcsec, u.dimensionless_angles())
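# For reference, the rough scales implied above: 1.18 * 21.1 cm / 87.5 m ~ 2.8e-3 rad,
# i.e. ~9.8 arcmin for the GBT effective beam (hence the "9.8arcmin" tag in the figure
# names below), and 21.1 cm / 44 m ~ 4.8e-3 rad ~ 16 arcmin for the largest angular
# scale recovered by the VLA mosaic.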
radii, ratios, high_pts, low_pts, chan_out = \
feather_compare_cube(vla_cube, gbt_cube, las,
num_cores=1,
lowresfwhm=gbt_eff_beam,
chunk=50,
verbose=False,
weights=weight,
relax_spectral_check=False,
# NOTE: there is an offset of ~0.4 km/s between the cubes
# The big GBT beam means this really doesn't matter (I
# manually checked). The difference is 0.36 times the
# channel size. I have no idea where this shift is coming
# from since the freq axis used in `gbt_regrid.py` matches
# the frequency in the individual channel MSs used in
# imaging. It's not even a half-channel offset like I
# would expect if the MS frequency was the channel edge...
spec_check_kwargs={'rtol': 0.4})
onecolumn_figure()
sc_factor, sc_err = find_scale_factor(np.hstack(low_pts), np.hstack(high_pts),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor, sc_err))
# Factor: 1.125046+/-0.00394768
# This isn't a fantastic fit, so this error was significantly underestimated
plt.close()
# Compare properties per-channel
sc_factor_chans = []
sc_err_chans = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='distrib',
verbose=False)
sc_factor_chans.append(sc_f)
sc_err_chans.append(sc_e)
sc_factor_chans_linfit = []
sc_err_chans_linfit = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='linfit',
verbose=False)
sc_factor_chans_linfit.append(sc_f)
sc_err_chans_linfit.append(sc_e)
sc_factor_chans_linfit = np.array(sc_factor_chans_linfit)
sc_err_chans_linfit = np.array(sc_err_chans_linfit)
chans = np.arange(len(low_pts))
onecolumn_figure()
plt.errorbar(chans, sc_factor_chans,
yerr=sc_err_chans,
alpha=0.5, label='Distrib Fit')
plt.errorbar(chans, sc_factor_chans_linfit,
yerr=[sc_factor_chans_linfit - sc_err_chans_linfit[:, 0],
sc_err_chans_linfit[:, 1] - sc_factor_chans_linfit],
alpha=0.5, label='Linear fit')
# plt.plot(chans, slope_lowess_85)
plt.axhline(1, linestyle='--')
plt.legend(frameon=True)
plt.ylabel(r"Scale Factor")
plt.xlabel("Channels")
plt.grid(True)
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
plt.close()
# Now refit with the channels near the systemic velocity, where most of the HI
# structure falls within the mosaic PB
chan_range = slice(80, 160)
onecolumn_figure()
sc_factor_chrange, sc_err_chrange = \
find_scale_factor(np.hstack(low_pts[chan_range]),
np.hstack(high_pts[chan_range]),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png"))
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor_chrange, sc_err_chrange))
# Factor: 1.105133+/-0.00463
# Error still underestimated
# The >1 factor is due to some emission in the GBT data being cut-off by the
# PB limit of the VLA mosaic. The factor increases far from the systemic
# velocity, where bright HI gets cut-off (compared to the larger 14B data).
# So, despite the != 1 factor, no factor will be applied to the SD data.
# Besides, the 14B mosaic comparison gives a 1.0 factor with the GBT data.
# The tests here were for consistency and that's what we find.
plt.close()
|
[
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"scipy.ndimage.distance_transform_edt",
"numpy.zeros_like",
"uvcombine.scale_factor.find_scale_factor",
"cube_analysis.feather_cubes.feather_compare_cube",
"matplotlib.pyplot.close",
"plotting_styles.onecolumn_figure",
"numpy.isfinite",
"astropy.units.spectral",
"astropy.units.dimensionless_angles",
"paths.allfigs_path",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.legend",
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"paths.seventeenB_HI_data_1kms_path",
"numpy.logical_and",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((1775, 1805), 'os.path.join', 'os.path.join', (['data_path', '"""GBT"""'], {}), "(data_path, 'GBT')\n", (1787, 1805), False, 'import os\n'), ((2574, 2768), 'cube_analysis.feather_cubes.feather_compare_cube', 'feather_compare_cube', (['vla_cube', 'gbt_cube', 'las'], {'num_cores': '(1)', 'lowresfwhm': 'gbt_eff_beam', 'chunk': '(50)', 'verbose': '(False)', 'weights': 'weight', 'relax_spectral_check': '(False)', 'spec_check_kwargs': "{'rtol': 0.4}"}), "(vla_cube, gbt_cube, las, num_cores=1, lowresfwhm=\n gbt_eff_beam, chunk=50, verbose=False, weights=weight,\n relax_spectral_check=False, spec_check_kwargs={'rtol': 0.4})\n", (2594, 2768), False, 'from cube_analysis.feather_cubes import feather_compare_cube\n'), ((3587, 3605), 'plotting_styles.onecolumn_figure', 'onecolumn_figure', ([], {}), '()\n', (3603, 3605), False, 'from plotting_styles import onecolumn_figure\n'), ((3794, 3808), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3802, 3808), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3856), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$"""'], {}), "('ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$')\n", (3819, 3856), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3874), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3872, 3874), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4225), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4223, 4225), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4915), 'numpy.array', 'np.array', (['sc_factor_chans_linfit'], {}), '(sc_factor_chans_linfit)\n', (4891, 4915), True, 'import numpy as np\n'), ((4938, 4967), 'numpy.array', 'np.array', (['sc_err_chans_linfit'], {}), '(sc_err_chans_linfit)\n', (4946, 4967), True, 'import numpy as np\n'), ((5002, 5020), 'plotting_styles.onecolumn_figure', 'onecolumn_figure', ([], {}), '()\n', (5018, 5020), False, 'from plotting_styles import onecolumn_figure\n'), ((5021, 5113), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['chans', 'sc_factor_chans'], {'yerr': 'sc_err_chans', 'alpha': '(0.5)', 'label': '"""Distrib Fit"""'}), "(chans, sc_factor_chans, yerr=sc_err_chans, alpha=0.5, label=\n 'Distrib Fit')\n", (5033, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5135, 5328), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['chans', 'sc_factor_chans_linfit'], {'yerr': '[sc_factor_chans_linfit - sc_err_chans_linfit[:, 0], sc_err_chans_linfit[:,\n 1] - sc_factor_chans_linfit]', 'alpha': '(0.5)', 'label': '"""Linear fit"""'}), "(chans, sc_factor_chans_linfit, yerr=[sc_factor_chans_linfit -\n sc_err_chans_linfit[:, 0], sc_err_chans_linfit[:, 1] -\n sc_factor_chans_linfit], alpha=0.5, label='Linear fit')\n", (5147, 5328), True, 'import matplotlib.pyplot as plt\n'), ((5401, 5431), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(1)'], {'linestyle': '"""--"""'}), "(1, linestyle='--')\n", (5412, 5431), True, 'import matplotlib.pyplot as plt\n'), ((5432, 5456), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)'}), '(frameon=True)\n', (5442, 5456), True, 'import matplotlib.pyplot as plt\n'), ((5457, 5483), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Scale Factor"""'], {}), "('Scale Factor')\n", (5467, 5483), True, 'import matplotlib.pyplot as plt\n'), ((5485, 5507), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Channels"""'], {}), "('Channels')\n", (5495, 5507), True, 'import matplotlib.pyplot as plt\n'), ((5508, 5522), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5516, 5522), 
True, 'import matplotlib.pyplot as plt\n'), ((5524, 5542), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5540, 5542), True, 'import matplotlib.pyplot as plt\n'), ((5734, 5745), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5743, 5745), True, 'import matplotlib.pyplot as plt\n'), ((5894, 5912), 'plotting_styles.onecolumn_figure', 'onecolumn_figure', ([], {}), '()\n', (5910, 5912), False, 'from plotting_styles import onecolumn_figure\n'), ((6137, 6151), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6145, 6151), True, 'import matplotlib.pyplot as plt\n'), ((6152, 6199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$"""'], {}), "('ln I$_{\\\\rm int}$ / I$_{\\\\rm SD}$')\n", (6162, 6199), True, 'import matplotlib.pyplot as plt\n'), ((6199, 6217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6215, 6217), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7056), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7054, 7056), True, 'import matplotlib.pyplot as plt\n'), ((774, 861), 'paths.seventeenB_HI_data_1kms_path', 'seventeenB_HI_data_1kms_path', (['"""M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits"""'], {}), "(\n 'M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits')\n", (802, 861), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((994, 1067), 'paths.seventeenB_HI_data_1kms_path', 'seventeenB_HI_data_1kms_path', (['"""M33_14B_17B_HI_contsub_width_1kms.pb.fits"""'], {}), "('M33_14B_17B_HI_contsub_width_1kms.pb.fits')\n", (1022, 1067), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((1318, 1349), 'scipy.ndimage.distance_transform_edt', 'nd.distance_transform_edt', (['mask'], {}), '(mask)\n', (1343, 1349), True, 'import scipy.ndimage as nd\n'), ((1447, 1481), 'numpy.where', 'np.where', (['(dist >= nsig_cut * sigma)'], {}), '(dist >= nsig_cut * sigma)\n', (1455, 1481), True, 'import numpy as np\n'), ((1500, 1532), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'float'}), '(mask, dtype=float)\n', (1513, 1532), True, 'import numpy as np\n'), ((1574, 1645), 'numpy.exp', 'np.exp', (['(-(dist[gauss_dists] - nsig_cut * sigma) ** 2 / (2 * sigma ** 2))'], {}), '(-(dist[gauss_dists] - nsig_cut * sigma) ** 2 / (2 * sigma ** 2))\n', (1580, 1645), True, 'import numpy as np\n'), ((1723, 1744), 'numpy.isfinite', 'np.isfinite', (['pb_plane'], {}), '(pb_plane)\n', (1734, 1744), True, 'import numpy as np\n'), ((1950, 2035), 'os.path.join', 'os.path.join', (['gbt_path', '"""17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits"""'], {}), "(gbt_path,\n '17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits')\n", (1962, 2035), False, 'import os\n'), ((2496, 2520), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (2518, 2520), True, 'import astropy.units as u\n'), ((3644, 3662), 'numpy.hstack', 'np.hstack', (['low_pts'], {}), '(low_pts)\n', (3653, 3662), True, 'import numpy as np\n'), ((3664, 3683), 'numpy.hstack', 'np.hstack', (['high_pts'], {}), '(high_pts)\n', (3673, 3683), True, 'import numpy as np\n'), ((3887, 3960), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png"""'], {}), "('Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png')\n", (3899, 3960), False, 'from paths import seventeenB_HI_data_02kms_path, 
seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((3974, 4047), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"""'], {}), "('Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf')\n", (3986, 4047), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((4367, 4428), 'uvcombine.scale_factor.find_scale_factor', 'find_scale_factor', (['low', 'high'], {'method': '"""distrib"""', 'verbose': '(False)'}), "(low, high, method='distrib', verbose=False)\n", (4384, 4428), False, 'from uvcombine.scale_factor import find_scale_factor\n'), ((4667, 4727), 'uvcombine.scale_factor.find_scale_factor', 'find_scale_factor', (['low', 'high'], {'method': '"""linfit"""', 'verbose': '(False)'}), "(low, high, method='linfit', verbose=False)\n", (4684, 4727), False, 'from uvcombine.scale_factor import find_scale_factor\n'), ((5556, 5642), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png"""'], {}), "(\n 'Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png')\n", (5568, 5642), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((5651, 5737), 'paths.allfigs_path', 'allfigs_path', (['"""Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"""'], {}), "(\n 'Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf')\n", (5663, 5737), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((5973, 6003), 'numpy.hstack', 'np.hstack', (['low_pts[chan_range]'], {}), '(low_pts[chan_range])\n', (5982, 6003), True, 'import numpy as np\n'), ((6027, 6058), 'numpy.hstack', 'np.hstack', (['high_pts[chan_range]'], {}), '(high_pts[chan_range])\n', (6036, 6058), True, 'import numpy as np\n'), ((6230, 6356), 'paths.allfigs_path', 'allfigs_path', (['f"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png"""'], {}), "(\n f'Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png'\n )\n", (6242, 6356), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((6360, 6486), 'paths.allfigs_path', 'allfigs_path', (['f"""Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf"""'], {}), "(\n f'Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf'\n )\n", (6372, 6486), False, 'from paths import seventeenB_HI_data_02kms_path, seventeenB_HI_data_1kms_path, data_path, allfigs_path\n'), ((1378, 1429), 'numpy.logical_and', 'np.logical_and', (['(dist < nsig_cut * sigma)', '(dist > 0.0)'], {}), '(dist < nsig_cut * sigma, dist > 0.0)\n', (1392, 1429), True, 'import numpy as np\n'), ((2455, 2467), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (2465, 2467), True, 'import astropy.units as u\n'), ((2085, 2097), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (2095, 2097), True, 'import astropy.units as u\n')]
|
#!/usr/bin/env python3
# Set this to True to enable building extensions using Cython.
# Set it to False to build extensions from the C file (that
# was previously created using Cython).
# Set it to 'auto' to build with Cython if available, otherwise
# from the C file.
import sys
from setuptools import setup, find_packages, Extension
from distutils.command.sdist import sdist as _sdist
import numpy
USE_CYTHON = "auto"
if USE_CYTHON:
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
except ImportError:
if USE_CYTHON == "auto":
USE_CYTHON = False
else:
raise
class CythonModule(object):
def __init__(self, name: str, path: str):
self.name = name
self.path = path
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str) -> None:
self._name = name
@property
def path(self) -> str:
return self._path
@path.setter
def path(self, path: str) -> None:
self._path = path
@property
def pyx(self) -> str:
return self.path + ".pyx"
@property
def c(self) -> str:
return self.path + ".c"
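# Illustrative example (not part of the original setup script): a CythonModule simply
# pairs an importable extension name with the stem of its source file.
#
#   m = CythonModule(name="pkg.fast_cy", path="src/cython/fast_cy")   # hypothetical names
#   m.pyx   # -> "src/cython/fast_cy.pyx"
#   m.c     # -> "src/cython/fast_cy.c"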
cython_modules = [
CythonModule(
name="tyme.base_forecasters.exponential_smoothing_cy",
path="src/cython/exponential_smoothing_cy",
),
CythonModule(
name="tyme.base_forecasters.robust_exponential_smoothing_cy",
path="src/cython/robust_exponential_smoothing_cy",
),
]
if sys.version_info[0] == 2:
raise Exception("Python 2.x is no longer supported")
if USE_CYTHON:
class sdist(_sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are up-to-date
cythonize([module.pyx for module in cython_modules])
_sdist.run(self)
ext_modules = [
Extension(module.name, [module.pyx]) for module in cython_modules
]
cmdclass = dict(build_ext=build_ext, sdist=sdist)
else:
ext_modules = [
Extension(module.name, [module.c]) for module in cython_modules
]
cmdclass = {}
requirements = [
"Bottleneck",
"cycler",
"kiwisolver",
"numpy",
"pandas",
"Pillow",
"pyparsing",
"python-dateutil",
"pytz",
"six",
"scipy",
"Cython",
]
requirements_dev = ["pytest", "pytest-cov", "Cython", "pre-commit", "tox"]
setup(
name="tyme",
# version="0.1.0",
description="A timeseries forecasting package, specialised in forecasting grouped timeseries",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/sam-bailey/tyme",
packages=find_packages(where="src"),
package_dir={"": "src"},
cmdclass=cmdclass,
ext_modules=ext_modules,
include_dirs=[numpy.get_include()],
long_description=open("README.md").read(),
install_requires=requirements,
extras_require={"dev": requirements_dev},
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Cython",
"Topic :: Scientific/Engineering :: Mathematics",
],
keywords="timeseries forecast forecasting time",
)
|
[
"setuptools.Extension",
"Cython.Build.cythonize",
"distutils.command.sdist.sdist.run",
"numpy.get_include",
"setuptools.find_packages"
] |
[((1910, 1946), 'setuptools.Extension', 'Extension', (['module.name', '[module.pyx]'], {}), '(module.name, [module.pyx])\n', (1919, 1946), False, 'from setuptools import setup, find_packages, Extension\n'), ((2070, 2104), 'setuptools.Extension', 'Extension', (['module.name', '[module.c]'], {}), '(module.name, [module.c])\n', (2079, 2104), False, 'from setuptools import setup, find_packages, Extension\n'), ((2689, 2715), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (2702, 2715), False, 'from setuptools import setup, find_packages, Extension\n'), ((1799, 1851), 'Cython.Build.cythonize', 'cythonize', (['[module.pyx for module in cython_modules]'], {}), '([module.pyx for module in cython_modules])\n', (1808, 1851), False, 'from Cython.Build import cythonize\n'), ((1864, 1880), 'distutils.command.sdist.sdist.run', '_sdist.run', (['self'], {}), '(self)\n', (1874, 1880), True, 'from distutils.command.sdist import sdist as _sdist\n'), ((2816, 2835), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2833, 2835), False, 'import numpy\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step04 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step04&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-4-copmarg).
# +
import numpy as np
import pandas as pd
from scipy.stats import t as tstu
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, \
simulate_t, project_trans_matrix
from arpym.tools import histogram_sp, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-parameters)
# t_now is 31-Aug-2012. Set t_hor>t_now
t_hor = np.datetime64('2012-10-26') # the future investment horizon
j_ = 5000 # number of scenarios
d_plot = 97 # projected risk driver to plot
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step00): Load data
# +
path = '../../../databases/temporary-databases/'
# Risk drivers identification
# realizations of risk drivers up to and including time t_now
db_riskdrivers_series = pd.read_csv(path + 'db_riskdrivers_series.csv',
index_col=0, parse_dates=True)
x = db_riskdrivers_series.values
risk_drivers_names = db_riskdrivers_series.columns
# additional information
db_riskdrivers_tools = pd.read_csv(path + 'db_riskdrivers_tools.csv')
d_ = int(db_riskdrivers_tools.d_.dropna())
d_credit = int(db_riskdrivers_tools.d_credit.dropna())
n_stocks = int(db_riskdrivers_tools.n_stocks.dropna())
d_implvol = int(db_riskdrivers_tools.d_implvol.dropna())
n_bonds = int(db_riskdrivers_tools.n_bonds.dropna())
i_bonds = n_bonds * 4 # 4 NS parameters x n_bonds
c_ = int(db_riskdrivers_tools.c_.dropna())
ratings_tnow = np.array(db_riskdrivers_tools.ratings_tnow.dropna())
t_now = np.datetime64(db_riskdrivers_tools.t_now[0], 'D')
# Quest for invariance
# values of invariants
db_invariants_series = pd.read_csv(path + 'db_invariants_series.csv',
index_col=0, parse_dates=True)
epsi = db_invariants_series.values
t_, i_ = np.shape(epsi)
# next step models
db_invariants_nextstep = pd.read_csv(path + 'db_invariants_nextstep.csv')
# parameters for next step models
db_invariants_param = pd.read_csv(path + 'db_invariants_param.csv', index_col=0)
# parameters for GARCH(1,1) next step models
db_garch_sig2 = pd.read_csv(path + 'db_garch_sig2.csv', index_col=0,
parse_dates=True)
# estimated annual credit transition matrix
p_credit = pd.read_csv(path +
'db_invariants_p_credit.csv').values.reshape(c_ + 1, c_ + 1)
# Estimation
# parameters for invariants modeled using Student t distribution
db_estimation_parametric = pd.read_csv(path + 'db_estimation_parametric.csv',
index_col=0)
# estimated probabilities for nonparametric distributions
db_estimation_nonparametric = pd.read_csv(path + 'db_estimation_nonparametric.csv',
index_col=False)
p_marginal = db_estimation_nonparametric.values
# parameters for estimated Student t copula
db_estimation_copula = pd.read_csv(path + 'db_estimation_copula.csv')
nu_copula = int(db_estimation_copula['nu'].iloc[0])
rho2_copula = np.array(db_estimation_copula['rho2']).reshape(i_, i_)
# parameters for the credit copula
db_estimation_credit_copula = pd.read_csv(path + 'db_estimation_credit_copula.csv')
rho2_credit = db_estimation_credit_copula.rho2_credit.values.reshape(2, 2)
nu_credit = db_estimation_credit_copula.nu_credit[0]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step01): Determine number of projection steps and scenario probabilities
# number of monitoring times
m_ = np.busday_count(t_now, t_hor)
# projection scenario probabilities
p = np.ones(j_) / j_
# invariants modeled parametrically
ind_parametric = np.arange(n_stocks + 1 + d_implvol,
n_stocks + 1 + d_implvol + i_bonds)
# invariants modeled nonparametrically
ind_nonparametric = list(set(range(i_)) - set(ind_parametric))
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step02): Projection of invariants
# +
epsi_proj = np.zeros((j_, m_, i_))
for m in range(m_):
# copula scenarios
# simulate standardized invariants scenarios for copula
epsi_tilde_proj = simulate_t(np.zeros(i_), rho2_copula, nu_copula, j_)
# generate invariants scenarios
# invariants modeled nonparametrically
for i in ind_nonparametric:
# project t-copula standardized invariants scenarios
u_proj = tstu.cdf(epsi_tilde_proj[:, i], nu_copula)
epsi_proj[:, m, i] = quantile_sp(u_proj, epsi[:, i], p_marginal[:, i])
# invariants modeled parametrically (estimated as Student t distributed)
for i in ind_parametric:
# project t-copula standardized invariants scenarios
u_proj = tstu.cdf(epsi_tilde_proj[:, i], nu_copula)
mu_marg = db_estimation_parametric.loc['mu', str(i)]
sig2_marg = db_estimation_parametric.loc['sig2', str(i)]
nu_marg = db_estimation_parametric.loc['nu', str(i)]
epsi_proj[:, m, i] = mu_marg + np.sqrt(sig2_marg) * tstu.ppf(u_proj, nu_marg)
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step03): Projection of risk drivers
# +
x_proj = np.zeros((j_, m_ + 1, d_))
dx_proj = np.zeros((j_, m_ + 1, d_))
sig2_garch = np.zeros((j_, m_ + 1, d_))
a_garch = db_invariants_param.loc['a'].values
b_garch = db_invariants_param.loc['b'].values
c_garch = db_invariants_param.loc['c'].values
mu_garch = db_invariants_param.loc['mu'].values
# risk drivers at time t_now are the starting values for all scenarios
x_proj[:, 0, :] = db_riskdrivers_series.iloc[-1, :]
# initialize parameters for GARCH(1,1) projection
d_garch = [d for d in range(d_)
if db_invariants_nextstep.iloc[0, d] == 'GARCH(1,1)']
for d in d_garch:
sig2_garch[:, 0, d] = db_garch_sig2.iloc[-1, d]
dx_proj[:, 0, d] = x[-1, d] - x[-2, d]
# project daily scenarios
for m in range(1, m_ + 1):
for d in range(d_):
# risk drivers modeled as random walk
if db_invariants_nextstep.iloc[0, d] == 'Random walk':
x_proj[:, m, d] = x_proj[:, m - 1, d] + epsi_proj[:, m - 1, d]
# risk drivers modeled as GARCH(1,1)
elif db_invariants_nextstep.iloc[0, d] == 'GARCH(1,1)':
sig2_garch[:, m, d] = c_garch[d] + \
b_garch[d] * sig2_garch[:, m - 1, d] + \
a_garch[d] * (dx_proj[:, m - 1, d] - mu_garch[d]) ** 2
dx_proj[:, m, d] = mu_garch[d] + \
np.sqrt(sig2_garch[:, m, d]) * epsi_proj[:, m - 1, d]
x_proj[:, m, d] = x_proj[:, m - 1, d] + dx_proj[:, m, d]
# risk drivers modeled as AR(1)
elif db_invariants_nextstep.iloc[0, d] == 'AR(1)':
b_ar1 = db_invariants_param.loc['b'][d]
x_proj[:, m, d] = b_ar1 * x_proj[:, m - 1, d] + epsi_proj[:, m - 1, d]
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step04): Projection of credit ratings
# +
# compute the daily credit transition matrix
p_credit_daily = project_trans_matrix(p_credit, 1 / 252)
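# (the annual transition matrix loaded in Step 0 is rescaled to a
# one-business-day horizon, i.e. 1/252 of a year)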
# project ratings
ratings_proj = simulate_markov_chain_multiv(ratings_tnow, p_credit_daily,
m_, rho2=rho2_credit,
nu=nu_credit, j_=j_)
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step05): Save databases
# +
# delete big files
del dx_proj, sig2_garch
# projected risk drivers
out = pd.DataFrame({risk_drivers_names[d]:
x_proj[:, :, d].reshape((j_ * (m_ + 1),))
for d in range(d_)})
out = out[list(risk_drivers_names[:d_].values)]
out.to_csv(path + 'db_projection_riskdrivers.csv', index=None)
del out
# projected credit ratings
out = pd.DataFrame({'GE': ratings_proj[:, :, 0].reshape((j_ * (m_ + 1),)),
'JPM': ratings_proj[:, :, 1].reshape((j_ * (m_ + 1),))})
out.to_csv(path + 'db_projection_ratings.csv', index=None)
del out
# number of scenarios and future investment horizon
out = pd.DataFrame({'j_': pd.Series(j_),
't_hor': pd.Series(t_hor)})
out.to_csv(path + 'db_projection_tools.csv', index=None)
del out
# projected scenario probabilities
out = pd.DataFrame({'p': pd.Series(p)})
out.to_csv(path + 'db_scenario_probs.csv', index=None)
del out
# -
# ## Plots
# +
plt.style.use('arpm')
# number of paths to plot
num_plot = min(j_, 20)
# market risk driver path
fig1 = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
# plot historical series
f1 = plt.plot(np.arange(t_ + 1), db_riskdrivers_series.iloc[:, d_plot - 1], lw=1)
# plot projected series
for j in range(num_plot):
f1 = plt.plot(np.arange(t_ + 1, t_ + 1 + m_ + 1), x_proj[j, :, d_plot - 1], lw=1)
f, xp = histogram_sp(x_proj[:, -1, d_plot - 1], k_=10 * np.log(j_))
f1 = plt.barh(xp, f / 10, height=xp[1] - xp[0], left=t_ + 1 + m_,
facecolor=[.3, .3, .3], edgecolor='k')
plt.title('Projected path: ' + risk_drivers_names[d_plot - 1],
fontweight='bold', fontsize=20)
plt.xlabel('t (days)', fontsize=17)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
add_logo(fig1, set_fig_size=False)
fig1.tight_layout()
# plot projected ratings
# select paths with rating changes
ind_j_plot_GE = np.zeros(1)
ind_j_plot_GE[0] = 0
k = 0
while k < num_plot:
k = k + 1
for j in range(j_):
if (j not in ind_j_plot_GE and
ratings_proj[j, -1, 0] != ratings_proj[k, -1, 0]):
ind_j_plot_GE = np.append(ind_j_plot_GE, j)
break
ind_j_plot_JPM = np.zeros(1)
ind_j_plot_JPM[0] = 0
k = 0
while k < num_plot:
k = k + 1
for j in range(j_):
if (j not in ind_j_plot_JPM and
ratings_proj[j, -1, 1] != ratings_proj[k, -1, 1]):
ind_j_plot_JPM = np.append(ind_j_plot_JPM, j)
break
fig2, ax = plt.subplots(2, 1, figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
plt.sca(ax[0])
for j in ind_j_plot_GE:
f5 = plt.plot(np.arange(m_ + 1), ratings_proj[int(j), :, 0] + 1)
plt.title('Projected rating GE', fontweight='bold', fontsize=20)
plt.yticks(np.arange(10), fontsize=14)
ax[0].set_yticklabels(['', 'AAA', 'AA', 'A', 'BBB', 'BB', 'B', 'CCC', 'D', ''])
plt.gca().invert_yaxis()
plt.sca(ax[1])
for j in ind_j_plot_JPM:
plt.plot(np.arange(m_ + 1), ratings_proj[int(j), :, 1] + 1)
plt.title('Projected rating JPM', fontweight='bold', fontsize=20)
plt.yticks(np.arange(10), fontsize=14)
ax[1].set_yticklabels(['', 'AAA', 'AA', 'A', 'BBB', 'BB', 'B', 'CCC', 'D', ''])
plt.gca().invert_yaxis()
add_logo(fig2, set_fig_size=False)
fig2.tight_layout()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"numpy.ones",
"numpy.shape",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.sqrt",
"scipy.stats.t.cdf",
"arpym.statistics.quantile_sp",
"matplotlib.pyplot.yticks",
"arpym.tools.add_logo",
"numpy.append",
"arpym.statistics.project_trans_matrix",
"scipy.stats.t.ppf",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.barh",
"pandas.Series",
"numpy.log",
"numpy.datetime64",
"numpy.zeros",
"pandas.plotting.register_matplotlib_converters",
"arpym.statistics.simulate_markov_chain_multiv",
"numpy.array",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.xlabel",
"numpy.busday_count"
] |
[((763, 795), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (793, 795), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((1126, 1153), 'numpy.datetime64', 'np.datetime64', (['"""2012-10-26"""'], {}), "('2012-10-26')\n", (1139, 1153), True, 'import numpy as np\n'), ((1565, 1643), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_riskdrivers_series.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'db_riskdrivers_series.csv', index_col=0, parse_dates=True)\n", (1576, 1643), True, 'import pandas as pd\n'), ((1813, 1859), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_riskdrivers_tools.csv')"], {}), "(path + 'db_riskdrivers_tools.csv')\n", (1824, 1859), True, 'import pandas as pd\n'), ((2293, 2342), 'numpy.datetime64', 'np.datetime64', (['db_riskdrivers_tools.t_now[0]', '"""D"""'], {}), "(db_riskdrivers_tools.t_now[0], 'D')\n", (2306, 2342), True, 'import numpy as np\n'), ((2413, 2490), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_series.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'db_invariants_series.csv', index_col=0, parse_dates=True)\n", (2424, 2490), True, 'import pandas as pd\n'), ((2570, 2584), 'numpy.shape', 'np.shape', (['epsi'], {}), '(epsi)\n', (2578, 2584), True, 'import numpy as np\n'), ((2630, 2678), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_nextstep.csv')"], {}), "(path + 'db_invariants_nextstep.csv')\n", (2641, 2678), True, 'import pandas as pd\n'), ((2736, 2794), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_param.csv')"], {'index_col': '(0)'}), "(path + 'db_invariants_param.csv', index_col=0)\n", (2747, 2794), True, 'import pandas as pd\n'), ((2857, 2927), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_garch_sig2.csv')"], {'index_col': '(0)', 'parse_dates': '(True)'}), "(path + 'db_garch_sig2.csv', index_col=0, parse_dates=True)\n", (2868, 2927), True, 'import pandas as pd\n'), ((3221, 3284), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_parametric.csv')"], {'index_col': '(0)'}), "(path + 'db_estimation_parametric.csv', index_col=0)\n", (3232, 3284), True, 'import pandas as pd\n'), ((3413, 3483), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_nonparametric.csv')"], {'index_col': '(False)'}), "(path + 'db_estimation_nonparametric.csv', index_col=False)\n", (3424, 3483), True, 'import pandas as pd\n'), ((3642, 3688), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_copula.csv')"], {}), "(path + 'db_estimation_copula.csv')\n", (3653, 3688), True, 'import pandas as pd\n'), ((3876, 3929), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_estimation_credit_copula.csv')"], {}), "(path + 'db_estimation_credit_copula.csv')\n", (3887, 3929), True, 'import pandas as pd\n'), ((4280, 4309), 'numpy.busday_count', 'np.busday_count', (['t_now', 't_hor'], {}), '(t_now, t_hor)\n', (4295, 4309), True, 'import numpy as np\n'), ((4420, 4491), 'numpy.arange', 'np.arange', (['(n_stocks + 1 + d_implvol)', '(n_stocks + 1 + d_implvol + i_bonds)'], {}), '(n_stocks + 1 + d_implvol, n_stocks + 1 + d_implvol + i_bonds)\n', (4429, 4491), True, 'import numpy as np\n'), ((4782, 4804), 'numpy.zeros', 'np.zeros', (['(j_, m_, i_)'], {}), '((j_, m_, i_))\n', (4790, 4804), True, 'import numpy as np\n'), ((5960, 5986), 'numpy.zeros', 'np.zeros', (['(j_, m_ + 1, d_)'], {}), '((j_, m_ + 1, d_))\n', (5968, 5986), True, 'import numpy as np\n'), ((5997, 6023), 'numpy.zeros', 'np.zeros', (['(j_, m_ + 1, d_)'], {}), 
'((j_, m_ + 1, d_))\n', (6005, 6023), True, 'import numpy as np\n'), ((6037, 6063), 'numpy.zeros', 'np.zeros', (['(j_, m_ + 1, d_)'], {}), '((j_, m_ + 1, d_))\n', (6045, 6063), True, 'import numpy as np\n'), ((7876, 7915), 'arpym.statistics.project_trans_matrix', 'project_trans_matrix', (['p_credit', '(1 / 252)'], {}), '(p_credit, 1 / 252)\n', (7896, 7915), False, 'from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, simulate_t, project_trans_matrix\n'), ((7950, 8056), 'arpym.statistics.simulate_markov_chain_multiv', 'simulate_markov_chain_multiv', (['ratings_tnow', 'p_credit_daily', 'm_'], {'rho2': 'rho2_credit', 'nu': 'nu_credit', 'j_': 'j_'}), '(ratings_tnow, p_credit_daily, m_, rho2=\n rho2_credit, nu=nu_credit, j_=j_)\n', (7978, 8056), False, 'from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, simulate_t, project_trans_matrix\n'), ((9235, 9256), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""arpm"""'], {}), "('arpm')\n", (9248, 9256), True, 'import matplotlib.pyplot as plt\n'), ((9341, 9400), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1280.0 / 72.0, 720.0 / 72.0)', 'dpi': '(72.0)'}), '(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)\n', (9351, 9400), True, 'import matplotlib.pyplot as plt\n'), ((9719, 9825), 'matplotlib.pyplot.barh', 'plt.barh', (['xp', '(f / 10)'], {'height': '(xp[1] - xp[0])', 'left': '(t_ + 1 + m_)', 'facecolor': '[0.3, 0.3, 0.3]', 'edgecolor': '"""k"""'}), "(xp, f / 10, height=xp[1] - xp[0], left=t_ + 1 + m_, facecolor=[0.3,\n 0.3, 0.3], edgecolor='k')\n", (9727, 9825), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9932), 'matplotlib.pyplot.title', 'plt.title', (["('Projected path: ' + risk_drivers_names[d_plot - 1])"], {'fontweight': '"""bold"""', 'fontsize': '(20)'}), "('Projected path: ' + risk_drivers_names[d_plot - 1], fontweight=\n 'bold', fontsize=20)\n", (9842, 9932), True, 'import matplotlib.pyplot as plt\n'), ((9938, 9973), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (days)"""'], {'fontsize': '(17)'}), "('t (days)', fontsize=17)\n", (9948, 9973), True, 'import matplotlib.pyplot as plt\n'), ((9974, 9997), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (9984, 9997), True, 'import matplotlib.pyplot as plt\n'), ((9998, 10021), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (10008, 10021), True, 'import matplotlib.pyplot as plt\n'), ((10022, 10056), 'arpym.tools.add_logo', 'add_logo', (['fig1'], {'set_fig_size': '(False)'}), '(fig1, set_fig_size=False)\n', (10030, 10056), False, 'from arpym.tools import histogram_sp, add_logo\n'), ((10154, 10165), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (10162, 10165), True, 'import numpy as np\n'), ((10449, 10460), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (10457, 10460), True, 'import numpy as np\n'), ((10742, 10809), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(1280.0 / 72.0, 720.0 / 72.0)', 'dpi': '(72.0)'}), '(2, 1, figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)\n', (10754, 10809), True, 'import matplotlib.pyplot as plt\n'), ((10810, 10824), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[0]'], {}), '(ax[0])\n', (10817, 10824), True, 'import matplotlib.pyplot as plt\n'), ((11131, 11145), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[1]'], {}), '(ax[1])\n', (11138, 11145), True, 'import matplotlib.pyplot as plt\n'), ((11450, 11484), 'arpym.tools.add_logo', 'add_logo', (['fig2'], {'set_fig_size': '(False)'}), '(fig2, 
set_fig_size=False)\n', (11458, 11484), False, 'from arpym.tools import histogram_sp, add_logo\n'), ((4350, 4361), 'numpy.ones', 'np.ones', (['j_'], {}), '(j_)\n', (4357, 4361), True, 'import numpy as np\n'), ((9441, 9458), 'numpy.arange', 'np.arange', (['(t_ + 1)'], {}), '(t_ + 1)\n', (9450, 9458), True, 'import numpy as np\n'), ((10922, 10986), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected rating GE"""'], {'fontweight': '"""bold"""', 'fontsize': '(20)'}), "('Projected rating GE', fontweight='bold', fontsize=20)\n", (10931, 10986), True, 'import matplotlib.pyplot as plt\n'), ((10998, 11011), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (11007, 11011), True, 'import numpy as np\n'), ((11240, 11305), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected rating JPM"""'], {'fontweight': '"""bold"""', 'fontsize': '(20)'}), "('Projected rating JPM', fontweight='bold', fontsize=20)\n", (11249, 11305), True, 'import matplotlib.pyplot as plt\n'), ((11317, 11330), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (11326, 11330), True, 'import numpy as np\n'), ((3755, 3793), 'numpy.array', 'np.array', (["db_estimation_copula['rho2']"], {}), "(db_estimation_copula['rho2'])\n", (3763, 3793), True, 'import numpy as np\n'), ((4942, 4954), 'numpy.zeros', 'np.zeros', (['i_'], {}), '(i_)\n', (4950, 4954), True, 'import numpy as np\n'), ((5174, 5216), 'scipy.stats.t.cdf', 'tstu.cdf', (['epsi_tilde_proj[:, i]', 'nu_copula'], {}), '(epsi_tilde_proj[:, i], nu_copula)\n', (5182, 5216), True, 'from scipy.stats import t as tstu\n'), ((5246, 5295), 'arpym.statistics.quantile_sp', 'quantile_sp', (['u_proj', 'epsi[:, i]', 'p_marginal[:, i]'], {}), '(u_proj, epsi[:, i], p_marginal[:, i])\n', (5257, 5295), False, 'from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, simulate_t, project_trans_matrix\n'), ((5480, 5522), 'scipy.stats.t.cdf', 'tstu.cdf', (['epsi_tilde_proj[:, i]', 'nu_copula'], {}), '(epsi_tilde_proj[:, i], nu_copula)\n', (5488, 5522), True, 'from scipy.stats import t as tstu\n'), ((8947, 8960), 'pandas.Series', 'pd.Series', (['j_'], {}), '(j_)\n', (8956, 8960), True, 'import pandas as pd\n'), ((8991, 9007), 'pandas.Series', 'pd.Series', (['t_hor'], {}), '(t_hor)\n', (9000, 9007), True, 'import pandas as pd\n'), ((9136, 9148), 'pandas.Series', 'pd.Series', (['p'], {}), '(p)\n', (9145, 9148), True, 'import pandas as pd\n'), ((9577, 9611), 'numpy.arange', 'np.arange', (['(t_ + 1)', '(t_ + 1 + m_ + 1)'], {}), '(t_ + 1, t_ + 1 + m_ + 1)\n', (9586, 9611), True, 'import numpy as np\n'), ((10867, 10884), 'numpy.arange', 'np.arange', (['(m_ + 1)'], {}), '(m_ + 1)\n', (10876, 10884), True, 'import numpy as np\n'), ((11106, 11115), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11113, 11115), True, 'import matplotlib.pyplot as plt\n'), ((11185, 11202), 'numpy.arange', 'np.arange', (['(m_ + 1)'], {}), '(m_ + 1)\n', (11194, 11202), True, 'import numpy as np\n'), ((11425, 11434), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11432, 11434), True, 'import matplotlib.pyplot as plt\n'), ((3012, 3060), 'pandas.read_csv', 'pd.read_csv', (["(path + 'db_invariants_p_credit.csv')"], {}), "(path + 'db_invariants_p_credit.csv')\n", (3023, 3060), True, 'import pandas as pd\n'), ((9702, 9712), 'numpy.log', 'np.log', (['j_'], {}), '(j_)\n', (9708, 9712), True, 'import numpy as np\n'), ((10385, 10412), 'numpy.append', 'np.append', (['ind_j_plot_GE', 'j'], {}), '(ind_j_plot_GE, j)\n', (10394, 10412), True, 'import numpy as np\n'), ((10683, 10711), 'numpy.append', 
'np.append', (['ind_j_plot_JPM', 'j'], {}), '(ind_j_plot_JPM, j)\n', (10692, 10711), True, 'import numpy as np\n'), ((5749, 5767), 'numpy.sqrt', 'np.sqrt', (['sig2_marg'], {}), '(sig2_marg)\n', (5756, 5767), True, 'import numpy as np\n'), ((5770, 5795), 'scipy.stats.t.ppf', 'tstu.ppf', (['u_proj', 'nu_marg'], {}), '(u_proj, nu_marg)\n', (5778, 5795), True, 'from scipy.stats import t as tstu\n'), ((7299, 7327), 'numpy.sqrt', 'np.sqrt', (['sig2_garch[:, m, d]'], {}), '(sig2_garch[:, m, d])\n', (7306, 7327), True, 'import numpy as np\n')]
|
import os
import sys
sys.path.append(os.getcwd())
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
from tensorflow import keras
from common.inputs.voc2010 import voc_parts
from common import layers, losses, utils, train, attacks
from common.ops.routing import activated_entropy, coupling_entropy
import numpy as np
import config
WEIGHT_DECAY = 1e-4
kernel_regularizer = keras.regularizers.l2(WEIGHT_DECAY)
kernel_initializer = keras.initializers.he_normal()
BASE_NAME = 'ex4_3'
def build_model_name(params):
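    """Encode backbone, routing type and training hyper-parameters into the run name."""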
model_name = BASE_NAME
model_name += '_{}'.format(params.model.backbone)
model_name += '_fine{}'.format(params.model.fine)
model_name += '_part{}'.format(params.caps.parts)
model_name += '_{}'.format(params.routing.type)
if params.routing.type == 'DR' or params.routing.type == 'EM':
model_name += '_iter{}'.format(params.routing.iter_num)
model_name += '_temper{}'.format(params.routing.temper)
model_name += '_atoms{}'.format(params.caps.atoms)
model_name += '_trial{}'.format(str(params.training.idx))
model_name += '_bs{}'.format(str(params.training.batch_size))
if params.dataset.flip:
model_name += '_flip'
if params.dataset.crop:
model_name += '_crop'
return model_name
def get_loss_opt(type):
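    # capsule-routing heads (DR/EM) are trained with a margin loss on the capsule
    # activations; the avg/max pooling heads use categorical cross-entropy on logits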
optimizer = keras.optimizers.Adam(0.0001)
if type == 'DR' or type == 'EM':
loss = losses.MarginLoss(sparse=False, upper_margin=0.9, bottom_margin=0.1, down_weight=0.5)
else:
loss = keras.losses.CategoricalCrossentropy(from_logits=True)
return loss, optimizer
def build_model(num_out, params):
model_name = build_model_name(params)
inputs, probs, tensor_log = build(num_out,
params.model.backbone,
params.model.fine,
params.routing.type,
params.routing.iter_num,
params.routing.temper,
params.caps.parts,
params.caps.atoms
)
model = keras.Model(inputs=inputs, outputs=probs, name=model_name)
log_model = keras.Model(inputs=inputs, outputs=tensor_log.get_outputs(), name=model_name + '_log')
tensor_log.set_model(log_model)
loss, optimizer = get_loss_opt(params.routing.type)
model.compile(optimizer=optimizer,
loss=loss,
metrics=[])
model.summary()
model.callbacks = []
return model, tensor_log
def build(num_out, backbone, fine, routing, iter_num, temper, parts, atoms):
log = utils.TensorLog()
if backbone == 'VGG16':
in_shape = (224, 224, 3)
base = keras.applications.VGG16(include_top=False, input_shape=in_shape)
elif backbone == 'VGG19':
in_shape = (224, 224, 3)
base = keras.applications.VGG19(include_top=False, input_shape=in_shape)
elif backbone == 'InceptionV3':
in_shape = (299, 299, 3)
base = keras.applications.InceptionV3(include_top=False, input_shape=in_shape)
elif backbone == 'ResNet50':
in_shape = (224, 224, 3)
base = keras.applications.ResNet50(include_top=False, input_shape=in_shape)
else:
in_shape = (299, 299, 3)
base = keras.applications.InceptionV3(include_top=False, input_shape=in_shape)
layer_num = len(base.layers)
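    # freeze all but the last `fine` backbone layers and re-attach weight decay
    # to the kernels of the layers that remain trainable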
for i, layer in enumerate(base.layers):
if i < layer_num-fine:
layer.trainable = False
else:
for w in layer.weights:
if 'kernel' in w.name:
                    # bind w via a default argument so each loss closure keeps its
                    # own weight instead of the last value bound by the loop
                    layer.add_loss(lambda w=w: kernel_regularizer(w))
inputs = keras.Input(in_shape)
features = base(inputs)
interpretable = keras.layers.Conv2D(filters=parts,
kernel_size=1,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(features)
shape = interpretable.get_shape().as_list()
if routing == 'avg':
pool = keras.layers.GlobalAveragePooling2D()(interpretable)
output = keras.layers.Dense(num_out)(pool)
elif routing == 'max':
pool = keras.layers.GlobalMaxPooling2D()(interpretable)
output = keras.layers.Dense(num_out)(pool)
elif routing == 'DR':
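        # group the part-filter feature map into child capsules, transform them
        # into one parent capsule per class and run dynamic routing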
child_pose, child_prob = layers.CapsuleGroups(height=shape[1], width=shape[2], channel=shape[3],
atoms=16,
method='channel',
activation='squash')(interpretable)
log.add_hist('child_activation', child_prob)
transformed_caps = layers.CapsuleTransformDense(num_out=num_out,
out_atom=atoms,
share_weights=False,
initializer=keras.initializers.glorot_normal(),
regularizer=kernel_regularizer)(child_pose)
parent_poses, parent_probs, cs = layers.DynamicRouting(num_routing=iter_num,
softmax_in=False,
temper=temper,
activation='squash',
pooling=False,
log=log)((transformed_caps, child_prob))
log.add_hist('parent_activation', parent_probs[-1])
output = parent_probs[-1]
return inputs, output, log
def main():
args, params = config.parse_args()
if params.task == 'train':
params.dataset.name = 'voc2010'
if params.model.backbone == 'InceptionV3':
data_shape = (299, 299, 3)
else:
data_shape = (224, 224, 3)
train_set, test_set, info = voc_parts.build_dataset3(batch_size=params.training.batch_size,
shape=data_shape,
arch=params.model.backbone)
model, tensor_log = build_model(num_out=info.features['label'].num_classes,
params=params)
trainer = train.Trainer(model, params, info, tensor_log, finetune=True, inference_label=False, max_save=1)
trainer.metrics['accuracy'] = tf.keras.metrics.CategoricalAccuracy(name='accuracy')
if args.train:
trainer.fit(train_set, test_set)
else:
trainer.evaluate(test_set)
elif params.task == 'attack':
do_adv(os.getcwd())
elif params.task == 'score':
compute_entropies(os.getcwd())
def load_ckpt(model, model_dir):
model.compile(optimizer=keras.optimizers.Adam(0.0001),
loss=keras.losses.CategoricalCrossentropy(from_logits=False),
metrics=[])
ckpt = tf.train.Checkpoint(optimizer=model.optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
def get_model_dir(backbone, log='log', routing='avg', dataset='voc2010',
iter_num=None, temper=None, atoms=None,
finetune=0, parts=128, bs=32, idx=1):
model_dir = '{}/{}/{}_{}_fine{}_part{}_{}'.format(log, dataset, BASE_NAME, backbone, finetune, parts, routing)
if routing == 'DR' or routing == 'EM':
model_dir += '_iter{}'.format(iter_num)
model_dir += '_temper{}'.format(temper)
model_dir += '_atoms{}'.format(atoms)
model_dir += '_trial{}_bs{}_flip_crop'.format(idx, bs)
if not os.path.exists(model_dir):
raise Exception('model not exist:{}'.format(model_dir))
return model_dir
def load_model(backbone, iter_num, temper, atoms=16,
log='log', routing='DR',
finetune=0, parts=128, bs=128, idx=1):
data_shape = utils.get_shape(backbone)
model_dir = get_model_dir(backbone=backbone,
log=log,
routing=routing,
finetune=finetune,
parts=parts,
bs=bs,
iter_num=iter_num,
temper=temper,
atoms=atoms,
idx=idx)
inputs, probs, log = build(6, backbone, finetune, routing, iter_num, temper, parts, atoms)
model = keras.Model(inputs=inputs, outputs=probs, name='x')
load_ckpt(model, model_dir)
return model, data_shape, model_dir
def evaluate_attack(epsilons, root='', log='log', backbone='InceptionV3', metric='acc', all_target=False,
method='FGSM', steps=10,
finetune=0, routing='DR', black_box=False, iter_num=10, temper=1.0, atoms=16, parts=128, bs=64, idx=1):
model, data_shape, model_dir = load_model(log=root + log,
backbone=backbone,
routing=routing,
iter_num=iter_num,
temper=temper,
atoms=atoms,
parts=parts,
bs=bs,
finetune=finetune,
idx=idx)
if black_box:
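        # black-box setting: an independently trained copy of the model (trial
        # idx=2) serves as the source for crafting the adversarial examples,
        # which are then evaluated on the target model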
print('load black box source model')
model_src, data_shape, model_dir = load_model(log=root + log,
backbone=backbone,
routing=routing,
iter_num=iter_num,
temper=temper,
atoms=atoms,
parts=parts,
bs=bs,
finetune=finetune,
idx=2)
else:
model_src = model
loss, _ = get_loss_opt(routing)
_, test_set, info = voc_parts.build_dataset3(root + 'data', batch_size=32, shape=data_shape)
acc_adv = keras.metrics.CategoricalAccuracy(name='acc_adv')
if metric == 'acc':
results = attacks.evaluate_model_after_attacks(epsilons, acc_adv, test_set, model, loss, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
elif metric == 'success':
if all_target:
categories = [i for i in range(6)]
results = attacks.evaluate_attacks_success_rate_all_target(epsilons, test_set, model, loss, categories, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
else:
results = attacks.evaluate_attacks_success_rate(epsilons, test_set, model, loss, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
return results
def do_adv(root):
epsilons = [0.1, 0.2, 0.3]
tempers = [0.0, 20.0, 40.0, 60.0, 80.0]
parts_list = [128]
all_target = False
black_box = False
methods = ['PGD', 'BIM', 'FGSM']
backbones = ['InceptionV3']
routing = 'DR'
for backbone in backbones:
print('backbone:', backbone)
for parts in parts_list:
print('parts:', parts)
for method in methods:
print('method:', method)
if routing == 'avg' or routing == 'max':
tempers = [-1]
for temper in tempers:
print('temper:', temper)
if all_target:
epsilons = [0.1]
evaluate_attack(epsilons,
root=root,
backbone=backbone,
metric='success',
all_target=all_target,
method=method,
steps=5,
routing=routing,
black_box=black_box,
parts=parts,
iter_num=2,
temper=temper,
atoms=16,
bs=64,
idx=1)
def compute_entropy(root,
backbone='InceptionV3',
iter_num=2,
activated=True,
temper=10.0,
atoms=16,
routing='DR',
finetune=0,
parts=128,
bs=32):
model, data_shape, model_dir = load_model(log=root + 'log',
backbone=backbone,
iter_num=iter_num,
temper=temper,
atoms=atoms,
routing=routing,
finetune=finetune,
parts=parts,
bs=bs)
train_set, test_set, info = voc_parts.build_dataset3(root + 'data', batch_size=32, shape=data_shape)
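    # tap the intermediate capsule layers: layer 3 yields the child capsule
    # outputs (poses, activations), layer 5 the routing outputs (poses,
    # activations, coupling coefficients) used for the entropy scores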
test_model = keras.Model(model.layers[0].input, [model.layers[3].output, model.layers[5].output])
results = []
for images, labels in test_set:
(child_poses, child_probs), (parent_poses, parent_probs, cs) = test_model(images)
c = cs[-1]
if activated:
entropy = activated_entropy(c, child_probs)
else:
entropy = coupling_entropy(c)
results.append(entropy)
results = np.concatenate(results, 0)
mean = np.mean(results)
std = np.std(results)
print('{:.4}/{:.3}'.format(mean, std))
def compute_entropies(root):
tempers = [0.0, 20.0, 40.0, 60.0, 80.0]
for temper in tempers:
print('temper:{}'.format(temper))
compute_entropy(root,
backbone='InceptionV3',
iter_num=2,
temper=temper,
atoms=16,
routing='DR',
finetune=0,
parts=128,
bs=64)
if __name__ == "__main__":
main()
|
[
"tensorflow.keras.applications.VGG19",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.CategoricalAccuracy",
"common.attacks.evaluate_attacks_success_rate",
"tensorflow.keras.losses.CategoricalCrossentropy",
"common.train.Trainer",
"numpy.mean",
"config.parse_args",
"common.utils.get_shape",
"tensorflow.keras.applications.VGG16",
"tensorflow.keras.regularizers.l2",
"tensorflow.get_logger",
"tensorflow.train.Checkpoint",
"numpy.std",
"tensorflow.keras.Input",
"os.path.exists",
"common.layers.CapsuleGroups",
"tensorflow.keras.applications.InceptionV3",
"common.attacks.evaluate_attacks_success_rate_all_target",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.applications.ResNet50",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"common.utils.TensorLog",
"common.inputs.voc2010.voc_parts.build_dataset3",
"tensorflow.keras.Model",
"common.losses.MarginLoss",
"common.ops.routing.activated_entropy",
"numpy.concatenate",
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.Conv2D",
"os.getcwd",
"common.ops.routing.coupling_entropy",
"common.layers.DynamicRouting",
"tensorflow.train.CheckpointManager",
"common.attacks.evaluate_model_after_attacks",
"tensorflow.keras.initializers.he_normal"
] |
[((384, 419), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['WEIGHT_DECAY'], {}), '(WEIGHT_DECAY)\n', (405, 419), False, 'from tensorflow import keras\n'), ((441, 471), 'tensorflow.keras.initializers.he_normal', 'keras.initializers.he_normal', ([], {}), '()\n', (469, 471), False, 'from tensorflow import keras\n'), ((38, 49), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (47, 49), False, 'import os\n'), ((1329, 1358), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (1350, 1358), False, 'from tensorflow import keras\n'), ((2195, 2253), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'probs', 'name': 'model_name'}), '(inputs=inputs, outputs=probs, name=model_name)\n', (2206, 2253), False, 'from tensorflow import keras\n'), ((2710, 2727), 'common.utils.TensorLog', 'utils.TensorLog', ([], {}), '()\n', (2725, 2727), False, 'from common import layers, losses, utils, train, attacks\n'), ((3789, 3810), 'tensorflow.keras.Input', 'keras.Input', (['in_shape'], {}), '(in_shape)\n', (3800, 3810), False, 'from tensorflow import keras\n'), ((6016, 6035), 'config.parse_args', 'config.parse_args', ([], {}), '()\n', (6033, 6035), False, 'import config\n'), ((7323, 7380), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'model.optimizer', 'net': 'model'}), '(optimizer=model.optimizer, net=model)\n', (7342, 7380), True, 'import tensorflow as tf\n'), ((7395, 7453), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'model_dir'], {'max_to_keep': '(3)'}), '(ckpt, model_dir, max_to_keep=3)\n', (7421, 7453), True, 'import tensorflow as tf\n'), ((8439, 8464), 'common.utils.get_shape', 'utils.get_shape', (['backbone'], {}), '(backbone)\n', (8454, 8464), False, 'from common import layers, losses, utils, train, attacks\n'), ((9012, 9063), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'probs', 'name': '"""x"""'}), "(inputs=inputs, outputs=probs, name='x')\n", (9023, 9063), False, 'from tensorflow import keras\n'), ((10865, 10937), 'common.inputs.voc2010.voc_parts.build_dataset3', 'voc_parts.build_dataset3', (["(root + 'data')"], {'batch_size': '(32)', 'shape': 'data_shape'}), "(root + 'data', batch_size=32, shape=data_shape)\n", (10889, 10937), False, 'from common.inputs.voc2010 import voc_parts\n'), ((10953, 11002), 'tensorflow.keras.metrics.CategoricalAccuracy', 'keras.metrics.CategoricalAccuracy', ([], {'name': '"""acc_adv"""'}), "(name='acc_adv')\n", (10986, 11002), False, 'from tensorflow import keras\n'), ((14082, 14154), 'common.inputs.voc2010.voc_parts.build_dataset3', 'voc_parts.build_dataset3', (["(root + 'data')"], {'batch_size': '(32)', 'shape': 'data_shape'}), "(root + 'data', batch_size=32, shape=data_shape)\n", (14106, 14154), False, 'from common.inputs.voc2010 import voc_parts\n'), ((14172, 14261), 'tensorflow.keras.Model', 'keras.Model', (['model.layers[0].input', '[model.layers[3].output, model.layers[5].output]'], {}), '(model.layers[0].input, [model.layers[3].output, model.layers[5]\n .output])\n', (14183, 14261), False, 'from tensorflow import keras\n'), ((14599, 14625), 'numpy.concatenate', 'np.concatenate', (['results', '(0)'], {}), '(results, 0)\n', (14613, 14625), True, 'import numpy as np\n'), ((14637, 14653), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (14644, 14653), True, 'import numpy as np\n'), ((14664, 14679), 'numpy.std', 'np.std', (['results'], {}), '(results)\n', (14670, 14679), True, 'import numpy 
as np\n'), ((76, 91), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (89, 91), True, 'import tensorflow as tf\n'), ((1411, 1500), 'common.losses.MarginLoss', 'losses.MarginLoss', ([], {'sparse': '(False)', 'upper_margin': '(0.9)', 'bottom_margin': '(0.1)', 'down_weight': '(0.5)'}), '(sparse=False, upper_margin=0.9, bottom_margin=0.1,\n down_weight=0.5)\n', (1428, 1500), False, 'from common import layers, losses, utils, train, attacks\n'), ((1522, 1576), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1558, 1576), False, 'from tensorflow import keras\n'), ((2804, 2869), 'tensorflow.keras.applications.VGG16', 'keras.applications.VGG16', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (2828, 2869), False, 'from tensorflow import keras\n'), ((3859, 4014), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': 'parts', 'kernel_size': '(1)', 'activation': '"""relu"""', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), "(filters=parts, kernel_size=1, activation='relu',\n kernel_initializer=kernel_initializer, kernel_regularizer=\n kernel_regularizer)\n", (3878, 4014), False, 'from tensorflow import keras\n'), ((6286, 6400), 'common.inputs.voc2010.voc_parts.build_dataset3', 'voc_parts.build_dataset3', ([], {'batch_size': 'params.training.batch_size', 'shape': 'data_shape', 'arch': 'params.model.backbone'}), '(batch_size=params.training.batch_size, shape=\n data_shape, arch=params.model.backbone)\n', (6310, 6400), False, 'from common.inputs.voc2010 import voc_parts\n'), ((6664, 6764), 'common.train.Trainer', 'train.Trainer', (['model', 'params', 'info', 'tensor_log'], {'finetune': '(True)', 'inference_label': '(False)', 'max_save': '(1)'}), '(model, params, info, tensor_log, finetune=True,\n inference_label=False, max_save=1)\n', (6677, 6764), False, 'from common import layers, losses, utils, train, attacks\n'), ((6799, 6852), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {'name': '"""accuracy"""'}), "(name='accuracy')\n", (6835, 6852), True, 'import tensorflow as tf\n'), ((8161, 8186), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (8175, 8186), False, 'import os\n'), ((11045, 11211), 'common.attacks.evaluate_model_after_attacks', 'attacks.evaluate_model_after_attacks', (['epsilons', 'acc_adv', 'test_set', 'model', 'loss'], {'method': 'method', 'steps': 'steps', 'label_sparse': '(False)', 'cost': '(True)', 'model_src': 'model_src'}), '(epsilons, acc_adv, test_set, model,\n loss, method=method, steps=steps, label_sparse=False, cost=True,\n model_src=model_src)\n', (11081, 11211), False, 'from common import layers, losses, utils, train, attacks\n'), ((2948, 3013), 'tensorflow.keras.applications.VGG19', 'keras.applications.VGG19', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (2972, 3013), False, 'from tensorflow import keras\n'), ((4265, 4302), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (4300, 4302), False, 'from tensorflow import keras\n'), ((4335, 4362), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_out'], {}), '(num_out)\n', (4353, 4362), False, 'from tensorflow import keras\n'), ((7171, 7200), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', 
(['(0.0001)'], {}), '(0.0001)\n', (7192, 7200), False, 'from tensorflow import keras\n'), ((7225, 7280), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (7261, 7280), False, 'from tensorflow import keras\n'), ((14463, 14496), 'common.ops.routing.activated_entropy', 'activated_entropy', (['c', 'child_probs'], {}), '(c, child_probs)\n', (14480, 14496), False, 'from common.ops.routing import activated_entropy, coupling_entropy\n'), ((14533, 14552), 'common.ops.routing.coupling_entropy', 'coupling_entropy', (['c'], {}), '(c)\n', (14549, 14552), False, 'from common.ops.routing import activated_entropy, coupling_entropy\n'), ((3098, 3169), 'tensorflow.keras.applications.InceptionV3', 'keras.applications.InceptionV3', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (3128, 3169), False, 'from tensorflow import keras\n'), ((4411, 4444), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'keras.layers.GlobalMaxPooling2D', ([], {}), '()\n', (4442, 4444), False, 'from tensorflow import keras\n'), ((4477, 4504), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_out'], {}), '(num_out)\n', (4495, 4504), False, 'from tensorflow import keras\n'), ((7023, 7034), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7032, 7034), False, 'import os\n'), ((11326, 11508), 'common.attacks.evaluate_attacks_success_rate_all_target', 'attacks.evaluate_attacks_success_rate_all_target', (['epsilons', 'test_set', 'model', 'loss', 'categories'], {'method': 'method', 'steps': 'steps', 'label_sparse': '(False)', 'cost': '(True)', 'model_src': 'model_src'}), '(epsilons, test_set, model,\n loss, categories, method=method, steps=steps, label_sparse=False, cost=\n True, model_src=model_src)\n', (11374, 11508), False, 'from common import layers, losses, utils, train, attacks\n'), ((11536, 11695), 'common.attacks.evaluate_attacks_success_rate', 'attacks.evaluate_attacks_success_rate', (['epsilons', 'test_set', 'model', 'loss'], {'method': 'method', 'steps': 'steps', 'label_sparse': '(False)', 'cost': '(True)', 'model_src': 'model_src'}), '(epsilons, test_set, model, loss,\n method=method, steps=steps, label_sparse=False, cost=True, model_src=\n model_src)\n', (11573, 11695), False, 'from common import layers, losses, utils, train, attacks\n'), ((3251, 3319), 'tensorflow.keras.applications.ResNet50', 'keras.applications.ResNet50', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (3278, 3319), False, 'from tensorflow import keras\n'), ((3378, 3449), 'tensorflow.keras.applications.InceptionV3', 'keras.applications.InceptionV3', ([], {'include_top': '(False)', 'input_shape': 'in_shape'}), '(include_top=False, input_shape=in_shape)\n', (3408, 3449), False, 'from tensorflow import keras\n'), ((4570, 4694), 'common.layers.CapsuleGroups', 'layers.CapsuleGroups', ([], {'height': 'shape[1]', 'width': 'shape[2]', 'channel': 'shape[3]', 'atoms': '(16)', 'method': '"""channel"""', 'activation': '"""squash"""'}), "(height=shape[1], width=shape[2], channel=shape[3],\n atoms=16, method='channel', activation='squash')\n", (4590, 4694), False, 'from common import layers, losses, utils, train, attacks\n'), ((5388, 5513), 'common.layers.DynamicRouting', 'layers.DynamicRouting', ([], {'num_routing': 'iter_num', 'softmax_in': '(False)', 'temper': 'temper', 'activation': '"""squash"""', 'pooling': '(False)', 'log': 'log'}), 
"(num_routing=iter_num, softmax_in=False, temper=temper,\n activation='squash', pooling=False, log=log)\n", (5409, 5513), False, 'from common import layers, losses, utils, train, attacks\n'), ((7095, 7106), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7104, 7106), False, 'import os\n'), ((5211, 5245), 'tensorflow.keras.initializers.glorot_normal', 'keras.initializers.glorot_normal', ([], {}), '()\n', (5243, 5245), False, 'from tensorflow import keras\n')]
|
from teca import *
import numpy as np
import sys
def get_request_callback(rank, var_names):
def request(port, md_in, req_in):
sys.stderr.write('descriptive_stats::request MPI %d\n'%(rank))
req = teca_metadata(req_in)
req['arrays'] = var_names
return [req]
return request
def get_execute_callback(rank, var_names):
def execute(port, data_in, req):
sys.stderr.write('descriptive_stats::execute MPI %d\n'%(rank))
mesh = as_teca_cartesian_mesh(data_in[0])
table = teca_table.New()
table.declare_columns(['step','time'], ['ul','d'])
table << mesh.get_time_step() << mesh.get_time()
for var_name in var_names:
table.declare_columns(['min '+var_name, 'avg '+var_name, \
'max '+var_name, 'std '+var_name, 'low_q '+var_name, \
'med '+var_name, 'up_q '+var_name], ['d']*7)
var = mesh.get_point_arrays().get(var_name).as_array()
table << float(np.min(var)) << float(np.average(var)) \
<< float(np.max(var)) << float(np.std(var)) \
<< map(float, np.percentile(var, [25.,50.,75.]))
return table
return execute
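# Minimal usage sketch (an assumption about the surrounding pipeline, not part of
# this module): the two factories above are meant to be handed to a TECA
# programmable algorithm, roughly
#
#   alg = teca_programmable_algorithm.New()
#   alg.set_request_callback(get_request_callback(rank, var_names))
#   alg.set_execute_callback(get_execute_callback(rank, var_names))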
|
[
"numpy.average",
"numpy.std",
"numpy.percentile",
"numpy.max",
"numpy.min",
"sys.stderr.write"
] |
[((139, 201), 'sys.stderr.write', 'sys.stderr.write', (["('descriptive_stats::request MPI %d\\n' % rank)"], {}), "('descriptive_stats::request MPI %d\\n' % rank)\n", (155, 201), False, 'import sys\n'), ((401, 463), 'sys.stderr.write', 'sys.stderr.write', (["('descriptive_stats::execute MPI %d\\n' % rank)"], {}), "('descriptive_stats::execute MPI %d\\n' % rank)\n", (417, 463), False, 'import sys\n'), ((1134, 1172), 'numpy.percentile', 'np.percentile', (['var', '[25.0, 50.0, 75.0]'], {}), '(var, [25.0, 50.0, 75.0])\n', (1147, 1172), True, 'import numpy as np\n'), ((1089, 1100), 'numpy.std', 'np.std', (['var'], {}), '(var)\n', (1095, 1100), True, 'import numpy as np\n'), ((1067, 1078), 'numpy.max', 'np.max', (['var'], {}), '(var)\n', (1073, 1078), True, 'import numpy as np\n'), ((1023, 1038), 'numpy.average', 'np.average', (['var'], {}), '(var)\n', (1033, 1038), True, 'import numpy as np\n'), ((1001, 1012), 'numpy.min', 'np.min', (['var'], {}), '(var)\n', (1007, 1012), True, 'import numpy as np\n')]
|
import numpy as np
def time_between_values(df, cols):
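    """Seconds between consecutive rows in which all of `cols` are non-NaN."""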
gap_df = df[cols].dropna(how='any')
return gap_df.index.to_series().diff(-1).dt.total_seconds().abs()
def distance_to_monitor(df):
dist = np.sqrt(
df.left_gaze_origin_in_user_coordinate_system_x ** 2
+ df.left_gaze_origin_in_user_coordinate_system_y ** 2
+ df.left_gaze_origin_in_user_coordinate_system_z ** 2
)
dist.index = df.time
return dist
def group_by_hour_of_day(series):
return series.groupby(series.index.to_series().dt.hour)
def blinks_per_minute_by_hour_of_day(df):
gaps = time_between_values(
df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter'])
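    # a blink shows up as a short gap in the pupil-diameter signal; gaps between
    # 0.1 s and 0.5 s are counted as blinks (shorter and longer gaps are ignored)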
blinks = gaps[(gaps < 0.5) & (gaps > 0.1)]
blinks_per_hour_of_day = group_by_hour_of_day(blinks).count()
seconds_recorded_per_hour_of_day = (
group_by_hour_of_day(gaps).count()
        / 60  # divide by the sampling frequency (60 Hz) to convert sample counts to seconds
)
return blinks_per_hour_of_day / seconds_recorded_per_hour_of_day * 60
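# Minimal usage sketch (assumes a gaze recording exported with the column names
# used above, e.g. 'time' and the pupil-diameter columns, with 'time' parseable
# as datetimes; the file name is illustrative):
#
#   import pandas as pd
#   df = pd.read_csv('recording.csv', parse_dates=['time'])
#   print(blinks_per_minute_by_hour_of_day(df))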
|
[
"numpy.sqrt"
] |
[((208, 389), 'numpy.sqrt', 'np.sqrt', (['(df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_y ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_z ** 2)'], {}), '(df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_y ** 2 + df.\n left_gaze_origin_in_user_coordinate_system_z ** 2)\n', (215, 389), True, 'import numpy as np\n')]
|
import numpy as np
from datetime import datetime
from astropy.io import ascii
from astropy.time import Time
from argparse import ArgumentParser
from antares_client.search import search, download
def build_query(ra0, dec0, fov, date):
"""Generate a query (a Python dictionary) to submit to the ANTARES client.
Parameters
----------
ra0 : float or None
Central RA for object search, in deg.
dec0 : float or None
Central declination for object search, in deg.
fov : float
Side length of box for search, in deg.
date : str
Start date for search; format is YYYY-MM-DD.
Returns
-------
query : dict
An ElasticSearch dictionary.
"""
# Build up the query.
query = { 'query': { 'bool': { 'must': [] } } }
# desi_candidate_test data stream:
# snfilter_last_proc_status should have a string like "Locus has two or
# more detections and is in DESI brightness range. Triggering."
query['query']['bool']['must'].append(
{ 'match':{ 'properties.snfilter_last_proc_status': '*DESI*' } })
# Set up the declination search.
if dec0 is not None:
ddec = 0.5 * fov
# dra / cos(dec) ensures an equal-area search rectangle.
dra = 0.5*fov / np.cos(np.radians(dec0))
query['query']['bool']['must'].append(
{'range': {'dec':{ 'gte':dec0-ddec, 'lte':dec0+ddec, } } })
else:
dra = 0.5*fov
# Set up the RA search.
if ra0 is not None:
query['query']['bool']['must'].append(
{'range': {'ra':{ 'gte':(ra0-dra)%360., 'lte':(ra0+dra)%360., } } })
# Set up the cumulative date search.
if date is not None:
tobs = Time(date).mjd
query['query']['bool']['must'].append(
{'range': {'mjd':{ 'gte':tobs, } } })
return query
if __name__ == '__main__':
today = datetime.today()
parser = ArgumentParser(description='Client API to query ANTARES alert DB')
parser.add_argument('--ra', default=None, type=float,
help='RA (J2000), in deg')
parser.add_argument('--dec', default=None, type=float,
help='Dec (J2000), in deg')
parser.add_argument('--tobs', default=datetime.today().strftime('%Y-%m-%d'),
help='Obs date [YYYY-MM-DD]')
args = parser.parse_args()
# Create query dict for ANTARES stream search.
query = build_query(ra0=args.ra, dec0=args.dec, fov=3.2, date=args.tobs)
print(query)
#result_set = search(query)
#print(result_set)
outfile = 'results_antares'
if args.ra is not None:
outfile = '{}_ra{:03.1f}'.format(outfile, args.ra)
if args.dec is not None:
outfile = '{}_dec{:03.1f}'.format(outfile, args.dec)
if args.tobs is not None:
outfile = '{}_{}'.format(outfile, args.tobs)
outfile += '.csv'
result_set = download(query, outfile, output_format='csv', decompress=True)
|
[
"numpy.radians",
"antares_client.search.download",
"datetime.datetime.today",
"argparse.ArgumentParser",
"astropy.time.Time"
] |
[((1882, 1898), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1896, 1898), False, 'from datetime import datetime\n'), ((1913, 1979), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Client API to query ANTARES alert DB"""'}), "(description='Client API to query ANTARES alert DB')\n", (1927, 1979), False, 'from argparse import ArgumentParser\n'), ((2901, 2963), 'antares_client.search.download', 'download', (['query', 'outfile'], {'output_format': '"""csv"""', 'decompress': '(True)'}), "(query, outfile, output_format='csv', decompress=True)\n", (2909, 2963), False, 'from antares_client.search import search, download\n'), ((1711, 1721), 'astropy.time.Time', 'Time', (['date'], {}), '(date)\n', (1715, 1721), False, 'from astropy.time import Time\n'), ((1278, 1294), 'numpy.radians', 'np.radians', (['dec0'], {}), '(dec0)\n', (1288, 1294), True, 'import numpy as np\n'), ((2242, 2258), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2256, 2258), False, 'from datetime import datetime\n')]
|
import copy
import numpy as np
PXL2CM = 0.035277778
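# pixel-to-centimetre conversion used for the result tables
# (2.54 cm / 72 px, i.e. one typographic point per pixel)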
def print_formatted_stats(stats):
"""
Print formatted results for result tables
"""
print("& {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\" .format(np.mean(stats['tracked_until_end_ratio']),
np.mean(stats['global_tracking_ratio']),
np.mean(stats['alignment_errors_mean'])*PXL2CM,
np.mean(stats['alignment_errors_std'])*PXL2CM))
def compute_alignment_stats(evaluation_data):
"""
Compute alignment stats
"""
alignment_errors = []
tracking_ratios = []
tracked_until_end = 0
tracked_onsets = 0
total_onsets = 0
for date_entry in evaluation_data:
alignment_errors += date_entry['alignment_errors']
tracking_ratios.append(date_entry['onsets_tracked'] / float(date_entry['total_onsets']))
if date_entry['onsets_tracked'] == date_entry['total_onsets']:
tracked_until_end += 1
tracked_onsets += date_entry['onsets_tracked']
total_onsets += date_entry['total_onsets']
alignment_errors = np.asarray(alignment_errors)
abs_alignment_errors = np.abs(alignment_errors)
tracking_ratios = np.asarray(tracking_ratios)
ae_mean, ae_median, ae_std = -1, -1, -1
if len(abs_alignment_errors) > 0:
ae_mean = abs_alignment_errors.mean()
ae_median = np.median(abs_alignment_errors)
ae_std = abs_alignment_errors.std()
tracking_ratios_mean = tracking_ratios.mean()
tracked_to_end_ratio = tracked_until_end / float(len(evaluation_data))
global_tracking_ratio = float(tracked_onsets) / total_onsets
stats = dict()
stats['alignment_errors_mean'] = ae_mean
stats['alignment_errors_median'] = ae_median
stats['alignment_errors_std'] = ae_std
stats['tracking_ratios_mean'] = tracking_ratios_mean
stats['global_tracking_ratio'] = global_tracking_ratio
stats['tracked_until_end_ratio'] = tracked_to_end_ratio
return stats
class Evaluator:
def __init__(self, make_env, evaluation_pools, config, trials=1, render_mode=None):
self.make_env = make_env
self.evaluation_pools = evaluation_pools
self.config = config
self.render_mode = render_mode
self.trials = trials
def _eval_pool(self, agent, pool, verbose):
pool.reset()
if verbose:
print(pool.get_current_song_name().ljust(60), end=" ")
env = self.make_env(pool, self.config, render_mode=self.render_mode)
alignment_errors = []
# get observations
episode_reward = 0
observation = env.reset()
onset_list = pool.get_current_song_onsets()
while True:
# choose action
action = agent.select_action(observation, train=False)
# perform step and observe
observation, reward, done, info = env.step(action)
episode_reward += reward
# keep alignment errors, only store tracking error if an onset occurs
if pool.curr_perf_frame in onset_list:
alignment_errors.append(pool.tracking_error())
if done:
break
# compute number of tracked onsets
onsets_tracked = np.sum(onset_list <= pool.curr_perf_frame)
song_data = {'alignment_errors': alignment_errors, 'onsets_tracked': onsets_tracked,
'total_onsets': len(onset_list)}
return song_data
def evaluate(self, agent, log_writer=None, log_step=0, verbose=False):
raise NotImplementedError
class PerformanceEvaluator(Evaluator):
def __init__(self, make_env, evaluation_pools, config, trials=1, render_mode=None):
Evaluator.__init__(self, make_env, evaluation_pools, config, trials, render_mode)
def evaluate(self, agent, log_writer=None, log_step=0, verbose=False):
mean_stats = None
for _ in range(self.trials):
evaluation_data = []
for pool in self.evaluation_pools:
song_data = self._eval_pool(agent, pool, verbose)
evaluation_data.append(song_data)
if verbose:
song_stats = compute_alignment_stats([song_data])
string = "tracking ratio: %.2f" % song_stats['global_tracking_ratio']
if song_stats['global_tracking_ratio'] == 1.0:
string += " +"
print(string)
# compute alignment stats
stats = compute_alignment_stats(evaluation_data)
stats['evaluation_data'] = evaluation_data
if mean_stats is None:
mean_stats = dict()
for key in stats.keys():
if key != "evaluation_data":
mean_stats[key] = []
for key in mean_stats.keys():
mean_stats[key].append(stats[key])
for key in mean_stats.keys():
mean_stats[key] = np.mean(mean_stats[key])
if log_writer is not None:
log_writer.add_scalar('eval/alignment_errors_mean', mean_stats['alignment_errors_mean'], log_step)
log_writer.add_scalar('eval/alignment_errors_median', mean_stats['alignment_errors_median'], log_step)
log_writer.add_scalar('eval/alignment_errors_std', mean_stats['alignment_errors_std'], log_step)
log_writer.add_scalar('eval/tracking_ratios_mean', mean_stats['tracking_ratios_mean'], log_step)
log_writer.add_scalar('eval/global_tracking_ratio', mean_stats['global_tracking_ratio'], log_step)
log_writer.add_scalar('eval/tracked_until_end_ratio', mean_stats['tracked_until_end_ratio'], log_step)
return mean_stats
class EmbeddingEvaluator(Evaluator):
def __init__(self, make_env, evaluation_pools, config, trials=1, render_mode=None):
Evaluator.__init__(self, make_env, evaluation_pools, config, trials, render_mode)
self.embedding = None
def store_embedding(self, module, input_, output_):
self.embedding = input_[0]
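    # the forward hook below captures the input to the 'policy_fc' layer, i.e.
    # the feature embedding feeding the policy head, on every forward pass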
def register_hook(self, net):
embedding_layer = net._modules.get('policy_fc')
embedding_layer.register_forward_hook(self.store_embedding)
def _eval_pool(self, agent, pool, verbose):
self.register_hook(agent.model.net)
pool.reset()
if verbose:
print(pool.get_current_song_name())
env = self.make_env(pool, self.config, render_mode=self.render_mode)
plain_env = self.make_env(copy.deepcopy(pool), self.config, render_mode=self.render_mode)
while not hasattr(plain_env, 'rl_pool'):
plain_env = plain_env.env
plain_env.reset()
# get observations
observation = env.reset()
return_dicts = {'state': [],
'value': [],
'embedding': [],
'onsets_in_state': [],
'target_lost': [],
'song_name': [],
'tracking_error': [],
'speed': []}
# song_onsets = plain_env.rl_pool.curr_song.get_perf_onsets()
song_onsets = plain_env.rl_pool.curr_song.cur_perf['onsets_padded']
while True:
# choose action
action = agent.select_action(observation)
# perform step and observe
observation, reward, done, info = env.step(action)
cur_perf_frame = plain_env.rl_pool.curr_perf_frame
in_len = plain_env.rl_pool.perf_shape[-1]
onsets_in_input = len(list(filter(lambda o: cur_perf_frame-in_len <= o <= cur_perf_frame, song_onsets)))
# perform a step in the plain env to get the original observation
obs_org, r, d, _ = plain_env.step(action)
return_dicts['state'].append(obs_org)
return_dicts['value'].append(agent.predict_value(observation))
return_dicts['embedding'].append(self.embedding.cpu().data.numpy())
return_dicts['onsets_in_state'].append(onsets_in_input)
return_dicts['target_lost'].append(done)
return_dicts['song_name'].append(plain_env.rl_pool.curr_song.song_name)
return_dicts['tracking_error'].append(plain_env.rl_pool.tracking_error())
return_dicts['speed'].append(plain_env.rl_pool.sheet_speed)
if done:
break
tue = np.sum(song_onsets <= plain_env.rl_pool.curr_perf_frame) == len(song_onsets)
return_dicts['tue'] = [tue for _ in range(len(return_dicts['state']))]
return return_dicts
def evaluate(self, agent, log_writer=None, log_step=0, verbose=False):
return_dicts = {'state': [],
'value': [],
'embedding': [],
'onsets_in_state': [],
'tue': [],
'target_lost': [],
'song_name': [],
'tracking_error': [],
'speed': []}
for _ in range(self.trials):
for pool in self.evaluation_pools:
res = self._eval_pool(agent, pool, verbose)
return_dicts['state'].extend(res['state'])
return_dicts['value'].extend(res['value'])
return_dicts['embedding'].extend(res['embedding'])
return_dicts['onsets_in_state'].extend(res['onsets_in_state'])
return_dicts['tue'].extend(res['tue'])
return_dicts['target_lost'].extend(res['target_lost'])
return_dicts['song_name'].extend(res['song_name'])
return_dicts['tracking_error'].extend(res['tracking_error'])
return_dicts['speed'].extend(res['speed'])
return return_dicts
|
[
"copy.deepcopy",
"numpy.abs",
"numpy.sum",
"numpy.median",
"numpy.asarray",
"numpy.mean"
] |
[((1224, 1252), 'numpy.asarray', 'np.asarray', (['alignment_errors'], {}), '(alignment_errors)\n', (1234, 1252), True, 'import numpy as np\n'), ((1280, 1304), 'numpy.abs', 'np.abs', (['alignment_errors'], {}), '(alignment_errors)\n', (1286, 1304), True, 'import numpy as np\n'), ((1327, 1354), 'numpy.asarray', 'np.asarray', (['tracking_ratios'], {}), '(tracking_ratios)\n', (1337, 1354), True, 'import numpy as np\n'), ((1504, 1535), 'numpy.median', 'np.median', (['abs_alignment_errors'], {}), '(abs_alignment_errors)\n', (1513, 1535), True, 'import numpy as np\n'), ((3384, 3426), 'numpy.sum', 'np.sum', (['(onset_list <= pool.curr_perf_frame)'], {}), '(onset_list <= pool.curr_perf_frame)\n', (3390, 3426), True, 'import numpy as np\n'), ((214, 255), 'numpy.mean', 'np.mean', (["stats['tracked_until_end_ratio']"], {}), "(stats['tracked_until_end_ratio'])\n", (221, 255), True, 'import numpy as np\n'), ((318, 357), 'numpy.mean', 'np.mean', (["stats['global_tracking_ratio']"], {}), "(stats['global_tracking_ratio'])\n", (325, 357), True, 'import numpy as np\n'), ((5123, 5147), 'numpy.mean', 'np.mean', (['mean_stats[key]'], {}), '(mean_stats[key])\n', (5130, 5147), True, 'import numpy as np\n'), ((6678, 6697), 'copy.deepcopy', 'copy.deepcopy', (['pool'], {}), '(pool)\n', (6691, 6697), False, 'import copy\n'), ((8597, 8653), 'numpy.sum', 'np.sum', (['(song_onsets <= plain_env.rl_pool.curr_perf_frame)'], {}), '(song_onsets <= plain_env.rl_pool.curr_perf_frame)\n', (8603, 8653), True, 'import numpy as np\n'), ((420, 459), 'numpy.mean', 'np.mean', (["stats['alignment_errors_mean']"], {}), "(stats['alignment_errors_mean'])\n", (427, 459), True, 'import numpy as np\n'), ((529, 567), 'numpy.mean', 'np.mean', (["stats['alignment_errors_std']"], {}), "(stats['alignment_errors_std'])\n", (536, 567), True, 'import numpy as np\n')]
|
# Copyright 2021 Arm Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from models.quantize_utils import fake_quant_with_min_max_vars_per_channel, fake_quant_with_min_max_vars, compute_ranges
##############################
## LINEAR BLOCK DEFINITIONS ##
##############################
#EXPANDED Linear block
class LinearBlock_e(tf.keras.layers.Layer):
def __init__(self,
in_filters: int,
num_inner_layers: int,
kernel_size: int,
padding: str,
out_filters: int,
feature_size: int,
quant_W: bool,
mode: str):
super().__init__()
"""
Expanded linear block. Input --> 3x3 Conv to expand number of channels
to 'feature_size' --> 1x1 Conv to project channels into 'out_filters'.
At inference time, this can be analytically collapsed into a single,
small 3x3 Conv layer. See also the LinearBlock_c class which is a
very efficient method to train linear blocks without any loss in
image quality.
"""
assert not quant_W, 'expanded linear block not compatible with w quant'
def conv2d(filters: int, kernel_size_: int) -> tf.keras.layers.Layer:
return tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size_, padding=padding)
layers = []
for _ in range(num_inner_layers):
layers.extend([conv2d(filters=feature_size, kernel_size_=kernel_size)])
layers.append(conv2d(filters=out_filters, kernel_size_=1))
self.block = tf.keras.Sequential(layers)
self.mode = mode
def call(self, inputs, training=None, mask=None):
return self.block(inputs, training=training)
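# Hedged sketch (illustrative only, not from the original training code; biases are
# ignored for brevity): why the expanded block above collapses analytically. With "same"
# padding and no nonlinearity, a kxk expansion followed by a 1x1 projection equals a
# single kxk convolution with kernel K[dx, dy, i, c] = sum_f W1[dx, dy, i, f] * W2[f, c].
def _demo_expand_project_merge():
    x = tf.random.normal([1, 8, 8, 4])
    w_kxk = tf.random.normal([3, 3, 4, 16])    # 3x3 expansion to feature_size channels
    w_1x1 = tf.random.normal([1, 1, 16, 4])    # 1x1 projection back to out_filters
    y_expanded = tf.nn.conv2d(tf.nn.conv2d(x, w_kxk, 1, "SAME"), w_1x1, 1, "SAME")
    w_merged = tf.einsum("xyif,fc->xyic", w_kxk, w_1x1[0, 0])
    y_collapsed = tf.nn.conv2d(x, w_merged, 1, "SAME")
    return tf.reduce_max(tf.abs(y_expanded - y_collapsed))  # small, up to float32 rounding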
#COLLAPSED Linear block
class LinearBlock_c(tf.keras.layers.Layer):
def __init__(self,
in_filters: int,
num_inner_layers: int,
kernel_size: int,
padding: str,
out_filters: int,
feature_size: int,
quant_W: bool,
mode: str):
tf.keras.layers.Layer.__init__(self)
"""
This is a simulated linear block in the train path. The idea is to collapse
linear block at each training step to speed up the forward pass. The backward
pass still updates all the expanded weights.
After training is completed, the weight generation ops are replaced by
a tf.constant at pb/tflite generation time.
----------------------------------------------------------------
| padded_identity |
| | |
| conv1x1(inCh, r*inCh) [optional] |
| | |
| convkxk(r*inCh, r*inCh) |
| | |
| conv1x1(r*inCh, outCh) |
| | |
| simulating residual: identity -> + |
| (or) padded_conv1x1_wt | (weight_tensor generated)|
----------------------------------------------------------------
|
input_tensor -> Actual convkxk(inCh, outCh)
|
Final output
"""
def conv2d(filters: int, kernel_size_: int, padding_: str) -> tf.keras.layers.Layer:
return tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size_, padding=padding_)
# Params
self.in_filters = in_filters
self.out_filters = out_filters
self.feature_size = feature_size
self.quant_W = quant_W
self.mode = mode
# If num_inner_layers > 1, then use another conv1x1 at the beginning
        onebyone = num_inner_layers > 1
# expansion with kx,ky kernel and then project to out_filters using 1x1
kernel_size = [kernel_size, kernel_size]
self.kx, self.ky = kernel_size
# Learnable Collapse Conv's
conv1 = conv2d(feature_size, [1, 1], "valid")
conv2 = conv2d(feature_size, kernel_size, "valid")
conv3 = conv2d(out_filters, [1, 1], "valid")
self.collapsed_weights = None
# Define Collapse Block
if onebyone:
self.collapse = tf.keras.Sequential([conv1, conv2, conv3])
else:
self.collapse = tf.keras.Sequential([conv2, conv3])
if self.mode == 'train':
self.fake_quant_with_min_max_vars_per_channel_fn = \
fake_quant_with_min_max_vars_per_channel
elif self.mode == 'infer':
self.fake_quant_with_min_max_vars_per_channel_fn = \
tf.quantization.fake_quant_with_min_max_vars_per_channel
def build(self, input_shapes):
# shape: (in_filters,in_filters)
delta = tf.eye(self.in_filters)
# expanded shape:(in_filters, 1, 1, in_filters)
delta = tf.expand_dims(tf.expand_dims(delta, 1), 1)
# padded shape: (in_filters, kx, ky, in_filters)
delta = tf.pad(delta, paddings=[[0, 0], [self.kx - 1, self.kx - 1], [self.ky - 1, self.ky - 1], [0, 0]])
# Ensure the Value isn't trainable
self.delta = tf.Variable(initial_value=delta, trainable=False, dtype=tf.float32)
if self.quant_W:
self.wt_quant_min = self.add_weight(
name='wt_quant_min',
shape=(self.out_filters,),
trainable=True)
self.wt_quant_max = self.add_weight(
name='wt_quant_max',
shape=(self.out_filters,),
trainable=True)
if self.mode == "train":
self.wt_quant_initialized = tf.Variable(False, trainable=False)
# Calculate Residual
kernel_dim = [self.kx, self.ky, self.in_filters, self.out_filters]
residual = np.zeros(kernel_dim, dtype=np.float32)
if self.in_filters == self.out_filters:
mid_kx = int(self.kx / 2)
mid_ky = int(self.ky / 2)
for out_ch in range(self.out_filters):
residual[mid_kx, mid_ky, out_ch, out_ch] = 1.0
# Ensure the Value isn't trainable
self.residual = tf.Variable(initial_value=residual, trainable=False, dtype=tf.float32)
def init_wt_quant_ranges(self, kernel: tf.Tensor) -> None:
quant_max, quant_min = compute_ranges(kernel, per_channel=True, symmetric=True)
self.wt_quant_max.assign(quant_max)
self.wt_quant_min.assign(quant_min)
self.wt_quant_initialized.assign(True)
def call(self, inputs):
if self.mode == "train" or (self.collapsed_weights is None):
# Run Through Conv2D's - online linear collapse
wt_tensor = self.collapse(self.delta)
# reverse order of elements in 1,2 axes
wt_tensor = tf.reverse(wt_tensor, tf.constant([1, 2]))
# (in_filters, kx, ky, out_filters) -> (kx, ky, in_filters, out_filters)
wt_tensor = tf.transpose(wt_tensor, [1, 2, 0, 3])
# Direct-residual addition
# when in_filters != self.out_filters, this is just zeros
wt_tensor += self.residual
if self.mode == "infer":
                # store collapsed weights on the first inference; no need to collapse again
self.collapsed_weights = tf.Variable(
initial_value=wt_tensor,
trainable=False,
dtype=tf.float32)
# remove references to uncollapsed variables
self.collapse = None
else:
# use pre-collapsed weights
wt_tensor = self.collapsed_weights
if self.mode == "train":
if self.quant_W:
if not self.wt_quant_initialized:
self.init_wt_quant_ranges(wt_tensor)
elif self.mode == "infer":
pass
else:
assert False, self.mode
if self.quant_W:
wt_tensor = self.fake_quant_with_min_max_vars_per_channel_fn(
wt_tensor,
min=self.wt_quant_min,
max=self.wt_quant_max,
num_bits=8,
narrow_range=True)
# Output - the actual conv2d
out = tf.nn.conv2d(inputs, wt_tensor, strides=[1, 1, 1, 1], padding="SAME")
return out
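# Hedged sketch (illustrative only, biases omitted for brevity): the padded-identity
# ("delta") trick used in build()/call() above to read out the single kernel a small conv
# stack is equivalent to, checked against a direct analytic merge.
def _demo_delta_kernel_extraction(in_ch=2, feat=8, out_ch=3, k=3):
    w_kxk = tf.random.normal([k, k, in_ch, feat])
    w_1x1 = tf.random.normal([1, 1, feat, out_ch])
    w_ref = tf.einsum("xyif,fc->xyic", w_kxk, w_1x1[0, 0])          # direct merge
    delta = tf.pad(tf.expand_dims(tf.expand_dims(tf.eye(in_ch), 1), 1),
                   [[0, 0], [k - 1, k - 1], [k - 1, k - 1], [0, 0]])
    wt = tf.nn.conv2d(delta, w_kxk, 1, "VALID")                   # (in_ch, k, k, feat)
    wt = tf.nn.conv2d(wt, w_1x1, 1, "VALID")                      # (in_ch, k, k, out_ch)
    wt = tf.transpose(tf.reverse(wt, [1, 2]), [1, 2, 0, 3])        # (k, k, in_ch, out_ch)
    return tf.reduce_max(tf.abs(wt - w_ref))                      # small, up to float32 rounding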
|
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.pad",
"tensorflow.eye",
"numpy.zeros",
"models.quantize_utils.compute_ranges",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Layer.__init__",
"tensorflow.expand_dims"
] |
[((2255, 2282), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (2274, 2282), True, 'import tensorflow as tf\n'), ((2789, 2825), 'tensorflow.keras.layers.Layer.__init__', 'tf.keras.layers.Layer.__init__', (['self'], {}), '(self)\n', (2819, 2825), True, 'import tensorflow as tf\n'), ((5867, 5890), 'tensorflow.eye', 'tf.eye', (['self.in_filters'], {}), '(self.in_filters)\n', (5873, 5890), True, 'import tensorflow as tf\n'), ((6082, 6183), 'tensorflow.pad', 'tf.pad', (['delta'], {'paddings': '[[0, 0], [self.kx - 1, self.kx - 1], [self.ky - 1, self.ky - 1], [0, 0]]'}), '(delta, paddings=[[0, 0], [self.kx - 1, self.kx - 1], [self.ky - 1, \n self.ky - 1], [0, 0]])\n', (6088, 6183), True, 'import tensorflow as tf\n'), ((6244, 6311), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'delta', 'trainable': '(False)', 'dtype': 'tf.float32'}), '(initial_value=delta, trainable=False, dtype=tf.float32)\n', (6255, 6311), True, 'import tensorflow as tf\n'), ((6916, 6954), 'numpy.zeros', 'np.zeros', (['kernel_dim'], {'dtype': 'np.float32'}), '(kernel_dim, dtype=np.float32)\n', (6924, 6954), True, 'import numpy as np\n'), ((7263, 7333), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'residual', 'trainable': '(False)', 'dtype': 'tf.float32'}), '(initial_value=residual, trainable=False, dtype=tf.float32)\n', (7274, 7333), True, 'import tensorflow as tf\n'), ((7429, 7485), 'models.quantize_utils.compute_ranges', 'compute_ranges', (['kernel'], {'per_channel': '(True)', 'symmetric': '(True)'}), '(kernel, per_channel=True, symmetric=True)\n', (7443, 7485), False, 'from models.quantize_utils import fake_quant_with_min_max_vars_per_channel, fake_quant_with_min_max_vars, compute_ranges\n'), ((9403, 9472), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'wt_tensor'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(inputs, wt_tensor, strides=[1, 1, 1, 1], padding='SAME')\n", (9415, 9472), True, 'import tensorflow as tf\n'), ((1937, 2024), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size_', 'padding': 'padding'}), '(filters=filters, kernel_size=kernel_size_, padding=\n padding)\n', (1959, 2024), True, 'import tensorflow as tf\n'), ((4407, 4495), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size_', 'padding': 'padding_'}), '(filters=filters, kernel_size=kernel_size_, padding=\n padding_)\n', (4429, 4495), True, 'import tensorflow as tf\n'), ((5314, 5356), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[conv1, conv2, conv3]'], {}), '([conv1, conv2, conv3])\n', (5333, 5356), True, 'import tensorflow as tf\n'), ((5399, 5434), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[conv2, conv3]'], {}), '([conv2, conv3])\n', (5418, 5434), True, 'import tensorflow as tf\n'), ((5979, 6003), 'tensorflow.expand_dims', 'tf.expand_dims', (['delta', '(1)'], {}), '(delta, 1)\n', (5993, 6003), True, 'import tensorflow as tf\n'), ((8079, 8116), 'tensorflow.transpose', 'tf.transpose', (['wt_tensor', '[1, 2, 0, 3]'], {}), '(wt_tensor, [1, 2, 0, 3])\n', (8091, 8116), True, 'import tensorflow as tf\n'), ((6749, 6784), 'tensorflow.Variable', 'tf.Variable', (['(False)'], {'trainable': '(False)'}), '(False, trainable=False)\n', (6760, 6784), True, 'import tensorflow as tf\n'), ((7948, 7967), 'tensorflow.constant', 'tf.constant', (['[1, 2]'], {}), '([1, 2])\n', (7959, 7967), True, 'import tensorflow as tf\n'), ((8451, 8522), 
'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'wt_tensor', 'trainable': '(False)', 'dtype': 'tf.float32'}), '(initial_value=wt_tensor, trainable=False, dtype=tf.float32)\n', (8462, 8522), True, 'import tensorflow as tf\n')]
|
from IPython import display
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import os
import tensorflow as tf
from tensorflow import nn, layers
from tensorflow.contrib import layers as clayers
import numpy as np
import errno
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from matplotlib import pyplot as plt
import torch
# Output Directory
OUTPUT_PATH = os.path.join(os.path.abspath(__file__+ "../../"), '.output/')
DATASET_PATH = os.path.join(os.path.abspath(__file__+ "../../"), '.dataset/CIFAR/')
if not os.path.exists(OUTPUT_PATH): os.makedirs(OUTPUT_PATH)
if not os.path.exists(DATASET_PATH): os.makedirs(DATASET_PATH)
def cifar_data():
compose = transforms.Compose([transforms.Resize(64),transforms.ToTensor(),transforms.Normalize((.5, .5, .5), (.5, .5, .5)),])
return datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=compose)
dataset = cifar_data()
batch_size = 100
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
num_batches = len(dataloader)
IMAGES_SHAPE = (64, 64, 3)
NOISE_SIZE = 100
def default_conv2d(inputs, filters):
return layers.conv2d(
inputs,
filters=filters,
kernel_size=4,
strides=(2, 2),
padding='same',
data_format='channels_last',
use_bias=False,
)
def default_conv2d_transpose(inputs, filters):
return layers.conv2d_transpose(
inputs,
filters=filters,
kernel_size=4,
strides=(2, 2),
padding='same',
data_format='channels_last',
use_bias=False,
)
def noise(n_rows, n_cols):
return np.random.normal(size=(n_rows, n_cols))
def discriminator(x):
with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
with tf.variable_scope("conv1"):
conv1 = default_conv2d(x, 128)
conv1 = nn.leaky_relu(conv1,alpha=0.2)
with tf.variable_scope("conv2"):
conv2 = default_conv2d(conv1, 256)
conv2 = layers.batch_normalization(conv2)
conv2 = nn.leaky_relu(conv2,alpha=0.2)
with tf.variable_scope("conv3"):
conv3 = default_conv2d(conv2, 512)
conv3 = layers.batch_normalization(conv3)
conv3 = nn.leaky_relu(conv3,alpha=0.2)
with tf.variable_scope("conv4"):
conv4 = default_conv2d(conv3, 1024)
            conv4 = layers.batch_normalization(conv4)
            conv4 = nn.leaky_relu(conv4, alpha=0.2)
with tf.variable_scope("linear"):
linear = clayers.flatten(conv4)
linear = clayers.fully_connected(linear, 1)
with tf.variable_scope("out"):
out = nn.sigmoid(linear)
return out
def generator(z):
with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
with tf.variable_scope("linear"):
linear = clayers.fully_connected(z, 1024 * 4 * 4)
with tf.variable_scope("conv1_transp"):
# Reshape as 4x4 images
conv1 = tf.reshape(linear, (-1, 4, 4, 1024))
conv1 = default_conv2d_transpose(conv1, 512)
conv1 = layers.batch_normalization(conv1)
conv1 = nn.relu(conv1)
with tf.variable_scope("conv2_transp"):
conv2 = default_conv2d_transpose(conv1, 256)
conv2 = layers.batch_normalization(conv2)
conv2 = nn.relu(conv2)
with tf.variable_scope("conv3_transp"):
conv3 = default_conv2d_transpose(conv2, 128)
conv3 = layers.batch_normalization(conv3)
conv3 = nn.relu(conv3)
with tf.variable_scope("conv4_transp"):
conv4 = default_conv2d_transpose(conv3, 3)
with tf.variable_scope("out"):
out = tf.tanh(conv4)
return out
## Real Input
real_sample = tf.placeholder(tf.float32, shape=(None, )+IMAGES_SHAPE)
## Latent Variables / Noise
noise_sample = tf.placeholder(tf.float32, shape=(None, NOISE_SIZE))
# Generator
G_sample = generator(noise_sample)
# Discriminator
D_real = discriminator(real_sample)
D_fake = discriminator(G_sample)
# Generator
G_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_fake, labels=tf.ones_like(D_fake)
)
)
# Discriminator
D_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_real, labels=tf.ones_like(D_real)
)
)
D_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_fake, labels=tf.zeros_like(D_fake)
)
)
D_loss = D_loss_real + D_loss_fake
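# Hedged sketch (plain numpy, not used by the training graph): what
# tf.nn.sigmoid_cross_entropy_with_logits computes for the losses above, written in the
# numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)) for logits x and labels z.
def _sigmoid_xent(logits, labels):
    x = np.asarray(logits, dtype=np.float64)
    z = np.asarray(labels, dtype=np.float64)
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))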
# Obtain trainable variables for both networks
train_vars = tf.trainable_variables()
G_vars = [var for var in train_vars if 'generator' in var.name]
D_vars = [var for var in train_vars if 'discriminator' in var.name]
num_epochs = 200
G_opt = tf.train.AdamOptimizer(2e-4).minimize(G_loss, var_list=G_vars,)
D_opt = tf.train.AdamOptimizer(2e-4).minimize(D_loss, var_list=D_vars,)
num_test_samples = 16
test_noise = noise(num_test_samples, NOISE_SIZE)
BATCH_SIZE = 100
NUM_EPOCHS = 200
# session = tf.InteractiveSession()
# tf.global_variables_initializer().run(session=session)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
logger = Logger(model_name='DCGAN1', data_name='CIFAR10', root_path=OUTPUT_PATH)
# Iterate through epochs
for epoch in range(NUM_EPOCHS):
for n_batch, (batch,_) in enumerate(dataloader):
# 1. Train Discriminator
X_batch = batch.permute(0, 2, 3, 1).numpy()
feed_dict = {real_sample: X_batch, noise_sample: noise(BATCH_SIZE, NOISE_SIZE)}
_, d_error, d_pred_real, d_pred_fake = sess.run([D_opt, D_loss, D_real, D_fake], feed_dict=feed_dict)
# 2. Train Generator
feed_dict = {noise_sample: noise(BATCH_SIZE, NOISE_SIZE)}
_, g_error = sess.run([G_opt, G_loss], feed_dict=feed_dict)
# if n_batch % 10 == 0:
logger.display_status(epoch, num_epochs, n_batch, num_batches,d_error, g_error, d_pred_real, d_pred_fake)
if n_batch % 100 == 0:
display.clear_output(True)
# Generate images from test noise
test_images = sess.run(G_sample, feed_dict={noise_sample: test_noise})
# Log Images
logger.log_images(test_images, num_test_samples, epoch, n_batch, num_batches, format='NHWC');
# Log Status
logger.display_status(epoch, num_epochs, n_batch, num_batches,d_error, g_error, d_pred_real, d_pred_fake)
class Logger:
def __init__(self, model_name, data_name, root_path):
self.model_name = model_name
self.data_name = data_name
self.comment = '{}_{}'.format(model_name, data_name)
self.data_subdir = '{}/{}'.format(model_name, data_name)
# TensorBoard
self.writer = SummaryWriter(comment=self.comment)
self.rootPath = root_path
def log(self, d_error, g_error, epoch, n_batch, num_batches):
# var_class = torch.autograd.variable.Variable
if isinstance(d_error, torch.autograd.Variable):
d_error = d_error.data.cpu().numpy()
if isinstance(g_error, torch.autograd.Variable):
g_error = g_error.data.cpu().numpy()
step = Logger._step(epoch, n_batch, num_batches)
self.writer.add_scalar(
'{}/D_error'.format(self.comment), d_error, step)
self.writer.add_scalar(
'{}/G_error'.format(self.comment), g_error, step)
def log_images(self, images, num_images, epoch, n_batch, num_batches, format='NCHW', normalize=True):
'''
input images are expected in format (NCHW)
'''
if type(images) == np.ndarray:
images = torch.from_numpy(images)
if format=='NHWC':
images = images.transpose(1,3)
step = Logger._step(epoch, n_batch, num_batches)
img_name = '{}/images{}'.format(self.comment, '')
# Make horizontal grid from image tensor
horizontal_grid = vutils.make_grid(images, normalize=normalize, scale_each=True)
# Make vertical grid from image tensor
nrows = int(np.sqrt(num_images))
grid = vutils.make_grid(images, nrow=nrows, normalize=True, scale_each=True)
# Add horizontal images to tensorboard
self.writer.add_image(img_name, horizontal_grid, step)
# Save plots
self.save_torch_images(horizontal_grid, grid, epoch, n_batch)
print("Save Log Image")
def save_torch_images(self, horizontal_grid, grid, epoch, n_batch, plot_horizontal=True):
out_dir = (self.rootPath+'/images/{}').format(self.data_subdir)
Logger._make_dir(out_dir)
# Plot and save horizontal
fig = plt.figure(figsize=(16, 16))
plt.imshow(np.moveaxis(horizontal_grid.numpy(), 0, -1))
plt.axis('off')
if plot_horizontal:
display.display(plt.gcf())
self._save_images(fig, epoch, n_batch, 'hori')
plt.close()
# Save squared
fig = plt.figure()
plt.imshow(np.moveaxis(grid.numpy(), 0, -1))
plt.axis('off')
self._save_images(fig, epoch, n_batch)
plt.close()
def _save_images(self, fig, epoch, n_batch, comment=''):
out_dir = (self.rootPath+'/images/{}').format(self.data_subdir)
Logger._make_dir(out_dir)
fig.savefig('{}/{}_epoch_{}_batch_{}.png'.format(out_dir,comment, epoch, n_batch))
def display_status(self, epoch, num_epochs, n_batch, num_batches, d_error, g_error, d_pred_real, d_pred_fake):
# var_class = torch.autograd.variable.Variable
if isinstance(d_error, torch.autograd.Variable):
d_error = d_error.data.cpu().numpy()
if isinstance(g_error, torch.autograd.Variable):
g_error = g_error.data.cpu().numpy()
if isinstance(d_pred_real, torch.autograd.Variable):
d_pred_real = d_pred_real.data
if isinstance(d_pred_fake, torch.autograd.Variable):
d_pred_fake = d_pred_fake.data
print('Epoch: [{}/{}], Batch Num: [{}/{}]'.format(
epoch,num_epochs, n_batch, num_batches)
)
print('Discriminator Loss: {:.4f}, Generator Loss: {:.4f}'.format(d_error, g_error))
print('D(x): {:.4f}, D(G(z)): {:.4f}'.format(d_pred_real.mean(), d_pred_fake.mean()))
def save_models(self, generator, discriminator, epoch):
out_dir = (self.rootPath+'/models/{}').format(self.data_subdir)
Logger._make_dir(out_dir)
torch.save(generator.state_dict(),
'{}/G_epoch_{}'.format(out_dir, epoch))
torch.save(discriminator.state_dict(),
'{}/D_epoch_{}'.format(out_dir, epoch))
def close(self):
self.writer.close()
# Private Functionality
@staticmethod
def _step(epoch, n_batch, num_batches):
return epoch * num_batches + n_batch
@staticmethod
def _make_dir(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
|
[
"tensorflow.trainable_variables",
"tensorflow.contrib.layers.flatten",
"tensorflow.reshape",
"tensorflow.zeros_like",
"torchvision.datasets.CIFAR10",
"matplotlib.pyplot.figure",
"numpy.random.normal",
"tensorflow.layers.conv2d_transpose",
"torchvision.transforms.Normalize",
"tensorflow.nn.leaky_relu",
"tensorflow.layers.batch_normalization",
"numpy.sqrt",
"os.path.abspath",
"tensorflow.nn.relu",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"os.path.exists",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.tanh",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.ones_like",
"tensorflow.layers.conv2d",
"IPython.display.clear_output",
"matplotlib.pyplot.gcf",
"torchvision.transforms.Resize",
"torch.from_numpy",
"tensorboardX.SummaryWriter",
"os.makedirs",
"matplotlib.pyplot.axis",
"tensorflow.nn.sigmoid",
"torchvision.utils.make_grid",
"tensorflow.train.AdamOptimizer",
"torchvision.transforms.ToTensor"
] |
[((989, 1045), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True)\n', (999, 1045), False, 'from torch.utils.data import DataLoader\n'), ((3937, 3993), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '((None,) + IMAGES_SHAPE)'}), '(tf.float32, shape=(None,) + IMAGES_SHAPE)\n', (3951, 3993), True, 'import tensorflow as tf\n'), ((4036, 4088), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, NOISE_SIZE)'}), '(tf.float32, shape=(None, NOISE_SIZE))\n', (4050, 4088), True, 'import tensorflow as tf\n'), ((4748, 4772), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4770, 4772), True, 'import tensorflow as tf\n'), ((5278, 5290), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5288, 5290), True, 'import tensorflow as tf\n'), ((436, 472), 'os.path.abspath', 'os.path.abspath', (["(__file__ + '../../')"], {}), "(__file__ + '../../')\n", (451, 472), False, 'import os\n'), ((513, 549), 'os.path.abspath', 'os.path.abspath', (["(__file__ + '../../')"], {}), "(__file__ + '../../')\n", (528, 549), False, 'import os\n'), ((576, 603), 'os.path.exists', 'os.path.exists', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (590, 603), False, 'import os\n'), ((605, 629), 'os.makedirs', 'os.makedirs', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (616, 629), False, 'import os\n'), ((637, 665), 'os.path.exists', 'os.path.exists', (['DATASET_PATH'], {}), '(DATASET_PATH)\n', (651, 665), False, 'import os\n'), ((667, 692), 'os.makedirs', 'os.makedirs', (['DATASET_PATH'], {}), '(DATASET_PATH)\n', (678, 692), False, 'import os\n'), ((853, 939), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': 'DATASET_PATH', 'train': '(True)', 'download': '(True)', 'transform': 'compose'}), '(root=DATASET_PATH, train=True, download=True, transform=\n compose)\n', (869, 939), False, 'from torchvision import transforms, datasets\n'), ((1170, 1304), 'tensorflow.layers.conv2d', 'layers.conv2d', (['inputs'], {'filters': 'filters', 'kernel_size': '(4)', 'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': '"""channels_last"""', 'use_bias': '(False)'}), "(inputs, filters=filters, kernel_size=4, strides=(2, 2),\n padding='same', data_format='channels_last', use_bias=False)\n", (1183, 1304), False, 'from tensorflow import nn, layers\n'), ((1423, 1567), 'tensorflow.layers.conv2d_transpose', 'layers.conv2d_transpose', (['inputs'], {'filters': 'filters', 'kernel_size': '(4)', 'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': '"""channels_last"""', 'use_bias': '(False)'}), "(inputs, filters=filters, kernel_size=4, strides=(2,\n 2), padding='same', data_format='channels_last', use_bias=False)\n", (1446, 1567), False, 'from tensorflow import nn, layers\n'), ((1666, 1705), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n_rows, n_cols)'}), '(size=(n_rows, n_cols))\n', (1682, 1705), True, 'import numpy as np\n'), ((1738, 1793), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""discriminator"""'], {'reuse': 'tf.AUTO_REUSE'}), "('discriminator', reuse=tf.AUTO_REUSE)\n", (1755, 1793), True, 'import tensorflow as tf\n'), ((2824, 2875), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generator"""'], {'reuse': 'tf.AUTO_REUSE'}), "('generator', reuse=tf.AUTO_REUSE)\n", (2841, 2875), True, 'import tensorflow as tf\n'), ((4933, 4963), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0002)'], {}), 
'(0.0002)\n', (4955, 4963), True, 'import tensorflow as tf\n'), ((5005, 5035), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0002)'], {}), '(0.0002)\n', (5027, 5035), True, 'import tensorflow as tf\n'), ((5291, 5324), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5322, 5324), True, 'import tensorflow as tf\n'), ((6938, 6973), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': 'self.comment'}), '(comment=self.comment)\n', (6951, 6973), False, 'from tensorboardX import SummaryWriter\n'), ((8136, 8198), 'torchvision.utils.make_grid', 'vutils.make_grid', (['images'], {'normalize': 'normalize', 'scale_each': '(True)'}), '(images, normalize=normalize, scale_each=True)\n', (8152, 8198), True, 'import torchvision.utils as vutils\n'), ((8302, 8371), 'torchvision.utils.make_grid', 'vutils.make_grid', (['images'], {'nrow': 'nrows', 'normalize': '(True)', 'scale_each': '(True)'}), '(images, nrow=nrows, normalize=True, scale_each=True)\n', (8318, 8371), True, 'import torchvision.utils as vutils\n'), ((8858, 8886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (8868, 8886), True, 'from matplotlib import pyplot as plt\n'), ((8959, 8974), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8967, 8974), True, 'from matplotlib import pyplot as plt\n'), ((9105, 9116), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9114, 9116), True, 'from matplotlib import pyplot as plt\n'), ((9155, 9167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9165, 9167), True, 'from matplotlib import pyplot as plt\n'), ((9229, 9244), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9237, 9244), True, 'from matplotlib import pyplot as plt\n'), ((9300, 9311), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9309, 9311), True, 'from matplotlib import pyplot as plt\n'), ((746, 767), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64)'], {}), '(64)\n', (763, 767), False, 'from torchvision import transforms, datasets\n'), ((768, 789), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (787, 789), False, 'from torchvision import transforms, datasets\n'), ((790, 844), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (810, 844), False, 'from torchvision import transforms, datasets\n'), ((1808, 1834), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1"""'], {}), "('conv1')\n", (1825, 1834), True, 'import tensorflow as tf\n'), ((1899, 1930), 'tensorflow.nn.leaky_relu', 'nn.leaky_relu', (['conv1'], {'alpha': '(0.2)'}), '(conv1, alpha=0.2)\n', (1912, 1930), False, 'from tensorflow import nn, layers\n'), ((1952, 1978), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv2"""'], {}), "('conv2')\n", (1969, 1978), True, 'import tensorflow as tf\n'), ((2047, 2080), 'tensorflow.layers.batch_normalization', 'layers.batch_normalization', (['conv2'], {}), '(conv2)\n', (2073, 2080), False, 'from tensorflow import nn, layers\n'), ((2101, 2132), 'tensorflow.nn.leaky_relu', 'nn.leaky_relu', (['conv2'], {'alpha': '(0.2)'}), '(conv2, alpha=0.2)\n', (2114, 2132), False, 'from tensorflow import nn, layers\n'), ((2158, 2184), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv3"""'], {}), "('conv3')\n", (2175, 2184), True, 'import tensorflow as tf\n'), ((2253, 2286), 
'tensorflow.layers.batch_normalization', 'layers.batch_normalization', (['conv3'], {}), '(conv3)\n', (2279, 2286), False, 'from tensorflow import nn, layers\n'), ((2307, 2338), 'tensorflow.nn.leaky_relu', 'nn.leaky_relu', (['conv3'], {'alpha': '(0.2)'}), '(conv3, alpha=0.2)\n', (2320, 2338), False, 'from tensorflow import nn, layers\n'), ((2364, 2390), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv4"""'], {}), "('conv4')\n", (2381, 2390), True, 'import tensorflow as tf\n'), ((2460, 2493), 'tensorflow.layers.batch_normalization', 'layers.batch_normalization', (['conv3'], {}), '(conv3)\n', (2486, 2493), False, 'from tensorflow import nn, layers\n'), ((2514, 2545), 'tensorflow.nn.leaky_relu', 'nn.leaky_relu', (['conv3'], {'alpha': '(0.2)'}), '(conv3, alpha=0.2)\n', (2527, 2545), False, 'from tensorflow import nn, layers\n'), ((2567, 2594), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""linear"""'], {}), "('linear')\n", (2584, 2594), True, 'import tensorflow as tf\n'), ((2617, 2639), 'tensorflow.contrib.layers.flatten', 'clayers.flatten', (['conv4'], {}), '(conv4)\n', (2632, 2639), True, 'from tensorflow.contrib import layers as clayers\n'), ((2661, 2695), 'tensorflow.contrib.layers.fully_connected', 'clayers.fully_connected', (['linear', '(1)'], {}), '(linear, 1)\n', (2684, 2695), True, 'from tensorflow.contrib import layers as clayers\n'), ((2718, 2742), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""out"""'], {}), "('out')\n", (2735, 2742), True, 'import tensorflow as tf\n'), ((2762, 2780), 'tensorflow.nn.sigmoid', 'nn.sigmoid', (['linear'], {}), '(linear)\n', (2772, 2780), False, 'from tensorflow import nn, layers\n'), ((2895, 2922), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""linear"""'], {}), "('linear')\n", (2912, 2922), True, 'import tensorflow as tf\n'), ((2945, 2985), 'tensorflow.contrib.layers.fully_connected', 'clayers.fully_connected', (['z', '(1024 * 4 * 4)'], {}), '(z, 1024 * 4 * 4)\n', (2968, 2985), True, 'from tensorflow.contrib import layers as clayers\n'), ((3012, 3045), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1_transp"""'], {}), "('conv1_transp')\n", (3029, 3045), True, 'import tensorflow as tf\n'), ((3103, 3139), 'tensorflow.reshape', 'tf.reshape', (['linear', '(-1, 4, 4, 1024)'], {}), '(linear, (-1, 4, 4, 1024))\n', (3113, 3139), True, 'import tensorflow as tf\n'), ((3217, 3250), 'tensorflow.layers.batch_normalization', 'layers.batch_normalization', (['conv1'], {}), '(conv1)\n', (3243, 3250), False, 'from tensorflow import nn, layers\n'), ((3271, 3285), 'tensorflow.nn.relu', 'nn.relu', (['conv1'], {}), '(conv1)\n', (3278, 3285), False, 'from tensorflow import nn, layers\n'), ((3308, 3341), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv2_transp"""'], {}), "('conv2_transp')\n", (3325, 3341), True, 'import tensorflow as tf\n'), ((3420, 3453), 'tensorflow.layers.batch_normalization', 'layers.batch_normalization', (['conv2'], {}), '(conv2)\n', (3446, 3453), False, 'from tensorflow import nn, layers\n'), ((3474, 3488), 'tensorflow.nn.relu', 'nn.relu', (['conv2'], {}), '(conv2)\n', (3481, 3488), False, 'from tensorflow import nn, layers\n'), ((3515, 3548), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv3_transp"""'], {}), "('conv3_transp')\n", (3532, 3548), True, 'import tensorflow as tf\n'), ((3627, 3660), 'tensorflow.layers.batch_normalization', 'layers.batch_normalization', (['conv3'], {}), '(conv3)\n', (3653, 3660), False, 'from tensorflow import nn, layers\n'), ((3681, 3695), 
'tensorflow.nn.relu', 'nn.relu', (['conv3'], {}), '(conv3)\n', (3688, 3695), False, 'from tensorflow import nn, layers\n'), ((3722, 3755), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv4_transp"""'], {}), "('conv4_transp')\n", (3739, 3755), True, 'import tensorflow as tf\n'), ((3834, 3858), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""out"""'], {}), "('out')\n", (3851, 3858), True, 'import tensorflow as tf\n'), ((3878, 3892), 'tensorflow.tanh', 'tf.tanh', (['conv4'], {}), '(conv4)\n', (3885, 3892), True, 'import tensorflow as tf\n'), ((4335, 4355), 'tensorflow.ones_like', 'tf.ones_like', (['D_fake'], {}), '(D_fake)\n', (4347, 4355), True, 'import tensorflow as tf\n'), ((4486, 4506), 'tensorflow.ones_like', 'tf.ones_like', (['D_real'], {}), '(D_real)\n', (4498, 4506), True, 'import tensorflow as tf\n'), ((4621, 4642), 'tensorflow.zeros_like', 'tf.zeros_like', (['D_fake'], {}), '(D_fake)\n', (4634, 4642), True, 'import tensorflow as tf\n'), ((6190, 6216), 'IPython.display.clear_output', 'display.clear_output', (['(True)'], {}), '(True)\n', (6210, 6216), False, 'from IPython import display\n'), ((7831, 7855), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (7847, 7855), False, 'import torch\n'), ((8266, 8285), 'numpy.sqrt', 'np.sqrt', (['num_images'], {}), '(num_images)\n', (8273, 8285), True, 'import numpy as np\n'), ((11120, 11142), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (11131, 11142), False, 'import os\n'), ((9031, 9040), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9038, 9040), True, 'from matplotlib import pyplot as plt\n')]
|
import csv
import logging
import numpy as np
import datajoint as dj
import pathlib
import scipy.io as scio
from tifffile import imread
from . import InsertBuffer
from .reference import ccf_ontology
from . import get_schema_name
schema = dj.schema(get_schema_name('ccf'))
log = logging.getLogger(__name__)
@schema
class CCFLabel(dj.Lookup):
definition = """
# CCF Dataset Information
ccf_label_id: int # Local CCF ID
---
ccf_version: int # Allen CCF Version
ccf_resolution: int # Voxel Resolution (uM)
ccf_description: varchar(255) # CCFLabel Description
"""
CCF_R3_20UM_ID = 0
CCF_R3_20UM_DESC = 'Allen Institute Mouse CCF, Rev. 3, 20uM Resolution'
CCF_R3_20UM_TYPE = 'CCF_R3_20UM'
contents = [(CCF_R3_20UM_ID, 3, 20, CCF_R3_20UM_DESC)]
@schema
class CCF(dj.Lookup):
definition = """
# Common Coordinate Framework
-> CCFLabel
ccf_x : int # (um)
ccf_y : int # (um)
ccf_z : int # (um)
"""
@schema
class AnnotationType(dj.Lookup):
definition = """
annotation_type : varchar(16)
"""
contents = ((CCFLabel.CCF_R3_20UM_TYPE,),)
@schema
class CCFAnnotation(dj.Manual):
definition = """
-> CCF
-> AnnotationType
---
annotation : varchar(1024)
index (annotation)
"""
@classmethod
def get_ccf_r3_20um_ontology_regions(cls):
return [c for c in csv.reader(ccf_ontology.splitlines())
if len(c) == 2]
@classmethod
def load_ccf_r3_20um(cls):
"""
Load the CCF r3 20 uM Dataset.
Requires that dj.config['ccf.r3_20um_path'] be set to the location
of the CCF Annotation tif stack.
"""
# TODO: scaling
log.info('CCFAnnotation.load_ccf_r3_20um(): start')
self = cls() # Instantiate self,
stack_path = dj.config['custom']['ccf.r3_20um_path']
stack = imread(stack_path) # load reference stack,
log.info('.. loaded stack of shape {} from {}'
.format(stack.shape, stack_path))
# iterate over ccf ontology region id/name records,
regions = self.get_ccf_r3_20um_ontology_regions()
region, nregions = 0, len(regions)
chunksz, ib_args = 50000, {'skip_duplicates': True,
'allow_direct_insert': True}
for num, txt in regions:
region += 1
num = int(num)
log.info('.. loading region {} ({}/{}) ({})'
.format(num, region, nregions, txt))
# extracting filled volumes from stack in scaled [[x,y,z]] shape,
vol = np.array(np.where(stack == num)).T[:, [2, 1, 0]] * 20
if not vol.shape[0]:
log.info('.. region {} volume: shape {} - skipping'
.format(num, vol.shape))
continue
log.info('.. region {} volume: shape {}'.format(num, vol.shape))
with dj.conn().transaction:
with InsertBuffer(CCF, chunksz, **ib_args) as buf:
for vox in vol:
buf.insert1((CCFLabel.CCF_R3_20UM_ID, *vox))
buf.flush()
with InsertBuffer(cls, chunksz, **ib_args) as buf:
for vox in vol:
buf.insert1((CCFLabel.CCF_R3_20UM_ID, *vox,
CCFLabel.CCF_R3_20UM_TYPE, txt))
buf.flush()
log.info('.. done.')
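# Hedged toy sketch (numpy only, not part of the pipeline): how np.where turns a labelled
# annotation volume into scaled [[x, y, z]] voxel coordinates, mirroring the region loop in
# CCFAnnotation.load_ccf_r3_20um above (20 um voxel resolution assumed).
def _demo_region_voxels():
    stack = np.zeros((4, 4, 4), dtype=int)     # toy (z, y, x) annotation volume
    stack[1, 2, 3] = 17                       # a single voxel belonging to region id 17
    vol = np.array(np.where(stack == 17)).T[:, [2, 1, 0]] * 20
    return vol                                # array([[60, 40, 20]]) in um, (x, y, z) order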
@schema
class AnnotatedBrainSurface(dj.Manual):
definition = """ # iso-surface of annotated brain in CCF coordinate frame
annotated_brain_name: varchar(100) # e.g. Annotation_new_10_ds222_16bit
---
vertices: longblob # (px)
faces: longblob
"""
@classmethod
def load_matlab_mesh(self, mesh_fp):
mesh_fp = pathlib.Path(mesh_fp).resolve()
assert mesh_fp.exists()
        mesh = scio.loadmat(mesh_fp, struct_as_record=False, squeeze_me=True)['mesh']
self.insert1(dict(annotated_brain_name=mesh_fp.stem,
vertices=mesh.vertices,
faces=mesh.faces - 1), # 0-base index
allow_direct_insert=True)
|
[
"scipy.io.loadmat",
"pathlib.Path",
"numpy.where",
"tifffile.imread",
"datajoint.conn",
"logging.getLogger"
] |
[((284, 311), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (301, 311), False, 'import logging\n'), ((1964, 1982), 'tifffile.imread', 'imread', (['stack_path'], {}), '(stack_path)\n', (1970, 1982), False, 'from tifffile import imread\n'), ((3995, 4057), 'scipy.io.loadmat', 'scio.loadmat', (['mesh_fp'], {'struct_as_record': '(False)', 'squeeze_me': '(True)'}), '(mesh_fp, struct_as_record=False, squeeze_me=True)\n', (4007, 4057), True, 'import scipy.io as scio\n'), ((3916, 3937), 'pathlib.Path', 'pathlib.Path', (['mesh_fp'], {}), '(mesh_fp)\n', (3928, 3937), False, 'import pathlib\n'), ((3027, 3036), 'datajoint.conn', 'dj.conn', ([], {}), '()\n', (3034, 3036), True, 'import datajoint as dj\n'), ((2709, 2731), 'numpy.where', 'np.where', (['(stack == num)'], {}), '(stack == num)\n', (2717, 2731), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import sys
import os
from utils import DATA_DIR
class Dataset(object):
def __init__(self, DATA_NAME):
self.DATA_NAME = DATA_NAME
print("Initializing dataset:", DATA_NAME)
sys.stdout.flush()
data = pd.read_csv(os.path.join(DATA_DIR, "df_"+DATA_NAME+".csv"))
        data.loc[data['item_id'].isna(), 'item_id'] = ''
        data.loc[data['user_id'].isna(), 'user_id'] = ''
item_id_vals, item_ids = pd.factorize(data['item_id'].values)
user_id_vals, user_ids = pd.factorize(data['user_id'].values)
item_attr_vals, item_attr_ids = pd.factorize(data['model_attr'].values)
user_attr_vals, user_attr_ids = pd.factorize(data['user_attr'].values)
tmp = dict(zip(data['item_id'].values, item_attr_vals))
self.item_attr = np.array([tmp[_i] for _i in item_ids], dtype=int)
tmp = dict(zip(data['user_id'].values, user_attr_vals))
self.user_attr = np.array([tmp[_i] for _i in user_ids], dtype=int)
data['item_id'] = item_id_vals
data['user_id'] = user_id_vals
self.item_ids = item_ids
self.user_ids = user_ids
self.item_attr_ids = item_attr_ids
self.user_attr_ids = user_attr_ids
self.n_item = data['item_id'].max()+1
self.n_user = data['user_id'].max()+1
self.data = data[['user_id','item_id','rating','split','model_attr','user_attr']]
print("Successfully initialized!")
print(self.data.shape[0], "training records")
print("about", self.n_user, "users and", self.n_item, "items are loaded!")
sys.stdout.flush()
def get_user_item_train_map(self):
data = self.data
user_item_train_map = (self.data.loc[(self.data['rating']>=4) & (self.data['split'] == 0)]).groupby(
['user_id'])['item_id'].apply(list).to_dict()
return user_item_train_map
def get_neg_samples(self, N_NEG=10):
user_item_map = (self.data.loc[self.data['rating']>=4]).groupby(['user_id'])['item_id'].apply(list).to_dict()
print("Start sampling negative examples ...")
neg_samples = []
count = 0
print("current progress for", self.n_user, "users: ", end="")
sys.stdout.flush()
for u in range(self.n_user):
if count % 5000 == 0:
print(count, end=", ")
sys.stdout.flush()
count += 1
p = np.ones(self.n_item)
if u in user_item_map:
pos_items = np.array(user_item_map[u], dtype=int)
p[pos_items] = 0
p /= np.sum(p)
neg_items = np.random.choice(self.n_item, size=N_NEG, p=p)
neg_samples.append(neg_items)
print("done!")
sys.stdout.flush()
return np.array(neg_samples, dtype=int)
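# Hedged toy sketch (not used by the class above): the masking scheme behind
# get_neg_samples -- zero out a user's positive items, renormalise, then sample only items
# the user has not rated highly.
def _demo_masked_negative_sampling(n_item=6, pos_items=(1, 3), n_neg=4, seed=0):
    rng = np.random.default_rng(seed)
    p = np.ones(n_item)
    p[np.array(pos_items, dtype=int)] = 0
    p /= p.sum()
    return rng.choice(n_item, size=n_neg, p=p)    # never draws items 1 or 3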
|
[
"numpy.sum",
"numpy.ones",
"numpy.array",
"sys.stdout.flush",
"numpy.random.choice",
"pandas.factorize",
"os.path.join"
] |
[((249, 267), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (265, 267), False, 'import sys\n'), ((500, 536), 'pandas.factorize', 'pd.factorize', (["data['item_id'].values"], {}), "(data['item_id'].values)\n", (512, 536), True, 'import pandas as pd\n'), ((570, 606), 'pandas.factorize', 'pd.factorize', (["data['user_id'].values"], {}), "(data['user_id'].values)\n", (582, 606), True, 'import pandas as pd\n'), ((647, 686), 'pandas.factorize', 'pd.factorize', (["data['model_attr'].values"], {}), "(data['model_attr'].values)\n", (659, 686), True, 'import pandas as pd\n'), ((727, 765), 'pandas.factorize', 'pd.factorize', (["data['user_attr'].values"], {}), "(data['user_attr'].values)\n", (739, 765), True, 'import pandas as pd\n'), ((864, 913), 'numpy.array', 'np.array', (['[tmp[_i] for _i in item_ids]'], {'dtype': 'int'}), '([tmp[_i] for _i in item_ids], dtype=int)\n', (872, 913), True, 'import numpy as np\n'), ((1003, 1052), 'numpy.array', 'np.array', (['[tmp[_i] for _i in user_ids]'], {'dtype': 'int'}), '([tmp[_i] for _i in user_ids], dtype=int)\n', (1011, 1052), True, 'import numpy as np\n'), ((1690, 1708), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1706, 1708), False, 'import sys\n'), ((2365, 2383), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2381, 2383), False, 'import sys\n'), ((2901, 2919), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2917, 2919), False, 'import sys\n'), ((2944, 2976), 'numpy.array', 'np.array', (['neg_samples'], {'dtype': 'int'}), '(neg_samples, dtype=int)\n', (2952, 2976), True, 'import numpy as np\n'), ((296, 346), 'os.path.join', 'os.path.join', (['DATA_DIR', "('df_' + DATA_NAME + '.csv')"], {}), "(DATA_DIR, 'df_' + DATA_NAME + '.csv')\n", (308, 346), False, 'import os\n'), ((2575, 2595), 'numpy.ones', 'np.ones', (['self.n_item'], {}), '(self.n_item)\n', (2582, 2595), True, 'import numpy as np\n'), ((2747, 2756), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (2753, 2756), True, 'import numpy as np\n'), ((2781, 2827), 'numpy.random.choice', 'np.random.choice', (['self.n_item'], {'size': 'N_NEG', 'p': 'p'}), '(self.n_item, size=N_NEG, p=p)\n', (2797, 2827), True, 'import numpy as np\n'), ((2517, 2535), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2533, 2535), False, 'import sys\n'), ((2659, 2696), 'numpy.array', 'np.array', (['user_item_map[u]'], {'dtype': 'int'}), '(user_item_map[u], dtype=int)\n', (2667, 2696), True, 'import numpy as np\n')]
|
import os
import random
import syft as sy
import pandas as pd
import numpy as np
from PIL import Image
from tqdm import tqdm
from torch import ( # pylint:disable=no-name-in-module
manual_seed,
stack,
cat,
std_mean,
save,
is_tensor,
from_numpy,
randperm,
default_generator,
)
from torch._utils import _accumulate
import albumentations as a
from copy import deepcopy
from torch.utils import data as torchdata
from torchvision.datasets import MNIST
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from os.path import splitext
from typing import Dict, Union, Set, Callable
from pathlib import Path
from .dicomtools import DicomLoader
class AlbumentationsTorchTransform:
def __init__(self, transform, **kwargs):
# print("init albu transform wrapper")
self.transform = transform
self.kwargs = kwargs
def __call__(self, img):
# print("call albu transform wrapper")
if Image.isImageType(img):
img = np.array(img)
elif is_tensor(img):
img = img.cpu().numpy()
img = self.transform(image=img, **self.kwargs)["image"]
# if img.max() > 1:
# img = a.augmentations.functional.to_float(img, max_value=255)
img = from_numpy(img)
if img.shape[-1] < img.shape[0]:
img = img.permute(2, 0, 1)
return img
class CombinedLoader:
"""Class that combines several data loaders and their extensions.
Args:
mapping (Dict): Dictionary that maps loader names to tuples
consisting of (corresponding extensions, loader method)
"""
def __init__(
self,
mapping: Dict[str, Dict[str, Union[Set[str], Callable]]] = {
"default": {
"extensions": {
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
},
"loader": default_loader,
},
"dicom": {"extensions": {".dcm", ".dicom"}, "loader": DicomLoader(3)},
},
):
self.extensions = set()
self.mapping = mapping
self.ext_to_loader_name = dict()
for loader_name, defining_dict in mapping.items():
self.extensions |= defining_dict["extensions"]
for ext in defining_dict["extensions"]:
if ext in self.ext_to_loader_name:
raise RuntimeError(
"Extension {:s} was passed for multiple loaders".format(ext)
)
self.ext_to_loader_name[ext] = loader_name
def __call__(self, path: Path, **kwargs):
"""Apply loader to path
Args:
path (Path): path to file.
kwargs: kwargs passed to load methods
Returns:
Image: a PIL image of the given path
Raises:
RuntimeError: If loader for path extension not specified.
"""
file_ending = splitext(path)[1].lower()
if file_ending in self.extensions:
return self.mapping[self.ext_to_loader_name[file_ending]]["loader"](
path, **kwargs
)
else:
raise RuntimeError(
"file extension does not match specified supported extensions. "
"Please provide the matching loader for the {:s} extension.".format(
file_ending
)
)
def change_channels(self, num_channels: int):
"""Change the number of channels that are loaded (Default: 3)
Args:
num_channels (int): Number of channels. Currently only 1 and 3 supported
Raises:
RuntimeError: if num_channels is not 1 or 3
"""
if num_channels not in [1, 3]:
raise RuntimeError("Only 1 or 3 channels supported yet.")
self.mapping["default"]["loader"] = (
single_channel_loader if num_channels == 1 else default_loader
)
self.mapping["dicom"]["loader"] = DicomLoader(num_channels)
def create_albu_transform(args, mean, std):
train_tf = transforms.RandomAffine(
degrees=args.rotation,
translate=(args.translate, args.translate),
scale=(1.0 - args.scale, 1.0 + args.scale),
shear=args.shear,
# fillcolor=0,
)
start_transformations = [
a.Resize(args.inference_resolution, args.inference_resolution),
a.RandomCrop(args.train_resolution, args.train_resolution),
]
if args.clahe:
start_transformations.extend(
[
a.FromFloat(dtype="uint8", max_value=1.0),
a.CLAHE(always_apply=True, clip_limit=(1, 1)),
]
)
train_tf_albu = [
a.VerticalFlip(p=args.individual_albu_probs),
]
if args.randomgamma:
train_tf_albu.append(a.RandomGamma(p=args.individual_albu_probs))
if args.randombrightness:
train_tf_albu.append(a.RandomBrightness(p=args.individual_albu_probs))
if args.blur:
train_tf_albu.append(a.Blur(p=args.individual_albu_probs))
if args.elastic:
train_tf_albu.append(a.ElasticTransform(p=args.individual_albu_probs))
if args.optical_distortion:
train_tf_albu.append(a.OpticalDistortion(p=args.individual_albu_probs))
if args.grid_distortion:
train_tf_albu.append(a.GridDistortion(p=args.individual_albu_probs))
if args.grid_shuffle:
train_tf_albu.append(a.RandomGridShuffle(p=args.individual_albu_probs))
if args.hsv:
train_tf_albu.append(a.HueSaturationValue(p=args.individual_albu_probs))
if args.invert:
train_tf_albu.append(a.InvertImg(p=args.individual_albu_probs))
if args.cutout:
train_tf_albu.append(
a.Cutout(
num_holes=5, max_h_size=80, max_w_size=80, p=args.individual_albu_probs
)
)
if args.shadow:
assert args.pretrained, "RandomShadows needs 3 channels"
train_tf_albu.append(a.RandomShadow(p=args.individual_albu_probs))
if args.fog:
assert args.pretrained, "RandomFog needs 3 channels"
train_tf_albu.append(a.RandomFog(p=args.individual_albu_probs))
if args.sun_flare:
assert args.pretrained, "RandomSunFlare needs 3 channels"
train_tf_albu.append(a.RandomSunFlare(p=args.individual_albu_probs))
if args.solarize:
train_tf_albu.append(a.Solarize(p=args.individual_albu_probs))
if args.equalize:
train_tf_albu.append(a.Equalize(p=args.individual_albu_probs))
if args.grid_dropout:
train_tf_albu.append(a.GridDropout(p=args.individual_albu_probs))
train_tf_albu.append(a.GaussNoise(var_limit=args.noise_std ** 2, p=args.noise_prob))
end_transformations = [
a.ToFloat(max_value=255.0),
a.Normalize(mean, std, max_pixel_value=1.0),
]
if not args.pretrained:
end_transformations.append(
a.Lambda(image=lambda x, **kwargs: x[:, :, np.newaxis])
)
train_tf_albu = AlbumentationsTorchTransform(
a.Compose(
[
a.Compose(start_transformations),
a.Compose(train_tf_albu, p=args.albu_prob),
a.Compose(end_transformations),
]
)
)
return transforms.Compose([train_tf, train_tf_albu,])
def calc_mean_std(dataset, save_folder=None):
"""
Calculates the mean and standard deviation of `dataset` and
saves them to `save_folder`.
Needs a dataset where all images have the same size
"""
accumulated_data = []
for d in tqdm(
dataset, total=len(dataset), leave=False, desc="accumulate data in dataset"
):
if type(d) is tuple or type(d) is list:
d = d[0]
accumulated_data.append(d)
if isinstance(dataset, torchdata.Dataset):
accumulated_data = stack(accumulated_data)
elif isinstance(dataset, torchdata.DataLoader):
accumulated_data = cat(accumulated_data)
else:
raise NotImplementedError("don't know how to process this data input class")
if accumulated_data.shape[1] in [1, 3]: # ugly hack
dims = (0, *range(2, len(accumulated_data.shape)))
else:
dims = (*range(len(accumulated_data.shape)),)
std, mean = std_mean(accumulated_data, dim=dims)
if save_folder:
save(stack([mean, std]), os.path.join(save_folder, "mean_std.pt"))
return mean, std
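# Hedged usage sketch: per-channel statistics of a toy dataset via calc_mean_std above.
# All samples must share the same shape, as the docstring requires.
def _demo_calc_mean_std():
    toy = torchdata.TensorDataset(from_numpy(np.random.rand(10, 3, 8, 8).astype("float32")))
    mean, std = calc_mean_std(toy)    # two tensors of shape (3,)
    return mean, std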
def single_channel_loader(filename):
"""Converts `filename` to a grayscale PIL Image
"""
with open(filename, "rb") as f:
img = Image.open(f).convert("L")
return img.copy()
class LabelMNIST(MNIST):
def __init__(self, labels, *args, **kwargs):
super().__init__(*args, **kwargs)
indices = np.isin(self.targets, labels).astype("bool")
self.data = self.data[indices]
self.targets = self.targets[indices]
class PathDataset(torchdata.Dataset):
def __init__(
self,
root,
transform=None,
loader=CombinedLoader(),
extensions=[
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
".dcm",
".dicom",
],
):
super(PathDataset, self).__init__()
self.root = root
self.transform = transform
self.loader = loader
self.imgs = [
f
for f in os.listdir(root)
if os.path.splitext(f)[1].lower() in extensions
and not os.path.split(f)[1].lower().startswith("._")
]
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
img_path = self.imgs[idx]
img = self.loader(os.path.join(self.root, img_path))
if self.transform:
img = self.transform(img)
return img
class RemoteTensorDataset(torchdata.Dataset):
def __init__(self, tensor):
self.tensor = tensor
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, idx):
return self.tensor[idx].copy()
class ImageFolderFromCSV(torchdata.Dataset):
def __init__(
self, csv_path, img_folder_path, transform=None, target_transform=None
):
super().__init__()
self.transform = transform
self.target_transform = target_transform
self.img_folder_path = img_folder_path
self.img_files = [
i for i in os.listdir(img_folder_path) if not i.startswith(".")
]
metastats = pd.read_csv(csv_path)
metastats["class_label"] = metastats.apply(
ImageFolderFromCSV.__meta_to_class__, axis=1
)
self.categorize_dict = dict(
zip(metastats.X_ray_image_name, metastats.class_label)
)
        for img in list(self.img_files):  # iterate over a copy so removal is safe
            assert (
                img in self.categorize_dict.keys()
            ), "img label not known {:s}".format(str(img))
            if self.categorize_dict[img] == -1:
                self.img_files.remove(img)
                print("Ignore image {:s} because category is uncertain".format(img))
@staticmethod
def __meta_to_class__(row):
if row["Label"] == "Normal":
return 0
if row["Label"] == "Pnemonia": # i know this is a typo but was in original csv
if row["Label_1_Virus_category"] == "bacteria":
return 1
if row["Label_1_Virus_category"] == "Virus":
return 2
return -1
def __getitem__(self, i):
img_path = self.img_files[i]
label = self.categorize_dict[img_path]
img = single_channel_loader(os.path.join(self.img_folder_path, img_path))
if self.transform:
img = self.transform(img)
if self.target_transform:
label = self.target_transform(label)
return img, label
def __len__(self):
return len(self.img_files)
class PPPP(torchdata.Dataset):
def __init__(
self, label_path="data/Labels.csv", train=False, transform=None, seed=1,
):
super().__init__()
random.seed(seed)
manual_seed(seed)
self.train = train
self.labels = pd.read_csv(label_path)
self.labels = self.labels[
self.labels["Dataset_type"] == ("TRAIN" if train else "TEST")
]
self.transform = transform
"""
Split into train and validation set
if self.train:
indices = [
i
for i in range(len(self.labels))
if ((i % self.val_split) != 0 and self.val)
or (not self.val and (i % self.val_split) == 0)
]
self.labels = self.labels.drop(index=indices)
"""
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
row = self.labels.iloc[index]
label = row["Numeric_Label"]
path = "train" if self.train else "test"
path = os.path.join("data", path, row["X_ray_image_name"])
img = single_channel_loader(path)
if self.transform:
img = self.transform(img)
return img, label
# def get_class_name(self, numeric_label):
# return self.class_names[numeric_label]
"""
    Works only if torch.utils.data.random_split has not been applied
"""
def get_class_occurances(self):
return dict(self.labels["Numeric_Label"].value_counts())
def __compute_mean_std__(self):
calc_mean_std(
self, save_folder="data",
)
## This is from torch.utils.data and adapted for our purposes
class Subset(torchdata.Dataset):
def __init__(self, dataset, indices):
self.dataset = deepcopy(dataset)
self.indices = indices
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
def random_split(dataset, lengths, generator=default_generator):
if sum(lengths) != len(dataset):
raise ValueError(
"Sum of input lengths does not equal the length of the input dataset!"
)
indices = randperm(sum(lengths), generator=generator).tolist()
return [
Subset(dataset, indices[offset - length : offset])
for offset, length in zip(_accumulate(lengths), lengths)
]
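# Hedged usage sketch (hypothetical sizes): split a dataset of length 100 into
# reproducible train/val subsets, e.g.
#   train_set, val_set = random_split(ds, [80, 20])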
if __name__ == "__main__":
# import matplotlib.pyplot as plt
import sys
from tqdm import tqdm
import numpy as np
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
from torchlib.utils import AddGaussianNoise
ds = PPPP(train=True, transform=transforms.ToTensor())
print("Class distribution")
print(ds.get_class_occurances())
sizes = []
for data, _ in tqdm(ds, total=len(ds), leave=False):
sizes.append(data.size()[1:])
sizes = np.array(sizes)
print(
"data resolution stats: \n\tmin: {:s}\n\tmax: {:s}\n\tmean: {:s}\n\tmedian: {:s}".format(
str(np.min(sizes, axis=0)),
str(np.max(sizes, axis=0)),
str(np.mean(sizes, axis=0)),
str(np.median(sizes, axis=0)),
)
)
ds = PPPP(train=False)
L = len(ds)
print("length test set: {:d}".format(L))
img, label = ds[1]
img.show()
tf = transforms.Compose(
[transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(),]
)
ds = PPPP(train=True, transform=tf)
ds.__compute_mean_std__()
L = len(ds)
print("length train set: {:d}".format(L))
from matplotlib import pyplot as plt
ds = PPPP()
hist = ds.labels.hist(bins=3, column="Numeric_Label")
plt.show()
|
[
"numpy.isin",
"albumentations.Lambda",
"albumentations.RandomSunFlare",
"albumentations.GaussNoise",
"albumentations.Resize",
"pandas.read_csv",
"albumentations.RandomShadow",
"torch.cat",
"albumentations.RandomFog",
"numpy.mean",
"albumentations.Normalize",
"torch.std_mean",
"os.path.join",
"PIL.Image.isImageType",
"albumentations.Blur",
"albumentations.Cutout",
"os.path.dirname",
"albumentations.OpticalDistortion",
"numpy.max",
"torchvision.transforms.Compose",
"random.seed",
"torchvision.transforms.CenterCrop",
"albumentations.CLAHE",
"torch.is_tensor",
"copy.deepcopy",
"matplotlib.pyplot.show",
"albumentations.GridDistortion",
"albumentations.GridDropout",
"torch.manual_seed",
"numpy.median",
"albumentations.RandomGridShuffle",
"numpy.min",
"albumentations.RandomBrightness",
"albumentations.RandomGamma",
"albumentations.VerticalFlip",
"os.listdir",
"torch.from_numpy",
"albumentations.ToFloat",
"torchvision.transforms.Resize",
"torch._utils._accumulate",
"torchvision.transforms.RandomAffine",
"albumentations.Compose",
"torch.stack",
"albumentations.InvertImg",
"albumentations.Solarize",
"albumentations.HueSaturationValue",
"PIL.Image.open",
"numpy.array",
"albumentations.FromFloat",
"os.path.splitext",
"albumentations.ElasticTransform",
"albumentations.RandomCrop",
"os.path.split",
"albumentations.Equalize",
"torchvision.transforms.ToTensor"
] |
[((4313, 4474), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': 'args.rotation', 'translate': '(args.translate, args.translate)', 'scale': '(1.0 - args.scale, 1.0 + args.scale)', 'shear': 'args.shear'}), '(degrees=args.rotation, translate=(args.translate,\n args.translate), scale=(1.0 - args.scale, 1.0 + args.scale), shear=args\n .shear)\n', (4336, 4474), False, 'from torchvision import transforms\n'), ((7495, 7540), 'torchvision.transforms.Compose', 'transforms.Compose', (['[train_tf, train_tf_albu]'], {}), '([train_tf, train_tf_albu])\n', (7513, 7540), False, 'from torchvision import transforms\n'), ((8490, 8526), 'torch.std_mean', 'std_mean', (['accumulated_data'], {'dim': 'dims'}), '(accumulated_data, dim=dims)\n', (8498, 8526), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((15169, 15184), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (15177, 15184), True, 'import numpy as np\n'), ((15977, 15987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15985, 15987), True, 'from matplotlib import pyplot as plt\n'), ((993, 1015), 'PIL.Image.isImageType', 'Image.isImageType', (['img'], {}), '(img)\n', (1010, 1015), False, 'from PIL import Image\n'), ((1296, 1311), 'torch.from_numpy', 'from_numpy', (['img'], {}), '(img)\n', (1306, 1311), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((4569, 4631), 'albumentations.Resize', 'a.Resize', (['args.inference_resolution', 'args.inference_resolution'], {}), '(args.inference_resolution, args.inference_resolution)\n', (4577, 4631), True, 'import albumentations as a\n'), ((4641, 4699), 'albumentations.RandomCrop', 'a.RandomCrop', (['args.train_resolution', 'args.train_resolution'], {}), '(args.train_resolution, args.train_resolution)\n', (4653, 4699), True, 'import albumentations as a\n'), ((4954, 4998), 'albumentations.VerticalFlip', 'a.VerticalFlip', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (4968, 4998), True, 'import albumentations as a\n'), ((6884, 6946), 'albumentations.GaussNoise', 'a.GaussNoise', ([], {'var_limit': '(args.noise_std ** 2)', 'p': 'args.noise_prob'}), '(var_limit=args.noise_std ** 2, p=args.noise_prob)\n', (6896, 6946), True, 'import albumentations as a\n'), ((6984, 7010), 'albumentations.ToFloat', 'a.ToFloat', ([], {'max_value': '(255.0)'}), '(max_value=255.0)\n', (6993, 7010), True, 'import albumentations as a\n'), ((7020, 7063), 'albumentations.Normalize', 'a.Normalize', (['mean', 'std'], {'max_pixel_value': '(1.0)'}), '(mean, std, max_pixel_value=1.0)\n', (7031, 7063), True, 'import albumentations as a\n'), ((8074, 8097), 'torch.stack', 'stack', (['accumulated_data'], {}), '(accumulated_data)\n', (8079, 8097), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((10809, 10830), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (10820, 10830), True, 'import pandas as pd\n'), ((12391, 12408), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (12402, 12408), False, 'import random\n'), ((12417, 12434), 'torch.manual_seed', 'manual_seed', (['seed'], {}), '(seed)\n', (12428, 12434), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((12484, 12507), 'pandas.read_csv', 'pd.read_csv', (['label_path'], {}), '(label_path)\n', 
(12495, 12507), True, 'import pandas as pd\n'), ((13270, 13321), 'os.path.join', 'os.path.join', (['"""data"""', 'path', "row['X_ray_image_name']"], {}), "('data', path, row['X_ray_image_name'])\n", (13282, 13321), False, 'import os\n'), ((14008, 14025), 'copy.deepcopy', 'deepcopy', (['dataset'], {}), '(dataset)\n', (14016, 14025), False, 'from copy import deepcopy\n'), ((1035, 1048), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1043, 1048), True, 'import numpy as np\n'), ((1062, 1076), 'torch.is_tensor', 'is_tensor', (['img'], {}), '(img)\n', (1071, 1076), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((5060, 5103), 'albumentations.RandomGamma', 'a.RandomGamma', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5073, 5103), True, 'import albumentations as a\n'), ((5164, 5212), 'albumentations.RandomBrightness', 'a.RandomBrightness', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5182, 5212), True, 'import albumentations as a\n'), ((5261, 5297), 'albumentations.Blur', 'a.Blur', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5267, 5297), True, 'import albumentations as a\n'), ((5349, 5397), 'albumentations.ElasticTransform', 'a.ElasticTransform', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5367, 5397), True, 'import albumentations as a\n'), ((5460, 5509), 'albumentations.OpticalDistortion', 'a.OpticalDistortion', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5479, 5509), True, 'import albumentations as a\n'), ((5569, 5615), 'albumentations.GridDistortion', 'a.GridDistortion', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5585, 5615), True, 'import albumentations as a\n'), ((5672, 5721), 'albumentations.RandomGridShuffle', 'a.RandomGridShuffle', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5691, 5721), True, 'import albumentations as a\n'), ((5769, 5819), 'albumentations.HueSaturationValue', 'a.HueSaturationValue', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5789, 5819), True, 'import albumentations as a\n'), ((5870, 5911), 'albumentations.InvertImg', 'a.InvertImg', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (5881, 5911), True, 'import albumentations as a\n'), ((5975, 6061), 'albumentations.Cutout', 'a.Cutout', ([], {'num_holes': '(5)', 'max_h_size': '(80)', 'max_w_size': '(80)', 'p': 'args.individual_albu_probs'}), '(num_holes=5, max_h_size=80, max_w_size=80, p=args.\n individual_albu_probs)\n', (5983, 6061), True, 'import albumentations as a\n'), ((6211, 6255), 'albumentations.RandomShadow', 'a.RandomShadow', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (6225, 6255), True, 'import albumentations as a\n'), ((6364, 6405), 'albumentations.RandomFog', 'a.RandomFog', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (6375, 6405), True, 'import albumentations as a\n'), ((6525, 6571), 'albumentations.RandomSunFlare', 'a.RandomSunFlare', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (6541, 6571), True, 'import albumentations as a\n'), ((6624, 6664), 'albumentations.Solarize', 'a.Solarize', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (6634, 6664), True, 'import albumentations as 
a\n'), ((6717, 6757), 'albumentations.Equalize', 'a.Equalize', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (6727, 6757), True, 'import albumentations as a\n'), ((6814, 6857), 'albumentations.GridDropout', 'a.GridDropout', ([], {'p': 'args.individual_albu_probs'}), '(p=args.individual_albu_probs)\n', (6827, 6857), True, 'import albumentations as a\n'), ((7147, 7202), 'albumentations.Lambda', 'a.Lambda', ([], {'image': '(lambda x, **kwargs: x[:, :, np.newaxis])'}), '(image=lambda x, **kwargs: x[:, :, np.newaxis])\n', (7155, 7202), True, 'import albumentations as a\n'), ((8177, 8198), 'torch.cat', 'cat', (['accumulated_data'], {}), '(accumulated_data)\n', (8180, 8198), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((8560, 8578), 'torch.stack', 'stack', (['[mean, std]'], {}), '([mean, std])\n', (8565, 8578), False, 'from torch import manual_seed, stack, cat, std_mean, save, is_tensor, from_numpy, randperm, default_generator\n'), ((8580, 8620), 'os.path.join', 'os.path.join', (['save_folder', '"""mean_std.pt"""'], {}), "(save_folder, 'mean_std.pt')\n", (8592, 8620), False, 'import os\n'), ((10006, 10039), 'os.path.join', 'os.path.join', (['self.root', 'img_path'], {}), '(self.root, img_path)\n', (10018, 10039), False, 'import os\n'), ((11938, 11982), 'os.path.join', 'os.path.join', (['self.img_folder_path', 'img_path'], {}), '(self.img_folder_path, img_path)\n', (11950, 11982), False, 'import os\n'), ((14953, 14974), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (14972, 14974), False, 'from torchvision import transforms\n'), ((15641, 15663), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (15658, 15663), False, 'from torchvision import transforms\n'), ((15665, 15691), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (15686, 15691), False, 'from torchvision import transforms\n'), ((15693, 15714), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (15712, 15714), False, 'from torchvision import transforms\n'), ((4794, 4835), 'albumentations.FromFloat', 'a.FromFloat', ([], {'dtype': '"""uint8"""', 'max_value': '(1.0)'}), "(dtype='uint8', max_value=1.0)\n", (4805, 4835), True, 'import albumentations as a\n'), ((4853, 4898), 'albumentations.CLAHE', 'a.CLAHE', ([], {'always_apply': '(True)', 'clip_limit': '(1, 1)'}), '(always_apply=True, clip_limit=(1, 1))\n', (4860, 4898), True, 'import albumentations as a\n'), ((7312, 7344), 'albumentations.Compose', 'a.Compose', (['start_transformations'], {}), '(start_transformations)\n', (7321, 7344), True, 'import albumentations as a\n'), ((7362, 7404), 'albumentations.Compose', 'a.Compose', (['train_tf_albu'], {'p': 'args.albu_prob'}), '(train_tf_albu, p=args.albu_prob)\n', (7371, 7404), True, 'import albumentations as a\n'), ((7422, 7452), 'albumentations.Compose', 'a.Compose', (['end_transformations'], {}), '(end_transformations)\n', (7431, 7452), True, 'import albumentations as a\n'), ((8792, 8805), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (8802, 8805), False, 'from PIL import Image\n'), ((8981, 9010), 'numpy.isin', 'np.isin', (['self.targets', 'labels'], {}), '(self.targets, labels)\n', (8988, 9010), True, 'import numpy as np\n'), ((9707, 9723), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (9717, 9723), False, 'import os\n'), ((10725, 10752), 'os.listdir', 'os.listdir', (['img_folder_path'], 
{}), '(img_folder_path)\n', (10735, 10752), False, 'import os\n'), ((14591, 14611), 'torch._utils._accumulate', '_accumulate', (['lengths'], {}), '(lengths)\n', (14602, 14611), False, 'from torch._utils import _accumulate\n'), ((14818, 14843), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (14833, 14843), False, 'import os\n'), ((15310, 15331), 'numpy.min', 'np.min', (['sizes'], {'axis': '(0)'}), '(sizes, axis=0)\n', (15316, 15331), True, 'import numpy as np\n'), ((15350, 15371), 'numpy.max', 'np.max', (['sizes'], {'axis': '(0)'}), '(sizes, axis=0)\n', (15356, 15371), True, 'import numpy as np\n'), ((15390, 15412), 'numpy.mean', 'np.mean', (['sizes'], {'axis': '(0)'}), '(sizes, axis=0)\n', (15397, 15412), True, 'import numpy as np\n'), ((15431, 15455), 'numpy.median', 'np.median', (['sizes'], {'axis': '(0)'}), '(sizes, axis=0)\n', (15440, 15455), True, 'import numpy as np\n'), ((3167, 3181), 'os.path.splitext', 'splitext', (['path'], {}), '(path)\n', (3175, 3181), False, 'from os.path import splitext\n'), ((9739, 9758), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (9755, 9758), False, 'import os\n'), ((9804, 9820), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (9817, 9820), False, 'import os\n')]
|
import warnings
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
import argparse
import os
import pandas as pd
import numpy as np
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from collections import defaultdict
from catalyst.utils import any2device
from pytorch_toolbelt.utils import to_numpy, fs
from pytorch_toolbelt.utils.catalyst import report_checkpoint
from alaska2 import *
from alaska2.dataset import get_train_except_holdout
@torch.no_grad()
def compute_trn_predictions(model, dataset, fp16=False, batch_size=1, workers=0) -> pd.DataFrame:
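    """Run `model` over every batch of `dataset` and collect image ids, ground
    truth (when present) and the model outputs, including any *_tta outputs,
    into a pandas DataFrame."""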
df = defaultdict(list)
for batch in tqdm(
DataLoader(
dataset, batch_size=batch_size, num_workers=workers, shuffle=False, drop_last=False, pin_memory=True
)
):
batch = any2device(batch, device="cuda")
if fp16 and INPUT_FEATURES_JPEG_FLOAT in batch:
batch[INPUT_FEATURES_JPEG_FLOAT] = batch[INPUT_FEATURES_JPEG_FLOAT].half()
if INPUT_TRUE_MODIFICATION_FLAG in batch:
y_trues = to_numpy(batch[INPUT_TRUE_MODIFICATION_FLAG]).flatten()
df[INPUT_TRUE_MODIFICATION_FLAG].extend(y_trues)
if INPUT_TRUE_MODIFICATION_TYPE in batch:
y_labels = to_numpy(batch[INPUT_TRUE_MODIFICATION_TYPE]).flatten()
df[INPUT_TRUE_MODIFICATION_TYPE].extend(y_labels)
image_ids = batch[INPUT_IMAGE_ID_KEY]
df[INPUT_IMAGE_ID_KEY].extend(image_ids)
outputs = model(**batch)
if OUTPUT_PRED_MODIFICATION_FLAG in outputs:
df[OUTPUT_PRED_MODIFICATION_FLAG].extend(to_numpy(outputs[OUTPUT_PRED_MODIFICATION_FLAG]).flatten())
if OUTPUT_PRED_MODIFICATION_TYPE in outputs:
df[OUTPUT_PRED_MODIFICATION_TYPE].extend(outputs[OUTPUT_PRED_MODIFICATION_TYPE].tolist())
if OUTPUT_PRED_EMBEDDING in outputs:
df[OUTPUT_PRED_EMBEDDING].extend(outputs[OUTPUT_PRED_EMBEDDING].tolist())
# Save also TTA predictions for future use
if OUTPUT_PRED_MODIFICATION_FLAG + "_tta" in outputs:
df[OUTPUT_PRED_MODIFICATION_FLAG + "_tta"].extend(
to_numpy(outputs[OUTPUT_PRED_MODIFICATION_FLAG + "_tta"]).tolist()
)
if OUTPUT_PRED_MODIFICATION_TYPE + "_tta" in outputs:
df[OUTPUT_PRED_MODIFICATION_TYPE + "_tta"].extend(
to_numpy(outputs[OUTPUT_PRED_MODIFICATION_TYPE + "_tta"]).tolist()
)
df = pd.DataFrame.from_dict(df)
return df
@torch.no_grad()
def main():
# Give no chance to randomness
torch.manual_seed(0)
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint", type=str, nargs="+")
parser.add_argument("-dd", "--data-dir", type=str, default=os.environ.get("KAGGLE_2020_ALASKA2"))
parser.add_argument("-b", "--batch-size", type=int, default=1)
parser.add_argument("-w", "--workers", type=int, default=0)
parser.add_argument("-d4", "--d4-tta", action="store_true")
parser.add_argument("-hv", "--hv-tta", action="store_true")
parser.add_argument("-f", "--force-recompute", action="store_true")
parser.add_argument("-fp16", "--fp16", action="store_true")
args = parser.parse_args()
checkpoint_fnames = args.checkpoint
data_dir = args.data_dir
batch_size = args.batch_size
workers = args.workers
fp16 = args.fp16
d4_tta = args.d4_tta
force_recompute = args.force_recompute
need_embedding = True
outputs = [OUTPUT_PRED_MODIFICATION_FLAG, OUTPUT_PRED_MODIFICATION_TYPE, OUTPUT_PRED_EMBEDDING]
embedding_suffix = "_w_emb" if need_embedding else ""
for checkpoint_fname in checkpoint_fnames:
model, checkpoints, required_features = ensemble_from_checkpoints(
[checkpoint_fname], strict=True, outputs=outputs, activation=None, tta=None, need_embedding=need_embedding
)
report_checkpoint(checkpoints[0])
model = model.cuda()
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model = model.eval()
if fp16:
model = model.half()
train_ds = get_train_except_holdout(data_dir, features=required_features)
holdout_ds = get_holdout(data_dir, features=required_features)
test_ds = get_test_dataset(data_dir, features=required_features)
if d4_tta:
model = wrap_model_with_tta(model, "d4", inputs=required_features, outputs=outputs).eval()
tta_suffix = "_d4_tta"
else:
tta_suffix = ""
# Train
trn_predictions_csv = fs.change_extension(
checkpoint_fname, f"_train_predictions{embedding_suffix}{tta_suffix}.pkl"
)
if force_recompute or not os.path.exists(trn_predictions_csv):
trn_predictions = compute_trn_predictions(
model, train_ds, fp16=fp16, batch_size=batch_size, workers=workers
)
trn_predictions.to_pickle(trn_predictions_csv)
# Holdout
hld_predictions_csv = fs.change_extension(
checkpoint_fname, f"_holdout_predictions{embedding_suffix}{tta_suffix}.pkl"
)
if force_recompute or not os.path.exists(hld_predictions_csv):
hld_predictions = compute_trn_predictions(
model, holdout_ds, fp16=fp16, batch_size=batch_size, workers=workers
)
hld_predictions.to_pickle(hld_predictions_csv)
# Test
tst_predictions_csv = fs.change_extension(
checkpoint_fname, f"_test_predictions{embedding_suffix}{tta_suffix}.pkl"
)
if force_recompute or not os.path.exists(tst_predictions_csv):
tst_predictions = compute_trn_predictions(
model, test_ds, fp16=fp16, batch_size=batch_size, workers=workers
)
tst_predictions.to_pickle(tst_predictions_csv)
if __name__ == "__main__":
main()
|
[
"numpy.random.seed",
"pandas.DataFrame.from_dict",
"warnings.simplefilter",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"catalyst.utils.any2device",
"os.path.exists",
"collections.defaultdict",
"pytorch_toolbelt.utils.fs.change_extension",
"os.environ.get",
"pytorch_toolbelt.utils.to_numpy",
"torch.nn.DataParallel",
"pytorch_toolbelt.utils.catalyst.report_checkpoint",
"alaska2.dataset.get_train_except_holdout"
] |
[((18, 62), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (39, 62), False, 'import warnings\n'), ((63, 109), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (84, 109), False, 'import warnings\n'), ((646, 663), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (657, 663), False, 'from collections import defaultdict\n'), ((2504, 2530), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df'], {}), '(df)\n', (2526, 2530), True, 'import pandas as pd\n'), ((2640, 2657), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2654, 2657), True, 'import numpy as np\n'), ((2761, 2786), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2784, 2786), False, 'import argparse\n'), ((695, 812), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)'}), '(dataset, batch_size=batch_size, num_workers=workers, shuffle=\n False, drop_last=False, pin_memory=True)\n', (705, 812), False, 'from torch.utils.data import DataLoader\n'), ((853, 885), 'catalyst.utils.any2device', 'any2device', (['batch'], {'device': '"""cuda"""'}), "(batch, device='cuda')\n", (863, 885), False, 'from catalyst.utils import any2device\n'), ((4040, 4073), 'pytorch_toolbelt.utils.catalyst.report_checkpoint', 'report_checkpoint', (['checkpoints[0]'], {}), '(checkpoints[0])\n', (4057, 4073), False, 'from pytorch_toolbelt.utils.catalyst import report_checkpoint\n'), ((4289, 4351), 'alaska2.dataset.get_train_except_holdout', 'get_train_except_holdout', (['data_dir'], {'features': 'required_features'}), '(data_dir, features=required_features)\n', (4313, 4351), False, 'from alaska2.dataset import get_train_except_holdout\n'), ((4743, 4841), 'pytorch_toolbelt.utils.fs.change_extension', 'fs.change_extension', (['checkpoint_fname', 'f"""_train_predictions{embedding_suffix}{tta_suffix}.pkl"""'], {}), "(checkpoint_fname,\n f'_train_predictions{embedding_suffix}{tta_suffix}.pkl')\n", (4762, 4841), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((5191, 5291), 'pytorch_toolbelt.utils.fs.change_extension', 'fs.change_extension', (['checkpoint_fname', 'f"""_holdout_predictions{embedding_suffix}{tta_suffix}.pkl"""'], {}), "(checkpoint_fname,\n f'_holdout_predictions{embedding_suffix}{tta_suffix}.pkl')\n", (5210, 5291), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((5640, 5737), 'pytorch_toolbelt.utils.fs.change_extension', 'fs.change_extension', (['checkpoint_fname', 'f"""_test_predictions{embedding_suffix}{tta_suffix}.pkl"""'], {}), "(checkpoint_fname,\n f'_test_predictions{embedding_suffix}{tta_suffix}.pkl')\n", (5659, 5737), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((2909, 2946), 'os.environ.get', 'os.environ.get', (['"""KAGGLE_2020_ALASKA2"""'], {}), "('KAGGLE_2020_ALASKA2')\n", (2923, 2946), False, 'import os\n'), ((4166, 4188), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (4181, 4188), False, 'from torch import nn\n'), ((4894, 4929), 'os.path.exists', 'os.path.exists', (['trn_predictions_csv'], {}), '(trn_predictions_csv)\n', (4908, 4929), False, 'import os\n'), ((5344, 5379), 'os.path.exists', 'os.path.exists', (['hld_predictions_csv'], {}), '(hld_predictions_csv)\n', (5358, 5379), False, 'import os\n'), ((5790, 5825), 'os.path.exists', 
'os.path.exists', (['tst_predictions_csv'], {}), '(tst_predictions_csv)\n', (5804, 5825), False, 'import os\n'), ((1103, 1148), 'pytorch_toolbelt.utils.to_numpy', 'to_numpy', (['batch[INPUT_TRUE_MODIFICATION_FLAG]'], {}), '(batch[INPUT_TRUE_MODIFICATION_FLAG])\n', (1111, 1148), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((1294, 1339), 'pytorch_toolbelt.utils.to_numpy', 'to_numpy', (['batch[INPUT_TRUE_MODIFICATION_TYPE]'], {}), '(batch[INPUT_TRUE_MODIFICATION_TYPE])\n', (1302, 1339), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((1649, 1697), 'pytorch_toolbelt.utils.to_numpy', 'to_numpy', (['outputs[OUTPUT_PRED_MODIFICATION_FLAG]'], {}), '(outputs[OUTPUT_PRED_MODIFICATION_FLAG])\n', (1657, 1697), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((2190, 2247), 'pytorch_toolbelt.utils.to_numpy', 'to_numpy', (["outputs[OUTPUT_PRED_MODIFICATION_FLAG + '_tta']"], {}), "(outputs[OUTPUT_PRED_MODIFICATION_FLAG + '_tta'])\n", (2198, 2247), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n'), ((2413, 2470), 'pytorch_toolbelt.utils.to_numpy', 'to_numpy', (["outputs[OUTPUT_PRED_MODIFICATION_TYPE + '_tta']"], {}), "(outputs[OUTPUT_PRED_MODIFICATION_TYPE + '_tta'])\n", (2421, 2470), False, 'from pytorch_toolbelt.utils import to_numpy, fs\n')]
|
#! /usr/bin/env python3
import sys
import csv
import argparse
import numpy as np
import pandas as pd
from mll_calc.all_jobs import parent_jobs, kid_jobs
def row_calcs(ext_test):
if 'no' in ext_test:
#db_rows = 450240
#max_jobs = 9750
db_rows = 90048 * 4
max_jobs = 978 * 4
else:
db_rows = 505
max_jobs = 10
n_rows = db_rows // max_jobs
init_rows = np.arange(0, db_rows, n_rows).tolist()
end_rows = init_rows[1:]
    # TODO: removed the +1 below because the last index previously came out one too high
end_rows.append(db_rows)
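    # e.g. with the small case above (db_rows=505, max_jobs=10): n_rows=50,
    # init_rows=[0, 50, ..., 500] and end_rows=[50, 100, ..., 500, 505]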
################################################
################ In-script test ################
################################################
if db_rows % n_rows == 0:
total_jobs = db_rows // n_rows
else:
total_jobs = db_rows // n_rows + 1
if len(init_rows) != total_jobs or len(end_rows) != total_jobs:
print(total_jobs, len(init_rows), len(end_rows))
sys.exit('total expected jobs does not equal one of db_row lists')
################################################
return init_rows, end_rows
def make_paramstxt(parent_job, kid_jobs):
parent_dir = parent_job['parent_dir']
fname = parent_dir + '_params.txt'
init_rows, end_rows = row_calcs(parent_job['ext_test'])
for unc_num, (kid_dir, unc) in enumerate(zip(kid_jobs['job_dirs'], kid_jobs['uncs'])):
if parent_dir == 'train_nuc29':
fname = parent_dir + '_' + str(unc_num) + '_params.txt'
#with open(fname, 'w') as f:
with open(fname, 'a') as f:
w = csv.writer(f)
job_dir = parent_dir + '/' + kid_dir
for i in range(0, len(init_rows)):
job = [job_dir, unc,
parent_job['train_pkl'], parent_job['test_pkl'],
str(i).zfill(4), init_rows[i], end_rows[i],
parent_job['ext_test'], parent_job['ratios']
]
w.writerow(job)
return
def main():
"""
Reads all the job descriptions from all_jobs.py and populates the necessary
params_mll_calc.txt files
"""
for parent_job in parent_jobs:
make_paramstxt(parent_job, kid_jobs)
return
if __name__ == "__main__":
main()
|
[
"numpy.arange",
"csv.writer",
"sys.exit"
] |
[((993, 1059), 'sys.exit', 'sys.exit', (['"""total expected jobs does not equal one of db_row lists"""'], {}), "('total expected jobs does not equal one of db_row lists')\n", (1001, 1059), False, 'import sys\n'), ((414, 443), 'numpy.arange', 'np.arange', (['(0)', 'db_rows', 'n_rows'], {}), '(0, db_rows, n_rows)\n', (423, 443), True, 'import numpy as np\n'), ((1617, 1630), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1627, 1630), False, 'import csv\n')]
|
# import the needed packages
import pickle
from sklearn import preprocessing
import time
from os import listdir
from os.path import isfile, join
from random import randint, uniform
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from scipy import ndimage
from skimage import morphology
from skimage import exposure
import os
from math import pi
from math import isnan
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from skimage.filters import sobel
# set random seed
np.random.seed(26)
# the NaiveBayes classifier I wrote for assignment 6 in BSYSE_530, modified a little for this purpose
class NaiveBayes:
# P(c|x) = P(x|c) * P(c) / P(x)
    # P(c|x) is the posterior probability
    # P(x|c) is the likelihood
    # P(c) is the class prior probability, or the prob of c occurring independently.
    # P(x) is the predictor prior probability, or the prob of x occurring independently.
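    # Illustration with made-up numbers: if P(c) = 0.3 and P(x|c) = 0.2 for a class c,
    # its unnormalized posterior is 0.3 * 0.2 = 0.06; the class with the largest such
    # score wins (P(x) is the same for every class, so it can be ignored for argmax).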
def fit(self, features, target):
# define class variables
self.classes = np.unique(target)
self.count = len(self.classes)
self.feature_nums = features.shape[1]
self.rows = features.shape[0]
# calculate statistics for all those features
self.calc_statistics(features, target)
# prior is the random chance of drawing a particular class based on its proportion in the dataset
self.prior = self.calc_prior(features, target)
def get_predictions(self, input_vector):
predictions = []
for i in range(len(input_vector)):
result = self.calc_posterior((input_vector.iloc[i,:]))
predictions.append(result)
return predictions
def predict(self, observation):
#call the calc_posterior function on the observation
pred_class = self.calc_posterior(observation)
return pred_class
def calc_statistics(self, features, target):
# calculate mean, variance for each column and convert to numpy array
self.mean = features.groupby(target).apply(np.mean).to_numpy()
self.var = features.groupby(target).apply(np.var).to_numpy()
return self.mean, self.var
def calc_prior(self, features, target):
# this is the probability of picking one of a class at random from the dataset
self.prior = (features.groupby(target).apply(lambda x: len(x)/self.rows).to_numpy())
return self.prior
def calc_posterior(self, x):
        # this is the posterior probability, i.e. the probability after seeing the evidence
# x is a numpy array
# x is feature vector for one observation
# make a list that we will add each classes posterior prob to
posteriors = []
# iterate through the classes
for i in range(0, self.count):
# for each class look at the prior probability for the class
prior = self.prior[i]
            # calculate the conditional log-likelihood of the features given this class
            # (work in log space so the prior and the likelihood combine additively)
            conditional = np.sum(np.log(self.gaussian_density(i, x)))
            posterior = np.log(prior) + conditional
# print(f"i = {i}, prior = {prior}, conditional = {conditional}, posterior = {posterior}")
posteriors.append(posterior)
return self.classes[np.argmax(posteriors)]
def gaussian_density(self, class_idx, x):
        # calc probability from gaussian density function (normal dist)
mean = self.mean[class_idx]
var = self.var[class_idx]
# this part sucked and I had a typo that cost me hours
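        # (the normal pdf computed below: N(x; mean, var) = exp(-(x - mean)^2 / (2*var)) / sqrt(2*pi*var))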
numerator = np.exp(-((x-mean)**2 / (2 * var)))
denominator = np.sqrt(2 * np.pi * var)
return numerator / denominator
def pdf(self, x, mean, stdev):
# calculate probability density function
exponent = np.exp(-((x-mean)**2 / (2*stdev**2)))
return exponent * (1/(np.sqrt(2*np.pi)*stdev))
def get_accuracy(self, test, predictions):
correct = 0
for i in range(len(test)):
if test.iloc[i] == predictions[i]:
correct += 1
return (correct / float(len(test)))
# TODO: read these and see how it works
# https://www.mathworks.com/help/matlab/matlab_external/matlab-arrays-as-python-variables.html
# https://www.mathworks.com/help/matlab/matlab_external/passing-data-to-python.html
# this exists only for my testing purposes
class MatlabSurrogate():
def __init__(self):
self.state_of_mind = "Badass."
def acquire_kinect_image(self, filename):
# give this function a filename, and it will load that image with opencv
# this will be a BGR format, because that is how opencv rolls
kinect_image = cv.imread(filename)
print(f"kinect has acquired the image with shape = {kinect_image.shape}")
return kinect_image
# function to display images resized, using opencv
def imshow(self, image, imdiv = 4):
imdiv = int(imdiv)
w, h = int(image.shape[1]/imdiv), int(image.shape[0]/imdiv)
cv.namedWindow("output", cv.WINDOW_NORMAL)
cv.resizeWindow("output", (w, h))
cv.imshow("output", image)
cv.waitKey(0)
cv.destroyAllWindows()
# I should probably have one image processing class that takes in a single image and then spits out a dataframe that could be used for prediction
# replaces ImageSegmenter
class ImageProcess():
def __init__(self):
print("image processor activated! use 'process_image_to_df()' to get back a pandas df")
self.black_lower = (0, 0, 0)
self.black_upper = (179, 255, 30)
self.hsv_lower = (0, 0, 0)
self.hsv_upper = (179, 255, 90)
# self.black_lower = (0, 0, 203)
# self.black_upper = (43, 255, 255)
# self.hsv_lower = (0, 0, 70)
# self.hsv_upper = (179, 34, 255)
# NOT mask for lego_imgs[14]
# hsv_lower = (0,0,0)
# hsv_upper = (179,234,77)
def dummy_method(self, a):
if type(a) is np.ndarray:
result = "object is a numpy.ndarray, this is perfect. Is the image RGB order or BGR?"
return result
else:
result = "object is a " + str(type(a)) + "and I'm gonna have a hard time with that"
return result
def bg_segmentation(self, image, mode="hsv", show_img=False):
# create an hsv mask for red colors
hsv_mask = cv.inRange(cv.cvtColor(image, cv.COLOR_BGR2HSV),
self.hsv_lower,
self.hsv_upper).astype(np.uint8)
# use this as a NOT mask
hsv_mask = np.where(hsv_mask > 1, 0, 1).astype(np.uint8)
hsv_mask = ndimage.gaussian_filter(hsv_mask, sigma=1)
# erode the mask
hsv_mask = morphology.erosion(hsv_mask, morphology.disk(3))
# # median filter to despeckle
# hsv_mask = ndimage.median_filter(hsv_mask, size=(3, 3)).astype(np.uint8)
# binary dilation
hsv_mask = morphology.binary_dilation(hsv_mask, np.ones((20, 20))).astype(np.uint8)
# fill the holes
hsv_mask = ndimage.binary_fill_holes(hsv_mask).astype(np.uint8)
# erode the mask
hsv_mask = morphology.erosion(hsv_mask, morphology.disk(5))
# TODO: remove this it is for testing purposes to show the segmentation
if (show_img == True):
m = MatlabSurrogate()
m.imshow(cv.bitwise_and(image, image, mask=hsv_mask).astype(np.uint8))
# apply the mask and return the result
return cv.bitwise_and(image, image, mask=hsv_mask).astype(np.uint8)
def process_image_to_df(self, input_image, area_th):
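        """Segment `input_image`, find object contours with area above `area_th`,
        and return a per-object colour-feature DataFrame plus the annotated image."""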
seg_img = self.bg_segmentation(input_image, show_img=False)
# # make the mask a binary thresholded image
mask = cv.cvtColor(seg_img, cv.COLOR_BGR2GRAY)
mask = cv.GaussianBlur(mask,(5,5),0)
ret3, mask = cv.threshold(mask,0,255,cv.THRESH_BINARY)
# output image with contours drawn on the original image
output_image = input_image.copy()
# find the contours of the detected objects in the image
contours, hier = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# create the df that we'll return for this image
df = pd.DataFrame(columns=['color'])
# # reset the object num
object_num = 0
for cnt in contours:
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
# CALCULATE ALL THE CONTOUR SHAPE FEATURES
# get the x, y, w, h of the bounding rect for the contour
x, y, w, h = cv.boundingRect(cnt)
# contour features
area = cv.contourArea(cnt)
rect_area = w * h
fullosity = area / rect_area
aspect_ratio = float(w)/h
extent = float(area/ rect_area)
hull = cv.convexHull(cnt)
hull_area = cv.contourArea(hull)
solidity = float(area)/hull_area
eq_diameter = np.sqrt(4*area/np.pi)
M= cv.moments(cnt)
cx= int(M['m10']/M['m00'])
cy= int(M['m01']/M['m00'])
# take this rectangle as a subset of the input_image, and calculate things within it
img_subset = input_image[y:y+h, x:x+w, :]
# convert to hsv for extracting those values
img_subset_hsv = cv.cvtColor(img_subset, cv.COLOR_BGR2HSV)
# FILTER OUT THE WEIRD ONES
            # get rid of tiny objects that are probably noise
if area > area_th:
# draw a blank canvas to put the contour onto, JUST THIS ONE not the others
# this is a mask
cimg_justthiscontour = np.zeros_like(input_image)
# draw the contours on the blank canvas which is original sized
cv.drawContours(cimg_justthiscontour, [cnt], 0, color=(255, 255, 255), thickness=-1)
# now take the subset of just the area around the contour of interest
cimg_subset = cimg_justthiscontour[y:y+h, x:x+w, :]
# make a binary mask
cimg_mask = cv.cvtColor(cimg_subset, cv.COLOR_BGR2GRAY)
ret2, mask = cv.threshold(cimg_mask,0,255,cv.THRESH_BINARY)
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
img_subset = cv.bitwise_and(img_subset, img_subset, mask=mask).astype(np.uint8)
# calculate where the object is
pts = np.where(cimg_subset == 255)
hue = img_subset_hsv[pts[0], pts[1], 0]
sat = img_subset_hsv[pts[0], pts[1], 1]
val = img_subset_hsv[pts[0], pts[1], 2]
r = img_subset[pts[0], pts[1], 0]
g = img_subset[pts[0], pts[1], 1]
b = img_subset[pts[0], pts[1], 2]
# and export the image for later analysis with something else like a neural network
cv.imwrite(f"images/train/XX_{object_num}_{randint(10000,99999)}.png", img_subset)
# add the object labels to the cimg for identification
cv.putText(output_image, text= str(object_num),
org=(cx - 5,cy - 5),
fontFace= cv.FONT_HERSHEY_SIMPLEX,
fontScale=3,
color=(255,255,255),
thickness=5,
lineType=cv.LINE_AA)
# print(r.mean(), g.mean(), b.mean(), gli.mean())
df = df.append({'color' : 0,
'x': x,
'y': y,
'object_num': object_num,
'r': r.mean(),
'g': g.mean(),
'b': b.mean(),
'hue': hue.mean(),
'sat': sat.mean(),
'val': val.mean()
}, ignore_index=True)
# last thing we do on this loop is increment the object_num
object_num += 1
#
# end result should be a pandas dataframe and the contour image with numbers
return df.sort_values(by='object_num', axis=0, ascending=True), output_image
def hsv_slide_tool(self, image):
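        """Interactive trackbar tool for finding HSV mask bounds; press 'q' or Esc
        to close the window and print the chosen lower/upper values."""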
def empty(a):
pass
h, w = int(image.shape[1]/2), int(image.shape[0]/2)
cv.namedWindow('masked_image', cv.WINDOW_NORMAL)
cv.resizeWindow('masked_image', h, w)
cv.namedWindow("trackbars")
cv.resizeWindow("trackbars", 800, 300)
# color mask trackbars
cv.createTrackbar("hue_min", "trackbars", 0, 179, empty)
cv.createTrackbar('hue_max', 'trackbars', 179, 179, empty)
cv.createTrackbar('sat_min', 'trackbars', 0, 255, empty)
cv.createTrackbar('sat_max', 'trackbars', 255, 255, empty)
cv.createTrackbar('val_min', 'trackbars', 0, 255, empty)
cv.createTrackbar('val_max', 'trackbars', 255, 255, empty)
while True:
# get image
img_hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
# get trackbar positions
h_min = cv.getTrackbarPos("hue_min", "trackbars")
h_max = cv.getTrackbarPos('hue_max', 'trackbars')
s_min = cv.getTrackbarPos('sat_min', 'trackbars')
s_max = cv.getTrackbarPos('sat_max', 'trackbars')
v_min = cv.getTrackbarPos('val_min', 'trackbars')
v_max = cv.getTrackbarPos('val_max', 'trackbars')
# self.black_lower = (0, 0, 0)
# self.black_upper = (179, 255, 30)
# self.hsv_lower = (0, 0, 100)
# self.hsv_upper = (179, 255, 255)
# create mask
hsv_lower = np.array([h_min, s_min, v_min])
hsv_upper = np.array([h_max, s_max, v_max])
black_lower = np.array([0, 0, 0])
black_upper = np.array([179, 255, 30])
color_mask = cv.inRange(img_hsv, hsv_lower, hsv_upper)
black_mask = cv.inRange(img_hsv, black_lower, black_upper)
mask = color_mask + black_mask
masked_image = cv.bitwise_and(img_hsv, img_hsv, mask=mask)
cv.imshow('masked_image', masked_image)
k = cv.waitKey(1000) & 0xFF # large wait time
if k == 113 or k == 27:
break
cv.destroyAllWindows()
print(f'hsv_lower is {hsv_lower}, hsv_upper = {hsv_upper}')
def label_dataframe(self, image_df, class_list):
for i, row in image_df.iterrows():
image_df.loc[i, 'color'] = class_list[i]
print(type(image_df))
return image_df
# def fake_df(self, input_df, reps = 3):
# # creates a bunch of fake adjustments to the dataframe so my train set is bigger
# output_df = input_df.copy()
# for rep in range(0, reps):
# fake_df = input_df.copy()
# for i, row in fake_df.iterrows():
# fake_df.loc[i, 'r'] = fake_df.loc[i, 'r'] + uniform(-.1, .1)
# fake_df.loc[i, 'g'] = fake_df.loc[i, 'g'] + uniform(-.1, .1)
# fake_df.loc[i, 'b'] = fake_df.loc[i, 'b'] + uniform(-.1, .1)
# output_df = pd.concat(output_df, fake_df)
# return output_df
def otsu_threshold(self, image):
blur = cv.GaussianBlur(image,(5,5),0)
ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
return ret3, th3
def process_image_make_predictions(self, input_image, model):
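        """Same contour pipeline as process_image_to_df, but feeds each object's
        normalised colour features to `model` and draws the predicted class label
        on the returned output image."""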
predictive_model = model
area_th = 400
seg_img = self.bg_segmentation(input_image, show_img=False)
# # make the mask a binary thresholded image
mask = cv.cvtColor(seg_img, cv.COLOR_BGR2GRAY)
mask = cv.GaussianBlur(mask,(5,5),0)
ret3, mask = cv.threshold(mask,0,255,cv.THRESH_BINARY)
# output image with contours drawn on the original image
output_image = input_image.copy()
# find the contours of the detected objects in the image
contours, hier = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# create the df that we'll return for this image
df = pd.DataFrame(columns=['color'])
# # reset the object num
object_num = 0
for cnt in contours:
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
# CALCULATE ALL THE CONTOUR SHAPE FEATURES
# get the x, y, w, h of the bounding rect for the contour
x, y, w, h = cv.boundingRect(cnt)
# contour features
area = cv.contourArea(cnt)
rect_area = w * h
fullosity = area / rect_area
aspect_ratio = float(w)/h
extent = float(area/ rect_area)
hull = cv.convexHull(cnt)
hull_area = cv.contourArea(hull)
solidity = float(area)/hull_area
eq_diameter = np.sqrt(4*area/np.pi)
M= cv.moments(cnt)
cx= int(M['m10']/M['m00'])
cy= int(M['m01']/M['m00'])
# take this rectangle as a subset of the input_image, and calculate things within it
img_subset = input_image[y:y+h, x:x+w, :]
# convert to hsv for extracting those values
img_subset_hsv = cv.cvtColor(img_subset, cv.COLOR_BGR2HSV)
# FILTER OUT THE WEIRD ONES
            # get rid of tiny objects that are probably noise
if area > area_th:
# draw a blank canvas to put the contour onto, JUST THIS ONE not the others
# this is a mask
cimg_justthiscontour = np.zeros_like(input_image)
# draw the contours on the blank canvas which is original sized
cv.drawContours(cimg_justthiscontour, [cnt], 0, color=(255, 255, 255), thickness=-1)
# now take the subset of just the area around the contour of interest
cimg_subset = cimg_justthiscontour[y:y+h, x:x+w, :]
# make a binary mask
cimg_mask = cv.cvtColor(cimg_subset, cv.COLOR_BGR2GRAY)
ret2, mask = cv.threshold(cimg_mask,0,255,cv.THRESH_BINARY)
# draw contours on the output image for our personal enjoyment
cv.drawContours(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)
img_subset = cv.bitwise_and(img_subset, img_subset, mask=mask).astype(np.uint8)
# calculate where the object is
pts = np.where(cimg_subset == 255)
hue = img_subset_hsv[pts[0], pts[1], 0]
sat = img_subset_hsv[pts[0], pts[1], 1]
val = img_subset_hsv[pts[0], pts[1], 2]
r = img_subset[pts[0], pts[1], 0]
g = img_subset[pts[0], pts[1], 1]
b = img_subset[pts[0], pts[1], 2]
df = [{'r': (r.mean() / 255),
'g': (g.mean() / 255),
'b': (b.mean() / 255),
'hue': (hue.mean() / 255),
'sat': (sat.mean() / 255),
'val': (val.mean() / 255)}]
df = pd.DataFrame.from_dict(df)
pred = predictive_model.get_predictions(df)
class_dict = {0:"medium_blue",
1:"black",
2:"darK_stone_gray",
3:"bright_green",
4:"light_green",
5:"bright_orange",
6:"bright_red",
7:"bright_blue",
8:"white",
9:"bright_yellow"}
color_text = class_dict[pred[0]]
object_label = "obj" + str(object_num) + "_pred" + str(pred[0])
print(object_label)
# add the object labels to the cimg for identification
cv.putText(output_image, text= str(object_label),
org=(cx - 5,cy - 5),
fontFace= cv.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(0,255,0),
thickness=3,
lineType=cv.LINE_AA)
# last thing we do on this loop is increment the object_num
object_num += 1
# AFTER ALL CONTOURS HAVE BEEN DONE submit the df to the model for predictions
# results = predictive_model.blind_predictions()
# result = loaded_model.get_predictions(X_test, Y_test)
# print(result)
# # use the test set to see how we do
# y_test_predictions = nb.get_predictions(X_test)
# # scores
# acc = nb.get_accuracy(y_test, y_test_predictions)
# prec = precision_score(y_test, y_test_predictions, average="micro")
# rec = recall_score(y_test, y_test_predictions, average="micro")
# print(f"precision is {prec}, recall is {rec}, accuracy = {acc}")
# # confusion matrix
# labels = [(i, c) for i, c in class_dict.items()]
# cm = confusion_matrix(y_test, y_test_predictions)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# cax = ax.matshow(cm)
# plt.title('confusion matrix of the classifier')
# fig.colorbar(cax)
# plt.xlabel('Predicted')
# plt.ylabel('True')
# plt.show()
# print(labels)
# take the row
# end result should be a pandas dataframe and the contour image with numbers
return output_image
|
[
"cv2.GaussianBlur",
"numpy.random.seed",
"cv2.bitwise_and",
"numpy.argmax",
"scipy.ndimage.binary_fill_holes",
"numpy.ones",
"numpy.exp",
"cv2.imshow",
"cv2.inRange",
"numpy.unique",
"pandas.DataFrame",
"cv2.contourArea",
"numpy.zeros_like",
"random.randint",
"cv2.cvtColor",
"scipy.ndimage.gaussian_filter",
"cv2.namedWindow",
"cv2.getTrackbarPos",
"cv2.drawContours",
"cv2.destroyAllWindows",
"cv2.boundingRect",
"cv2.createTrackbar",
"pandas.DataFrame.from_dict",
"cv2.waitKey",
"cv2.convexHull",
"cv2.resizeWindow",
"cv2.threshold",
"cv2.moments",
"skimage.morphology.disk",
"cv2.imread",
"numpy.where",
"numpy.array",
"cv2.findContours",
"numpy.sqrt"
] |
[((622, 640), 'numpy.random.seed', 'np.random.seed', (['(26)'], {}), '(26)\n', (636, 640), True, 'import numpy as np\n'), ((1152, 1169), 'numpy.unique', 'np.unique', (['target'], {}), '(target)\n', (1161, 1169), True, 'import numpy as np\n'), ((3827, 3865), 'numpy.exp', 'np.exp', (['(-((x - mean) ** 2 / (2 * var)))'], {}), '(-((x - mean) ** 2 / (2 * var)))\n', (3833, 3865), True, 'import numpy as np\n'), ((3885, 3909), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * var)'], {}), '(2 * np.pi * var)\n', (3892, 3909), True, 'import numpy as np\n'), ((4072, 4117), 'numpy.exp', 'np.exp', (['(-((x - mean) ** 2 / (2 * stdev ** 2)))'], {}), '(-((x - mean) ** 2 / (2 * stdev ** 2)))\n', (4078, 4117), True, 'import numpy as np\n'), ((5055, 5074), 'cv2.imread', 'cv.imread', (['filename'], {}), '(filename)\n', (5064, 5074), True, 'import cv2 as cv\n'), ((5402, 5444), 'cv2.namedWindow', 'cv.namedWindow', (['"""output"""', 'cv.WINDOW_NORMAL'], {}), "('output', cv.WINDOW_NORMAL)\n", (5416, 5444), True, 'import cv2 as cv\n'), ((5454, 5487), 'cv2.resizeWindow', 'cv.resizeWindow', (['"""output"""', '(w, h)'], {}), "('output', (w, h))\n", (5469, 5487), True, 'import cv2 as cv\n'), ((5497, 5523), 'cv2.imshow', 'cv.imshow', (['"""output"""', 'image'], {}), "('output', image)\n", (5506, 5523), True, 'import cv2 as cv\n'), ((5533, 5546), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (5543, 5546), True, 'import cv2 as cv\n'), ((5556, 5578), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (5576, 5578), True, 'import cv2 as cv\n'), ((7160, 7202), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['hsv_mask'], {'sigma': '(1)'}), '(hsv_mask, sigma=1)\n', (7183, 7202), False, 'from scipy import ndimage\n'), ((8439, 8478), 'cv2.cvtColor', 'cv.cvtColor', (['seg_img', 'cv.COLOR_BGR2GRAY'], {}), '(seg_img, cv.COLOR_BGR2GRAY)\n', (8450, 8478), True, 'import cv2 as cv\n'), ((8495, 8527), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['mask', '(5, 5)', '(0)'], {}), '(mask, (5, 5), 0)\n', (8510, 8527), True, 'import cv2 as cv\n'), ((8547, 8591), 'cv2.threshold', 'cv.threshold', (['mask', '(0)', '(255)', 'cv.THRESH_BINARY'], {}), '(mask, 0, 255, cv.THRESH_BINARY)\n', (8559, 8591), True, 'import cv2 as cv\n'), ((8798, 8857), 'cv2.findContours', 'cv.findContours', (['mask', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (8813, 8857), True, 'import cv2 as cv\n'), ((8934, 8965), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['color']"}), "(columns=['color'])\n", (8946, 8965), True, 'import pandas as pd\n'), ((13522, 13570), 'cv2.namedWindow', 'cv.namedWindow', (['"""masked_image"""', 'cv.WINDOW_NORMAL'], {}), "('masked_image', cv.WINDOW_NORMAL)\n", (13536, 13570), True, 'import cv2 as cv\n'), ((13580, 13617), 'cv2.resizeWindow', 'cv.resizeWindow', (['"""masked_image"""', 'h', 'w'], {}), "('masked_image', h, w)\n", (13595, 13617), True, 'import cv2 as cv\n'), ((13637, 13664), 'cv2.namedWindow', 'cv.namedWindow', (['"""trackbars"""'], {}), "('trackbars')\n", (13651, 13664), True, 'import cv2 as cv\n'), ((13674, 13712), 'cv2.resizeWindow', 'cv.resizeWindow', (['"""trackbars"""', '(800)', '(300)'], {}), "('trackbars', 800, 300)\n", (13689, 13712), True, 'import cv2 as cv\n'), ((13772, 13828), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""hue_min"""', '"""trackbars"""', '(0)', '(179)', 'empty'], {}), "('hue_min', 'trackbars', 0, 179, empty)\n", (13789, 13828), True, 'import cv2 as cv\n'), ((13838, 13896), 'cv2.createTrackbar', 'cv.createTrackbar', 
(['"""hue_max"""', '"""trackbars"""', '(179)', '(179)', 'empty'], {}), "('hue_max', 'trackbars', 179, 179, empty)\n", (13855, 13896), True, 'import cv2 as cv\n'), ((13906, 13962), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""sat_min"""', '"""trackbars"""', '(0)', '(255)', 'empty'], {}), "('sat_min', 'trackbars', 0, 255, empty)\n", (13923, 13962), True, 'import cv2 as cv\n'), ((13972, 14030), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""sat_max"""', '"""trackbars"""', '(255)', '(255)', 'empty'], {}), "('sat_max', 'trackbars', 255, 255, empty)\n", (13989, 14030), True, 'import cv2 as cv\n'), ((14040, 14096), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""val_min"""', '"""trackbars"""', '(0)', '(255)', 'empty'], {}), "('val_min', 'trackbars', 0, 255, empty)\n", (14057, 14096), True, 'import cv2 as cv\n'), ((14106, 14164), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""val_max"""', '"""trackbars"""', '(255)', '(255)', 'empty'], {}), "('val_max', 'trackbars', 255, 255, empty)\n", (14123, 14164), True, 'import cv2 as cv\n'), ((15636, 15658), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (15656, 15658), True, 'import cv2 as cv\n'), ((16693, 16726), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (16708, 16726), True, 'import cv2 as cv\n'), ((16744, 16805), 'cv2.threshold', 'cv.threshold', (['blur', '(0)', '(255)', '(cv.THRESH_BINARY + cv.THRESH_OTSU)'], {}), '(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n', (16756, 16805), True, 'import cv2 as cv\n'), ((17134, 17173), 'cv2.cvtColor', 'cv.cvtColor', (['seg_img', 'cv.COLOR_BGR2GRAY'], {}), '(seg_img, cv.COLOR_BGR2GRAY)\n', (17145, 17173), True, 'import cv2 as cv\n'), ((17190, 17222), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['mask', '(5, 5)', '(0)'], {}), '(mask, (5, 5), 0)\n', (17205, 17222), True, 'import cv2 as cv\n'), ((17242, 17286), 'cv2.threshold', 'cv.threshold', (['mask', '(0)', '(255)', 'cv.THRESH_BINARY'], {}), '(mask, 0, 255, cv.THRESH_BINARY)\n', (17254, 17286), True, 'import cv2 as cv\n'), ((17489, 17548), 'cv2.findContours', 'cv.findContours', (['mask', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (17504, 17548), True, 'import cv2 as cv\n'), ((17623, 17654), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['color']"}), "(columns=['color'])\n", (17635, 17654), True, 'import pandas as pd\n'), ((3505, 3526), 'numpy.argmax', 'np.argmax', (['posteriors'], {}), '(posteriors)\n', (3514, 3526), True, 'import numpy as np\n'), ((7288, 7306), 'skimage.morphology.disk', 'morphology.disk', (['(3)'], {}), '(3)\n', (7303, 7306), False, 'from skimage import morphology\n'), ((7767, 7785), 'skimage.morphology.disk', 'morphology.disk', (['(5)'], {}), '(5)\n', (7782, 7785), False, 'from skimage import morphology\n'), ((9155, 9230), 'cv2.drawContours', 'cv.drawContours', (['output_image', '[cnt]', '(0)'], {'color': '(255, 255, 255)', 'thickness': '(5)'}), '(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)\n', (9170, 9230), True, 'import cv2 as cv\n'), ((9386, 9406), 'cv2.boundingRect', 'cv.boundingRect', (['cnt'], {}), '(cnt)\n', (9401, 9406), True, 'import cv2 as cv\n'), ((9461, 9480), 'cv2.contourArea', 'cv.contourArea', (['cnt'], {}), '(cnt)\n', (9475, 9480), True, 'import cv2 as cv\n'), ((9660, 9678), 'cv2.convexHull', 'cv.convexHull', (['cnt'], {}), '(cnt)\n', (9673, 9678), True, 'import cv2 as cv\n'), ((9704, 9724), 'cv2.contourArea', 'cv.contourArea', (['hull'], {}), '(hull)\n', (9718, 9724), 
True, 'import cv2 as cv\n'), ((9800, 9825), 'numpy.sqrt', 'np.sqrt', (['(4 * area / np.pi)'], {}), '(4 * area / np.pi)\n', (9807, 9825), True, 'import numpy as np\n'), ((9840, 9855), 'cv2.moments', 'cv.moments', (['cnt'], {}), '(cnt)\n', (9850, 9855), True, 'import cv2 as cv\n'), ((10181, 10222), 'cv2.cvtColor', 'cv.cvtColor', (['img_subset', 'cv.COLOR_BGR2HSV'], {}), '(img_subset, cv.COLOR_BGR2HSV)\n', (10192, 10222), True, 'import cv2 as cv\n'), ((14245, 14281), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (14256, 14281), True, 'import cv2 as cv\n'), ((14355, 14396), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""hue_min"""', '"""trackbars"""'], {}), "('hue_min', 'trackbars')\n", (14372, 14396), True, 'import cv2 as cv\n'), ((14418, 14459), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""hue_max"""', '"""trackbars"""'], {}), "('hue_max', 'trackbars')\n", (14435, 14459), True, 'import cv2 as cv\n'), ((14481, 14522), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""sat_min"""', '"""trackbars"""'], {}), "('sat_min', 'trackbars')\n", (14498, 14522), True, 'import cv2 as cv\n'), ((14544, 14585), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""sat_max"""', '"""trackbars"""'], {}), "('sat_max', 'trackbars')\n", (14561, 14585), True, 'import cv2 as cv\n'), ((14607, 14648), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""val_min"""', '"""trackbars"""'], {}), "('val_min', 'trackbars')\n", (14624, 14648), True, 'import cv2 as cv\n'), ((14670, 14711), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""val_max"""', '"""trackbars"""'], {}), "('val_max', 'trackbars')\n", (14687, 14711), True, 'import cv2 as cv\n'), ((14973, 15004), 'numpy.array', 'np.array', (['[h_min, s_min, v_min]'], {}), '([h_min, s_min, v_min])\n', (14981, 15004), True, 'import numpy as np\n'), ((15030, 15061), 'numpy.array', 'np.array', (['[h_max, s_max, v_max]'], {}), '([h_max, s_max, v_max])\n', (15038, 15061), True, 'import numpy as np\n'), ((15089, 15108), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (15097, 15108), True, 'import numpy as np\n'), ((15136, 15160), 'numpy.array', 'np.array', (['[179, 255, 30]'], {}), '([179, 255, 30])\n', (15144, 15160), True, 'import numpy as np\n'), ((15201, 15242), 'cv2.inRange', 'cv.inRange', (['img_hsv', 'hsv_lower', 'hsv_upper'], {}), '(img_hsv, hsv_lower, hsv_upper)\n', (15211, 15242), True, 'import cv2 as cv\n'), ((15269, 15314), 'cv2.inRange', 'cv.inRange', (['img_hsv', 'black_lower', 'black_upper'], {}), '(img_hsv, black_lower, black_upper)\n', (15279, 15314), True, 'import cv2 as cv\n'), ((15387, 15430), 'cv2.bitwise_and', 'cv.bitwise_and', (['img_hsv', 'img_hsv'], {'mask': 'mask'}), '(img_hsv, img_hsv, mask=mask)\n', (15401, 15430), True, 'import cv2 as cv\n'), ((15458, 15497), 'cv2.imshow', 'cv.imshow', (['"""masked_image"""', 'masked_image'], {}), "('masked_image', masked_image)\n", (15467, 15497), True, 'import cv2 as cv\n'), ((17844, 17919), 'cv2.drawContours', 'cv.drawContours', (['output_image', '[cnt]', '(0)'], {'color': '(255, 255, 255)', 'thickness': '(5)'}), '(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)\n', (17859, 17919), True, 'import cv2 as cv\n'), ((18075, 18095), 'cv2.boundingRect', 'cv.boundingRect', (['cnt'], {}), '(cnt)\n', (18090, 18095), True, 'import cv2 as cv\n'), ((18150, 18169), 'cv2.contourArea', 'cv.contourArea', (['cnt'], {}), '(cnt)\n', (18164, 18169), True, 'import cv2 as cv\n'), ((18349, 18367), 'cv2.convexHull', 'cv.convexHull', (['cnt'], {}), '(cnt)\n', (18362, 
18367), True, 'import cv2 as cv\n'), ((18393, 18413), 'cv2.contourArea', 'cv.contourArea', (['hull'], {}), '(hull)\n', (18407, 18413), True, 'import cv2 as cv\n'), ((18489, 18514), 'numpy.sqrt', 'np.sqrt', (['(4 * area / np.pi)'], {}), '(4 * area / np.pi)\n', (18496, 18514), True, 'import numpy as np\n'), ((18529, 18544), 'cv2.moments', 'cv.moments', (['cnt'], {}), '(cnt)\n', (18539, 18544), True, 'import cv2 as cv\n'), ((18870, 18911), 'cv2.cvtColor', 'cv.cvtColor', (['img_subset', 'cv.COLOR_BGR2HSV'], {}), '(img_subset, cv.COLOR_BGR2HSV)\n', (18881, 18911), True, 'import cv2 as cv\n'), ((7086, 7114), 'numpy.where', 'np.where', (['(hsv_mask > 1)', '(0)', '(1)'], {}), '(hsv_mask > 1, 0, 1)\n', (7094, 7114), True, 'import numpy as np\n'), ((7629, 7664), 'scipy.ndimage.binary_fill_holes', 'ndimage.binary_fill_holes', (['hsv_mask'], {}), '(hsv_mask)\n', (7654, 7664), False, 'from scipy import ndimage\n'), ((8159, 8202), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'hsv_mask'}), '(image, image, mask=hsv_mask)\n', (8173, 8202), True, 'import cv2 as cv\n'), ((10529, 10555), 'numpy.zeros_like', 'np.zeros_like', (['input_image'], {}), '(input_image)\n', (10542, 10555), True, 'import numpy as np\n'), ((10656, 10744), 'cv2.drawContours', 'cv.drawContours', (['cimg_justthiscontour', '[cnt]', '(0)'], {'color': '(255, 255, 255)', 'thickness': '(-1)'}), '(cimg_justthiscontour, [cnt], 0, color=(255, 255, 255),\n thickness=-1)\n', (10671, 10744), True, 'import cv2 as cv\n'), ((10968, 11011), 'cv2.cvtColor', 'cv.cvtColor', (['cimg_subset', 'cv.COLOR_BGR2GRAY'], {}), '(cimg_subset, cv.COLOR_BGR2GRAY)\n', (10979, 11011), True, 'import cv2 as cv\n'), ((11046, 11095), 'cv2.threshold', 'cv.threshold', (['cimg_mask', '(0)', '(255)', 'cv.THRESH_BINARY'], {}), '(cimg_mask, 0, 255, cv.THRESH_BINARY)\n', (11058, 11095), True, 'import cv2 as cv\n'), ((11192, 11267), 'cv2.drawContours', 'cv.drawContours', (['output_image', '[cnt]', '(0)'], {'color': '(255, 255, 255)', 'thickness': '(5)'}), '(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)\n', (11207, 11267), True, 'import cv2 as cv\n'), ((11441, 11469), 'numpy.where', 'np.where', (['(cimg_subset == 255)'], {}), '(cimg_subset == 255)\n', (11449, 11469), True, 'import numpy as np\n'), ((15515, 15531), 'cv2.waitKey', 'cv.waitKey', (['(1000)'], {}), '(1000)\n', (15525, 15531), True, 'import cv2 as cv\n'), ((19220, 19246), 'numpy.zeros_like', 'np.zeros_like', (['input_image'], {}), '(input_image)\n', (19233, 19246), True, 'import numpy as np\n'), ((19347, 19435), 'cv2.drawContours', 'cv.drawContours', (['cimg_justthiscontour', '[cnt]', '(0)'], {'color': '(255, 255, 255)', 'thickness': '(-1)'}), '(cimg_justthiscontour, [cnt], 0, color=(255, 255, 255),\n thickness=-1)\n', (19362, 19435), True, 'import cv2 as cv\n'), ((19659, 19702), 'cv2.cvtColor', 'cv.cvtColor', (['cimg_subset', 'cv.COLOR_BGR2GRAY'], {}), '(cimg_subset, cv.COLOR_BGR2GRAY)\n', (19670, 19702), True, 'import cv2 as cv\n'), ((19737, 19786), 'cv2.threshold', 'cv.threshold', (['cimg_mask', '(0)', '(255)', 'cv.THRESH_BINARY'], {}), '(cimg_mask, 0, 255, cv.THRESH_BINARY)\n', (19749, 19786), True, 'import cv2 as cv\n'), ((19883, 19958), 'cv2.drawContours', 'cv.drawContours', (['output_image', '[cnt]', '(0)'], {'color': '(255, 255, 255)', 'thickness': '(5)'}), '(output_image, [cnt], 0, color=(255, 255, 255), thickness=5)\n', (19898, 19958), True, 'import cv2 as cv\n'), ((20132, 20160), 'numpy.where', 'np.where', (['(cimg_subset == 255)'], {}), '(cimg_subset == 255)\n', (20140, 
20160), True, 'import numpy as np\n'), ((20869, 20895), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df'], {}), '(df)\n', (20891, 20895), True, 'import pandas as pd\n'), ((4141, 4159), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4148, 4159), True, 'import numpy as np\n'), ((6853, 6889), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (6864, 6889), True, 'import cv2 as cv\n'), ((7537, 7554), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (7544, 7554), True, 'import numpy as np\n'), ((8003, 8046), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'hsv_mask'}), '(image, image, mask=hsv_mask)\n', (8017, 8046), True, 'import cv2 as cv\n'), ((11300, 11349), 'cv2.bitwise_and', 'cv.bitwise_and', (['img_subset', 'img_subset'], {'mask': 'mask'}), '(img_subset, img_subset, mask=mask)\n', (11314, 11349), True, 'import cv2 as cv\n'), ((19991, 20040), 'cv2.bitwise_and', 'cv.bitwise_and', (['img_subset', 'img_subset'], {'mask': 'mask'}), '(img_subset, img_subset, mask=mask)\n', (20005, 20040), True, 'import cv2 as cv\n'), ((11957, 11978), 'random.randint', 'randint', (['(10000)', '(99999)'], {}), '(10000, 99999)\n', (11964, 11978), False, 'from random import randint, uniform\n')]
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # ####
# ## ## # ## # #
# # # # # # # # # ###
# # ## # ## ## #
# # # # # # ####
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.6.4
#
# <NAME>
# (c) 2016, 2017, 2018
#
# Licence APL2.0
#
###########################################################
import logging
import os
import PyQt5
import time
import copy
import operator
import numpy
from astrometry import transform
class ModelPoints:
logger = logging.getLogger(__name__)
def __init__(self, app):
self.app = app
self.transform = transform.Transform(self.app)
self.horizonPoints = list()
self.modelPoints = list()
self.celestialEquator = list()
# signal slot
self.app.ui.btn_loadInitialModelPoints.clicked.connect(self.selectInitialModelPointsFileName)
self.app.ui.btn_saveInitialModelPoints.clicked.connect(self.saveInitialModelPoints)
self.app.ui.btn_saveInitialModelPointsAs.clicked.connect(self.saveInitialModelPointsAs)
self.app.ui.btn_loadFullModelPoints.clicked.connect(self.selectFullModelPointsFileName)
self.app.ui.btn_saveFullModelPoints.clicked.connect(self.saveFullModelPoints)
self.app.ui.btn_saveFullModelPointsAs.clicked.connect(self.saveFullModelPointsAs)
self.app.ui.btn_loadHorizonMask.clicked.connect(self.selectHorizonPointsFileName)
self.app.ui.btn_saveHorizonMask.clicked.connect(self.saveHorizonMask)
self.app.ui.btn_saveHorizonMaskAs.clicked.connect(self.saveHorizonMaskAs)
self.app.signalMountSiteData.connect(self.generateCelestialEquator)
def initConfig(self):
try:
if 'HorizonPointsFileName' in self.app.config:
self.app.ui.le_horizonPointsFileName.setText(self.app.config['HorizonPointsFileName'])
if 'CheckUseMinimumHorizonLine' in self.app.config:
self.app.ui.checkUseMinimumHorizonLine.setChecked(self.app.config['CheckUseMinimumHorizonLine'])
if 'CheckUseFileHorizonLine' in self.app.config:
self.app.ui.checkUseFileHorizonLine.setChecked(self.app.config['CheckUseFileHorizonLine'])
if 'AltitudeMinimumHorizon' in self.app.config:
self.app.ui.altitudeMinimumHorizon.setValue(self.app.config['AltitudeMinimumHorizon'])
if 'ModelInitialPointsFileName' in self.app.config:
self.app.ui.le_modelInitialPointsFileName.setText(self.app.config['ModelInitialPointsFileName'])
if 'ModelFullPointsFileName' in self.app.config:
self.app.ui.le_modelFullPointsFileName.setText(self.app.config['ModelFullPointsFileName'])
if 'HorizonPointsFileName' in self.app.config and 'CheckUseMinimumHorizonLine' in self.app.config and 'CheckUseFileHorizonLine' in self.app.config and 'AltitudeMinimumHorizon' in self.app.config:
self.loadHorizonPoints(self.app.config['HorizonPointsFileName'],
self.app.config['CheckUseFileHorizonLine'],
self.app.config['CheckUseMinimumHorizonLine'],
self.app.config['AltitudeMinimumHorizon'])
except Exception as e:
            self.logger.error('item in config.cfg could not be initialized, error:{0}'.format(e))
finally:
pass
def storeConfig(self):
self.app.config['HorizonPointsFileName'] = self.app.ui.le_horizonPointsFileName.text()
self.app.config['CheckUseMinimumHorizonLine'] = self.app.ui.checkUseMinimumHorizonLine.isChecked()
self.app.config['CheckUseFileHorizonLine'] = self.app.ui.checkUseFileHorizonLine.isChecked()
self.app.config['AltitudeMinimumHorizon'] = self.app.ui.altitudeMinimumHorizon.value()
self.app.config['ModelInitialPointsFileName'] = self.app.ui.le_modelInitialPointsFileName.text()
self.app.config['ModelFullPointsFileName'] = self.app.ui.le_modelFullPointsFileName.text()
def saveHorizonMask(self):
filepath = os.getcwd() + '/config/' + self.app.ui.le_horizonPointsFileName.text()
self.saveHorizonPoints(filepath)
def saveHorizonMaskAs(self):
value, ext = self.app.selectFile(self.app, 'Save horizon mask points file', '/config', 'Model point files (*.txt)', False)
if value != '':
self.app.ui.le_horizonPointsFileName.setText(os.path.basename(value))
self.saveHorizonPoints(value)
else:
self.logger.warning('No model points file selected')
def selectHorizonPointsFileName(self):
value, ext = self.app.selectFile(self.app, 'Open horizon mask file', '/config', 'Horizon mask files (*.txt)', True)
if value != '':
self.app.ui.le_horizonPointsFileName.setText(os.path.basename(value))
self.app.hemisphereWindow.selectHorizonPointsMode()
self.app.hemisphereWindow.drawHemisphere()
def saveModelPoints(self, modelPointsFileName):
msg = None
fileHandle = None
if modelPointsFileName.strip() == '':
msg = 'No Model Points Filename given!'
self.logger.warning('No Model Points Filename given!')
return msg
try:
fileHandle = open(modelPointsFileName + '.txt', 'w')
for i in range(0, len(self.modelPoints)):
fileHandle.write('MW-3:{0:03.2f}:{1:03.2f}\n'.format(self.modelPoints[i][0], self.modelPoints[i][1]))
fileHandle.close()
except Exception as e:
msg = 'Error saving modeling points to file [{0}] error: {1}!'.format(modelPointsFileName, e)
            self.logger.warning('Error saving modeling points to file [{0}] error: {1}!'.format(modelPointsFileName, e))
finally:
if fileHandle:
fileHandle.close()
return msg
def saveInitialModelPoints(self):
filepath = os.getcwd() + '/config/' + self.app.ui.le_modelInitialPointsFileName.text()
self.saveModelPoints(filepath)
def saveInitialModelPointsAs(self):
value, ext = self.app.selectFile(self.app, 'Save initial model points file', '/config', 'Model point files (*.txt)', False)
if value != '':
self.app.ui.le_modelInitialPointsFileName.setText(os.path.basename(value))
self.saveModelPoints(value)
else:
self.logger.warning('No model points file selected')
def selectInitialModelPointsFileName(self):
value, ext = self.app.selectFile(self.app, 'Open initial model points file', '/config', 'Model points files (*.txt)', True)
if value != '':
value = os.path.basename(value)
self.app.ui.le_modelInitialPointsFileName.setText(value)
self.showInitialPoints(value)
else:
self.logger.warning('No file selected')
def saveFullModelPoints(self):
filepath = os.getcwd() + '/config/' + self.app.ui.le_modelFullPointsFileName.text()
self.saveModelPoints(filepath)
def saveFullModelPointsAs(self):
value, ext = self.app.selectFile(self.app, 'Save full model points file', '/config', 'Model point files (*.txt)', False)
if value != '':
self.app.ui.le_modelFullPointsFileName.setText(os.path.basename(value))
self.saveModelPoints(value)
else:
self.logger.warning('No model points file selected')
def selectFullModelPointsFileName(self):
value, ext = self.app.selectFile(self.app, 'Open full model points file', '/config', 'Model points files (*.txt)', True)
if value != '':
value = os.path.basename(value)
self.app.ui.le_modelFullPointsFileName.setText(value)
self.showFullPoints(value, self.app.ui.checkDeletePointsHorizonMask.isChecked(), self.app.ui.checkSortPoints.isChecked())
else:
self.logger.warning('No file selected')
def loadModelPoints(self, modelPointsFileName, modeltype):
p = []
number = 0
msg = None
if modelPointsFileName.strip() == '':
msg = 'No model points filename given!'
self.logger.warning('No model points filename given!')
return p, msg
try:
with open('config/' + modelPointsFileName + '.txt', 'r') as fileHandle:
for line in fileHandle:
if line.startswith('GRID'):
                        # if grid, then it's a TSX (TheSkyX) file
convertedLine = line.rstrip('\n').split()
point = (float(convertedLine[2]), float(convertedLine[3]))
number += 1
if modeltype == 'Refinement' and number > 3:
p.append(point)
elif modeltype == 'Base' and number <= 3:
p.append(point)
elif line.startswith('MW-3'):
# if mountwizzard3, it's native version 3
convertedLine = line.rstrip('\n').split(':')
p.append((float(convertedLine[1]), float(convertedLine[2])))
else:
# format is same as Per's Model Maker
convertedLine = line.rstrip('\n').split(':')
point = (int(convertedLine[0]), int(convertedLine[1]))
if len(convertedLine) == 2 and modeltype == 'Full':
p.append(point)
elif len(convertedLine) != 2 and modeltype == 'Initial':
p.append(point)
except Exception as e:
msg = 'Error loading modeling points from file [{0}] error: {1}!'.format(modelPointsFileName, e)
self.logger.warning('Error loading modeling points from file [{0}] error: {1}!'.format(modelPointsFileName, e))
finally:
return p, msg
def sortPoints(self):
if len(self.modelPoints) == 0:
self.logger.warning('There are no points to sort')
return
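        # sort by azimuth first, split into west (az >= 180) and east halves,
        # then order each half by altitude; the result is west followed by east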
westSide = []
eastSide = []
a = sorted(self.modelPoints, key=operator.itemgetter(0))
for i in range(0, len(a)):
if a[i][0] >= 180:
westSide.append((a[i][0], a[i][1]))
else:
eastSide.append((a[i][0], a[i][1]))
westSide = sorted(westSide, key=operator.itemgetter(1))
eastSide = sorted(eastSide, key=operator.itemgetter(1))
self.modelPoints = westSide + eastSide
def loadHorizonPoints(self, horizonPointsFileName, horizonByFile, horizonByAltitude, altitudeMinimumHorizon):
self.horizonPoints = []
if not (horizonByFile or horizonByAltitude):
return
hp = []
msg = None
if horizonByFile:
if horizonPointsFileName == '':
msg = 'No horizon points filename given !'
return msg
if not os.path.isfile(os.getcwd() + '/config/' + horizonPointsFileName + '.txt'):
msg = 'Horizon points file does not exist !'
self.logger.warning('Horizon points file does not exist')
else:
try:
with open(os.getcwd() + '/config/' + horizonPointsFileName + '.txt') as f:
for line in f:
if ':' in line:
# model maker format
m = line.rstrip('\n').split(':')
else:
# carte du ciel / skychart format
m = line.rstrip('\n').split(' ')
point = (int(m[0]), int(m[1]))
hp.append(point)
f.close()
except Exception as e:
msg = 'Error loading horizon points: {0}'.format(e)
self.logger.error('Error loading horizon points: {0}'.format(e))
return msg
hp = sorted(hp, key=operator.itemgetter(0))
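        # fall back to a flat 0-degree horizon spanning 0-360 degrees azimuth if no points were loaded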
if len(hp) == 0:
hp = ((0, 0), (360, 0))
x = [i[0] for i in hp]
y = [i[1] for i in hp]
if horizonByAltitude:
y = numpy.clip(y, altitudeMinimumHorizon, None)
self.horizonPoints = [list(a) for a in zip(x, y)]
return msg
def saveHorizonPoints(self, horizonPointsFileName):
msg = None
fileHandle = None
if horizonPointsFileName.strip() == '':
msg = 'No horizon points filename given!'
            self.logger.warning('No horizon points filename given!')
return msg
try:
fileHandle = open(horizonPointsFileName + '.txt', 'w')
for i in range(0, len(self.horizonPoints)):
# saving in model maker format
                fileHandle.write('{0:03d}:{1:03d}\n'.format(int(self.horizonPoints[i][0]), int(self.horizonPoints[i][1])))
fileHandle.close()
except Exception as e:
msg = 'Error saving horizon points to file [{0}] error: {1}!'.format(horizonPointsFileName, e)
            self.logger.warning('Error saving horizon points to file [{0}] error: {1}!'.format(horizonPointsFileName, e))
finally:
if fileHandle:
fileHandle.close()
return msg
def isAboveHorizonLine(self, point):
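        # interpolate the horizon line over 0..360 degrees of azimuth and compare the point's altitude to it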
x = range(0, 361)
y = numpy.interp(x, [i[0] for i in self.horizonPoints], [i[1] for i in self.horizonPoints], left=None, right=None, period=None)
if point[1] > y[int(point[0])]:
return True
else:
return False
def deleteBelowHorizonLine(self):
i = 0
while i < len(self.modelPoints):
if self.isAboveHorizonLine(self.modelPoints[i]):
i += 1
else:
del self.modelPoints[i]
def deletePoints(self):
self.modelPoints = list()
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def showInitialPoints(self, filename):
self.modelPoints, msg = self.loadModelPoints(filename, 'Initial')
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def showFullPoints(self, filename, limitByHorizonMask, doSortingPoints):
self.modelPoints, msg = self.loadModelPoints(filename, 'Full')
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateDSOPoints(self, limitByHorizonMask, hoursPathLength, numberOfPathPoints, hoursPathLengthPreview):
# we have no position of the mount -> therefore we can't calculate the path
if 'RaJNow' not in self.app.workerMountDispatcher.data:
return
self.modelPoints = list()
ra = copy.copy(self.app.workerMountDispatcher.data['RaJNow'])
dec = copy.copy(self.app.workerMountDispatcher.data['DecJNow'])
for i in range(0, numberOfPathPoints):
ra = ra - float(i) * hoursPathLength / numberOfPathPoints - hoursPathLengthPreview
az, alt = self.transform.transformERFA(ra, dec, 1)
if alt > 0:
self.modelPoints.append((az, alt))
if limitByHorizonMask:
self.deleteBelowHorizonLine()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateMaxPoints(self, limitByHorizonMask, doSortingPoints):
west = []
east = []
off = -5
i = 0
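        # hour angle values are generated in tenths (about +/-120, i.e. +/-12 h) and divided by 10 for the transform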
for dec in range(-15, 90, 10):
if dec < 30:
step = 10
elif dec < 70:
step = 10
else:
step = 30
if i % 2:
for ha in range(120 + off, -120 + off, -step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for ha in range(-120 + off, 120 + off, step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateNormalPoints(self, limitByHorizonMask, doSortingPoints):
west = []
east = []
off = -5
i = 0
for dec in range(-15, 90, 15):
if dec < 60:
step = 10
else:
step = 20
if i % 2:
for ha in range(120 + off, -120 + off, -step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for ha in range(-120 + off, 120 + off, step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateMinPoints(self, limitByHorizonMask, doSortingPoints):
west = list()
east = list()
off = -5
i = 0
for dec in range(-15, 90, 15):
if dec < 60:
step = 15
else:
step = 30
if i % 2:
for ha in range(120 + off, -120 + off, -step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for ha in range(-120 + off, 120 + off, step):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateGridPoints(self, limitByHorizonMask, doSortingPoints, numberOfRows, numberOfColumns, altitudeMin, altitudeMax):
west = list()
east = list()
i = 0
for alt in range(altitudeMin, altitudeMax + 1, int((altitudeMax - altitudeMin) / (numberOfRows - 1))):
if i % 2:
for az in range(365 - int(360 / numberOfColumns), 0, -int(360 / numberOfColumns)):
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
else:
for az in range(5, 360, int(360 / numberOfColumns)):
if alt > 0:
if az > 180:
east.insert(0, (az, alt))
else:
west.append((az, alt))
i += 1
self.modelPoints = west + east
if limitByHorizonMask:
self.deleteBelowHorizonLine()
if doSortingPoints:
self.sortPoints()
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateInitialPoints(self, azimuth, altitude, numberOfPoints):
self.modelPoints = list()
for i in range(0, numberOfPoints):
azp = i * 360 / numberOfPoints + azimuth
if azp > 360:
azp -= 360
azp = int(azp)
point = (azp, altitude)
self.modelPoints.append(point)
self.app.messageQueue.put('ToModel>{0:02d}'.format(len(self.modelPoints)))
self.app.workerModelingDispatcher.signalModelPointsRedraw.emit()
def generateCelestialEquator(self):
self.celestialEquator = list()
off = -5
for dec in range(-15, 90, 15):
for ha in range(120 + off, -120 + off, -2):
az, alt = self.transform.topocentricToAzAlt(ha / 10, dec)
if alt > 0:
self.celestialEquator.append((az, alt))
|
[
"os.path.basename",
"os.getcwd",
"copy.copy",
"numpy.clip",
"astrometry.transform.Transform",
"numpy.interp",
"operator.itemgetter",
"logging.getLogger"
] |
[((621, 648), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (638, 648), False, 'import logging\n'), ((727, 756), 'astrometry.transform.Transform', 'transform.Transform', (['self.app'], {}), '(self.app)\n', (746, 756), False, 'from astrometry import transform\n'), ((13679, 13807), 'numpy.interp', 'numpy.interp', (['x', '[i[0] for i in self.horizonPoints]', '[i[1] for i in self.horizonPoints]'], {'left': 'None', 'right': 'None', 'period': 'None'}), '(x, [i[0] for i in self.horizonPoints], [i[1] for i in self.\n horizonPoints], left=None, right=None, period=None)\n', (13691, 13807), False, 'import numpy\n'), ((15317, 15373), 'copy.copy', 'copy.copy', (["self.app.workerMountDispatcher.data['RaJNow']"], {}), "(self.app.workerMountDispatcher.data['RaJNow'])\n", (15326, 15373), False, 'import copy\n'), ((15388, 15445), 'copy.copy', 'copy.copy', (["self.app.workerMountDispatcher.data['DecJNow']"], {}), "(self.app.workerMountDispatcher.data['DecJNow'])\n", (15397, 15445), False, 'import copy\n'), ((6841, 6864), 'os.path.basename', 'os.path.basename', (['value'], {}), '(value)\n', (6857, 6864), False, 'import os\n'), ((7822, 7845), 'os.path.basename', 'os.path.basename', (['value'], {}), '(value)\n', (7838, 7845), False, 'import os\n'), ((12483, 12526), 'numpy.clip', 'numpy.clip', (['y', 'altitudeMinimumHorizon', 'None'], {}), '(y, altitudeMinimumHorizon, None)\n', (12493, 12526), False, 'import numpy\n'), ((4215, 4226), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4224, 4226), False, 'import os\n'), ((4573, 4596), 'os.path.basename', 'os.path.basename', (['value'], {}), '(value)\n', (4589, 4596), False, 'import os\n'), ((4968, 4991), 'os.path.basename', 'os.path.basename', (['value'], {}), '(value)\n', (4984, 4991), False, 'import os\n'), ((6098, 6109), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6107, 6109), False, 'import os\n'), ((6472, 6495), 'os.path.basename', 'os.path.basename', (['value'], {}), '(value)\n', (6488, 6495), False, 'import os\n'), ((7097, 7108), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7106, 7108), False, 'import os\n'), ((7459, 7482), 'os.path.basename', 'os.path.basename', (['value'], {}), '(value)\n', (7475, 7482), False, 'import os\n'), ((10371, 10393), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (10390, 10393), False, 'import operator\n'), ((10623, 10645), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (10642, 10645), False, 'import operator\n'), ((10687, 10709), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (10706, 10709), False, 'import operator\n'), ((12290, 12312), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (12309, 12312), False, 'import operator\n'), ((11202, 11213), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11211, 11213), False, 'import os\n'), ((11466, 11477), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11475, 11477), False, 'import os\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train resnet."""
import os
import time
import argparse
import ast
import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore.nn.optim.momentum import Momentum
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore.communication.management import init
from mindspore.train.callback import Callback
from src.loss import Softmaxloss
from src.loss import Tripletloss
from src.loss import Quadrupletloss
from src.lr_generator import get_lr
from src.resnet import resnet50
from src.utility import GetDatasetGenerator_softmax, GetDatasetGenerator_triplet, GetDatasetGenerator_quadruplet
set_seed(1)
parser = argparse.ArgumentParser(description='Image classification')
# modelarts parameter
parser.add_argument('--train_url', type=str, default=None, help='Train output path')
parser.add_argument('--data_url', type=str, default=None, help='Dataset path')
parser.add_argument('--ckpt_url', type=str, default=None, help='Pretrained ckpt path')
parser.add_argument('--checkpoint_name', type=str, default='resnet-120_625.ckpt', help='Checkpoint file')
parser.add_argument('--loss_name', type=str, default='softmax',
help='loss name: softmax(pretrained) triplet quadruplet')
# Ascend parameter
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--ckpt_path', type=str, default=None, help='ckpt path name')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_id', type=int, default=0, help='Device id')
parser.add_argument('--run_modelarts', type=ast.literal_eval, default=False, help='Run distribute')
args_opt = parser.parse_args()
class Monitor(Callback):
"""Monitor"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def epoch_begin(self, run_context):
self.losses = []
self.epoch_time = time.time()
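        # re-initialise the module-level dataset generator at the start of every epoch (presumably to re-draw samples)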
dataset_generator.__init__(data_dir=DATA_DIR, train_list=TRAIN_LIST)
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / cb_params.batch_num
print("epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:8.5f}"
.format(epoch_mseconds, per_step_mseconds, np.mean(self.losses)))
print('batch_size:', config.batch_size, 'epochs_size:', config.epoch_size,
'lr_model:', config.lr_decay_mode, 'lr:', config.lr_max, 'step_size:', step_size)
def step_begin(self, run_context):
self.step_time = time.time()
def step_end(self, run_context):
"""step_end"""
cb_params = run_context.original_args()
step_mseconds = (time.time() - self.step_time) * 1000
step_loss = cb_params.net_outputs
if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
step_loss = step_loss[0]
if isinstance(step_loss, Tensor):
step_loss = np.mean(step_loss.asnumpy())
self.losses.append(step_loss)
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
print("epochs: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:8.5f}/{:8.5f}], time:[{:5.3f}], lr:[{:8.5f}]".format(
cb_params.cur_epoch_num, config.epoch_size, cur_step_in_epoch, cb_params.batch_num, step_loss,
np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))
if __name__ == '__main__':
if args_opt.loss_name == 'softmax':
from src.config import config0 as config
from src.dataset import create_dataset0 as create_dataset
elif args_opt.loss_name == 'triplet':
from src.config import config1 as config
from src.dataset import create_dataset1 as create_dataset
elif args_opt.loss_name == 'quadruplet':
from src.config import config2 as config
from src.dataset import create_dataset1 as create_dataset
else:
        print('unknown loss name:', args_opt.loss_name)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
# init distributed
if args_opt.run_modelarts:
import moxing as mox
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
local_data_url = '/cache/data'
local_ckpt_url = '/cache/ckpt'
local_train_url = '/cache/train'
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
local_data_url = os.path.join(local_data_url, str(device_id))
local_ckpt_url = os.path.join(local_ckpt_url, str(device_id))
mox.file.copy_parallel(args_opt.data_url, local_data_url)
mox.file.copy_parallel(args_opt.ckpt_url, local_ckpt_url)
DATA_DIR = local_data_url + '/'
else:
if args_opt.run_distribute:
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
context.set_context(device_id=device_id)
init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
else:
context.set_context(device_id=args_opt.device_id)
device_num = 1
device_id = args_opt.device_id
DATA_DIR = args_opt.dataset_path + '/'
# create dataset
TRAIN_LIST = DATA_DIR + 'train_half.txt'
if args_opt.loss_name == 'softmax':
dataset_generator = GetDatasetGenerator_softmax(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
elif args_opt.loss_name == 'triplet':
dataset_generator = GetDatasetGenerator_triplet(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
elif args_opt.loss_name == 'quadruplet':
dataset_generator = GetDatasetGenerator_quadruplet(data_dir=DATA_DIR,
train_list=TRAIN_LIST)
else:
        print('unknown loss name:', args_opt.loss_name)
dataset = create_dataset(dataset_generator, do_train=True, batch_size=config.batch_size,
device_num=device_num, rank_id=device_id)
step_size = dataset.get_dataset_size()
# define net
net = resnet50(class_num=config.class_num)
# init weight
if args_opt.run_modelarts:
checkpoint_path = os.path.join(local_ckpt_url, args_opt.checkpoint_name)
else:
checkpoint_path = args_opt.ckpt_path
param_dict = load_checkpoint(checkpoint_path)
load_param_into_net(net.backbone, param_dict)
# init lr
lr = Tensor(get_lr(lr_init=config.lr_init,
lr_end=config.lr_end,
lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs,
total_epochs=config.epoch_size,
steps_per_epoch=step_size,
lr_decay_mode=config.lr_decay_mode))
# define opt
opt = Momentum(params=net.trainable_params(),
learning_rate=lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
loss_scale=config.loss_scale)
# define loss, model
if args_opt.loss_name == 'softmax':
loss = Softmaxloss(sparse=True, smooth_factor=0.1, num_classes=config.class_num)
elif args_opt.loss_name == 'triplet':
loss = Tripletloss(margin=0.1)
elif args_opt.loss_name == 'quadruplet':
loss = Quadrupletloss(train_batch_size=config.batch_size, samples_each_class=2, margin=0.1)
else:
        print('unknown loss name:', args_opt.loss_name)
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
if args_opt.loss_name == 'softmax':
model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=None,
amp_level='O3', keep_batchnorm_fp32=False)
else:
model = Model(net.backbone, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=None,
amp_level='O3', keep_batchnorm_fp32=False)
    # define callback
cb = []
if config.save_checkpoint and (device_num == 1 or device_id == 0):
config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config.keep_checkpoint_max)
check_name = 'ResNet50_' + args_opt.loss_name
if args_opt.run_modelarts:
ckpt_cb = ModelCheckpoint(prefix=check_name, directory=local_train_url, config=config_ck)
else:
save_ckpt_path = os.path.join(config.save_checkpoint_path, 'model_'+ str(device_id) +'/')
ckpt_cb = ModelCheckpoint(prefix=check_name, directory=save_ckpt_path, config=config_ck)
cb += [ckpt_cb]
cb += [Monitor(lr_init=lr.asnumpy())]
# train model
model.train(config.epoch_size - config.pretrain_epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
if args_opt.run_modelarts and config.save_checkpoint and (device_num == 1 or device_id == 0):
mox.file.copy_parallel(src_url=local_train_url, dst_url=args_opt.train_url)
|
[
"src.loss.Quadrupletloss",
"argparse.ArgumentParser",
"mindspore.train.callback.ModelCheckpoint",
"src.dataset.create_dataset1",
"moxing.file.copy_parallel",
"numpy.mean",
"mindspore.train.serialization.load_checkpoint",
"os.path.join",
"mindspore.train.serialization.load_param_into_net",
"mindspore.context.set_context",
"src.utility.GetDatasetGenerator_softmax",
"mindspore.context.set_auto_parallel_context",
"mindspore.common.set_seed",
"src.utility.GetDatasetGenerator_triplet",
"src.resnet.resnet50",
"mindspore.train.loss_scale_manager.FixedLossScaleManager",
"src.loss.Softmaxloss",
"src.utility.GetDatasetGenerator_quadruplet",
"mindspore.train.callback.CheckpointConfig",
"mindspore.communication.management.init",
"src.loss.Tripletloss",
"mindspore.train.model.Model",
"os.getenv",
"mindspore.context.reset_auto_parallel_context",
"time.time",
"src.lr_generator.get_lr"
] |
[((1586, 1597), 'mindspore.common.set_seed', 'set_seed', (['(1)'], {}), '(1)\n', (1594, 1597), False, 'from mindspore.common import set_seed\n'), ((1608, 1667), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Image classification"""'}), "(description='Image classification')\n", (1631, 1667), False, 'import argparse\n'), ((5087, 5178), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""', 'save_graphs': '(False)'}), "(mode=context.GRAPH_MODE, device_target='Ascend',\n save_graphs=False)\n", (5106, 5178), False, 'from mindspore import context\n'), ((7514, 7639), 'src.dataset.create_dataset1', 'create_dataset', (['dataset_generator'], {'do_train': '(True)', 'batch_size': 'config.batch_size', 'device_num': 'device_num', 'rank_id': 'device_id'}), '(dataset_generator, do_train=True, batch_size=config.\n batch_size, device_num=device_num, rank_id=device_id)\n', (7528, 7639), True, 'from src.dataset import create_dataset1 as create_dataset\n'), ((7735, 7771), 'src.resnet.resnet50', 'resnet50', ([], {'class_num': 'config.class_num'}), '(class_num=config.class_num)\n', (7743, 7771), False, 'from src.resnet import resnet50\n'), ((7975, 8007), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['checkpoint_path'], {}), '(checkpoint_path)\n', (7990, 8007), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((8012, 8057), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['net.backbone', 'param_dict'], {}), '(net.backbone, param_dict)\n', (8031, 8057), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((9120, 9188), 'mindspore.train.loss_scale_manager.FixedLossScaleManager', 'FixedLossScaleManager', (['config.loss_scale'], {'drop_overflow_update': '(False)'}), '(config.loss_scale, drop_overflow_update=False)\n', (9141, 9188), False, 'from mindspore.train.loss_scale_manager import FixedLossScaleManager\n'), ((2963, 2974), 'time.time', 'time.time', ([], {}), '()\n', (2972, 2974), False, 'import time\n'), ((3669, 3680), 'time.time', 'time.time', ([], {}), '()\n', (3678, 3680), False, 'import time\n'), ((5363, 5403), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'device_id'}), '(device_id=device_id)\n', (5382, 5403), False, 'from mindspore import context\n'), ((5949, 6006), 'moxing.file.copy_parallel', 'mox.file.copy_parallel', (['args_opt.data_url', 'local_data_url'], {}), '(args_opt.data_url, local_data_url)\n', (5971, 6006), True, 'import moxing as mox\n'), ((6015, 6072), 'moxing.file.copy_parallel', 'mox.file.copy_parallel', (['args_opt.ckpt_url', 'local_ckpt_url'], {}), '(args_opt.ckpt_url, local_ckpt_url)\n', (6037, 6072), True, 'import moxing as mox\n'), ((6938, 7007), 'src.utility.GetDatasetGenerator_softmax', 'GetDatasetGenerator_softmax', ([], {'data_dir': 'DATA_DIR', 'train_list': 'TRAIN_LIST'}), '(data_dir=DATA_DIR, train_list=TRAIN_LIST)\n', (6965, 7007), False, 'from src.utility import GetDatasetGenerator_softmax, GetDatasetGenerator_triplet, GetDatasetGenerator_quadruplet\n'), ((7848, 7902), 'os.path.join', 'os.path.join', (['local_ckpt_url', 'args_opt.checkpoint_name'], {}), '(local_ckpt_url, args_opt.checkpoint_name)\n', (7860, 7902), False, 'import os\n'), ((8089, 8302), 'src.lr_generator.get_lr', 'get_lr', ([], {'lr_init': 'config.lr_init', 'lr_end': 'config.lr_end', 'lr_max': 'config.lr_max', 'warmup_epochs': 'config.warmup_epochs', 
'total_epochs': 'config.epoch_size', 'steps_per_epoch': 'step_size', 'lr_decay_mode': 'config.lr_decay_mode'}), '(lr_init=config.lr_init, lr_end=config.lr_end, lr_max=config.lr_max,\n warmup_epochs=config.warmup_epochs, total_epochs=config.epoch_size,\n steps_per_epoch=step_size, lr_decay_mode=config.lr_decay_mode)\n', (8095, 8302), False, 'from src.lr_generator import get_lr\n'), ((8767, 8840), 'src.loss.Softmaxloss', 'Softmaxloss', ([], {'sparse': '(True)', 'smooth_factor': '(0.1)', 'num_classes': 'config.class_num'}), '(sparse=True, smooth_factor=0.1, num_classes=config.class_num)\n', (8778, 8840), False, 'from src.loss import Softmaxloss\n'), ((9246, 9377), 'mindspore.train.model.Model', 'Model', (['net'], {'loss_fn': 'loss', 'optimizer': 'opt', 'loss_scale_manager': 'loss_scale', 'metrics': 'None', 'amp_level': '"""O3"""', 'keep_batchnorm_fp32': '(False)'}), "(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale,\n metrics=None, amp_level='O3', keep_batchnorm_fp32=False)\n", (9251, 9377), False, 'from mindspore.train.model import Model\n'), ((9422, 9563), 'mindspore.train.model.Model', 'Model', (['net.backbone'], {'loss_fn': 'loss', 'optimizer': 'opt', 'loss_scale_manager': 'loss_scale', 'metrics': 'None', 'amp_level': '"""O3"""', 'keep_batchnorm_fp32': '(False)'}), "(net.backbone, loss_fn=loss, optimizer=opt, loss_scale_manager=\n loss_scale, metrics=None, amp_level='O3', keep_batchnorm_fp32=False)\n", (9427, 9563), False, 'from mindspore.train.model import Model\n'), ((9706, 9839), 'mindspore.train.callback.CheckpointConfig', 'CheckpointConfig', ([], {'save_checkpoint_steps': '(config.save_checkpoint_epochs * step_size)', 'keep_checkpoint_max': 'config.keep_checkpoint_max'}), '(save_checkpoint_steps=config.save_checkpoint_epochs *\n step_size, keep_checkpoint_max=config.keep_checkpoint_max)\n', (9722, 9839), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig\n'), ((10585, 10660), 'moxing.file.copy_parallel', 'mox.file.copy_parallel', ([], {'src_url': 'local_train_url', 'dst_url': 'args_opt.train_url'}), '(src_url=local_train_url, dst_url=args_opt.train_url)\n', (10607, 10660), True, 'import moxing as mox\n'), ((5282, 5304), 'os.getenv', 'os.getenv', (['"""DEVICE_ID"""'], {}), "('DEVICE_ID')\n", (5291, 5304), False, 'import os\n'), ((5331, 5353), 'os.getenv', 'os.getenv', (['"""RANK_SIZE"""'], {}), "('RANK_SIZE')\n", (5340, 5353), False, 'import os\n'), ((5562, 5568), 'mindspore.communication.management.init', 'init', ([], {}), '()\n', (5566, 5568), False, 'from mindspore.communication.management import init\n'), ((5581, 5705), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': 'device_num', 'parallel_mode': 'ParallelMode.DATA_PARALLEL', 'gradients_mean': '(True)'}), '(device_num=device_num, parallel_mode=\n ParallelMode.DATA_PARALLEL, gradients_mean=True)\n', (5614, 5705), False, 'from mindspore import context\n'), ((6276, 6316), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'device_id'}), '(device_id=device_id)\n', (6295, 6316), False, 'from mindspore import context\n'), ((6329, 6335), 'mindspore.communication.management.init', 'init', ([], {}), '()\n', (6333, 6335), False, 'from mindspore.communication.management import init\n'), ((6348, 6385), 'mindspore.context.reset_auto_parallel_context', 'context.reset_auto_parallel_context', ([], {}), '()\n', (6383, 6385), False, 'from mindspore import context\n'), ((6398, 6522), 
'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': 'device_num', 'parallel_mode': 'ParallelMode.DATA_PARALLEL', 'gradients_mean': '(True)'}), '(device_num=device_num, parallel_mode=\n ParallelMode.DATA_PARALLEL, gradients_mean=True)\n', (6431, 6522), False, 'from mindspore import context\n'), ((6636, 6685), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'args_opt.device_id'}), '(device_id=args_opt.device_id)\n', (6655, 6685), False, 'from mindspore import context\n'), ((7134, 7203), 'src.utility.GetDatasetGenerator_triplet', 'GetDatasetGenerator_triplet', ([], {'data_dir': 'DATA_DIR', 'train_list': 'TRAIN_LIST'}), '(data_dir=DATA_DIR, train_list=TRAIN_LIST)\n', (7161, 7203), False, 'from src.utility import GetDatasetGenerator_softmax, GetDatasetGenerator_triplet, GetDatasetGenerator_quadruplet\n'), ((8898, 8921), 'src.loss.Tripletloss', 'Tripletloss', ([], {'margin': '(0.1)'}), '(margin=0.1)\n', (8909, 8921), False, 'from src.loss import Tripletloss\n'), ((9985, 10064), 'mindspore.train.callback.ModelCheckpoint', 'ModelCheckpoint', ([], {'prefix': 'check_name', 'directory': 'local_train_url', 'config': 'config_ck'}), '(prefix=check_name, directory=local_train_url, config=config_ck)\n', (10000, 10064), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig\n'), ((10203, 10281), 'mindspore.train.callback.ModelCheckpoint', 'ModelCheckpoint', ([], {'prefix': 'check_name', 'directory': 'save_ckpt_path', 'config': 'config_ck'}), '(prefix=check_name, directory=save_ckpt_path, config=config_ck)\n', (10218, 10281), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig\n'), ((3164, 3175), 'time.time', 'time.time', ([], {}), '()\n', (3173, 3175), False, 'import time\n'), ((3403, 3423), 'numpy.mean', 'np.mean', (['self.losses'], {}), '(self.losses)\n', (3410, 3423), True, 'import numpy as np\n'), ((3814, 3825), 'time.time', 'time.time', ([], {}), '()\n', (3823, 3825), False, 'import time\n'), ((4468, 4488), 'numpy.mean', 'np.mean', (['self.losses'], {}), '(self.losses)\n', (4475, 4488), True, 'import numpy as np\n'), ((6187, 6209), 'os.getenv', 'os.getenv', (['"""DEVICE_ID"""'], {}), "('DEVICE_ID')\n", (6196, 6209), False, 'import os\n'), ((6240, 6262), 'os.getenv', 'os.getenv', (['"""RANK_SIZE"""'], {}), "('RANK_SIZE')\n", (6249, 6262), False, 'import os\n'), ((7333, 7405), 'src.utility.GetDatasetGenerator_quadruplet', 'GetDatasetGenerator_quadruplet', ([], {'data_dir': 'DATA_DIR', 'train_list': 'TRAIN_LIST'}), '(data_dir=DATA_DIR, train_list=TRAIN_LIST)\n', (7363, 7405), False, 'from src.utility import GetDatasetGenerator_softmax, GetDatasetGenerator_triplet, GetDatasetGenerator_quadruplet\n'), ((8982, 9070), 'src.loss.Quadrupletloss', 'Quadrupletloss', ([], {'train_batch_size': 'config.batch_size', 'samples_each_class': '(2)', 'margin': '(0.1)'}), '(train_batch_size=config.batch_size, samples_each_class=2,\n margin=0.1)\n', (8996, 9070), False, 'from src.loss import Quadrupletloss\n')]
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNotEqual(TestCase):
def cpu_op_exec(self, input1, input2):
output = torch.ne(input1, input2)
output = output.numpy().astype(np.int32)
return output
def npu_op_exec(self, input1, input2):
output = torch.ne(input1, input2)
output = output.to("cpu")
output = output.numpy().astype(np.int32)
return output
def cpu_op_inplace_exec(self, input1, input2):
input1.ne_(input2)
output = input1.numpy().astype(np.int32)
return output
def npu_op_inplace_exec(self, input1, input2):
input1.ne_(input2)
output = input1.to("cpu")
output = output.numpy().astype(np.int32)
return output
def npu_op_exec_out(self, input1, input2, out):
torch.ne(input1, input2, out=out)
output = out.to("cpu")
output = output.numpy().astype(np.int32)
return output
def not_equal_scalar_result(self, shape_format):
for item in shape_format:
scalar = np.random.uniform(0, 100)
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)
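            # boolean tensor on the NPU, used later as the out= buffer for torch.ne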
npu_input3 = copy.deepcopy(cpu_input1).to("npu").to(torch.bool)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1, scalar)
npu_output = self.npu_op_exec(npu_input1, scalar)
npu_output_out = self.npu_op_exec_out(npu_input1, scalar, npu_input3)
cpu_output_inp = self.cpu_op_inplace_exec(cpu_input1, scalar)
npu_output_inp = self.npu_op_inplace_exec(npu_input1, scalar)
self.assertRtolEqual(cpu_output, npu_output)
self.assertRtolEqual(cpu_output, npu_output_out)
self.assertRtolEqual(cpu_output_inp, npu_output_inp)
def not_equal_result(self, shape_format):
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)
cpu_input2, npu_input2 = create_common_tensor(item[1], 0, 100)
npu_input3 = copy.deepcopy(cpu_input1).to("npu").to(torch.bool)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_input2 = cpu_input2.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
npu_output_out = self.npu_op_exec_out(npu_input1, npu_input2, npu_input3)
cpu_output_inp = self.cpu_op_inplace_exec(cpu_input1, cpu_input2)
npu_output_inp = self.npu_op_inplace_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
self.assertRtolEqual(cpu_output, npu_output_out)
self.assertRtolEqual(cpu_output_inp, npu_output_inp)
def test_not_equal_shape_format_fp16_1d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [16]], [np.float16, i, [16]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_1d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [16]], [np.float32, i, [16]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp16_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [448, 1]], [np.float16, i, [448, 1]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [448, 1]], [np.float32, i, [448, 1]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp16_3d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [16, 640, 640]], [np.float16, i, [16, 640, 640]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_3d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float32, i, [16, 640, 640]], [np.float32, i, [16, 640, 640]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp16_4d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float16, i, [32, 3, 3, 3]], [np.float16, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_fp32_4d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float32, i, [32, 3, 3, 3]], [np.float32, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_result(shape_format)
    # scalar ----------------------------------------------------------------
def test_not_equal_scalar_shape_format_fp16_1d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float16, i, 18]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_1d(self, device):
format_list = [-1, 0, 3]
shape_format = [[[np.float32, i, [18]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp16_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [64, 7]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [64, 7]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_3d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [64, 24, 38]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp16_4d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float16, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_scalar_shape_format_fp32_4d(self, device):
format_list = [-1, 0]
shape_format = [[[np.float32, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_scalar_result(shape_format)
def test_not_equal_shape_format_int32_1d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [16]], [np.int32, i, [16]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_int32_2d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [448, 1]], [np.int32, i, [448, 1]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_int32_3d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [16, 640, 640]], [np.int32, i, [16, 640, 640]]] for i in format_list]
self.not_equal_result(shape_format)
def test_not_equal_shape_format_int32_4d(self, device):
format_list = [-1, 0]
shape_format = [[[np.int32, i, [32, 3, 3, 3]], [np.int32, i, [32, 3, 3, 3]]] for i in format_list]
self.not_equal_result(shape_format)
instantiate_device_type_tests(TestNotEqual, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
|
[
"numpy.random.uniform",
"torch.ne",
"copy.deepcopy",
"common_utils.run_tests",
"util_test.create_common_tensor"
] |
[((8284, 8295), 'common_utils.run_tests', 'run_tests', ([], {}), '()\n', (8293, 8295), False, 'from common_utils import TestCase, run_tests\n'), ((894, 918), 'torch.ne', 'torch.ne', (['input1', 'input2'], {}), '(input1, input2)\n', (902, 918), False, 'import torch\n'), ((1051, 1075), 'torch.ne', 'torch.ne', (['input1', 'input2'], {}), '(input1, input2)\n', (1059, 1075), False, 'import torch\n'), ((1576, 1609), 'torch.ne', 'torch.ne', (['input1', 'input2'], {'out': 'out'}), '(input1, input2, out=out)\n', (1584, 1609), False, 'import torch\n'), ((1821, 1846), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)'], {}), '(0, 100)\n', (1838, 1846), True, 'import numpy as np\n'), ((1884, 1921), 'util_test.create_common_tensor', 'create_common_tensor', (['item[0]', '(0)', '(100)'], {}), '(item[0], 0, 100)\n', (1904, 1921), False, 'from util_test import create_common_tensor\n'), ((2764, 2801), 'util_test.create_common_tensor', 'create_common_tensor', (['item[0]', '(0)', '(100)'], {}), '(item[0], 0, 100)\n', (2784, 2801), False, 'from util_test import create_common_tensor\n'), ((2839, 2876), 'util_test.create_common_tensor', 'create_common_tensor', (['item[1]', '(0)', '(100)'], {}), '(item[1], 0, 100)\n', (2859, 2876), False, 'from util_test import create_common_tensor\n'), ((1947, 1972), 'copy.deepcopy', 'copy.deepcopy', (['cpu_input1'], {}), '(cpu_input1)\n', (1960, 1972), False, 'import copy\n'), ((2902, 2927), 'copy.deepcopy', 'copy.deepcopy', (['cpu_input1'], {}), '(cpu_input1)\n', (2915, 2927), False, 'import copy\n')]
|
"""
Module containing the definitions and methods to compute
a variety of indices used to study ENSO
"""
from typing import List, Optional, Tuple
import numpy as np
import xarray as xr
from eofs.xarray import Eof
from .core import compute_anomaly, compute_climatology, xconvolve
class ECindex:
"""
Computes the E and C index according to Takahashi
"""
def __init__(
self,
sst_data: xr.DataArray,
isanomaly: bool = False,
climatology: Optional[xr.DataArray] = None,
base_period: Tuple[str, str] = ("1979-01-01", "2009-12-30"),
corr_factor: Optional[List[int]] = None,
smooth_kernel: List[int] = [1, 2, 1],
):
self.sst_data = sst_data
self.base_period = base_period
if climatology is None:
climatology = compute_climatology(self.sst_data, base_period)
self.climatology = climatology
if not isanomaly:
self.sst_data = compute_anomaly(self.sst_data, self.climatology)
self._compute_pcs()
self.smooth_kernel = smooth_kernel
if corr_factor is None:
self._auto_corr_factor()
else:
self.corr_factor = corr_factor
def _compute_pcs(self) -> None:
"""
Compute the principal components
"""
_subset = self.sst_data.sortby("lat").sel(lat=slice(-10, 10))
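        # weight grid points by sqrt(cos(latitude)) so the EOF decomposition is area-weighted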
coslat = np.cos(np.deg2rad(_subset.lat.data))
wgts = np.sqrt(coslat)[..., np.newaxis]
self.solver = Eof(_subset.sel(time=slice(*self.base_period)), weights=wgts)
clim_std = self.solver.eigenvalues(neigs=2) ** (1 / 2)
self.anom_pcs = (
self.solver.projectField(
_subset.drop("month"),
neofs=2,
)
/ clim_std
)
self.anom_smooth_pcs = None
def _corrected_pcs(self) -> xr.DataArray:
"""
Return the pcs with the correction factor applied
"""
return self.anom_pcs * self.corr_factor
def _auto_corr_factor(self) -> None:
"""
Automatically determine the correction factor by estimating
the sign of known events for the E and C index.
"""
_eofs = self.solver.eofs(neofs=2)
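        # determine the sign correction for the first two modes from an eastern equatorial Pacific box (2S-2N, 150W-110W)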
_subset = dict(lat=slice(-2, 2), lon=slice(210, 250))
new_corr_factor = np.zeros(2)
new_corr_factor[0] = 1 if _eofs.sel(mode=0, **_subset).mean() > 0 else -1
new_corr_factor[1] = 1 if _eofs.sel(mode=1, **_subset).mean() < 0 else -1
self.corr_factor = new_corr_factor
def _compute_index(self, smooth: bool = False) -> xr.Dataset:
"""
Compute the E and C index
"""
_pcs = self._corrected_pcs()
if smooth is True:
_pcs = xconvolve(_pcs, self._smooth_kernel, dim="time")
pc1 = _pcs.sel(mode=0)
pc2 = _pcs.sel(mode=1)
eindex = (pc1 - pc2) / (2 ** (1 / 2))
eindex.name = "E_index"
cindex = (pc1 + pc2) / (2 ** (1 / 2))
cindex.name = "C_index"
return xr.merge([eindex, cindex])
@property
def corr_factor(self) -> xr.DataArray:
"""
Return the correction factor applied to the first two pcs
"""
return self._corr_factor
@corr_factor.setter
def corr_factor(self, corr_factor: List[int]) -> None:
"""
Set a new correction factor to be applied to the first two pcs
"""
self._corr_factor = xr.DataArray(
np.array(corr_factor),
coords=[("mode", [0, 1])],
)
@property
def smooth_kernel(self) -> xr.DataArray:
"""
Return the smooth kernel used in the first two pcs
"""
return self._smooth_kernel
@smooth_kernel.setter
def smooth_kernel(self, smooth_kernel: List) -> None:
"""
Set a new smooth kernel to be applied to the first two pcs
"""
kernel = np.array(smooth_kernel)
self._smooth_kernel = xr.DataArray(kernel / kernel.sum(), dims=["time"])
@property
def pcs(self) -> xr.DataArray:
"""
Return the first two principal components used
in the computation of the E and C index
"""
return self._corrected_pcs()
@property
def pcs_smooth(self) -> xr.DataArray:
"""
Return the first two principal components smoothed
with the specified smooth_kernel
"""
if self.anom_smooth_pcs is None:
self.anom_smooth_pcs = xconvolve(
self._corrected_pcs(),
self._smooth_kernel,
dim="time",
)
return self.anom_smooth_pcs
@property
def ecindex(self) -> xr.Dataset:
"""
Return the first two principal components rotated,
also known as the E and C index
"""
return self._compute_index()
@property
def ecindex_smooth(self) -> xr.Dataset:
"""
Return the first two principal components smoothed and
rotated, also known as the E and C index
"""
return self._compute_index(smooth=True)
def enzones(data: xr.DataArray, zone: str = "34") -> xr.DataArray:
"""
    Compute the mean over the selected El Niño zone, also
    known as the El Niño index for that zone.
"""
zones = {
"12": {"lat": slice(-10, 0), "lon": slice(270, 280)},
"3": {"lat": slice(-5, 5), "lon": slice(210, 270)},
"34": {"lat": slice(-5, 5), "lon": slice(190, 240)},
"4": {"lat": slice(-5, 5), "lon": slice(160, 210)},
}
return data.sel(**zones[zone]).mean(dim=["lat", "lon"])
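# Illustrative usage sketch (not part of the original module; the file and variable
# names below are hypothetical). Both helpers expect an SST DataArray with
# "time", "lat" and "lon" coordinates:
#   sst = xr.open_dataset("sst.mon.mean.nc")["sst"]
#   ec = ECindex(sst)
#   print(ec.ecindex_smooth)        # smoothed E and C indices as an xr.Dataset
#   print(enzones(sst, zone="34"))  # Niño 3.4 index time series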
|
[
"numpy.deg2rad",
"numpy.zeros",
"xarray.merge",
"numpy.array",
"numpy.sqrt"
] |
[((2338, 2349), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2346, 2349), True, 'import numpy as np\n'), ((3047, 3073), 'xarray.merge', 'xr.merge', (['[eindex, cindex]'], {}), '([eindex, cindex])\n', (3055, 3073), True, 'import xarray as xr\n'), ((3931, 3954), 'numpy.array', 'np.array', (['smooth_kernel'], {}), '(smooth_kernel)\n', (3939, 3954), True, 'import numpy as np\n'), ((1404, 1432), 'numpy.deg2rad', 'np.deg2rad', (['_subset.lat.data'], {}), '(_subset.lat.data)\n', (1414, 1432), True, 'import numpy as np\n'), ((1449, 1464), 'numpy.sqrt', 'np.sqrt', (['coslat'], {}), '(coslat)\n', (1456, 1464), True, 'import numpy as np\n'), ((3488, 3509), 'numpy.array', 'np.array', (['corr_factor'], {}), '(corr_factor)\n', (3496, 3509), True, 'import numpy as np\n')]
|
import unittest
from spn.algorithms.EM import EM_optimization
from spn.algorithms.Inference import log_likelihood
from spn.algorithms.LearningWrappers import learn_parametric, learn_mspn
from spn.gpu.TensorFlow import spn_to_tf_graph, eval_tf, likelihood_loss, tf_graph_to_spn
from spn.structure.Base import Context
from spn.structure.StatisticalTypes import MetaType
import numpy as np
from spn.structure.leaves.parametric.Parametric import Gaussian
import tensorflow as tf
class TestEM(unittest.TestCase):
def test_optimization(self):
np.random.seed(17)
data = np.random.normal(10, 0.01, size=2000).tolist() + np.random.normal(30, 10, size=2000).tolist()
data = np.array(data).reshape((-1, 10))
data = data.astype(np.float32)
ds_context = Context(meta_types=[MetaType.REAL] * data.shape[1], parametric_types=[Gaussian] * data.shape[1])
spn = learn_parametric(data, ds_context)
spn.weights = [0.8, 0.2]
py_ll = log_likelihood(spn, data)
print(spn.weights)
EM_optimization(spn, data)
print(spn.weights)
py_ll_opt = log_likelihood(spn, data)
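        # Hedged check (not in the original test): exact EM should not decrease the
        # total log-likelihood, so allow only a tiny numerical tolerance here.
        self.assertGreaterEqual(py_ll_opt.sum(), py_ll.sum() - 1e-6)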
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"spn.algorithms.LearningWrappers.learn_parametric",
"numpy.random.seed",
"spn.algorithms.EM.EM_optimization",
"spn.algorithms.Inference.log_likelihood",
"numpy.array",
"numpy.random.normal",
"spn.structure.Base.Context"
] |
[((1190, 1205), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1203, 1205), False, 'import unittest\n'), ((556, 574), 'numpy.random.seed', 'np.random.seed', (['(17)'], {}), '(17)\n', (570, 574), True, 'import numpy as np\n'), ((793, 894), 'spn.structure.Base.Context', 'Context', ([], {'meta_types': '([MetaType.REAL] * data.shape[1])', 'parametric_types': '([Gaussian] * data.shape[1])'}), '(meta_types=[MetaType.REAL] * data.shape[1], parametric_types=[\n Gaussian] * data.shape[1])\n', (800, 894), False, 'from spn.structure.Base import Context\n'), ((905, 939), 'spn.algorithms.LearningWrappers.learn_parametric', 'learn_parametric', (['data', 'ds_context'], {}), '(data, ds_context)\n', (921, 939), False, 'from spn.algorithms.LearningWrappers import learn_parametric, learn_mspn\n'), ((991, 1016), 'spn.algorithms.Inference.log_likelihood', 'log_likelihood', (['spn', 'data'], {}), '(spn, data)\n', (1005, 1016), False, 'from spn.algorithms.Inference import log_likelihood\n'), ((1054, 1080), 'spn.algorithms.EM.EM_optimization', 'EM_optimization', (['spn', 'data'], {}), '(spn, data)\n', (1069, 1080), False, 'from spn.algorithms.EM import EM_optimization\n'), ((1130, 1155), 'spn.algorithms.Inference.log_likelihood', 'log_likelihood', (['spn', 'data'], {}), '(spn, data)\n', (1144, 1155), False, 'from spn.algorithms.Inference import log_likelihood\n'), ((699, 713), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (707, 713), True, 'import numpy as np\n'), ((590, 627), 'numpy.random.normal', 'np.random.normal', (['(10)', '(0.01)'], {'size': '(2000)'}), '(10, 0.01, size=2000)\n', (606, 627), True, 'import numpy as np\n'), ((639, 674), 'numpy.random.normal', 'np.random.normal', (['(30)', '(10)'], {'size': '(2000)'}), '(30, 10, size=2000)\n', (655, 674), True, 'import numpy as np\n')]
|
##-------------------------------------------
## 2 VARIABLE NORMAL DISTRIBUTION
##-------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
#USER INPUTS
FUNC=2
FS=18 #FONT SIZE
CMAP='hsv' #'RdYlBu'
#normal distribution param
ux=0.5; uy=0.0
sx=2.0; sy=1.0 #STD-DEV
rho=0.5; #[0,1) RHO=PEARSON CORRELATION
u=np.array([[ux],[uy]]) #MEAN VECTOR u=[ux,uy]
s=np.array([[sx**2.0,rho*sy*sx],[rho*sy*sx,sy**2.0]]) #COVARIANCE METRIC
#GENERATE POINTS SAMPLED FROM DISTRIBUTION
xp, yp = np.random.multivariate_normal(u.reshape(2), s, 1000).T
# DEFINE FUNCTION
def N(x, y):
    out=1.0/(2*np.pi*sx*sy*(1-rho**2.0)**0.5)
out=out*np.exp(-(((x-ux)/sx)**2.0-2*rho*((x-ux)/sx)*((y-uy)/sy)+((y-uy)/sy)**2.0)/(2*(1-rho**2)))
return out
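# Optional sanity check (not part of the original script): the hand-coded density should
# closely agree with scipy's multivariate normal at an arbitrary test point.
from scipy.stats import multivariate_normal
print(N(1.0, -0.5), multivariate_normal(u.reshape(2), s).pdf([1.0, -0.5]))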
#MESH-1 (SMALLER)
L=3*max(sx,sy)
xmin=-L; xmax=L; ymin=-L; ymax=L
x,y = np.meshgrid(np.linspace(xmin,xmax,20),np.linspace(ymin,ymax,20))
#MESH-2 (DENSER)
X, Y = np.meshgrid(np.linspace(xmin, xmax, 40), np.linspace(ymin, ymax, 40))
#SURFACE PLOT
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
ax.set_xlabel('x', fontsize=FS); ax.set_ylabel('y', fontsize=FS); ax.set_zlabel('p(x,y)', fontsize=FS)
surf=ax.plot_surface(X, Y, N(X, Y), cmap=CMAP)
ax.scatter(xp, yp, 1.1*np.max(N(X, Y)) , '.')
plt.show();
#SCATTER PLOT
plt.plot(xp, yp,'.')
#CONTOUR PLOT
# plt.axis('equal')
plt.contour(X, Y, N(X, Y), 20, cmap=CMAP);
plt.show();
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((364, 386), 'numpy.array', 'np.array', (['[[ux], [uy]]'], {}), '([[ux], [uy]])\n', (372, 386), True, 'import numpy as np\n'), ((417, 483), 'numpy.array', 'np.array', (['[[sx ** 2.0, rho * sy * sx], [rho * sy * sx, sy ** 2.0]]'], {}), '([[sx ** 2.0, rho * sy * sx], [rho * sy * sx, sy ** 2.0]])\n', (425, 483), True, 'import numpy as np\n'), ((1043, 1088), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (1055, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1294, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1335), 'matplotlib.pyplot.plot', 'plt.plot', (['xp', 'yp', '"""."""'], {}), "(xp, yp, '.')\n", (1322, 1335), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1423, 1425), True, 'import matplotlib.pyplot as plt\n'), ((869, 896), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(20)'], {}), '(xmin, xmax, 20)\n', (880, 896), True, 'import numpy as np\n'), ((895, 922), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(20)'], {}), '(ymin, ymax, 20)\n', (906, 922), True, 'import numpy as np\n'), ((959, 986), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(40)'], {}), '(xmin, xmax, 40)\n', (970, 986), True, 'import numpy as np\n'), ((988, 1015), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(40)'], {}), '(ymin, ymax, 40)\n', (999, 1015), True, 'import numpy as np\n'), ((682, 813), 'numpy.exp', 'np.exp', (['(-(((x - ux) / sx) ** 2.0 - 2 * rho * ((x - ux) / sx) * ((y - uy) / sy) + (\n (y - uy) / sy) ** 2.0) / (2 * (1 - rho ** 2)))'], {}), '(-(((x - ux) / sx) ** 2.0 - 2 * rho * ((x - ux) / sx) * ((y - uy) /\n sy) + ((y - uy) / sy) ** 2.0) / (2 * (1 - rho ** 2)))\n', (688, 813), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from torchsummaryX import summary
from torch.nn.utils import weight_norm, remove_weight_norm
from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up,walk_ratent_space
from typing import Tuple
import numpy as np
import random
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
LRELU_SLOPE = 0.1
class ResBlock(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
assert len(dilation) == 3
self.convs1 = nn.ModuleList([
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class Encoder(nn.Module):
def __init__(self, h):
super().__init__()
self.h = h
rks = h.resblock_kernel_sizes
rds = h.resblock_dilation_sizes
drs = h.downsample_rates
drks = h.downsample_kernel_sizes
dci = h.downsample_initial_channel
self.num_kernels = len(rks)
self.num_downsamples = len(drs)
self.conv_pre = weight_norm(nn.Conv1d(1, dci, 7,1,3))
        # get expected input lengths and output lengths
init_len = h.n_fft
self.L_ins = [init_len]
self.L_outs = []
for r in drs:
lo = int(init_len/r)
self.L_outs.append(lo)
self.L_ins.append(lo)
init_len = lo
self.L_outs.append(1)
# get downsampling paddings
self.pads = []
for i,r in enumerate(drs):
pad = get_padding_down(self.L_ins[i],self.L_outs[i],drks[i],r)
self.pads.append(pad)
# get downsampling channels
self.channels = []
for i in range(len(drs)+1):
self.channels.append(dci*(2**i))
self.dns = nn.ModuleList()
for i, (u, k) in enumerate(zip(drs, drks)):
self.dns.append(weight_norm(
nn.Conv1d(self.channels[i], self.channels[i+1],k,u,self.pads[i])
))
self.resblocks = nn.ModuleList()
for i in range(len(self.dns)):
ch = self.channels[i+1]
for j,(k,d) in enumerate(zip(rks,rds)):
self.resblocks.append(ResBlock(ch,k,d))
self.conv_post = weight_norm(nn.Conv1d(self.channels[-1],h.ratent_dim,self.L_ins[-1]))
self.conv_post_var = weight_norm(nn.Conv1d(self.channels[-1],h.ratent_dim,self.L_ins[-1]))
self.dns.apply(init_weights)
self.conv_post.apply(init_weights)
self.conv_post_var.apply(init_weights)
def forward(self, x:torch.Tensor) -> torch.Tensor:
x = self.conv_pre(x)
for i in range(self.num_downsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.dns[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
mean = self.conv_post(x)
var = F.softplus(self.conv_post_var(x)) + 1e-8
return mean,var
def dual_flow(self, x1:torch.Tensor, x2:torch.Tensor,with_random:bool=True) -> torch.Tensor:
mean1,var1 = self.forward(x1)
mean2,var2 = self.forward(x2)
if with_random:
out1 = self.random_sample(mean1,var1)
out2 = self.random_sample(mean2,var2)
else:
out1,out2 = mean1,mean2
out = torch.cat([out1, out2], dim=1) #.tanh() # notanh
return out
@staticmethod
def random_sample(mean:torch.Tensor, var:torch.Tensor):
return mean + torch.randn_like(mean)*torch.sqrt(var)
def summary(self):
dummy = torch.randn(1,1,self.h.n_fft)
summary(self, dummy)
def remove_weight_norm(self):
print("Removing weight norm...")
for l in self.dns:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class Decoder(nn.Module):
def __init__(self, h) -> None:
super().__init__()
self.h = h
rks = h.resblock_kernel_sizes
rds = h.resblock_dilation_sizes
uik = h.upsample_initial_kernel
urs = h.upsample_rates
urks = h.upsample_kernel_sizes
uic = h.upsample_initial_channel
self.out_len = h.n_fft +h.hop_len
self.num_kernels = len(rks)
self.num_upsamples = len(urs)
self.conv_pre = weight_norm(nn.ConvTranspose1d(h.ratent_dim*2, uic,uik))
        # get expected input lengths and output lengths
init_len = uik
self.L_ins = [init_len]
self.L_outs = []
for r in urs:
lo = init_len * r
self.L_ins.append(lo)
self.L_outs.append(lo)
init_len = lo
# get upsampling paddings
self.pads = []
for i,r in enumerate(urs):
pad = get_padding_up(self.L_ins[i],self.L_outs[i],urks[i],r)
self.pads.append(pad)
# get upsampling channels
self.channels = [uic]
ch = uic
for i in range(len(urs)):
self.channels.append(int(ch/(2**i)))
self.ups = nn.ModuleList()
for i, (u,k) in enumerate(zip(urs,urks)):
self.ups.append(weight_norm(
nn.ConvTranspose1d(self.channels[i], self.channels[i+1],k,u,self.pads[i])
))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = self.channels[i+1]
for j,(k,d) in enumerate(zip(rks,rds)):
self.resblocks.append(ResBlock(ch,k,d))
self.conv_post = weight_norm(nn.Conv1d(self.channels[-1],1,7,1,3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x:torch.Tensor) -> torch.Tensor:
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
l = x.size(-1)
start = int((l - self.out_len)/2)
x = x[:,:,start:start+self.out_len]
#x = x.tanh() # grad explosion ?
return x
def summary(self):
dummy = torch.randn(1,self.h.ratent_dim*2,1)
summary(self,dummy)
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class VoiceBand(pl.LightningModule):
def __init__(self, h,dtype:torch.dtype=torch.float,device:torch.device='cpu') -> None:
super().__init__()
self.h = h
self.reset_seed()
self.encoder = Encoder(h).type(dtype).to(self.device)
self.decoder = Decoder(h).type(dtype).to(self.device)
self.n_fft = h.n_fft
self.ratent_dim = h.ratent_dim
self.walking_steps = int(h.breath_len / h.hop_len) + 1
self.walking_resolution = h.walking_resolution
self.out_len = self.decoder.out_len
self.view_interval = 10
self.kl_lambda = h.kl_lambda
# training settings
self.MSE = nn.MSELoss()
self.MAE = nn.L1Loss()
self.actions = walk_ratent_space(self.ratent_dim, self.walking_steps,self.walking_resolution,device=device,dtype=dtype)
def forward(self, x1:torch.Tensor,x2:torch.Tensor) -> torch.Tensor:
"""
x1: (-1, 1, n_fft)
x2: (-1, 1, n_fft)
"""
mean1,var1 = self.encoder.forward(x1)
mean2,var2 = self.encoder.forward(x2)
mean,var = torch.cat([mean1,mean2],dim=1),torch.cat([var1,var2],dim=1)
out = self.encoder.random_sample(mean,var)#.tanh()# notanh
out = self.decoder(out)
return out,mean,var
def on_fit_start(self) -> None:
self.logger.log_hyperparams(self.h)
def training_step(self, batch:Tuple[torch.Tensor], batch_idx) -> torch.Tensor:
"""
batch : (-1, ch, n_fft+hop_len)
"""
sound, = batch
sound = sound.type(self.dtype)
if self.h.random_gain:
sound= self.random_gain(sound)
x1,x2,ans = sound[:,:,:self.h.n_fft], sound[:,:,-self.h.n_fft:], sound
out_,mean,var = self.forward(x1,x2)
        out = out_.tanh() # atanh grad explosion
mse = self.MSE(ans, out)
mae = self.MAE(ans,out)
KL = 0.5*torch.sum(
torch.pow(mean,2) +
var -
torch.log(var) -1
).sum() / out.size(0)
#marginal_likelihood = self.BCEwithLogits(torch.atanh(out),0.5*ans+1)
#print(True in torch.isnan(out))
marginal_likelihood= F.binary_cross_entropy_with_logits(out,0.5*ans+1,reduction="sum") / out.size(0)
loss = marginal_likelihood + KL * self.kl_lambda
#loss = self.kl_lambda * KL + mse
self.log("loss",loss)
self.log("mse",mse)
self.log("mae",mae)
self.log("KL div",KL)
self.log("Marginal likelihood",marginal_likelihood)
return loss
@torch.no_grad()
def on_epoch_end(self) -> None:
"""
        walk through the latent space and log the generated audio wave.
"""
if self.current_epoch%self.view_interval !=0:
return
self.actions = walk_ratent_space(self.ratent_dim, self.walking_steps,self.walking_resolution,
device=self.device,dtype=self.dtype)
wave = None
for act in self.actions.unsqueeze(1):
wave= self.predict_one_step(act,wave)
wave = wave.squeeze(0).T.detach().cpu().numpy()
# tensorboard logging
tb:SummaryWriter = self.logger.experiment
tb.add_audio("Ratent space audio",wave, self.current_epoch,self.h.frame_rate)
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(wave)
tb.add_figure("Walked wave",fig, self.current_epoch)
return
def random_gain(self, sound:torch.Tensor) -> torch.Tensor:
n,c,l = sound.shape
maxes= sound.view(n,c*l).abs().max(dim=1,keepdim=True).values.unsqueeze(-1)
maxes[maxes==0.0] = 1.0
gains = torch.rand_like(maxes)
sound = (sound/maxes) * gains
return sound
def configure_optimizers(self):
optim = torch.optim.AdamW(self.parameters(), self.h.lr,[self.h.adam_b1,self.h.adam_b2])
scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.h.lr_decay)
scheduler.last_epoch=self.trainer.max_epochs
return [optim],[scheduler]
silence = None
def set_silence(self):
self.silence = torch.zeros(1,self.h.sample_ch,self.n_fft,device=self.device,dtype=self.dtype)
def set_view_interval(self, interval:int=None):
if interval:
self.view_interval= interval
def predict_one_step(self, action:torch.Tensor,previous_wave:torch.Tensor=None) -> torch.Tensor:
"""
action : (-1, ratent_dim, 1)
previous_wave : (-1,ch, l)
"""
if previous_wave is None:
if self.silence is None:
self.set_silence()
previous_wave = self.silence
assert len(action.shape) == 3
assert len(previous_wave.shape) == 3
if previous_wave.size(-1) < self.n_fft :
pad_len = self.n_fft - previous_wave.size(-1)
n,c,l = previous_wave.shape
pad = torch.zeros(n,c,pad_len,dtype=previous_wave.dtype,device=previous_wave.device)
previous_wave = torch.cat([pad,previous_wave],dim=-1)
enc_in = previous_wave[:,:,-self.n_fft:].to(self.dtype).to(self.device)
encoded = self.encoder.forward(enc_in)[0]#.tanh()# notanh
dec_in = torch.cat([encoded,action],dim=1)
d_out = self.decoder.forward(dec_in)[:,:,self.n_fft:].type_as(previous_wave)
d_out = d_out.tanh() # grad explosion ?
wave = torch.cat([previous_wave,d_out],dim=-1)
return wave
def reset_seed(self):
seed = self.h.seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
def summary(self,tensorboard:bool = True):
dummy = torch.randn(1,1,self.n_fft)
summary(self, dummy,dummy)
if tensorboard:
writer = SummaryWriter()
writer.add_graph(self, [dummy,dummy])
def remove_weight_norm(self):
self.encoder.remove_weight_norm()
self.decoder.remove_weight_norm()
if __name__ == '__main__':
from utils import load_config
config = load_config("hparams/origin.json")
model = VoiceBand(config)
model.summary()
model.remove_weight_norm()
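    # Illustrative sketch (not part of the original script): synthesise a short waveform
    # by feeding random latent actions through predict_one_step; all sizes come from config.
    wave = None
    for _ in range(4):
        action = torch.randn(1, config.ratent_dim, 1)
        wave = model.predict_one_step(action, wave)
    print(wave.shape)  # expected (1, config.sample_ch, config.n_fft + 4 * config.hop_len)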
|
[
"numpy.random.seed",
"torch.sqrt",
"torch.cat",
"torch.randn",
"utils.get_padding",
"matplotlib.pyplot.figure",
"torch.rand_like",
"utils.load_config",
"torch.nn.functional.leaky_relu",
"torchsummaryX.summary",
"torch.no_grad",
"utils.walk_ratent_space",
"torch.nn.MSELoss",
"torch.nn.Conv1d",
"torch.nn.functional.binary_cross_entropy_with_logits",
"random.seed",
"torch.optim.lr_scheduler.ExponentialLR",
"utils.get_padding_up",
"torch.utils.tensorboard.SummaryWriter",
"torch.zeros",
"torch.log",
"torch.randn_like",
"torch.nn.ModuleList",
"utils.get_padding_down",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.pow",
"torch.nn.utils.remove_weight_norm",
"torch.nn.L1Loss",
"torch.nn.ConvTranspose1d"
] |
[((11251, 11266), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11264, 11266), False, 'import torch\n'), ((14800, 14834), 'utils.load_config', 'load_config', (['"""hparams/origin.json"""'], {}), "('hparams/origin.json')\n", (14811, 14834), False, 'from utils import load_config\n'), ((3380, 3395), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3393, 3395), True, 'import torch.nn as nn\n'), ((3611, 3626), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3624, 3626), True, 'import torch.nn as nn\n'), ((4649, 4664), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (4661, 4664), True, 'import torch.nn.functional as F\n'), ((5148, 5178), 'torch.cat', 'torch.cat', (['[out1, out2]'], {'dim': '(1)'}), '([out1, out2], dim=1)\n', (5157, 5178), False, 'import torch\n'), ((5396, 5427), 'torch.randn', 'torch.randn', (['(1)', '(1)', 'self.h.n_fft'], {}), '(1, 1, self.h.n_fft)\n', (5407, 5427), False, 'import torch\n'), ((5434, 5454), 'torchsummaryX.summary', 'summary', (['self', 'dummy'], {}), '(self, dummy)\n', (5441, 5454), False, 'from torchsummaryX import summary\n'), ((5668, 5701), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_pre'], {}), '(self.conv_pre)\n', (5686, 5701), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((5710, 5744), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_post'], {}), '(self.conv_post)\n', (5728, 5744), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((6967, 6982), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6980, 6982), True, 'import torch.nn as nn\n'), ((7205, 7220), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7218, 7220), True, 'import torch.nn as nn\n'), ((8066, 8081), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (8078, 8081), True, 'import torch.nn.functional as F\n'), ((8319, 8359), 'torch.randn', 'torch.randn', (['(1)', '(self.h.ratent_dim * 2)', '(1)'], {}), '(1, self.h.ratent_dim * 2, 1)\n', (8330, 8359), False, 'import torch\n'), ((8364, 8384), 'torchsummaryX.summary', 'summary', (['self', 'dummy'], {}), '(self, dummy)\n', (8371, 8384), False, 'from torchsummaryX import summary\n'), ((8597, 8630), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_pre'], {}), '(self.conv_pre)\n', (8615, 8630), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((8639, 8673), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['self.conv_post'], {}), '(self.conv_post)\n', (8657, 8673), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((9345, 9357), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (9355, 9357), True, 'import torch.nn as nn\n'), ((9377, 9388), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (9386, 9388), True, 'import torch.nn as nn\n'), ((9413, 9525), 'utils.walk_ratent_space', 'walk_ratent_space', (['self.ratent_dim', 'self.walking_steps', 'self.walking_resolution'], {'device': 'device', 'dtype': 'dtype'}), '(self.ratent_dim, self.walking_steps, self.\n walking_resolution, device=device, dtype=dtype)\n', (9430, 9525), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((11482, 11604), 'utils.walk_ratent_space', 'walk_ratent_space', (['self.ratent_dim', 'self.walking_steps', 'self.walking_resolution'], {'device': 'self.device', 'dtype': 'self.dtype'}), '(self.ratent_dim, self.walking_steps, self.\n 
walking_resolution, device=self.device, dtype=self.dtype)\n', (11499, 11604), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((11991, 12003), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12001, 12003), True, 'import matplotlib.pyplot as plt\n'), ((12358, 12380), 'torch.rand_like', 'torch.rand_like', (['maxes'], {}), '(maxes)\n', (12373, 12380), False, 'import torch\n'), ((12593, 12661), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optim'], {'gamma': 'self.h.lr_decay'}), '(optim, gamma=self.h.lr_decay)\n', (12631, 12661), False, 'import torch\n'), ((12820, 12907), 'torch.zeros', 'torch.zeros', (['(1)', 'self.h.sample_ch', 'self.n_fft'], {'device': 'self.device', 'dtype': 'self.dtype'}), '(1, self.h.sample_ch, self.n_fft, device=self.device, dtype=self\n .dtype)\n', (12831, 12907), False, 'import torch\n'), ((13937, 13972), 'torch.cat', 'torch.cat', (['[encoded, action]'], {'dim': '(1)'}), '([encoded, action], dim=1)\n', (13946, 13972), False, 'import torch\n'), ((14119, 14160), 'torch.cat', 'torch.cat', (['[previous_wave, d_out]'], {'dim': '(-1)'}), '([previous_wave, d_out], dim=-1)\n', (14128, 14160), False, 'import torch\n'), ((14249, 14269), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14263, 14269), True, 'import numpy as np\n'), ((14278, 14301), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (14295, 14301), False, 'import torch\n'), ((14310, 14338), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (14332, 14338), False, 'import torch\n'), ((14347, 14364), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14358, 14364), False, 'import random\n'), ((14429, 14458), 'torch.randn', 'torch.randn', (['(1)', '(1)', 'self.n_fft'], {}), '(1, 1, self.n_fft)\n', (14440, 14458), False, 'import torch\n'), ((14465, 14492), 'torchsummaryX.summary', 'summary', (['self', 'dummy', 'dummy'], {}), '(self, dummy, dummy)\n', (14472, 14492), False, 'from torchsummaryX import summary\n'), ((1903, 1931), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', 'LRELU_SLOPE'], {}), '(x, LRELU_SLOPE)\n', (1915, 1931), True, 'import torch.nn.functional as F\n'), ((1973, 2002), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['xt', 'LRELU_SLOPE'], {}), '(xt, LRELU_SLOPE)\n', (1985, 2002), True, 'import torch.nn.functional as F\n'), ((2144, 2165), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (2162, 2165), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((2208, 2229), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (2226, 2229), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((2647, 2673), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', 'dci', '(7)', '(1)', '(3)'], {}), '(1, dci, 7, 1, 3)\n', (2656, 2673), True, 'import torch.nn as nn\n'), ((3116, 3175), 'utils.get_padding_down', 'get_padding_down', (['self.L_ins[i]', 'self.L_outs[i]', 'drks[i]', 'r'], {}), '(self.L_ins[i], self.L_outs[i], drks[i], r)\n', (3132, 3175), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((3856, 3914), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[-1]', 'h.ratent_dim', 'self.L_ins[-1]'], {}), '(self.channels[-1], h.ratent_dim, self.L_ins[-1])\n', (3865, 3914), True, 'import torch.nn as nn\n'), ((3955, 4013), 
'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[-1]', 'h.ratent_dim', 'self.L_ins[-1]'], {}), '(self.channels[-1], h.ratent_dim, self.L_ins[-1])\n', (3964, 4013), True, 'import torch.nn as nn\n'), ((4287, 4315), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', 'LRELU_SLOPE'], {}), '(x, LRELU_SLOPE)\n', (4299, 4315), True, 'import torch.nn.functional as F\n'), ((5570, 5591), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (5588, 5591), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((6235, 6281), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(h.ratent_dim * 2)', 'uic', 'uik'], {}), '(h.ratent_dim * 2, uic, uik)\n', (6253, 6281), True, 'import torch.nn as nn\n'), ((6685, 6742), 'utils.get_padding_up', 'get_padding_up', (['self.L_ins[i]', 'self.L_outs[i]', 'urks[i]', 'r'], {}), '(self.L_ins[i], self.L_outs[i], urks[i], r)\n', (6699, 6742), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((7441, 7481), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[-1]', '(1)', '(7)', '(1)', '(3)'], {}), '(self.channels[-1], 1, 7, 1, 3)\n', (7450, 7481), True, 'import torch.nn as nn\n'), ((7704, 7732), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', 'LRELU_SLOPE'], {}), '(x, LRELU_SLOPE)\n', (7716, 7732), True, 'import torch.nn.functional as F\n'), ((8499, 8520), 'torch.nn.utils.remove_weight_norm', 'remove_weight_norm', (['l'], {}), '(l)\n', (8517, 8520), False, 'from torch.nn.utils import weight_norm, remove_weight_norm\n'), ((9790, 9822), 'torch.cat', 'torch.cat', (['[mean1, mean2]'], {'dim': '(1)'}), '([mean1, mean2], dim=1)\n', (9799, 9822), False, 'import torch\n'), ((9821, 9851), 'torch.cat', 'torch.cat', (['[var1, var2]'], {'dim': '(1)'}), '([var1, var2], dim=1)\n', (9830, 9851), False, 'import torch\n'), ((10869, 10940), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['out', '(0.5 * ans + 1)'], {'reduction': '"""sum"""'}), "(out, 0.5 * ans + 1, reduction='sum')\n", (10903, 10940), True, 'import torch.nn.functional as F\n'), ((13620, 13707), 'torch.zeros', 'torch.zeros', (['n', 'c', 'pad_len'], {'dtype': 'previous_wave.dtype', 'device': 'previous_wave.device'}), '(n, c, pad_len, dtype=previous_wave.dtype, device=previous_wave.\n device)\n', (13631, 13707), False, 'import torch\n'), ((13727, 13766), 'torch.cat', 'torch.cat', (['[pad, previous_wave]'], {'dim': '(-1)'}), '([pad, previous_wave], dim=-1)\n', (13736, 13766), False, 'import torch\n'), ((14537, 14552), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (14550, 14552), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5317, 5339), 'torch.randn_like', 'torch.randn_like', (['mean'], {}), '(mean)\n', (5333, 5339), False, 'import torch\n'), ((5340, 5355), 'torch.sqrt', 'torch.sqrt', (['var'], {}), '(var)\n', (5350, 5355), False, 'import torch\n'), ((3505, 3574), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.channels[i]', 'self.channels[i + 1]', 'k', 'u', 'self.pads[i]'], {}), '(self.channels[i], self.channels[i + 1], k, u, self.pads[i])\n', (3514, 3574), True, 'import torch.nn as nn\n'), ((7090, 7168), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['self.channels[i]', 'self.channels[i + 1]', 'k', 'u', 'self.pads[i]'], {}), '(self.channels[i], self.channels[i + 1], k, u, self.pads[i])\n', (7108, 7168), True, 'import torch.nn as nn\n'), ((826, 863), 'utils.get_padding', 'get_padding', (['kernel_size', 
'dilation[0]'], {}), '(kernel_size, dilation[0])\n', (837, 863), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((998, 1035), 'utils.get_padding', 'get_padding', (['kernel_size', 'dilation[1]'], {}), '(kernel_size, dilation[1])\n', (1009, 1035), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1170, 1207), 'utils.get_padding', 'get_padding', (['kernel_size', 'dilation[2]'], {}), '(kernel_size, dilation[2])\n', (1181, 1207), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1421, 1448), 'utils.get_padding', 'get_padding', (['kernel_size', '(1)'], {}), '(kernel_size, 1)\n', (1432, 1448), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1573, 1600), 'utils.get_padding', 'get_padding', (['kernel_size', '(1)'], {}), '(kernel_size, 1)\n', (1584, 1600), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((1725, 1752), 'utils.get_padding', 'get_padding', (['kernel_size', '(1)'], {}), '(kernel_size, 1)\n', (1736, 1752), False, 'from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up, walk_ratent_space\n'), ((10672, 10686), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (10681, 10686), False, 'import torch\n'), ((10622, 10640), 'torch.pow', 'torch.pow', (['mean', '(2)'], {}), '(mean, 2)\n', (10631, 10640), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
Created on Tue Oct 6 16:23:04 2020
@author: Admin
"""
import numpy as np
import pandas as pd
import math
import os
from keras.layers import Dense
from keras.layers import LSTM
from keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
import matplotlib.pyplot as plt
#load data
filename = 'international-airline-passengers.csv'
filepath = os.path.join(os.getcwd(), filename)
dataframe = pd.read_csv(filepath,
usecols = [1],
engine = 'python')
dataset = dataframe.values
#convert dataframe to numpy array
dataset = dataset.astype('float32')
#the shape of dataset: num_samples, features
#normalise the dataset
feature_range = (0, 1)
scaler = MinMaxScaler(feature_range = feature_range)
dataset = scaler.fit_transform(dataset)
#split the dataset into training and test set
i_split = 0.8
train_size = int(len(dataset) * i_split)
#print(train_size)
test_size = len(dataset) - train_size
#print(test_size)
train_set = dataset[0:train_size, :]
test_set = dataset[train_size:, :]
#convert an array values into a dataset matrix for LSTM
def create_dataset(dataset, look_back):
dataX = []
dataY = []
for i in range(len(dataset) - look_back - 1):
a = dataset[i:(i+look_back), 0]
b = dataset[i+look_back, 0]
dataX.append(a)
dataY.append(b)
dataX = np.array(dataX)
dataY = np.array(dataY)
return dataX, dataY
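# Small illustration (not part of the original script): with look_back = 2 the helper
# pairs each window of two consecutive values with the value that follows it.
_demo = np.array([[10.], [20.], [30.], [40.], [50.]])
_demoX, _demoY = create_dataset(_demo, 2)
print(_demoX)  # -> [[10. 20.] [20. 30.]]
print(_demoY)  # -> [30. 40.]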
look_back = 1
#look_back = time_steps: the number of previous time steps
trainX, trainY = create_dataset(train_set, look_back)
testX, testY = create_dataset(test_set, look_back)
#reshape input to be [samples, time_steps, features]
time_steps = look_back
features = dataset.shape[1]
trainX = np.reshape(trainX, (trainX.shape[0], time_steps, features))
testX = np.reshape(testX, (testX.shape[0], time_steps, features))
#create and fit the LSTM
input_shape = (time_steps, features)
lstm_neurons = 4
#lstm_neurons is a hyper-parameter
dense_neurons = 1
#dense_neurons is equal to the output dimension of trainY (= 1)
batch_size = 1
epochs = 100
lr = 0.001
optimizer = Adam(lr = lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8, decay = 0.0, amsgrad = True)
model = Sequential()
model.add(LSTM(lstm_neurons, input_shape = input_shape, return_sequences = False))
model.add(Dense(dense_neurons, activation = 'linear'))
model.compile(loss = 'mean_squared_error', optimizer = optimizer)
model.fit(trainX,
trainY,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
shuffle = True)
#make predictions
trainPredict = model.predict(trainX, batch_size = batch_size)
testPredict = model.predict(testX, batch_size = batch_size)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
'''
the most important hyper-parameters are look_back and batch_size;
researchers should experiment a few times to determine the best values
'''
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"os.getcwd",
"keras.layers.LSTM",
"sklearn.preprocessing.MinMaxScaler",
"keras.optimizers.Adam",
"numpy.empty_like",
"keras.layers.Dense",
"numpy.array",
"numpy.reshape",
"keras.models.Sequential",
"sklearn.metrics.mean_squared_error"
] |
[((587, 638), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'usecols': '[1]', 'engine': '"""python"""'}), "(filepath, usecols=[1], engine='python')\n", (598, 638), True, 'import pandas as pd\n'), ((901, 942), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': 'feature_range'}), '(feature_range=feature_range)\n', (913, 942), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1958, 2017), 'numpy.reshape', 'np.reshape', (['trainX', '(trainX.shape[0], time_steps, features)'], {}), '(trainX, (trainX.shape[0], time_steps, features))\n', (1968, 2017), True, 'import numpy as np\n'), ((2027, 2084), 'numpy.reshape', 'np.reshape', (['testX', '(testX.shape[0], time_steps, features)'], {}), '(testX, (testX.shape[0], time_steps, features))\n', (2037, 2084), True, 'import numpy as np\n'), ((2334, 2411), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)', 'amsgrad': '(True)'}), '(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=True)\n', (2338, 2411), False, 'from keras.optimizers import Adam\n'), ((2434, 2446), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2444, 2446), False, 'from keras.models import Sequential\n'), ((3517, 3539), 'numpy.empty_like', 'np.empty_like', (['dataset'], {}), '(dataset)\n', (3530, 3539), True, 'import numpy as np\n'), ((3708, 3730), 'numpy.empty_like', 'np.empty_like', (['dataset'], {}), '(dataset)\n', (3721, 3730), True, 'import numpy as np\n'), ((3928, 3954), 'matplotlib.pyplot.plot', 'plt.plot', (['trainPredictPlot'], {}), '(trainPredictPlot)\n', (3936, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3956, 3981), 'matplotlib.pyplot.plot', 'plt.plot', (['testPredictPlot'], {}), '(testPredictPlot)\n', (3964, 3981), True, 'import matplotlib.pyplot as plt\n'), ((3983, 3993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3991, 3993), True, 'import matplotlib.pyplot as plt\n'), ((551, 562), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (560, 562), False, 'import os\n'), ((1578, 1593), 'numpy.array', 'np.array', (['dataX'], {}), '(dataX)\n', (1586, 1593), True, 'import numpy as np\n'), ((1607, 1622), 'numpy.array', 'np.array', (['dataY'], {}), '(dataY)\n', (1615, 1622), True, 'import numpy as np\n'), ((2458, 2525), 'keras.layers.LSTM', 'LSTM', (['lstm_neurons'], {'input_shape': 'input_shape', 'return_sequences': '(False)'}), '(lstm_neurons, input_shape=input_shape, return_sequences=False)\n', (2462, 2525), False, 'from keras.layers import LSTM\n'), ((2542, 2583), 'keras.layers.Dense', 'Dense', (['dense_neurons'], {'activation': '"""linear"""'}), "(dense_neurons, activation='linear')\n", (2547, 2583), False, 'from keras.layers import Dense\n'), ((3240, 3289), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['trainY[0]', 'trainPredict[:, 0]'], {}), '(trainY[0], trainPredict[:, 0])\n', (3258, 3289), False, 'from sklearn.metrics import mean_squared_error\n'), ((3361, 3408), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['testY[0]', 'testPredict[:, 0]'], {}), '(testY[0], testPredict[:, 0])\n', (3379, 3408), False, 'from sklearn.metrics import mean_squared_error\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.cluster import AgglomerativeClustering
# # Organizing clusters as a hierarchical tree
# ## Grouping clusters in bottom-up fashion
np.random.seed(123)
variables = ['X', 'Y', 'Z']
labels = ['ID_0', 'ID_1', 'ID_2', 'ID_3', 'ID_4']
X = np.random.random_sample([5, 3])*10
df = pd.DataFrame(X, columns=variables, index=labels)
print(df)
# ## Performing hierarchical clustering on a distance matrix
row_dist = pd.DataFrame(squareform(pdist(df, metric='euclidean')),
columns=labels,
index=labels)
print(row_dist)
# We can either pass a condensed distance matrix (upper triangular) from the `pdist` function, or we can pass the "original" data array and define the `metric='euclidean'` argument in `linkage`. However, we should not pass the squareform distance matrix, which would yield different distance values although the overall clustering could be the same.
# 1. incorrect approach: Squareform distance matrix
#row_clusters = linkage(row_dist, method='complete', metric='euclidean')
#pd.DataFrame(row_clusters,
# columns=['row label 1', 'row label 2',
# 'distance', 'no. of items in clust.'],
# index=['cluster %d' % (i + 1)
# for i in range(row_clusters.shape[0])])
# 2. correct approach: Condensed distance matrix
row_clusters = linkage(pdist(df, metric='euclidean'), method='complete')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2',
'distance', 'no. of items in clust.'],
index=['cluster %d' % (i + 1)
for i in range(row_clusters.shape[0])])
# 3. correct approach: Input matrix
row_clusters = linkage(df.values, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2',
'distance', 'no. of items in clust.'],
index=['cluster %d' % (i + 1)
for i in range(row_clusters.shape[0])])
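# Optional check (not part of the original notebook): the condensed-distance and the
# raw-input approaches should produce identical linkage matrices.
print(np.allclose(linkage(pdist(df, metric='euclidean'), method='complete'), row_clusters))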
# make dendrogram
row_dendr = dendrogram(row_clusters,
labels=labels,
color_threshold=np.inf
)
plt.tight_layout()
plt.ylabel('Euclidean distance')
plt.show()
# ## Attaching dendrograms to a heat map
# plot row dendrogram
fig = plt.figure(figsize=(8, 8), facecolor='white')
axd = fig.add_axes([0.09, 0.1, 0.2, 0.6])
# note: for matplotlib < v1.5.1, please use orientation='right'
row_dendr = dendrogram(row_clusters, orientation='left')
# reorder data with respect to clustering
df_rowclust = df.iloc[row_dendr['leaves'][::-1]]
axd.set_xticks([])
axd.set_yticks([])
# remove axes spines from dendrogram
for i in axd.spines.values():
i.set_visible(False)
# plot heatmap
axm = fig.add_axes([0.23, 0.1, 0.6, 0.6]) # x-pos, y-pos, width, height
cax = axm.matshow(df_rowclust, interpolation='nearest', cmap='hot_r')
fig.colorbar(cax)
axm.set_xticklabels([''] + list(df_rowclust.columns))
axm.set_yticklabels([''] + list(df_rowclust.index))
plt.show()
# ## Applying agglomerative clustering via scikit-learn
ac = AgglomerativeClustering(n_clusters=3,
affinity='euclidean',
linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
ac = AgglomerativeClustering(n_clusters=2,
affinity='euclidean',
linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
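# Optional cross-check (not part of the original notebook): cutting the SciPy linkage into
# three flat clusters should group the points the same way as AgglomerativeClustering with
# n_clusters=3, up to label numbering.
from scipy.cluster.hierarchy import fcluster
print(fcluster(row_clusters, t=3, criterion='maxclust'))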
|
[
"pandas.DataFrame",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.random.random_sample",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.figure",
"sklearn.cluster.AgglomerativeClustering",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.tight_layout",
"scipy.cluster.hierarchy.dendrogram"
] |
[((326, 345), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (340, 345), True, 'import numpy as np\n'), ((470, 518), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'variables', 'index': 'labels'}), '(X, columns=variables, index=labels)\n', (482, 518), True, 'import pandas as pd\n'), ((1898, 1955), 'scipy.cluster.hierarchy.linkage', 'linkage', (['df.values'], {'method': '"""complete"""', 'metric': '"""euclidean"""'}), "(df.values, method='complete', metric='euclidean')\n", (1905, 1955), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((2231, 2294), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['row_clusters'], {'labels': 'labels', 'color_threshold': 'np.inf'}), '(row_clusters, labels=labels, color_threshold=np.inf)\n', (2241, 2294), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((2366, 2384), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2382, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2417), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Euclidean distance"""'], {}), "('Euclidean distance')\n", (2395, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2418, 2428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2426, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)', 'facecolor': '"""white"""'}), "(figsize=(8, 8), facecolor='white')\n", (2510, 2545), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2709), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['row_clusters'], {'orientation': '"""left"""'}), "(row_clusters, orientation='left')\n", (2675, 2709), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((3217, 3227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3225, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3369), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(3)', 'affinity': '"""euclidean"""', 'linkage': '"""complete"""'}), "(n_clusters=3, affinity='euclidean', linkage='complete')\n", (3313, 3369), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((3500, 3579), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(2)', 'affinity': '"""euclidean"""', 'linkage': '"""complete"""'}), "(n_clusters=2, affinity='euclidean', linkage='complete')\n", (3523, 3579), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((430, 461), 'numpy.random.random_sample', 'np.random.random_sample', (['[5, 3]'], {}), '([5, 3])\n', (453, 461), True, 'import numpy as np\n'), ((1552, 1581), 'scipy.spatial.distance.pdist', 'pdist', (['df'], {'metric': '"""euclidean"""'}), "(df, metric='euclidean')\n", (1557, 1581), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((627, 656), 'scipy.spatial.distance.pdist', 'pdist', (['df'], {'metric': '"""euclidean"""'}), "(df, metric='euclidean')\n", (632, 656), False, 'from scipy.spatial.distance import pdist, squareform\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 15:19:55 2020
@author: mi19356
"""
import numpy as np
import os
import pandas as pd
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
import random
import math
from scrape import vtk_scrap
from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream,sphericaldiam,printtofiletext
#scrape data
#orien,vtkdata,vtkdataPoints,const=vtk_scrap('PF_00189000','graindata')
dream=0
if dream==1:
orien,vtkdata,vtkdataPoints,const=vtk_scrap('PF_00130000','graindata',dream)
grainids=data_reconstruct(vtkdata, vtkdataPoints,1,orien)
else:
orien,vtkdata,const=vtk_scrap('vtkupdate','graindata',dream)
grainids,diameter=data_reconstruct_dream(vtkdata,orien)
#construct a vtk file
#vtkdatareso=reso_change(vtkdata)
"""
Create orientation matrix
"""
def rotation_info(orien,grainids):
#Defining local variables
vec1=[0,0,1]
vec2=[0,1,0]
#modify the orientations
orien=orien[1:,1:]
#check to see if there are missing orientations
if len(orien)<len(grainids):
totaldif=len(grainids)-len(orien)
for i in range(0,int(totaldif)):
orien=np.append(orien,[random.uniform(0,2*math.pi),random.uniform(0,2*math.pi),random.uniform(0,2*math.pi)])
orien=orien.reshape(int(len(orien)/3),3)
    #construct rotation matrix
zrot=np.array([[np.cos((orien[:,0])),np.sin((orien[:,0])),np.zeros(len(orien))],[-np.sin((orien[:,0])),np.cos((orien[:,0])),np.zeros(len(orien))],[np.zeros(len(orien)),np.zeros(len(orien)),np.ones(len(orien))]])
xrot=np.array([[np.ones(len(orien)),np.zeros(len(orien)),np.zeros(len(orien))],[np.zeros(len(orien)),np.cos((orien[:,1])),np.sin((orien[:,1]))],[np.zeros(len(orien)),-np.sin((orien[:,1])),np.cos((orien[:,1]))]])
zrot2=np.array([[np.cos((orien[:,2])),np.sin((orien[:,2])),np.zeros(len(orien))],[-np.sin((orien[:,2])),np.cos((orien[:,2])),np.zeros(len(orien))],[np.zeros(len(orien)),np.zeros(len(orien)),np.ones(len(orien))]])
total_rot=[[]*len(orien)]*len(orien)
samp1=[[]*len(orien)]*len(orien)
samp2=[[]*len(orien)]*len(orien)
for i in range(0,len(orien)):
total_rot[i]=np.transpose(np.dot(np.dot(zrot2[:,:,i],xrot[:,:,i]),zrot[:,:,i]))
samp1[i]=np.dot(total_rot[i],vec1)
samp2[i]=np.dot(total_rot[i],vec2)
return vec1, vec2, samp1, samp2, total_rot, orien
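# Hedged sanity check (not part of the original script): each assembled Z-X-Z rotation
# matrix should be orthonormal, i.e. R.dot(R.T) recovers the identity.
_rot_demo = rotation_info(orien, grainids)[4][0]
print(np.allclose(_rot_demo.dot(_rot_demo.T), np.eye(3)))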
"""
create material file for AMITEX
"""
def mat_create(orien,const, diameter,statev):
#rotating vectors using grain orientations
vec1,vec2,samp1,samp2,total_rot, orien=rotation_info(orien,grainids)
#use the diameter to create a variable parameter for \tau
    #diameter currently in microns, convert to mm
#need to add 17.9 and 10 to excel const file.
diameter=(2*diameter)/1000
#writing diameters to file
printtofiletext(diameter,'diameters')
#writing orientations to file
orienprint=list(orien)
printtofiletext(orienprint,'orientations')
taud=220 + (17.9/((diameter)**0.5))
    #check to make sure there are no values above the threshold
#checkgreater=np.where(taud>350)[0]
#replace these values
#taud[checkgreater]=340.0
Materials = Element('Materials')
comment = Comment('REFERENCE MATERIAL')
Materials.append(comment)
child = SubElement(Materials, 'Reference_Material',Lambda0= '2.0431e+5', Mu0='0.8756e+5' )
comment = Comment('MATERIAL 1')
Materials.append(comment)
"orientation files required if material zone technique is used in AMITEX"
fsamp1 = open('fsam1.txt', 'w')
fsamp2 = open('fsam2.txt', 'w')
fsamp3 = open('fsam3.txt', 'w')
fsamp21 = open('fsam21.txt', 'w')
fsamp22 = open('fsam22.txt', 'w')
fsamp23 = open('fsam23.txt', 'w')
orien1 = open('orien1.txt', 'w')
orien2 = open('orien2.txt', 'w')
orien3 = open('orien3.txt', 'w')
tau01 = open('tau1.txt', 'w')
tau02 = open('tau2.txt', 'w')
for numMat in range(1,len(orien)+1):
for i in range(0,(len(const))):
if i==59:
const[i,0]=samp1[numMat-1][0]
fsamp1.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==60:
const[i,0]=samp1[numMat-1][1]
fsamp2.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==61:
const[i,0]=samp1[numMat-1][2]
fsamp3.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==67:
const[i,0]=samp2[numMat-1][0]
fsamp21.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==68:
const[i,0]=samp2[numMat-1][1]
fsamp22.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==69:
const[i,0]=samp2[numMat-1][2]
fsamp23.write(str("{:.16f}".format(const[i,0]))+'\n')
#adjust const array to include grain dependent info
#grain orientations
#update the value for tau0
elif i==98:
const[i,0]=taud[numMat-1]
tau01.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==114:
const[i,0]=taud[numMat-1]
tau02.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==168:
const[i,0]=(orien[numMat-1,0])
orien1.write(str(const[i,0])+'\n')
elif i==169:
const[i,0]=(orien[numMat-1,1])
orien2.write(str(const[i,0])+'\n')
elif i==170:
const[i,0]=(orien[numMat-1,2])
orien3.write(str(const[i,0])+'\n')
fsamp1.close()
fsamp2.close()
fsamp3.close()
fsamp21.close()
fsamp22.close()
fsamp23.close()
orien1.close()
orien2.close()
orien3.close()
child_grain=SubElement(Materials, 'Material', numM="1",Lib='/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so', Law='UMATBCCGDGS')
"This stores all the parameters required for the material"
"Coeff is the element of the grain material, and the atrributes are the parameter values"
"iterate across the different material constants to create subelelements for each constant2"
for i in range(0,(len(const))):
if i==59:
const[i,0]=samp1[numMat-1][0]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam1.txt")
elif i==60:
const[i,0]=samp1[numMat-1][1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam2.txt")
elif i==61:
const[i,0]=samp1[numMat-1][2]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam3.txt")
elif i==67:
const[i,0]=samp2[numMat-1][0]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam21.txt")
elif i==68:
const[i,0]=samp2[numMat-1][1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam22.txt")
elif i==69:
const[i,0]=samp2[numMat-1][2]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam23.txt")
elif i==98:
const[i,0]=taud[numMat-1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/tau1.txt")
elif i==114:
const[i,0]=taud[numMat-1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/tau2.txt")
elif i==168:
const[i,0]=(orien[numMat-1,0])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien1.txt")
elif i==169:
const[i,0]=(orien[numMat-1,1])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien2.txt")
elif i==170:
const[i,0]=(orien[numMat-1,2])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien3.txt")
else:
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant',Value=str(const[i,0]))
    #iterate across the required number of state variables needed
for i in range(0,statev):
child_grain_tail = SubElement(child_grain, 'IntVar',Index=str(i+1), Type='Constant',Value='0.')
tree = ElementTree(Materials)
tree.write("fatemptzone2.xml")
mat_create(orien,const,diameter,900)
|
[
"dataconversions.data_reconstruct_dream",
"random.uniform",
"dataconversions.printtofiletext",
"xml.etree.ElementTree.Element",
"dataconversions.data_reconstruct",
"xml.etree.ElementTree.Comment",
"numpy.sin",
"numpy.cos",
"xml.etree.ElementTree.SubElement",
"numpy.dot",
"scrape.vtk_scrap",
"xml.etree.ElementTree.ElementTree"
] |
[((547, 591), 'scrape.vtk_scrap', 'vtk_scrap', (['"""PF_00130000"""', '"""graindata"""', 'dream'], {}), "('PF_00130000', 'graindata', dream)\n", (556, 591), False, 'from scrape import vtk_scrap\n'), ((605, 655), 'dataconversions.data_reconstruct', 'data_reconstruct', (['vtkdata', 'vtkdataPoints', '(1)', 'orien'], {}), '(vtkdata, vtkdataPoints, 1, orien)\n', (621, 655), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((686, 728), 'scrape.vtk_scrap', 'vtk_scrap', (['"""vtkupdate"""', '"""graindata"""', 'dream'], {}), "('vtkupdate', 'graindata', dream)\n", (695, 728), False, 'from scrape import vtk_scrap\n'), ((750, 788), 'dataconversions.data_reconstruct_dream', 'data_reconstruct_dream', (['vtkdata', 'orien'], {}), '(vtkdata, orien)\n', (772, 788), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((3067, 3105), 'dataconversions.printtofiletext', 'printtofiletext', (['diameter', '"""diameters"""'], {}), "(diameter, 'diameters')\n", (3082, 3105), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((3179, 3222), 'dataconversions.printtofiletext', 'printtofiletext', (['orienprint', '"""orientations"""'], {}), "(orienprint, 'orientations')\n", (3194, 3222), False, 'from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream, sphericaldiam, printtofiletext\n'), ((3438, 3458), 'xml.etree.ElementTree.Element', 'Element', (['"""Materials"""'], {}), "('Materials')\n", (3445, 3458), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((3474, 3503), 'xml.etree.ElementTree.Comment', 'Comment', (['"""REFERENCE MATERIAL"""'], {}), "('REFERENCE MATERIAL')\n", (3481, 3503), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((3550, 3636), 'xml.etree.ElementTree.SubElement', 'SubElement', (['Materials', '"""Reference_Material"""'], {'Lambda0': '"""2.0431e+5"""', 'Mu0': '"""0.8756e+5"""'}), "(Materials, 'Reference_Material', Lambda0='2.0431e+5', Mu0=\n '0.8756e+5')\n", (3560, 3636), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((3650, 3671), 'xml.etree.ElementTree.Comment', 'Comment', (['"""MATERIAL 1"""'], {}), "('MATERIAL 1')\n", (3657, 3671), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((6156, 6313), 'xml.etree.ElementTree.SubElement', 'SubElement', (['Materials', '"""Material"""'], {'numM': '"""1"""', 'Lib': '"""/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so"""', 'Law': '"""UMATBCCGDGS"""'}), "(Materials, 'Material', numM='1', Lib=\n '/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so'\n , Law='UMATBCCGDGS')\n", (6166, 6313), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((9162, 9184), 'xml.etree.ElementTree.ElementTree', 'ElementTree', (['Materials'], {}), '(Materials)\n', (9173, 9184), False, 'from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree\n'), ((2436, 2462), 'numpy.dot', 'np.dot', (['total_rot[i]', 'vec1'], {}), '(total_rot[i], vec1)\n', (2442, 2462), True, 'import numpy as np\n'), ((2481, 2507), 'numpy.dot', 'np.dot', (['total_rot[i]', 'vec2'], {}), '(total_rot[i], vec2)\n', (2487, 2507), True, 'import numpy as np\n'), ((1515, 1534), 'numpy.cos', 'np.cos', 
(['orien[:, 0]'], {}), '(orien[:, 0])\n', (1521, 1534), True, 'import numpy as np\n'), ((1536, 1555), 'numpy.sin', 'np.sin', (['orien[:, 0]'], {}), '(orien[:, 0])\n', (1542, 1555), True, 'import numpy as np\n'), ((1602, 1621), 'numpy.cos', 'np.cos', (['orien[:, 0]'], {}), '(orien[:, 0])\n', (1608, 1621), True, 'import numpy as np\n'), ((1818, 1837), 'numpy.cos', 'np.cos', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1824, 1837), True, 'import numpy as np\n'), ((1839, 1858), 'numpy.sin', 'np.sin', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1845, 1858), True, 'import numpy as np\n'), ((1905, 1924), 'numpy.cos', 'np.cos', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1911, 1924), True, 'import numpy as np\n'), ((1952, 1971), 'numpy.cos', 'np.cos', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (1958, 1971), True, 'import numpy as np\n'), ((1973, 1992), 'numpy.sin', 'np.sin', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (1979, 1992), True, 'import numpy as np\n'), ((2039, 2058), 'numpy.cos', 'np.cos', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (2045, 2058), True, 'import numpy as np\n'), ((2370, 2407), 'numpy.dot', 'np.dot', (['zrot2[:, :, i]', 'xrot[:, :, i]'], {}), '(zrot2[:, :, i], xrot[:, :, i])\n', (2376, 2407), True, 'import numpy as np\n'), ((1315, 1345), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1329, 1345), False, 'import random\n'), ((1343, 1373), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1357, 1373), False, 'import random\n'), ((1371, 1401), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (1385, 1401), False, 'import random\n'), ((1581, 1600), 'numpy.sin', 'np.sin', (['orien[:, 0]'], {}), '(orien[:, 0])\n', (1587, 1600), True, 'import numpy as np\n'), ((1884, 1903), 'numpy.sin', 'np.sin', (['orien[:, 1]'], {}), '(orien[:, 1])\n', (1890, 1903), True, 'import numpy as np\n'), ((2018, 2037), 'numpy.sin', 'np.sin', (['orien[:, 2]'], {}), '(orien[:, 2])\n', (2024, 2037), True, 'import numpy as np\n')]
|
"""
Use this script to post-process the predicted softmax segmentation.
This script performs rigid registration of the softmax prediction to the subject space.
@author: <NAME> (<EMAIL>)
"""
import os
from argparse import ArgumentParser
import numpy as np
import nibabel as nib
parser = ArgumentParser()
parser.add_argument('--softmax', required=True,
help='path to the softmax prediction in the template space.')
parser.add_argument('--aff', required=True,
help='path to the Affine transformation that was used'
'to go from subject space to template space.')
parser.add_argument('--input_img', required=True,
help='Path to the SRR to preprocess')
parser.add_argument('--output_folder', required=True)
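# Example invocation (the script name and file paths are illustrative only):
#   python postprocess_softmax.py --softmax softmax_template.nii.gz \
#       --aff subject_to_template_affine.txt --input_img srr.nii.gz \
#       --output_folder ./postprocessed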
def invert_affine(aff_path, output_dir):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
aff_name = os.path.split(aff_path)[1].replace('.txt', '')
save_inv_aff_path = os.path.join(
output_dir,
'%s_inv.txt' % aff_name,
)
cmd = 'reg_transform -invAff %s %s' % (aff_path, save_inv_aff_path)
os.system(cmd)
return save_inv_aff_path
def warp_softmax(softmax_path, ref_img_path, save_path, aff_path):
# Warp the softmax
cmd = 'reg_resample -ref %s -flo %s -trans %s -res %s -inter 1 -pad 0 -voff' % \
(ref_img_path, softmax_path, aff_path, save_path)
os.system(cmd)
    # Fix border effects due to padding with 0 (make the probabilities sum to 1 again)
softmax_nii = nib.load(save_path)
softmax = softmax_nii.get_fdata().astype(np.float32)
sum_proba = np.sum(softmax, axis=-1)
softmax[:, :, :, 0] += 1. - sum_proba
post_softmax_nii = nib.Nifti1Image(softmax, softmax_nii.affine)
nib.save(post_softmax_nii, save_path)
def main(args):
if not os.path.exists(args.output_folder):
os.mkdir(args.output_folder)
# Compute the inverse affine transform
print('Invert %s' % args.aff)
inv_aff_path = invert_affine(aff_path=args.aff, output_dir=args.output_folder)
print(inv_aff_path)
# Warp the softmax
save_path = os.path.join(args.output_folder, 'softmax.nii.gz')
print('warp %s' % args.softmax)
warp_softmax(
softmax_path=args.softmax,
ref_img_path=args.input_img,
save_path=save_path,
aff_path=inv_aff_path,
)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
[
"nibabel.Nifti1Image",
"os.mkdir",
"numpy.sum",
"argparse.ArgumentParser",
"nibabel.load",
"os.path.exists",
"os.system",
"nibabel.save",
"os.path.split",
"os.path.join"
] |
[((285, 301), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (299, 301), False, 'from argparse import ArgumentParser\n'), ((982, 1031), 'os.path.join', 'os.path.join', (['output_dir', "('%s_inv.txt' % aff_name)"], {}), "(output_dir, '%s_inv.txt' % aff_name)\n", (994, 1031), False, 'import os\n'), ((1131, 1145), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1140, 1145), False, 'import os\n'), ((1414, 1428), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1423, 1428), False, 'import os\n'), ((1524, 1543), 'nibabel.load', 'nib.load', (['save_path'], {}), '(save_path)\n', (1532, 1543), True, 'import nibabel as nib\n'), ((1617, 1641), 'numpy.sum', 'np.sum', (['softmax'], {'axis': '(-1)'}), '(softmax, axis=-1)\n', (1623, 1641), True, 'import numpy as np\n'), ((1707, 1751), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['softmax', 'softmax_nii.affine'], {}), '(softmax, softmax_nii.affine)\n', (1722, 1751), True, 'import nibabel as nib\n'), ((1756, 1793), 'nibabel.save', 'nib.save', (['post_softmax_nii', 'save_path'], {}), '(post_softmax_nii, save_path)\n', (1764, 1793), True, 'import nibabel as nib\n'), ((2121, 2171), 'os.path.join', 'os.path.join', (['args.output_folder', '"""softmax.nii.gz"""'], {}), "(args.output_folder, 'softmax.nii.gz')\n", (2133, 2171), False, 'import os\n'), ((839, 865), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (853, 865), False, 'import os\n'), ((875, 895), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (883, 895), False, 'import os\n'), ((1823, 1857), 'os.path.exists', 'os.path.exists', (['args.output_folder'], {}), '(args.output_folder)\n', (1837, 1857), False, 'import os\n'), ((1867, 1895), 'os.mkdir', 'os.mkdir', (['args.output_folder'], {}), '(args.output_folder)\n', (1875, 1895), False, 'import os\n'), ((911, 934), 'os.path.split', 'os.path.split', (['aff_path'], {}), '(aff_path)\n', (924, 934), False, 'import os\n')]
|
""" Construct dataset """
import sys
import math
import pandas as pd
import numpy as np
import csv
def calc_gaps(station):
"""Calculate gaps in time series"""
df = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
df = df.set_index(['Date'])
df.index = pd.to_datetime(df.index)
dates = df.index.values
first_date = dates[0]
last_date = dates[-1]
print('Data from {0} to {1}'.format(first_date, last_date))
total_range = last_date - first_date
total_range_seconds = total_range / np.timedelta64(1, 's')
last_read_date = first_date
gaps = []
    total_gap = 0
for d in dates:
diff = d - last_read_date
seconds = diff / np.timedelta64(1, 's')
hours = diff / np.timedelta64(1, 'h')
if hours > 72: # met stations
# if hours > 24: # flow stations
total_gap = total_gap + seconds
gaps.append(seconds)
last_read_date = d
print('Number of gaps {0}'.format(len(gaps)))
years = math.floor(total_gap / 3600 / 24 / 365.25)
days = math.floor((total_gap / 3600 / 24 % 365.25))
print('Total gap {0} years'.format(total_gap / 3600 / 24 / 365.25))
print('Total gap {0} years {1} days'.format(years, days))
total_left = total_range_seconds - total_gap
years_left = math.floor(total_left / 3600 / 24 / 365.25)
days_left = math.floor((total_left / 3600 / 24 % 365.25))
print('Total left {0} years'.format(total_left / 3600 / 24 / 365.25))
print('Total left {0} years {1} days'.format(years_left, days_left))
# gap_file = '{0}-gaps.txt'.format(station)
# np.savetxt(gap_file, gaps, delimiter=',', fmt="%s")
def calc_histogram(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 10)].count()
i2 = df[(df['Value'] > 10) & (df['Value'] <= 50)].count()
i3 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i4 = df[(df['Value'] > 100) & (df['Value'] <= 200)].count()
i5 = df[(df['Value'] > 200) & (df['Value'] <= 300)].count()
i6 = df[(df['Value'] > 300) & (df['Value'] <= 400)].count()
i7 = df[(df['Value'] > 400) & (df['Value'] <= 500)].count()
i8 = df[(df['Value'] > 500) & (df['Value'] <= 1000)].count()
i9 = df[(df['Value'] > 1000)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 10: {0}'.format(i1['Value']/total_count['Value']))
print(' 10 - 50: {0}'.format(i2['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i3['Value']/total_count['Value']))
print('100 - 200: {0}'.format(i4['Value']/total_count['Value']))
print('200 - 300: {0}'.format(i5['Value']/total_count['Value']))
print('300 - 400: {0}'.format(i6['Value']/total_count['Value']))
print('400 - 500: {0}'.format(i7['Value']/total_count['Value']))
print('500 - 1000: {0}'.format(i8['Value']/total_count['Value']))
print(' > 1000: {0}'.format(i9['Value']/total_count['Value']))
def calc_histogram4(station1, station2):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram3(station1, station2, station3):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
raw3 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station3), parse_dates=['Date'])
raw3 = raw3.set_index(['Date'])
raw3.index = pd.to_datetime(raw3.index)
df3 = raw3.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value'] + df3['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram2(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 5)].count()
i2 = df[(df['Value'] > 5) & (df['Value'] <= 10)].count()
i3 = df[(df['Value'] > 10) & (df['Value'] <= 20)].count()
i4 = df[(df['Value'] > 20) & (df['Value'] <= 50)].count()
i5 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i6 = df[(df['Value'] > 100)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 5: {0}'.format(i1['Value']/total_count['Value']))
print(' 5 - 10: {0}'.format(i2['Value']/total_count['Value']))
print(' 10 - 20: {0}'.format(i3['Value']/total_count['Value']))
print(' 20 - 50: {0}'.format(i4['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i5['Value']/total_count['Value']))
print(' > 100: {0}'.format(i6['Value']/total_count['Value']))
def median_sampling_rate(station):
"""Get median over year sampling rate"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('Y').count()
df.to_csv('{0}_sample_count.csv'.format(station))
def resample(station):
"""Resample station data"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
df = df.round({'Value': 0})
df.to_csv('{0}_resampled.csv'.format(station))
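# Example usage (the script name is illustrative only):
#   python construct_dataset.py D7H008
# The single argument is the station identifier used to locate
# '../data/all_clean/<station>-clean.txt'.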
if __name__ == '__main__':
station = sys.argv[1]
calc_gaps(station)
#calc_histogram(station)
#calc_histogram2(station)
#calc_histogram3('D7H014Z', 'D7H015Z', 'D7H016Z')
#calc_histogram4('D7H008', 'D7H017PLUS')
#median_sampling_rate(station)
#resample(station)
|
[
"numpy.timedelta64",
"pandas.to_datetime",
"math.floor"
] |
[((307, 331), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (321, 331), True, 'import pandas as pd\n'), ((1041, 1083), 'math.floor', 'math.floor', (['(total_gap / 3600 / 24 / 365.25)'], {}), '(total_gap / 3600 / 24 / 365.25)\n', (1051, 1083), False, 'import math\n'), ((1095, 1137), 'math.floor', 'math.floor', (['(total_gap / 3600 / 24 % 365.25)'], {}), '(total_gap / 3600 / 24 % 365.25)\n', (1105, 1137), False, 'import math\n'), ((1341, 1384), 'math.floor', 'math.floor', (['(total_left / 3600 / 24 / 365.25)'], {}), '(total_left / 3600 / 24 / 365.25)\n', (1351, 1384), False, 'import math\n'), ((1401, 1444), 'math.floor', 'math.floor', (['(total_left / 3600 / 24 % 365.25)'], {}), '(total_left / 3600 / 24 % 365.25)\n', (1411, 1444), False, 'import math\n'), ((1900, 1925), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (1914, 1925), True, 'import pandas as pd\n'), ((3577, 3603), 'pandas.to_datetime', 'pd.to_datetime', (['raw1.index'], {}), '(raw1.index)\n', (3591, 3603), True, 'import pandas as pd\n'), ((3792, 3818), 'pandas.to_datetime', 'pd.to_datetime', (['raw2.index'], {}), '(raw2.index)\n', (3806, 3818), True, 'import pandas as pd\n'), ((5561, 5587), 'pandas.to_datetime', 'pd.to_datetime', (['raw1.index'], {}), '(raw1.index)\n', (5575, 5587), True, 'import pandas as pd\n'), ((5776, 5802), 'pandas.to_datetime', 'pd.to_datetime', (['raw2.index'], {}), '(raw2.index)\n', (5790, 5802), True, 'import pandas as pd\n'), ((5991, 6017), 'pandas.to_datetime', 'pd.to_datetime', (['raw3.index'], {}), '(raw3.index)\n', (6005, 6017), True, 'import pandas as pd\n'), ((7749, 7774), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (7763, 7774), True, 'import pandas as pd\n'), ((9021, 9046), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (9035, 9046), True, 'import pandas as pd\n'), ((9337, 9362), 'pandas.to_datetime', 'pd.to_datetime', (['raw.index'], {}), '(raw.index)\n', (9351, 9362), True, 'import pandas as pd\n'), ((558, 580), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (572, 580), True, 'import numpy as np\n'), ((726, 748), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (740, 748), True, 'import numpy as np\n'), ((772, 794), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (786, 794), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import keras
import keras.layers as klayers
import time_series as tsutils
import processing
import metrics
class ModelBase(object):
# Required 'context' information for a model
input_window = None
    # How many points the model can predict for a single given context
output_window = None
    # How the output window is shifted w.r.t. the input window
offset = 1
class Model(ModelBase):
def __init__(self,
input_shape: tuple = (5, 1),
outputs: int = 1):
self.input_window = input_shape[0]
self.output_window = outputs
self.offset = outputs
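        # A small temporal CNN: three Conv1D layers (10 filters each, kernel size 3,
        # 'same' padding, ReLU) over the input window, flattened into a Dense layer
        # with `outputs` units; compiled with Adam on MSE, tracking MAE.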
model = keras.Sequential()
model.add(klayers.Conv1D(10, input_shape=input_shape, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Conv1D(10, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Conv1D(10, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Flatten())
model.add(klayers.Dense(outputs))
#model.add(klayers.Dense(10, input_shape=input_shape))
#model.add(klayers.Dense(outputs))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
self.model = model
def predict(self, x, *args, **kwargs):
return self.model.predict(x, *args, **kwargs)
def train(self, x, y, *args, **kwargs):
self.model.fit(x, y, *args, **kwargs)
def main():
path = 'D:\\data\\M3\\M3Other\\N2836.csv'
data = np.genfromtxt(path)
print('Data len: {0}'.format(len(data)))
predict_points = 8
model = Model()
ts = tsutils.TimeSeries(data, test_size=predict_points, scaler=processing.StandardScaler())
x_train, y_train, t_train = ts.train_data(input_window=model.input_window, output_window=model.output_window, expand=True)
model.train(x_train, y_train, epochs=200)
#x_test, y_test, t_test = ts.train_data(input_window=model.input_window, output_window=model.output_window)
ctx = np.expand_dims(ts.get_test_context(model.input_window, expand=True), axis=0)
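    # Free-run (closed-loop) forecast: the model's own outputs are presumably fed
    # back as context by free_run_batch, so the last `predict_points` values are
    # predicted without peeking at the held-out test data.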
y_pred = tsutils.free_run_batch(model.predict, ctx, predict_points, ts, batch_size=1)
y_true = ts.get_test_data()
y_pred_flat = ts.inverse_y(np.squeeze(y_pred))
y_true_flat = ts.inverse_y(np.squeeze(y_true))
print(metrics.evaluate(y_true_flat, y_pred_flat, metrics=('smape', 'mae', 'umbrae')))
'''
x_all, y_all, t_all = ts.train_data(input_window=model.input_window, output_window=model.output_window)
y_all_pred = model.predict(x_all)
t_all_flat = ts.inverse_y(np.squeeze(t_all))
y_all_flat = ts.inverse_y(np.squeeze(y_all))
y_pred_pred_flat = ts.inverse_y(np.squeeze(y_all_pred))
plt.plot(t_all_flat, y_all_flat)
plt.plot(t_all_flat, y_pred_pred_flat)
plt.show()
'''
#y_free_run_flat = np.squeeze(predictions)
#plt.plot(np.reshape(y_all, (-1, )))
#plt.plot(np.concatenate((y_pred_flat, y_free_run_flat)))
#plt.show()
if __name__ == '__main__':
main()
|
[
"time_series.free_run_batch",
"processing.StandardScaler",
"keras.Sequential",
"keras.layers.Flatten",
"numpy.genfromtxt",
"keras.layers.Conv1D",
"metrics.evaluate",
"keras.layers.Dense",
"numpy.squeeze"
] |
[((1529, 1548), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {}), '(path)\n', (1542, 1548), True, 'import numpy as np\n'), ((2123, 2199), 'time_series.free_run_batch', 'tsutils.free_run_batch', (['model.predict', 'ctx', 'predict_points', 'ts'], {'batch_size': '(1)'}), '(model.predict, ctx, predict_points, ts, batch_size=1)\n', (2145, 2199), True, 'import time_series as tsutils\n'), ((680, 698), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (696, 698), False, 'import keras\n'), ((2264, 2282), 'numpy.squeeze', 'np.squeeze', (['y_pred'], {}), '(y_pred)\n', (2274, 2282), True, 'import numpy as np\n'), ((2315, 2333), 'numpy.squeeze', 'np.squeeze', (['y_true'], {}), '(y_true)\n', (2325, 2333), True, 'import numpy as np\n'), ((2346, 2424), 'metrics.evaluate', 'metrics.evaluate', (['y_true_flat', 'y_pred_flat'], {'metrics': "('smape', 'mae', 'umbrae')"}), "(y_true_flat, y_pred_flat, metrics=('smape', 'mae', 'umbrae'))\n", (2362, 2424), False, 'import metrics\n'), ((717, 814), 'keras.layers.Conv1D', 'klayers.Conv1D', (['(10)'], {'input_shape': 'input_shape', 'padding': '"""same"""', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(10, input_shape=input_shape, padding='same', kernel_size=3,\n activation='relu')\n", (731, 814), True, 'import keras.layers as klayers\n'), ((830, 898), 'keras.layers.Conv1D', 'klayers.Conv1D', (['(10)'], {'padding': '"""same"""', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(10, padding='same', kernel_size=3, activation='relu')\n", (844, 898), True, 'import keras.layers as klayers\n'), ((918, 986), 'keras.layers.Conv1D', 'klayers.Conv1D', (['(10)'], {'padding': '"""same"""', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(10, padding='same', kernel_size=3, activation='relu')\n", (932, 986), True, 'import keras.layers as klayers\n'), ((1006, 1023), 'keras.layers.Flatten', 'klayers.Flatten', ([], {}), '()\n', (1021, 1023), True, 'import keras.layers as klayers\n'), ((1043, 1065), 'keras.layers.Dense', 'klayers.Dense', (['outputs'], {}), '(outputs)\n', (1056, 1065), True, 'import keras.layers as klayers\n'), ((1706, 1733), 'processing.StandardScaler', 'processing.StandardScaler', ([], {}), '()\n', (1731, 1733), False, 'import processing\n')]
|
import bisect
import operator
import numpy as np
import torch
from torch.utils import data
from multilayer_perceptron import *
from utils import *
def preprocess_weights(weights):
w_later = np.abs(weights[-1])
w_input = np.abs(weights[0])
for i in range(len(weights) - 2, 0, -1):
w_later = np.matmul(w_later, np.abs(weights[i]))
return w_input, w_later
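# In the returned matrices, w_input[i, j] is the absolute first-layer weight from
# input feature j to hidden unit i, and w_later[:, i] aggregates the absolute
# influence of hidden unit i on the output through all later layers.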
def make_one_indexed(interaction_ranking):
return [(tuple(np.array(i) + 1), s) for i, s in interaction_ranking]
def interpret_interactions(w_input, w_later, get_main_effects=False):
interaction_strengths = {}
for i in range(w_later.shape[1]):
sorted_hweights = sorted(
enumerate(w_input[i]), key=lambda x: x[1], reverse=True
)
interaction_candidate = []
candidate_weights = []
for j in range(w_input.shape[1]):
bisect.insort(interaction_candidate, sorted_hweights[j][0])
candidate_weights.append(sorted_hweights[j][1])
if not get_main_effects and len(interaction_candidate) == 1:
continue
interaction_tup = tuple(interaction_candidate)
if interaction_tup not in interaction_strengths:
interaction_strengths[interaction_tup] = 0
interaction_strength = (min(candidate_weights)) * (np.sum(w_later[:, i]))
interaction_strengths[interaction_tup] += interaction_strength
interaction_ranking = sorted(
interaction_strengths.items(), key=operator.itemgetter(1), reverse=True
)
return interaction_ranking
def interpret_pairwise_interactions(w_input, w_later):
p = w_input.shape[1]
interaction_ranking = []
for i in range(p):
for j in range(p):
if i < j:
strength = (np.minimum(w_input[:, i], w_input[:, j]) * w_later).sum()
interaction_ranking.append(((i, j), strength))
interaction_ranking.sort(key=lambda x: x[1], reverse=True)
return interaction_ranking
def get_interactions(weights, pairwise=False, one_indexed=False):
w_input, w_later = preprocess_weights(weights)
if pairwise:
interaction_ranking = interpret_pairwise_interactions(w_input, w_later)
else:
interaction_ranking = interpret_interactions(w_input, w_later)
interaction_ranking = prune_redundant_interactions(interaction_ranking)
if one_indexed:
return make_one_indexed(interaction_ranking)
else:
return interaction_ranking
def prune_redundant_interactions(interaction_ranking, max_interactions=100):
interaction_ranking_pruned = []
current_superset_inters = []
for inter, strength in interaction_ranking:
set_inter = set(inter)
if len(interaction_ranking_pruned) >= max_interactions:
break
subset_inter_skip = False
update_superset_inters = []
for superset_inter in current_superset_inters:
if set_inter < superset_inter:
subset_inter_skip = True
break
elif not (set_inter > superset_inter):
update_superset_inters.append(superset_inter)
if subset_inter_skip:
continue
current_superset_inters = update_superset_inters
current_superset_inters.append(set_inter)
interaction_ranking_pruned.append((inter, strength))
return interaction_ranking_pruned
def detect_interactions(
Xd,
Yd,
arch=[256, 128, 64],
batch_size=100,
device=torch.device("cpu"),
seed=None,
**kwargs
):
if seed is not None:
set_seed(seed)
data_loaders = convert_to_torch_loaders(Xd, Yd, batch_size)
model = create_mlp([feats.shape[1]] + arch + [1]).to(device)
model, mlp_loss = train(model, data_loaders, device=device, **kwargs)
inters = get_interactions(get_weights(model))
return inters, mlp_loss
|
[
"numpy.minimum",
"numpy.abs",
"numpy.sum",
"numpy.array",
"torch.device",
"operator.itemgetter",
"bisect.insort"
] |
[((196, 215), 'numpy.abs', 'np.abs', (['weights[-1]'], {}), '(weights[-1])\n', (202, 215), True, 'import numpy as np\n'), ((230, 248), 'numpy.abs', 'np.abs', (['weights[0]'], {}), '(weights[0])\n', (236, 248), True, 'import numpy as np\n'), ((3519, 3538), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3531, 3538), False, 'import torch\n'), ((332, 350), 'numpy.abs', 'np.abs', (['weights[i]'], {}), '(weights[i])\n', (338, 350), True, 'import numpy as np\n'), ((872, 931), 'bisect.insort', 'bisect.insort', (['interaction_candidate', 'sorted_hweights[j][0]'], {}), '(interaction_candidate, sorted_hweights[j][0])\n', (885, 931), False, 'import bisect\n'), ((1509, 1531), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1528, 1531), False, 'import operator\n'), ((1333, 1354), 'numpy.sum', 'np.sum', (['w_later[:, i]'], {}), '(w_later[:, i])\n', (1339, 1354), True, 'import numpy as np\n'), ((445, 456), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (453, 456), True, 'import numpy as np\n'), ((1796, 1836), 'numpy.minimum', 'np.minimum', (['w_input[:, i]', 'w_input[:, j]'], {}), '(w_input[:, i], w_input[:, j])\n', (1806, 1836), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation
from keras.layers.core import Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.utils import to_categorical
trainFile = pd.read_csv('./dataset/train.csv').drop(columns="datasetId")
testFile = pd.read_csv('./dataset/test.csv').drop(columns="datasetId")
# train
train_samples = trainFile.drop(columns='condition').to_numpy()
train_labels = trainFile['condition'].to_numpy()
# test
test_samples = testFile.drop(columns='condition').to_numpy()
test_labels = testFile['condition'].to_numpy()
# normalizing features
scaler = MinMaxScaler(feature_range=(0, 1))
train_samples = scaler.fit_transform(train_samples)
test_samples = scaler.transform(test_samples)
# one-hot-encoding labels
one_hot_encoder = OneHotEncoder(categories='auto')
train_labels = one_hot_encoder.fit_transform(train_labels.reshape(-1, 1)).toarray()
test_labels = one_hot_encoder.fit_transform(test_labels.reshape(-1, 1)).toarray()
# build the model
model = Sequential([
Dense(34, input_shape=[34, ], activation='relu'),
Dense(20, activation='relu'),
Dense(10, activation='relu'),
Dense(3, activation='softmax')
])
print(model.summary())
model.compile(Adam(lr=.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_samples, train_labels, validation_split=0.1, batch_size=10, epochs=10, shuffle=True, verbose=2)
model.save('model.h5')
predictions = model.predict(test_samples)
print(predictions)
np.savetxt('predictions.csv', predictions, delimiter=",")
|
[
"keras.layers.core.Dense",
"pandas.read_csv",
"numpy.savetxt",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.OneHotEncoder",
"keras.optimizers.Adam"
] |
[((826, 860), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (838, 860), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1008, 1040), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (1021, 1040), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1746, 1804), 'numpy.savetxt', 'np.savetxt', (['"""predictions.csv"""', 'test_samples'], {'delimiter': '""","""'}), "('predictions.csv', test_samples, delimiter=',')\n", (1756, 1804), True, 'import numpy as np\n'), ((1446, 1461), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (1450, 1461), False, 'from keras.optimizers import Adam\n'), ((424, 458), 'pandas.read_csv', 'pd.read_csv', (['"""./dataset/train.csv"""'], {}), "('./dataset/train.csv')\n", (435, 458), True, 'import pandas as pd\n'), ((496, 529), 'pandas.read_csv', 'pd.read_csv', (['"""./dataset/test.csv"""'], {}), "('./dataset/test.csv')\n", (507, 529), True, 'import pandas as pd\n'), ((1251, 1297), 'keras.layers.core.Dense', 'Dense', (['(34)'], {'input_shape': '[34]', 'activation': '"""relu"""'}), "(34, input_shape=[34], activation='relu')\n", (1256, 1297), False, 'from keras.layers.core import Dense\n'), ((1305, 1333), 'keras.layers.core.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (1310, 1333), False, 'from keras.layers.core import Dense\n'), ((1339, 1367), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (1344, 1367), False, 'from keras.layers.core import Dense\n'), ((1373, 1403), 'keras.layers.core.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (1378, 1403), False, 'from keras.layers.core import Dense\n')]
|
r'''This dataloader is an attempt to make a master DL that provides 2 augmented versions
of a sparse clip (covering at least 64 frames) and 2 augmented versions of 4 dense clips
(each covering a temporal span of at least 16 frames)'''
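# Frame layout sketch (assuming params.num_frames == 16 and params.sr_ratio == 4,
# which matches the spans stated in the docstring): with start frame s,
#   sparse clip  : frames [s, s+4, s+8, ..., s+60]        -> 64-frame span
#   dense clip j : frames [s+16j, s+16j+1, ..., s+16j+15] -> 16-frame span (j = 0..3)
# Each of the five clips is returned twice with independent augmentations.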
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import config as cfg
import random
import pickle
import parameters as params
import json
import math
import cv2
# from tqdm import tqdm
import time
import torchvision.transforms as trans
# from decord import VideoReader
class ss_dataset_gen1(Dataset):
def __init__(self, shuffle = True, data_percentage = 1.0, split = 1):
#####################
# self.all_paths = open(os.path.join(cfg.path_folder,'train_vids.txt'),'r').read().splitlines()
if split == 1:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist01.txt'),'r').read().splitlines()
elif split ==2:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist02.txt'),'r').read().splitlines()
elif split ==3:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist03.txt'),'r').read().splitlines()
else:
print(f'Invalid split input: {split}')
#####################
self.shuffle = shuffle
if self.shuffle:
random.shuffle(self.all_paths)
self.data_percentage = data_percentage
self.data_limit = int(len(self.all_paths)*self.data_percentage)
self.data = self.all_paths[0: self.data_limit]
self.PIL = trans.ToPILImage()
self.TENSOR = trans.ToTensor()
self.erase_size = 19
def __len__(self):
return len(self.data)
def __getitem__(self,index):
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path = self.process_data(index)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path
def process_data(self, idx):
vid_path = cfg.path_folder + '/UCF-101/' + self.data[idx].split(' ')[0]
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense = self.build_clip(vid_path)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path
def build_clip(self, vid_path):
try:
cap = cv2.VideoCapture(vid_path)
cap.set(1, 0)
frame_count = cap.get(7)
if frame_count <= 56:
# print(f'Video {vid_path} has insufficient frames')
return None, None, None, None, None, None, None, None, None, None, None, None
############################# frame_list maker start here#################################
min_temporal_span_sparse = params.num_frames*params.sr_ratio
if frame_count > min_temporal_span_sparse:
start_frame = np.random.randint(0,frame_count-min_temporal_span_sparse)
#Dynamic skip rate experiment
# skip_max = int((frame_count - start_frame)/params.num_frames)
# # here 4 is the skip rate ratio = 4 chunks
# if skip_max >= 16:
# sr_sparse = np.random.choice([4,8,12,16])
# elif (skip_max<16) and (skip_max>=12):
# sr_sparse = np.random.choice([4,8,12])
# elif (skip_max<12) and (skip_max>=8):
# sr_sparse = np.random.choice([4,8])
# else:
sr_sparse = 4
else:
start_frame = 0
sr_sparse = 4
sr_dense = int(sr_sparse/4)
frames_sparse = [start_frame] + [start_frame + i*sr_sparse for i in range(1,params.num_frames)]
frames_dense = [[frames_sparse[j*4]]+[frames_sparse[j*4] + i*sr_dense for i in range(1,params.num_frames)] for j in range(4)]
################################ frame list maker finishes here ###########################
################################ actual clip builder starts here ##########################
sparse_clip = []
dense_clip0 = []
dense_clip1 = []
dense_clip2 = []
dense_clip3 = []
a_sparse_clip = []
a_dense_clip0 = []
a_dense_clip1 = []
a_dense_clip2 = []
a_dense_clip3 = []
list_sparse = []
list_dense = [[] for i in range(4)]
count = -1
random_array = np.random.rand(10,8)
x_erase = np.random.randint(0,params.reso_h, size = (10,))
y_erase = np.random.randint(0,params.reso_w, size = (10,))
            cropping_factor1 = np.random.uniform(0.6, 1, size = (10,)) # on average the cropping factor is 0.8, i.e. it covers 64% of the area
x0 = [np.random.randint(0, params.ori_reso_w - params.ori_reso_w*cropping_factor1[ii] + 1) for ii in range(10)]
y0 = [np.random.randint(0, params.ori_reso_h - params.ori_reso_h*cropping_factor1[ii] + 1) for ii in range(10)]
contrast_factor1 = np.random.uniform(0.75,1.25, size = (10,))
hue_factor1 = np.random.uniform(-0.1,0.1, size = (10,))
saturation_factor1 = np.random.uniform(0.75,1.25, size = (10,))
brightness_factor1 = np.random.uniform(0.75,1.25,size = (10,))
gamma1 = np.random.uniform(0.75,1.25, size = (10,))
erase_size1 = np.random.randint(int(self.erase_size/2),self.erase_size, size = (10,))
erase_size2 = np.random.randint(int(self.erase_size/2),self.erase_size, size = (10,))
random_color_dropped = np.random.randint(0,3,(10))
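            # All augmentation parameters are sampled once per video: ten independent
            # parameter sets, one per returned clip (5 clips x 2 views), so frames
            # within a clip share the same augmentation while the two views differ.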
while(cap.isOpened()):
count += 1
ret, frame = cap.read()
if ((count not in frames_sparse) and (count not in frames_dense[0]) \
and (count not in frames_dense[1]) and (count not in frames_dense[2]) \
and (count not in frames_dense[3])) and (ret == True):
continue
if ret == True:
if (count in frames_sparse):
sparse_clip.append(self.augmentation(frame, random_array[0], x_erase[0], y_erase[0], cropping_factor1[0],\
x0[0], y0[0], contrast_factor1[0], hue_factor1[0], saturation_factor1[0], brightness_factor1[0],\
gamma1[0],erase_size1[0],erase_size2[0], random_color_dropped[0]))
a_sparse_clip.append(self.augmentation(frame, random_array[1], x_erase[1], y_erase[1], cropping_factor1[1],\
x0[1], y0[1], contrast_factor1[1], hue_factor1[1], saturation_factor1[1], brightness_factor1[1],\
gamma1[1],erase_size1[1],erase_size2[1], random_color_dropped[1]))
list_sparse.append(count)
if (count in frames_dense[0]):
dense_clip0.append(self.augmentation(frame, random_array[2], x_erase[2], y_erase[2], cropping_factor1[2],\
x0[2], y0[2], contrast_factor1[2], hue_factor1[2], saturation_factor1[2], brightness_factor1[2],\
gamma1[2],erase_size1[2],erase_size2[2], random_color_dropped[2]))
a_dense_clip0.append(self.augmentation(frame, random_array[3], x_erase[3], y_erase[3], cropping_factor1[3],\
x0[3], y0[3], contrast_factor1[3], hue_factor1[3], saturation_factor1[3], brightness_factor1[3],\
gamma1[3],erase_size1[3],erase_size2[3], random_color_dropped[3]))
list_dense[0].append(count)
if (count in frames_dense[1]):
dense_clip1.append(self.augmentation(frame, random_array[4], x_erase[4], y_erase[4], cropping_factor1[4],\
x0[4], y0[4], contrast_factor1[4], hue_factor1[4], saturation_factor1[4], brightness_factor1[4],\
gamma1[4],erase_size1[4],erase_size2[4], random_color_dropped[4]))
a_dense_clip1.append(self.augmentation(frame, random_array[5], x_erase[5], y_erase[5], cropping_factor1[5],\
x0[5], y0[5], contrast_factor1[5], hue_factor1[5], saturation_factor1[5], brightness_factor1[5],\
gamma1[5],erase_size1[5],erase_size2[5], random_color_dropped[5]))
list_dense[1].append(count)
if (count in frames_dense[2]):
dense_clip2.append(self.augmentation(frame, random_array[6], x_erase[6], y_erase[6], cropping_factor1[6],\
x0[6], y0[6], contrast_factor1[6], hue_factor1[6], saturation_factor1[6], brightness_factor1[6],\
gamma1[6],erase_size1[6],erase_size2[6], random_color_dropped[6]))
a_dense_clip2.append(self.augmentation(frame, random_array[7], x_erase[7], y_erase[7], cropping_factor1[7],\
x0[7], y0[7], contrast_factor1[7], hue_factor1[7], saturation_factor1[7], brightness_factor1[7],\
gamma1[7],erase_size1[7],erase_size2[7], random_color_dropped[7]))
list_dense[2].append(count)
if (count in frames_dense[3]):
dense_clip3.append(self.augmentation(frame, random_array[8], x_erase[8], y_erase[8], cropping_factor1[8],\
x0[8], y0[8], contrast_factor1[8], hue_factor1[8], saturation_factor1[8], brightness_factor1[8],\
gamma1[8],erase_size1[8],erase_size2[8], random_color_dropped[8]))
a_dense_clip3.append(self.augmentation(frame, random_array[9], x_erase[9], y_erase[9], cropping_factor1[9],\
x0[9], y0[9], contrast_factor1[9], hue_factor1[9], saturation_factor1[9], brightness_factor1[9],\
gamma1[9],erase_size1[9],erase_size2[9], random_color_dropped[9]))
list_dense[3].append(count)
else:
break
if len(sparse_clip) < params.num_frames and len(sparse_clip)>13:
# if params.num_frames - len(sparse_clip) >= 1:
# print(f'sparse_clip {vid_path} is missing {params.num_frames - len(sparse_clip)} frames')
remaining_num_frames = params.num_frames - len(sparse_clip)
sparse_clip = sparse_clip + sparse_clip[::-1][1:remaining_num_frames+1]
a_sparse_clip = a_sparse_clip + a_sparse_clip[::-1][1:remaining_num_frames+1]
if len(dense_clip3) < params.num_frames and len(dense_clip3)>7:
# if params.num_frames - len(dense_clip3) >= 1:
# print(f'dense_clip3 {vid_path} is missing {params.num_frames - len(dense_clip3)} frames')
remaining_num_frames = params.num_frames - len(dense_clip3)
dense_clip3 = dense_clip3 + dense_clip3[::-1][1:remaining_num_frames+1]
a_dense_clip3 = a_dense_clip3 + a_dense_clip3[::-1][1:remaining_num_frames+1]
try:
assert(len(sparse_clip)==params.num_frames)
assert(len(dense_clip0)==params.num_frames)
assert(len(dense_clip1)==params.num_frames)
assert(len(dense_clip2)==params.num_frames)
assert(len(dense_clip3)==params.num_frames)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense
except:
print(f'Clip {vid_path} has some frames reading issue, failed')
return None, None, None, None, None, None, None, None, None, None, None, None
except:
print(f'Clip {vid_path} has some unknown issue, failed')
return None, None, None, None, None, None, None, None, None, None, None, None
def augmentation(self, image, random_array, x_erase, y_erase, cropping_factor1,\
x0, y0, contrast_factor1, hue_factor1, saturation_factor1, brightness_factor1,\
gamma1,erase_size1,erase_size2, random_color_dropped):
image = self.PIL(image)
image = trans.functional.resized_crop(image,y0,x0,int(params.ori_reso_h*cropping_factor1),int(params.ori_reso_h*cropping_factor1),(params.reso_h,params.reso_w),interpolation=2)
if random_array[0] < 0.125:
image = trans.functional.adjust_contrast(image, contrast_factor = contrast_factor1) #0.75 to 1.25
if random_array[1] < 0.3 :
image = trans.functional.adjust_hue(image, hue_factor = hue_factor1) # hue factor will be between [-0.1, 0.1]
if random_array[2] < 0.3 :
            image = trans.functional.adjust_saturation(image, saturation_factor = saturation_factor1) # saturation factor will be between [0.75, 1.25]
if random_array[3] < 0.3 :
            image = trans.functional.adjust_brightness(image, brightness_factor = brightness_factor1) # brightness factor will be between [0.75, 1.25]
if random_array[0] > 0.125 and random_array[0] < 0.25:
image = trans.functional.adjust_contrast(image, contrast_factor = contrast_factor1) #0.75 to 1.25
if random_array[4] > 0.70:
if random_array[4] < 0.875:
image = trans.functional.to_grayscale(image, num_output_channels = 3)
if random_array[5] > 0.25:
image = trans.functional.adjust_gamma(image, gamma = gamma1, gain=1) #gamma range [0.8, 1.2]
else:
image = trans.functional.to_tensor(image)
image[random_color_dropped,:,:] = 0
image = self.PIL(image)
if random_array[6] > 0.5:
image = trans.functional.hflip(image)
image = trans.functional.to_tensor(image)
if random_array[7] < 0.5 :
image = trans.functional.erase(image, x_erase, y_erase, erase_size1, erase_size2, v=0)
return image
def collate_fn2(batch):
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path = [], [], [], [], [], [], [], [], [], [], [], [], []
for item in batch:
if not (None in item):
sparse_clip.append(torch.stack(item[0],dim=0))
dense_clip0.append(torch.stack(item[1],dim=0))
dense_clip1.append(torch.stack(item[2],dim=0))
dense_clip2.append(torch.stack(item[3],dim=0))
dense_clip3.append(torch.stack(item[4],dim=0))
a_sparse_clip.append(torch.stack(item[5],dim=0))
a_dense_clip0.append(torch.stack(item[6],dim=0))
a_dense_clip1.append(torch.stack(item[7],dim=0))
a_dense_clip2.append(torch.stack(item[8],dim=0))
a_dense_clip3.append(torch.stack(item[9],dim=0))
list_sparse.append(np.asarray(item[10]))
list_dense.append(np.asarray(item[11]))
vid_path.append(item[12])
sparse_clip = torch.stack(sparse_clip, dim=0)
dense_clip0 = torch.stack(dense_clip0, dim=0)
dense_clip1 = torch.stack(dense_clip1, dim=0)
dense_clip2 = torch.stack(dense_clip2, dim=0)
dense_clip3 = torch.stack(dense_clip3, dim=0)
a_sparse_clip = torch.stack(a_sparse_clip, dim=0)
a_dense_clip0 = torch.stack(a_dense_clip0, dim=0)
a_dense_clip1 = torch.stack(a_dense_clip1, dim=0)
a_dense_clip2 = torch.stack(a_dense_clip2, dim=0)
a_dense_clip3 = torch.stack(a_dense_clip3, dim=0)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path
if __name__ == '__main__':
train_dataset = ss_dataset_gen1(shuffle = True, data_percentage = 1.0)
train_dataloader = DataLoader(train_dataset, batch_size=40, \
shuffle=False, num_workers=4, collate_fn=collate_fn2)
print(f'Step involved: {len(train_dataset)/24}')
t=time.time()
for i, (sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path) in enumerate(train_dataloader):
if (i+1)%25 == 0:
print(sparse_clip.shape)
print(dense_clip3.shape)
print()
print(f'Time taken to load data is {time.time()-t}')
|
[
"torchvision.transforms.functional.to_tensor",
"random.shuffle",
"torchvision.transforms.functional.adjust_saturation",
"numpy.random.randint",
"os.path.join",
"torch.utils.data.DataLoader",
"torchvision.transforms.functional.hflip",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.functional.erase",
"torchvision.transforms.functional.adjust_hue",
"torchvision.transforms.functional.adjust_contrast",
"numpy.asarray",
"torchvision.transforms.functional.to_grayscale",
"torchvision.transforms.functional.adjust_gamma",
"torchvision.transforms.functional.adjust_brightness",
"numpy.random.uniform",
"torch.stack",
"time.time",
"cv2.VideoCapture",
"numpy.random.rand",
"torchvision.transforms.ToTensor"
] |
[((16132, 16163), 'torch.stack', 'torch.stack', (['sparse_clip'], {'dim': '(0)'}), '(sparse_clip, dim=0)\n', (16143, 16163), False, 'import torch\n'), ((16182, 16213), 'torch.stack', 'torch.stack', (['dense_clip0'], {'dim': '(0)'}), '(dense_clip0, dim=0)\n', (16193, 16213), False, 'import torch\n'), ((16232, 16263), 'torch.stack', 'torch.stack', (['dense_clip1'], {'dim': '(0)'}), '(dense_clip1, dim=0)\n', (16243, 16263), False, 'import torch\n'), ((16282, 16313), 'torch.stack', 'torch.stack', (['dense_clip2'], {'dim': '(0)'}), '(dense_clip2, dim=0)\n', (16293, 16313), False, 'import torch\n'), ((16332, 16363), 'torch.stack', 'torch.stack', (['dense_clip3'], {'dim': '(0)'}), '(dense_clip3, dim=0)\n', (16343, 16363), False, 'import torch\n'), ((16385, 16418), 'torch.stack', 'torch.stack', (['a_sparse_clip'], {'dim': '(0)'}), '(a_sparse_clip, dim=0)\n', (16396, 16418), False, 'import torch\n'), ((16439, 16472), 'torch.stack', 'torch.stack', (['a_dense_clip0'], {'dim': '(0)'}), '(a_dense_clip0, dim=0)\n', (16450, 16472), False, 'import torch\n'), ((16493, 16526), 'torch.stack', 'torch.stack', (['a_dense_clip1'], {'dim': '(0)'}), '(a_dense_clip1, dim=0)\n', (16504, 16526), False, 'import torch\n'), ((16547, 16580), 'torch.stack', 'torch.stack', (['a_dense_clip2'], {'dim': '(0)'}), '(a_dense_clip2, dim=0)\n', (16558, 16580), False, 'import torch\n'), ((16601, 16634), 'torch.stack', 'torch.stack', (['a_dense_clip3'], {'dim': '(0)'}), '(a_dense_clip3, dim=0)\n', (16612, 16634), False, 'import torch\n'), ((16987, 17085), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': '(40)', 'shuffle': '(False)', 'num_workers': '(4)', 'collate_fn': 'collate_fn2'}), '(train_dataset, batch_size=40, shuffle=False, num_workers=4,\n collate_fn=collate_fn2)\n', (16997, 17085), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((17151, 17162), 'time.time', 'time.time', ([], {}), '()\n', (17160, 17162), False, 'import time\n'), ((1717, 1735), 'torchvision.transforms.ToPILImage', 'trans.ToPILImage', ([], {}), '()\n', (1733, 1735), True, 'import torchvision.transforms as trans\n'), ((1758, 1774), 'torchvision.transforms.ToTensor', 'trans.ToTensor', ([], {}), '()\n', (1772, 1774), True, 'import torchvision.transforms as trans\n'), ((14838, 14871), 'torchvision.transforms.functional.to_tensor', 'trans.functional.to_tensor', (['image'], {}), '(image)\n', (14864, 14871), True, 'import torchvision.transforms as trans\n'), ((1484, 1514), 'random.shuffle', 'random.shuffle', (['self.all_paths'], {}), '(self.all_paths)\n', (1498, 1514), False, 'import random\n'), ((2954, 2980), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vid_path'], {}), '(vid_path)\n', (2970, 2980), False, 'import cv2\n'), ((5208, 5229), 'numpy.random.rand', 'np.random.rand', (['(10)', '(8)'], {}), '(10, 8)\n', (5222, 5229), True, 'import numpy as np\n'), ((5251, 5298), 'numpy.random.randint', 'np.random.randint', (['(0)', 'params.reso_h'], {'size': '(10,)'}), '(0, params.reso_h, size=(10,))\n', (5268, 5298), True, 'import numpy as np\n'), ((5322, 5369), 'numpy.random.randint', 'np.random.randint', (['(0)', 'params.reso_w'], {'size': '(10,)'}), '(0, params.reso_w, size=(10,))\n', (5339, 5369), True, 'import numpy as np\n'), ((5404, 5441), 'numpy.random.uniform', 'np.random.uniform', (['(0.6)', '(1)'], {'size': '(10,)'}), '(0.6, 1, size=(10,))\n', (5421, 5441), True, 'import numpy as np\n'), ((5794, 5835), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', 
(5811, 5835), True, 'import numpy as np\n'), ((5863, 5903), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': '(10,)'}), '(-0.1, 0.1, size=(10,))\n', (5880, 5903), True, 'import numpy as np\n'), ((5938, 5979), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', (5955, 5979), True, 'import numpy as np\n'), ((6014, 6055), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', (6031, 6055), True, 'import numpy as np\n'), ((6077, 6118), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.25)'], {'size': '(10,)'}), '(0.75, 1.25, size=(10,))\n', (6094, 6118), True, 'import numpy as np\n'), ((6353, 6380), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (6370, 6380), True, 'import numpy as np\n'), ((13457, 13530), 'torchvision.transforms.functional.adjust_contrast', 'trans.functional.adjust_contrast', (['image'], {'contrast_factor': 'contrast_factor1'}), '(image, contrast_factor=contrast_factor1)\n', (13489, 13530), True, 'import torchvision.transforms as trans\n'), ((13602, 13660), 'torchvision.transforms.functional.adjust_hue', 'trans.functional.adjust_hue', (['image'], {'hue_factor': 'hue_factor1'}), '(image, hue_factor=hue_factor1)\n', (13629, 13660), True, 'import torchvision.transforms as trans\n'), ((13759, 13838), 'torchvision.transforms.functional.adjust_saturation', 'trans.functional.adjust_saturation', (['image'], {'saturation_factor': 'saturation_factor1'}), '(image, saturation_factor=saturation_factor1)\n', (13793, 13838), True, 'import torchvision.transforms as trans\n'), ((13945, 14024), 'torchvision.transforms.functional.adjust_brightness', 'trans.functional.adjust_brightness', (['image'], {'brightness_factor': 'brightness_factor1'}), '(image, brightness_factor=brightness_factor1)\n', (13979, 14024), True, 'import torchvision.transforms as trans\n'), ((14159, 14232), 'torchvision.transforms.functional.adjust_contrast', 'trans.functional.adjust_contrast', (['image'], {'contrast_factor': 'contrast_factor1'}), '(image, contrast_factor=contrast_factor1)\n', (14191, 14232), True, 'import torchvision.transforms as trans\n'), ((14789, 14818), 'torchvision.transforms.functional.hflip', 'trans.functional.hflip', (['image'], {}), '(image)\n', (14811, 14818), True, 'import torchvision.transforms as trans\n'), ((14928, 15006), 'torchvision.transforms.functional.erase', 'trans.functional.erase', (['image', 'x_erase', 'y_erase', 'erase_size1', 'erase_size2'], {'v': '(0)'}), '(image, x_erase, y_erase, erase_size1, erase_size2, v=0)\n', (14950, 15006), True, 'import torchvision.transforms as trans\n'), ((3502, 3562), 'numpy.random.randint', 'np.random.randint', (['(0)', '(frame_count - min_temporal_span_sparse)'], {}), '(0, frame_count - min_temporal_span_sparse)\n', (3519, 3562), True, 'import numpy as np\n'), ((5522, 5612), 'numpy.random.randint', 'np.random.randint', (['(0)', '(params.ori_reso_w - params.ori_reso_w * cropping_factor1[ii] + 1)'], {}), '(0, params.ori_reso_w - params.ori_reso_w *\n cropping_factor1[ii] + 1)\n', (5539, 5612), True, 'import numpy as np\n'), ((5656, 5746), 'numpy.random.randint', 'np.random.randint', (['(0)', '(params.ori_reso_h - params.ori_reso_h * cropping_factor1[ii] + 1)'], {}), '(0, params.ori_reso_h - params.ori_reso_h *\n cropping_factor1[ii] + 1)\n', (5673, 5746), True, 'import numpy as np\n'), ((14348, 14407), 
'torchvision.transforms.functional.to_grayscale', 'trans.functional.to_grayscale', (['image'], {'num_output_channels': '(3)'}), '(image, num_output_channels=3)\n', (14377, 14407), True, 'import torchvision.transforms as trans\n'), ((14608, 14641), 'torchvision.transforms.functional.to_tensor', 'trans.functional.to_tensor', (['image'], {}), '(image)\n', (14634, 14641), True, 'import torchvision.transforms as trans\n'), ((15384, 15411), 'torch.stack', 'torch.stack', (['item[0]'], {'dim': '(0)'}), '(item[0], dim=0)\n', (15395, 15411), False, 'import torch\n'), ((15444, 15471), 'torch.stack', 'torch.stack', (['item[1]'], {'dim': '(0)'}), '(item[1], dim=0)\n', (15455, 15471), False, 'import torch\n'), ((15503, 15530), 'torch.stack', 'torch.stack', (['item[2]'], {'dim': '(0)'}), '(item[2], dim=0)\n', (15514, 15530), False, 'import torch\n'), ((15562, 15589), 'torch.stack', 'torch.stack', (['item[3]'], {'dim': '(0)'}), '(item[3], dim=0)\n', (15573, 15589), False, 'import torch\n'), ((15621, 15648), 'torch.stack', 'torch.stack', (['item[4]'], {'dim': '(0)'}), '(item[4], dim=0)\n', (15632, 15648), False, 'import torch\n'), ((15683, 15710), 'torch.stack', 'torch.stack', (['item[5]'], {'dim': '(0)'}), '(item[5], dim=0)\n', (15694, 15710), False, 'import torch\n'), ((15744, 15771), 'torch.stack', 'torch.stack', (['item[6]'], {'dim': '(0)'}), '(item[6], dim=0)\n', (15755, 15771), False, 'import torch\n'), ((15805, 15832), 'torch.stack', 'torch.stack', (['item[7]'], {'dim': '(0)'}), '(item[7], dim=0)\n', (15816, 15832), False, 'import torch\n'), ((15866, 15893), 'torch.stack', 'torch.stack', (['item[8]'], {'dim': '(0)'}), '(item[8], dim=0)\n', (15877, 15893), False, 'import torch\n'), ((15927, 15954), 'torch.stack', 'torch.stack', (['item[9]'], {'dim': '(0)'}), '(item[9], dim=0)\n', (15938, 15954), False, 'import torch\n'), ((15988, 16008), 'numpy.asarray', 'np.asarray', (['item[10]'], {}), '(item[10])\n', (15998, 16008), True, 'import numpy as np\n'), ((16040, 16060), 'numpy.asarray', 'np.asarray', (['item[11]'], {}), '(item[11])\n', (16050, 16060), True, 'import numpy as np\n'), ((14481, 14539), 'torchvision.transforms.functional.adjust_gamma', 'trans.functional.adjust_gamma', (['image'], {'gamma': 'gamma1', 'gain': '(1)'}), '(image, gamma=gamma1, gain=1)\n', (14510, 14539), True, 'import torchvision.transforms as trans\n'), ((17572, 17583), 'time.time', 'time.time', ([], {}), '()\n', (17581, 17583), False, 'import time\n'), ((920, 985), 'os.path.join', 'os.path.join', (['cfg.path_folder', '"""ucfTrainTestlist/trainlist01.txt"""'], {}), "(cfg.path_folder, 'ucfTrainTestlist/trainlist01.txt')\n", (932, 985), False, 'import os\n'), ((1070, 1135), 'os.path.join', 'os.path.join', (['cfg.path_folder', '"""ucfTrainTestlist/trainlist02.txt"""'], {}), "(cfg.path_folder, 'ucfTrainTestlist/trainlist02.txt')\n", (1082, 1135), False, 'import os\n'), ((1220, 1285), 'os.path.join', 'os.path.join', (['cfg.path_folder', '"""ucfTrainTestlist/trainlist03.txt"""'], {}), "(cfg.path_folder, 'ucfTrainTestlist/trainlist03.txt')\n", (1232, 1285), False, 'import os\n')]
|
# General
import numpy as np
import random
import argparse
import json
import commentjson
import joblib
import os
import pathlib
from collections import OrderedDict
# Pytorch
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
# Optuna
import optuna
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
from . import SubGNN as md
from SubGNN import config
def parse_arguments():
"""
Read in the config file specifying all of the parameters
"""
parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
parser.add_argument("-config_path", type=str, default=None, help="Load config file")
args = parser.parse_args()
return args
def read_json(fname):
"""
Read in the json file specified by 'fname'
"""
with open(fname, "rt") as handle:
return commentjson.load(handle, object_hook=OrderedDict)
def get_optuna_suggest(param_dict, name, trial):
"""
Returns a suggested value for the hyperparameter specified by 'name' from the range
of values in 'param_dict'
name: string specifying hyperparameter
trial: optuna trial
param_dict: dictionary containing information about the hyperparameter (range of
values & type of sampler)
e.g.{
"type" : "suggest_categorical",
"args" : [[ 64, 128]]
}
"""
module_name = param_dict["type"] # e.g. suggest_categorical, suggest_float
args = [name]
args.extend(
param_dict["args"]
) # resulting list will look something like this ['batch_size', [ 64, 128]]
if "kwargs" in param_dict:
kwargs = dict(param_dict["kwargs"])
return getattr(trial, module_name)(*args, **kwargs)
else:
return getattr(trial, module_name)(*args)
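# Editorial sketch (illustrative values, not from the original config): with
#   {"type": "suggest_categorical", "args": [[64, 128]]}
# under the name "batch_size", the call above resolves to
#   trial.suggest_categorical("batch_size", [64, 128])
# while
#   {"type": "suggest_float", "args": [1e-05, 0.1], "kwargs": {"log": true}}
# under the name "learning_rate" resolves to
#   trial.suggest_float("learning_rate", 1e-05, 0.1, log=True)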
def get_hyperparams_optuna(run_config, trial):
"""
Converts the fixed and variable hyperparameters in the run config to a dictionary of
the final hyperparameters
Returns: hyp_fix - dictionary where key is the hyperparameter name (e.g. batch_size)
and value is the hyperparameter value
"""
# initialize the dict with the fixed hyperparameters
hyp_fix = dict(run_config["hyperparams_fix"])
# update the dict with variable value hyperparameters by sampling a hyperparameter
# value from the range specified in the run_config
hyp_optuna = {
k: get_optuna_suggest(run_config["hyperparams_optuna"][k], k, trial)
for k in dict(run_config["hyperparams_optuna"]).keys()
}
hyp_fix.update(hyp_optuna)
return hyp_fix
def build_model(run_config, trial=None):
"""
Creates SubGNN from the hyperparameters specified in the run config
"""
# get hyperparameters for the current trial
hyperparameters = get_hyperparams_optuna(run_config, trial)
# Set seeds for reproducibility
torch.manual_seed(hyperparameters["seed"])
np.random.seed(hyperparameters["seed"])
torch.cuda.manual_seed(hyperparameters["seed"])
torch.cuda.manual_seed_all(hyperparameters["seed"])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# initialize SubGNN
model = md.SubGNN_Chem(
hyperparameters,
run_config["graph_path"],
run_config["subgraphs_path"],
run_config["embedding_path"],
run_config["similarities_path"],
run_config["shortest_paths_path"],
run_config["degree_sequence_path"],
run_config["ego_graph_path"],
)
return model, hyperparameters
def build_trainer(run_config, hyperparameters, trial=None):
"""
Set up optuna trainer
"""
if "progress_bar_refresh_rate" in hyperparameters:
p_refresh = hyperparameters["progress_bar_refresh_rate"]
else:
p_refresh = 5
# set epochs, gpus, gradient clipping, etc.
# if 'no_gpu' in run config, then use CPU
trainer_kwargs = {
"max_epochs": hyperparameters["max_epochs"],
"gpus": 0 if "no_gpu" in run_config else 1,
"num_sanity_val_steps": 0,
"progress_bar_refresh_rate": p_refresh,
"gradient_clip_val": hyperparameters["grad_clip"],
}
# set auto learning rate finder param
if "auto_lr_find" in hyperparameters and hyperparameters["auto_lr_find"]:
trainer_kwargs["auto_lr_find"] = hyperparameters["auto_lr_find"]
# Create tensorboard logger
lgdir = os.path.join(run_config["tb"]["dir_full"], run_config["tb"]["name"])
if not os.path.exists(lgdir):
os.makedirs(lgdir)
logger = TensorBoardLogger(
run_config["tb"]["dir_full"],
name=run_config["tb"]["name"],
version="version_" + str(random.randint(0, 10000000)),
)
if not os.path.exists(logger.log_dir):
os.makedirs(logger.log_dir)
print("Tensorboard logging at ", logger.log_dir)
trainer_kwargs["logger"] = logger
# Save top three model checkpoints
trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
filepath=os.path.join(
logger.log_dir, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"
),
save_top_k=3,
verbose=True,
monitor=run_config["optuna"]["monitor_metric"],
mode="max",
)
# if we use pruning, use the pytorch lightning pruning callback
if run_config["optuna"]["pruning"]:
trainer_kwargs["early_stop_callback"] = PyTorchLightningPruningCallback(
trial, monitor=run_config["optuna"]["monitor_metric"]
)
trainer = pl.Trainer(**trainer_kwargs)
return trainer, trainer_kwargs, logger.log_dir
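# Editorial note: the "checkpoint_callback" and "early_stop_callback" entries in
# trainer_kwargs above follow the older pytorch-lightning Trainer signature;
# newer releases expect callbacks to be passed through a callbacks=[...] list
# instead, so this code assumes the earlier API.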
def train_model(run_config, trial=None):
"""
Train a single model whose hyperparameters are specified in the run config
Returns the max (or min) metric specified by 'monitor_metric' in the run config
"""
# get model and hyperparameter dict
model, hyperparameters = build_model(run_config, trial)
# build optuna trainer
trainer, trainer_kwargs, results_path = build_trainer(
run_config, hyperparameters, trial
)
# dump hyperparameters to results dir
hparam_file = open(os.path.join(results_path, "hyperparams.json"), "w")
hparam_file.write(json.dumps(hyperparameters, indent=4))
hparam_file.close()
# dump trainer args to results dir
tkwarg_file = open(os.path.join(results_path, "trainer_kwargs.json"), "w")
pop_keys = [
key
for key in ["logger", "profiler", "early_stop_callback", "checkpoint_callback"]
if key in trainer_kwargs.keys()
]
[trainer_kwargs.pop(key) for key in pop_keys]
tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
tkwarg_file.close()
# train the model
trainer.fit(model)
# write results to the results dir
if results_path is not None:
hparam_file = open(os.path.join(results_path, "final_metric_scores.json"), "w")
results_serializable = {k: float(v) for k, v in model.metric_scores[-1].items()}
hparam_file.write(json.dumps(results_serializable, indent=4))
hparam_file.close()
# return the max (or min) metric specified by 'monitor_metric' in the run config
all_scores = [
score[run_config["optuna"]["monitor_metric"]].numpy()
for score in model.metric_scores
]
if run_config["optuna"]["opt_direction"] == "maximize":
return np.max(all_scores)
else:
return np.min(all_scores)
def main():
"""
Perform an optuna run according to the hyperparameters and directory locations
specified in 'config_path'
"""
torch.autograd.set_detect_anomaly(True)
args = parse_arguments()
# read in config file
run_config = read_json(args.config_path)
# Set paths to data
task = run_config["data"]["task"]
# paths to subgraphs, edge list, and shortest paths between all nodes in the graph
run_config["subgraphs_path"] = os.path.join(task, "subgraphs.pth")
run_config["graph_path"] = os.path.join(task, "edge_list.txt")
run_config["shortest_paths_path"] = os.path.join(task, "shortest_path_matrix.npy")
run_config["degree_sequence_path"] = os.path.join(task, "degree_sequence.txt")
run_config["ego_graph_path"] = os.path.join(task, "ego_graphs.txt")
# directory where similarity calculations will be stored
run_config["similarities_path"] = os.path.join(task, "similarities/")
# get location of node embeddings
run_config["embedding_path"] = os.path.join(task, "atom_features.pth")
    # create a tensorboard directory in the folder specified by the 'dir' key,
    # resolved relative to the PROJECT ROOT folder (unless the config marks the
    # tensorboard path as local)
if "local" in run_config["tb"] and run_config["tb"]["local"]:
run_config["tb"]["dir_full"] = run_config["tb"]["dir"]
else:
run_config["tb"]["dir_full"] = os.path.join(
config.PROJECT_ROOT, run_config["tb"]["dir"]
)
ntrials = run_config["optuna"]["opt_n_trials"]
print(f"Running {ntrials} Trials of optuna")
if run_config["optuna"]["pruning"]:
pruner = optuna.pruners.MedianPruner()
else:
pruner = None
# the complete study path is the tensorboard directory + the study name
run_config["study_path"] = os.path.join(
run_config["tb"]["dir_full"], run_config["tb"]["name"]
)
print("Logging to ", run_config["study_path"])
pathlib.Path(run_config["study_path"]).mkdir(parents=True, exist_ok=True)
# get database file
db_file = os.path.join(run_config["study_path"], "optuna_study_sqlite.db")
# specify sampler
if (
run_config["optuna"]["sampler"] == "grid"
and "grid_search_space" in run_config["optuna"]
):
sampler = optuna.samplers.GridSampler(run_config["optuna"]["grid_search_space"])
elif run_config["optuna"]["sampler"] == "tpe":
sampler = optuna.samplers.TPESampler()
    elif run_config["optuna"]["sampler"] == "random":
        sampler = optuna.samplers.RandomSampler()
    else:
        # fall back to optuna's default sampler when an unrecognised option is
        # given, so that `sampler` is always defined before create_study below
        sampler = None
    # create an optuna study with the specified sampler, pruner, and direction
    # (e.g. maximize). A SQLite database is used to keep track of results, and an
    # existing study will be loaded if one exists.
study = optuna.create_study(
direction=run_config["optuna"]["opt_direction"],
sampler=sampler,
pruner=pruner,
storage="sqlite:///" + db_file,
study_name=run_config["study_path"],
load_if_exists=True,
)
study.optimize(
lambda trial: train_model(run_config, trial),
n_trials=run_config["optuna"]["opt_n_trials"],
n_jobs=run_config["optuna"]["opt_n_cores"],
)
optuna_results_path = os.path.join(run_config["study_path"], "optuna_study.pkl")
print("Saving Study Results to", optuna_results_path)
joblib.dump(study, optuna_results_path)
print(study.best_params)
if __name__ == "__main__":
main()
|
[
"pytorch_lightning.Trainer",
"numpy.random.seed",
"argparse.ArgumentParser",
"joblib.dump",
"json.dumps",
"pathlib.Path",
"torch.autograd.set_detect_anomaly",
"optuna.integration.PyTorchLightningPruningCallback",
"os.path.join",
"optuna.samplers.TPESampler",
"random.randint",
"os.path.exists",
"numpy.max",
"optuna.samplers.GridSampler",
"torch.manual_seed",
"torch.cuda.manual_seed",
"optuna.samplers.RandomSampler",
"numpy.min",
"optuna.pruners.MedianPruner",
"os.makedirs",
"torch.cuda.manual_seed_all",
"commentjson.load",
"optuna.create_study"
] |
[((602, 666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Learn subgraph embeddings"""'}), "(description='Learn subgraph embeddings')\n", (625, 666), False, 'import argparse\n'), ((2974, 3016), 'torch.manual_seed', 'torch.manual_seed', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (2991, 3016), False, 'import torch\n'), ((3021, 3060), 'numpy.random.seed', 'np.random.seed', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (3035, 3060), True, 'import numpy as np\n'), ((3065, 3112), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (3087, 3112), False, 'import torch\n'), ((3117, 3168), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["hyperparameters['seed']"], {}), "(hyperparameters['seed'])\n", (3143, 3168), False, 'import torch\n'), ((4519, 4587), 'os.path.join', 'os.path.join', (["run_config['tb']['dir_full']", "run_config['tb']['name']"], {}), "(run_config['tb']['dir_full'], run_config['tb']['name'])\n", (4531, 4587), False, 'import os\n'), ((5634, 5662), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {}), '(**trainer_kwargs)\n', (5644, 5662), True, 'import pytorch_lightning as pl\n'), ((7689, 7728), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (7722, 7728), False, 'import torch\n'), ((8016, 8051), 'os.path.join', 'os.path.join', (['task', '"""subgraphs.pth"""'], {}), "(task, 'subgraphs.pth')\n", (8028, 8051), False, 'import os\n'), ((8083, 8118), 'os.path.join', 'os.path.join', (['task', '"""edge_list.txt"""'], {}), "(task, 'edge_list.txt')\n", (8095, 8118), False, 'import os\n'), ((8159, 8205), 'os.path.join', 'os.path.join', (['task', '"""shortest_path_matrix.npy"""'], {}), "(task, 'shortest_path_matrix.npy')\n", (8171, 8205), False, 'import os\n'), ((8247, 8288), 'os.path.join', 'os.path.join', (['task', '"""degree_sequence.txt"""'], {}), "(task, 'degree_sequence.txt')\n", (8259, 8288), False, 'import os\n'), ((8324, 8360), 'os.path.join', 'os.path.join', (['task', '"""ego_graphs.txt"""'], {}), "(task, 'ego_graphs.txt')\n", (8336, 8360), False, 'import os\n'), ((8461, 8496), 'os.path.join', 'os.path.join', (['task', '"""similarities/"""'], {}), "(task, 'similarities/')\n", (8473, 8496), False, 'import os\n'), ((8571, 8610), 'os.path.join', 'os.path.join', (['task', '"""atom_features.pth"""'], {}), "(task, 'atom_features.pth')\n", (8583, 8610), False, 'import os\n'), ((9300, 9368), 'os.path.join', 'os.path.join', (["run_config['tb']['dir_full']", "run_config['tb']['name']"], {}), "(run_config['tb']['dir_full'], run_config['tb']['name'])\n", (9312, 9368), False, 'import os\n'), ((9551, 9615), 'os.path.join', 'os.path.join', (["run_config['study_path']", '"""optuna_study_sqlite.db"""'], {}), "(run_config['study_path'], 'optuna_study_sqlite.db')\n", (9563, 9615), False, 'import os\n'), ((10262, 10460), 'optuna.create_study', 'optuna.create_study', ([], {'direction': "run_config['optuna']['opt_direction']", 'sampler': 'sampler', 'pruner': 'pruner', 'storage': "('sqlite:///' + db_file)", 'study_name': "run_config['study_path']", 'load_if_exists': '(True)'}), "(direction=run_config['optuna']['opt_direction'],\n sampler=sampler, pruner=pruner, storage='sqlite:///' + db_file,\n study_name=run_config['study_path'], load_if_exists=True)\n", (10281, 10460), False, 'import optuna\n'), ((10723, 10781), 'os.path.join', 'os.path.join', (["run_config['study_path']", 
'"""optuna_study.pkl"""'], {}), "(run_config['study_path'], 'optuna_study.pkl')\n", (10735, 10781), False, 'import os\n'), ((10844, 10883), 'joblib.dump', 'joblib.dump', (['study', 'optuna_results_path'], {}), '(study, optuna_results_path)\n', (10855, 10883), False, 'import joblib\n'), ((943, 992), 'commentjson.load', 'commentjson.load', (['handle'], {'object_hook': 'OrderedDict'}), '(handle, object_hook=OrderedDict)\n', (959, 992), False, 'import commentjson\n'), ((4599, 4620), 'os.path.exists', 'os.path.exists', (['lgdir'], {}), '(lgdir)\n', (4613, 4620), False, 'import os\n'), ((4630, 4648), 'os.makedirs', 'os.makedirs', (['lgdir'], {}), '(lgdir)\n', (4641, 4648), False, 'import os\n'), ((4838, 4868), 'os.path.exists', 'os.path.exists', (['logger.log_dir'], {}), '(logger.log_dir)\n', (4852, 4868), False, 'import os\n'), ((4878, 4905), 'os.makedirs', 'os.makedirs', (['logger.log_dir'], {}), '(logger.log_dir)\n', (4889, 4905), False, 'import os\n'), ((5510, 5601), 'optuna.integration.PyTorchLightningPruningCallback', 'PyTorchLightningPruningCallback', (['trial'], {'monitor': "run_config['optuna']['monitor_metric']"}), "(trial, monitor=run_config['optuna'][\n 'monitor_metric'])\n", (5541, 5601), False, 'from optuna.integration import PyTorchLightningPruningCallback\n'), ((6241, 6287), 'os.path.join', 'os.path.join', (['results_path', '"""hyperparams.json"""'], {}), "(results_path, 'hyperparams.json')\n", (6253, 6287), False, 'import os\n'), ((6316, 6353), 'json.dumps', 'json.dumps', (['hyperparameters'], {'indent': '(4)'}), '(hyperparameters, indent=4)\n', (6326, 6353), False, 'import json\n'), ((6442, 6491), 'os.path.join', 'os.path.join', (['results_path', '"""trainer_kwargs.json"""'], {}), "(results_path, 'trainer_kwargs.json')\n", (6454, 6491), False, 'import os\n'), ((6733, 6769), 'json.dumps', 'json.dumps', (['trainer_kwargs'], {'indent': '(4)'}), '(trainer_kwargs, indent=4)\n', (6743, 6769), False, 'import json\n'), ((7478, 7496), 'numpy.max', 'np.max', (['all_scores'], {}), '(all_scores)\n', (7484, 7496), True, 'import numpy as np\n'), ((7522, 7540), 'numpy.min', 'np.min', (['all_scores'], {}), '(all_scores)\n', (7528, 7540), True, 'import numpy as np\n'), ((8891, 8949), 'os.path.join', 'os.path.join', (['config.PROJECT_ROOT', "run_config['tb']['dir']"], {}), "(config.PROJECT_ROOT, run_config['tb']['dir'])\n", (8903, 8949), False, 'import os\n'), ((9130, 9159), 'optuna.pruners.MedianPruner', 'optuna.pruners.MedianPruner', ([], {}), '()\n', (9157, 9159), False, 'import optuna\n'), ((9779, 9849), 'optuna.samplers.GridSampler', 'optuna.samplers.GridSampler', (["run_config['optuna']['grid_search_space']"], {}), "(run_config['optuna']['grid_search_space'])\n", (9806, 9849), False, 'import optuna\n'), ((5115, 5207), 'os.path.join', 'os.path.join', (['logger.log_dir', '"""{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"""'], {}), "(logger.log_dir,\n '{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}')\n", (5127, 5207), False, 'import os\n'), ((6941, 6995), 'os.path.join', 'os.path.join', (['results_path', '"""final_metric_scores.json"""'], {}), "(results_path, 'final_metric_scores.json')\n", (6953, 6995), False, 'import os\n'), ((7117, 7159), 'json.dumps', 'json.dumps', (['results_serializable'], {'indent': '(4)'}), '(results_serializable, indent=4)\n', (7127, 7159), False, 'import json\n'), ((9438, 9476), 'pathlib.Path', 'pathlib.Path', (["run_config['study_path']"], {}), "(run_config['study_path'])\n", (9450, 9476), False, 'import pathlib\n'), ((9919, 9947), 
'optuna.samplers.TPESampler', 'optuna.samplers.TPESampler', ([], {}), '()\n', (9945, 9947), False, 'import optuna\n'), ((10020, 10051), 'optuna.samplers.RandomSampler', 'optuna.samplers.RandomSampler', ([], {}), '()\n', (10049, 10051), False, 'import optuna\n'), ((4791, 4818), 'random.randint', 'random.randint', (['(0)', '(10000000)'], {}), '(0, 10000000)\n', (4805, 4818), False, 'import random\n')]
|
import numpy as np
import est_dir
def test_1():
"""
Test for compute_forward() - check for flag=True.
"""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
region = 1
step = 0.17741338024633116
forward_tol = 1000000
no_vars = 10
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track = np.array([[0, f_old], [step, f_new]])
track, count_func_evals, flag = (est_dir.compute_forward
(step, const_forward, forward_tol, track,
centre_point, beta,
f, func_args))
assert(f_old > f_new)
assert(count_func_evals == len(track) - 2)
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
if j < len(track) - 1:
assert(track[j][1] < track[j - 1][1])
else:
assert(track[j][1] > track[j - 1][1])
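# Editorial note: the loop in test_1 mirrors the expected forward-tracking
# behaviour: starting from `step`, the step size is repeatedly multiplied by
# const_forward (here 1 / const_back = 2) while the noisy response keeps
# decreasing, and the final row of `track` records the first increase, which
# ends the forward search.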
def test_2():
"""
Test for compute_forward() - check that when flag=False, track is returned.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 1
forward_tol = 100000
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track = np.array([[0, f_old], [step, f_new]])
test_track, count_func_evals, flag = (est_dir.compute_forward
(step, const_forward, forward_tol,
track, centre_point, beta, f,
func_args))
assert(f_old > f_new)
assert(flag == False)
assert(count_func_evals > 0)
for j in range(len(test_track)):
assert(test_track[j, 0] < forward_tol)
if j >= 1:
assert(test_track[j, 1] < test_track[j - 1, 1])
assert(test_track[j, 0] * const_forward > forward_tol)
def test_3():
"""
Test for forward_tracking - flag=True and f_new >= track[-2][1]
"""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.05
forward_tol = 1000000
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(len(track) - 1 == total_func_evals)
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(step, 3))
assert(flag == True)
for j in range(2, len(track)):
step = step * 2
assert(np.round(track[j][0], 3) == step)
if j == (len(track) - 1):
assert(track[j][1] > track[j - 1][1])
else:
assert(track[j - 1][1] > track[j][1])
def test_4():
"""
Test for forward_tracking - forward_tol not met and f_new < track[-2][1].
"""
np.random.seed(25)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 10)
t = 0.005
forward_tol = 10000
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, t, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(flag == True)
for j in range(1, len(track)):
if j == (len(track) - 1):
assert(track[j][1] > track[j-1][1])
else:
assert(track[j-1][1] > track[j][1])
def test_5():
"""
Test for forward_tracking - forward_tol not met initially, f_new <
track[-2][1] and eventually forward_tol is met.
"""
np.random.seed(25)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 10)
t = 0.005
forward_tol = 10
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, t, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(flag == False)
for j in range(1, len(track)):
assert(track[j-1][1] > track[j][1])
def test_6():
"""
Test for forward_tracking - forward_tol met.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 0.5
forward_tol = 1.5
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(flag == False)
assert(track[2][1] < track[1][1] < track[0][1])
assert(total_func_evals == 1)
def test_7():
"""
Test for compute_backward - check that when flag=True, track is updated.
"""
np.random.seed(90)
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 1, (m, ))
centre_point = np.random.uniform(0, 1, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 0.1)
step = 0.001
back_tol = 0.000001
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, f_old], [step, f_new]])
track, total_func_evals, flag = (est_dir.compute_backward
(step, const_back, back_tol, track,
centre_point, beta, f, func_args))
assert(total_func_evals == len(track) - 2)
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_back
if j < len(track) - 1:
assert(track[j][1] < track[j-1][1])
else:
assert(track[j][1] > track[j-1][1])
def test_8():
"""
Test for compute_backward - check that when flag=False,
original track is returned.
"""
np.random.seed(90)
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 1, (m, ))
centre_point = np.random.uniform(0, 1, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 0.1)
step = 0.1
back_tol = 0.075
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, f_old], [step, f_new]])
track_new, total_func_evals, flag = (est_dir.compute_backward
(step, const_back, back_tol, track,
centre_point, beta, f, func_args))
assert(np.all(track == track_new))
assert(flag == False)
assert(total_func_evals == 0)
def test_9():
"""
Test for backward_tracking - back_tol is met.
"""
np.random.seed(32964)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
t = 1
back_tol = 1
beta = np.array([200, 200])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, count_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(track.shape == (2, m))
assert(track[0][0] == 0)
assert(track[1][0] == t)
assert(track[1][0] < track[1][1])
assert(count_func_evals == 0)
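# Editorial note: test_9 appears to exercise the early-exit branch of
# backward_tracking: with t = 1 and back_tol = 1, halving the step once would
# already drop below back_tol, so the original two-row track is returned with
# no additional function evaluations (count_func_evals == 0).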
def test_10():
"""
Test for backward_tracking - back tol is not met and f_new >
track[-2][1].
"""
np.random.seed(32964)
n = 6
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
t = 97.688932389756
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
for j in range(1, len(track)):
assert(np.round(track[j][0], 4) == np.round(t, 4))
t = t / 2
assert(np.min(track[:, 1]) < track[1][0])
def test_11():
"""
Test for backward_tracking - back tol is not met and f_new < track[-2][1]
"""
np.random.seed(329998)
n = 20
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 10, (m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 1000)
t = 17001.993794080016
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(np.min(track[:, 1]) < track[:, 1][0])
def test_12():
"""
Test for backward_tracking - back tol is not initially met, f_new <
    track[-2][1] and eventually back tol is met.
"""
np.random.seed(329998)
n = 20
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 10, (m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 1000)
t = 17001.993794080016
back_tol = 1
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(np.min(track[:, 1]) < track[:, 1][0])
def test_13():
"""Test for compute_coeffs"""
track_y = np.array([100, 200, 50])
track_t = np.array([0, 1, 0.5])
design_matrix_step = np.vstack((np.repeat(track_y[0], len(track_t)),
np.array(track_t),
np.array(track_t) ** 2)).T
assert(np.all(design_matrix_step[0, :] == np.array([100, 0, 0])))
assert(np.all(design_matrix_step[1, :] == np.array([100, 1, 1])))
assert(np.all(design_matrix_step[2, :] == np.array([100, 0.5, 0.25])))
OLS = (np.linalg.inv(design_matrix_step.T @ design_matrix_step) @
design_matrix_step.T @ track_y)
check = -OLS[1] / (2 * OLS[2])
opt_t = est_dir.compute_coeffs(track_y, track_t)
assert(np.all(np.round(check, 5) == np.round(opt_t, 5)))
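# Editorial note: the check above fits y ~ b0 + b1 * t + b2 * t ** 2 by ordinary
# least squares (via the normal equations) and takes the stationary point
# t* = -b1 / (2 * b2), i.e. the vertex of the fitted parabola; compute_coeffs is
# expected to return the same step size.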
def test_14():
"""
    Test for combine_tracking - check that the correct step size is returned
    when forward_tol is met.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 1
forward_tol = 100000
back_tol = 0.0000001
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_15():
"""
    Test for combine_tracking - check that the correct step size is returned
    when forward_tol is not met.
"""
np.random.seed(3291)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.005
forward_tol = 10000
back_tol = 0.0000001
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_16():
"""
    Test for combine_tracking - check that the correct step size is returned
    when back_tol is met.
"""
np.random.seed(32964)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 1
back_tol = 1
forward_tol = 100000
beta = np.array([200, 200])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val == f_old)
def test_17():
"""
    Test for combine_tracking - check that the correct step size is returned
    when back_tol is not met.
"""
np.random.seed(32964)
n = 6
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 10
forward_tol = 1000000
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_18():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 80],
[2, 160],
[4, 40],
[8, 20],
[16, 90]])
track_method = 'Forward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 20, 90])))
assert(np.all(track_t == np.array([0, 8, 16])))
def test_19():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 80],
[2, 70],
[4, 90]])
track_method = 'Forward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 70, 90])))
assert(np.all(track_t == np.array([0, 2, 4])))
def test_20():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 120],
[0.5, 110],
[0.25, 90]])
track_method = 'Backward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 90, 110])))
assert(np.all(track_t == np.array([0, 0.25, 0.5])))
def test_21():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 120],
[0.5, 80]])
track_method = 'Backward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 80, 120])))
assert(np.all(track_t == np.array([0, 0.5, 1])))
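# Editorial note: taken together, tests 18-21 suggest that arrange_track_y_t
# returns three points bracketing the minimum for the quadratic fit: the
# response at t = 0, the response at the best step found, and the response at
# the next larger step that was evaluated, for both 'Forward' and 'Backward'
# track methods.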
def test_22():
"""Test for check_func_val_coeffs when func_val > track_y[1]."""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 60)
step = 1.8251102718712913
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, 100],
[1, 160],
[2, 40],
[4, 90]])
track_method = 'Forward'
upd_point, func_val = (est_dir.check_func_val_coeffs
(track, track_method, centre_point, beta, f,
func_args))
assert(upd_point.shape == (m, ))
assert(func_val == 40)
def test_23():
"""Test for check_func_val_coeffs when func_val <= track_y[1]."""
np.random.seed(91)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.01
forward_tol = 1000000
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(flag == True)
assert(total_func_evals > 0)
track_method = 'Forward'
upd_point, func_val = (est_dir.check_func_val_coeffs
(track, track_method, centre_point, beta, f,
func_args))
assert(upd_point.shape == (m, ))
assert(np.all(func_val <= track[:, 1]))
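# Editorial note: tests 22 and 23 indicate that check_func_val_coeffs evaluates
# the candidate point implied by the fitted quadratic step and keeps it only if
# it improves on the best tracked response; otherwise it falls back to the best
# point already stored in `track` (hence func_val == 40 in test_22).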
|
[
"numpy.random.seed",
"est_dir.combine_tracking",
"numpy.ones",
"est_dir.backward_tracking",
"numpy.round",
"est_dir.compute_direction_LS",
"numpy.copy",
"est_dir.compute_coeffs",
"numpy.identity",
"est_dir.forward_tracking",
"est_dir.compute_direction_XY",
"numpy.min",
"numpy.linalg.inv",
"numpy.all",
"numpy.random.uniform",
"est_dir.compute_backward",
"est_dir.compute_forward",
"est_dir.quad_func_params",
"numpy.array",
"est_dir.arrange_track_y_t",
"est_dir.check_func_val_coeffs"
] |
[((134, 152), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (148, 152), True, 'import numpy as np\n'), ((272, 285), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (279, 285), True, 'import numpy as np\n'), ((306, 336), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (323, 336), True, 'import numpy as np\n'), ((352, 386), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (376, 386), False, 'import est_dir\n'), ((547, 623), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (575, 623), False, 'import est_dir\n'), ((887, 924), 'numpy.array', 'np.array', (['[[0, f_old], [step, f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (895, 924), True, 'import numpy as np\n'), ((963, 1065), 'est_dir.compute_forward', 'est_dir.compute_forward', (['step', 'const_forward', 'forward_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_forward, forward_tol, track,\n centre_point, beta, f, func_args)\n', (986, 1065), False, 'import est_dir\n'), ((1693, 1711), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (1707, 1711), True, 'import numpy as np\n'), ((1830, 1843), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (1837, 1843), True, 'import numpy as np\n'), ((1864, 1882), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (1872, 1882), True, 'import numpy as np\n'), ((1897, 1911), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (1908, 1911), True, 'import numpy as np\n'), ((2015, 2041), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (2023, 2041), True, 'import numpy as np\n'), ((2169, 2206), 'numpy.array', 'np.array', (['[[0, f_old], [step, f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (2177, 2206), True, 'import numpy as np\n'), ((2250, 2352), 'est_dir.compute_forward', 'est_dir.compute_forward', (['step', 'const_forward', 'forward_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_forward, forward_tol, track,\n centre_point, beta, f, func_args)\n', (2273, 2352), False, 'import est_dir\n'), ((2908, 2926), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (2922, 2926), True, 'import numpy as np\n'), ((3046, 3059), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (3053, 3059), True, 'import numpy as np\n'), ((3080, 3110), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (3097, 3110), True, 'import numpy as np\n'), ((3126, 3160), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (3150, 3160), False, 'import est_dir\n'), ((3306, 3382), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (3334, 3382), False, 'import est_dir\n'), ((3698, 3808), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 'step', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, step, f_old, f_new, beta,\n const_forward, forward_tol, f, func_args)\n', (3722, 3808), False, 'import est_dir\n'), ((4496, 4514), 'numpy.random.seed', 'np.random.seed', (['(25)'], {}), '(25)\n', (4510, 4514), True, 'import numpy as np\n'), ((4633, 4646), 'numpy.ones', 
'np.ones', (['(m,)'], {}), '((m,))\n', (4640, 4646), True, 'import numpy as np\n'), ((4667, 4685), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (4675, 4685), True, 'import numpy as np\n'), ((4700, 4734), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (4724, 4734), False, 'import est_dir\n'), ((4831, 4847), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (4839, 4847), True, 'import numpy as np\n'), ((5024, 5131), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_forward,\n forward_tol, f, func_args)\n', (5048, 5131), False, 'import est_dir\n'), ((5769, 5787), 'numpy.random.seed', 'np.random.seed', (['(25)'], {}), '(25)\n', (5783, 5787), True, 'import numpy as np\n'), ((5906, 5919), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (5913, 5919), True, 'import numpy as np\n'), ((5940, 5958), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (5948, 5958), True, 'import numpy as np\n'), ((5973, 6007), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (5997, 6007), False, 'import est_dir\n'), ((6101, 6117), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (6109, 6117), True, 'import numpy as np\n'), ((6294, 6401), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_forward,\n forward_tol, f, func_args)\n', (6318, 6401), False, 'import est_dir\n'), ((6862, 6880), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (6876, 6880), True, 'import numpy as np\n'), ((6999, 7012), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (7006, 7012), True, 'import numpy as np\n'), ((7033, 7051), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (7041, 7051), True, 'import numpy as np\n'), ((7066, 7080), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (7077, 7080), True, 'import numpy as np\n'), ((7183, 7209), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (7191, 7209), True, 'import numpy as np\n'), ((7362, 7472), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 'step', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, step, f_old, f_new, beta,\n const_forward, forward_tol, f, func_args)\n', (7386, 7472), False, 'import est_dir\n'), ((7822, 7840), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (7836, 7840), True, 'import numpy as np\n'), ((7923, 7952), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (7940, 7952), True, 'import numpy as np\n'), ((7974, 8003), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (7991, 8003), True, 'import numpy as np\n'), ((8019, 8053), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (8043, 8053), False, 'import est_dir\n'), ((8200, 8276), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (8228, 8276), False, 'import est_dir\n'), ((8569, 8606), 'numpy.array', 'np.array', (['[[0, f_old], [step, 
f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (8577, 8606), True, 'import numpy as np\n'), ((8645, 8742), 'est_dir.compute_backward', 'est_dir.compute_backward', (['step', 'const_back', 'back_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_back, back_tol, track, centre_point,\n beta, f, func_args)\n', (8669, 8742), False, 'import est_dir\n'), ((9312, 9330), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (9326, 9330), True, 'import numpy as np\n'), ((9413, 9442), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (9430, 9442), True, 'import numpy as np\n'), ((9464, 9493), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(m,)'], {}), '(0, 1, (m,))\n', (9481, 9493), True, 'import numpy as np\n'), ((9509, 9543), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (9533, 9543), False, 'import est_dir\n'), ((9685, 9761), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (9713, 9761), False, 'import est_dir\n'), ((10054, 10091), 'numpy.array', 'np.array', (['[[0, f_old], [step, f_new]]'], {}), '([[0, f_old], [step, f_new]])\n', (10062, 10091), True, 'import numpy as np\n'), ((10134, 10231), 'est_dir.compute_backward', 'est_dir.compute_backward', (['step', 'const_back', 'back_tol', 'track', 'centre_point', 'beta', 'f', 'func_args'], {}), '(step, const_back, back_tol, track, centre_point,\n beta, f, func_args)\n', (10158, 10231), False, 'import est_dir\n'), ((10329, 10355), 'numpy.all', 'np.all', (['(track == track_new)'], {}), '(track == track_new)\n', (10335, 10355), True, 'import numpy as np\n'), ((10512, 10533), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (10526, 10533), True, 'import numpy as np\n'), ((10614, 10627), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (10621, 10627), True, 'import numpy as np\n'), ((10648, 10666), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (10656, 10666), True, 'import numpy as np\n'), ((10681, 10715), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (10705, 10715), False, 'import est_dir\n'), ((10800, 10820), 'numpy.array', 'np.array', (['[200, 200]'], {}), '([200, 200])\n', (10808, 10820), True, 'import numpy as np\n'), ((10991, 11093), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (11016, 11093), False, 'import est_dir\n'), ((11454, 11475), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (11468, 11475), True, 'import numpy as np\n'), ((11567, 11580), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (11574, 11580), True, 'import numpy as np\n'), ((11601, 11631), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (11618, 11631), True, 'import numpy as np\n'), ((11646, 11680), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (11670, 11680), False, 'import est_dir\n'), ((11834, 11913), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, 
region)\n', (11862, 11913), False, 'import est_dir\n'), ((12219, 12321), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (12244, 12321), False, 'import est_dir\n'), ((12815, 12837), 'numpy.random.seed', 'np.random.seed', (['(329998)'], {}), '(329998)\n', (12829, 12837), True, 'import numpy as np\n'), ((12932, 12962), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (12949, 12962), True, 'import numpy as np\n'), ((12983, 13013), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (13000, 13013), True, 'import numpy as np\n'), ((13028, 13062), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (13052, 13062), False, 'import est_dir\n'), ((13222, 13301), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, region)\n', (13250, 13301), False, 'import est_dir\n'), ((13607, 13709), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (13632, 13709), False, 'import est_dir\n'), ((14135, 14157), 'numpy.random.seed', 'np.random.seed', (['(329998)'], {}), '(329998)\n', (14149, 14157), True, 'import numpy as np\n'), ((14252, 14282), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (14269, 14282), True, 'import numpy as np\n'), ((14303, 14333), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (14320, 14333), True, 'import numpy as np\n'), ((14348, 14382), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (14372, 14382), False, 'import est_dir\n'), ((14532, 14611), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, region)\n', (14560, 14611), False, 'import est_dir\n'), ((14917, 15019), 'est_dir.backward_tracking', 'est_dir.backward_tracking', (['centre_point', 't', 'f_old', 'f_new', 'beta', 'const_back', 'back_tol', 'f', 'func_args'], {}), '(centre_point, t, f_old, f_new, beta, const_back,\n back_tol, f, func_args)\n', (14942, 15019), False, 'import est_dir\n'), ((15349, 15373), 'numpy.array', 'np.array', (['[100, 200, 50]'], {}), '([100, 200, 50])\n', (15357, 15373), True, 'import numpy as np\n'), ((15389, 15410), 'numpy.array', 'np.array', (['[0, 1, 0.5]'], {}), '([0, 1, 0.5])\n', (15397, 15410), True, 'import numpy as np\n'), ((15987, 16027), 'est_dir.compute_coeffs', 'est_dir.compute_coeffs', (['track_y', 'track_t'], {}), '(track_y, track_t)\n', (16009, 16027), False, 'import est_dir\n'), ((16237, 16255), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (16251, 16255), True, 'import numpy as np\n'), ((16374, 16387), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (16381, 16387), True, 'import numpy as np\n'), ((16408, 16426), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (16416, 16426), True, 'import numpy as np\n'), ((16441, 16455), 'numpy.identity', 
'np.identity', (['m'], {}), '(m)\n', (16452, 16455), True, 'import numpy as np\n'), ((16585, 16611), 'numpy.array', 'np.array', (['[0.0001, 0.0001]'], {}), '([0.0001, 0.0001])\n', (16593, 16611), True, 'import numpy as np\n'), ((16708, 16833), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (16732, 16833), False, 'import est_dir\n'), ((17329, 17349), 'numpy.random.seed', 'np.random.seed', (['(3291)'], {}), '(3291)\n', (17343, 17349), True, 'import numpy as np\n'), ((17468, 17481), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (17475, 17481), True, 'import numpy as np\n'), ((17502, 17520), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (17510, 17520), True, 'import numpy as np\n'), ((17535, 17569), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (17559, 17569), False, 'import est_dir\n'), ((17694, 17710), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (17702, 17710), True, 'import numpy as np\n'), ((17807, 17932), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (17831, 17932), False, 'import est_dir\n'), ((18421, 18442), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (18435, 18442), True, 'import numpy as np\n'), ((18561, 18574), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (18568, 18574), True, 'import numpy as np\n'), ((18595, 18613), 'numpy.array', 'np.array', (['[25, 25]'], {}), '([25, 25])\n', (18603, 18613), True, 'import numpy as np\n'), ((18628, 18662), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (18652, 18662), False, 'import est_dir\n'), ((18776, 18796), 'numpy.array', 'np.array', (['[200, 200]'], {}), '([200, 200])\n', (18784, 18796), True, 'import numpy as np\n'), ((18893, 19018), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (18917, 19018), False, 'import est_dir\n'), ((19512, 19533), 'numpy.random.seed', 'np.random.seed', (['(32964)'], {}), '(32964)\n', (19526, 19533), True, 'import numpy as np\n'), ((19663, 19676), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (19670, 19676), True, 'import numpy as np\n'), ((19697, 19727), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(m,)'], {}), '(0, 10, (m,))\n', (19714, 19727), True, 'import numpy as np\n'), ((19742, 19776), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (19766, 19776), False, 'import est_dir\n'), ((19947, 20026), 'est_dir.compute_direction_XY', 'est_dir.compute_direction_XY', (['n', 'm', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(n, m, centre_point, f, func_args, no_vars, region)\n', (19975, 20026), False, 'import est_dir\n'), ((20258, 20383), 'est_dir.combine_tracking', 'est_dir.combine_tracking', (['centre_point', 'f_old', 'beta', 'step', 
'const_back', 'back_tol', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, f_old, beta, step, const_back,\n back_tol, const_forward, forward_tol, f, func_args)\n', (20282, 20383), False, 'import est_dir\n'), ((20798, 20865), 'numpy.array', 'np.array', (['[[0, 100], [1, 80], [2, 160], [4, 40], [8, 20], [16, 90]]'], {}), '([[0, 100], [1, 80], [2, 160], [4, 40], [8, 20], [16, 90]])\n', (20806, 20865), True, 'import numpy as np\n'), ((21035, 21081), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (21060, 21081), False, 'import est_dir\n'), ((21262, 21309), 'numpy.array', 'np.array', (['[[0, 100], [1, 80], [2, 70], [4, 90]]'], {}), '([[0, 100], [1, 80], [2, 70], [4, 90]])\n', (21270, 21309), True, 'import numpy as np\n'), ((21433, 21479), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (21458, 21479), False, 'import est_dir\n'), ((21659, 21713), 'numpy.array', 'np.array', (['[[0, 100], [1, 120], [0.5, 110], [0.25, 90]]'], {}), '([[0, 100], [1, 120], [0.5, 110], [0.25, 90]])\n', (21667, 21713), True, 'import numpy as np\n'), ((21838, 21884), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (21863, 21884), False, 'import est_dir\n'), ((22070, 22111), 'numpy.array', 'np.array', (['[[0, 100], [1, 120], [0.5, 80]]'], {}), '([[0, 100], [1, 120], [0.5, 80]])\n', (22078, 22111), True, 'import numpy as np\n'), ((22213, 22259), 'est_dir.arrange_track_y_t', 'est_dir.arrange_track_y_t', (['track', 'track_method'], {}), '(track, track_method)\n', (22238, 22259), False, 'import est_dir\n'), ((22466, 22484), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (22480, 22484), True, 'import numpy as np\n'), ((22544, 22557), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (22551, 22557), True, 'import numpy as np\n'), ((22578, 22608), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (22595, 22608), True, 'import numpy as np\n'), ((22624, 22658), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (22648, 22658), False, 'import est_dir\n'), ((22792, 22868), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (22820, 22868), False, 'import est_dir\n'), ((23160, 23208), 'numpy.array', 'np.array', (['[[0, 100], [1, 160], [2, 40], [4, 90]]'], {}), '([[0, 100], [1, 160], [2, 40], [4, 90]])\n', (23168, 23208), True, 'import numpy as np\n'), ((23336, 23424), 'est_dir.check_func_val_coeffs', 'est_dir.check_func_val_coeffs', (['track', 'track_method', 'centre_point', 'beta', 'f', 'func_args'], {}), '(track, track_method, centre_point, beta, f,\n func_args)\n', (23365, 23424), False, 'import est_dir\n'), ((23642, 23660), 'numpy.random.seed', 'np.random.seed', (['(91)'], {}), '(91)\n', (23656, 23660), True, 'import numpy as np\n'), ((23780, 23793), 'numpy.ones', 'np.ones', (['(m,)'], {}), '((m,))\n', (23787, 23793), True, 'import numpy as np\n'), ((23814, 23844), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(m,)'], {}), '(0, 20, (m,))\n', (23831, 23844), True, 'import numpy as np\n'), ((23860, 23894), 'est_dir.quad_func_params', 'est_dir.quad_func_params', (['(1)', '(10)', 'm'], {}), '(1, 10, m)\n', (23884, 23894), False, 'import 
est_dir\n'), ((24040, 24116), 'est_dir.compute_direction_LS', 'est_dir.compute_direction_LS', (['m', 'centre_point', 'f', 'func_args', 'no_vars', 'region'], {}), '(m, centre_point, f, func_args, no_vars, region)\n', (24068, 24116), False, 'import est_dir\n'), ((24432, 24542), 'est_dir.forward_tracking', 'est_dir.forward_tracking', (['centre_point', 'step', 'f_old', 'f_new', 'beta', 'const_forward', 'forward_tol', 'f', 'func_args'], {}), '(centre_point, step, f_old, f_new, beta,\n const_forward, forward_tol, f, func_args)\n', (24456, 24542), False, 'import est_dir\n'), ((24775, 24863), 'est_dir.check_func_val_coeffs', 'est_dir.check_func_val_coeffs', (['track', 'track_method', 'centre_point', 'beta', 'f', 'func_args'], {}), '(track, track_method, centre_point, beta, f,\n func_args)\n', (24804, 24863), False, 'import est_dir\n'), ((24969, 25000), 'numpy.all', 'np.all', (['(func_val <= track[:, 1])'], {}), '(func_val <= track[:, 1])\n', (24975, 25000), True, 'import numpy as np\n'), ((775, 796), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (782, 796), True, 'import numpy as np\n'), ((2057, 2078), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (2064, 2078), True, 'import numpy as np\n'), ((3534, 3555), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (3541, 3555), True, 'import numpy as np\n'), ((3983, 4007), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (3991, 4007), True, 'import numpy as np\n'), ((4011, 4025), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (4019, 4025), True, 'import numpy as np\n'), ((4039, 4063), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (4047, 4063), True, 'import numpy as np\n'), ((4067, 4084), 'numpy.round', 'np.round', (['step', '(3)'], {}), '(step, 3)\n', (4075, 4084), True, 'import numpy as np\n'), ((4863, 4884), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (4870, 4884), True, 'import numpy as np\n'), ((5258, 5282), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (5266, 5282), True, 'import numpy as np\n'), ((5286, 5300), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (5294, 5300), True, 'import numpy as np\n'), ((5314, 5338), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (5322, 5338), True, 'import numpy as np\n'), ((5342, 5356), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (5350, 5356), True, 'import numpy as np\n'), ((6133, 6154), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (6140, 6154), True, 'import numpy as np\n'), ((6528, 6552), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (6536, 6552), True, 'import numpy as np\n'), ((6556, 6570), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (6564, 6570), True, 'import numpy as np\n'), ((6584, 6608), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (6592, 6608), True, 'import numpy as np\n'), ((6612, 6626), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (6620, 6626), True, 'import numpy as np\n'), ((7225, 7246), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (7232, 7246), True, 'import numpy as np\n'), ((8428, 8449), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (8435, 8449), True, 'import numpy as np\n'), ((9913, 9934), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (9920, 9934), 
True, 'import numpy as np\n'), ((10836, 10857), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (10843, 10857), True, 'import numpy as np\n'), ((12064, 12085), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (12071, 12085), True, 'import numpy as np\n'), ((12397, 12421), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (12405, 12421), True, 'import numpy as np\n'), ((12425, 12439), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (12433, 12439), True, 'import numpy as np\n'), ((12453, 12477), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (12461, 12477), True, 'import numpy as np\n'), ((12481, 12495), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (12489, 12495), True, 'import numpy as np\n'), ((12658, 12677), 'numpy.min', 'np.min', (['track[:, 1]'], {}), '(track[:, 1])\n', (12664, 12677), True, 'import numpy as np\n'), ((13452, 13473), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (13459, 13473), True, 'import numpy as np\n'), ((13785, 13809), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (13793, 13809), True, 'import numpy as np\n'), ((13813, 13827), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (13821, 13827), True, 'import numpy as np\n'), ((13841, 13865), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (13849, 13865), True, 'import numpy as np\n'), ((13869, 13883), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (13877, 13883), True, 'import numpy as np\n'), ((13931, 13950), 'numpy.min', 'np.min', (['track[:, 1]'], {}), '(track[:, 1])\n', (13937, 13950), True, 'import numpy as np\n'), ((14762, 14783), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (14769, 14783), True, 'import numpy as np\n'), ((15095, 15119), 'numpy.round', 'np.round', (['track[0][0]', '(3)'], {}), '(track[0][0], 3)\n', (15103, 15119), True, 'import numpy as np\n'), ((15123, 15137), 'numpy.round', 'np.round', (['(0)', '(3)'], {}), '(0, 3)\n', (15131, 15137), True, 'import numpy as np\n'), ((15151, 15175), 'numpy.round', 'np.round', (['track[1][0]', '(3)'], {}), '(track[1][0], 3)\n', (15159, 15175), True, 'import numpy as np\n'), ((15179, 15193), 'numpy.round', 'np.round', (['t', '(3)'], {}), '(t, 3)\n', (15187, 15193), True, 'import numpy as np\n'), ((15241, 15260), 'numpy.min', 'np.min', (['track[:, 1]'], {}), '(track[:, 1])\n', (15247, 15260), True, 'import numpy as np\n'), ((16627, 16648), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (16634, 16648), True, 'import numpy as np\n'), ((17726, 17747), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (17733, 17747), True, 'import numpy as np\n'), ((18812, 18833), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (18819, 18833), True, 'import numpy as np\n'), ((20177, 20198), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (20184, 20198), True, 'import numpy as np\n'), ((23021, 23042), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (23028, 23042), True, 'import numpy as np\n'), ((24268, 24289), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (24275, 24289), True, 'import numpy as np\n'), ((825, 846), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (832, 846), True, 'import numpy as np\n'), ((2107, 2128), 'numpy.copy', 'np.copy', (['centre_point'], {}), 
'(centre_point)\n', (2114, 2128), True, 'import numpy as np\n'), ((3584, 3605), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (3591, 3605), True, 'import numpy as np\n'), ((4189, 4213), 'numpy.round', 'np.round', (['track[j][0]', '(3)'], {}), '(track[j][0], 3)\n', (4197, 4213), True, 'import numpy as np\n'), ((4913, 4934), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (4920, 4934), True, 'import numpy as np\n'), ((6183, 6204), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (6190, 6204), True, 'import numpy as np\n'), ((7275, 7296), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (7282, 7296), True, 'import numpy as np\n'), ((8478, 8499), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (8485, 8499), True, 'import numpy as np\n'), ((9963, 9984), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (9970, 9984), True, 'import numpy as np\n'), ((10886, 10907), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (10893, 10907), True, 'import numpy as np\n'), ((12114, 12135), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (12121, 12135), True, 'import numpy as np\n'), ((12583, 12607), 'numpy.round', 'np.round', (['track[j][0]', '(4)'], {}), '(track[j][0], 4)\n', (12591, 12607), True, 'import numpy as np\n'), ((12611, 12625), 'numpy.round', 'np.round', (['t', '(4)'], {}), '(t, 4)\n', (12619, 12625), True, 'import numpy as np\n'), ((13502, 13523), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (13509, 13523), True, 'import numpy as np\n'), ((14812, 14833), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (14819, 14833), True, 'import numpy as np\n'), ((15652, 15673), 'numpy.array', 'np.array', (['[100, 0, 0]'], {}), '([100, 0, 0])\n', (15660, 15673), True, 'import numpy as np\n'), ((15723, 15744), 'numpy.array', 'np.array', (['[100, 1, 1]'], {}), '([100, 1, 1])\n', (15731, 15744), True, 'import numpy as np\n'), ((15794, 15820), 'numpy.array', 'np.array', (['[100, 0.5, 0.25]'], {}), '([100, 0.5, 0.25])\n', (15802, 15820), True, 'import numpy as np\n'), ((15835, 15891), 'numpy.linalg.inv', 'np.linalg.inv', (['(design_matrix_step.T @ design_matrix_step)'], {}), '(design_matrix_step.T @ design_matrix_step)\n', (15848, 15891), True, 'import numpy as np\n'), ((16047, 16065), 'numpy.round', 'np.round', (['check', '(5)'], {}), '(check, 5)\n', (16055, 16065), True, 'import numpy as np\n'), ((16069, 16087), 'numpy.round', 'np.round', (['opt_t', '(5)'], {}), '(opt_t, 5)\n', (16077, 16087), True, 'import numpy as np\n'), ((21112, 21135), 'numpy.array', 'np.array', (['[100, 20, 90]'], {}), '([100, 20, 90])\n', (21120, 21135), True, 'import numpy as np\n'), ((21168, 21188), 'numpy.array', 'np.array', (['[0, 8, 16]'], {}), '([0, 8, 16])\n', (21176, 21188), True, 'import numpy as np\n'), ((21510, 21533), 'numpy.array', 'np.array', (['[100, 70, 90]'], {}), '([100, 70, 90])\n', (21518, 21533), True, 'import numpy as np\n'), ((21566, 21585), 'numpy.array', 'np.array', (['[0, 2, 4]'], {}), '([0, 2, 4])\n', (21574, 21585), True, 'import numpy as np\n'), ((21915, 21939), 'numpy.array', 'np.array', (['[100, 90, 110]'], {}), '([100, 90, 110])\n', (21923, 21939), True, 'import numpy as np\n'), ((21972, 21996), 'numpy.array', 'np.array', (['[0, 0.25, 0.5]'], {}), '([0, 0.25, 0.5])\n', (21980, 21996), True, 'import numpy as np\n'), ((22290, 22314), 'numpy.array', 'np.array', (['[100, 80, 120]'], {}), '([100, 80, 
120])\n', (22298, 22314), True, 'import numpy as np\n'), ((22347, 22368), 'numpy.array', 'np.array', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (22355, 22368), True, 'import numpy as np\n'), ((23071, 23092), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (23078, 23092), True, 'import numpy as np\n'), ((24318, 24339), 'numpy.copy', 'np.copy', (['centre_point'], {}), '(centre_point)\n', (24325, 24339), True, 'import numpy as np\n'), ((15522, 15539), 'numpy.array', 'np.array', (['track_t'], {}), '(track_t)\n', (15530, 15539), True, 'import numpy as np\n'), ((15578, 15595), 'numpy.array', 'np.array', (['track_t'], {}), '(track_t)\n', (15586, 15595), True, 'import numpy as np\n')]
|
"""
Notes
-----
This test and docs/source/usage/iss/iss_cli.sh test the same code paths and should be updated
together
"""
import os
import unittest
import numpy as np
import pandas as pd
import pytest
from starfish.test.full_pipelines.cli._base_cli_test import CLITest
from starfish.types import Features
EXPERIMENT_JSON_URL = "https://d2nhj9g34unfro.cloudfront.net/20181005/ISS-TEST/experiment.json"
@pytest.mark.slow
class TestWithIssData(CLITest, unittest.TestCase):
@property
def spots_file(self):
return "decoded-spots.nc"
@property
def subdirs(self):
return (
"max_projected",
"transforms",
"registered",
"filtered",
"results",
)
@property
def stages(self):
return (
[
"starfish", "validate", "experiment", EXPERIMENT_JSON_URL,
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"MaxProj",
"--dims", "c",
"--dims", "z"
],
[
"starfish", "learn_transform",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Translation",
"--reference-stack",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--upsampling", "1000",
"--axes", "r"
],
[
"starfish", "apply_transform",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--transformation-list", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Warp",
],
[
"starfish", "filter",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][nuclei]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "detect_spots",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--blobs-stack", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"--blobs-axis", "r", "--blobs-axis", "c",
"BlobDetector",
"--min-sigma", "4",
"--max-sigma", "6",
"--num-sigma", "20",
"--threshold", "0.01",
],
[
"starfish", "segment",
"--primary-images", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--nuclei", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"Watershed",
"--nuclei-threshold", ".16",
"--input-threshold", ".22",
"--min-distance", "57",
],
[
"starfish", "target_assignment",
"--label-image",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"--intensities", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"Label",
],
[
"starfish", "decode",
"-i", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"--codebook",
f"@{EXPERIMENT_JSON_URL}",
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc"),
"PerRoundMaxChannelDecoder",
],
# Validate results/{spots,targeted-spots,decoded-spots}.nc
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc")
],
)
def verify_results(self, intensities):
# TODO make this test stronger
genes, counts = np.unique(
intensities.coords[Features.TARGET], return_counts=True)
gene_counts = pd.Series(counts, genes)
        # TODO THERE'S NO HUMAN/MOUSE KEYS?
assert gene_counts['ACTB']
|
[
"os.path.join",
"pandas.Series",
"numpy.unique"
] |
[((6573, 6639), 'numpy.unique', 'np.unique', (['intensities.coords[Features.TARGET]'], {'return_counts': '(True)'}), '(intensities.coords[Features.TARGET], return_counts=True)\n', (6582, 6639), True, 'import numpy as np\n'), ((6675, 6699), 'pandas.Series', 'pd.Series', (['counts', 'genes'], {}), '(counts, genes)\n', (6684, 6699), True, 'import pandas as pd\n'), ((1104, 1165), 'os.path.join', 'os.path.join', (['tempdir', '"""max_projected"""', '"""primary_images.json"""'], {}), "(tempdir, 'max_projected', 'primary_images.json')\n", (1116, 1165), False, 'import os\n'), ((1413, 1474), 'os.path.join', 'os.path.join', (['tempdir', '"""max_projected"""', '"""primary_images.json"""'], {}), "(tempdir, 'max_projected', 'primary_images.json')\n", (1425, 1474), False, 'import os\n'), ((1558, 1612), 'os.path.join', 'os.path.join', (['tempdir', '"""transforms"""', '"""transforms.json"""'], {}), "(tempdir, 'transforms', 'transforms.json')\n", (1570, 1612), False, 'import os\n'), ((2056, 2114), 'os.path.join', 'os.path.join', (['tempdir', '"""registered"""', '"""primary_images.json"""'], {}), "(tempdir, 'registered', 'primary_images.json')\n", (2068, 2114), False, 'import os\n'), ((2211, 2265), 'os.path.join', 'os.path.join', (['tempdir', '"""transforms"""', '"""transforms.json"""'], {}), "(tempdir, 'transforms', 'transforms.json')\n", (2223, 2265), False, 'import os\n'), ((2439, 2497), 'os.path.join', 'os.path.join', (['tempdir', '"""registered"""', '"""primary_images.json"""'], {}), "(tempdir, 'registered', 'primary_images.json')\n", (2451, 2497), False, 'import os\n'), ((2581, 2637), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""primary_images.json"""'], {}), "(tempdir, 'filtered', 'primary_images.json')\n", (2593, 2637), False, 'import os\n'), ((2948, 2996), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""nuclei.json"""'], {}), "(tempdir, 'filtered', 'nuclei.json')\n", (2960, 2996), False, 'import os\n'), ((3305, 3351), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""dots.json"""'], {}), "(tempdir, 'filtered', 'dots.json')\n", (3317, 3351), False, 'import os\n'), ((3580, 3636), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""primary_images.json"""'], {}), "(tempdir, 'filtered', 'primary_images.json')\n", (3592, 3636), False, 'import os\n'), ((3720, 3764), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""spots.nc"""'], {}), "(tempdir, 'results', 'spots.nc')\n", (3732, 3764), False, 'import os\n'), ((3853, 3899), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""dots.json"""'], {}), "(tempdir, 'filtered', 'dots.json')\n", (3865, 3899), False, 'import os\n'), ((4297, 4353), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""primary_images.json"""'], {}), "(tempdir, 'filtered', 'primary_images.json')\n", (4309, 4353), False, 'import os\n'), ((4437, 4485), 'os.path.join', 'os.path.join', (['tempdir', '"""filtered"""', '"""nuclei.json"""'], {}), "(tempdir, 'filtered', 'nuclei.json')\n", (4449, 4485), False, 'import os\n'), ((4563, 4614), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""label_image.png"""'], {}), "(tempdir, 'results', 'label_image.png')\n", (4575, 4614), False, 'import os\n'), ((4955, 5006), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""label_image.png"""'], {}), "(tempdir, 'results', 'label_image.png')\n", (4967, 5006), False, 'import os\n'), ((5095, 5139), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', 
'"""spots.nc"""'], {}), "(tempdir, 'results', 'spots.nc')\n", (5107, 5139), False, 'import os\n'), ((5223, 5276), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""targeted-spots.nc"""'], {}), "(tempdir, 'results', 'targeted-spots.nc')\n", (5235, 5276), False, 'import os\n'), ((5446, 5499), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""targeted-spots.nc"""'], {}), "(tempdir, 'results', 'targeted-spots.nc')\n", (5458, 5499), False, 'import os\n'), ((5650, 5702), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""decoded-spots.nc"""'], {}), "(tempdir, 'results', 'decoded-spots.nc')\n", (5662, 5702), False, 'import os\n'), ((5970, 6014), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""spots.nc"""'], {}), "(tempdir, 'results', 'spots.nc')\n", (5982, 6014), False, 'import os\n'), ((6164, 6217), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""targeted-spots.nc"""'], {}), "(tempdir, 'results', 'targeted-spots.nc')\n", (6176, 6217), False, 'import os\n'), ((6367, 6419), 'os.path.join', 'os.path.join', (['tempdir', '"""results"""', '"""decoded-spots.nc"""'], {}), "(tempdir, 'results', 'decoded-spots.nc')\n", (6379, 6419), False, 'import os\n')]
|
from mayavi import mlab as mayalab
import numpy as np
import os
def plot_pc(pcs,color=None,scale_factor=.05,mode='point'):
if color == 'red':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(1,0,0))
print("color",color)
elif color == 'blue':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,0,1))
elif color == 'green':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,1,0))
  elif color == 'cyan':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,1,1))
else:
    print("unknown color")
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=color)
def plot_pc_with_normal(pcs,pcs_n,scale_factor=1.0,color='red'):
if color == 'red':
mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], color=(1,0,0), mode='arrow',scale_factor=1.0)
elif color == 'blue':
mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], color=(0,0,1), mode='arrow',scale_factor=1.0)
elif color == 'green':
mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], color=(0,1,0), mode='arrow',scale_factor=1.0)
def plot_origin():
origin_pc = np.array([0.0,0.0,0.0]).reshape((-1,3))
  plot_pc(origin_pc,color='cyan',mode='sphere',scale_factor=.01)
origin_pcs = np.tile(origin_pc,(3,1))
origin_pcns = np.eye(3) * 0.01
plot_pc_with_normal(origin_pcs,origin_pcns)
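# Illustrative usage of the helpers above, a sketch added for clarity and not
# part of the original script (assumes mayavi is installed and a display is
# available):
#   pts = np.random.rand(1000, 3)
#   plot_pc(pts, color='green', mode='sphere', scale_factor=0.01)
#   plot_origin()
#   mayalab.show()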
if __name__ == '__main__':
#save_dir = '/home/lins/MetaGrasp/Data/BlensorResult/2056'
#gripper_name = '056_rho0.384015_azi1.000000_ele89.505854_theta0.092894_xcam0.000000_ycam0.000000_zcam0.384015_scale0.146439_xdim0.084960_ydim0.084567_zdim0.08411000000_pcn_new.npz.npy'
#gripper_name ='339_rho0.308024_azi6.000000_ele89.850030_theta-0.013403_xcam0.000000_ycam0.000000_zcam0.308024_scale0.061975_xdim0.048725_ydim0.036192_zdim0.01252500000_pcn.npz'
gripper = np.load(os.path.join("robotiq2f_open.npy"))
#plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.002)
plot_pc(gripper,color=(209/255.0,64/255.0,109/255.0),mode='sphere',scale_factor=0.002)
plot_origin()
mayalab.show()
#sle = np.array([1494,1806])
#plot_pc(gripper[sle],color='red',mode='sphere',scale_factor=0.002)
#mayalab.show()
#save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/interp'
#save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data3'
# #save_dir_gt = '/home/lins/MetaGrasp/Data/Gripper/Data'
save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data_DB/G5/f2_5_close.npy'
a = np.load(save_dir)
plot_pc(a)
save_dirb = '/home/lins/MetaGrasp/Data/Gripper/Data_DB/G3/f2_3_close.npy'
b = np.load(save_dirb)
plot_pc(b,color='red')
mayalab.show()
#for i in range(10001,10300):
# gripper_name = 'f2_'+str(i)+'_middel.npy'
#print(gripper_name)
# gripper = np.load(os.path.join(save_dir,gripper_name))
# plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.002)
# plot_origin()
# mayalab.show()
#save_dir_gt = '/home/lins/MetaGrasp/Data/Gripper/Data'
#gripper_gt = np.load(os.path.join(save_dir_gt,gripper_name))
#plot_pc(gripper_gt,color='red',mode='sphere',scale_factor=0.002)
if 0:
for i in range(0,199):
save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data_noR'
#save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/recon_old'
gripper_name = 'robotiq_3f_'+str(i)+'.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
if 0:
save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/interp'
gripper_name = 'kinova_kg3_0.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
gripper_name = 'robotiq_3f_1.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
save_dir = '/home/lins/MetaGrasp/meta_grasping/saved_results/interp'
gripper_name = 'middle0.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
gripper_name = 'middle1.npy'
print(gripper_name)
gripper = np.load(os.path.join(save_dir,gripper_name))
plot_pc(gripper,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
save_dir = '/home/lins/MetaGrasp/Data/Gripper/Data_noR'
gripper_name1 = 'kinova_kg3_0.npy'
print(gripper_name)
gripper1 = np.load(os.path.join(save_dir,gripper_name1))
plot_pc(gripper1,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
gripper_name2 = 'robotiq_3f_1.npy'
print(gripper_name)
gripper2 = np.load(os.path.join(save_dir,gripper_name2))
plot_pc(gripper2,color=(139/255.0,177/255.0,212/255.0),mode='sphere',scale_factor=0.01)
plot_origin()
mayalab.show()
|
[
"numpy.load",
"mayavi.mlab.quiver3d",
"mayavi.mlab.show",
"mayavi.mlab.points3d",
"numpy.array",
"numpy.tile",
"numpy.eye",
"os.path.join"
] |
[((1488, 1514), 'numpy.tile', 'np.tile', (['origin_pc', '(3, 1)'], {}), '(origin_pc, (3, 1))\n', (1495, 1514), True, 'import numpy as np\n'), ((2297, 2311), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (2309, 2311), True, 'from mayavi import mlab as mayalab\n'), ((2695, 2712), 'numpy.load', 'np.load', (['save_dir'], {}), '(save_dir)\n', (2702, 2712), True, 'import numpy as np\n'), ((2805, 2823), 'numpy.load', 'np.load', (['save_dirb'], {}), '(save_dirb)\n', (2812, 2823), True, 'import numpy as np\n'), ((2849, 2863), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (2861, 2863), True, 'from mayavi import mlab as mayalab\n'), ((150, 259), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(1, 0, 0)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(1, 0, 0))\n', (166, 259), True, 'from mayavi import mlab as mayalab\n'), ((867, 1008), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'color': '(1, 0, 0)', 'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], color=(1, 0, 0), mode='arrow', scale_factor=1.0)\n", (883, 1008), True, 'from mayavi import mlab as mayalab\n'), ((1529, 1538), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1535, 1538), True, 'import numpy as np\n'), ((2067, 2101), 'os.path.join', 'os.path.join', (['"""robotiq2f_open.npy"""'], {}), "('robotiq2f_open.npy')\n", (2079, 2101), False, 'import os\n'), ((4062, 4076), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4074, 4076), True, 'from mayavi import mlab as mayalab\n'), ((4302, 4316), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4314, 4316), True, 'from mayavi import mlab as mayalab\n'), ((4607, 4621), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4619, 4621), True, 'from mayavi import mlab as mayalab\n'), ((4842, 4856), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (4854, 4856), True, 'from mayavi import mlab as mayalab\n'), ((5143, 5157), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (5155, 5157), True, 'from mayavi import mlab as mayalab\n'), ((5387, 5401), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (5399, 5401), True, 'from mayavi import mlab as mayalab\n'), ((298, 407), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 0, 1)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 0, 1))\n', (314, 407), True, 'from mayavi import mlab as mayalab\n'), ((1032, 1173), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'color': '(0, 0, 1)', 'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], color=(0, 0, 1), mode='arrow', scale_factor=1.0)\n", (1048, 1173), True, 'from mayavi import mlab as mayalab\n'), ((1368, 1393), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1376, 1393), True, 'import numpy as np\n'), ((3742, 3756), 'mayavi.mlab.show', 'mayalab.show', ([], {}), '()\n', (3754, 3756), True, 'from mayavi import mlab as mayalab\n'), ((3917, 3953), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (3929, 3953), False, 
'import os\n'), ((4158, 4194), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (4170, 4194), False, 'import os\n'), ((4462, 4498), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (4474, 4498), False, 'import os\n'), ((4698, 4734), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (4710, 4734), False, 'import os\n'), ((4997, 5034), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name1'], {}), '(save_dir, gripper_name1)\n', (5009, 5034), False, 'import os\n'), ((5240, 5277), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name2'], {}), '(save_dir, gripper_name2)\n', (5252, 5277), False, 'import os\n'), ((422, 531), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 1, 0)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 1, 0))\n', (438, 531), True, 'from mayavi import mlab as mayalab\n'), ((1198, 1339), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'color': '(0, 1, 0)', 'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], color=(0, 1, 0), mode='arrow', scale_factor=1.0)\n", (1214, 1339), True, 'from mayavi import mlab as mayalab\n'), ((3594, 3630), 'os.path.join', 'os.path.join', (['save_dir', 'gripper_name'], {}), '(save_dir, gripper_name)\n', (3606, 3630), False, 'import os\n'), ((545, 654), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 1, 1)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 1, 1))\n', (561, 654), True, 'from mayavi import mlab as mayalab\n'), ((678, 783), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': 'color'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=color)\n', (694, 783), True, 'from mayavi import mlab as mayalab\n')]
|
import numpy as np
import torch
from agent.heuristics.util import get_agent_turn, wrapper, get_days, \
get_recent_byr_offers, get_last_norm
from agent.const import DELTA_SLR, NUM_COMMON_CONS
class HeuristicSlr:
def __init__(self, delta=None):
self.patient = np.isclose(delta, DELTA_SLR[-1])
def __call__(self, observation=None):
# noinspection PyProtectedMember
x = observation._asdict()
# turn number
turn = get_agent_turn(x=x, byr=False)
# index of action
f = wrapper(turn)
if turn == 2:
days = get_days(x=x, turn=turn)
tau = 5.05 if self.patient else 3.03
idx = f(0) if days <= tau else f(1)
elif turn == 4:
if self.patient:
days = get_days(x=x, turn=turn)
idx = f(0) if days <= 2.01 else f(.5)
else:
num_offers = get_recent_byr_offers(x=x, turn=turn)
idx = f(1) if num_offers <= .5 else f(0)
elif turn == 6:
if self.patient:
days4 = get_days(x=x, turn=4)
if days4 <= 2.01:
days6 = get_days(x=x, turn=6)
idx = f(0) if days6 <= 2.04 else f(1)
else:
norm = get_last_norm(x=x, turn=turn)
idx = f(.5) if norm <= .67 else f(1)
else:
idx = f(0)
else:
raise ValueError('Invalid turn: {}'.format(turn))
# deterministic categorical action distribution
pdf = torch.zeros(NUM_COMMON_CONS + 3, dtype=torch.float)
pdf[idx] = 1.
return pdf
|
[
"agent.heuristics.util.get_agent_turn",
"agent.heuristics.util.get_days",
"agent.heuristics.util.get_last_norm",
"numpy.isclose",
"torch.zeros",
"agent.heuristics.util.wrapper",
"agent.heuristics.util.get_recent_byr_offers"
] |
[((276, 308), 'numpy.isclose', 'np.isclose', (['delta', 'DELTA_SLR[-1]'], {}), '(delta, DELTA_SLR[-1])\n', (286, 308), True, 'import numpy as np\n'), ((465, 495), 'agent.heuristics.util.get_agent_turn', 'get_agent_turn', ([], {'x': 'x', 'byr': '(False)'}), '(x=x, byr=False)\n', (479, 495), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((535, 548), 'agent.heuristics.util.wrapper', 'wrapper', (['turn'], {}), '(turn)\n', (542, 548), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1580, 1631), 'torch.zeros', 'torch.zeros', (['(NUM_COMMON_CONS + 3)'], {'dtype': 'torch.float'}), '(NUM_COMMON_CONS + 3, dtype=torch.float)\n', (1591, 1631), False, 'import torch\n'), ((590, 614), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (598, 614), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((789, 813), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (797, 813), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((915, 952), 'agent.heuristics.util.get_recent_byr_offers', 'get_recent_byr_offers', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (936, 952), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1088, 1109), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': '(4)'}), '(x=x, turn=4)\n', (1096, 1109), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1172, 1193), 'agent.heuristics.util.get_days', 'get_days', ([], {'x': 'x', 'turn': '(6)'}), '(x=x, turn=6)\n', (1180, 1193), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n'), ((1301, 1330), 'agent.heuristics.util.get_last_norm', 'get_last_norm', ([], {'x': 'x', 'turn': 'turn'}), '(x=x, turn=turn)\n', (1314, 1330), False, 'from agent.heuristics.util import get_agent_turn, wrapper, get_days, get_recent_byr_offers, get_last_norm\n')]
|
from abc import abstractmethod
from numpy import random
from rec.base import ParametrizedObject
from rec.dataset.dataset import Dataset
class DatasetSplitter(ParametrizedObject):
@abstractmethod
def split(self, dataset):
assert isinstance(dataset, Dataset)
pass
def _prepare_target_datasets(self, dataset):
train = Dataset(dataset.name)
test = Dataset(dataset.name)
train.items = dataset.items
test.items = dataset.items
return train, test
class IdentitySplitter(DatasetSplitter):
"""
    Do not split the dataset at all.
    It returns the same object for both train and test.
    This implementation is mainly for testing purposes.
It shouldn't be used in a real-life training schedule.
"""
def split(self, dataset):
return dataset, dataset
class PreciseUserNumberDatasetSplitter(DatasetSplitter):
def __init__(self, train_size=0, test_size=0):
super(PreciseUserNumberDatasetSplitter, self).__init__()
self.train_size = train_size
self.test_size = test_size
def split(self, dataset):
super(PreciseUserNumberDatasetSplitter, self).split(dataset)
train, test = self._prepare_target_datasets(dataset)
n = 0
for u, u_sessions in list(dataset.sessions.items()):
if n <= self.train_size:
train.sessions[u] = u_sessions
elif n <= self.train_size + self.test_size:
test.sessions[u] = u_sessions
else:
break
n += len(u_sessions)
train._create_indexes()
test._create_indexes()
return train, test
class RandomSessionSplitter(DatasetSplitter):
def __init__(self, train_ratio=0.7):
super(RandomSessionSplitter, self).__init__()
        self.train_ratio = train_ratio
def split(self, dataset):
super(RandomSessionSplitter, self).split(dataset)
train, test = self._prepare_target_datasets(dataset)
        train_session_num = self.train_ratio * dataset.sessions_num()
user_session_ids = []
for u, u_sessions in list(dataset.sessions.items()):
for sid in u_sessions.keys():
user_session_ids.append((u, sid))
random.shuffle(user_session_ids)
for n in range(len(user_session_ids)):
u, sid = user_session_ids[n]
            out_dataset = train if n <= train_session_num else test
out_dataset.sessions[u][sid] = dataset.sessions[u][sid]
train._create_indexes()
test._create_indexes()
return train, test
class TimestampSessionSplitter(DatasetSplitter):
def __init__(self, split_sec=24 * 60 * 60):
super(TimestampSessionSplitter, self).__init__()
self.split_sec = split_sec
def split(self, dataset):
super(TimestampSessionSplitter, self).split(dataset)
train, test = self._prepare_target_datasets(dataset)
max_ts = self._get_max_timestamp(dataset)
threshold = max_ts - self.split_sec
for u, u_sessions in list(dataset.sessions.items()):
for sid, session in list(u_sessions.items()):
out_dataset = train if session.timestamp_end < threshold else test
out_dataset.sessions[u][sid] = dataset.sessions[u][sid]
train._create_indexes()
test._create_indexes()
return train, test
def _get_max_timestamp(self, dataset):
max_ts = 0
for u, u_sessions in list(dataset.sessions.items()):
for sid, session in list(u_sessions.items()):
if session.timestamp_end > max_ts:
max_ts = session.timestamp_end
return max_ts
class LastNPercentOfSessionsInDataset(DatasetSplitter):
def __init__(self, split_percent=.05):
self.split_percent = split_percent
def split(self, dataset):
all_sessions = dataset.all_sessions_list()
        all_sessions.sort(key=lambda s: s.timestamp_start)
split_num = len(all_sessions) * self.split_percent
train, test = self._prepare_target_datasets(dataset)
# iterate from last event till split is filled
for s in reversed(all_sessions):
out_dataset = train
if split_num > 0:
split_num -= 1
out_dataset = test
out_dataset.sessions[s.user_id][s.id] = s
train._create_indexes()
test._create_indexes()
return train, test
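# Illustrative usage, a sketch added for clarity (assumes `ds` is an already
# populated rec Dataset; the splitter classes come from this module):
#   splitter = RandomSessionSplitter(train_ratio=0.7)
#   train_ds, test_ds = splitter.split(ds)
#   assert train_ds.items is test_ds.items  # items are shared, only sessions are split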
|
[
"rec.dataset.dataset.Dataset",
"numpy.random.shuffle"
] |
[((356, 377), 'rec.dataset.dataset.Dataset', 'Dataset', (['dataset.name'], {}), '(dataset.name)\n', (363, 377), False, 'from rec.dataset.dataset import Dataset\n'), ((393, 414), 'rec.dataset.dataset.Dataset', 'Dataset', (['dataset.name'], {}), '(dataset.name)\n', (400, 414), False, 'from rec.dataset.dataset import Dataset\n'), ((2267, 2299), 'numpy.random.shuffle', 'random.shuffle', (['user_session_ids'], {}), '(user_session_ids)\n', (2281, 2299), False, 'from numpy import random\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.model_selection import train_test_split
def down_scale(x, scale=2):
    # reshape the order-2 batch (N, H*W) into an order-4 image tensor (N, H, W, 1)
h = int(np.sqrt(x.shape[1]))
img = x.astype("float32").reshape(x.shape[0], h, h, 1)
scaled_img = tf.nn.avg_pool(img, ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
h //= scale
return tf.reshape(scaled_img, [x.shape[0], h ** 2])
def quantize(x):
phi = tf.concat(
[tf.expand_dims(tf.cos(x) * np.pi/2, 2),
tf.expand_dims(tf.sin(x) * np.pi/2, 2)], 2)
return phi
def load_mnist(one_hot=True, random_state=42):
mnist = input_data.read_data_sets('MNIST_data/', one_hot=one_hot)
mnist_X = np.concatenate((mnist.train.images, mnist.test.images), axis=0)
mnist_y = np.concatenate((mnist.train.labels, mnist.test.labels), axis=0)
return train_test_split(mnist_X, mnist_y, test_size=0.2,
random_state=random_state)
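# Illustrative usage, a sketch added for clarity (assumes the TF1 graph/session
# semantics implied by tensorflow.examples.tutorials above):
#   train_X, test_X, train_y, test_y = load_mnist()
#   with tf.Session() as sess:
#       small = sess.run(down_scale(train_X[:32]))    # 784 -> 196 features (28x28 -> 14x14)
#       phi = sess.run(quantize(tf.constant(small)))  # shape (32, 196, 2)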
|
[
"tensorflow.sin",
"sklearn.model_selection.train_test_split",
"tensorflow.reshape",
"tensorflow.nn.avg_pool",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.cos",
"numpy.concatenate",
"numpy.sqrt"
] |
[((319, 418), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['img'], {'ksize': '[1, scale, scale, 1]', 'strides': '[1, scale, scale, 1]', 'padding': '"""VALID"""'}), "(img, ksize=[1, scale, scale, 1], strides=[1, scale, scale, 1\n ], padding='VALID')\n", (333, 418), True, 'import tensorflow as tf\n'), ((506, 550), 'tensorflow.reshape', 'tf.reshape', (['scaled_img', '[x.shape[0], h ** 2]'], {}), '(scaled_img, [x.shape[0], h ** 2])\n', (516, 550), True, 'import tensorflow as tf\n'), ((770, 827), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': 'one_hot'}), "('MNIST_data/', one_hot=one_hot)\n", (795, 827), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((842, 905), 'numpy.concatenate', 'np.concatenate', (['(mnist.train.images, mnist.test.images)'], {'axis': '(0)'}), '((mnist.train.images, mnist.test.images), axis=0)\n', (856, 905), True, 'import numpy as np\n'), ((920, 983), 'numpy.concatenate', 'np.concatenate', (['(mnist.train.labels, mnist.test.labels)'], {'axis': '(0)'}), '((mnist.train.labels, mnist.test.labels), axis=0)\n', (934, 983), True, 'import numpy as np\n'), ((996, 1072), 'sklearn.model_selection.train_test_split', 'train_test_split', (['mnist_X', 'mnist_y'], {'test_size': '(0.2)', 'random_state': 'random_state'}), '(mnist_X, mnist_y, test_size=0.2, random_state=random_state)\n', (1012, 1072), False, 'from sklearn.model_selection import train_test_split\n'), ((222, 241), 'numpy.sqrt', 'np.sqrt', (['x.shape[1]'], {}), '(x.shape[1])\n', (229, 241), True, 'import numpy as np\n'), ((615, 624), 'tensorflow.cos', 'tf.cos', (['x'], {}), '(x)\n', (621, 624), True, 'import tensorflow as tf\n'), ((664, 673), 'tensorflow.sin', 'tf.sin', (['x'], {}), '(x)\n', (670, 673), True, 'import tensorflow as tf\n')]
|
"""
This is a pseudo-public API for downstream libraries. We ask that downstream
authors
1) Try to avoid using internals directly altogether, and failing that,
2) Use only functions exposed here (or in core.internals)
"""
from __future__ import annotations
from collections import defaultdict
from typing import DefaultDict
import numpy as np
from pandas._libs.internals import BlockPlacement
from pandas._typing import (
ArrayLike,
Dtype,
)
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
pandas_dtype,
)
from pandas.core.arrays import DatetimeArray
from pandas.core.construction import extract_array
from pandas.core.indexes.api import Index
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
check_ndim,
ensure_block_shape,
extract_pandas_array,
get_block_type,
maybe_coerce_values,
new_block,
)
from pandas.core.internals.managers import (
BlockManager,
construction_error,
multi_blockify,
simple_blockify,
)
def make_block(
values, placement, klass=None, ndim=None, dtype: Dtype | None = None
) -> Block:
"""
This is a pseudo-public analogue to blocks.new_block.
We ask that downstream libraries use this rather than any fully-internal
APIs, including but not limited to:
- core.internals.blocks.make_block
- Block.make_block
- Block.make_block_same_class
- Block.__init__
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
values, dtype = extract_pandas_array(values, dtype, ndim)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype):
# pyarrow calls get here
values = DatetimeArray._simple_new(values, dtype=dtype)
if not isinstance(placement, BlockPlacement):
placement = BlockPlacement(placement)
ndim = maybe_infer_ndim(values, placement, ndim)
if is_datetime64tz_dtype(values.dtype):
# GH#41168 ensure we can pass 1D dt64tz values
values = extract_array(values, extract_numpy=True)
values = ensure_block_shape(values, ndim)
check_ndim(values, placement, ndim)
values = maybe_coerce_values(values)
return klass(values, ndim=ndim, placement=placement)
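# Illustrative usage, a sketch added for clarity and not part of pandas itself:
# a downstream library building a numeric block from a 2D ndarray, where
# `placement` lists the column locations the block occupies (its length must
# match values.shape[0] for 2D values).
#   >>> blk = make_block(np.arange(6).reshape(2, 3), placement=[0, 1])
#   >>> blk.ndim
#   2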
def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
"""
    If `ndim` is not provided, infer it from placement and values.
"""
if ndim is None:
# GH#38134 Block constructor now assumes ndim is not None
if not isinstance(values.dtype, np.dtype):
if len(placement) != 1:
ndim = 1
else:
ndim = 2
else:
ndim = values.ndim
return ndim
def create_block_manager_from_arrays(
arrays,
names: Index,
axes: list[Index],
consolidate: bool = True,
) -> BlockManager:
# Assertions disabled for performance
# assert isinstance(names, Index)
# assert isinstance(axes, list)
# assert all(isinstance(x, Index) for x in axes)
arrays = [extract_array(x, extract_numpy=True) for x in arrays]
try:
blocks = _form_blocks(arrays, names, axes, consolidate)
mgr = BlockManager(blocks, axes)
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
if consolidate:
mgr._consolidate_inplace()
return mgr
def _form_blocks(
arrays: list[ArrayLike], names: Index, axes: list[Index], consolidate: bool
) -> list[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
items_dict: DefaultDict[str, list] = defaultdict(list)
extra_locs = []
names_idx = names
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
# Assertion disabled for performance
# assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, v))
blocks: list[Block] = []
if len(items_dict["NumericBlock"]):
numeric_blocks = multi_blockify(
items_dict["NumericBlock"], consolidate=consolidate
)
blocks.extend(numeric_blocks)
if len(items_dict["DatetimeLikeBlock"]):
dtlike_blocks = multi_blockify(
items_dict["DatetimeLikeBlock"], consolidate=consolidate
)
blocks.extend(dtlike_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
DatetimeTZBlock(
ensure_block_shape(extract_array(array), 2),
placement=BlockPlacement(i),
ndim=2,
)
for i, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = simple_blockify(
items_dict["ObjectBlock"], np.object_, consolidate=consolidate
)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
CategoricalBlock(array, placement=BlockPlacement(i), ndim=2)
for i, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
ExtensionBlock(array, placement=BlockPlacement(i), ndim=2)
for i, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = new_block(block_values, placement=extra_locs, ndim=2)
blocks.append(na_block)
return blocks
|
[
"pandas.core.internals.blocks.extract_pandas_array",
"pandas.core.dtypes.common.pandas_dtype",
"numpy.empty",
"pandas.core.internals.blocks.new_block",
"pandas.core.internals.managers.simple_blockify",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.internals.managers.BlockManager",
"collections.defaultdict",
"pandas.core.internals.managers.multi_blockify",
"pandas.core.internals.blocks.ensure_block_shape",
"pandas.core.internals.blocks.maybe_coerce_values",
"pandas.core.arrays.DatetimeArray._simple_new",
"pandas.core.internals.blocks.check_ndim",
"pandas.core.internals.blocks.get_block_type",
"pandas.core.construction.extract_array",
"pandas._libs.internals.BlockPlacement"
] |
[((1554, 1595), 'pandas.core.internals.blocks.extract_pandas_array', 'extract_pandas_array', (['values', 'dtype', 'ndim'], {}), '(values, dtype, ndim)\n', (1574, 1595), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((2038, 2073), 'pandas.core.dtypes.common.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['values.dtype'], {}), '(values.dtype)\n', (2059, 2073), False, 'from pandas.core.dtypes.common import is_datetime64tz_dtype, pandas_dtype\n'), ((2244, 2279), 'pandas.core.internals.blocks.check_ndim', 'check_ndim', (['values', 'placement', 'ndim'], {}), '(values, placement, ndim)\n', (2254, 2279), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((2293, 2320), 'pandas.core.internals.blocks.maybe_coerce_values', 'maybe_coerce_values', (['values'], {}), '(values)\n', (2312, 2320), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((3745, 3762), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3756, 3762), False, 'from collections import defaultdict\n'), ((1513, 1532), 'pandas.core.dtypes.common.pandas_dtype', 'pandas_dtype', (['dtype'], {}), '(dtype)\n', (1525, 1532), False, 'from pandas.core.dtypes.common import is_datetime64tz_dtype, pandas_dtype\n'), ((1673, 1702), 'pandas.core.internals.blocks.get_block_type', 'get_block_type', (['values', 'dtype'], {}), '(values, dtype)\n', (1687, 1702), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((1951, 1976), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['placement'], {}), '(placement)\n', (1965, 1976), False, 'from pandas._libs.internals import BlockPlacement\n'), ((2147, 2188), 'pandas.core.construction.extract_array', 'extract_array', (['values'], {'extract_numpy': '(True)'}), '(values, extract_numpy=True)\n', (2160, 2188), False, 'from pandas.core.construction import extract_array\n'), ((2206, 2238), 'pandas.core.internals.blocks.ensure_block_shape', 'ensure_block_shape', (['values', 'ndim'], {}), '(values, ndim)\n', (2224, 2238), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((3173, 3209), 'pandas.core.construction.extract_array', 'extract_array', (['x'], {'extract_numpy': '(True)'}), '(x, extract_numpy=True)\n', (3186, 3209), False, 'from pandas.core.construction import extract_array\n'), ((3315, 3341), 'pandas.core.internals.managers.BlockManager', 'BlockManager', (['blocks', 'axes'], {}), '(blocks, axes)\n', (3327, 3341), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((4246, 4263), 'pandas.core.internals.blocks.get_block_type', 'get_block_type', (['v'], {}), '(v)\n', (4260, 4263), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, 
ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((4414, 4481), 'pandas.core.internals.managers.multi_blockify', 'multi_blockify', (["items_dict['NumericBlock']"], {'consolidate': 'consolidate'}), "(items_dict['NumericBlock'], consolidate=consolidate)\n", (4428, 4481), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((4612, 4684), 'pandas.core.internals.managers.multi_blockify', 'multi_blockify', (["items_dict['DatetimeLikeBlock']"], {'consolidate': 'consolidate'}), "(items_dict['DatetimeLikeBlock'], consolidate=consolidate)\n", (4626, 4684), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((5156, 5235), 'pandas.core.internals.managers.simple_blockify', 'simple_blockify', (["items_dict['ObjectBlock']", 'np.object_'], {'consolidate': 'consolidate'}), "(items_dict['ObjectBlock'], np.object_, consolidate=consolidate)\n", (5171, 5235), False, 'from pandas.core.internals.managers import BlockManager, construction_error, multi_blockify, simple_blockify\n'), ((5948, 5977), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'object'}), '(shape, dtype=object)\n', (5956, 5977), True, 'import numpy as np\n'), ((6032, 6085), 'pandas.core.internals.blocks.new_block', 'new_block', (['block_values'], {'placement': 'extra_locs', 'ndim': '(2)'}), '(block_values, placement=extra_locs, ndim=2)\n', (6041, 6085), False, 'from pandas.core.internals.blocks import Block, CategoricalBlock, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, new_block\n'), ((1833, 1879), 'pandas.core.arrays.DatetimeArray._simple_new', 'DatetimeArray._simple_new', (['values'], {'dtype': 'dtype'}), '(values, dtype=dtype)\n', (1858, 1879), False, 'from pandas.core.arrays import DatetimeArray\n'), ((1746, 1781), 'pandas.core.dtypes.common.is_datetime64tz_dtype', 'is_datetime64tz_dtype', (['values.dtype'], {}), '(values.dtype)\n', (1767, 1781), False, 'from pandas.core.dtypes.common import is_datetime64tz_dtype, pandas_dtype\n'), ((4876, 4896), 'pandas.core.construction.extract_array', 'extract_array', (['array'], {}), '(array)\n', (4889, 4896), False, 'from pandas.core.construction import extract_array\n'), ((4928, 4945), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['i'], {}), '(i)\n', (4942, 4945), False, 'from pandas._libs.internals import BlockPlacement\n'), ((5413, 5430), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['i'], {}), '(i)\n', (5427, 5430), False, 'from pandas._libs.internals import BlockPlacement\n'), ((5658, 5675), 'pandas._libs.internals.BlockPlacement', 'BlockPlacement', (['i'], {}), '(i)\n', (5672, 5675), False, 'from pandas._libs.internals import BlockPlacement\n')]
|
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
import os
import cv2
import random
import sklearn.model_selection as model_selection
import datetime
from model import createModel
from contextlib import redirect_stdout
categories = ["NonDemented", "MildDemented", "ModerateDemented", "VeryMildDemented"]
SIZE = 120
def getData():
rawdata = []
data = []
dir = "./data/"
for category in categories:
path = os.path.join(dir, category)
class_num = categories.index(category)
for img in os.listdir(path):
try:
rawdata = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
new_data = cv2.resize(rawdata, (SIZE, SIZE))
data.append([new_data, class_num])
            except Exception:
                # skip files that cannot be read or resized
                pass
random.shuffle(data)
img_data = []
img_labels = []
for features, label in data:
img_data.append(features)
img_labels.append(label)
img_data = np.array(img_data).reshape(-1, SIZE, SIZE, 1)
img_data = img_data / 255.0
img_labels = np.array(img_labels)
return img_data, img_labels
data, labels = getData()
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(data, labels, test_size=0.20)
train_data, val_data, train_labels, val_labels = model_selection.train_test_split(train_data, train_labels,test_size=0.10)
print(len(train_data), " ", len(train_labels), len(test_data), " ", len(test_labels))
model = createModel(train_data)
checkpoint = keras.callbacks.ModelCheckpoint(filepath='./model/model.h5', save_best_only=True, monitor='val_loss', mode='min')
opt = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"], )
# pass the checkpoint callback so the best weights (by val_loss) are saved during training
history = model.fit(train_data, train_labels, epochs=10,
                    validation_data=(val_data, val_labels),
                    callbacks=[checkpoint])
model.save('./model/model.h5')
test_loss, test_acc = model.evaluate(test_data, test_labels)
print("Model Accuracy: ", test_acc, "Model Loss: ", test_loss)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"random.shuffle",
"tensorflow.keras.callbacks.ModelCheckpoint",
"model.createModel",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((1254, 1315), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['data', 'labels'], {'test_size': '(0.2)'}), '(data, labels, test_size=0.2)\n', (1286, 1315), True, 'import sklearn.model_selection as model_selection\n'), ((1367, 1440), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['train_data', 'train_labels'], {'test_size': '(0.1)'}), '(train_data, train_labels, test_size=0.1)\n', (1399, 1440), True, 'import sklearn.model_selection as model_selection\n'), ((1536, 1559), 'model.createModel', 'createModel', (['train_data'], {}), '(train_data)\n', (1547, 1559), False, 'from model import createModel\n'), ((1574, 1692), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""./model/model.h5"""', 'save_best_only': '(True)', 'monitor': '"""val_loss"""', 'mode': '"""min"""'}), "(filepath='./model/model.h5', save_best_only\n =True, monitor='val_loss', mode='min')\n", (1605, 1692), False, 'from tensorflow import keras\n'), ((1695, 1737), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1716, 1737), False, 'from tensorflow import keras\n'), ((2108, 2145), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (2116, 2145), True, 'import matplotlib.pyplot as plt\n'), ((2146, 2187), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (2154, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2188, 2215), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (2197, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2238), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (2226, 2238), True, 'import matplotlib.pyplot as plt\n'), ((2239, 2258), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2249, 2258), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2306), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (2269, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2317), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2315, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2380), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (2355, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2418), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (2389, 2418), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2442), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (2428, 2442), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2453, 2461), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2472, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2529), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (2492, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(2538, 2540), True, 'import matplotlib.pyplot as plt\n'), ((851, 871), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (865, 871), False, 'import random\n'), ((1121, 1141), 'numpy.array', 'np.array', (['img_labels'], {}), '(img_labels)\n', (1129, 1141), True, 'import numpy as np\n'), ((464, 491), 'os.path.join', 'os.path.join', (['dir', 'category'], {}), '(dir, category)\n', (476, 491), False, 'import os\n'), ((558, 574), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (568, 574), False, 'import os\n'), ((1026, 1044), 'numpy.array', 'np.array', (['img_data'], {}), '(img_data)\n', (1034, 1044), True, 'import numpy as np\n'), ((704, 737), 'cv2.resize', 'cv2.resize', (['rawdata', '(SIZE, SIZE)'], {}), '(rawdata, (SIZE, SIZE))\n', (714, 737), False, 'import cv2\n'), ((630, 653), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (642, 653), False, 'import os\n')]
|
#!/usr/bin/env python
import sys
import os.path
from os.path import join as PJ
import re
import json
import math
import numpy as np
from tqdm import tqdm
import igraph as ig
import jgf
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def calcModularity(g):
if("Community" in g.vertex_attributes()):
Ci = reindexList(g.vs["Community"])
else:
return (None,None)
if("weight" in g.edge_attributes()):
return None, g.modularity(Ci, weights="weight");
else:
return None, g.modularity(Ci, weights=None);
def calcDegree(g):
results = np.array(g.degree(mode="ALL"))
return results, np.average(results)
def calcInDegree(g):
if(not g.is_directed()):
return (None,None)
results = np.array(g.indegree())
return results, np.average(results)
def calcOutDegree(g):
if(not g.is_directed()):
return (None,None)
results = np.array(g.outdegree())
return results, np.average(results)
def calcStrength(g):
if("weight" not in g.edge_attributes()):
return (None,None)
results = np.array(g.strength(mode="ALL", weights = "weight"))
return results, np.average(results)
def calcInStrength(g):
if("weight" not in g.edge_attributes() or not g.is_directed()):
return (None,None)
results = np.array(g.strength(mode="IN", weights = "weight"))
return results, np.average(results)
def calcOutStrength(g):
if("weight" not in g.edge_attributes() or not g.is_directed()):
return (None,None)
results = np.array(g.strength(mode="OUT", weights = "weight"))
return results, np.average(results)
def calcClusteringCoefficient(g):
# if("weight" in g.edge_attributes()):
results = g.transitivity_local_undirected(weights=None)
# else:
# results = g.transitivity_local_undirected(weights="weight")
    # Replace NaN clustering values (vertices with degree < 2) with 0 before returning
    return np.nan_to_num(results, nan=0.0), np.nanmean(results)
def calcCoreness(g):
results = np.array(g.coreness(mode="ALL"))
return results, None
def calcMatchIndex(g):
degree = np.array(g.degree())
matchIndex = np.zeros(g.ecount())
for id,e in enumerate(g.es):
node1,node2 = e.tuple
viz1 = g.neighbors(node1)
viz2 = g.neighbors(node2)
sharedNei = set(viz1) & set(viz2)
if ((degree[node1]+degree[node2]) > 2):
matchIndex[id] = len(sharedNei)/float(degree[node1]+degree[node2]-2)
else:
matchIndex[id] = 0
meanMatchIndex = np.mean(matchIndex)
return None, meanMatchIndex
def calcBetweenessCentrality(g):
result = np.array(g.betweenness(directed=g.is_directed()))
return result,np.average(result)
def calcBetweenessCentralityWeighted(g):
if("weight" not in g.edge_attributes()):
return (None,None)
result = np.array(g.betweenness(weights="weight"))
return result,np.average(result)
def calcBetweennessCentralization(G):
vnum = G.vcount()
if vnum < 3:
return None,0
denom = (vnum-1)*(vnum-2)
temparr = [2*i/denom for i in G.betweenness()]
max_temparr = max(temparr)
return None,sum(max_temparr-i for i in temparr)/(vnum-1)
def calcRichClubCoefficient(g, highest=True, scores=None, indices_only=False):
Trc = richClubPercentage
degree = np.array(g.degree())
edges = np.array(g.get_edgelist())
sourceDegree,targetDegree = degree[edges[:,0]],degree[edges[:,1]]
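    # Nodes whose degree is at or above the richClubPercentage-th percentile form the rich club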
dT = int(np.percentile(degree,Trc))
indNodes = np.nonzero(degree>=dT)[0]
indEdges = np.nonzero((sourceDegree>=dT)&(targetDegree>=dT))[0]
if (indNodes.size>1):
RC = 2.*indEdges.size/(indNodes.size*(indNodes.size-1))
else:
RC = 0
return None,RC
def calcDegreeAssortativity(g):
return None,g.assortativity_degree(directed=g.is_directed())
def calcDiameter(g):
if("weight" in g.edge_attributes()):
return None,g.diameter(directed=g.is_directed(),weights="weight")
else:
return None,g.diameter(directed=g.is_directed())
def reindexList(names,returnDict=False):
d = {ni: indi for indi, ni in enumerate(set(names))}
numbers = [d[ni] for ni in names]
if(returnDict):
return numbers,d
else:
return numbers
def getNeighborhoods(g,mode="ALL"):
if("weight" in g.edge_attributes()):
return [[(e.target,e["weight"]) if e.target!=i else (e.source,e["weight"]) for e in g.es[g.incident(i,mode=mode)]] for i in range(g.vcount())]
else:
return [[(e.target,1) if e.target!=i else (e.source,1) for e in g.es[g.incident(i,mode=mode)]] for i in range(g.vcount())]
def calcModuleDegreeZScore(g,mode="ALL"):
if("Community" in g.vertex_attributes()):
Ci = reindexList(g.vs["Community"])
else:
return (None,None)
neighs = getNeighborhoods(g,mode=mode)
cneighs = [[(Ci[vertexID],weigth) for vertexID,weigth in neigh] for neigh in neighs]
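    # kappa[i]: within-community degree (or strength) of node i; kappaSi collects these values per community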
kappa = np.zeros(g.vcount())
kappaSi = [[] for _ in range(max(Ci)+1)]
for i in range(g.vcount()):
kappa[i] = np.sum([weight for community,weight in cneighs[i] if community==Ci[i]])
kappaSi[Ci[i]].append(kappa[i])
avgKappaSi = np.zeros(max(Ci)+1)
stdKappaSi = np.zeros(max(Ci)+1)
for ci in range(len(kappaSi)):
avgKappaSi[ci] = np.average(kappaSi[ci])
stdKappaSi[ci] = np.std(kappaSi[ci])
zmodule = np.zeros(g.vcount())
for i in range(g.vcount()):
ci = Ci[i]
if(stdKappaSi[ci]>0):
zmodule[i] = (kappa[i]-avgKappaSi[ci])/stdKappaSi[ci]
return zmodule,None
def calcParticipationCoeff(g,mode="ALL"):
if("Community" in g.vertex_attributes()):
Ci = reindexList(g.vs["Community"])
else:
return (None,None)
neighs = getNeighborhoods(g,mode=mode)
cneighs = [[(Ci[vertexID],weigth) for vertexID,weigth in neigh] for neigh in neighs]
if("weight" in g.edge_attributes()):
degrees = np.array(g.strength(mode=mode,weights="weight"))
else:
degrees = np.array(g.degree(mode=mode))
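    # Participation coefficient: 1 - sum over communities s of (k_is / k_i)^2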
kappasi = np.zeros(g.vcount())
for i in range(g.vcount()):
nodeCommunities = set([community for community,weight in cneighs[i]])
communityDegrees = {community:0 for community in nodeCommunities}
for community,weight in cneighs[i]:
communityDegrees[community]+=weight
kappasi[i] = np.sum(np.power(list(communityDegrees.values()),2))
result = 1.0-kappasi/np.power(degrees,2.0)
result[degrees==0.0] = 0
return result,None
measurements = {
"Degree" : calcDegree,
"InDegree" : calcInDegree,
"OutDegree" : calcOutDegree,
"Strength" : calcStrength,
"InStrength" : calcInStrength,
"OutStrength" : calcOutStrength,
"ClusteringCoefficient" : calcClusteringCoefficient,
"Coreness" : calcCoreness,
"MatchIndex" : calcMatchIndex,
"BetweenessCentrality" : calcBetweenessCentrality,
"BetweenessCentralityWeighted" : calcBetweenessCentralityWeighted,
"BetweennessCentralization" : calcBetweennessCentralization,
"RichClubCoefficient" : calcRichClubCoefficient,
"DegreeAssortativity" : calcDegreeAssortativity,
"Diameter" : calcDiameter,
"ModuleDegreeZScore" : calcModuleDegreeZScore,
"ParticipationCoeff" : calcParticipationCoeff,
"Modularity" : calcModularity,
}
def isFloat(value):
if(value is None):
return False
try:
numericValue = float(value)
return np.isfinite(numericValue)
except ValueError:
return False
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
ret = int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
ret = float(obj)
elif isinstance(obj, (np.ndarray,)):
ret = obj.tolist()
else:
ret = json.JSONEncoder.default(self, obj)
if isinstance(ret, (float)):
if math.isnan(ret):
ret = None
if isinstance(ret, (bytes, bytearray)):
ret = ret.decode("utf-8")
return ret
results = {"errors": [], "warnings": [], "brainlife": [], "datatype_tags": [], "tags": []}
def warning(msg):
global results
results['warnings'].append(msg)
#results['brainlife'].append({"type": "warning", "msg": msg})
print(msg)
def error(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
def exitApp():
global results
with open("product.json", "w") as fp:
json.dump(results, fp, cls=NumpyEncoder)
if len(results["errors"]) > 0:
sys.exit(1)
else:
sys.exit()
def exitAppWithError(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
exitApp()
configFilename = "config.json"
argCount = len(sys.argv)
if(argCount > 1):
configFilename = sys.argv[1]
outputDirectory = "output"
outputFile = PJ(outputDirectory,"network.json.gz")
if(not os.path.exists(outputDirectory)):
os.makedirs(outputDirectory)
with open(configFilename, "r") as fd:
config = json.load(fd)
# "transform":"absolute", //"absolute" or "signed"
# "retain-weights":false,
# "threshold": "none"
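# Percentile used by calcRichClubCoefficient; may be overridden through config["richClubPercentage"]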
richClubPercentage = 90
if("richClubPercentage" in config):
    richClubPercentage = config["richClubPercentage"]
networks = jgf.igraph.load(config["network"], compressed=True)
outputNetworks = []
for network in tqdm(networks):
weighted = "weight" in network.edge_attributes()
hasCommunities = "Community" in network.vertex_attributes()
for measurement,measurementFunction in measurements.items():
nodePropData,networkPropData = measurementFunction(network)
if(nodePropData is not None):
network.vs[measurement] = nodePropData
if(networkPropData is not None):
if(nodePropData is not None): #Average measurement
network["Avg. "+measurement] = networkPropData
else:
network[measurement] = networkPropData
outputNetworks.append(network)
jgf.igraph.save(outputNetworks, outputFile, compressed=True)
exitApp()
|
[
"numpy.sum",
"numpy.nan_to_num",
"numpy.mean",
"os.path.join",
"numpy.nanmean",
"numpy.std",
"numpy.power",
"numpy.isfinite",
"json.JSONEncoder.default",
"json.dump",
"tqdm.tqdm",
"numpy.average",
"jgf.igraph.save",
"numpy.percentile",
"matplotlib.use",
"jgf.igraph.load",
"sys.exit",
"json.load",
"numpy.nonzero"
] |
[((200, 214), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (207, 214), True, 'import matplotlib as mpl\n'), ((8293, 8331), 'os.path.join', 'PJ', (['outputDirectory', '"""network.json.gz"""'], {}), "(outputDirectory, 'network.json.gz')\n", (8295, 8331), True, 'from os.path import join as PJ\n'), ((8695, 8746), 'jgf.igraph.load', 'jgf.igraph.load', (["config['network']"], {'compressed': '(True)'}), "(config['network'], compressed=True)\n", (8710, 8746), False, 'import jgf\n'), ((8784, 8798), 'tqdm.tqdm', 'tqdm', (['networks'], {}), '(networks)\n', (8788, 8798), False, 'from tqdm import tqdm\n'), ((9346, 9406), 'jgf.igraph.save', 'jgf.igraph.save', (['outputNetworks', 'outputFile'], {'compressed': '(True)'}), '(outputNetworks, outputFile, compressed=True)\n', (9361, 9406), False, 'import jgf\n'), ((2262, 2281), 'numpy.mean', 'np.mean', (['matchIndex'], {}), '(matchIndex)\n', (2269, 2281), True, 'import numpy as np\n'), ((8454, 8467), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (8463, 8467), False, 'import json\n'), ((606, 625), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (616, 625), True, 'import numpy as np\n'), ((747, 766), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (757, 766), True, 'import numpy as np\n'), ((889, 908), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (899, 908), True, 'import numpy as np\n'), ((1075, 1094), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (1085, 1094), True, 'import numpy as np\n'), ((1285, 1304), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (1295, 1304), True, 'import numpy as np\n'), ((1497, 1516), 'numpy.average', 'np.average', (['results'], {}), '(results)\n', (1507, 1516), True, 'import numpy as np\n'), ((1730, 1755), 'numpy.nan_to_num', 'np.nan_to_num', (['results', '(0)'], {}), '(results, 0)\n', (1743, 1755), True, 'import numpy as np\n'), ((1756, 1775), 'numpy.nanmean', 'np.nanmean', (['results'], {}), '(results)\n', (1766, 1775), True, 'import numpy as np\n'), ((2420, 2438), 'numpy.average', 'np.average', (['result'], {}), '(result)\n', (2430, 2438), True, 'import numpy as np\n'), ((2611, 2629), 'numpy.average', 'np.average', (['result'], {}), '(result)\n', (2621, 2629), True, 'import numpy as np\n'), ((3129, 3155), 'numpy.percentile', 'np.percentile', (['degree', 'Trc'], {}), '(degree, Trc)\n', (3142, 3155), True, 'import numpy as np\n'), ((3168, 3192), 'numpy.nonzero', 'np.nonzero', (['(degree >= dT)'], {}), '(degree >= dT)\n', (3178, 3192), True, 'import numpy as np\n'), ((3206, 3261), 'numpy.nonzero', 'np.nonzero', (['((sourceDegree >= dT) & (targetDegree >= dT))'], {}), '((sourceDegree >= dT) & (targetDegree >= dT))\n', (3216, 3261), True, 'import numpy as np\n'), ((4590, 4664), 'numpy.sum', 'np.sum', (['[weight for community, weight in cneighs[i] if community == Ci[i]]'], {}), '([weight for community, weight in cneighs[i] if community == Ci[i]])\n', (4596, 4664), True, 'import numpy as np\n'), ((4817, 4840), 'numpy.average', 'np.average', (['kappaSi[ci]'], {}), '(kappaSi[ci])\n', (4827, 4840), True, 'import numpy as np\n'), ((4860, 4879), 'numpy.std', 'np.std', (['kappaSi[ci]'], {}), '(kappaSi[ci])\n', (4866, 4879), True, 'import numpy as np\n'), ((6776, 6801), 'numpy.isfinite', 'np.isfinite', (['numericValue'], {}), '(numericValue)\n', (6787, 6801), True, 'import numpy as np\n'), ((7872, 7912), 'json.dump', 'json.dump', (['results', 'fp'], {'cls': 'NumpyEncoder'}), '(results, fp, cls=NumpyEncoder)\n', (7881, 7912), False, 
'import json\n'), ((7947, 7958), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7955, 7958), False, 'import sys\n'), ((7968, 7978), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7976, 7978), False, 'import sys\n'), ((5857, 5879), 'numpy.power', 'np.power', (['degrees', '(2.0)'], {}), '(degrees, 2.0)\n', (5865, 5879), True, 'import numpy as np\n'), ((7231, 7266), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (7255, 7266), False, 'import json\n')]
|
####################################################################
# #
# MD_plotting_toolkit, #
# a python package to visualize the results obtained from MD #
# #
# Written by <NAME> <<EMAIL>> #
# Copyright (c) 2021 University of Colorado Boulder #
# #
####################################################################
"""
Unit tests for the module `MD_plotting_toolkit.data_processing`.
"""
import os
import numpy as np
import MD_plotting_toolkit.data_processing as data_processing
current_path = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(current_path, "sample_inputs")
output_path = os.path.join(current_path, "sample_outputs")
fes_file = input_path + "/fes.dat"
potential_file = input_path + "/potential.xvg"
hills_corrupted = input_path + "/corrupted_HILLS"
dhdl_corrupted = input_path + "/corrupted_dhdl.xvg"
def test_read_2d_data():
# Case 1: readable by np.loadtxt
x1, y1 = data_processing.read_2d_data(fes_file)
# Case 2: not readable by np.loadtxt
x2, y2 = data_processing.read_2d_data(potential_file)
# Case 3: Non-default col_idx
x3, y3 = data_processing.read_2d_data(fes_file, col_idx=4)
# Here we only compare the first 5 elements to save up some space
x1, y1 = x1[:5], y1[:5]
x2, y2 = x2[:5], y2[:5]
x3, y3 = x3[:5], y3[:5]
# Expected results
xx1 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])
yy1 = np.array([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355])
xx2 = np.array([0, 2, 4, 6, 8])
yy2 = np.array(
[-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078]
)
xx3 = np.array([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])
yy3 = np.array(
[-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338]
)
np.testing.assert_array_almost_equal(x1, xx1)
np.testing.assert_array_almost_equal(y1, yy1)
np.testing.assert_array_almost_equal(x2, xx2)
np.testing.assert_array_almost_equal(y2, yy2)
np.testing.assert_array_almost_equal(x3, xx3)
np.testing.assert_array_almost_equal(y3, yy3)
def test_deduplicate_data():
    x1 = [2, 4, 6, 2, 7, 8, 4, 3]  # not the x-data for a typical time series
y1 = [1, 2, 3, 4, 5, 6, 7, 8]
# Below we test from reading the file to cleaning the data
x2, y2 = data_processing.read_2d_data(hills_corrupted) # PLUMED output
x3, y3 = data_processing.read_2d_data(dhdl_corrupted) # GROMACS output
x1, y1 = data_processing.deduplicate_data(x1, y1)
x2, y2 = data_processing.deduplicate_data(x2, y2)
x3, y3 = data_processing.deduplicate_data(x3, y3)
assert list(x1) == [6, 2, 7, 8, 4, 3]
assert list(y1) == [3, 4, 5, 6, 7, 8]
assert len(x2) == 3000
assert len(y2) == 3000
assert len(x3) == 1501
assert len(y3) == 1501
assert int(np.sum(np.diff(x2))) == (len(x2) - 1) * 1
assert int(np.sum(np.diff(x3))) == (len(x3) - 1) * 2
def test_scale_data():
f = 2
T = 300
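    # c1: kT in kJ/mol at temperature T, c2: degrees to radians, c3: kJ to kcal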
c1 = 1.38064852 * 6.022 * T / 1000
c2 = np.pi / 180
c3 = 0.239005736
data = np.random.rand(100)
conversion_dict = {
"ns to ps": 1000,
"ps to ns": 1 / 1000,
"kT to kJ/mol": c1,
"kJ/mol to kT": 1 / c1,
"kT to kcal/mol": c1 * c3,
"kcal/mol to kT": 1 / (c1 * c3),
"kJ/mol to kcal/mol": c3,
"kcal/mol to kJ/mol": 1 / c3,
"degree to radian": c2,
"radian to degree": 1 / c2,
}
np.testing.assert_array_almost_equal(data_processing.scale_data(data), data)
for i in conversion_dict:
expected = data * conversion_dict[i] * f
np.testing.assert_array_almost_equal(
data_processing.scale_data(data, i, f, T), expected
)
def test_slice_data():
data = np.arange(100)
data_unchaged = data_processing.slice_data(data)
data_1 = data_processing.slice_data(data, truncate=20)
data_2 = data_processing.slice_data(data, truncate_b=20)
data_3 = data_processing.slice_data(data, truncate=20, truncate_b=20)
np.testing.assert_equal(data, data_unchaged)
assert data_1[0] == 20
assert data_2[-1] == 19
assert data_3[0] == 20
assert data_3[-1] == 79
def test_analyze_data():
x = np.arange(100)
y = np.arange(100, 200)
outfile = output_path + "/test_output.txt"
# Test 1: When input data is not a time series
x_label = "Dihedral (deg)"
y_label = "Free energy (kT)"
data_processing.analyze_data(x, y, x_label, y_label, outfile)
line_1 = "Maximum of free energy: 199.000 kT, which occurs at 99.000 deg.\n"
line_2 = "Minimum of free energy: 100.000 kT, which occurs at 0.000 deg.\n"
texts = [line_1, line_2]
infile = open(outfile, "r")
lines = infile.readlines()
infile.close()
assert os.path.isfile(outfile) is True
assert texts == lines
os.remove(outfile)
# Test 2: When input data is a time series
x_label = "Time (ns)"
y_label = "Distance (nm)"
data_processing.analyze_data(x, y, x_label, y_label, outfile)
line_1 = (
"The average of distance: 149.500 (RMSF: 0.193, max: 199.000, min: 100.000)\n"
)
line_2 = "The maximum of distance occurs at 99.000 ns.\n"
line_3 = "The minimum of distance occurs at 0.000 ns.\n"
line_4 = "The distance (149.000 nm) at 49.000 ns is closet to the average.\n"
texts = [line_1, line_2, line_3, line_4]
infile = open(outfile, "r")
lines = infile.readlines()
infile.close()
assert os.path.isfile(outfile) is True
assert texts == lines
os.remove(outfile)
|
[
"os.path.abspath",
"os.remove",
"MD_plotting_toolkit.data_processing.deduplicate_data",
"MD_plotting_toolkit.data_processing.scale_data",
"MD_plotting_toolkit.data_processing.read_2d_data",
"MD_plotting_toolkit.data_processing.analyze_data",
"os.path.isfile",
"numpy.diff",
"numpy.array",
"numpy.arange",
"numpy.testing.assert_equal",
"MD_plotting_toolkit.data_processing.slice_data",
"numpy.random.rand",
"numpy.testing.assert_array_almost_equal",
"os.path.join"
] |
[((840, 883), 'os.path.join', 'os.path.join', (['current_path', '"""sample_inputs"""'], {}), "(current_path, 'sample_inputs')\n", (852, 883), False, 'import os\n'), ((898, 942), 'os.path.join', 'os.path.join', (['current_path', '"""sample_outputs"""'], {}), "(current_path, 'sample_outputs')\n", (910, 942), False, 'import os\n'), ((800, 825), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (815, 825), False, 'import os\n'), ((1205, 1243), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['fes_file'], {}), '(fes_file)\n', (1233, 1243), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((1299, 1343), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['potential_file'], {}), '(potential_file)\n', (1327, 1343), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((1392, 1441), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['fes_file'], {'col_idx': '(4)'}), '(fes_file, col_idx=4)\n', (1420, 1441), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((1631, 1705), 'numpy.array', 'np.array', (['[-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]'], {}), '([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])\n', (1639, 1705), True, 'import numpy as np\n'), ((1716, 1791), 'numpy.array', 'np.array', (['[-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355]'], {}), '([-0.00035355, -0.00035355, -0.00035355, -0.00035355, -0.00035355])\n', (1724, 1791), True, 'import numpy as np\n'), ((1802, 1827), 'numpy.array', 'np.array', (['[0, 2, 4, 6, 8]'], {}), '([0, 2, 4, 6, 8])\n', (1810, 1827), True, 'import numpy as np\n'), ((1838, 1928), 'numpy.array', 'np.array', (['[-20045.462891, -19989.603516, -19909.130859, -20057.402344, -19812.580078]'], {}), '([-20045.462891, -19989.603516, -19909.130859, -20057.402344, -\n 19812.580078])\n', (1846, 1928), True, 'import numpy as np\n'), ((1948, 2022), 'numpy.array', 'np.array', (['[-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524]'], {}), '([-3.14159265, -3.0787608, -3.01592895, -2.95309709, -2.89026524])\n', (1956, 2022), True, 'import numpy as np\n'), ((2033, 2126), 'numpy.array', 'np.array', (['[-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -8703.7556338]'], {}), '([-8778.4411543, -8765.49326731, -8748.15371253, -8727.40373623, -\n 8703.7556338])\n', (2041, 2126), True, 'import numpy as np\n'), ((2141, 2186), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x1', 'xx1'], {}), '(x1, xx1)\n', (2177, 2186), True, 'import numpy as np\n'), ((2191, 2236), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y1', 'yy1'], {}), '(y1, yy1)\n', (2227, 2236), True, 'import numpy as np\n'), ((2241, 2286), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x2', 'xx2'], {}), '(x2, xx2)\n', (2277, 2286), True, 'import numpy as np\n'), ((2291, 2336), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y2', 'yy2'], {}), '(y2, yy2)\n', (2327, 2336), True, 'import numpy as np\n'), ((2341, 2386), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x3', 'xx3'], {}), '(x3, xx3)\n', (2377, 2386), True, 'import numpy as np\n'), ((2391, 2436), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y3', 'yy3'], {}), '(y3, 
yy3)\n', (2427, 2436), True, 'import numpy as np\n'), ((2657, 2702), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['hills_corrupted'], {}), '(hills_corrupted)\n', (2685, 2702), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2733, 2777), 'MD_plotting_toolkit.data_processing.read_2d_data', 'data_processing.read_2d_data', (['dhdl_corrupted'], {}), '(dhdl_corrupted)\n', (2761, 2777), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2810, 2850), 'MD_plotting_toolkit.data_processing.deduplicate_data', 'data_processing.deduplicate_data', (['x1', 'y1'], {}), '(x1, y1)\n', (2842, 2850), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2864, 2904), 'MD_plotting_toolkit.data_processing.deduplicate_data', 'data_processing.deduplicate_data', (['x2', 'y2'], {}), '(x2, y2)\n', (2896, 2904), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((2918, 2958), 'MD_plotting_toolkit.data_processing.deduplicate_data', 'data_processing.deduplicate_data', (['x3', 'y3'], {}), '(x3, y3)\n', (2950, 2958), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((3405, 3424), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3419, 3424), True, 'import numpy as np\n'), ((4105, 4119), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4114, 4119), True, 'import numpy as np\n'), ((4140, 4172), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {}), '(data)\n', (4166, 4172), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4186, 4231), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {'truncate': '(20)'}), '(data, truncate=20)\n', (4212, 4231), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4245, 4292), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {'truncate_b': '(20)'}), '(data, truncate_b=20)\n', (4271, 4292), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4306, 4366), 'MD_plotting_toolkit.data_processing.slice_data', 'data_processing.slice_data', (['data'], {'truncate': '(20)', 'truncate_b': '(20)'}), '(data, truncate=20, truncate_b=20)\n', (4332, 4366), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((4372, 4416), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'data_unchaged'], {}), '(data, data_unchaged)\n', (4395, 4416), True, 'import numpy as np\n'), ((4562, 4576), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4571, 4576), True, 'import numpy as np\n'), ((4585, 4604), 'numpy.arange', 'np.arange', (['(100)', '(200)'], {}), '(100, 200)\n', (4594, 4604), True, 'import numpy as np\n'), ((4772, 4833), 'MD_plotting_toolkit.data_processing.analyze_data', 'data_processing.analyze_data', (['x', 'y', 'x_label', 'y_label', 'outfile'], {}), '(x, y, x_label, y_label, outfile)\n', (4800, 4833), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((5182, 5200), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (5191, 5200), False, 'import os\n'), ((5309, 5370), 'MD_plotting_toolkit.data_processing.analyze_data', 'data_processing.analyze_data', (['x', 'y', 'x_label', 'y_label', 'outfile'], {}), '(x, y, x_label, y_label, outfile)\n', (5337, 5370), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((5886, 5904), 'os.remove', 
'os.remove', (['outfile'], {}), '(outfile)\n', (5895, 5904), False, 'import os\n'), ((3830, 3862), 'MD_plotting_toolkit.data_processing.scale_data', 'data_processing.scale_data', (['data'], {}), '(data)\n', (3856, 3862), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((5120, 5143), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (5134, 5143), False, 'import os\n'), ((5824, 5847), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (5838, 5847), False, 'import os\n'), ((4007, 4048), 'MD_plotting_toolkit.data_processing.scale_data', 'data_processing.scale_data', (['data', 'i', 'f', 'T'], {}), '(data, i, f, T)\n', (4033, 4048), True, 'import MD_plotting_toolkit.data_processing as data_processing\n'), ((3174, 3185), 'numpy.diff', 'np.diff', (['x2'], {}), '(x2)\n', (3181, 3185), True, 'import numpy as np\n'), ((3231, 3242), 'numpy.diff', 'np.diff', (['x3'], {}), '(x3)\n', (3238, 3242), True, 'import numpy as np\n')]
|
from pytrigno import TrignoAccel
from pytrigno import TrignoEMG
from pytrigno import TrignoOrientation
import numpy as np
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
#Reading one sensor accel data:
#t=TrignoAccel(channel_range=(0,2),samples_per_read=100) #limit channels to 3 (0,1,2 according to accel_x, accel_y, accel_z)
#t.start()
#data=t.read()
#t.stop()
#print(data.shape, data.sum())
#print(data)
sensors_number = 1
acc_channels = 3*sensors_number
emg_channels = sensors_number
orientation_channels = 4*sensors_number #for quaternion
orientation = TrignoOrientation(channel_range=(0,orientation_channels-1),samples_per_read=100)
#
#orientation.pair_sensor(1)
#print('Place the sensor on the base station magnet to pair')
#time.sleep(5)
#orientation.is_paired(1)
#orientation.is_active(1)
orientation.start()
orientation.what_mode(1)
fig, axs = plt.subplots(3)
xs = []
ys = []
r = []
p = []
y = []
def animate(i, xs, r, p, y):
start_time = time.time()
data = orientation.read()
if any([data[0,-1], data[1,-1], data[2,-1],data[3,-1]]):
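        # Convert the latest quaternion sample to Euler angles (degrees) for plotting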
orientation_quat = R.from_quat([data[0,-1], data[1,-1], data[2,-1],data[3,-1]])
#orientation_quats = R.from_quat(np.transpose([data[0, :], data[1, :], data[2, :], data[3, :]]))
#iters=any([data[0, :], data[1, :], data[2, :], data[3, :]])
orientation_rpy = orientation_quat.as_euler('zyx', degrees=True)
r.append(orientation_rpy[0])
p.append(orientation_rpy[1])
y.append(orientation_rpy[2])
print(np.shape(data))
#acc_x.extend(data[0,:])
#acc_y.extend(data[1,:])
#acc_z.extend(data[2,:])
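    # Plot only the most recent 1000 samples per axis (the slices rebind local names, so the module-level lists keep growing)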
r = r[-1000:]
p = p[-1000:]
y = y[-1000:]
axs[0].clear()
axs[1].clear()
axs[2].clear()
axs[0].plot(r)
axs[1].plot(p)
axs[2].plot(y)
print("--- %f seconds ---" % (time.time() - start_time))
ani = animation.FuncAnimation(fig, animate, fargs=(xs, r, p, y), interval= 100)
plt.show()
orientation.stop()
|
[
"matplotlib.pyplot.show",
"time.time",
"matplotlib.animation.FuncAnimation",
"numpy.shape",
"pytrigno.TrignoOrientation",
"scipy.spatial.transform.Rotation.from_quat",
"matplotlib.pyplot.subplots"
] |
[((646, 734), 'pytrigno.TrignoOrientation', 'TrignoOrientation', ([], {'channel_range': '(0, orientation_channels - 1)', 'samples_per_read': '(100)'}), '(channel_range=(0, orientation_channels - 1),\n samples_per_read=100)\n', (663, 734), False, 'from pytrigno import TrignoOrientation\n'), ((943, 958), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {}), '(3)\n', (955, 958), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2072), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'fargs': '(xs, r, p, y)', 'interval': '(100)'}), '(fig, animate, fargs=(xs, r, p, y), interval=100)\n', (2023, 2072), True, 'import matplotlib.animation as animation\n'), ((2074, 2084), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2082, 2084), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1055), 'time.time', 'time.time', ([], {}), '()\n', (1053, 1055), False, 'import time\n'), ((1175, 1240), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['[data[0, -1], data[1, -1], data[2, -1], data[3, -1]]'], {}), '([data[0, -1], data[1, -1], data[2, -1], data[3, -1]])\n', (1186, 1240), True, 'from scipy.spatial.transform import Rotation as R\n'), ((1608, 1622), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1616, 1622), True, 'import numpy as np\n'), ((1965, 1976), 'time.time', 'time.time', ([], {}), '()\n', (1974, 1976), False, 'import time\n')]
|
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Supervised Dataset
- Class responsible for using a training and validation dataset to feed data to the model through tf.data.dataset
"""
from enum import Enum
import logging
import os
import math
import multiprocessing
import pickle
import numpy as np
from diplomacy_research.settings import WORKING_DIR
# Constants
LOGGER = logging.getLogger(__name__)
class TrainingMode(Enum):
""" Enumeration of training modes """
TRAINING = 'train'
VALIDATION = 'valid'
class SupervisedDataset():
""" This object is responsible for generating entries to feed the model (using the tf.data.dataset API) """
# pylint: disable=too-many-instance-attributes
def __init__(self, batch_size, dataset_builder, checkpoint_dir='', cluster_config=None, debug_batch=False,
no_iterator=False, do_infinite_training=False, perc_epoch_for_training=1.):
""" Constructor
:param batch_size: The size of a batch per tower
:param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods
:param checkpoint_dir: The directory where the status is to be saved. None to disable, '' for default dir.
:param cluster_config: Optional. If set, the cluster configuration will be used for distributed training.
:param debug_batch: Boolean flag to indicate to return the same batch over-and-over to debug our model
:param no_iterator: Boolean flag that indicates to not create an iterator (it will be loaded from a ckpt)
:param do_infinite_training: If set, supervised training will loop over the training set forever
and will not switch to the validation set.
:param perc_epoch_for_training: If set, the training epoch will be for this percentage of available steps
before running another evaluation epoch (e.g. 2.5% train, valid, 2.5% train, ...)
:type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder
:type cluster_config: diplomacy_research.utils.cluster.ClusterConfig
"""
# pylint: disable=too-many-arguments
self._batch_size = batch_size
self.dataset_builder = dataset_builder
self.checkpoint_dir = checkpoint_dir if checkpoint_dir != '' else WORKING_DIR # None = disabled
self.cluster_config = cluster_config
self.debug_batch = debug_batch
self.no_iterator = no_iterator
self.perc_epoch_for_training = 1.00 if do_infinite_training else max(1e-3, min(1., perc_epoch_for_training))
self.do_infinite_training = do_infinite_training
self.is_closing = False
self.session = None
# Creating empty datasets
self.training_dataset = None
self.validation_dataset = None
self.feedable_dataset = None
# Creating iterator with init ops
self.iterator = None
self._iterator_initialized = False
self.training_init_op = None
self.validation_init_op = None
self.output_features = None # This represents iterator.get_next()
self.default_features = {} # Will be used as default if features are missing from queue
# Steps
self.nb_batches_to_skip = 0 # Nb of batches to skip
self.steps_in_current_mode = 0 # Step count in current mode
self.training_progress = 0.
# Number of items remaining in epoch
self.total_nb_items_training_proto = 0
self.total_nb_items_valid_proto = 0
self.training_mode = TrainingMode.TRAINING
self.nb_completed_epochs = 0
self._dataset_is_done = False
# Loading number of items remaining
if os.path.exists(self.dataset_builder.dataset_index_path) \
and os.path.getsize(self.dataset_builder.dataset_index_path):
with open(self.dataset_builder.dataset_index_path, 'rb') as dataset_index:
dataset_index = pickle.load(dataset_index)
self.total_nb_items_training_proto = dataset_index['size_train_dataset']
self.total_nb_items_valid_proto = dataset_index['size_valid_dataset']
# Building the datasets
self.build()
@property
def can_support_iterator(self):
""" Determines if the dataset can support an iterator or if it is a remote (RPC) dataset """
return True
@property
def batch_size(self):
""" Getter for batch_size """
return self._batch_size
@batch_size.setter
def batch_size(self, value):
""" Setter for batch_size """
        if self.num_shards > 1:
raise RuntimeError('You cannot change the batch_size when using shards')
self._batch_size = value
@property
def num_shards(self):
""" Returns the number of shards (if a cluster config is set), otherwise None """
return self.cluster_config.num_shards if self.cluster_config else 1
@property
def nb_training_steps_per_epoch(self):
""" Returns the number of training steps per epoch """
nb_items_per_epoch = self.perc_epoch_for_training * self.total_nb_items_training_proto
return int(math.ceil(nb_items_per_epoch / (self.batch_size * self.num_shards)))
@property
def nb_training_steps_per_full_epoch(self): # pylint: disable=invalid-name
""" Returns the number of training steps per full epoch """
return int(math.ceil(self.total_nb_items_training_proto / (self.batch_size * self.num_shards)))
@property
def nb_validation_steps_per_epoch(self):
""" Returns the number of validation steps per epoch """
return int(math.ceil(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards)))
@property
def nb_total_steps_per_epoch(self):
""" Returns the total number of training and validation steps per epoch """
return self.nb_training_steps_per_epoch + self.nb_validation_steps_per_epoch
@property
def nb_steps_per_epoch_current_mode(self):
""" Returns the number of steps per epoch in the current mode (Training / Validation) """
if self.training_mode == TrainingMode.VALIDATION:
return self.nb_validation_steps_per_epoch
return self.nb_training_steps_per_epoch
@property
def iterator_initialized(self):
""" Determine if the iterator has been initialized """
return self._iterator_initialized
@property
def status_path(self):
""" Path to the status file on disk (where progress is saved) """
if not self.checkpoint_dir:
return None
if not self.cluster_config:
return os.path.join(self.checkpoint_dir, 'status.pkl')
return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.cluster_config.task_id)
@property
def chief_status_path(self):
""" Path to the chief status path (to validate our status) """
if not self.cluster_config:
return None
return os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)
@property
def fallback_status_path(self):
""" Path to an alternate status file if the primary is not available """
fallbacks = [os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0),
os.path.join(self.checkpoint_dir, 'status.pkl')]
for fallback in fallbacks:
if os.path.exists(fallback):
return fallback
return None
@property
def is_done(self):
""" Returns True if the end of file has been reached """
if self.do_infinite_training:
return False
return self._dataset_is_done or self.steps_in_current_mode >= self.nb_steps_per_epoch_current_mode
def take_local_step(self):
""" Increments the local step counter """
if not self.is_done or self.do_infinite_training:
self.steps_in_current_mode += 1
if self.training_mode == TrainingMode.TRAINING:
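                # training_progress is the fraction of a full training epoch completed, wrapping back to 0 at each epoch boundary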
self.training_progress = (self.training_progress + 1. / self.nb_training_steps_per_full_epoch) % 1
def mark_as_done(self):
""" Marks the dataset as having reached the end of the file"""
self._dataset_is_done = True
def build(self):
""" Builds the TensorFlow datasets """
from diplomacy_research.utils.tensorflow import tf
assert 'request_id' in self.dataset_builder.get_proto_fields(), 'You need to have a "request_id" field.'
# Training dataset
self.training_dataset = tf.data.TFRecordDataset(self.dataset_builder.training_dataset_path,
compression_type='GZIP')
# Debug (batch) mode
# Only taking one batch and looping over that batch forever
if self.debug_batch:
self.training_dataset = self.training_dataset.take(self.batch_size)
self.training_dataset = self.training_dataset.repeat(count=-1)
# Regular mode
# Otherwise, sharding and shuffling the dataset
# Repeating to make sure all workers can loop on the dataset at all times
else:
if self.cluster_config and self.num_shards > 1:
LOGGER.info('Sharding dataset. There are %d shards. Current shard index: #%d.',
self.cluster_config.num_shards, self.cluster_config.shard_index)
shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,
shard_index=self.cluster_config.shard_index)
self.training_dataset = self.training_dataset.apply(shard_fn)
self.training_dataset = self.training_dataset.repeat()
self.training_dataset = self.training_dataset.shuffle(100 * self.batch_size)
# Batching with prefetching
self.training_dataset = self.training_dataset.map(self.dataset_builder.parse_function,
num_parallel_calls=multiprocessing.cpu_count())
self.training_dataset = self.training_dataset.prefetch(100 * self.batch_size)
self.training_dataset = self.training_dataset.padded_batch(self.batch_size,
padded_shapes=self.dataset_builder.padded_shapes)
# Building a list of generic default values from the output types and output shapes
self.default_features = {}
for feature_name, feature_shape in self.dataset_builder.output_shapes.items():
if self.dataset_builder.output_types[feature_name] == np.object:
self.default_features[feature_name] = bytes('', 'utf-8')
else:
dtype = self.dataset_builder.output_types[feature_name]
self.default_features[feature_name] = np.zeros(shape=feature_shape[1:], dtype=dtype)
# -----------------------------
# Validation dataset
self.validation_dataset = tf.data.TFRecordDataset(self.dataset_builder.validation_dataset_path,
compression_type='GZIP')
# Sharding, but no need to shuffle
if self.cluster_config and self.num_shards > 1:
shard_fn = tf.data.experimental.filter_for_shard(num_shards=self.cluster_config.num_shards,
shard_index=self.cluster_config.shard_index)
self.validation_dataset = self.validation_dataset.apply(shard_fn)
# Batching with prefetching
self.validation_dataset = self.validation_dataset.map(self.dataset_builder.parse_function,
num_parallel_calls=multiprocessing.cpu_count())
self.validation_dataset = self.validation_dataset.prefetch(20 * self.batch_size)
self.validation_dataset = self.validation_dataset.padded_batch(self.batch_size,
padded_shapes=self.dataset_builder.padded_shapes)
# Creating iterator (with a new iterator_resource), unless specified otherwise
if not self.no_iterator:
self.create_iterator()
def create_iterator(self, iterator_resource=None, shared_name=None, features=None):
""" Creates an iterator object (optionally using a shared name and a specific iterator resource)
:param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator.
:param shared_name: Optional. If non-empty, this iterator will be shared under the given name across
multiple sessions that share the same devices (e.g. when using a remote server).
:param features: If an iterator_resource is specified, this corresponds to the output of iterator.get_next()
:return: Nothing, but sets the self.iterator, self.features, and dataset init_ops
"""
if iterator_resource is not None and not self.no_iterator:
LOGGER.error('An iterator resource can only be set if the dataset was created with the "no_iterator" flag.')
raise RuntimeError("Cannot create new iterator")
if iterator_resource is not None and features is None:
LOGGER.error('The iterator features are required when reloading a saved iterator.')
raise ValueError()
# Loading TensorFlow
from diplomacy_research.utils.tensorflow import tf
output_types = self.training_dataset.output_types
output_shapes = self.training_dataset.output_shapes
output_classes = self.training_dataset.output_classes
        # Making sure the iterator is on the right device/worker
with tf.device(self.cluster_config.iterator_device if self.cluster_config else None):
# We have an iterator resource, so we use it
if iterator_resource is not None:
self.iterator = tf.data.Iterator(iterator_resource=iterator_resource,
initializer=None,
output_types=output_types,
output_shapes=output_shapes,
output_classes=output_classes)
if features:
self.output_features = features
# Otherwise, we create a brand new iterator
else:
self.iterator = tf.data.Iterator.from_structure(output_types=output_types,
output_shapes=output_shapes,
output_classes=output_classes,
shared_name=shared_name)
self.output_features = self.iterator.get_next()
# Generating init op for each dataset
# Using different names because we can't define initializers with the same name
self._iterator_initialized = False
self.training_init_op = self.iterator.make_initializer(self.training_dataset)
self.validation_init_op = self.iterator.make_initializer(self.validation_dataset)
def initialize_iterator(self, session):
""" Initializes the current iterator
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
# We haven't created an iterator yet
if self.iterator is None:
return
# Loading TensorFlow
from diplomacy_research.utils.tensorflow import tf
# Running init_op
# If session is wrapped, executing it without hooks
init_op = {TrainingMode.TRAINING: self.training_init_op,
TrainingMode.VALIDATION: self.validation_init_op}[self.training_mode]
if hasattr(session, 'run_step_fn'):
session.run_step_fn(lambda step_context: step_context.session.run(init_op))
else:
session.run(init_op)
self._iterator_initialized = True
self._dataset_is_done = False
# For validation set, we can reset the steps since we are always starting from the beginning
# For training, we might resume mid-epoch (from load_status()) - So we keep the current value
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
# Resuming by skipping a certain number of already processed items
if self.nb_batches_to_skip:
LOGGER.info('Resuming training by skipping %d batches in the training dataset.', self.nb_batches_to_skip)
try:
for _ in range(self.nb_batches_to_skip):
if hasattr(session, 'run_step_fn'):
session.run_step_fn(
lambda step_context: step_context.session.run(self.output_features['request_id']))
else:
session.run(self.output_features['request_id'])
except tf.errors.OutOfRangeError:
self.mark_as_done()
self.nb_batches_to_skip = 0
def start_training_mode(self, session):
""" Starts the dataset in training mode
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
if self.is_done:
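            # A full pass in the previous mode just finished; count it as a completed epoch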
self.nb_completed_epochs += 1
self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)
self.training_mode = TrainingMode.TRAINING
self.steps_in_current_mode = 0
self.initialize_iterator(session)
def start_validation_mode(self, session):
""" Starts the dataset in validation mode
:param session: The session used to initialize the init op
:type session: tensorflow.python.client.session.Session
"""
if self.do_infinite_training:
LOGGER.error('Dataset is currently in "infinite training" mode. Only the training set can be accessed.')
raise RuntimeError('Invalid training mode specified.')
self.training_mode = TrainingMode.VALIDATION
self.steps_in_current_mode = 0
self.initialize_iterator(session)
def get_progress(self):
""" Returns the number of completed epochs, and the current % of the epoch completed """
if self.do_infinite_training:
self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)
perc_epoch_completed = self.steps_in_current_mode / self.nb_steps_per_epoch_current_mode
return self.nb_completed_epochs, perc_epoch_completed
def save_status(self):
""" Save current status to file to be able to resume later """
# Not saving status if checkpoint_dir is None
if not self.status_path:
return
# Recomputing nb of completed epochs when doing infinite training
if self.do_infinite_training:
self.nb_completed_epochs = int(self.steps_in_current_mode / self.nb_training_steps_per_full_epoch)
# Creating directory and saving
if not os.path.exists(os.path.dirname(self.status_path)):
os.makedirs(os.path.dirname(self.status_path), exist_ok=True)
status = {'training_mode': self.training_mode,
'nb_completed_epochs': self.nb_completed_epochs,
'steps_current_mode': self.steps_in_current_mode,
'training_progress': self.training_progress,
'num_shards': self.num_shards}
with open(self.status_path, 'wb') as file:
pickle.dump(status, file, pickle.HIGHEST_PROTOCOL)
def load_status(self):
""" Loads dataset status from disk and resume where we were """
status = {}
status_loaded = False
# Not loading status if checkpoint_dir is None.
if not self.status_path:
return
# Trying to load from primary path
if os.path.exists(self.status_path) and os.path.getsize(self.status_path):
with open(self.status_path, 'rb') as status:
status = pickle.load(status)
# Detecting num of shards change and deleting file if that's the case
if self.num_shards == status['num_shards']:
status_loaded = True
else:
LOGGER.info('Number of shards has changed from %d to %d', status['num_shards'], self.num_shards)
# If we are chief, we do a cleanup on the status folder
if self.cluster_config and self.cluster_config.is_chief:
for status_ix in range(self.num_shards, status['num_shards']):
if os.path.exists(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)):
os.unlink(os.path.join(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix))
# Otherwise, we just delete the worker status file
else:
os.unlink(self.status_path)
# We load the fallback status
if not status_loaded and self.fallback_status_path:
try:
with open(self.fallback_status_path, 'rb') as status:
status = pickle.load(status)
status_loaded = True
except EOFError:
pass
# We load the chief status to validate that we have the same training_mode and nb_epochs
if self.cluster_config and os.path.exists(self.chief_status_path) and os.path.getsize(self.chief_status_path):
with open(self.chief_status_path, 'rb') as chief_status:
chief_status = pickle.load(chief_status)
else:
chief_status = status
# We couldn't find a status file to load, aborting
if not status_loaded:
return
# If we have the same value as the chief, we load our status, otherwise we use the chief
use_own_status = ((status['training_mode'] == chief_status['training_mode'])
and status['nb_completed_epochs'] == chief_status['nb_completed_epochs'])
# Loading status
self._iterator_initialized = False
if use_own_status:
self.training_mode = status['training_mode']
self.nb_completed_epochs = status['nb_completed_epochs']
self.steps_in_current_mode = status['steps_current_mode']
self.training_progress = status['training_progress']
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
else:
LOGGER.warning('Status between worker and chief does not match. Resuming using chief status.')
self.training_mode = chief_status['training_mode']
self.nb_completed_epochs = chief_status['nb_completed_epochs']
self.steps_in_current_mode = chief_status['steps_current_mode']
self.training_progress = chief_status['training_progress']
if self.training_mode == TrainingMode.VALIDATION:
self.steps_in_current_mode = 0
# If we were training the train dataset, we need to skip a certain number of batches
# to get to the same training point
if self.training_mode == TrainingMode.TRAINING:
self.nb_batches_to_skip = int(self.training_progress * self.nb_training_steps_per_full_epoch)
def make_session_run_hook(self):
""" Builds a SessionRunHook for the MonitoredTrainingSession object """
from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook
return SupervisedDatasetSessionRunHook(self)
def close(self):
""" Stops iterating the dataset """
self.is_closing = True
self.training_dataset = None
self.validation_dataset = None
|
[
"pickle.dump",
"diplomacy_research.utils.tensorflow.tf.data.Iterator.from_structure",
"os.unlink",
"diplomacy_research.utils.tensorflow.tf.device",
"math.ceil",
"os.path.getsize",
"os.path.dirname",
"diplomacy_research.utils.tensorflow.tf.data.TFRecordDataset",
"os.path.exists",
"numpy.zeros",
"pickle.load",
"diplomacy_research.utils.tensorflow.tf.data.Iterator",
"diplomacy_research.utils.tensorflow.tf.data.experimental.filter_for_shard",
"diplomacy_research.utils.tensorflow.SupervisedDatasetSessionRunHook",
"os.path.join",
"logging.getLogger",
"multiprocessing.cpu_count"
] |
[((1125, 1152), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1142, 1152), False, 'import logging\n'), ((7732, 7829), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % self.cluster_config.task_id)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % self.\n cluster_config.task_id)\n", (7744, 7829), False, 'import os\n'), ((8019, 8085), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % 0)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)\n", (8031, 8085), False, 'import os\n'), ((9575, 9671), 'diplomacy_research.utils.tensorflow.tf.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['self.dataset_builder.training_dataset_path'], {'compression_type': '"""GZIP"""'}), "(self.dataset_builder.training_dataset_path,\n compression_type='GZIP')\n", (9598, 9671), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((12072, 12170), 'diplomacy_research.utils.tensorflow.tf.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['self.dataset_builder.validation_dataset_path'], {'compression_type': '"""GZIP"""'}), "(self.dataset_builder.validation_dataset_path,\n compression_type='GZIP')\n", (12095, 12170), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((24955, 24992), 'diplomacy_research.utils.tensorflow.SupervisedDatasetSessionRunHook', 'SupervisedDatasetSessionRunHook', (['self'], {}), '(self)\n', (24986, 24992), False, 'from diplomacy_research.utils.tensorflow import SupervisedDatasetSessionRunHook\n'), ((4624, 4679), 'os.path.exists', 'os.path.exists', (['self.dataset_builder.dataset_index_path'], {}), '(self.dataset_builder.dataset_index_path)\n', (4638, 4679), False, 'import os\n'), ((4702, 4758), 'os.path.getsize', 'os.path.getsize', (['self.dataset_builder.dataset_index_path'], {}), '(self.dataset_builder.dataset_index_path)\n', (4717, 4758), False, 'import os\n'), ((6105, 6172), 'math.ceil', 'math.ceil', (['(nb_items_per_epoch / (self.batch_size * self.num_shards))'], {}), '(nb_items_per_epoch / (self.batch_size * self.num_shards))\n', (6114, 6172), False, 'import math\n'), ((6427, 6515), 'math.ceil', 'math.ceil', (['(self.total_nb_items_training_proto / (self.batch_size * self.num_shards))'], {}), '(self.total_nb_items_training_proto / (self.batch_size * self.\n num_shards))\n', (6436, 6515), False, 'import math\n'), ((6656, 6741), 'math.ceil', 'math.ceil', (['(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards))'], {}), '(self.total_nb_items_valid_proto / (self.batch_size * self.num_shards)\n )\n', (6665, 6741), False, 'import math\n'), ((7669, 7716), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status.pkl"""'], {}), "(self.checkpoint_dir, 'status.pkl')\n", (7681, 7716), False, 'import os\n'), ((8239, 8305), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % 0)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % 0)\n", (8251, 8305), False, 'import os\n'), ((8328, 8375), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status.pkl"""'], {}), "(self.checkpoint_dir, 'status.pkl')\n", (8340, 8375), False, 'import os\n'), ((8427, 8451), 'os.path.exists', 'os.path.exists', (['fallback'], {}), '(fallback)\n', (8441, 8451), False, 'import os\n'), ((12348, 12478), 'diplomacy_research.utils.tensorflow.tf.data.experimental.filter_for_shard', 'tf.data.experimental.filter_for_shard', ([], {'num_shards': 'self.cluster_config.num_shards', 
'shard_index': 'self.cluster_config.shard_index'}), '(num_shards=self.cluster_config.\n num_shards, shard_index=self.cluster_config.shard_index)\n', (12385, 12478), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((14840, 14919), 'diplomacy_research.utils.tensorflow.tf.device', 'tf.device', (['(self.cluster_config.iterator_device if self.cluster_config else None)'], {}), '(self.cluster_config.iterator_device if self.cluster_config else None)\n', (14849, 14919), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((20902, 20952), 'pickle.dump', 'pickle.dump', (['status', 'file', 'pickle.HIGHEST_PROTOCOL'], {}), '(status, file, pickle.HIGHEST_PROTOCOL)\n', (20913, 20952), False, 'import pickle\n'), ((21267, 21299), 'os.path.exists', 'os.path.exists', (['self.status_path'], {}), '(self.status_path)\n', (21281, 21299), False, 'import os\n'), ((21304, 21337), 'os.path.getsize', 'os.path.getsize', (['self.status_path'], {}), '(self.status_path)\n', (21319, 21337), False, 'import os\n'), ((22803, 22841), 'os.path.exists', 'os.path.exists', (['self.chief_status_path'], {}), '(self.chief_status_path)\n', (22817, 22841), False, 'import os\n'), ((22846, 22885), 'os.path.getsize', 'os.path.getsize', (['self.chief_status_path'], {}), '(self.chief_status_path)\n', (22861, 22885), False, 'import os\n'), ((4879, 4905), 'pickle.load', 'pickle.load', (['dataset_index'], {}), '(dataset_index)\n', (4890, 4905), False, 'import pickle\n'), ((10458, 10588), 'diplomacy_research.utils.tensorflow.tf.data.experimental.filter_for_shard', 'tf.data.experimental.filter_for_shard', ([], {'num_shards': 'self.cluster_config.num_shards', 'shard_index': 'self.cluster_config.shard_index'}), '(num_shards=self.cluster_config.\n num_shards, shard_index=self.cluster_config.shard_index)\n', (10495, 10588), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((11096, 11123), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (11121, 11123), False, 'import multiprocessing\n'), ((11921, 11967), 'numpy.zeros', 'np.zeros', ([], {'shape': 'feature_shape[1:]', 'dtype': 'dtype'}), '(shape=feature_shape[1:], dtype=dtype)\n', (11929, 11967), True, 'import numpy as np\n'), ((12830, 12857), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (12855, 12857), False, 'import multiprocessing\n'), ((15057, 15224), 'diplomacy_research.utils.tensorflow.tf.data.Iterator', 'tf.data.Iterator', ([], {'iterator_resource': 'iterator_resource', 'initializer': 'None', 'output_types': 'output_types', 'output_shapes': 'output_shapes', 'output_classes': 'output_classes'}), '(iterator_resource=iterator_resource, initializer=None,\n output_types=output_types, output_shapes=output_shapes, output_classes=\n output_classes)\n', (15073, 15224), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((15600, 15748), 'diplomacy_research.utils.tensorflow.tf.data.Iterator.from_structure', 'tf.data.Iterator.from_structure', ([], {'output_types': 'output_types', 'output_shapes': 'output_shapes', 'output_classes': 'output_classes', 'shared_name': 'shared_name'}), '(output_types=output_types, output_shapes=\n output_shapes, output_classes=output_classes, shared_name=shared_name)\n', (15631, 15748), False, 'from diplomacy_research.utils.tensorflow import tf\n'), ((20426, 20459), 'os.path.dirname', 'os.path.dirname', (['self.status_path'], {}), '(self.status_path)\n', (20441, 20459), False, 'import os\n'), ((20486, 20519), 'os.path.dirname', 'os.path.dirname', 
(['self.status_path'], {}), '(self.status_path)\n', (20501, 20519), False, 'import os\n'), ((21421, 21440), 'pickle.load', 'pickle.load', (['status'], {}), '(status)\n', (21432, 21440), False, 'import pickle\n'), ((22987, 23012), 'pickle.load', 'pickle.load', (['chief_status'], {}), '(chief_status)\n', (22998, 23012), False, 'import pickle\n'), ((22320, 22347), 'os.unlink', 'os.unlink', (['self.status_path'], {}), '(self.status_path)\n', (22329, 22347), False, 'import os\n'), ((22563, 22582), 'pickle.load', 'pickle.load', (['status'], {}), '(status)\n', (22574, 22582), False, 'import pickle\n'), ((22019, 22093), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % status_ix)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)\n", (22031, 22093), False, 'import os\n'), ((22134, 22208), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""status"""', "('status-%03d.pkl' % status_ix)"], {}), "(self.checkpoint_dir, 'status', 'status-%03d.pkl' % status_ix)\n", (22146, 22208), False, 'import os\n')]
|
import numpy as np
import uuid
import os
import pandas as pd
import psutil
import pickle
#import kde_info
#from lanfactory.config import
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.python.client import device_lib
import warnings
from lanfactory.utils import try_gen_folder
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self,
file_IDs,
batch_size=32,
shuffle=True,
label_prelog_cutoff_low = 1e-7, # label prelog cutoff --> label_preprocessor ?
label_prelog_cutoff_high = None,
):
# List physical devices
#print(tf.config.list_physical_devices())
# Do I allow for arbitrary input file sizes ?
# Initialization
self.batch_size = batch_size
#self.labels = labels
self.file_IDs = file_IDs
self.shuffle = shuffle
self.label_prelog_cutoff_low = label_prelog_cutoff_low
self.label_prelog_cutoff_high = label_prelog_cutoff_high
#self.training_data_folder = training_data_folder
self.tmp_data = None
# Get metadata from loading a test file....
# FILL IN
# self.file_shape_dict =
self.__init_file_shape()
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor((len(self.file_IDs) * self.file_shape_dict['inputs'][0]) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
# Find list of IDs
#file_IDs_temp = [self.file_IDs[k] for k in indexes]
        if index % self.batches_per_file == 0 or self.tmp_data is None:
#self.tmp_file =
#print('index')
#print('debugging')
#print('loading new datafile')
#print('batch: ', index)
#print('new file loaded:', index // self.batches_per_file)
self.__load_file(file_index = self.indexes[index // self.batches_per_file])
# Generate data
batch_ids = np.arange(((index % self.batches_per_file) * self.batch_size), ((index % self.batches_per_file) + 1) * self.batch_size, 1)
X, y = self.__data_generation(batch_ids)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.file_IDs))
        if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, batch_ids = None):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, self.input_dim), dtype = np.float32)
y = np.empty((self.batch_size, self.label_dim), dtype = np.float32)
X = self.tmp_data['data'][batch_ids, :] #tmp_file[batch_ids, :-1]
y = self.tmp_data['labels'][batch_ids] #tmp_file[batch_ids, -1]
if self.label_prelog_cutoff_low is not None:
y[y < np.log(self.label_prelog_cutoff_low)] = np.log(self.label_prelog_cutoff_low)
if self.label_prelog_cutoff_high is not None:
y[y > np.log(self.label_prelog_cutoff_high)] = np.log(self.label_prelog_cutoff_high)
return X, y
def __load_file(self, file_index):
self.tmp_data = pickle.load(open(self.file_IDs[file_index], 'rb'))
shuffle_idx = np.random.choice(self.tmp_data['data'].shape[0], size = self.tmp_data['data'].shape[0], replace = True)
self.tmp_data['data'] = self.tmp_data['data'][shuffle_idx, :]
self.tmp_data['labels'] = self.tmp_data['labels'][shuffle_idx]
#return np.random.shuffle(np.load(self.training_data_folder + '/' + self.file_IDs[file_index]))
def __init_file_shape(self):
init_file = pickle.load(open(self.file_IDs[0], 'rb'))
#print('Init file shape: ', init_file['data'].shape, init_file['labels'].shape)
self.file_shape_dict = {'inputs': init_file['data'].shape, 'labels': init_file['labels'].shape}
self.batches_per_file = int(self.file_shape_dict['inputs'][0] / self.batch_size)
self.input_dim = self.file_shape_dict['inputs'][1]
if len(self.file_shape_dict['labels']) > 1:
self.label_dim = self.file_shape_dict['labels'][1]
else:
self.label_dim = 1
return
#return np.load(self.training_data_folder + '/' + self.file_IDs[0]).shape
class KerasModel:
def __init__(self, network_config = None, input_shape = 10, save_folder = None, generative_model_id = 'ddm'):
assert network_config is not None, 'You need to supply a network config dict'
self.model_id = uuid.uuid1().hex + '_' + generative_model_id
self.save_folder = save_folder
self.input_shape = input_shape
self.network_config = network_config
self.model = self.__build_model()
def __build_model(self):
model = keras.Sequential()
for i in range(len(self.network_config['layer_sizes']) + 1):
if i == 0:
model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i],
input_dim = self.input_shape,
activation = self.network_config['activations'][i]))
else:
if self.network_config['layer_types'][i - 1] == 'dense':
model.add(keras.layers.Dense(units = self.network_config['layer_sizes'][i - 1],
activation = self.network_config['activations'][i - 1]))
else:
raise ValueError("Only Dense Layers for now --> check your network config")
return model
def _save_model_yaml(self, allow_abs_path_folder_generation = False):
spec = self.model.to_yaml()
assert self.save_folder is not None, 'You did not supply a folder for saving the model'
try_gen_folder(folder = self.save_folder, allow_abs_path_folder_generation = allow_abs_path_folder_generation)
open(self.save_folder + "/" + self.model_id + "_model_spec.yaml", "w").write(spec)
class ModelTrainerKerasSeq:
def __init__(self,
train_config = None,
data_generator_train = None,
data_generator_val = None,
model = None,
output_folder = None,
warm_start = False,
allow_abs_path_folder_generation = False,
):
self.train_config = train_config
self.model = model
self.output_folder = output_folder
self.allow_abs_path_folder_generation = allow_abs_path_folder_generation
self.data_generator_train = data_generator_train
self.data_generator_val = data_generator_val
self.warm_start = warm_start
self.__get_loss()
self.__get_optimizer()
self.__get_metrics()
self.__get_callbacks()
self.__compile_model()
self.__load_weights()
try_gen_folder(folder = self.output_folder,
allow_abs_path_folder_generation = allow_abs_path_folder_generation) # AF-TODO import folder
def __get_loss(self):
if self.train_config['loss'] == 'huber':
self.loss_fun = tf.keras.losses.Huber()
elif self.train_config['loss'] == 'mse':
self.loss_fun = 'mse'
return
def __get_optimizer(self):
# Adam example here needs optimizer only as a string
# We can have self.optimizer as a functions or class too
if self.train_config['optimizer'] == 'adam':
self.optimizer = 'adam'
return
def __get_metrics(self):
self.metrics = self.train_config['metrics']
return
def __get_callbacks(self):
self.cb_list = []
for cb_tmp in self.train_config['callbacks']:
if cb_tmp == 'checkpoint':
ckpt_file_name = self.output_folder + '/' + self.model.model_id + '_ckpt.h5'
self.cb_list.append(keras.callbacks.ModelCheckpoint(ckpt_file_name,
monitor = 'val_loss',
verbose = 1,
save_best_only = False))
elif cb_tmp == 'earlystopping':
self.cb_list.append(keras.callbacks.EarlyStopping(monitor = 'val_loss',
min_delta = 0,
verbose = 1,
patience = 10))
elif cb_tmp == 'reducelr':
self.cb_list.append(keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.1,
patience = 5,
verbose = 1,
min_delta = 0.0001,
min_lr = 0.00000001))
else:
print('Provided a string for a callback function that is none of: checkpoint, earlystopping, reducelr')
def __compile_model(self):
self.model.model.compile(loss = self.loss_fun,
optimizer = self.optimizer,
metrics = self.metrics)
def __load_weights(self):
# If warmstart == True, we load model weights and start training from there !
return
def train_model(self, save_history = True , verbose = 1):
history = self.model.model.fit(x = self.data_generator_train,
validation_data = self.data_generator_val,
epochs = self.train_config['n_epochs'],
callbacks = self.cb_list,
verbose = verbose,
)
if save_history:
pd.DataFrame(history.history).to_csv(self.output_folder + "/" + self.model.model_id + "_training_history.csv")
        if 'checkpoint' not in self.train_config['callbacks']:
# Save Model
print('Saving final state of the model, since callbacks did not include checkpoint creation')
self.model.model.save(self.output_folder + "/" + self.model.model_id + "_model_final.h5")
def _get_model(self):
return self.model.model
# def __try_gen_output_folder(self):
# output_folder_list = self.output_folder.split('/')
# # Check if folder string supplied defines a relative or absolute path
# if not output_folder_list[0]:
# if not self.allow_abs_path_folder_generation:
# warnings.warn('Absolute folder path provided, but setting allow_abs_path_folder_generation = False. No folders will be generated.')
# return
# else:
# rel_folder = True
# i = 1
# else:
# rel_folder = False
# i = 0
# #
# while i < len(output_folder_list):
# if not output_folder_list[i]:
# output_folder_list.pop(i)
# else:
# i += 1
# if rel_folder:
# output_folder_list[1] = '/' + output_folder_list[1]
# output_folder_list.pop(0)
# tmp_dir_str = ''
# i = 0
# while i < len(output_folder_list):
# if i == 0:
# tmp_dir_str += output_folder_list[i]
# else:
# tmp_dir_str += '/' + output_folder_list[i]
# if not os.path.exists(tmp_dir_str):
# print('Did not find folder: ', tmp_dir_str)
# print('Creating it...')
# try:
# os.makedirs(tmp_dir_str)
# except:
# print('Some problem occured when creating the directory ', tmp_dir_str)
# else:
# print('Found folder: ', tmp_dir_str)
# print('Moving on...')
# i += 1
# return
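# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). It shows how DataGenerator, KerasModel and ModelTrainerKerasSeq
# are meant to fit together. The file paths and concrete config values are
# assumptions; the required dict keys are inferred from the constructors
# above. Each training file is expected to be a pickle of a dict holding
# 'data' and 'labels' arrays.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    train_files = ['training_data/train_0.pickle', 'training_data/train_1.pickle']  # hypothetical paths
    val_files = ['training_data/val_0.pickle']                                       # hypothetical path
    network_config = {'layer_sizes': [100, 100, 1],
                      'layer_types': ['dense', 'dense', 'dense'],
                      'activations': ['tanh', 'tanh', 'linear']}
    train_config = {'loss': 'huber',
                    'optimizer': 'adam',
                    'n_epochs': 10,
                    'metrics': ['mse'],
                    'callbacks': ['checkpoint', 'earlystopping', 'reducelr']}
    train_gen = DataGenerator(file_IDs=train_files, batch_size=128)
    val_gen = DataGenerator(file_IDs=val_files, batch_size=128)
    model = KerasModel(network_config=network_config,
                       input_shape=train_gen.input_dim,
                       save_folder='models',
                       generative_model_id='ddm')
    trainer = ModelTrainerKerasSeq(train_config=train_config,
                                   data_generator_train=train_gen,
                                   data_generator_val=val_gen,
                                   model=model,
                                   output_folder='models')
    trainer.train_model(save_history=True, verbose=1)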
|
[
"pandas.DataFrame",
"numpy.random.choice",
"lanfactory.utils.try_gen_folder",
"numpy.log",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.empty",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.losses.Huber",
"uuid.uuid1",
"numpy.arange",
"tensorflow.keras.Sequential",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.random.shuffle"
] |
[((2206, 2327), 'numpy.arange', 'np.arange', (['(index % self.batches_per_file * self.batch_size)', '((index % self.batches_per_file + 1) * self.batch_size)', '(1)'], {}), '(index % self.batches_per_file * self.batch_size, (index % self.\n batches_per_file + 1) * self.batch_size, 1)\n', (2215, 2327), True, 'import numpy as np\n'), ((2788, 2849), 'numpy.empty', 'np.empty', (['(self.batch_size, self.input_dim)'], {'dtype': 'np.float32'}), '((self.batch_size, self.input_dim), dtype=np.float32)\n', (2796, 2849), True, 'import numpy as np\n'), ((2864, 2925), 'numpy.empty', 'np.empty', (['(self.batch_size, self.label_dim)'], {'dtype': 'np.float32'}), '((self.batch_size, self.label_dim), dtype=np.float32)\n', (2872, 2925), True, 'import numpy as np\n'), ((3550, 3654), 'numpy.random.choice', 'np.random.choice', (["self.tmp_data['data'].shape[0]"], {'size': "self.tmp_data['data'].shape[0]", 'replace': '(True)'}), "(self.tmp_data['data'].shape[0], size=self.tmp_data['data']\n .shape[0], replace=True)\n", (3566, 3654), True, 'import numpy as np\n'), ((5131, 5149), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (5147, 5149), False, 'from tensorflow import keras\n'), ((6159, 6270), 'lanfactory.utils.try_gen_folder', 'try_gen_folder', ([], {'folder': 'self.save_folder', 'allow_abs_path_folder_generation': 'allow_abs_path_folder_generation'}), '(folder=self.save_folder, allow_abs_path_folder_generation=\n allow_abs_path_folder_generation)\n', (6173, 6270), False, 'from lanfactory.utils import try_gen_folder\n'), ((7256, 7369), 'lanfactory.utils.try_gen_folder', 'try_gen_folder', ([], {'folder': 'self.output_folder', 'allow_abs_path_folder_generation': 'allow_abs_path_folder_generation'}), '(folder=self.output_folder, allow_abs_path_folder_generation=\n allow_abs_path_folder_generation)\n', (7270, 7369), False, 'from lanfactory.utils import try_gen_folder\n'), ((2568, 2599), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (2585, 2599), True, 'import numpy as np\n'), ((3195, 3231), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_low'], {}), '(self.label_prelog_cutoff_low)\n', (3201, 3231), True, 'import numpy as np\n'), ((3354, 3391), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_high'], {}), '(self.label_prelog_cutoff_high)\n', (3360, 3391), True, 'import numpy as np\n'), ((7529, 7552), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {}), '()\n', (7550, 7552), True, 'import tensorflow as tf\n'), ((3155, 3191), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_low'], {}), '(self.label_prelog_cutoff_low)\n', (3161, 3191), True, 'import numpy as np\n'), ((3313, 3350), 'numpy.log', 'np.log', (['self.label_prelog_cutoff_high'], {}), '(self.label_prelog_cutoff_high)\n', (3319, 3350), True, 'import numpy as np\n'), ((4866, 4878), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4876, 4878), False, 'import uuid\n'), ((5268, 5414), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "self.network_config['layer_sizes'][i]", 'input_dim': 'self.input_shape', 'activation': "self.network_config['activations'][i]"}), "(units=self.network_config['layer_sizes'][i], input_dim=\n self.input_shape, activation=self.network_config['activations'][i])\n", (5286, 5414), False, 'from tensorflow import keras\n'), ((8291, 8396), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['ckpt_file_name'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(False)'}), "(ckpt_file_name, 
monitor='val_loss', verbose\n =1, save_best_only=False)\n", (8322, 8396), False, 'from tensorflow import keras\n'), ((10537, 10566), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (10549, 10566), True, 'import pandas as pd\n'), ((5628, 5753), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "self.network_config['layer_sizes'][i - 1]", 'activation': "self.network_config['activations'][i - 1]"}), "(units=self.network_config['layer_sizes'][i - 1],\n activation=self.network_config['activations'][i - 1])\n", (5646, 5753), False, 'from tensorflow import keras\n'), ((8685, 8775), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'verbose': '(1)', 'patience': '(10)'}), "(monitor='val_loss', min_delta=0, verbose=1,\n patience=10)\n", (8714, 8775), False, 'from tensorflow import keras\n'), ((9057, 9182), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(5)', 'verbose': '(1)', 'min_delta': '(0.0001)', 'min_lr': '(1e-08)'}), "(monitor='val_loss', factor=0.1, patience=\n 5, verbose=1, min_delta=0.0001, min_lr=1e-08)\n", (9090, 9182), False, 'from tensorflow import keras\n')]
|
"""
Sample data files with missing data create ancestors at many different time points,
often only one ancestor in each time point, which can cause difficulties parallelising
the inference. This script takes a sampledata file (usually containing missing data),
calculates the times-as-freq values, then bins them into frequency bands.
"""
import argparse
import numpy as np
import tsinfer
import tskit
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input_file",
                        help="A tsinfer sample file ending in '.samples'")
parser.add_argument("output_file",
                        help="A tsinfer sample file ending in '.samples'")
args = parser.parse_args()
sd = tsinfer.load(args.input_file).copy(path=args.output_file)
times = sd.sites_time[:]
for j, variant in enumerate(sd.variants(inference_sites=True)):
time = variant.site.time
if time == tsinfer.constants.TIME_UNSPECIFIED:
counts = tsinfer.formats.allele_counts(variant.genotypes)
# Non-variable sites have no obvious freq-as-time values
assert counts.known != counts.derived
assert counts.known != counts.ancestral
assert counts.known > 0
# Time = freq of *all* derived alleles. Note that if n_alleles > 2 this
# may not be sensible: https://github.com/tskit-dev/tsinfer/issues/228
times[variant.site.id] = counts.derived / counts.known
sd.sites_time[:] = np.around(times * sd.num_samples)/sd.num_samples
print(
"Number of samples:",
sd.num_samples,
". Number of discrete times:",
len(np.unique(sd.sites_time[:])))
sd.finalise()
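# Toy illustration (added, not part of the original script) of the rounding step
# above: with num_samples = 10, each time-as-frequency value is snapped onto the
# grid k / num_samples, so sites with nearly identical frequencies end up sharing
# a single discrete time and can be grouped into the same ancestor-building epoch.
#
#   times = np.array([0.31, 0.33, 0.74999, 0.75001])
#   np.around(times * 10) / 10      # -> array([0.3, 0.3, 0.7, 0.8])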
|
[
"argparse.ArgumentParser",
"numpy.around",
"tsinfer.formats.allele_counts",
"tsinfer.load",
"numpy.unique"
] |
[((446, 490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (469, 490), False, 'import argparse\n'), ((1549, 1582), 'numpy.around', 'np.around', (['(times * sd.num_samples)'], {}), '(times * sd.num_samples)\n', (1558, 1582), True, 'import numpy as np\n'), ((725, 754), 'tsinfer.load', 'tsinfer.load', (['args.input_file'], {}), '(args.input_file)\n', (737, 754), False, 'import tsinfer\n'), ((1003, 1051), 'tsinfer.formats.allele_counts', 'tsinfer.formats.allele_counts', (['variant.genotypes'], {}), '(variant.genotypes)\n', (1032, 1051), False, 'import tsinfer\n'), ((1714, 1741), 'numpy.unique', 'np.unique', (['sd.sites_time[:]'], {}), '(sd.sites_time[:])\n', (1723, 1741), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# test_x5.py -
#
# Author: <NAME> <<EMAIL>>
#
import os.path as op
import numpy as np
import pytest
import h5py
import fsl.data.image as fslimage
import fsl.utils.tempdir as tempdir
import fsl.transform.affine as affine
import fsl.transform.fnirt as fnirt
import fsl.transform.nonlinear as nonlinear
import fsl.transform.x5 as x5
from .. import make_random_image
def _check_metadata(group):
assert group.attrs['Format'] == x5.X5_FORMAT
assert group.attrs['Version'] == x5.X5_VERSION
def _check_affine(group, xform):
assert group.attrs['Type'] == 'affine'
gotxform = np.array(group['Matrix'])
assert np.all(np.isclose(gotxform, xform))
def _check_space(group, img):
assert group.attrs['Type'] == 'image'
assert np.all(np.isclose(group.attrs['Size'], img.shape[ :3]))
assert np.all(np.isclose(group.attrs['Scales'], img.pixdim[:3]))
_check_affine(group['Mapping'], img.voxToWorldMat)
def _check_deformation(group, field):
assert group.attrs['Type'] == 'deformation'
assert group.attrs['SubType'] == field.deformationType
xform = np.array(group['Matrix'])
assert np.all(np.isclose(xform, field.data))
_check_affine(group['Mapping'], field.voxToWorldMat)
def test_readWriteLinearX5():
with tempdir.tempdir():
make_random_image('src.nii')
make_random_image('ref.nii')
xform = affine.compose(
np.random.randint(1, 5, 3),
np.random.randint(-10, 10, 3),
-np.pi / 4 + np.random.random(3) * np.pi / 2)
src = fslimage.Image('src.nii')
ref = fslimage.Image('ref.nii')
x5.writeLinearX5('linear.x5', xform, src, ref)
gotxform, gotsrc, gotref = x5.readLinearX5('linear.x5')
assert np.all(np.isclose(gotxform, xform))
assert gotsrc.sameSpace(src)
assert gotref.sameSpace(ref)
with h5py.File('linear.x5', 'r') as f:
_check_metadata(f)
assert f.attrs['Type'] == 'linear'
_check_affine(f['/Transform'], xform)
_check_space( f['/A'], src)
_check_space( f['/B'], ref)
def test_readWriteNonLinearX5():
datadir = op.join(op.dirname(__file__), 'testdata', 'nonlinear')
dffile = op.join(datadir, 'displacementfield.nii.gz')
srcfile = op.join(datadir, 'src.nii.gz')
reffile = op.join(datadir, 'ref.nii.gz')
src = fslimage.Image(srcfile)
ref = fslimage.Image(reffile)
dfield = fnirt.readFnirt(dffile, src, ref)
wdfield = nonlinear.convertDeformationSpace(dfield, 'world', 'world')
with tempdir.tempdir():
# field must be world->world
with pytest.raises(x5.X5Error):
x5.writeNonLinearX5('nonlinear.x5', dfield)
x5.writeNonLinearX5('nonlinear.x5', wdfield)
gotdfield = x5.readNonLinearX5('nonlinear.x5')
assert gotdfield.src.sameSpace(src)
assert gotdfield.ref.sameSpace(ref)
assert gotdfield.srcSpace == wdfield.srcSpace
assert gotdfield.refSpace == wdfield.refSpace
assert gotdfield.deformationType == wdfield.deformationType
assert np.all(np.isclose(gotdfield.data, wdfield.data))
with h5py.File('nonlinear.x5', 'r') as f:
assert f.attrs['Type'] == 'nonlinear'
_check_metadata(f)
_check_deformation(f['/Transform'], wdfield)
_check_space( f['/A'], ref)
_check_space( f['/B'], src)
|
[
"fsl.transform.x5.readLinearX5",
"h5py.File",
"fsl.transform.nonlinear.convertDeformationSpace",
"os.path.dirname",
"fsl.data.image.Image",
"fsl.transform.x5.readNonLinearX5",
"numpy.isclose",
"numpy.random.randint",
"numpy.array",
"fsl.transform.x5.writeLinearX5",
"pytest.raises",
"numpy.random.random",
"fsl.transform.fnirt.readFnirt",
"fsl.utils.tempdir.tempdir",
"fsl.transform.x5.writeNonLinearX5",
"os.path.join"
] |
[((646, 671), 'numpy.array', 'np.array', (["group['Matrix']"], {}), "(group['Matrix'])\n", (654, 671), True, 'import numpy as np\n'), ((1145, 1170), 'numpy.array', 'np.array', (["group['Matrix']"], {}), "(group['Matrix'])\n", (1153, 1170), True, 'import numpy as np\n'), ((2301, 2345), 'os.path.join', 'op.join', (['datadir', '"""displacementfield.nii.gz"""'], {}), "(datadir, 'displacementfield.nii.gz')\n", (2308, 2345), True, 'import os.path as op\n'), ((2360, 2390), 'os.path.join', 'op.join', (['datadir', '"""src.nii.gz"""'], {}), "(datadir, 'src.nii.gz')\n", (2367, 2390), True, 'import os.path as op\n'), ((2405, 2435), 'os.path.join', 'op.join', (['datadir', '"""ref.nii.gz"""'], {}), "(datadir, 'ref.nii.gz')\n", (2412, 2435), True, 'import os.path as op\n'), ((2451, 2474), 'fsl.data.image.Image', 'fslimage.Image', (['srcfile'], {}), '(srcfile)\n', (2465, 2474), True, 'import fsl.data.image as fslimage\n'), ((2489, 2512), 'fsl.data.image.Image', 'fslimage.Image', (['reffile'], {}), '(reffile)\n', (2503, 2512), True, 'import fsl.data.image as fslimage\n'), ((2527, 2560), 'fsl.transform.fnirt.readFnirt', 'fnirt.readFnirt', (['dffile', 'src', 'ref'], {}), '(dffile, src, ref)\n', (2542, 2560), True, 'import fsl.transform.fnirt as fnirt\n'), ((2575, 2634), 'fsl.transform.nonlinear.convertDeformationSpace', 'nonlinear.convertDeformationSpace', (['dfield', '"""world"""', '"""world"""'], {}), "(dfield, 'world', 'world')\n", (2608, 2634), True, 'import fsl.transform.nonlinear as nonlinear\n'), ((690, 717), 'numpy.isclose', 'np.isclose', (['gotxform', 'xform'], {}), '(gotxform, xform)\n', (700, 717), True, 'import numpy as np\n'), ((811, 857), 'numpy.isclose', 'np.isclose', (["group.attrs['Size']", 'img.shape[:3]'], {}), "(group.attrs['Size'], img.shape[:3])\n", (821, 857), True, 'import numpy as np\n'), ((880, 929), 'numpy.isclose', 'np.isclose', (["group.attrs['Scales']", 'img.pixdim[:3]'], {}), "(group.attrs['Scales'], img.pixdim[:3])\n", (890, 929), True, 'import numpy as np\n'), ((1189, 1218), 'numpy.isclose', 'np.isclose', (['xform', 'field.data'], {}), '(xform, field.data)\n', (1199, 1218), True, 'import numpy as np\n'), ((1318, 1335), 'fsl.utils.tempdir.tempdir', 'tempdir.tempdir', ([], {}), '()\n', (1333, 1335), True, 'import fsl.utils.tempdir as tempdir\n'), ((1599, 1624), 'fsl.data.image.Image', 'fslimage.Image', (['"""src.nii"""'], {}), "('src.nii')\n", (1613, 1624), True, 'import fsl.data.image as fslimage\n'), ((1639, 1664), 'fsl.data.image.Image', 'fslimage.Image', (['"""ref.nii"""'], {}), "('ref.nii')\n", (1653, 1664), True, 'import fsl.data.image as fslimage\n'), ((1674, 1720), 'fsl.transform.x5.writeLinearX5', 'x5.writeLinearX5', (['"""linear.x5"""', 'xform', 'src', 'ref'], {}), "('linear.x5', xform, src, ref)\n", (1690, 1720), True, 'import fsl.transform.x5 as x5\n'), ((1757, 1785), 'fsl.transform.x5.readLinearX5', 'x5.readLinearX5', (['"""linear.x5"""'], {}), "('linear.x5')\n", (1772, 1785), True, 'import fsl.transform.x5 as x5\n'), ((2240, 2260), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (2250, 2260), True, 'import os.path as op\n'), ((2645, 2662), 'fsl.utils.tempdir.tempdir', 'tempdir.tempdir', ([], {}), '()\n', (2660, 2662), True, 'import fsl.utils.tempdir as tempdir\n'), ((2807, 2851), 'fsl.transform.x5.writeNonLinearX5', 'x5.writeNonLinearX5', (['"""nonlinear.x5"""', 'wdfield'], {}), "('nonlinear.x5', wdfield)\n", (2826, 2851), True, 'import fsl.transform.x5 as x5\n'), ((2873, 2907), 'fsl.transform.x5.readNonLinearX5', 'x5.readNonLinearX5', 
(['"""nonlinear.x5"""'], {}), "('nonlinear.x5')\n", (2891, 2907), True, 'import fsl.transform.x5 as x5\n'), ((1455, 1481), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '(3)'], {}), '(1, 5, 3)\n', (1472, 1481), True, 'import numpy as np\n'), ((1495, 1524), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)', '(3)'], {}), '(-10, 10, 3)\n', (1512, 1524), True, 'import numpy as np\n'), ((1808, 1835), 'numpy.isclose', 'np.isclose', (['gotxform', 'xform'], {}), '(gotxform, xform)\n', (1818, 1835), True, 'import numpy as np\n'), ((1925, 1952), 'h5py.File', 'h5py.File', (['"""linear.x5"""', '"""r"""'], {}), "('linear.x5', 'r')\n", (1934, 1952), False, 'import h5py\n'), ((2715, 2740), 'pytest.raises', 'pytest.raises', (['x5.X5Error'], {}), '(x5.X5Error)\n', (2728, 2740), False, 'import pytest\n'), ((2754, 2797), 'fsl.transform.x5.writeNonLinearX5', 'x5.writeNonLinearX5', (['"""nonlinear.x5"""', 'dfield'], {}), "('nonlinear.x5', dfield)\n", (2773, 2797), True, 'import fsl.transform.x5 as x5\n'), ((3195, 3235), 'numpy.isclose', 'np.isclose', (['gotdfield.data', 'wdfield.data'], {}), '(gotdfield.data, wdfield.data)\n', (3205, 3235), True, 'import numpy as np\n'), ((3251, 3281), 'h5py.File', 'h5py.File', (['"""nonlinear.x5"""', '"""r"""'], {}), "('nonlinear.x5', 'r')\n", (3260, 3281), False, 'import h5py\n'), ((1551, 1570), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (1567, 1570), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.ndimage as nd
import torch
import torch.nn as nn
from torch.nn import functional as F
from .utils import dequeue_and_enqueue
def compute_rce_loss(predict, target):
from einops import rearrange
predict = F.softmax(predict, dim=1)
with torch.no_grad():
_, num_cls, h, w = predict.shape
temp_tar = target.clone()
temp_tar[target == 255] = 0
label = (
F.one_hot(temp_tar.clone().detach(), num_cls).float().cuda()
) # (batch, h, w, num_cls)
label = rearrange(label, "b h w c -> b c h w")
label = torch.clamp(label, min=1e-4, max=1.0)
rce = -torch.sum(predict * torch.log(label), dim=1) * (target != 255).bool()
return rce.sum() / (target != 255).sum()
def compute_unsupervised_loss(predict, target, percent, pred_teacher):
batch_size, num_class, h, w = predict.shape
with torch.no_grad():
# drop pixels with high entropy
prob = torch.softmax(pred_teacher, dim=1)
entropy = -torch.sum(prob * torch.log(prob + 1e-10), dim=1)
thresh = np.percentile(
entropy[target != 255].detach().cpu().numpy().flatten(), percent
)
thresh_mask = entropy.ge(thresh).bool() * (target != 255).bool()
target[thresh_mask] = 255
weight = batch_size * h * w / torch.sum(target != 255)
loss = weight * F.cross_entropy(predict, target, ignore_index=255) # [10, 321, 321]
return loss
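# Hedged usage sketch (added for illustration, not from the original code base).
# compute_unsupervised_loss() takes the student logits, integer pseudo-labels derived
# from the teacher, a keep-percentile, and the raw teacher logits. Pixels whose teacher
# entropy is at or above the `percent`-th percentile are set to the ignore index 255,
# so roughly the (100 - percent)% highest-entropy pixels are dropped before the
# re-weighted cross-entropy. Shapes below are illustrative assumptions.
#
#   student_logits = torch.randn(2, 19, 65, 65)             # (batch, classes, h, w)
#   teacher_logits = torch.randn(2, 19, 65, 65)
#   pseudo_label = teacher_logits.argmax(dim=1)              # (batch, h, w), long
#   loss_unsup = compute_unsupervised_loss(student_logits,
#                                          pseudo_label.clone(),  # clone: target is modified in place
#                                          percent=80,
#                                          pred_teacher=teacher_logits)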
def compute_contra_memobank_loss(
rep,
label_l,
label_u,
prob_l,
prob_u,
low_mask,
high_mask,
cfg,
memobank,
queue_prtlis,
queue_size,
rep_teacher,
momentum_prototype=None,
i_iter=0,
):
# current_class_threshold: delta_p (0.3)
# current_class_negative_threshold: delta_n (1)
current_class_threshold = cfg["current_class_threshold"]
current_class_negative_threshold = cfg["current_class_negative_threshold"]
low_rank, high_rank = cfg["low_rank"], cfg["high_rank"]
temp = cfg["temperature"]
num_queries = cfg["num_queries"]
num_negatives = cfg["num_negatives"]
num_feat = rep.shape[1]
num_labeled = label_l.shape[0]
num_segments = label_l.shape[1]
low_valid_pixel = torch.cat((label_l, label_u), dim=0) * low_mask
high_valid_pixel = torch.cat((label_l, label_u), dim=0) * high_mask
rep = rep.permute(0, 2, 3, 1)
rep_teacher = rep_teacher.permute(0, 2, 3, 1)
seg_feat_all_list = []
seg_feat_low_entropy_list = [] # candidate anchor pixels
seg_num_list = [] # the number of low_valid pixels in each class
seg_proto_list = [] # the center of each class
_, prob_indices_l = torch.sort(prob_l, 1, True)
prob_indices_l = prob_indices_l.permute(0, 2, 3, 1) # (num_labeled, h, w, num_cls)
_, prob_indices_u = torch.sort(prob_u, 1, True)
prob_indices_u = prob_indices_u.permute(
0, 2, 3, 1
) # (num_unlabeled, h, w, num_cls)
prob = torch.cat((prob_l, prob_u), dim=0) # (batch_size, num_cls, h, w)
valid_classes = []
new_keys = []
for i in range(num_segments):
low_valid_pixel_seg = low_valid_pixel[:, i] # select binary mask for i-th class
high_valid_pixel_seg = high_valid_pixel[:, i]
prob_seg = prob[:, i, :, :]
rep_mask_low_entropy = (
prob_seg > current_class_threshold
) * low_valid_pixel_seg.bool()
rep_mask_high_entropy = (
prob_seg < current_class_negative_threshold
) * high_valid_pixel_seg.bool()
seg_feat_all_list.append(rep[low_valid_pixel_seg.bool()])
seg_feat_low_entropy_list.append(rep[rep_mask_low_entropy])
# positive sample: center of the class
seg_proto_list.append(
torch.mean(
rep_teacher[low_valid_pixel_seg.bool()].detach(), dim=0, keepdim=True
)
)
# generate class mask for unlabeled data
# prob_i_classes = prob_indices_u[rep_mask_high_entropy[num_labeled :]]
class_mask_u = torch.sum(
prob_indices_u[:, :, :, low_rank:high_rank].eq(i), dim=3
).bool()
# generate class mask for labeled data
# label_l_mask = rep_mask_high_entropy[: num_labeled] * (label_l[:, i] == 0)
# prob_i_classes = prob_indices_l[label_l_mask]
class_mask_l = torch.sum(prob_indices_l[:, :, :, :low_rank].eq(i), dim=3).bool()
class_mask = torch.cat(
(class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0
)
negative_mask = rep_mask_high_entropy * class_mask
keys = rep_teacher[negative_mask].detach()
new_keys.append(
dequeue_and_enqueue(
keys=keys,
queue=memobank[i],
queue_ptr=queue_prtlis[i],
queue_size=queue_size[i],
)
)
if low_valid_pixel_seg.sum() > 0:
seg_num_list.append(int(low_valid_pixel_seg.sum().item()))
valid_classes.append(i)
if (
len(seg_num_list) <= 1
): # in some rare cases, a small mini-batch might only contain 1 or no semantic class
if momentum_prototype is None:
return new_keys, torch.tensor(0.0) * rep.sum()
else:
return momentum_prototype, new_keys, torch.tensor(0.0) * rep.sum()
else:
reco_loss = torch.tensor(0.0).cuda()
seg_proto = torch.cat(seg_proto_list) # shape: [valid_seg, 256]
valid_seg = len(seg_num_list) # number of valid classes
prototype = torch.zeros(
(prob_indices_l.shape[-1], num_queries, 1, num_feat)
).cuda()
for i in range(valid_seg):
if (
len(seg_feat_low_entropy_list[i]) > 0
and memobank[valid_classes[i]][0].shape[0] > 0
):
# select anchor pixel
seg_low_entropy_idx = torch.randint(
len(seg_feat_low_entropy_list[i]), size=(num_queries,)
)
anchor_feat = (
seg_feat_low_entropy_list[i][seg_low_entropy_idx].clone().cuda()
)
else:
# in some rare cases, all queries in the current query class are easy
reco_loss = reco_loss + 0 * rep.sum()
continue
# apply negative key sampling from memory bank (with no gradients)
with torch.no_grad():
negative_feat = memobank[valid_classes[i]][0].clone().cuda()
high_entropy_idx = torch.randint(
len(negative_feat), size=(num_queries * num_negatives,)
)
negative_feat = negative_feat[high_entropy_idx]
negative_feat = negative_feat.reshape(
num_queries, num_negatives, num_feat
)
positive_feat = (
seg_proto[i]
.unsqueeze(0)
.unsqueeze(0)
.repeat(num_queries, 1, 1)
.cuda()
) # (num_queries, 1, num_feat)
if momentum_prototype is not None:
if not (momentum_prototype == 0).all():
ema_decay = min(1 - 1 / i_iter, 0.999)
positive_feat = (
1 - ema_decay
) * positive_feat + ema_decay * momentum_prototype[
valid_classes[i]
]
prototype[valid_classes[i]] = positive_feat.clone()
all_feat = torch.cat(
(positive_feat, negative_feat), dim=1
) # (num_queries, 1 + num_negative, num_feat)
seg_logits = torch.cosine_similarity(
anchor_feat.unsqueeze(1), all_feat, dim=2
)
reco_loss = reco_loss + F.cross_entropy(
seg_logits / temp, torch.zeros(num_queries).long().cuda()
)
if momentum_prototype is None:
return new_keys, reco_loss / valid_seg
else:
return prototype, new_keys, reco_loss / valid_seg
def get_criterion(cfg):
cfg_criterion = cfg["criterion"]
aux_weight = (
cfg["net"]["aux_loss"]["loss_weight"]
if cfg["net"].get("aux_loss", False)
else 0
)
ignore_index = cfg["dataset"]["ignore_label"]
if cfg_criterion["type"] == "ohem":
criterion = CriterionOhem(
aux_weight, ignore_index=ignore_index, **cfg_criterion["kwargs"]
)
else:
criterion = Criterion(
aux_weight, ignore_index=ignore_index, **cfg_criterion["kwargs"]
)
return criterion
class Criterion(nn.Module):
def __init__(self, aux_weight, ignore_index=255, use_weight=False):
super(Criterion, self).__init__()
self._aux_weight = aux_weight
self._ignore_index = ignore_index
self.use_weight = use_weight
if not use_weight:
self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
else:
weights = torch.FloatTensor(
[
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
1.0,
]
).cuda()
self._criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)
self._criterion1 = nn.CrossEntropyLoss(
ignore_index=ignore_index, weight=weights
)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
if self._aux_weight > 0: # require aux loss
main_pred, aux_pred = preds
main_h, main_w = main_pred.size(2), main_pred.size(3)
aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)
assert (
len(preds) == 2
and main_h == aux_h
and main_w == aux_w
and main_h == h
and main_w == w
)
if self.use_weight:
loss1 = self._criterion(main_pred, target) + self._criterion1(
main_pred, target
)
else:
loss1 = self._criterion(main_pred, target)
loss2 = self._criterion(aux_pred, target)
loss = loss1 + self._aux_weight * loss2
else:
pred_h, pred_w = preds.size(2), preds.size(3)
assert pred_h == h and pred_w == w
loss = self._criterion(preds, target)
return loss
class CriterionOhem(nn.Module):
def __init__(
self,
aux_weight,
thresh=0.7,
min_kept=100000,
ignore_index=255,
use_weight=False,
):
super(CriterionOhem, self).__init__()
self._aux_weight = aux_weight
self._criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh, min_kept, use_weight
)
self._criterion2 = OhemCrossEntropy2dTensor(ignore_index, thresh, min_kept)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
if self._aux_weight > 0: # require aux loss
main_pred, aux_pred = preds
main_h, main_w = main_pred.size(2), main_pred.size(3)
aux_h, aux_w = aux_pred.size(2), aux_pred.size(3)
assert (
len(preds) == 2
and main_h == aux_h
and main_w == aux_w
and main_h == h
and main_w == w
)
loss1 = self._criterion1(main_pred, target)
loss2 = self._criterion2(aux_pred, target)
loss = loss1 + self._aux_weight * loss2
else:
pred_h, pred_w = preds.size(2), preds.size(3)
assert pred_h == h and pred_w == w
loss = self._criterion1(preds, target)
return loss
class OhemCrossEntropy2d(nn.Module):
def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8):
super(OhemCrossEntropy2d, self).__init__()
self.ignore_label = ignore_label
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.factor = factor
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)
def find_threshold(self, np_predict, np_target):
# downsample 1/8
factor = self.factor
predict = nd.zoom(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1)
target = nd.zoom(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0)
n, c, h, w = predict.shape
min_kept = self.min_kept // (
factor * factor
) # int(self.min_kept_ratio * n * h * w)
input_label = target.ravel().astype(np.int32)
input_prob = np.rollaxis(predict, 1).reshape((c, -1))
valid_flag = input_label != self.ignore_label
valid_inds = np.where(valid_flag)[0]
label = input_label[valid_flag]
num_valid = valid_flag.sum()
if min_kept >= num_valid:
threshold = 1.0
elif num_valid > 0:
prob = input_prob[:, valid_flag]
pred = prob[label, np.arange(len(label), dtype=np.int32)]
threshold = self.thresh
if min_kept > 0:
k_th = min(len(pred), min_kept) - 1
new_array = np.partition(pred, k_th)
new_threshold = new_array[k_th]
if new_threshold > self.thresh:
threshold = new_threshold
return threshold
def generate_new_target(self, predict, target):
np_predict = predict.data.cpu().numpy()
np_target = target.data.cpu().numpy()
n, c, h, w = np_predict.shape
threshold = self.find_threshold(np_predict, np_target)
input_label = np_target.ravel().astype(np.int32)
input_prob = np.rollaxis(np_predict, 1).reshape((c, -1))
valid_flag = input_label != self.ignore_label
valid_inds = np.where(valid_flag)[0]
label = input_label[valid_flag]
num_valid = valid_flag.sum()
if num_valid > 0:
prob = input_prob[:, valid_flag]
pred = prob[label, np.arange(len(label), dtype=np.int32)]
kept_flag = pred <= threshold
valid_inds = valid_inds[kept_flag]
label = input_label[valid_inds].copy()
input_label.fill(self.ignore_label)
input_label[valid_inds] = label
new_target = (
torch.from_numpy(input_label.reshape(target.size()))
.long()
.cuda(target.get_device())
)
return new_target
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
input_prob = F.softmax(predict, 1)
target = self.generate_new_target(input_prob, target)
return self.criterion(predict, target)
class OhemCrossEntropy2dTensor(nn.Module):
"""
Ohem Cross Entropy Tensor Version
"""
def __init__(
self, ignore_index=255, thresh=0.7, min_kept=256, use_weight=False, reduce=False
):
super(OhemCrossEntropy2dTensor, self).__init__()
self.ignore_index = ignore_index
self.thresh = float(thresh)
self.min_kept = int(min_kept)
if use_weight:
weight = torch.FloatTensor(
[
0.8373,
0.918,
0.866,
1.0345,
1.0166,
0.9969,
0.9754,
1.0489,
0.8786,
1.0023,
0.9539,
0.9843,
1.1116,
0.9037,
1.0865,
1.0955,
1.0865,
1.1529,
1.0507,
]
).cuda()
# weight = torch.FloatTensor(
# [0.4762, 0.5, 0.4762, 1.4286, 1.1111, 0.4762, 0.8333, 0.5, 0.5, 0.8333, 0.5263, 0.5882,
# 1.4286, 0.5, 3.3333,5.0, 10.0, 2.5, 0.8333]).cuda()
self.criterion = torch.nn.CrossEntropyLoss(
reduction="mean", weight=weight, ignore_index=ignore_index
)
elif reduce:
self.criterion = torch.nn.CrossEntropyLoss(
reduction="none", ignore_index=ignore_index
)
else:
self.criterion = torch.nn.CrossEntropyLoss(
reduction="mean", ignore_index=ignore_index
)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_index)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
pass
# print('Labels: {}'.format(num_valid))
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
_, index = mask_prob.sort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
target = target.masked_fill_(~valid_mask, self.ignore_index)
target = target.view(b, h, w)
return self.criterion(pred, target)
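# Hedged configuration sketch (added for illustration; the key names are taken from
# get_criterion() above, the concrete values are assumptions, not the project's
# actual defaults):
#
#   cfg = {
#       "criterion": {"type": "ohem", "kwargs": {"thresh": 0.7, "min_kept": 100000}},
#       "net": {"aux_loss": {"loss_weight": 0.4}},
#       "dataset": {"ignore_label": 255},
#   }
#   criterion = get_criterion(cfg)
#   # -> CriterionOhem(aux_weight=0.4, ignore_index=255, thresh=0.7, min_kept=100000)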
|
[
"numpy.partition",
"torch.log",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.softmax",
"scipy.ndimage.zoom",
"torch.FloatTensor",
"torch.clamp",
"einops.rearrange",
"numpy.where",
"numpy.rollaxis",
"torch.zeros",
"torch.no_grad",
"torch.sum",
"torch.sort",
"torch.tensor"
] |
[((247, 272), 'torch.nn.functional.softmax', 'F.softmax', (['predict'], {'dim': '(1)'}), '(predict, dim=1)\n', (256, 272), True, 'from torch.nn import functional as F\n'), ((2693, 2720), 'torch.sort', 'torch.sort', (['prob_l', '(1)', '(True)'], {}), '(prob_l, 1, True)\n', (2703, 2720), False, 'import torch\n'), ((2834, 2861), 'torch.sort', 'torch.sort', (['prob_u', '(1)', '(True)'], {}), '(prob_u, 1, True)\n', (2844, 2861), False, 'import torch\n'), ((2978, 3012), 'torch.cat', 'torch.cat', (['(prob_l, prob_u)'], {'dim': '(0)'}), '((prob_l, prob_u), dim=0)\n', (2987, 3012), False, 'import torch\n'), ((283, 298), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (296, 298), False, 'import torch\n'), ((555, 593), 'einops.rearrange', 'rearrange', (['label', '"""b h w c -> b c h w"""'], {}), "(label, 'b h w c -> b c h w')\n", (564, 593), False, 'from einops import rearrange\n'), ((610, 649), 'torch.clamp', 'torch.clamp', (['label'], {'min': '(0.0001)', 'max': '(1.0)'}), '(label, min=0.0001, max=1.0)\n', (621, 649), False, 'import torch\n'), ((906, 921), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (919, 921), False, 'import torch\n'), ((978, 1012), 'torch.softmax', 'torch.softmax', (['pred_teacher'], {'dim': '(1)'}), '(pred_teacher, dim=1)\n', (991, 1012), False, 'import torch\n'), ((1393, 1443), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predict', 'target'], {'ignore_index': '(255)'}), '(predict, target, ignore_index=255)\n', (1408, 1443), True, 'from torch.nn import functional as F\n'), ((2251, 2287), 'torch.cat', 'torch.cat', (['(label_l, label_u)'], {'dim': '(0)'}), '((label_l, label_u), dim=0)\n', (2260, 2287), False, 'import torch\n'), ((2322, 2358), 'torch.cat', 'torch.cat', (['(label_l, label_u)'], {'dim': '(0)'}), '((label_l, label_u), dim=0)\n', (2331, 2358), False, 'import torch\n'), ((4447, 4516), 'torch.cat', 'torch.cat', (['(class_mask_l * (label_l[:, i] == 0), class_mask_u)'], {'dim': '(0)'}), '((class_mask_l * (label_l[:, i] == 0), class_mask_u), dim=0)\n', (4456, 4516), False, 'import torch\n'), ((5429, 5454), 'torch.cat', 'torch.cat', (['seg_proto_list'], {}), '(seg_proto_list)\n', (5438, 5454), False, 'import torch\n'), ((12645, 12697), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_label'}), '(ignore_index=ignore_label)\n', (12670, 12697), False, 'import torch\n'), ((12824, 12892), 'scipy.ndimage.zoom', 'nd.zoom', (['np_predict', '(1.0, 1.0, 1.0 / factor, 1.0 / factor)'], {'order': '(1)'}), '(np_predict, (1.0, 1.0, 1.0 / factor, 1.0 / factor), order=1)\n', (12831, 12892), True, 'import scipy.ndimage as nd\n'), ((12910, 12972), 'scipy.ndimage.zoom', 'nd.zoom', (['np_target', '(1.0, 1.0 / factor, 1.0 / factor)'], {'order': '(0)'}), '(np_target, (1.0, 1.0 / factor, 1.0 / factor), order=0)\n', (12917, 12972), True, 'import scipy.ndimage as nd\n'), ((15446, 15467), 'torch.nn.functional.softmax', 'F.softmax', (['predict', '(1)'], {}), '(predict, 1)\n', (15455, 15467), True, 'from torch.nn import functional as F\n'), ((17520, 17542), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (17529, 17542), True, 'from torch.nn import functional as F\n'), ((1347, 1371), 'torch.sum', 'torch.sum', (['(target != 255)'], {}), '(target != 255)\n', (1356, 1371), False, 'import torch\n'), ((9083, 9129), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index'}), '(ignore_index=ignore_index)\n', (9102, 9129), True, 'import torch.nn as nn\n'), ((9747, 9793), 
'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index'}), '(ignore_index=ignore_index)\n', (9766, 9793), True, 'import torch.nn as nn\n'), ((9825, 9887), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'ignore_index', 'weight': 'weights'}), '(ignore_index=ignore_index, weight=weights)\n', (9844, 9887), True, 'import torch.nn as nn\n'), ((13318, 13338), 'numpy.where', 'np.where', (['valid_flag'], {}), '(valid_flag)\n', (13326, 13338), True, 'import numpy as np\n'), ((14409, 14429), 'numpy.where', 'np.where', (['valid_flag'], {}), '(valid_flag)\n', (14417, 14429), True, 'import numpy as np\n'), ((16858, 16948), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""', 'weight': 'weight', 'ignore_index': 'ignore_index'}), "(reduction='mean', weight=weight, ignore_index=\n ignore_index)\n", (16883, 16948), False, 'import torch\n'), ((5384, 5401), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5396, 5401), False, 'import torch\n'), ((5568, 5633), 'torch.zeros', 'torch.zeros', (['(prob_indices_l.shape[-1], num_queries, 1, num_feat)'], {}), '((prob_indices_l.shape[-1], num_queries, 1, num_feat))\n', (5579, 5633), False, 'import torch\n'), ((6447, 6462), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6460, 6462), False, 'import torch\n'), ((7645, 7693), 'torch.cat', 'torch.cat', (['(positive_feat, negative_feat)'], {'dim': '(1)'}), '((positive_feat, negative_feat), dim=1)\n', (7654, 7693), False, 'import torch\n'), ((13201, 13224), 'numpy.rollaxis', 'np.rollaxis', (['predict', '(1)'], {}), '(predict, 1)\n', (13212, 13224), True, 'import numpy as np\n'), ((14289, 14315), 'numpy.rollaxis', 'np.rollaxis', (['np_predict', '(1)'], {}), '(np_predict, 1)\n', (14300, 14315), True, 'import numpy as np\n'), ((17024, 17094), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""none"""', 'ignore_index': 'ignore_index'}), "(reduction='none', ignore_index=ignore_index)\n", (17049, 17094), False, 'import torch\n'), ((17168, 17238), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""', 'ignore_index': 'ignore_index'}), "(reduction='mean', ignore_index=ignore_index)\n", (17193, 17238), False, 'import torch\n'), ((680, 696), 'torch.log', 'torch.log', (['label'], {}), '(label)\n', (689, 696), False, 'import torch\n'), ((1049, 1072), 'torch.log', 'torch.log', (['(prob + 1e-10)'], {}), '(prob + 1e-10)\n', (1058, 1072), False, 'import torch\n'), ((5230, 5247), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5242, 5247), False, 'import torch\n'), ((5323, 5340), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5335, 5340), False, 'import torch\n'), ((9166, 9285), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0,\n 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, \n 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0])\n', (9183, 9285), False, 'import torch\n'), ((13769, 13793), 'numpy.partition', 'np.partition', (['pred', 'k_th'], {}), '(pred, k_th)\n', (13781, 13793), True, 'import numpy as np\n'), ((16007, 16185), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, \n 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529,\n 1.0507]'], {}), '([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, \n 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 
0.9037, 1.0865, 1.0955,\n 1.0865, 1.1529, 1.0507])\n', (16024, 16185), False, 'import torch\n'), ((7989, 8013), 'torch.zeros', 'torch.zeros', (['num_queries'], {}), '(num_queries)\n', (8000, 8013), False, 'import torch\n')]
|
"""Tools used by the examples """
import numpy as np
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))+"/../meep_tomo")
from meep_tomo import extract, common
import ex_bpg
def compute_metrices(tomo_path, approx, autofocus=False):
    """Compute RMS and TV metrics for a MEEP-simulated ODT reconstruction
Parameters
----------
tomo_path: str
Simulation directory or .npy file of a reconstructed simulation
approx: str
Approximation to use, one of ["radon", "born", "rytov"]
autofocus: bool
If `True`, perform autofocusing. If `False` uses the exact
focusing (the center of rotation in the simulation).
This only makes sense if `tomo_path` is not an .npy file.
Returns
-------
rms, tv: floats
root-mean-square and total variation errors
Notes
-----
A second call with the same arguments will be fast, because the
result is saved on disk.
See Also
--------
metric_rms, metric_tv: The used metrics
"""
assert approx in ["radon", "born", "rytov"]
tomo_path = os.path.abspath(tomo_path)
if os.path.isdir(tomo_path):
sim_dir = os.path.abspath(tomo_path)
res_dir = os.path.abspath(tomo_path)+"_results"
common.mkdir_p(res_dir)
metr_file = os.path.join(res_dir, "metrices.txt")
npy_file = False
elif tomo_path.endswith(".npy"):
res_dir = os.path.dirname(os.path.abspath(tomo_path))
sim_dir = res_dir[:-8]
msg = "Simulation directory not found! The .npy file should be in a " +\
"folder named after the simulation with '_results' appended!"
assert os.path.exists(sim_dir), msg
metr_file = tomo_path[:-4]+"_metrices.txt"
npy_file = tomo_path
else:
raise ValueError("simulation must be a directory or an .npy file!")
tv = None
ss = None
# Check if the results_file exists and read parameters
if os.path.exists(metr_file):
with open(metr_file, "r") as fd:
lines = fd.readlines()
for line in lines:
line = line.strip()
if line.startswith("TV_"+approx):
try:
tv = float(line.split()[1])
except:
pass
elif line.startswith("SS_"+approx):
try:
ss = float(line.split()[1])
except:
pass
if tv is None or ss is None:
if npy_file:
ri = np.load(npy_file)
assert autofocus == False, "`autofocus` has no effect for .npy files!"
else:
# Recompute everything
ri = ex_bpg.backpropagate_fdtd_data(sim_dir,
approximation=approx,
autofocus=autofocus)
# reference
riref = extract.get_tomo_ri_structure(sim_dir)
ss = metric_rms(ri, riref)
tv = metric_tv(ri, riref)
# Save result in resf files
with open(metr_file, "a") as resfdata:
lines = "# metrices of ri-riref\n"
lines += "TV_{} {:.15e}\n".format(approx, tv)
lines += "SS_{} {:.15e}\n".format(approx, ss)
resfdata.writelines(lines)
return ss, tv
def cutout(a):
"""Cut out circle/sphere from 2D/3D square/cubic array"""
x = np.arange(a.shape[0])
c = a.shape[0] / 2
if len(a.shape) == 2:
x = x.reshape(-1, 1)
y = x.reshape(1, -1)
zero = ((x-c)**2 + (y-c)**2) < c**2
elif len(a.shape) == 3:
x = x.reshape(-1, 1, 1)
y = x.reshape(1, -1, 1)
        z = x.reshape(1, 1, -1)
zero = ((x-c)**2 + (y-c)**2 + (z-c)**2) < c**2
else:
raise ValueError("Cutout array must have dimension 2 or 3!")
a *= zero
#tool.arr2im(a, scale=True).save("test.png")
return a
def metric_rms(ri, ref):
"""Root mean square metric (normalized)
This metric was used and described in
    Müller et al., "ODTbrain: a Python library for full-view,
dense diffraction tomography" Bioinformatics 2015
"""
rms = np.sum(cutout(ri.real-ref.real)**2)
norm = np.sum(cutout(ref.real-1)**2)
return np.sqrt(rms/norm)
def metric_tv(ri, ref):
"""Total variation metric (normalized)
This metric was used and described in
    Müller et al., "ODTbrain: a Python library for full-view,
dense diffraction tomography" Bioinformatics 2015
"""
grad = np.gradient(ri.real-ref)
result = 0
for g in grad:
result += np.sum(cutout(np.abs(g)))
tv = result / len(grad)
norm = np.sum(cutout(ref.real-1)**2)
return np.sqrt(tv/norm)
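# Hedged sanity check (added for illustration, not part of the original module):
# both metrics compare a reconstruction against the reference inside the inscribed
# circle/sphere and are normalised by the reference contrast, so a perfect
# reconstruction gives 0 and noisier reconstructions give larger values.
#
#   ref = np.ones((64, 64)); ref[24:40, 24:40] = 1.05        # toy phantom on background 1
#   ri = ref + 0.01 * np.random.randn(64, 64)                # noisy "reconstruction"
#   metric_rms(ref.copy(), ref)                              # -> 0.0
#   metric_rms(ri, ref) > 0 and metric_tv(ri, ref) > 0       # -> True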
|
[
"os.path.abspath",
"numpy.load",
"numpy.abs",
"os.path.isdir",
"os.path.exists",
"meep_tomo.extract.get_tomo_ri_structure",
"meep_tomo.common.mkdir_p",
"ex_bpg.backpropagate_fdtd_data",
"numpy.arange",
"os.path.join",
"numpy.gradient",
"numpy.sqrt"
] |
[((1119, 1145), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1134, 1145), False, 'import os\n'), ((1154, 1178), 'os.path.isdir', 'os.path.isdir', (['tomo_path'], {}), '(tomo_path)\n', (1167, 1178), False, 'import os\n'), ((1989, 2014), 'os.path.exists', 'os.path.exists', (['metr_file'], {}), '(metr_file)\n', (2003, 2014), False, 'import os\n'), ((3485, 3506), 'numpy.arange', 'np.arange', (['a.shape[0]'], {}), '(a.shape[0])\n', (3494, 3506), True, 'import numpy as np\n'), ((4329, 4348), 'numpy.sqrt', 'np.sqrt', (['(rms / norm)'], {}), '(rms / norm)\n', (4336, 4348), True, 'import numpy as np\n'), ((4594, 4620), 'numpy.gradient', 'np.gradient', (['(ri.real - ref)'], {}), '(ri.real - ref)\n', (4605, 4620), True, 'import numpy as np\n'), ((4777, 4795), 'numpy.sqrt', 'np.sqrt', (['(tv / norm)'], {}), '(tv / norm)\n', (4784, 4795), True, 'import numpy as np\n'), ((1198, 1224), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1213, 1224), False, 'import os\n'), ((1289, 1312), 'meep_tomo.common.mkdir_p', 'common.mkdir_p', (['res_dir'], {}), '(res_dir)\n', (1303, 1312), False, 'from meep_tomo import extract, common\n'), ((1333, 1370), 'os.path.join', 'os.path.join', (['res_dir', '"""metrices.txt"""'], {}), "(res_dir, 'metrices.txt')\n", (1345, 1370), False, 'import os\n'), ((2984, 3022), 'meep_tomo.extract.get_tomo_ri_structure', 'extract.get_tomo_ri_structure', (['sim_dir'], {}), '(sim_dir)\n', (3013, 3022), False, 'from meep_tomo import extract, common\n'), ((110, 135), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (125, 135), False, 'import os\n'), ((1243, 1269), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1258, 1269), False, 'import os\n'), ((1698, 1721), 'os.path.exists', 'os.path.exists', (['sim_dir'], {}), '(sim_dir)\n', (1712, 1721), False, 'import os\n'), ((2601, 2618), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (2608, 2618), True, 'import numpy as np\n'), ((2768, 2855), 'ex_bpg.backpropagate_fdtd_data', 'ex_bpg.backpropagate_fdtd_data', (['sim_dir'], {'approximation': 'approx', 'autofocus': 'autofocus'}), '(sim_dir, approximation=approx, autofocus=\n autofocus)\n', (2798, 2855), False, 'import ex_bpg\n'), ((1467, 1493), 'os.path.abspath', 'os.path.abspath', (['tomo_path'], {}), '(tomo_path)\n', (1482, 1493), False, 'import os\n'), ((4685, 4694), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (4691, 4694), True, 'import numpy as np\n')]
|
import numpy as np
def apply_cross_fade(clips, cross_fade_ms, sr):
"""Concatenate audio clips with a cross fade."""
num_clips = len(clips)
cross_fade_samples = int(np.floor(cross_fade_ms * sr / 1000))
fade_ramp = np.arange(cross_fade_samples) / cross_fade_samples
# if not is_even(cross_fade_samples):
# cross_fade_samples += 1
raw_num_samples = 0
for clip in clips:
raw_num_samples += len(clip)
total_overlap_samples = (num_clips - 1) * cross_fade_samples
num_samples = raw_num_samples - total_overlap_samples
y = np.zeros(num_samples)
write_in = 0
for clip in clips:
write_out = write_in + len(clip)
# Update pointers.
ramp_in = write_out - cross_fade_samples
ramp_out = write_out
# Fade in and place.
clip[:cross_fade_samples] *= fade_ramp
y[write_in:write_out] += clip
# Fade out.
y[ramp_in:ramp_out] *= (1 - fade_ramp)
# Advance write pointer.
write_in = ramp_in
return y
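# Note on apply_cross_fade: the fade-in is written into the input clips in place, and the
# head of the first clip / tail of the last clip are faded as well, because every clip
# boundary is treated the same way.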
if __name__ == '__main__':
import matplotlib.pyplot as plt
import scipy.io.wavfile
file_path = "../audio/008-you-possess-the-treasure-you-seek-seed001.wav"
# Test audio file.
sr, x = scipy.io.wavfile.read(file_path)
x = x / np.iinfo(np.int16).max
time_x = np.arange(len(x)) / sr
plt.plot(time_x, x, label='Original')
# Quick list-of-clips demo.
tmp = []
for i in range(20):
tmp.append(x[i * 1000:(i + 1) * 1000])
cross_fade_ms = 20
y = apply_cross_fade(tmp, cross_fade_ms, sr)
time_y = np.arange(len(y)) / sr
plt.plot(time_y, y, label='Cross fade')
    plt.legend()
    plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.floor",
"numpy.zeros",
"numpy.iinfo",
"numpy.arange"
] |
[((579, 600), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (587, 600), True, 'import numpy as np\n'), ((1363, 1400), 'matplotlib.pyplot.plot', 'plt.plot', (['time_x', 'x'], {'label': '"""Original"""'}), "(time_x, x, label='Original')\n", (1371, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1671), 'matplotlib.pyplot.plot', 'plt.plot', (['time_y', 'y'], {'label': '"""Cross fade"""'}), "(time_y, y, label='Cross fade')\n", (1640, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1684, 1686), True, 'import matplotlib.pyplot as plt\n'), ((180, 215), 'numpy.floor', 'np.floor', (['(cross_fade_ms * sr / 1000)'], {}), '(cross_fade_ms * sr / 1000)\n', (188, 215), True, 'import numpy as np\n'), ((233, 262), 'numpy.arange', 'np.arange', (['cross_fade_samples'], {}), '(cross_fade_samples)\n', (242, 262), True, 'import numpy as np\n'), ((1299, 1317), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (1307, 1317), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import os
from conv import *
import multiprocessing
from multiprocessing import Pool
from itertools import product
from numba import njit
from functools import partial
import math
import sklearn
from sklearn import linear_model
def load_images_from_folder(folder):
images = []
for filename in os.listdir(folder):
images.append(cv2.imread(os.path.join(folder,filename),0))
return images
def load_data(folder):
images=[]
n=len(os.listdir(folder))
#print(n)
output=[]
iters = 0
for filename in os.listdir(folder):
path=folder+"\\"+filename
pictures = load_images_from_folder(path)
for pics in pictures:
images.append(pics)
y=np.zeros((n,1))
y[iters,:] =1
y.reshape(1,n)
output.append(y)
iters += 1
return images,output
def convert(l):
return (*l,)
def data_preprocessing(data,reshape_dim):
for i in range(0,len(data)):
data[i]=ConvNet(cv2.resize(data[i]/255,reshape_dim,interpolation=cv2.INTER_AREA))
data[i]=data[i].reshape(data[i].size,1)
return data
def prepare(data,reshape_dim,i):
data[i]=ConvNet(cv2.resize(data[i]/255,reshape_dim,interpolation=cv2.INTER_AREA))
data[i]=data[i].reshape(data[i].size,1)
def prepare_2(data):
data=ConvNet(cv2.resize(data/255,(256,256),interpolation=cv2.INTER_AREA))
data=data.reshape(data.size,1)
return data
def parallel(data,reshape_dim):
process=[]
for i in range(len(data)):
p=multiprocessing.Process(target=prepare,args=(data,reshape_dim,i))
process.append(p)
for x in process:
x.start()
for x in process:
x.join()
for i in data:
print(i.shape)
return data
def square(x):
return x**2
def parallel_2(data,reshape_dim):
x=0
pool=Pool(4)
x=pool.map(prepare_2,data)
print(x)
pool.close()
pool.join()
return x
def softmax(Z):
e_Z = np.exp(Z - np.max(Z, axis = 0, keepdims = True,initial=-np.inf))
return e_Z / e_Z.sum(axis = 0)
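# Subtracting the per-column maximum before exponentiating keeps np.exp from overflowing;
# softmax is shift-invariant, so the result is unchanged. initial=-np.inf only sets the
# starting value of the reduction and does not change the maximum for non-empty input.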
def predict(X,weights):
return softmax(weights.T@X)
def cross_entropy(y_hat, y):
return - np.log(y_hat[range(len(y_hat)), y])
def update_weights(features,output,weights,learning_rate):
predicted=predict(features,weights)
print(features.shape)
print(weights.shape)
print(predicted.shape)
#print(np.linalg.norm(predicted-output))
    # descend the cross-entropy loss: its gradient w.r.t. the weights is
    # features @ (predicted - output).T (same sign convention as in Adam below)
    weights=weights-learning_rate*(((predicted-output)@features.T).T)
return weights
def Adam(features,output,weights,lr,t,beta1=0.9,beta2=0.999,epsilon=1e-08):
#print(features.shape)
#print(output.shape)
#print(weights)
#print(type(weights))
predicted=predict(features,weights)
g=(-(output-predicted)@features.T).T
m=np.zeros(weights.shape)
v=np.zeros(weights.shape)
m=beta1*m+(1-beta1)*g
v=beta2*v+(1-beta2)*(g*g)
m_hat=m/(1-(beta1**(t+1)))
v_hat=v/(1-(beta2**(t+1)))
#print(m_hat,v_hat)
#print(type(((lr*m_hat)/(np.sqrt(v_hat)+epsilon)).T))
weights=weights-((lr*m_hat)/(np.sqrt(v_hat)+epsilon))
return weights
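# Worked example of the bias correction above (illustration only): with zero-initialized
# moments, t = 0 and a scalar gradient g, m_hat = g and v_hat = g*g, so the first update
# is roughly lr * g / (|g| + epsilon), i.e. a step of magnitude close to lr regardless of
# the gradient scale.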
def softmax_regression(data,output,learning_rate,epoch):
data_hat=np.array(data)
data_hat=data_hat.reshape(data_hat.shape[0],data_hat.shape[1]).T
output_hat=np.array(output)
output_hat=output_hat.reshape(output_hat.shape[0],output_hat.shape[1]).T
pre_weights=0
weights=np.zeros((len(data[0]),len(output[0])))
model=linear_model.LogisticRegression(C=1e5,solver='lbfgs',multi_class='multinomial')
"""for i in range(epoch):
predicted=predict(data_hat,weights)
print(np.linalg.norm(predicted-output_hat))
#for n in np.random.permutation(len(output)):
weights=Adam(data_hat,output_hat,weights,learning_rate,i)
#if np.linalg.norm(weights-pre_weights)<0.0001:
# print(i)
# break"""
return weights
def softmax_regression_2(data,output,x1,x2,x3):
output=np.asarray(output)
output=output.reshape(output.shape[0],output.shape[1]).T
output=output.reshape(-1)
data=np.asarray(data)
data=data.reshape(data.shape[0],data.shape[1]).T
weights=np.zeros((len(data),len(output)))
model=sklearn.linear_model.LogisticRegression(C=1e5,solver='lbfgs',multi_class='multinomial')
model.fit(data,output)
y1=model.predict(x1)
y2=model.predict(x2)
y3=model.predict(x3)
#for i in range(epoch):
# weights=update_weights(data,output,weights,learning_rate)
return y1,y2,y3
def CNN(data,output,lr,epoch):
k1=np.random.rand(3,3)
k2=np.random.rand(3,3)
k3=np.random.rand(3,3)
k4=np.random.rand(3,3)
k5=np.random.rand(3,3)
k6=np.random.rand(3,3)
k7=np.random.rand(3,3)
k8=np.random.rand(3,3)
pool=Pool(4)
conv1=pool.map(partial(conv_layer,kernel=k1),data)
pool.close()
pool.join()
    conv1 = np.maximum(conv1, 0)  # ReLU (Pool.map returns a list; np.maximum also yields an ndarray)
pool=Pool(4)
m1_=pool.map(max_pooling_,conv1)
pool.close()
pool.join()
m1=[i[0] for i in m1_]
pos1=[i[1]for i in m1_]
u1=[i[2]for i in m1_]
r1=[i[3]for i in m1_]
pool=Pool(4)
conv2=pool.map(partial(conv_layer,kernel=k2),m1)
pool.close()
pool.join()
    conv2 = np.maximum(conv2, 0)  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv2)
pool.close()
pool.join()
m2=[i[0] for i in m1_]
pos2=[i[1]for i in m1_]
u2=[i[2]for i in m1_]
r2=[i[3]for i in m1_]
pool=Pool(4)
conv3=pool.map(partial(conv_layer,kernel=k3),m2)
pool.close()
pool.join()
    conv3 = np.maximum(conv3, 0)  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv3)
pool.close()
pool.join()
m3=[i[0] for i in m1_]
pos3=[i[1]for i in m1_]
u3=[i[2]for i in m1_]
r3=[i[3]for i in m1_]
pool=Pool(4)
conv4=pool.map(partial(conv_layer,kernel=k4),m3)
pool.close()
pool.join()
    conv4 = np.maximum(conv4, 0)  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv4)
pool.close()
pool.join()
m4=[i[0] for i in m1_]
pos4=[i[1]for i in m1_]
u4=[i[2]for i in m1_]
r4=[i[3]for i in m1_]
pool=Pool(4)
conv5=pool.map(partial(conv_layer,kernel=k5),m4)
pool.close()
pool.join()
    conv5 = np.maximum(conv5, 0)  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv5)
pool.close()
pool.join()
m5=[i[0] for i in m1_]
pos5=[i[1]for i in m1_]
u5=[i[2]for i in m1_]
r5=[i[3]for i in m1_]
pool=Pool(4)
conv6=pool.map(partial(conv_layer,kernel=k6),m5)
pool.close()
pool.join()
    conv6 = np.maximum(conv6, 0)  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv6)
pool.close()
pool.join()
m6=[i[0] for i in m1_]
pos6=[i[1]for i in m1_]
u6=[i[2]for i in m1_]
r6=[i[3]for i in m1_]
pool=Pool(4)
conv7=pool.map(partial(conv_layer,kernel=k7),m6)
pool.close()
pool.join()
    conv7 = np.maximum(conv7, 0)  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv7)
pool.close()
pool.join()
m7=[i[0] for i in m1_]
pos7=[i[1]for i in m1_]
u7=[i[2]for i in m1_]
r7=[i[3]for i in m1_]
pool=Pool(4)
conv8=pool.map(partial(conv_layer,kernel=k8),m7)
pool.close()
pool.join()
    conv8 = np.maximum(conv8, 0)  # ReLU
pool=Pool(4)
    m1_=pool.map(max_pooling_,conv8)
pool.close()
pool.join()
m8=[i[0] for i in m1_]
pos8=[i[1]for i in m1_]
u8=[i[2]for i in m1_]
r8=[i[3]for i in m1_]
def train(folder,reshape_dim,learning_rate,epoch):
data,output=load_data(folder)
#data=[1,2,3,4,5,6,7,8,9,10,11,12,13]
#print(output)
#print(output[0].shape)
#print(data[0].shape)
#print(data[1])
data=parallel_2(data,reshape_dim)
weights=softmax_regression(data,output,learning_rate,epoch)
return weights
def train_with_sklearn(folder,reshape_dim,x1,x2,x3):
data,output=load_data(folder)
data=parallel_2(data,reshape_dim)
y1,y2,y3=softmax_regression_2(data,output,x1,x2,x3)
return y1,y2,y3
|
[
"functools.partial",
"numpy.asarray",
"numpy.zeros",
"sklearn.linear_model.LogisticRegression",
"numpy.max",
"numpy.array",
"multiprocessing.Pool",
"numpy.random.rand",
"multiprocessing.Process",
"os.path.join",
"os.listdir",
"cv2.resize",
"numpy.sqrt"
] |
[((348, 366), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (358, 366), False, 'import os\n'), ((593, 611), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (603, 611), False, 'import os\n'), ((1960, 1967), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (1964, 1967), False, 'from multiprocessing import Pool\n'), ((2925, 2948), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (2933, 2948), True, 'import numpy as np\n'), ((2956, 2979), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (2964, 2979), True, 'import numpy as np\n'), ((3342, 3356), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3350, 3356), True, 'import numpy as np\n'), ((3443, 3459), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (3451, 3459), True, 'import numpy as np\n'), ((3621, 3712), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': '(100000.0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(C=100000.0, solver='lbfgs', multi_class=\n 'multinomial')\n", (3652, 3712), False, 'from sklearn import linear_model\n'), ((4148, 4166), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (4158, 4166), True, 'import numpy as np\n'), ((4270, 4286), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (4280, 4286), True, 'import numpy as np\n'), ((4399, 4497), 'sklearn.linear_model.LogisticRegression', 'sklearn.linear_model.LogisticRegression', ([], {'C': '(100000.0)', 'solver': '"""lbfgs"""', 'multi_class': '"""multinomial"""'}), "(C=100000.0, solver='lbfgs',\n multi_class='multinomial')\n", (4438, 4497), False, 'import sklearn\n'), ((4751, 4771), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4765, 4771), True, 'import numpy as np\n'), ((4779, 4799), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4793, 4799), True, 'import numpy as np\n'), ((4807, 4827), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4821, 4827), True, 'import numpy as np\n'), ((4835, 4855), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4849, 4855), True, 'import numpy as np\n'), ((4863, 4883), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4877, 4883), True, 'import numpy as np\n'), ((4891, 4911), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4905, 4911), True, 'import numpy as np\n'), ((4919, 4939), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4933, 4939), True, 'import numpy as np\n'), ((4947, 4967), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (4961, 4967), True, 'import numpy as np\n'), ((4981, 4988), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (4985, 4988), False, 'from multiprocessing import Pool\n'), ((5113, 5120), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5117, 5120), False, 'from multiprocessing import Pool\n'), ((5319, 5326), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5323, 5326), False, 'from multiprocessing import Pool\n'), ((5449, 5456), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5453, 5456), False, 'from multiprocessing import Pool\n'), ((5653, 5660), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5657, 5660), False, 'from multiprocessing import Pool\n'), ((5783, 5790), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5787, 5790), False, 'from multiprocessing import Pool\n'), ((5987, 5994), 
'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (5991, 5994), False, 'from multiprocessing import Pool\n'), ((6117, 6124), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6121, 6124), False, 'from multiprocessing import Pool\n'), ((6321, 6328), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6325, 6328), False, 'from multiprocessing import Pool\n'), ((6451, 6458), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6455, 6458), False, 'from multiprocessing import Pool\n'), ((6655, 6662), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6659, 6662), False, 'from multiprocessing import Pool\n'), ((6785, 6792), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6789, 6792), False, 'from multiprocessing import Pool\n'), ((6989, 6996), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (6993, 6996), False, 'from multiprocessing import Pool\n'), ((7119, 7126), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7123, 7126), False, 'from multiprocessing import Pool\n'), ((7323, 7330), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7327, 7330), False, 'from multiprocessing import Pool\n'), ((7453, 7460), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7457, 7460), False, 'from multiprocessing import Pool\n'), ((507, 525), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (517, 525), False, 'import os\n'), ((1281, 1349), 'cv2.resize', 'cv2.resize', (['(data[i] / 255)', 'reshape_dim'], {'interpolation': 'cv2.INTER_AREA'}), '(data[i] / 255, reshape_dim, interpolation=cv2.INTER_AREA)\n', (1291, 1349), False, 'import cv2\n'), ((1432, 1496), 'cv2.resize', 'cv2.resize', (['(data / 255)', '(256, 256)'], {'interpolation': 'cv2.INTER_AREA'}), '(data / 255, (256, 256), interpolation=cv2.INTER_AREA)\n', (1442, 1496), False, 'import cv2\n'), ((1637, 1705), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'prepare', 'args': '(data, reshape_dim, i)'}), '(target=prepare, args=(data, reshape_dim, i))\n', (1660, 1705), False, 'import multiprocessing\n'), ((5009, 5039), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k1'}), '(conv_layer, kernel=k1)\n', (5016, 5039), False, 'from functools import partial\n'), ((5347, 5377), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k2'}), '(conv_layer, kernel=k2)\n', (5354, 5377), False, 'from functools import partial\n'), ((5681, 5711), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k3'}), '(conv_layer, kernel=k3)\n', (5688, 5711), False, 'from functools import partial\n'), ((6015, 6045), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k4'}), '(conv_layer, kernel=k4)\n', (6022, 6045), False, 'from functools import partial\n'), ((6349, 6379), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k5'}), '(conv_layer, kernel=k5)\n', (6356, 6379), False, 'from functools import partial\n'), ((6683, 6713), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k6'}), '(conv_layer, kernel=k6)\n', (6690, 6713), False, 'from functools import partial\n'), ((7017, 7047), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k7'}), '(conv_layer, kernel=k7)\n', (7024, 7047), False, 'from functools import partial\n'), ((7351, 7381), 'functools.partial', 'partial', (['conv_layer'], {'kernel': 'k8'}), '(conv_layer, kernel=k8)\n', (7358, 7381), False, 'from functools import partial\n'), ((777, 793), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (785, 793), True, 'import numpy as np\n'), ((1094, 1162), 
'cv2.resize', 'cv2.resize', (['(data[i] / 255)', 'reshape_dim'], {'interpolation': 'cv2.INTER_AREA'}), '(data[i] / 255, reshape_dim, interpolation=cv2.INTER_AREA)\n', (1104, 1162), False, 'import cv2\n'), ((2102, 2151), 'numpy.max', 'np.max', (['Z'], {'axis': '(0)', 'keepdims': '(True)', 'initial': '(-np.inf)'}), '(Z, axis=0, keepdims=True, initial=-np.inf)\n', (2108, 2151), True, 'import numpy as np\n'), ((402, 432), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (414, 432), False, 'import os\n'), ((3222, 3236), 'numpy.sqrt', 'np.sqrt', (['v_hat'], {}), '(v_hat)\n', (3229, 3236), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from src.square_matrix_multiply import square_matrix_multiply
class TestStrassenMultiply(unittest.TestCase):
def test_square_1(self):
matrix_a = np.array([[1, 3],
[7, 5]])
matrix_b = np.array([[6, 8],
[4, 2]])
expected = np.array([[18, 14],
[62, 66]])
self.assertTrue(bool((square_matrix_multiply(matrix_a, matrix_b) == expected).all()))
|
[
"numpy.array",
"src.square_matrix_multiply.square_matrix_multiply"
] |
[((196, 222), 'numpy.array', 'np.array', (['[[1, 3], [7, 5]]'], {}), '([[1, 3], [7, 5]])\n', (204, 222), True, 'import numpy as np\n'), ((271, 297), 'numpy.array', 'np.array', (['[[6, 8], [4, 2]]'], {}), '([[6, 8], [4, 2]])\n', (279, 297), True, 'import numpy as np\n'), ((347, 377), 'numpy.array', 'np.array', (['[[18, 14], [62, 66]]'], {}), '([[18, 14], [62, 66]])\n', (355, 377), True, 'import numpy as np\n'), ((438, 480), 'src.square_matrix_multiply.square_matrix_multiply', 'square_matrix_multiply', (['matrix_a', 'matrix_b'], {}), '(matrix_a, matrix_b)\n', (460, 480), False, 'from src.square_matrix_multiply import square_matrix_multiply\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.function_base import angle
radius = 100 # curvature radius of the mirror in mm (must be positive)
angle_d = 30 # maximum angle of incidence of the incident beam in degrees
num_rays = 21 # number of rays
source_pos = 80 # source position in mm (must be positive)
focal_length = radius / 2 # focal length of the mirror
y = np.linspace(-radius, radius, 1000)
# mirror equation z = sqrt(R^2 - y^2) - R
def surface(y):
return np.sqrt(radius ** 2 - y ** 2) - radius
# angle between the incident ray and the line connecting the point of incidence
# of the ray on the mirror and the center of curvature of the mirror
def epsilon(inc_angle):
q = radius - source_pos
return np.arcsin(q / radius * np.sin(inc_angle))
# angle of reflected ray
def ref_angle(inc_angle):
return inc_angle - 2 * epsilon(inc_angle)
# the z-coordinate of the intersection of the reflected ray with the axis
def ref_z(inc_angle):
q = radius * np.sin(-epsilon(inc_angle)) / np.sin(ref_angle(inc_angle))
return radius - q
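# Consistency check: for small inc_angle this reduces to the paraxial mirror equation
# 1/source_pos + 1/ref_z = 2/radius, e.g. source_pos = 80 mm and radius = 100 mm give
# ref_z -> 100*80/(2*80 - 100) = 133.3 mm.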
# the y-coordinate of the intersection of the incident ray with the mirror
def height(inc_angle):
phi = ref_angle(inc_angle) + epsilon(inc_angle)
return radius * np.sin(phi)
# line equation for extension of the reflected ray
def line(inc_angle, z, z0):
return np.tan(inc_angle) * (z - z0)
plt.figure(figsize=(13, 8))
plt.plot(surface(y), y) # mirror surface visualization
plt.plot([-2 * radius, 0], [0, 0]) # axis of the mirror
plt.plot([-focal_length], [0], 'o') # focal point
for ang in np.linspace(-angle_d, angle_d, num_rays):
inc_angle = ang * np.pi / 180
h = height(inc_angle)
z_inc = np.array([-source_pos, surface(h)])
y_inc = np.array([0, h])
plt.plot(z_inc, y_inc, 'k', lw=1) # draw incident beam
z_0 = ref_z(inc_angle)
if np.isnan(z_0):
z_0 = -2 * radius
if source_pos >= focal_length:
z_0 = -z_0 if z_0 > 0 else z_0
else:
z_0 = z_0 if z_0 > 0 else -z_0
z_ref = np.array([surface(h), -2 * radius])
y_ref = np.array([h, line(ref_angle(inc_angle), -2 * radius, z_0)])
if abs(source_pos) < abs(2 * focal_length) and abs(source_pos) > abs(focal_length) and abs(z_0) > abs(2 * radius):
z_ref = np.array([surface(h), z_0])
y_ref = np.array([h, 0])
plt.plot(z_ref, y_ref, 'r', lw=1)
plt.title("Radius = {:.1f} mm. Focal length = {:.1f} mm. Source position = {:.1f} mm.\nMaximum incident angle = {:.1f} deg. Number of rays = {}".format(radius, focal_length, -source_pos, angle_d, num_rays))
plt.xlabel("z, mm")
plt.ylabel("r, mm")
plt.ylim(-radius, radius)
plt.xlim(-2 * radius, 0)
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.tan",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] |
[((399, 433), 'numpy.linspace', 'np.linspace', (['(-radius)', 'radius', '(1000)'], {}), '(-radius, radius, 1000)\n', (410, 433), True, 'import numpy as np\n'), ((1428, 1455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 8)'}), '(figsize=(13, 8))\n', (1438, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1547), 'matplotlib.pyplot.plot', 'plt.plot', (['[-2 * radius, 0]', '[0, 0]'], {}), '([-2 * radius, 0], [0, 0])\n', (1521, 1547), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1605), 'matplotlib.pyplot.plot', 'plt.plot', (['[-focal_length]', '[0]', '"""o"""'], {}), "([-focal_length], [0], 'o')\n", (1578, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1674), 'numpy.linspace', 'np.linspace', (['(-angle_d)', 'angle_d', 'num_rays'], {}), '(-angle_d, angle_d, num_rays)\n', (1645, 1674), True, 'import numpy as np\n'), ((2672, 2691), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z, mm"""'], {}), "('z, mm')\n", (2682, 2691), True, 'import matplotlib.pyplot as plt\n'), ((2693, 2712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""r, mm"""'], {}), "('r, mm')\n", (2703, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2739), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-radius)', 'radius'], {}), '(-radius, radius)\n', (2722, 2739), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2765), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2 * radius)', '(0)'], {}), '(-2 * radius, 0)\n', (2749, 2765), True, 'import matplotlib.pyplot as plt\n'), ((2767, 2777), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2775, 2777), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2789), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2787, 2789), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1818), 'numpy.array', 'np.array', (['[0, h]'], {}), '([0, h])\n', (1810, 1818), True, 'import numpy as np\n'), ((1824, 1857), 'matplotlib.pyplot.plot', 'plt.plot', (['z_inc', 'y_inc', '"""k"""'], {'lw': '(1)'}), "(z_inc, y_inc, 'k', lw=1)\n", (1832, 1857), True, 'import matplotlib.pyplot as plt\n'), ((1917, 1930), 'numpy.isnan', 'np.isnan', (['z_0'], {}), '(z_0)\n', (1925, 1930), True, 'import numpy as np\n'), ((2427, 2460), 'matplotlib.pyplot.plot', 'plt.plot', (['z_ref', 'y_ref', '"""r"""'], {'lw': '(1)'}), "(z_ref, y_ref, 'r', lw=1)\n", (2435, 2460), True, 'import matplotlib.pyplot as plt\n'), ((508, 537), 'numpy.sqrt', 'np.sqrt', (['(radius ** 2 - y ** 2)'], {}), '(radius ** 2 - y ** 2)\n', (515, 537), True, 'import numpy as np\n'), ((1287, 1298), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1293, 1298), True, 'import numpy as np\n'), ((1394, 1411), 'numpy.tan', 'np.tan', (['inc_angle'], {}), '(inc_angle)\n', (1400, 1411), True, 'import numpy as np\n'), ((2405, 2421), 'numpy.array', 'np.array', (['[h, 0]'], {}), '([h, 0])\n', (2413, 2421), True, 'import numpy as np\n'), ((789, 806), 'numpy.sin', 'np.sin', (['inc_angle'], {}), '(inc_angle)\n', (795, 806), True, 'import numpy as np\n')]
|
"""
Tests for the loading of surface maps for the GPROF-NN data processing.
"""
from datetime import datetime
import pytest
import numpy as np
from gprof_nn.data.surface import (read_land_mask,
read_autosnow,
read_emissivity_classes)
from gprof_nn.data.preprocessor import has_preprocessor
HAS_PREPROCESSOR = has_preprocessor()
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_land_mask():
"""
Test reading of land mask.
"""
mask = read_land_mask("GMI")
assert mask.mask.shape == (180 * 32, 360 * 32)
mask = read_land_mask("MHS")
assert mask.mask.shape == (180 * 16, 360 * 16)
# Ensure point in North Atlantic is classified as Ocean.
m = mask.interp({"longitude": -46.0, "latitude": 35.0})
assert np.isclose(m.mask.data, 0)
# Ensure point in Africa is classified as land.
m = mask.interp({"longitude": 0.0, "latitude": 20.0})
assert np.all(m.mask.data > 0)
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_autosnow():
"""
Test reading of autosnow files.
"""
autosnow = read_autosnow("2021-01-01T00:00:00")
# Ensure no snow around equator
autosnow_eq = autosnow.interp({"latitude": 0.0, "longitude": 0.0}, "nearest")
assert np.all(autosnow_eq.snow.data == 0)
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_emissivity_classes():
"""
Test reading of emissivity classes.
"""
data = read_emissivity_classes()
# Ensure point in North Atlantic is classified as Ocean.
data_i = data.interp({"longitude": -46.0, "latitude": 35.0})
assert np.all(np.isclose(data_i.emissivity.data, 0))
# Ensure point in Africa is classified as land.
data_i = data.interp({"longitude": 0.0, "latitude": 20.0})
assert np.all(data_i.emissivity.data > 0)
|
[
"gprof_nn.data.surface.read_land_mask",
"gprof_nn.data.surface.read_emissivity_classes",
"gprof_nn.data.surface.read_autosnow",
"numpy.isclose",
"pytest.mark.skipif",
"gprof_nn.data.preprocessor.has_preprocessor",
"numpy.all"
] |
[((383, 401), 'gprof_nn.data.preprocessor.has_preprocessor', 'has_preprocessor', ([], {}), '()\n', (399, 401), False, 'from gprof_nn.data.preprocessor import has_preprocessor\n'), ((405, 477), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PREPROCESSOR)'], {'reason': '"""Preprocessor missing."""'}), "(not HAS_PREPROCESSOR, reason='Preprocessor missing.')\n", (423, 477), False, 'import pytest\n'), ((1030, 1102), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PREPROCESSOR)'], {'reason': '"""Preprocessor missing."""'}), "(not HAS_PREPROCESSOR, reason='Preprocessor missing.')\n", (1048, 1102), False, 'import pytest\n'), ((1401, 1473), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PREPROCESSOR)'], {'reason': '"""Preprocessor missing."""'}), "(not HAS_PREPROCESSOR, reason='Preprocessor missing.')\n", (1419, 1473), False, 'import pytest\n'), ((563, 584), 'gprof_nn.data.surface.read_land_mask', 'read_land_mask', (['"""GMI"""'], {}), "('GMI')\n", (577, 584), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((648, 669), 'gprof_nn.data.surface.read_land_mask', 'read_land_mask', (['"""MHS"""'], {}), "('MHS')\n", (662, 669), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((854, 880), 'numpy.isclose', 'np.isclose', (['m.mask.data', '(0)'], {}), '(m.mask.data, 0)\n', (864, 880), True, 'import numpy as np\n'), ((1003, 1026), 'numpy.all', 'np.all', (['(m.mask.data > 0)'], {}), '(m.mask.data > 0)\n', (1009, 1026), True, 'import numpy as np\n'), ((1196, 1232), 'gprof_nn.data.surface.read_autosnow', 'read_autosnow', (['"""2021-01-01T00:00:00"""'], {}), "('2021-01-01T00:00:00')\n", (1209, 1232), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((1363, 1397), 'numpy.all', 'np.all', (['(autosnow_eq.snow.data == 0)'], {}), '(autosnow_eq.snow.data == 0)\n', (1369, 1397), True, 'import numpy as np\n'), ((1577, 1602), 'gprof_nn.data.surface.read_emissivity_classes', 'read_emissivity_classes', ([], {}), '()\n', (1600, 1602), False, 'from gprof_nn.data.surface import read_land_mask, read_autosnow, read_emissivity_classes\n'), ((1914, 1948), 'numpy.all', 'np.all', (['(data_i.emissivity.data > 0)'], {}), '(data_i.emissivity.data > 0)\n', (1920, 1948), True, 'import numpy as np\n'), ((1748, 1785), 'numpy.isclose', 'np.isclose', (['data_i.emissivity.data', '(0)'], {}), '(data_i.emissivity.data, 0)\n', (1758, 1785), True, 'import numpy as np\n')]
|
""" gyrodata.py
Run one motor with a sinusoidal speed input and an attached gyro.
This example shows how to use the gyro to measure angular position and velocity
by attaching it to the motor shaft.
Setup:
Connect one large motor to port 'A'
Connect the gyro sensor to port number 1.
Notes:
1. Remember there's a cable attached to the sensor, so limit the rotation
angle to approx. 180 degrees.
2. The maximum angular speed that the gyro can detect without saturating
is 440 deg./s (approx. 7.7 rad/s). Limit the motor speed % output to no
more than 35 %.
"""
# Importing modules and classes
import time
import numpy as np
from scipy import integrate
from pyev3.utils import plot_line
from pyev3.brick import LegoEV3
from pyev3.devices import Gyro, Motor
# Defining parameters (for one motor)
T = 2 # Period of sine wave (s)
u0 = 30 # Motor speed amplitude (%)
tstop = 2 # Sine wave duration (s)
# Pre-allocating output arrays
tmotor = []
theta = []
tgyro = []
angle = []
rate = []
# Creating LEGO EV3 objects
ev3 = LegoEV3()
motor = Motor(ev3, port='A')
gyro = Gyro(ev3, portnum=1, inputmode='angle&rate')
# Initializing motor
motor.outputmode = 'speed'
motor.output = 0
motor.reset_angle()
motor.start()
# Getting initial gyro sensor reading to remove drift in the data
angle0, rate0 = gyro.output
# Initializing current time stamp and starting clock
tcurr = 0
tstart = time.perf_counter()
# Running motor sine wave output
while tcurr <= tstop:
# Getting current time for motor (s)
tcurr = time.perf_counter() - tstart
# Assigning current motor sinusoidal
# output using the current time stamp
motor.output = u0 * np.sin((2*np.pi/T) * tcurr)
# Updating output arrays for motor
tmotor.append(tcurr)
theta.append(motor.angle)
# Getting current time for gyro (s)
tcurr = time.perf_counter() - tstart
# Updating output arrays for gyro
# (and converting from deg/s to rad/s)
anglecurr, ratecurr = gyro.output
tgyro.append(tcurr)
angle.append(anglecurr-angle0)
rate.append(np.pi/180 * (ratecurr-rate0))
# Stopping motor and closing brick connection
motor.stop(brake='off')
ev3.close()
# Calculating motor angular velocity (rad/s)
w = np.pi/180 * np.gradient(theta, tmotor)
# Plotting results
plot_line([tmotor, tgyro], [theta, angle], yname='Angular Position (deg.)',
legend=['Tacho', 'Gyro'], marker=True)
plot_line([tmotor, tgyro], [w, rate], yname='Angular velocity (rad/s)',
legend=['Tacho', 'Gyro'], marker=True)
|
[
"pyev3.brick.LegoEV3",
"time.perf_counter",
"pyev3.utils.plot_line",
"numpy.sin",
"pyev3.devices.Motor",
"pyev3.devices.Gyro",
"numpy.gradient"
] |
[((1049, 1058), 'pyev3.brick.LegoEV3', 'LegoEV3', ([], {}), '()\n', (1056, 1058), False, 'from pyev3.brick import LegoEV3\n'), ((1067, 1087), 'pyev3.devices.Motor', 'Motor', (['ev3'], {'port': '"""A"""'}), "(ev3, port='A')\n", (1072, 1087), False, 'from pyev3.devices import Gyro, Motor\n'), ((1095, 1139), 'pyev3.devices.Gyro', 'Gyro', (['ev3'], {'portnum': '(1)', 'inputmode': '"""angle&rate"""'}), "(ev3, portnum=1, inputmode='angle&rate')\n", (1099, 1139), False, 'from pyev3.devices import Gyro, Motor\n'), ((1408, 1427), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1425, 1427), False, 'import time\n'), ((2292, 2410), 'pyev3.utils.plot_line', 'plot_line', (['[tmotor, tgyro]', '[theta, angle]'], {'yname': '"""Angular Position (deg.)"""', 'legend': "['Tacho', 'Gyro']", 'marker': '(True)'}), "([tmotor, tgyro], [theta, angle], yname='Angular Position (deg.)',\n legend=['Tacho', 'Gyro'], marker=True)\n", (2301, 2410), False, 'from pyev3.utils import plot_line\n'), ((2417, 2531), 'pyev3.utils.plot_line', 'plot_line', (['[tmotor, tgyro]', '[w, rate]'], {'yname': '"""Angular velocity (rad/s)"""', 'legend': "['Tacho', 'Gyro']", 'marker': '(True)'}), "([tmotor, tgyro], [w, rate], yname='Angular velocity (rad/s)',\n legend=['Tacho', 'Gyro'], marker=True)\n", (2426, 2531), False, 'from pyev3.utils import plot_line\n'), ((2244, 2270), 'numpy.gradient', 'np.gradient', (['theta', 'tmotor'], {}), '(theta, tmotor)\n', (2255, 2270), True, 'import numpy as np\n'), ((1536, 1555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1553, 1555), False, 'import time\n'), ((1672, 1701), 'numpy.sin', 'np.sin', (['(2 * np.pi / T * tcurr)'], {}), '(2 * np.pi / T * tcurr)\n', (1678, 1701), True, 'import numpy as np\n'), ((1846, 1865), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1863, 1865), False, 'import time\n')]
|
import numpy as np
import pytest
from numpy.testing import assert_allclose
from pybraw import _pybraw, verify
class CapturingCallback(_pybraw.BlackmagicRawCallback):
def ReadComplete(self, job, result, frame):
self.frame = frame
def ProcessComplete(self, job, result, processed_image):
self.processed_image = processed_image
@pytest.fixture
def callback(codec):
callback = CapturingCallback()
verify(codec.SetCallback(callback))
return callback
@pytest.fixture
def frame(codec, clip, callback):
read_job = verify(clip.CreateJobReadFrame(12))
verify(read_job.Submit())
read_job.Release()
verify(codec.FlushJobs())
return callback.frame
@pytest.mark.parametrize('format,max_val,is_planar,channels', [
(_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]),
(_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]),
(_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]),
])
def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels):
verify(frame.SetResourceFormat(format))
process_job = verify(frame.CreateJobDecodeAndProcessFrame())
process_job.Submit()
process_job.Release()
codec.FlushJobs()
resource_type = verify(callback.processed_image.GetResourceType())
assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU
resource_format = verify(callback.processed_image.GetResourceFormat())
assert resource_format == format
np_image = callback.processed_image.to_py()
del callback.processed_image
np_image = np_image / max_val
if is_planar:
np_image = np.transpose(np_image, (1, 2, 0))
expected = np.array([126, 131, 129, 255])[channels] / 255
assert_allclose(np_image[100, 200], expected, atol=1 / 255)
def test_SetResolutionScale(frame, codec, callback):
verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter))
process_job = verify(frame.CreateJobDecodeAndProcessFrame())
process_job.Submit()
process_job.Release()
codec.FlushJobs()
# Check that the resolution is one quarter of the original DCI full frame 4K.
width = verify(callback.processed_image.GetWidth())
assert width == 1024
height = verify(callback.processed_image.GetHeight())
assert height == 540
# from PIL import Image
# pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3])
# pil_image.show()
def test_CloneFrameProcessingAttributes(frame):
attributes = verify(frame.CloneFrameProcessingAttributes())
assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes)
iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py()
assert iso == 400
def test_GetMetadataIterator(frame):
iterator = verify(frame.GetMetadataIterator())
metadata = {}
while True:
result, key = iterator.GetKey()
if result == _pybraw.E_FAIL:
break
assert result == _pybraw.S_OK
metadata[key] = verify(iterator.GetData()).to_py()
verify(iterator.Next())
assert metadata['white_balance_kelvin'] == 5600
assert_allclose(metadata['sensor_rate'], np.array([25, 1]))
def test_GetMetadata(frame):
white_balance = verify(frame.GetMetadata('white_balance_kelvin'))
assert white_balance.to_py() == 5600
def test_SetMetadata(frame):
verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800)))
white_balance = verify(frame.GetMetadata('white_balance_kelvin'))
assert white_balance.to_py() == 2800
|
[
"numpy.testing.assert_allclose",
"numpy.transpose",
"pybraw._pybraw.VariantCreateU32",
"numpy.array",
"pytest.mark.parametrize"
] |
[((701, 1005), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""format,max_val,is_planar,channels"""', '[(_pybraw.blackmagicRawResourceFormatBGRAU8, 2 ** 8, False, [2, 1, 0, 3]),\n (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]),\n (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2 ** 16, True, [0, 1, 2])\n ]'], {}), "('format,max_val,is_planar,channels', [(_pybraw.\n blackmagicRawResourceFormatBGRAU8, 2 ** 8, False, [2, 1, 0, 3]), (\n _pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (\n _pybraw.blackmagicRawResourceFormatRGBU16Planar, 2 ** 16, True, [0, 1, 2])]\n )\n", (724, 1005), False, 'import pytest\n'), ((1777, 1836), 'numpy.testing.assert_allclose', 'assert_allclose', (['np_image[100, 200]', 'expected'], {'atol': '(1 / 255)'}), '(np_image[100, 200], expected, atol=1 / 255)\n', (1792, 1836), False, 'from numpy.testing import assert_allclose\n'), ((1677, 1710), 'numpy.transpose', 'np.transpose', (['np_image', '(1, 2, 0)'], {}), '(np_image, (1, 2, 0))\n', (1689, 1710), True, 'import numpy as np\n'), ((3257, 3274), 'numpy.array', 'np.array', (['[25, 1]'], {}), '([25, 1])\n', (3265, 3274), True, 'import numpy as np\n'), ((1726, 1756), 'numpy.array', 'np.array', (['[126, 131, 129, 255]'], {}), '([126, 131, 129, 255])\n', (1734, 1756), True, 'import numpy as np\n'), ((3502, 3532), 'pybraw._pybraw.VariantCreateU32', '_pybraw.VariantCreateU32', (['(2800)'], {}), '(2800)\n', (3526, 3532), False, 'from pybraw import _pybraw, verify\n')]
|
import math
import time
import pickle
import sys
import os
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from datasets.data_utils import project_image_to_rect, compute_box_3d
def adjust_coord_for_view(points):
return points[:, [2, 0, 1]] * np.array([1, -1, -1])
def draw_box3d(corners, ax):
'''
8, 3
'''
order = np.array([
0, 1,
1, 2,
2, 3,
3, 0,
4, 5,
5, 6,
6, 7,
7, 4,
3, 7,
0, 4,
2, 6,
1, 5]).reshape(-1, 2)
for i in range(len(order)):
ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2])
def draw_points(pts, ax):
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2])
def check_box_frustum(box, P, center, dimension, angle):
x1, y1, x2, y2 = box
box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3
z1 = np.arange(0, 70, 0.1)
xyz1 = np.zeros((len(z1), 3))
xyz1[:, 0] = x1
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz1_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz2_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x1
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz3_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz4_rect = project_image_to_rect(xyz1, P)
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
draw_box3d(box_corner, ax)
draw_points(xyz1_rect, ax)
draw_points(xyz2_rect, ax)
draw_points(xyz3_rect, ax)
draw_points(xyz4_rect, ax)
plt.show()
def check_norm(points, ref_points, gt_box3d_corners, pred_box3d_corners):
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
points = adjust_coord_for_view(points)
ref_points = adjust_coord_for_view(ref_points)
gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners)
pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners)
# ax.set_aspect('equal')
# ax.axis('equal')
ax.set_axis_on()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
draw_points(points, ax)
draw_points(ref_points, ax)
draw_box3d(gt_box3d_corners, ax)
draw_box3d(pred_box3d_corners, ax)
plt.show()
|
[
"datasets.data_utils.compute_box_3d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.arange",
"datasets.data_utils.project_image_to_rect",
"numpy.array"
] |
[((872, 915), 'datasets.data_utils.compute_box_3d', 'compute_box_3d', (['center', 'dimension', 'angle', 'P'], {}), '(center, dimension, angle, P)\n', (886, 915), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((934, 955), 'numpy.arange', 'np.arange', (['(0)', '(70)', '(0.1)'], {}), '(0, 70, 0.1)\n', (943, 955), True, 'import numpy as np\n'), ((1067, 1097), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1088, 1097), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1175, 1205), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1196, 1205), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1283, 1313), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1304, 1313), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1391, 1421), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1412, 1421), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1433, 1445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1443, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1649, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1757), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1755, 1757), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2311, 2313), True, 'import matplotlib.pyplot as plt\n'), ((293, 314), 'numpy.array', 'np.array', (['[1, -1, -1]'], {}), '([1, -1, -1])\n', (301, 314), True, 'import numpy as np\n'), ((384, 470), 'numpy.array', 'np.array', (['[0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6, 1, 5]'], {}), '([0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6,\n 1, 5])\n', (392, 470), True, 'import numpy as np\n')]
|
import numpy as np
def integrate_displacement(displ_img_to_img):
"""Sum the image-to-image displacement value to
obtain image-to-reference displacement,
    add zeros at the beginning
Parameters
----------
displ_img_to_img : 3D array
3D array of shape `(nbr images - 1, nbr points, 2)`
Returns
-------
3D array of shape `(nbr images, nbr points, 2)`
"""
    # add zeros at the beginning
zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :]
displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0)
displ_image_to_ref = np.cumsum(displ_zero, axis=0)
return displ_image_to_ref
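# --- quick check of integrate_displacement on a toy field (illustrative sketch)
# one image-to-image displacement of a single point by (1, 2): the integrated
# displacement starts at zero and ends at (1, 2)
_displ = np.array([[[1.0, 2.0]]])              # shape (1 image pair, 1 point, 2)
_integrated = integrate_displacement(_displ)  # shape (2 images, 1 point, 2)
np.testing.assert_almost_equal(_integrated[0, 0], [0.0, 0.0])
np.testing.assert_almost_equal(_integrated[1, 0], [1.0, 2.0])
# ---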
def get_center_points(xgrid, ygrid):
"""Cell center point coordinates"""
center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1])
center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1])
return center_x, center_y
def cellcentered_diff_2D(u, v):
"""
for a given 2D vector field [u, v](x, y) sampled on a grid
returns the centered finite difference for each cell
Cell abcd:
a───b
│ + │
c───d
du_x = (ub+ud)/2 - (ua+uc)/2
du_y = (ua+ub)/2 - (uc+ud)/2
"""
u_center_y = 0.5*(u[1:, :] + u[:-1, :])
u_center_x = 0.5*(u[:, 1:] + u[:, :-1])
v_center_y = 0.5*(v[1:, :] + v[:-1, :])
v_center_x = 0.5*(v[:, 1:] + v[:, :-1])
delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1]
delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :]
delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1]
delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :]
return delta_u_x, delta_u_y, delta_v_x, delta_v_y
def cellcentered_grad_rect2D(xgrid, ygrid, u, v):
"""Finite difference gradient for the vector fields u and v
evaluated at cell center
    This is not a proper bilinear interpolation (i.e. quad4 element).
The xy-grid has to be rectangular.
    used to compute the "Displacement gradient tensor"
see Bower p.14
output: (dudx, dudy), (dvdx, dvdy)
"""
du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v)
dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid)
return [[du_x/dx, du_y/dy],
[dv_x/dx, dv_y/dy]]
# --- test cellcentered_grad_rect2D
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2,
np.linspace(1, 5, 7)**0.5)
u = 5*xgrid + 3*ygrid
v = 2*xgrid + 7*ygrid
(dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx))
np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx))
np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx))
np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx))
# ---
def get_LagrangeStrainTensor(xgrid, ygrid, u, v):
"""Lagrange Strain Tensor (E)
F = grad(u) + Id
    E = 1/2*( F^T F - Id )
Parameters
----------
xgrid, ygrid : 2d arrays of shape (n_y, n_x)
        undeformed grid points
u, v : 2d arrays of shape (n_y, n_x)
displacements values (u along x, v along y)
Returns
-------
4D array of shape (n_y, n_x, 2, 2)
Lagrange Strain Tensor for all grid points
"""
grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
grad_u = np.stack(grad_u, axis=2)
grad_v = np.stack(grad_v, axis=2)
# u = 1*xgrid + 3*ygrid
# v = 5*xgrid + 7*ygrid
G = np.stack([grad_u, grad_v], axis=3)
G = np.transpose(G, axes=(0, 1, 3, 2))
# G >>> array([[1., 3.], [5., 7.]])
Id = np.ones((*grad_u.shape[:2], 2, 2))
Id[:, :] = np.eye(2, 2)
# Id[0, 0] >> array([[1., 0.], [0., 1.]])
F = G + Id
# Lagrange Strain Tensor
E = 0.5*( np.einsum('...ki,...kj', F, F) - Id )
return E
# --- test get_LagrangeStrainTensor
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5),
np.linspace(1, 5, 7))
u = 1*xgrid + 3*ygrid
v = 5*xgrid + 7*ygrid
E = get_LagrangeStrainTensor(xgrid, ygrid, u, v)
# array([[[[14., 23.],
# [23., 36.]],
np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1]))
# ---
def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v):
"""Small Displacement Strain Tensor (E)
E = 1/2*( grad(u) + grad(u)^T )
Parameters
----------
xgrid, ygrid : 2d arrays of shape (n_y, n_x)
        undeformed grid points
u, v : 2d arrays of shape (n_y, n_x)
displacements values (u along x, v along y)
Returns
-------
4D array of shape (n_y, n_x, 2, 2)
        Infinitesimal strain tensor for all grid points
"""
grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
grad_u = np.stack(grad_u, axis=2)
grad_v = np.stack(grad_v, axis=2)
# u = 1*xgrid + 3*ygrid
# v = 5*xgrid + 7*ygrid
G = np.stack([grad_u, grad_v], axis=3)
G = np.transpose(G, axes=(0, 1, 3, 2))
# G >>> array([[1., 3.], [5., 7.]])
# Strain Tensor
E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) )
return E
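# --- quick check for get_InfinitesimalStrainTensor (illustrative, in the style of the tests above)
# For u = 1*x + 3*y, v = 5*x + 7*y the displacement gradient is [[1, 3], [5, 7]],
# so E = 1/2*(G + G^T) = [[1, 4], [4, 7]] everywhere.
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5),
                           np.linspace(1, 5, 7))
u = 1*xgrid + 3*ygrid
v = 5*xgrid + 7*ygrid
E = get_InfinitesimalStrainTensor(xgrid, ygrid, u, v)
np.testing.assert_almost_equal(E[:, :, 0, 0], 1*np.ones_like(E[:, :, 0, 0]))
np.testing.assert_almost_equal(E[:, :, 0, 1], 4*np.ones_like(E[:, :, 0, 0]))
np.testing.assert_almost_equal(E[:, :, 1, 0], 4*np.ones_like(E[:, :, 0, 0]))
np.testing.assert_almost_equal(E[:, :, 1, 1], 7*np.ones_like(E[:, :, 0, 0]))
# ---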
def bilinear_fit(points, displacements):
"""Performs a bilinear fit on the displacements field
Solve the equation u = A*x + t
Parameters
----------
points : nd-array (nbr_points, 2)
coordinates of points (x, y)
displacements : nd-array (nbr_points, 2)
displacement for each point (u, v)
could include NaN
Returns
-------
nd-array (2, 3)
coefficients matrix (affine transformation + translation)
nd-array (nbr_points, 2)
residuals for each points
"""
u, v = displacements.T
mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v)))
u, v = u[mask], v[mask]
x, y = points[mask, :].T
ones = np.ones_like(x)
M = np.vstack([x, y, ones]).T
p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None)
p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None)
coefficients = np.vstack([p_ux, p_uy])
## Unbiased estimator variance (see p47 T. Hastie)
#sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1))
#sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1))
# Residuals:
u_linear = np.matmul( M, p_ux )
v_linear = np.matmul( M, p_uy )
residuals_x = u - u_linear
residuals_y = v - v_linear
residuals_xy = np.vstack([residuals_x, residuals_y]).T
# Merge with ignored NaN values:
residuals_NaN = np.full(displacements.shape, np.nan)
residuals_NaN[mask, :] = residuals_xy
return coefficients, residuals_NaN
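# --- usage sketch for bilinear_fit (illustrative): recover a known affine field
_points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
_displ_affine = _points @ np.array([[2.0, 0.5], [-1.0, 3.0]]) + np.array([1.0, -2.0])
_coeffs, _residuals = bilinear_fit(_points, _displ_affine)
np.testing.assert_almost_equal(_coeffs[0], [2.0, -1.0, 1.0])  # u = 2*x - 1*y + 1
np.testing.assert_almost_equal(_coeffs[1], [0.5, 3.0, -2.0])  # v = 0.5*x + 3*y - 2
np.testing.assert_almost_equal(_residuals, np.zeros((4, 2)))
# ---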
|
[
"numpy.stack",
"numpy.full",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.linalg.lstsq",
"numpy.transpose",
"numpy.ones",
"numpy.einsum",
"numpy.isnan",
"numpy.cumsum",
"numpy.vstack",
"numpy.linspace",
"numpy.matmul",
"numpy.eye",
"numpy.concatenate"
] |
[((516, 565), 'numpy.concatenate', 'np.concatenate', (['[zeros, displ_img_to_img]'], {'axis': '(0)'}), '([zeros, displ_img_to_img], axis=0)\n', (530, 565), True, 'import numpy as np\n'), ((592, 621), 'numpy.cumsum', 'np.cumsum', (['displ_zero'], {'axis': '(0)'}), '(displ_zero, axis=0)\n', (601, 621), True, 'import numpy as np\n'), ((3319, 3343), 'numpy.stack', 'np.stack', (['grad_u'], {'axis': '(2)'}), '(grad_u, axis=2)\n', (3327, 3343), True, 'import numpy as np\n'), ((3357, 3381), 'numpy.stack', 'np.stack', (['grad_v'], {'axis': '(2)'}), '(grad_v, axis=2)\n', (3365, 3381), True, 'import numpy as np\n'), ((3447, 3481), 'numpy.stack', 'np.stack', (['[grad_u, grad_v]'], {'axis': '(3)'}), '([grad_u, grad_v], axis=3)\n', (3455, 3481), True, 'import numpy as np\n'), ((3490, 3524), 'numpy.transpose', 'np.transpose', (['G'], {'axes': '(0, 1, 3, 2)'}), '(G, axes=(0, 1, 3, 2))\n', (3502, 3524), True, 'import numpy as np\n'), ((3576, 3610), 'numpy.ones', 'np.ones', (['(*grad_u.shape[:2], 2, 2)'], {}), '((*grad_u.shape[:2], 2, 2))\n', (3583, 3610), True, 'import numpy as np\n'), ((3626, 3638), 'numpy.eye', 'np.eye', (['(2)', '(2)'], {}), '(2, 2)\n', (3632, 3638), True, 'import numpy as np\n'), ((3861, 3882), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), '(-1, 1, 5)\n', (3872, 3882), True, 'import numpy as np\n'), ((3911, 3931), 'numpy.linspace', 'np.linspace', (['(1)', '(5)', '(7)'], {}), '(1, 5, 7)\n', (3922, 3931), True, 'import numpy as np\n'), ((4945, 4969), 'numpy.stack', 'np.stack', (['grad_u'], {'axis': '(2)'}), '(grad_u, axis=2)\n', (4953, 4969), True, 'import numpy as np\n'), ((4983, 5007), 'numpy.stack', 'np.stack', (['grad_v'], {'axis': '(2)'}), '(grad_v, axis=2)\n', (4991, 5007), True, 'import numpy as np\n'), ((5073, 5107), 'numpy.stack', 'np.stack', (['[grad_u, grad_v]'], {'axis': '(3)'}), '([grad_u, grad_v], axis=3)\n', (5081, 5107), True, 'import numpy as np\n'), ((5116, 5150), 'numpy.transpose', 'np.transpose', (['G'], {'axes': '(0, 1, 3, 2)'}), '(G, axes=(0, 1, 3, 2))\n', (5128, 5150), True, 'import numpy as np\n'), ((5983, 5998), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (5995, 5998), True, 'import numpy as np\n'), ((6069, 6102), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'v'], {'rcond': 'None'}), '(M, v, rcond=None)\n', (6084, 6102), True, 'import numpy as np\n'), ((6138, 6171), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'u'], {'rcond': 'None'}), '(M, u, rcond=None)\n', (6153, 6171), True, 'import numpy as np\n'), ((6192, 6215), 'numpy.vstack', 'np.vstack', (['[p_ux, p_uy]'], {}), '([p_ux, p_uy])\n', (6201, 6215), True, 'import numpy as np\n'), ((6435, 6453), 'numpy.matmul', 'np.matmul', (['M', 'p_ux'], {}), '(M, p_ux)\n', (6444, 6453), True, 'import numpy as np\n'), ((6471, 6489), 'numpy.matmul', 'np.matmul', (['M', 'p_uy'], {}), '(M, p_uy)\n', (6480, 6489), True, 'import numpy as np\n'), ((6673, 6709), 'numpy.full', 'np.full', (['displacements.shape', 'np.nan'], {}), '(displacements.shape, np.nan)\n', (6680, 6709), True, 'import numpy as np\n'), ((446, 480), 'numpy.zeros_like', 'np.zeros_like', (['displ_img_to_img[0]'], {}), '(displ_img_to_img[0])\n', (459, 480), True, 'import numpy as np\n'), ((2325, 2346), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), '(-1, 1, 5)\n', (2336, 2346), True, 'import numpy as np\n'), ((2378, 2398), 'numpy.linspace', 'np.linspace', (['(1)', '(5)', '(7)'], {}), '(1, 5, 7)\n', (2389, 2398), True, 'import numpy as np\n'), ((2565, 2583), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), 
'(dudx)\n', (2577, 2583), True, 'import numpy as np\n'), ((2624, 2642), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), '(dudx)\n', (2636, 2642), True, 'import numpy as np\n'), ((2683, 2701), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), '(dudx)\n', (2695, 2701), True, 'import numpy as np\n'), ((2742, 2760), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), '(dudx)\n', (2754, 2760), True, 'import numpy as np\n'), ((4125, 4152), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4137, 4152), True, 'import numpy as np\n'), ((4203, 4230), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4215, 4230), True, 'import numpy as np\n'), ((4281, 4308), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4293, 4308), True, 'import numpy as np\n'), ((4359, 4386), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4371, 4386), True, 'import numpy as np\n'), ((6007, 6030), 'numpy.vstack', 'np.vstack', (['[x, y, ones]'], {}), '([x, y, ones])\n', (6016, 6030), True, 'import numpy as np\n'), ((6575, 6612), 'numpy.vstack', 'np.vstack', (['[residuals_x, residuals_y]'], {}), '([residuals_x, residuals_y])\n', (6584, 6612), True, 'import numpy as np\n'), ((3745, 3775), 'numpy.einsum', 'np.einsum', (['"""...ki,...kj"""', 'F', 'F'], {}), "('...ki,...kj', F, F)\n", (3754, 3775), True, 'import numpy as np\n'), ((5231, 5265), 'numpy.transpose', 'np.transpose', (['G'], {'axes': '(0, 1, 3, 2)'}), '(G, axes=(0, 1, 3, 2))\n', (5243, 5265), True, 'import numpy as np\n'), ((5887, 5898), 'numpy.isnan', 'np.isnan', (['u'], {}), '(u)\n', (5895, 5898), True, 'import numpy as np\n'), ((5900, 5911), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (5908, 5911), True, 'import numpy as np\n')]
|
import os
import sys
from datetime import datetime, timedelta
import numpy as np
data_path = "../../dat4figs_JAMES/Fig06"
os.makedirs( data_path, exist_ok=True )
USE_ARCH_DAT = True
#USE_ARCH_DAT = False
quick_hist = False
quick_bar = True
quick_bar = False
def d4_computation_time_nparray( top='' ):
dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
path_l = []
ftimes = []
ctimes = []
# Prepare file path list
for dir_ in dirs:
path_l.append( os.path.join( top, dir_, ) )
scale_l = []
# Get computation time for SCALE
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '[Info:fcst] End forecast' in l:
data = l.split()
try:
ftimes.append( float( data[7] ) )
except:
print( "Failed", data )
elif '[Info:DA]' in l:
data = l.split()
try:
ctimes.append( float( data[6] ) )
except:
print( "Failed", data )
elif '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
dat_ = float( data[5] )
if tit_ == 'SCALE':
scale_l.append( dat_ )
except:
print( "Failed", data )
scale_l = np.array( scale_l )
key_l = [ "SCALE", "READ_OBS",
"OBS_OPERATOR",
"INITIALIZE",
"INITIALIZE_OTHERS",
"INIT_LETKF",
"PROCESS_OBS",
"SET_GRID",
"READ_GUES",
"GUES_MEAN",
"WRITE RESTART/GRADS(GUES)",
"DAS_LETKF",
"ANAL_MEAN",
"WRITE_ANAL",
"DEALLOCATE",
"WRITE RESTART/GRADS(ANAL)",
"OTHERS",
"FINALIZE",
"JIT_GET",
]
# prepare nan array
iarray = np.zeros( scale_l.shape )
iarray[:] = np.nan
DETAIL = {}
for key in key_l:
if key == 'SCALE':
DETAIL[key] = scale_l
else:
DETAIL[key] = np.copy( iarray )
# Get computation time for all
i = -1
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
tit4_ = data[4]
dat_ = float( data[5] )
if tit_ == 'SCALE':
i += 1
if tit_ == "WRITE":
dat_ = float( data[6] )
if tit4_ == "RESTART/GRADS(ANAL)":
tit_ = "WRITE RESTART/GRADS(ANAL)"
elif tit4_ == "RESTART/GRADS(GUES)":
tit_ = "WRITE RESTART/GRADS(GUES)"
i_ = i
if i_ < 0:
i_ = 0
if tit_ in DETAIL:
DETAIL[tit_][i_] = dat_
else:
DETAIL["OTHERS"][i_] = dat_
except:
print( "Failed", data )
elif '......jitdt_read_toshiba:jitget:' in l:
data = l.split()
try:
tit_ = "JIT_GET"
dat_ = float( data[1] )
DETAIL[tit_][i] = dat_
except:
print( "Failed", data )
return( ftimes, ctimes, DETAIL )
def d4_computation_time( top='', ctmax=600 ):
dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
ftimes = []
ctimes = []
path_l = []
init = []
init_others = []
init_letkf = []
scale = []
others = []
read_obs = []
obsope = []
process_obs = []
set_grid = []
read_gues = []
gues_mean = []
write_restartg = []
das_letkf = []
anal_mean = []
write_anal = []
deallocate = []
write_restarta = []
others = []
finalize = []
jitget = []
DETAIL = { "SCALE": scale,
"READ_OBS":read_obs,
"OBS_OPERATOR": obsope,
"INITIALIZE": init,
"INITIALIZE_OTHERS": init_others,
"INIT_LETKF": init_letkf,
"PROCESS_OBS": process_obs,
"SET_GRID": set_grid,
"READ_GUES": read_gues,
"GUES_MEAN": gues_mean,
"WRITE RESTART/GRADS(GUES)": write_restartg,
"DAS_LETKF": das_letkf,
"ANAL_MEAN": anal_mean,
"WRITE_ANAL": write_anal,
"DEALLOCATE": deallocate,
"WRITE RESTART/GRADS(ANAL)": write_restarta,
"OTHERS": others,
"FINALIZE": finalize,
"JIT_GET": jitget,
}
# Prepare file path list
for dir_ in dirs:
fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ]
path_l.append( os.path.join( top, dir_, fname ) )
# Get computation time
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '[Info:fcst] End forecast' in l:
data = l.split()
try:
ftimes.append( float( data[7] ) )
except:
print( "Failed", data )
elif '[Info:DA]' in l:
data = l.split()
try:
ctimes.append( float( data[6] ) )
except:
print( "Failed", data )
elif '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
tit4_ = data[4]
dat_ = float( data[5] )
if tit_ == "WRITE":
dat_ = float( data[6] )
if tit4_ == "RESTART/GRADS(ANAL)":
tit_ = "WRITE RESTART/GRADS(ANAL)"
elif tit4_ == "RESTART/GRADS(GUES)":
tit_ = "WRITE RESTART/GRADS(GUES)"
if tit_ in DETAIL:
DETAIL[tit_].append( dat_ )
else:
DETAIL["OTHERS"].append( dat_ )
except:
print( "Failed", data )
elif '......jitdt_read_toshiba:jitget:' in l:
data = l.split()
try:
tit_ = "JIT_GET"
dat_ = float( data[1] )
DETAIL[tit_].append( dat_ )
except:
print( "Failed", data )
for key in DETAIL.keys():
DETAIL[key] = np.array( DETAIL[key] )
return( ftimes, ctimes, DETAIL )
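# Histogram of per-call timings for a single timer key; draws the mode and mean
# as vertical lines and returns (mode, mean).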
def plot_hist( key="", dat=np.array([]) ):
import matplotlib.pyplot as plt
from scipy import stats
xmin = 0
xmax = 60
# Scott's choise
#h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0)
#bins = int( ( xmax - xmin ) / h )
# Square-root choice
bins = int( np.sqrt( dat.size ) )
fig, ax = plt.subplots( 1, 1, figsize=(6,4) )
fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, )
rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 )
imode = np.argmax( rn )
mode = np.mean( rbins[imode:imode+2] )
mean = np.mean( dat )
#print( len(rn), len(rbins), mode )
lw = 1.0
ymin = 0.0
ymax = 4000 #dat_.size
ls = 'dashed'
color = 'b'
ax.vlines( x=mode, ymin=ymin, ymax=ymax,
linewidths=lw, linestyles=ls, color=color )
color = 'k'
ax.vlines( x=mean, ymin=ymin, ymax=ymax,
linewidths=lw, linestyles=ls, color=color )
text_ = 'Mean:{0:.3f} s\nMode:{1:.3f} s\nN={2:}'.format( mean, mode, dat.size )
ax.text( 0.99, 0.99, text_,
fontsize=12, transform=ax.transAxes,
ha='right',
va='top' )
tit_ = key
ax.text( 0.5, 1.01, tit_,
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.set_xlim( xmin, xmax )
ax.set_ylim( ymin, ymax )
xlab = 'Computation time (s)'
ylab = 'Frequency'
ax.set_xlabel( xlab, fontsize=11)
ax.set_ylabel( ylab, fontsize=11)
key_ = key.replace( ' ', '_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')')
ofig = 'png/1p_d4_{0:}.png'.format( key_ )
print( ofig )
if quick_hist:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
return( mode, mean )
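# Two-panel figure: (a) stacked bar of the mean DA cost breakdown in `dic`,
# (b) mean 30-min forecast wall time from `ftimes`; saved as pdf/Fig06.pdf.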
def plot_bar_2p( dic={}, ftimes=np.array([]) ):
import matplotlib.pyplot as plt
fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )
# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,
wspace=0.3, hspace=0.05)
ax1.set_xlim( 0, 2.0 )
width1 = 0.8
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax1.bar( 1.0, dic[key], bottom=acm,
label=lab, color=c_l[i], width=width1 )
acm += dic[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax1.get_legend_handles_labels()
ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=12 )
ax1.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 22, 2 )
ax1.set_ylim( 0, 20.0 )
ax1.set_yticks( yticks )
ax1.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
ax2.set_ylim( 0, 151.0 )
ax2.set_xlim( 0, 2.0 )
ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
width2 = 0.8
ax2.bar( 1, np.mean(ftimes), label="30-min forecast", width=width2,
color='dodgerblue' )
print( "std:", np.std( ftimes, ddof=1 ), len( ftimes ) )
ax2.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax_l = [ ax1, ax2 ]
tit_l = [ "Data assimilation",
"30-min forecast" ]
pnum_l = [ "(a)", "(b)" ]
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 1.01, tit_l[i],
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.text( 0.0, 1.01, pnum_l[i],
fontsize=10, transform=ax.transAxes,
ha='left',
va='bottom' )
ofig = 'pdf/Fig06.pdf'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
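# Variant of plot_bar_2p that draws a second stacked bar from `dic2` next to
# the first, for comparing two timing breakdowns side by side.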
def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ):
import matplotlib.pyplot as plt
fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )
# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,
wspace=0.3, hspace=0.05)
ax1.set_xlim( 0, 3.0 )
width1 = 0.8
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax1.bar( 1.0, dic[key], bottom=acm,
label=lab, color=c_l[i], width=width1 )
acm += dic[key]
acm2 = 0.0
for i, key in enumerate( dic2.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
print( "check", dic2[key] )
ax1.bar( 2.0, dic2[key], bottom=acm2,
label=None, color=c_l[i], width=width1 )
        acm2 += dic2[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax1.get_legend_handles_labels()
ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=12 )
ax1.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 22, 2 )
ax1.set_ylim( 0, 20.0 )
ax1.set_yticks( yticks )
ax1.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
ax2.set_ylim( 0, 151.0 )
ax2.set_xlim( 0, 2.0 )
ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
width2 = 0.8
ax2.bar( 1, np.mean(ftimes), label="30-min forecast", width=width2,
color='dodgerblue' )
print( "std:", np.std( ftimes, ddof=1 ), len( ftimes ) )
ax2.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax_l = [ ax1, ax2 ]
tit_l = [ "Data assimilation",
"30-min forecast" ]
pnum_l = [ "(a)", "(b)" ]
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 1.01, tit_l[i],
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.text( 0.0, 1.01, pnum_l[i],
fontsize=10, transform=ax.transAxes,
ha='left',
va='bottom' )
    ofig = 'png/2p_d4_bar_scale.png'
    print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
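# Single-panel stacked bar chart of one timing breakdown.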
def plot_bar( dic={} ):
import matplotlib.pyplot as plt
fig, ax = plt.subplots( 1, 1, figsize=(5,5) )
fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax.bar( '', dic[key], bottom=acm,
label=lab, color=c_l[i] )
acm += dic[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax.get_legend_handles_labels()
ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=13 )
ax.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 32, 2 )
ax.set_ylim( 0, 31.0 )
ax.set_yticks( yticks )
ofig = 'png/1p_d4_bar.png'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
####
SUM = { "SCALE": 0.0,
"LETKF": 0.0,
"OBS": 0.0,
# "DATA TRANSFER": 0.0,
"JIT-DT": 0.0,
}
fn_sum = '{0:}/SUM.npz'.format( data_path, )
fn_ftimes = '{0:}/ftimes.npz'.format( data_path, )
if not USE_ARCH_DAT:
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp'
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m'
top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000'
#dtime_max = 1000
ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, )
ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, )
#print( DETAIL["DAS_LETKF"][0:5], DETAIL["WRITE_ANAL"][0:5])
#ftimes, ctimes, DETAIL = d4_computation_time( top=top, )
ctimes = np.array( ctimes )
print( '{0:} average: {1:} (N: {2:})'.format( "cycle", np.nanmean( ctimes ), len(ctimes) ) )
print( '{0:} average: {1:} (N: {2:})'.format( "fcst ", np.mean( ftimes ), len(ftimes) ) )
print("")
DETAIL_MODE = { }
DETAIL_MODE_test = { }
min_read_obs = 1.0
max_read_obs = 30.0
read_obs_ = DETAIL["READ_OBS"]
dat_jit = DETAIL['JIT_GET']
dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan
dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ]
for key in DETAIL.keys():
DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan
time_ = np.nanmean( DETAIL[key] )
dat = DETAIL[key]
dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ]
num = len( dat_ )
if key == "READ_OBS":
dat_ -= dat_jit_
print( "#### ", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) )
if num > 100:
mode_, mean_ = plot_hist( key=key, dat=dat_ )
#DETAIL_MODE[key] = mode_
DETAIL_MODE[key] = mean_
else:
print( 'Not plot ', key)
read_obs_test = DETAIL_test["READ_OBS"]
#dat_jit_test = DETAIL_test['JIT_GET']
#dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan
#dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ]
for key in DETAIL_test.keys():
DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan
time_ = np.nanmean( DETAIL_test[key] )
dat = DETAIL_test[key]
print( key, dat )
#dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ]
dat_ = dat[ ~np.isnan(dat) ]
num = len( dat_ )
# if key == "READ_OBS":
# dat_ -= dat_jit_
print( "#### ", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) )
if num > 100:
mode_, mean_ = plot_hist( key=key, dat=dat_ )
DETAIL_MODE_test[key] = mean_
else:
print( 'Not plot ', key)
for key in DETAIL_MODE.keys():
print( key )
if key == "SCALE":
SUM["SCALE"] += DETAIL_MODE[key]
elif key == "READ_OBS":
SUM["OBS"] += DETAIL_MODE[key]
# elif key == "READ_GUES" or key == "WRITE_ANAL":
# SUM["DATA TRANSFER"] += DETAIL_MODE[key]
elif key == "JIT_GET":
SUM["JIT-DT"] += DETAIL_MODE[key]
else:
SUM["LETKF"] += DETAIL_MODE[key]
SUM_test = { "SCALE": 0.0,
"LETKF": 0.0,
"OBS": 0.0,
"JIT-DT": 0.0,
}
for key in DETAIL_MODE_test.keys():
if key == "SCALE":
SUM_test["SCALE"] += DETAIL_MODE_test[key]
elif key == "READ_OBS":
SUM_test["OBS"] += DETAIL_MODE_test[key]
# elif key == "READ_GUES" or key == "WRITE_ANAL":
# SUM["DATA TRANSFER"] += DETAIL_MODE[key]
elif key == "JIT_GET":
SUM_test["JIT-DT"] += DETAIL_MODE_test[key]
else:
SUM_test["LETKF"] += DETAIL_MODE_test[key]
np.savez( fn_sum, **SUM, ftimes=ftimes )
np.savez( fn_ftimes, ftimes=ftimes )
else:
with np.load( fn_sum, allow_pickle=True ) as npz:
for key in SUM.keys():
SUM[key] = npz[key]
ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes']
print( SUM )
#print( DETAIL_MODE )
#print( SUM_test )
#print( DETAIL_MODE_test )
#sys.exit()
#plot_bar( dic=SUM )
plot_bar_2p( dic=SUM, ftimes=ftimes )
#plot_bar_2p_scale( dic=SUM, dic2=SUM_test, ftimes=ftimes )
|
[
"numpy.load",
"numpy.argmax",
"matplotlib.pyplot.clf",
"numpy.isnan",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"os.path.join",
"numpy.nanmean",
"numpy.copy",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.savez",
"os.scandir",
"numpy.nanmax",
"os.makedirs",
"numpy.zeros",
"numpy.nanmin",
"numpy.array",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((124, 161), 'os.makedirs', 'os.makedirs', (['data_path'], {'exist_ok': '(True)'}), '(data_path, exist_ok=True)\n', (135, 161), False, 'import os\n'), ((1638, 1655), 'numpy.array', 'np.array', (['scale_l'], {}), '(scale_l)\n', (1646, 1655), True, 'import numpy as np\n'), ((2270, 2293), 'numpy.zeros', 'np.zeros', (['scale_l.shape'], {}), '(scale_l.shape)\n', (2278, 2293), True, 'import numpy as np\n'), ((7731, 7743), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7739, 7743), True, 'import numpy as np\n'), ((8060, 8094), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 4)'}), '(1, 1, figsize=(6, 4))\n', (8072, 8094), True, 'import matplotlib.pyplot as plt\n'), ((8274, 8287), 'numpy.argmax', 'np.argmax', (['rn'], {}), '(rn)\n', (8283, 8287), True, 'import numpy as np\n'), ((8301, 8332), 'numpy.mean', 'np.mean', (['rbins[imode:imode + 2]'], {}), '(rbins[imode:imode + 2])\n', (8308, 8332), True, 'import numpy as np\n'), ((8344, 8356), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (8351, 8356), True, 'import numpy as np\n'), ((9699, 9711), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9707, 9711), True, 'import numpy as np\n'), ((9775, 9809), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6, 4)'}), '(1, 2, figsize=(6, 4))\n', (9787, 9809), True, 'import matplotlib.pyplot as plt\n'), ((10996, 11015), 'numpy.arange', 'np.arange', (['(0)', '(22)', '(2)'], {}), '(0, 22, 2)\n', (11005, 11015), True, 'import numpy as np\n'), ((12555, 12567), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12563, 12567), True, 'import numpy as np\n'), ((12640, 12674), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6, 4)'}), '(1, 2, figsize=(6, 4))\n', (12652, 12674), True, 'import matplotlib.pyplot as plt\n'), ((14289, 14308), 'numpy.arange', 'np.arange', (['(0)', '(22)', '(2)'], {}), '(0, 22, 2)\n', (14298, 14308), True, 'import numpy as np\n'), ((15898, 15932), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (15910, 15932), True, 'import matplotlib.pyplot as plt\n'), ((16932, 16951), 'numpy.arange', 'np.arange', (['(0)', '(32)', '(2)'], {}), '(0, 32, 2)\n', (16941, 16951), True, 'import numpy as np\n'), ((18199, 18215), 'numpy.array', 'np.array', (['ctimes'], {}), '(ctimes)\n', (18207, 18215), True, 'import numpy as np\n'), ((21424, 21462), 'numpy.savez', 'np.savez', (['fn_sum'], {'ftimes': 'ftimes'}), '(fn_sum, **SUM, ftimes=ftimes)\n', (21432, 21462), True, 'import numpy as np\n'), ((21468, 21502), 'numpy.savez', 'np.savez', (['fn_ftimes'], {'ftimes': 'ftimes'}), '(fn_ftimes, ftimes=ftimes)\n', (21476, 21502), True, 'import numpy as np\n'), ((7641, 7662), 'numpy.array', 'np.array', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (7649, 7662), True, 'import numpy as np\n'), ((8019, 8036), 'numpy.sqrt', 'np.sqrt', (['dat.size'], {}), '(dat.size)\n', (8026, 8036), True, 'import numpy as np\n'), ((9486, 9496), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9494, 9496), True, 'import matplotlib.pyplot as plt\n'), ((9514, 9568), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (9525, 9568), True, 'import matplotlib.pyplot as plt\n'), ((9599, 9608), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9606, 9608), True, 'import matplotlib.pyplot as plt\n'), ((9616, 9632), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], 
{}), "('all')\n", (9625, 9632), True, 'import matplotlib.pyplot as plt\n'), ((11546, 11561), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (11553, 11561), True, 'import numpy as np\n'), ((11655, 11677), 'numpy.std', 'np.std', (['ftimes'], {'ddof': '(1)'}), '(ftimes, ddof=1)\n', (11661, 11677), True, 'import numpy as np\n'), ((12368, 12378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12376, 12378), True, 'import matplotlib.pyplot as plt\n'), ((12396, 12450), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (12407, 12450), True, 'import matplotlib.pyplot as plt\n'), ((12481, 12490), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12488, 12490), True, 'import matplotlib.pyplot as plt\n'), ((12498, 12514), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12507, 12514), True, 'import matplotlib.pyplot as plt\n'), ((14839, 14854), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (14846, 14854), True, 'import numpy as np\n'), ((14948, 14970), 'numpy.std', 'np.std', (['ftimes'], {'ddof': '(1)'}), '(ftimes, ddof=1)\n', (14954, 14970), True, 'import numpy as np\n'), ((15672, 15682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15680, 15682), True, 'import matplotlib.pyplot as plt\n'), ((15700, 15754), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (15711, 15754), True, 'import matplotlib.pyplot as plt\n'), ((15785, 15794), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15792, 15794), True, 'import matplotlib.pyplot as plt\n'), ((15802, 15818), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15811, 15818), True, 'import matplotlib.pyplot as plt\n'), ((17084, 17094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17092, 17094), True, 'import matplotlib.pyplot as plt\n'), ((17112, 17166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (17123, 17166), True, 'import matplotlib.pyplot as plt\n'), ((17197, 17206), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17204, 17206), True, 'import matplotlib.pyplot as plt\n'), ((17214, 17230), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17223, 17230), True, 'import matplotlib.pyplot as plt\n'), ((18857, 18880), 'numpy.nanmean', 'np.nanmean', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (18867, 18880), True, 'import numpy as np\n'), ((19762, 19790), 'numpy.nanmean', 'np.nanmean', (['DETAIL_test[key]'], {}), '(DETAIL_test[key])\n', (19772, 19790), True, 'import numpy as np\n'), ((21521, 21555), 'numpy.load', 'np.load', (['fn_sum'], {'allow_pickle': '(True)'}), '(fn_sum, allow_pickle=True)\n', (21528, 21555), True, 'import numpy as np\n'), ((21637, 21674), 'numpy.load', 'np.load', (['fn_ftimes'], {'allow_pickle': '(True)'}), '(fn_ftimes, allow_pickle=True)\n', (21644, 21674), True, 'import numpy as np\n'), ((336, 351), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (346, 351), False, 'import os\n'), ((498, 521), 'os.path.join', 'os.path.join', (['top', 'dir_'], {}), '(top, dir_)\n', (510, 521), False, 'import os\n'), ((634, 654), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (648, 654), False, 'import os\n'), ((2456, 2471), 'numpy.copy', 'np.copy', 
(['iarray'], {}), '(iarray)\n', (2463, 2471), True, 'import numpy as np\n'), ((2572, 2592), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2586, 2592), False, 'import os\n'), ((4202, 4217), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (4212, 4217), False, 'import os\n'), ((5637, 5667), 'os.path.join', 'os.path.join', (['top', 'dir_', 'fname'], {}), '(top, dir_, fname)\n', (5649, 5667), False, 'import os\n'), ((5751, 5771), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5765, 5771), False, 'import os\n'), ((11245, 11264), 'numpy.arange', 'np.arange', (['(4)', '(20)', '(4)'], {}), '(4, 20, 4)\n', (11254, 11264), True, 'import numpy as np\n'), ((14538, 14557), 'numpy.arange', 'np.arange', (['(4)', '(20)', '(4)'], {}), '(4, 20, 4)\n', (14547, 14557), True, 'import numpy as np\n'), ((18276, 18294), 'numpy.nanmean', 'np.nanmean', (['ctimes'], {}), '(ctimes)\n', (18286, 18294), True, 'import numpy as np\n'), ((18372, 18387), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (18379, 18387), True, 'import numpy as np\n'), ((18703, 18720), 'numpy.isnan', 'np.isnan', (['dat_jit'], {}), '(dat_jit)\n', (18711, 18720), True, 'import numpy as np\n'), ((19101, 19123), 'numpy.nanmax', 'np.nanmax', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (19110, 19123), True, 'import numpy as np\n'), ((19127, 19149), 'numpy.nanmin', 'np.nanmin', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (19136, 19149), True, 'import numpy as np\n'), ((20085, 20112), 'numpy.nanmax', 'np.nanmax', (['DETAIL_test[key]'], {}), '(DETAIL_test[key])\n', (20094, 20112), True, 'import numpy as np\n'), ((20116, 20143), 'numpy.nanmin', 'np.nanmin', (['DETAIL_test[key]'], {}), '(DETAIL_test[key])\n', (20125, 20143), True, 'import numpy as np\n'), ((19937, 19950), 'numpy.isnan', 'np.isnan', (['dat'], {}), '(dat)\n', (19945, 19950), True, 'import numpy as np\n'), ((18932, 18945), 'numpy.isnan', 'np.isnan', (['dat'], {}), '(dat)\n', (18940, 18945), True, 'import numpy as np\n'), ((18949, 18966), 'numpy.isnan', 'np.isnan', (['dat_jit'], {}), '(dat_jit)\n', (18957, 18966), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pints
import pints.io
import pints.plot
import model as m
import parametertransform
import priors
"""
Run fit.
"""
model_list = ['A', 'B', 'C']
try:
which_model = sys.argv[1]
except:
print('Usage: python %s [str:which_model]' % os.path.basename(__file__))
sys.exit()
if which_model not in model_list:
raise ValueError('Input model %s is not available in the model list' \
% which_model)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
info_id = 'model_%s' % which_model
info = importlib.import_module(info_id)
data_dir = './data'
savedir = './out/mcmc-' + info_id
if not os.path.isdir(savedir):
os.makedirs(savedir)
data_file_name = 'data-sinewave.csv'
print('Fitting to ', data_file_name)
print('Temperature: ', info.temperature)
saveas = info_id + '-' + data_file_name[5:][:-4]
# Protocol
protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1,
delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]
# Control fitting seed
# fit_seed = np.random.randint(0, 2**30)
fit_seed = 542811797
print('Fit seed: ', fit_seed)
np.random.seed(fit_seed)
# Set parameter transformation
transform_to_model_param = parametertransform.log_transform_to_model_param
transform_from_model_param = parametertransform.log_transform_from_model_param
# Load data
data = np.loadtxt(data_dir + '/' + data_file_name,
delimiter=',', skiprows=1) # headers
times = data[:, 0]
data = data[:, 1]
noise_sigma = np.std(data[:500])
print('Estimated noise level: ', noise_sigma)
# Model
model = m.Model(info.model_file,
variables=info.parameters,
current_readout=info.current_list,
set_ion=info.ions_conc,
transform=transform_to_model_param,
temperature=273.15 + info.temperature, # K
)
LogPrior = {
'model_A': priors.ModelALogPrior,
'model_B': priors.ModelBLogPrior,
}
# Update protocol
model.set_fixed_form_voltage_protocol(protocol, protocol_times)
# Create Pints stuffs
problem = pints.SingleOutputProblem(model, times, data)
loglikelihood = pints.GaussianLogLikelihood(problem)
logmodelprior = LogPrior[info_id](transform_to_model_param,
transform_from_model_param)
lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma])
logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior)
logposterior = pints.LogPosterior(loglikelihood, logprior)
# Check logposterior is working fine
priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
priorparams = np.append(priorparams, noise_sigma)
transform_priorparams = np.append(transform_priorparams, noise_sigma)
print('Posterior at prior parameters: ',
logposterior(transform_priorparams))
for _ in range(10):
assert(logposterior(transform_priorparams) ==\
logposterior(transform_priorparams))
# Load fitting results
calloaddir = './out/' + info_id
load_seed = 542811797
fit_idx = [1, 2, 3]
transform_x0_list = []
print('MCMC starting point: ')
for i in fit_idx:
f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i)
p = np.loadtxt(f)
transform_x0_list.append(np.append(transform_from_model_param(p),
noise_sigma))
print(transform_x0_list[-1])
print('Posterior: ', logposterior(transform_x0_list[-1]))
# Run
mcmc = pints.MCMCController(logposterior, len(transform_x0_list),
transform_x0_list, method=pints.PopulationMCMC)
n_iter = 100000
mcmc.set_max_iterations(n_iter)
mcmc.set_initial_phase_iterations(int(0.05 * n_iter))
mcmc.set_parallel(False)
mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas))
mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas))
chains = mcmc.run()
# De-transform parameters
chains_param = np.zeros(chains.shape)
for i, c in enumerate(chains):
c_tmp = np.copy(c)
chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1])
chains_param[i, :, -1] = c_tmp[:, -1]
del(c_tmp)
# Save (de-transformed version)
pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param)
# Plot
# burn in and thinning
chains_final = chains[:, int(0.5 * n_iter)::5, :]
chains_param = chains_param[:, int(0.5 * n_iter)::5, :]
transform_x0 = transform_x0_list[0]
x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1])
pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0)
plt.savefig('%s/%s-fig1.png' % (savedir, saveas))
plt.close('all')
pints.plot.trace(chains_param, ref_parameters=x0)
plt.savefig('%s/%s-fig2.png' % (savedir, saveas))
plt.close('all')
|
[
"numpy.random.seed",
"pints.GaussianLogLikelihood",
"sys.path.append",
"numpy.copy",
"numpy.std",
"matplotlib.pyplot.close",
"numpy.append",
"numpy.loadtxt",
"pints.LogPosterior",
"pints.io.save_samples",
"pints.plot.pairwise",
"importlib.import_module",
"pints.UniformLogPrior",
"os.path.basename",
"matplotlib.use",
"pints.SingleOutputProblem",
"sys.exit",
"os.makedirs",
"os.path.isdir",
"pints.plot.trace",
"model.Model",
"numpy.zeros",
"pints.ComposedLogPrior",
"matplotlib.pyplot.savefig"
] |
[((72, 99), 'sys.path.append', 'sys.path.append', (['"""./method"""'], {}), "('./method')\n", (87, 99), False, 'import sys\n'), ((147, 168), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (161, 168), False, 'import matplotlib\n'), ((672, 708), 'sys.path.append', 'sys.path.append', (['"""./mmt-model-files"""'], {}), "('./mmt-model-files')\n", (687, 708), False, 'import sys\n'), ((751, 783), 'importlib.import_module', 'importlib.import_module', (['info_id'], {}), '(info_id)\n', (774, 783), False, 'import importlib\n'), ((1084, 1160), 'numpy.loadtxt', 'np.loadtxt', (['"""./protocol-time-series/sinewave.csv"""'], {'skiprows': '(1)', 'delimiter': '""","""'}), "('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',')\n", (1094, 1160), True, 'import numpy as np\n'), ((1344, 1368), 'numpy.random.seed', 'np.random.seed', (['fit_seed'], {}), '(fit_seed)\n', (1358, 1368), True, 'import numpy as np\n'), ((1575, 1645), 'numpy.loadtxt', 'np.loadtxt', (["(data_dir + '/' + data_file_name)"], {'delimiter': '""","""', 'skiprows': '(1)'}), "(data_dir + '/' + data_file_name, delimiter=',', skiprows=1)\n", (1585, 1645), True, 'import numpy as np\n'), ((1726, 1744), 'numpy.std', 'np.std', (['data[:500]'], {}), '(data[:500])\n', (1732, 1744), True, 'import numpy as np\n'), ((1808, 2003), 'model.Model', 'm.Model', (['info.model_file'], {'variables': 'info.parameters', 'current_readout': 'info.current_list', 'set_ion': 'info.ions_conc', 'transform': 'transform_to_model_param', 'temperature': '(273.15 + info.temperature)'}), '(info.model_file, variables=info.parameters, current_readout=info.\n current_list, set_ion=info.ions_conc, transform=\n transform_to_model_param, temperature=273.15 + info.temperature)\n', (1815, 2003), True, 'import model as m\n'), ((2273, 2318), 'pints.SingleOutputProblem', 'pints.SingleOutputProblem', (['model', 'times', 'data'], {}), '(model, times, data)\n', (2298, 2318), False, 'import pints\n'), ((2335, 2371), 'pints.GaussianLogLikelihood', 'pints.GaussianLogLikelihood', (['problem'], {}), '(problem)\n', (2362, 2371), False, 'import pints\n'), ((2484, 2548), 'pints.UniformLogPrior', 'pints.UniformLogPrior', (['[0.1 * noise_sigma]', '[10.0 * noise_sigma]'], {}), '([0.1 * noise_sigma], [10.0 * noise_sigma])\n', (2505, 2548), False, 'import pints\n'), ((2559, 2611), 'pints.ComposedLogPrior', 'pints.ComposedLogPrior', (['logmodelprior', 'lognoiseprior'], {}), '(logmodelprior, lognoiseprior)\n', (2581, 2611), False, 'import pints\n'), ((2627, 2670), 'pints.LogPosterior', 'pints.LogPosterior', (['loglikelihood', 'logprior'], {}), '(loglikelihood, logprior)\n', (2645, 2670), False, 'import pints\n'), ((2723, 2747), 'numpy.copy', 'np.copy', (['info.base_param'], {}), '(info.base_param)\n', (2730, 2747), True, 'import numpy as np\n'), ((2826, 2861), 'numpy.append', 'np.append', (['priorparams', 'noise_sigma'], {}), '(priorparams, noise_sigma)\n', (2835, 2861), True, 'import numpy as np\n'), ((2886, 2931), 'numpy.append', 'np.append', (['transform_priorparams', 'noise_sigma'], {}), '(transform_priorparams, noise_sigma)\n', (2895, 2931), True, 'import numpy as np\n'), ((4041, 4063), 'numpy.zeros', 'np.zeros', (['chains.shape'], {}), '(chains.shape)\n', (4049, 4063), True, 'import numpy as np\n'), ((4278, 4353), 'pints.io.save_samples', 'pints.io.save_samples', (["('%s/%s-chain.csv' % (savedir, saveas))", '*chains_param'], {}), "('%s/%s-chain.csv' % (savedir, saveas), *chains_param)\n", (4299, 4353), False, 'import pints\n'), ((4607, 4673), 'pints.plot.pairwise', 
'pints.plot.pairwise', (['chains_param[0]'], {'kde': '(False)', 'ref_parameters': 'x0'}), '(chains_param[0], kde=False, ref_parameters=x0)\n', (4626, 4673), False, 'import pints\n'), ((4674, 4723), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/%s-fig1.png' % (savedir, saveas))"], {}), "('%s/%s-fig1.png' % (savedir, saveas))\n", (4685, 4723), True, 'import matplotlib.pyplot as plt\n'), ((4724, 4740), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4733, 4740), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4791), 'pints.plot.trace', 'pints.plot.trace', (['chains_param'], {'ref_parameters': 'x0'}), '(chains_param, ref_parameters=x0)\n', (4758, 4791), False, 'import pints\n'), ((4792, 4841), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/%s-fig2.png' % (savedir, saveas))"], {}), "('%s/%s-fig2.png' % (savedir, saveas))\n", (4803, 4841), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4858), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4851, 4858), True, 'import matplotlib.pyplot as plt\n'), ((847, 869), 'os.path.isdir', 'os.path.isdir', (['savedir'], {}), '(savedir)\n', (860, 869), False, 'import os\n'), ((875, 895), 'os.makedirs', 'os.makedirs', (['savedir'], {}), '(savedir)\n', (886, 895), False, 'import os\n'), ((3392, 3405), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (3402, 3405), True, 'import numpy as np\n'), ((4107, 4117), 'numpy.copy', 'np.copy', (['c'], {}), '(c)\n', (4114, 4117), True, 'import numpy as np\n'), ((480, 490), 'sys.exit', 'sys.exit', ([], {}), '()\n', (488, 490), False, 'import sys\n'), ((448, 474), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (464, 474), False, 'import os\n')]
|
import tensorflow as tf
import numpy as np
# atrous_conv2d expects a 4-D input tensor: [batch, height, width, channels]
input = tf.placeholder(dtype=tf.float32, shape=[3, 5, 5, 3])
# 3x3 kernel, 3 input channels, 5 output channels
filter = tf.constant(value=1, shape=[3, 3, 3, 5], dtype=tf.float32)
# rate=2 dilates the 3x3 kernel to an effective 5x5 receptive field
conv0 = tf.nn.atrous_conv2d(input, filters=filter, rate=2, padding='VALID')
with tf.Session() as sess:
    # feed a batch of 3 dummy 5x5x3 images matching the placeholder shape
    img = np.ones([3, 5, 5, 3], dtype=np.float32)
    out = sess.run(conv0, feed_dict={input: img})
print(out.shape)
|
[
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.nn.atrous_conv2d",
"tensorflow.placeholder",
"numpy.array"
] |
[((52, 101), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[5, 5, 3]'}), '(dtype=tf.float32, shape=[5, 5, 3])\n', (66, 101), True, 'import tensorflow as tf\n'), ((108, 166), 'tensorflow.constant', 'tf.constant', ([], {'value': '(1)', 'shape': '[3, 3, 3, 5]', 'dtype': 'tf.float32'}), '(value=1, shape=[3, 3, 3, 5], dtype=tf.float32)\n', (119, 166), True, 'import tensorflow as tf\n'), ((172, 239), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['input'], {'filters': 'filter', 'rate': '(2)', 'padding': '"""VALID"""'}), "(input, filters=filter, rate=2, padding='VALID')\n", (191, 239), True, 'import tensorflow as tf\n'), ((243, 255), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (253, 255), True, 'import tensorflow as tf\n'), ((275, 297), 'numpy.array', 'np.array', (['[3, 5, 5, 3]'], {}), '([3, 5, 5, 3])\n', (283, 297), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, os, glob
import re
# Output data format
from configurations import *
design_pt_to_plot=2
#################################################################################
#### Try to figure out semi-automatically what observables to group together ####
#################################################################################
# This is the input:
# Specifies how observables are grouped according to these regular expression
# Also specify if they should be plotted on a linear or a log scale
regex_obs_to_group_list=[
(r'$\pi$/K/p dN/dy',"dN_dy_(pion|kaon|proton)",'log'),
(r'$\pi$/K/p $\langle p_T \rangle$',"mean_pT_(pion|kaon|proton)",'linear'),
(r'$\Lambda/\Omega/\Xi$ dN/dy',"dN_dy_(Lambda|Omega|Xi)",'log'),
(r'$v_n\{2\}$',"v[2-5+]2",'linear'),
(r'$dN_{ch}/d\eta$',"dNch_deta",'log'),
(r'$dE_T/d\eta$',"dET_deta",'log'),
(r'$\langle p_T \rangle$ fluct',"pT_fluct",'linear'),
]
# This parts figures out how to group observables based on the regular expressions
obs_to_group={}
# Loop over observables to see which ones to group
for system in system_strs:
obs_to_group[system]={}
for obs_name in obs_cent_list[system]:
found_match=False
for regex_id, (regex_label, regex_obs_to_group, plot_scale) in enumerate(regex_obs_to_group_list):
r = re.compile(regex_obs_to_group)
match=r.match(obs_name)
# No match means nothing to group
if (match is not None):
if (found_match):
print("Non-exclusive grouping. Can't work...")
exit(1)
else:
found_match=True
obs_to_group[system][obs_name]=(regex_id, regex_label, plot_scale)
if (not found_match):
obs_to_group[system][obs_name]=None
# Parse the previous list to make something useful out of it
final_obs_grouping = {}
#
for system in system_strs:
final_obs_grouping[system]={}
for n, (key, value) in enumerate(obs_to_group[system].items()):
if (value is None):
newvalue=(n,key)
else:
newvalue=value
final_obs_grouping[system].setdefault(newvalue, []).append(key)
##############
#### Plot ####
##############
def plot(calcs):
for system in system_strs:
# Count how many observables to plot
nb_obs=len(final_obs_grouping[system])
# Decide how many columns we want the plot to have
nb_of_cols=4
# COunt how many rows needed
nb_of_rows=int(np.ceil(nb_obs/nb_of_cols))
# Prepare figure
fig = plt.figure(figsize=(2*nb_of_cols,2*nb_of_rows))
line_list=[]
#Loop over grouped observables
#for n, (obs, cent) in enumerate(obs_cent_list.items()):
for n, ((regex_id, obs_name, plot_scale), obs_list) in enumerate(final_obs_grouping[system].items()):
plt.subplot(nb_of_rows,nb_of_cols,n+1)
plt.xlabel(r'Centrality (%)', fontsize=10)
plt.ylabel(obs_name, fontsize=10)
plt.yscale(plot_scale)
# Loop over observable group
for obs, color in zip(obs_list,'rgbrgbrgb'):
cent=obs_cent_list[system][obs]
mid_centrality=[(low+up)/2. for low,up in cent]
#Loop over delta-f
idf_list=[0,1,2,3]
idf_sym=['D','o','^','.']
for idf, line in zip(idf_list, idf_sym):
mean_values=calcs[system][obs]['mean'][:,idf][design_pt_to_plot]
stat_uncert=calcs[system][obs]['err'][:,idf][design_pt_to_plot]
line_type,_,_ = plt.errorbar(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color=color, markersize=4)
line_list.append(line_type)
if (plot_scale != "log"):
plt.ylim(ymin=0)
# Plot legend in first subplot only
if (0 == n):
plt.legend(line_list,["idf="+str(idf) for idf in idf_list],loc="upper right",fontsize=10)
        plt.tight_layout()
#plt.savefig("obs.pdf")
plt.show()
if __name__ == '__main__':
results = []
for file in glob.glob(sys.argv[1]):
# Load calculations
calcs = np.fromfile(file, dtype=np.dtype(bayes_dtype))
entry = plot(calcs)
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"numpy.ceil",
"matplotlib.pyplot.ylim",
"numpy.dtype",
"matplotlib.pyplot.figure",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.errorbar",
"re.compile"
] |
[((4351, 4373), 'glob.glob', 'glob.glob', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (4360, 4373), False, 'import sys, os, glob\n'), ((2720, 2772), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * nb_of_cols, 2 * nb_of_rows)'}), '(figsize=(2 * nb_of_cols, 2 * nb_of_rows))\n', (2730, 2772), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4228), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', (['(True)'], {}), '(True)\n', (4222, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4269, 4279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4277, 4279), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1457), 're.compile', 're.compile', (['regex_obs_to_group'], {}), '(regex_obs_to_group)\n', (1437, 1457), False, 'import re\n'), ((2653, 2681), 'numpy.ceil', 'np.ceil', (['(nb_obs / nb_of_cols)'], {}), '(nb_obs / nb_of_cols)\n', (2660, 2681), True, 'import numpy as np\n'), ((3018, 3060), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nb_of_rows', 'nb_of_cols', '(n + 1)'], {}), '(nb_of_rows, nb_of_cols, n + 1)\n', (3029, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Centrality (%)"""'], {'fontsize': '(10)'}), "('Centrality (%)', fontsize=10)\n", (3079, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3157), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['obs_name'], {'fontsize': '(10)'}), '(obs_name, fontsize=10)\n', (3134, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3192), 'matplotlib.pyplot.yscale', 'plt.yscale', (['plot_scale'], {}), '(plot_scale)\n', (3180, 3192), True, 'import matplotlib.pyplot as plt\n'), ((3981, 3997), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (3989, 3997), True, 'import matplotlib.pyplot as plt\n'), ((4466, 4487), 'numpy.dtype', 'np.dtype', (['bayes_dtype'], {}), '(bayes_dtype)\n', (4474, 4487), True, 'import numpy as np\n'), ((3781, 3882), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['mid_centrality', 'mean_values'], {'yerr': 'stat_uncert', 'fmt': 'line', 'color': 'color', 'markersize': '(4)'}), '(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color\n =color, markersize=4)\n', (3793, 3882), True, 'import matplotlib.pyplot as plt\n')]
|
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import os
import re
import shutil
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from tensorflow.python.keras.utils import generic_utils
sys.setrecursionlimit(40000)
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
# if Logs path directory exists, it will delete the directory
if os.path.exists('logs'):
shutil.rmtree('logs')
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data.")
parser.add_option("-v", "--valid_path", dest="valid_path", help="Path to validation data.")
parser.add_option("-o", "--parser", dest="parser",
help="Parser to use. One of simple or pascal_voc", default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network",
help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--hf", dest="horizontal_flips",
help="Augment with horizontal flips in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips",
help="Augment with vertical flips in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90",
help="Augment with 90 degree rotations in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--num_epochs", type="int",
dest="num_epochs", help="Number of epochs.", default=2000)
parser.add_option("--config_filename", dest="config_filename",
help="Location to store all the metadata related to "
"the training (to be used when testing).",
default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path",
help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path",
help="Input path for weights. If not specified, will try to"
" load default weights provided by keras.")
(options, args) = parser.parse_args()
if not options.train_path: # if filename is not given
parser.error('Error: path to training data must be specified. Pass --path to command line')
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
from keras_frcnn.simple_parser import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path
model_path_regex = re.match(r"^(.+)(\.hdf5)$", C.model_path)
if model_path_regex is None:
    print('Output weights must have .hdf5 filetype')
    exit(1)
C.num_rois = int(options.num_rois)
if options.network == 'vgg':
C.network = 'vgg'
from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
from keras_frcnn import resnet as nn
C.network = 'resnet50'
else:
print('Not a valid model')
raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
C.base_net_weights = options.input_weight_path
else:
# set the path to weights based on backend and model
C.base_net_weights = nn.get_weight_path()
train_imgs, classes_count, class_mapping = get_data(options.train_path)
val_imgs, _, _ = get_data(options.valid_path)
if 'bg' not in classes_count:
classes_count['bg'] = 0
class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print(f'Num classes (including bg) = {len(classes_count)}')
config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
pickle.dump(C, config_f)
print(f'Config has been written to {config_output_filename}, '
f'and can be loaded when testing to ensure correct results')
num_imgs = len(train_imgs)
num_valid_imgs = len(val_imgs)
print(f'Num train samples {len(train_imgs)}')
print(f'Num val samples {len(val_imgs)}')
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C,
nn.get_img_output_length,
K.image_data_format(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,
K.image_data_format(), mode='val')
if K.image_data_format() == 'channels_first':
input_shape_img = (3, None, None)
else:
input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois,
nb_classes=len(classes_count), trainable=True)
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier,
# used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# Defining optimizers for all models
optimizer_rpn = Adam(learning_rate=1e-5)
optimizer_classifier = Adam(learning_rate=1e-5)
optimizer_all = SGD(learning_rate=0.01)
# Accuracy metrics for Fast RCNN model
train_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
val_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
# Loss function of RPN model and Fast RCNN model
rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors)
rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors)
fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss()
fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1)
# tensorboard writer, automatically creates directory and writes logs
train_writer = tf.summary.create_file_writer('logs/train/')
valid_writer = tf.summary.create_file_writer('logs/valid/')
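# One RPN training step: forward pass under GradientTape, classification and
# regression losses, gradient update with optimizer_rpn, and TensorBoard logging.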
@tf.function
def rpn_train_step(step, x_batch_train, y_batch_train):
with tf.GradientTape() as rpn_tape:
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True)
rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss],
model_rpn.trainable_weights)
optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights))
# write training loss and accuracy to the tensorboard
with train_writer.as_default():
tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss
@tf.function
def frcnn_train_step(step, x_batch_train, X2, Y1, Y2):
with tf.GradientTape() as frcnn_tape:
rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2],
training=True)
fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss],
model_classifier.trainable_weights)
optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights))
train_classifier_metric.update_state(Y1, rcnn_class_pred)
fast_rcnn_class_acc = train_classifier_metric.result()
# write training loss and accuracy to the tensorboard
with train_writer.as_default():
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc
@tf.function
def rpn_valid_step(step, x_batch_train, y_batch_train):
with tf.GradientTape() as rpn_tape:
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False)
rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
# write training loss and accuracy to the tensorboard
with valid_writer.as_default():
tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss
@tf.function
def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2):
with tf.GradientTape() as frcnn_tape:
rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2],
training=False)
fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
val_classifier_metric.update_state(Y1, rcnn_class_pred)
fast_rcnn_class_acc = val_classifier_metric.result()
# write training loss and accuracy to the tensorboard
with valid_writer.as_default():
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc
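# Split the RoIs returned by calc_iou into positive (non-bg) and negative (bg)
# samples and pick a subset of C.num_rois of them for the detector head,
# keeping roughly half positive when enough are available.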
def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch):
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois // 2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2,
replace=False).tolist()
try:
selected_neg_samples = np.random.choice(neg_samples,
C.num_rois - len(selected_pos_samples),
replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples,
C.num_rois - len(selected_pos_samples),
replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
return sel_samples
n_epochs = options.num_epochs
BATCH_SIZE = 1
n_steps = num_imgs // BATCH_SIZE
n_valid_steps = num_valid_imgs // BATCH_SIZE
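# Per-step metric buffer: columns are [rpn_cls, rpn_regr, detector_cls,
# detector_regr, detector_acc]. Note this rebinds the name `losses`, shadowing
# the keras_frcnn.losses module imported above (its loss objects are already built).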
losses = np.zeros((n_steps, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
valid_losses = np.zeros((n_valid_steps, 5))
rpn_accuracy_rpn_monitor_valid = []
rpn_accuracy_for_epoch_valid = []
best_loss = np.Inf
start_time = time.time()
class_mapping_inv = {v: k for k, v in class_mapping.items()}
global_step = tf.convert_to_tensor(0, tf.int64)
one_step = tf.convert_to_tensor(1, tf.int64)
print("Training started for %d epochs" % n_epochs)
for epoch in range(n_epochs):
print("\nStart of epoch %d" % (epoch + 1,))
progbar = generic_utils.Progbar(n_steps)
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train):
# print(step, img_data['filepath'])
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
step = tf.cast(step, dtype=tf.int64)
global_step = tf.add(global_step, one_step)
y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step(
global_step, x_batch_train, y_batch_train)
R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch)
x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step(
global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor)
losses[step, 0] = rpn_class_loss
losses[step, 1] = rpn_reg_loss
losses[step, 2] = fast_rcnn_class_loss
losses[step, 3] = fast_rcnn_reg_loss
losses[step, 4] = fast_rcnn_class_acc
progbar.update(step + 1,
[('rpn_cls', rpn_class_loss),
('rpn_regr', rpn_reg_loss),
('detector_cls', fast_rcnn_class_loss),
('detector_regr', fast_rcnn_reg_loss)])
if step == n_steps - 1 and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)
) / len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print(f'\nAverage number of overlapping bounding boxes '
f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
' Check RPN settings or keep training.')
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(
rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if C.verbose:
print(
f'\nMean number of bounding boxes from RPN overlapping '
f'ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
print("Total Loss: %.4f" % curr_loss)
start_time = time.time()
if curr_loss < best_loss:
if C.verbose:
print(
f'Total loss decreased from {best_loss} to {curr_loss}, saving weights')
best_loss = curr_loss
model_all.save_weights(model_path_regex.group(1) + "_" + '{:04d}'.format(
epoch) + model_path_regex.group(2))
break
# # Log every 10 steps.
# if step % 10 == 0:
# print("Step %d, RPN Cls Loss: %.4f RPN reg Loss: %.4f "
# "FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f" % (
# step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss),
# float(fast_rcnn_reg_loss)))
# Reset training metrics at the end of each epoch
train_classifier_metric.reset_states()
progbar = generic_utils.Progbar(n_valid_steps)
# Iterate over the batches of the dataset.
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val):
y_rpn_cls_true, y_rpn_regr_true = y_batch_val
y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step(
global_step, x_batch_val, y_batch_val)
R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor_valid.append(0)
rpn_accuracy_for_epoch_valid.append(0)
continue
sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid,
rpn_accuracy_for_epoch_valid)
x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step(
global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor)
valid_losses[step, 0] = rpn_class_loss
valid_losses[step, 1] = rpn_reg_loss
valid_losses[step, 2] = fast_rcnn_class_loss
valid_losses[step, 3] = fast_rcnn_reg_loss
valid_losses[step, 4] = fast_rcnn_class_acc
progbar.update(step + 1,
[('rpn_cls', rpn_class_loss),
('rpn_regr', rpn_reg_loss),
('detector_cls', fast_rcnn_class_loss),
('detector_regr', fast_rcnn_reg_loss)])
if step == n_valid_steps - 1 and C.verbose:
                mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid)) / len(
                    rpn_accuracy_rpn_monitor_valid)
rpn_accuracy_rpn_monitor_valid = []
print(f'\nValidation: Average number of overlapping bounding boxes '
f'from RPN = {mean_overlapping_bboxes}')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
' Check RPN settings or keep training.')
loss_rpn_cls = np.mean(valid_losses[:, 0])
loss_rpn_regr = np.mean(valid_losses[:, 1])
loss_class_cls = np.mean(valid_losses[:, 2])
loss_class_regr = np.mean(valid_losses[:, 3])
class_acc = np.mean(valid_losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid)
) / len(rpn_accuracy_for_epoch_valid)
rpn_accuracy_for_epoch_valid = []
if C.verbose:
print("Validation Metrics: ")
print(
f'Mean number of bounding boxes from RPN overlapping '
f'ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
print("Total validation loss: %.4f" % curr_loss)
start_time = time.time()
break
val_classifier_metric.reset_states()
|
[
"pickle.dump",
"optparse.OptionParser",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.python.ops.numpy_ops.np_config.enable_numpy_behavior",
"tensorflow.keras.optimizers.SGD",
"numpy.random.randint",
"numpy.mean",
"pprint.pprint",
"keras_frcnn.losses.RpnClassificationLoss",
"keras_frcnn.simple_parser.get_data",
"shutil.rmtree",
"sys.setrecursionlimit",
"os.path.exists",
"keras_frcnn.losses.RpnRegressionLoss",
"tensorflow.cast",
"tensorflow.python.keras.utils.generic_utils.Progbar",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"numpy.random.choice",
"keras_frcnn.roi_helpers.calc_iou",
"tensorflow.summary.scalar",
"tensorflow.add",
"re.match",
"tensorflow.keras.models.Model",
"keras_frcnn.config.Config",
"tensorflow.keras.backend.image_data_format",
"tensorflow.convert_to_tensor",
"keras_frcnn.losses.FastrcnnClassLoss",
"numpy.zeros",
"keras_frcnn.resnet.get_weight_path",
"time.time",
"random.choice",
"numpy.where",
"keras_frcnn.resnet.nn_base",
"keras_frcnn.resnet.rpn",
"tensorflow.summary.create_file_writer",
"tensorflow.GradientTape"
] |
[((578, 606), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(40000)'], {}), '(40000)\n', (599, 606), False, 'import sys\n'), ((663, 696), 'tensorflow.python.ops.numpy_ops.np_config.enable_numpy_behavior', 'np_config.enable_numpy_behavior', ([], {}), '()\n', (694, 696), False, 'from tensorflow.python.ops.numpy_ops import np_config\n'), ((763, 785), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (777, 785), False, 'import os\n'), ((823, 837), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (835, 837), False, 'from optparse import OptionParser\n'), ((3291, 3306), 'keras_frcnn.config.Config', 'config.Config', ([], {}), '()\n', (3304, 3306), False, 'from keras_frcnn import config, data_generators\n'), ((3510, 3551), 're.match', 're.match', (['"""^(.+)(\\\\.hdf5)$"""', 'C.model_path'], {}), "('^(.+)(\\\\.hdf5)$', C.model_path)\n", (3518, 3551), False, 'import re\n'), ((4231, 4259), 'keras_frcnn.simple_parser.get_data', 'get_data', (['options.train_path'], {}), '(options.train_path)\n', (4239, 4259), False, 'from keras_frcnn.simple_parser import get_data\n'), ((4277, 4305), 'keras_frcnn.simple_parser.get_data', 'get_data', (['options.valid_path'], {}), '(options.valid_path)\n', (4285, 4305), False, 'from keras_frcnn.simple_parser import get_data\n'), ((4532, 4560), 'pprint.pprint', 'pprint.pprint', (['classes_count'], {}), '(classes_count)\n', (4545, 4560), False, 'import pprint\n'), ((5595, 5623), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape_img'}), '(shape=input_shape_img)\n', (5600, 5623), False, 'from tensorflow.keras.layers import Input\n'), ((5636, 5658), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, 4)'}), '(shape=(None, 4))\n', (5641, 5658), False, 'from tensorflow.keras.layers import Input\n'), ((5676, 5713), 'keras_frcnn.resnet.nn_base', 'nn.nn_base', (['img_input'], {'trainable': '(True)'}), '(img_input, trainable=True)\n', (5686, 5713), True, 'from keras_frcnn import resnet as nn\n'), ((5830, 5864), 'keras_frcnn.resnet.rpn', 'nn.rpn', (['shared_layers', 'num_anchors'], {}), '(shared_layers, num_anchors)\n', (5836, 5864), True, 'from keras_frcnn import resnet as nn\n'), ((6017, 6042), 'tensorflow.keras.models.Model', 'Model', (['img_input', 'rpn[:2]'], {}), '(img_input, rpn[:2])\n', (6022, 6042), False, 'from tensorflow.keras.models import Model\n'), ((6062, 6103), 'tensorflow.keras.models.Model', 'Model', (['[img_input, roi_input]', 'classifier'], {}), '([img_input, roi_input], classifier)\n', (6067, 6103), False, 'from tensorflow.keras.models import Model\n'), ((6222, 6273), 'tensorflow.keras.models.Model', 'Model', (['[img_input, roi_input]', '(rpn[:2] + classifier)'], {}), '([img_input, roi_input], rpn[:2] + classifier)\n', (6227, 6273), False, 'from tensorflow.keras.models import Model\n'), ((6328, 6353), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (6332, 6353), False, 'from tensorflow.keras.optimizers import Adam, SGD\n'), ((6376, 6401), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (6380, 6401), False, 'from tensorflow.keras.optimizers import Adam, SGD\n'), ((6417, 6440), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (6420, 6440), False, 'from tensorflow.keras.optimizers import Adam, SGD\n'), ((6507, 6545), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', 
(6543, 6545), True, 'import tensorflow as tf\n'), ((6570, 6608), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', (6606, 6608), True, 'import tensorflow as tf\n'), ((6679, 6720), 'keras_frcnn.losses.RpnClassificationLoss', 'losses.RpnClassificationLoss', (['num_anchors'], {}), '(num_anchors)\n', (6707, 6720), True, 'from keras_frcnn import losses as losses\n'), ((6739, 6776), 'keras_frcnn.losses.RpnRegressionLoss', 'losses.RpnRegressionLoss', (['num_anchors'], {}), '(num_anchors)\n', (6763, 6776), True, 'from keras_frcnn import losses as losses\n'), ((6803, 6829), 'keras_frcnn.losses.FastrcnnClassLoss', 'losses.FastrcnnClassLoss', ([], {}), '()\n', (6827, 6829), True, 'from keras_frcnn import losses as losses\n'), ((6987, 7031), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['"""logs/train/"""'], {}), "('logs/train/')\n", (7016, 7031), True, 'import tensorflow as tf\n'), ((7047, 7091), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['"""logs/valid/"""'], {}), "('logs/valid/')\n", (7076, 7091), True, 'import tensorflow as tf\n'), ((12710, 12732), 'numpy.zeros', 'np.zeros', (['(n_steps, 5)'], {}), '((n_steps, 5))\n', (12718, 12732), True, 'import numpy as np\n'), ((12807, 12835), 'numpy.zeros', 'np.zeros', (['(n_valid_steps, 5)'], {}), '((n_valid_steps, 5))\n', (12815, 12835), True, 'import numpy as np\n'), ((12939, 12950), 'time.time', 'time.time', ([], {}), '()\n', (12948, 12950), False, 'import time\n'), ((13028, 13061), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(0)', 'tf.int64'], {}), '(0, tf.int64)\n', (13048, 13061), True, 'import tensorflow as tf\n'), ((13073, 13106), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(1)', 'tf.int64'], {}), '(1, tf.int64)\n', (13093, 13106), True, 'import tensorflow as tf\n'), ((791, 812), 'shutil.rmtree', 'shutil.rmtree', (['"""logs"""'], {}), "('logs')\n", (804, 812), False, 'import shutil\n'), ((4166, 4186), 'keras_frcnn.resnet.get_weight_path', 'nn.get_weight_path', ([], {}), '()\n', (4184, 4186), True, 'from keras_frcnn import resnet as nn\n'), ((4728, 4752), 'pickle.dump', 'pickle.dump', (['C', 'config_f'], {}), '(C, config_f)\n', (4739, 4752), False, 'import pickle\n'), ((5237, 5258), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5256, 5258), True, 'from tensorflow.keras import backend as K\n'), ((5418, 5439), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5437, 5439), True, 'from tensorflow.keras import backend as K\n'), ((5457, 5478), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5476, 5478), True, 'from tensorflow.keras import backend as K\n'), ((10949, 10976), 'numpy.where', 'np.where', (['(Y1[0, :, -1] == 1)'], {}), '(Y1[0, :, -1] == 1)\n', (10957, 10976), True, 'import numpy as np\n'), ((10995, 11022), 'numpy.where', 'np.where', (['(Y1[0, :, -1] == 0)'], {}), '(Y1[0, :, -1] == 0)\n', (11003, 11022), True, 'import numpy as np\n'), ((13251, 13281), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'generic_utils.Progbar', (['n_steps'], {}), '(n_steps)\n', (13272, 13281), False, 'from tensorflow.python.keras.utils import generic_utils\n'), ((17862, 17898), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'generic_utils.Progbar', (['n_valid_steps'], {}), '(n_valid_steps)\n', (17883, 17898), False, 'from tensorflow.python.keras.utils import generic_utils\n'), ((7172, 
7189), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7187, 7189), True, 'import tensorflow as tf\n'), ((7802, 7864), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_class_loss"""', 'rpn_class_loss'], {'step': 'step'}), "('rpn_class_loss', rpn_class_loss, step=step)\n", (7819, 7864), True, 'import tensorflow as tf\n'), ((7873, 7931), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_reg_loss"""', 'rpn_reg_loss'], {'step': 'step'}), "('rpn_reg_loss', rpn_reg_loss, step=step)\n", (7890, 7931), True, 'import tensorflow as tf\n'), ((8085, 8102), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8100, 8102), True, 'import tensorflow as tf\n'), ((8893, 8967), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_loss"""', 'fast_rcnn_class_loss'], {'step': 'step'}), "('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)\n", (8910, 8967), True, 'import tensorflow as tf\n'), ((8976, 9046), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_reg_loss"""', 'fast_rcnn_reg_loss'], {'step': 'step'}), "('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)\n", (8993, 9046), True, 'import tensorflow as tf\n'), ((9055, 9127), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_acc"""', 'fast_rcnn_class_acc'], {'step': 'step'}), "('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)\n", (9072, 9127), True, 'import tensorflow as tf\n'), ((9282, 9299), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9297, 9299), True, 'import tensorflow as tf\n'), ((9704, 9766), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_class_loss"""', 'rpn_class_loss'], {'step': 'step'}), "('rpn_class_loss', rpn_class_loss, step=step)\n", (9721, 9766), True, 'import tensorflow as tf\n'), ((9775, 9833), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_reg_loss"""', 'rpn_reg_loss'], {'step': 'step'}), "('rpn_reg_loss', rpn_reg_loss, step=step)\n", (9792, 9833), True, 'import tensorflow as tf\n'), ((9987, 10004), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10002, 10004), True, 'import tensorflow as tf\n'), ((10540, 10614), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_loss"""', 'fast_rcnn_class_loss'], {'step': 'step'}), "('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)\n", (10557, 10614), True, 'import tensorflow as tf\n'), ((10623, 10693), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_reg_loss"""', 'fast_rcnn_reg_loss'], {'step': 'step'}), "('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)\n", (10640, 10693), True, 'import tensorflow as tf\n'), ((10702, 10774), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_acc"""', 'fast_rcnn_class_acc'], {'step': 'step'}), "('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)\n", (10719, 10774), True, 'import tensorflow as tf\n'), ((12406, 12429), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (12423, 12429), True, 'import numpy as np\n'), ((13531, 13560), 'tensorflow.cast', 'tf.cast', (['step'], {'dtype': 'tf.int64'}), '(step, dtype=tf.int64)\n', (13538, 13560), True, 'import tensorflow as tf\n'), ((13583, 13612), 'tensorflow.add', 'tf.add', (['global_step', 'one_step'], {}), '(global_step, one_step)\n', (13589, 13612), True, 'import tensorflow as tf\n'), ((14036, 14087), 'keras_frcnn.roi_helpers.calc_iou', 'roi_helpers.calc_iou', (['R', 'img_data', 'C', 'class_mapping'], {}), '(R, img_data, C, class_mapping)\n', 
(14056, 14087), True, 'import keras_frcnn.roi_helpers as roi_helpers\n'), ((14344, 14399), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X2[:, sel_samples, :]', 'tf.float32'], {}), '(X2[:, sel_samples, :], tf.float32)\n', (14364, 14399), True, 'import tensorflow as tf\n'), ((14420, 14475), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y1[:, sel_samples, :]', 'tf.float32'], {}), '(Y1[:, sel_samples, :], tf.float32)\n', (14440, 14475), True, 'import tensorflow as tf\n'), ((14496, 14551), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y2[:, sel_samples, :]', 'tf.float32'], {}), '(Y2[:, sel_samples, :], tf.float32)\n', (14516, 14551), True, 'import tensorflow as tf\n'), ((18500, 18551), 'keras_frcnn.roi_helpers.calc_iou', 'roi_helpers.calc_iou', (['R', 'img_data', 'C', 'class_mapping'], {}), '(R, img_data, C, class_mapping)\n', (18520, 18551), True, 'import keras_frcnn.roi_helpers as roi_helpers\n'), ((18875, 18930), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X2[:, sel_samples, :]', 'tf.float32'], {}), '(X2[:, sel_samples, :], tf.float32)\n', (18895, 18930), True, 'import tensorflow as tf\n'), ((18951, 19006), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y1[:, sel_samples, :]', 'tf.float32'], {}), '(Y1[:, sel_samples, :], tf.float32)\n', (18971, 19006), True, 'import tensorflow as tf\n'), ((19027, 19082), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y2[:, sel_samples, :]', 'tf.float32'], {}), '(Y2[:, sel_samples, :], tf.float32)\n', (19047, 19082), True, 'import tensorflow as tf\n'), ((12457, 12483), 'random.choice', 'random.choice', (['neg_samples'], {}), '(neg_samples)\n', (12470, 12483), False, 'import random\n'), ((12524, 12550), 'random.choice', 'random.choice', (['pos_samples'], {}), '(pos_samples)\n', (12537, 12550), False, 'import random\n'), ((13828, 13849), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (13847, 13849), True, 'from tensorflow.keras import backend as K\n'), ((15836, 15857), 'numpy.mean', 'np.mean', (['losses[:, 0]'], {}), '(losses[:, 0])\n', (15843, 15857), True, 'import numpy as np\n'), ((15886, 15907), 'numpy.mean', 'np.mean', (['losses[:, 1]'], {}), '(losses[:, 1])\n', (15893, 15907), True, 'import numpy as np\n'), ((15937, 15958), 'numpy.mean', 'np.mean', (['losses[:, 2]'], {}), '(losses[:, 2])\n', (15944, 15958), True, 'import numpy as np\n'), ((15989, 16010), 'numpy.mean', 'np.mean', (['losses[:, 3]'], {}), '(losses[:, 3])\n', (15996, 16010), True, 'import numpy as np\n'), ((16035, 16056), 'numpy.mean', 'np.mean', (['losses[:, 4]'], {}), '(losses[:, 4])\n', (16042, 16056), True, 'import numpy as np\n'), ((16997, 17008), 'time.time', 'time.time', ([], {}), '()\n', (17006, 17008), False, 'import time\n'), ((18291, 18312), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (18310, 18312), True, 'from tensorflow.keras import backend as K\n'), ((20398, 20425), 'numpy.mean', 'np.mean', (['valid_losses[:, 0]'], {}), '(valid_losses[:, 0])\n', (20405, 20425), True, 'import numpy as np\n'), ((20454, 20481), 'numpy.mean', 'np.mean', (['valid_losses[:, 1]'], {}), '(valid_losses[:, 1])\n', (20461, 20481), True, 'import numpy as np\n'), ((20511, 20538), 'numpy.mean', 'np.mean', (['valid_losses[:, 2]'], {}), '(valid_losses[:, 2])\n', (20518, 20538), True, 'import numpy as np\n'), ((20569, 20596), 'numpy.mean', 'np.mean', (['valid_losses[:, 3]'], {}), '(valid_losses[:, 3])\n', (20576, 20596), True, 'import numpy as np\n'), 
((20621, 20648), 'numpy.mean', 'np.mean', (['valid_losses[:, 4]'], {}), '(valid_losses[:, 4])\n', (20628, 20648), True, 'import numpy as np\n'), ((21690, 21701), 'time.time', 'time.time', ([], {}), '()\n', (21699, 21701), False, 'import time\n'), ((11512, 11573), 'numpy.random.choice', 'np.random.choice', (['pos_samples', '(C.num_rois // 2)'], {'replace': '(False)'}), '(pos_samples, C.num_rois // 2, replace=False)\n', (11528, 11573), True, 'import numpy as np\n'), ((16805, 16816), 'time.time', 'time.time', ([], {}), '()\n', (16814, 16816), False, 'import time\n'), ((21487, 21498), 'time.time', 'time.time', ([], {}), '()\n', (21496, 21498), False, 'import time\n')]
|
# Copyright (C) 2017-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
import numpy
from ocellaris import Simulation, setup_simulation
import pytest
from helpers import skip_in_parallel
ISO_INPUT = """
ocellaris:
type: input
version: 1.0
mesh:
type: Rectangle
Nx: 4
Ny: 4
probes:
- name: free_surface
enabled: yes
type: IsoSurface
value: 0.5
field: c
custom_hook: MultiPhaseModelUpdated
multiphase_solver:
type: BlendedAlgebraicVOF
function_space_colour: DG
polynomial_degree_colour: 0
solver: {type: AnalyticalSolution}
boundary_conditions: [{'name': 'all', 'selector': 'code', 'inside_code': 'on_boundary'}]
physical_properties: {nu0: 1.0, nu1: 1, rho0: 1, rho1: 1}
output: {log_enabled: no}
"""
@pytest.mark.parametrize("degree", [0, 1, 2])
def test_isoline_horizontal(degree):
sim = Simulation()
sim.input.read_yaml(yaml_string=ISO_INPUT)
sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
setup_simulation(sim)
probe = sim.probes['free_surface']
# Initial value with sharp interface at x[1] == 0.5
Vc = sim.data['Vc']
c = sim.data['c']
dm = Vc.dofmap()
arr = c.vector().get_local()
for cell in dolfin.cells(sim.data['mesh']):
cell_value = 1 if cell.midpoint().y() < 0.5 else 0
for dof in dm.cell_dofs(cell.index()):
arr[dof] = cell_value
c.vector().set_local(arr)
c.vector().apply('insert')
lines = probe.run(force_active=True)
print('\nDegree:', degree, 'Vcdim:', Vc.dim())
print(probe.name, probe.field_name, probe.value)
print(len(lines))
if sim.ncpu > 1:
        pytest.skip()
for x, y in lines:
print('x', x, '\ny', y)
assert all(abs(y - 0.5) < 1e-12)
# Results should be in sorted order
xdx = numpy.diff(x)
assert all(xdx > 0) or all(xdx < 0)
assert len(lines) == 1
@pytest.mark.parametrize("degree", [1])
def test_isoline_circle(degree):
sim = Simulation()
sim.input.read_yaml(yaml_string=ISO_INPUT)
sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
sim.input.set_value('mesh/Nx', 10)
sim.input.set_value('mesh/Ny', 10)
sim.input.set_value(
'initial_conditions/cp/cpp_code', '1.1*pow(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2), 0.5)'
)
setup_simulation(sim)
sim.data['c'].assign(sim.data['cp'])
probe = sim.probes['free_surface']
lines = probe.run(force_active=True)
if False:
from matplotlib import pyplot
c = dolfin.plot(sim.data['c'])
pyplot.colorbar(c)
for x, y in lines:
pyplot.plot(x, y)
pyplot.savefig('test_isoline_circle_%d.png' % degree)
pyplot.close()
print(probe.name, probe.field_name, probe.value)
print(len(lines))
for x, y in lines:
# Check that the radius is constant
r = ((x - 0.5) ** 2 + (y - 0.5) ** 2) ** 0.5
print('x', x)
print('y', y)
print('dr', r - 0.5 / 1.1)
assert all(abs(r - 0.5 / 1.1) < 5e-3)
# Check that the line is clockwise or counter clockwise
# for all segments, no going back and forth
theta = numpy.arctan2(y - 0.5, x - 0.5) * 180 / numpy.pi
theta[theta < 0] += 360
tdt = numpy.diff(theta)
tdt2 = tdt[abs(tdt) < 340]
print('dt', tdt)
assert all(tdt2 > 0) or all(tdt2 < 0)
if sim.ncpu == 1:
# The iso surface code is not written for full parallel support
assert len(lines) == 1
assert x[0] == x[-1] and y[0] == y[-1], "The loop should be closed"
|
[
"ocellaris.Simulation",
"numpy.arctan2",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"pytest.skip",
"dolfin.plot",
"matplotlib.pyplot.colorbar",
"numpy.diff",
"pytest.mark.parametrize",
"ocellaris.setup_simulation",
"dolfin.cells",
"matplotlib.pyplot.savefig"
] |
[((772, 816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""degree"""', '[0, 1, 2]'], {}), "('degree', [0, 1, 2])\n", (795, 816), False, 'import pytest\n'), ((1936, 1974), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""degree"""', '[1]'], {}), "('degree', [1])\n", (1959, 1974), False, 'import pytest\n'), ((864, 876), 'ocellaris.Simulation', 'Simulation', ([], {}), '()\n', (874, 876), False, 'from ocellaris import Simulation, setup_simulation\n'), ((1006, 1027), 'ocellaris.setup_simulation', 'setup_simulation', (['sim'], {}), '(sim)\n', (1022, 1027), False, 'from ocellaris import Simulation, setup_simulation\n'), ((1240, 1270), 'dolfin.cells', 'dolfin.cells', (["sim.data['mesh']"], {}), "(sim.data['mesh'])\n", (1252, 1270), False, 'import dolfin\n'), ((2018, 2030), 'ocellaris.Simulation', 'Simulation', ([], {}), '()\n', (2028, 2030), False, 'from ocellaris import Simulation, setup_simulation\n'), ((2367, 2388), 'ocellaris.setup_simulation', 'setup_simulation', (['sim'], {}), '(sim)\n', (2383, 2388), False, 'from ocellaris import Simulation, setup_simulation\n'), ((1677, 1690), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1688, 1690), False, 'import pytest\n'), ((1847, 1860), 'numpy.diff', 'numpy.diff', (['x'], {}), '(x)\n', (1857, 1860), False, 'import numpy\n'), ((2577, 2603), 'dolfin.plot', 'dolfin.plot', (["sim.data['c']"], {}), "(sim.data['c'])\n", (2588, 2603), False, 'import dolfin\n'), ((2612, 2630), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', (['c'], {}), '(c)\n', (2627, 2630), False, 'from matplotlib import pyplot\n'), ((2696, 2749), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (["('test_isoline_circle_%d.png' % degree)"], {}), "('test_isoline_circle_%d.png' % degree)\n", (2710, 2749), False, 'from matplotlib import pyplot\n'), ((2758, 2772), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (2770, 2772), False, 'from matplotlib import pyplot\n'), ((3322, 3339), 'numpy.diff', 'numpy.diff', (['theta'], {}), '(theta)\n', (3332, 3339), False, 'import numpy\n'), ((2670, 2687), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {}), '(x, y)\n', (2681, 2687), False, 'from matplotlib import pyplot\n'), ((3227, 3258), 'numpy.arctan2', 'numpy.arctan2', (['(y - 0.5)', '(x - 0.5)'], {}), '(y - 0.5, x - 0.5)\n', (3240, 3258), False, 'import numpy\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
def load_octmi_dat(acquisitionName, basePath="."):
    # Check that the file exists
datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + "_MI.dat")
if not os.path.exists(datFilePath):
print("Could not stat file", datFilePath)
raise NameError("File does not exist")
    # Count the number of elements
nval = 0
variableList = ""
with open(datFilePath, "r") as f:
for line in f:
if line[0] == "T":
if line != variableList:
variableList = line
# print variableList
else:
nval = nval + 1
variableList = variableList.split(" ")
dictionnaire = dict()
dictionnaire["nval"] = nval
if nval > 1:
for i in range(len(variableList)):
dictionnaire[variableList[i].strip()] = np.zeros(nval)
linenum = 0
with open(datFilePath, "r") as f:
for line in f:
contentList = line.split(" ")
if contentList[0] != "Time":
if nval == 1:
for i in range(len(variableList)):
dictionnaire[variableList[i].strip()] = eval(
contentList[i].strip()
)
else:
for i in range(len(variableList)):
if i < len(contentList):
dataStr = contentList[i].strip()
if dataStr.lower() == "nan":
dictionnaire[variableList[i].strip()][linenum] = np.nan
else:
dictionnaire[variableList[i].strip()][linenum] = eval(
contentList[i].strip()
)
else:
dictionnaire[variableList[i].strip()][linenum] = np.nan
linenum = linenum + 1
return dictionnaire
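# Illustrative usage sketch (the acquisition name and base path below are hypothetical):
#   measurements = load_octmi_dat("acq_example", basePath="/path/to/data")
#   print(measurements["nval"], sorted(measurements.keys()))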
|
[
"numpy.zeros",
"os.path.exists",
"os.path.normpath"
] |
[((206, 232), 'os.path.normpath', 'os.path.normpath', (['basePath'], {}), '(basePath)\n', (222, 232), False, 'import os\n'), ((274, 301), 'os.path.exists', 'os.path.exists', (['datFilePath'], {}), '(datFilePath)\n', (288, 301), False, 'import os\n'), ((951, 965), 'numpy.zeros', 'np.zeros', (['nval'], {}), '(nval)\n', (959, 965), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# coding=utf-8
# Author: youngfeng
# Update: 07/16/2018
"""
Flash, proposed by Nair et al. (arXiv '18), aims to find the (near) optimal configuration in an unevaluated set.
STEP 1: select 80% of the original data as the dataset
STEP 2: split the dataset into a training set (30 configs) and an unevaluated set (remaining configs)
STEP 3: predict the optimal configuration in the unevaluated set, then move it from the unevaluated set to the training set
STEP 4: repeat STEP 3 until the budget (50 configs) is used up
The details of Flash are introduced in the paper "Finding Faster Configurations using FLASH".
"""
import pandas as pd
import random as rd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
class config_node:
"""
    for each configuration, we create a config_node object to save its information
    index     : actual rank
    features  : feature list
    perfs     : actual performance
    predicted : predicted performance
"""
def __init__(self, index, features, perfs, predicted):
self.index = index
self.features = features
self.perfs = perfs
self.predicted = predicted
def remove_by_index(config_pool, index):
"""
remove the selected configuration
"""
for config in config_pool:
if config.index == index:
config_pool.remove(config)
break
return config_pool
def find_lowest_rank(train_set, test_set):
"""
return the lowest rank in top 10
"""
sorted_test = sorted(test_set, key=lambda x: x.perfs[-1])
# train data
train_features = [t.features for t in train_set]
train_perfs = [t.perfs[-1] for t in train_set]
# test data
test_perfs = [t.features for t in sorted_test]
cart_model = DecisionTreeRegressor()
cart_model.fit(train_features, train_perfs)
predicted = cart_model.predict(test_perfs)
predicted_id = [[i, p] for i, p in enumerate(predicted)]
# i-> actual rank, p -> predicted value
predicted_sorted = sorted(predicted_id, key=lambda x: x[-1])
# print(predicted_sorted)
# assigning predicted ranks
predicted_rank_sorted = [[p[0], p[-1], i] for i,p in enumerate(predicted_sorted)]
# p[0] -> actual rank, p[-1] -> perdicted value, i -> predicted rank
select_few = predicted_rank_sorted[:10]
# print the predcited top-10 configuration
# for sf in select_few:
# print("actual rank:", sf[0], " actual value:", sorted_test[sf[0]].perfs[-1], " predicted value:", sf[1], " predicted rank:", sf[2])
# print("-------------")
return np.min([sf[0] for sf in select_few])
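# Note: find_lowest_rank returns the best (smallest) actual rank among the ten
# configurations the CART model predicts to be best; e.g. if the predicted top-10
# have actual ranks [12, 3, 40, ...], it returns 3.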
def predict_by_cart(train_set, test_set):
"""
return the predicted optimal condiguration
"""
train_features = [config.features for config in train_set]
train_perfs = [config.perfs[-1] for config in train_set]
test_features = [config.features for config in test_set]
cart_model = DecisionTreeRegressor()
cart_model.fit(train_features, train_perfs)
predicted = cart_model.predict(test_features)
predicted_id = [[i,p] for i,p in enumerate(predicted)]
predicted_sorted = sorted(predicted_id, key=lambda x: x[-1]) # sort test_set by predicted performance
return test_set[predicted_sorted[0][0]] # the optimal configuration
def split_data_by_fraction(csv_file, fraction):
"""
split data set and return the 80% data
"""
# step1: read from csv file
pdcontent = pd.read_csv(csv_file)
attr_list = pdcontent.columns # all feature list
# step2: split attribute - method 1
features = [i for i in attr_list if "$<" not in i]
perfs = [i for i in attr_list if "$<" in i]
sortedcontent = pdcontent.sort_values(perfs[-1]) # from small to big
# print(len(sortedcontent))
# step3: collect configuration
configs = list()
for c in range(len(pdcontent)):
configs.append(config_node(c, # actual rank
sortedcontent.iloc[c][features].tolist(), # feature list
sortedcontent.iloc[c][perfs].tolist(), # performance list
sortedcontent.iloc[c][perfs].tolist(), # predicted performance list
))
# for config in configs:
# print(config.index, "-", config.perfs, "-", config.predicted, "-", config.rank)
# step4: data split
# fraction = 0.4 # split fraction
# rd.seed(seed) # random seed
rd.shuffle(configs) # shuffle the configs
indexes = range(len(configs))
train_index = indexes[:int(fraction*len(configs))]
dataset = [configs[i] for i in train_index]
# print(len(dataset))
return dataset
def predict_by_flash(dataset, size=30, budget=50):
"""
use the budget in dataset to train a best model,
return the train_set and unevaluated_set
"""
#initilize the train set with 30 configurations
rd.shuffle(dataset)
train_set = dataset[:size]
unevaluated_set = dataset
for config in train_set:
unevaluated_set = remove_by_index(unevaluated_set, config.index) # remove train_set
while budget >= 0: # budget equals to 50
optimal_config = predict_by_cart(train_set, unevaluated_set)
# print("[add]:", optimal_config.index)
unevaluated_set = remove_by_index(unevaluated_set, optimal_config.index)
train_set.append(optimal_config)
budget = budget - 1
return [train_set, unevaluated_set]
if __name__ == "__main__":
#######################################################################################
# select 80% data
dataset = split_data_by_fraction("data/Apache_AllMeasurements.csv", 0.8)
print("### initialzation")
for i in dataset:
print(str(i.index), ",", end="")
print("\n-------------")
data = predict_by_flash(dataset)
print("### finally split")
train_set = data[0]
uneval_set = data[1]
for i in train_set:
print(str(i.index), ",", end="")
print("\n-------------")
for i in uneval_set:
print(str(i.index), ",", end="")
print("\n-------------")
#######################################################################################
lowest_rank = find_lowest_rank(train_set, uneval_set)
print(lowest_rank)
|
[
"pandas.read_csv",
"random.shuffle",
"numpy.min",
"sklearn.tree.DecisionTreeRegressor"
] |
[((1632, 1655), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1653, 1655), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((2411, 2447), 'numpy.min', 'np.min', (['[sf[0] for sf in select_few]'], {}), '([sf[0] for sf in select_few])\n', (2417, 2447), True, 'import numpy as np\n'), ((2738, 2761), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (2759, 2761), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((3228, 3249), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (3239, 3249), True, 'import pandas as pd\n'), ((4081, 4100), 'random.shuffle', 'rd.shuffle', (['configs'], {}), '(configs)\n', (4091, 4100), True, 'import random as rd\n'), ((4495, 4514), 'random.shuffle', 'rd.shuffle', (['dataset'], {}), '(dataset)\n', (4505, 4514), True, 'import random as rd\n')]
|
"""
Author: <NAME>
GitHub: wafflescore
"""
from minisom import MiniSom, asymptotic_decay
import numpy as np
import matplotlib.pyplot as plt
import itertools
from skimage import measure
from skimage.segmentation import random_walker
from skimage import filters
from scipy.spatial import distance
from collections import Counter
from timeit import default_timer as timer
import random
from acse_9_irp_wafflescore import MiscHelpers as mh
import logging
import sys
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
def compute_dim(num_sample):
"""
Compute a default dimension of the SOMs.
This function returns the dimension size of the SOMs.
The size returned is sqrt(5 * sqrt(num_sample)), with the exception
that the minimum dimension size = 10
Parameters
----------
num_sample : int
Total number of data points that will populate the SOMs
Returns
-------
int
Ideal dimension.
"""
dim = 5 * np.sqrt(num_sample)
    dim = int(np.sqrt(dim))  # built-in int; np.int is deprecated in recent NumPy
if dim < 10:
return 10
else:
return dim
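# Worked example of the sizing rule: for num_sample = 10000,
# 5 * sqrt(10000) = 500 and int(sqrt(500)) = 22, so the map would be 22 x 22.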
def som_assemble(in_data, seed, dim, lr=0.5, sigma=2.5):
"""Initialize the SOMs model for training
Parameters
----------
in_data : np.array or list
data matrix
seed : integer
random seed for reproducibility
dim : int
dimension of the SOMs distance matrix
lr : float, optional
learning rate, by default 0.5
sigma : float, optional
spread of the neighborhood function, by default 2.5
Returns
-------
MiniSom
an object of Minisom class, see minisom.py for further details
"""
# Initialization som and weights
num_features = np.shape(in_data)[1]
som = MiniSom(dim, dim, num_features, sigma=sigma, learning_rate=lr,
neighborhood_function='gaussian', random_seed=seed)
som.pca_weights_init(in_data)
return som
def plot_som(som, in_data, label, save=False, save_name='temp'):
"""plots the distance map / u-matrix of the SOMs along with the label
Parameters
----------
som : MiniSom
trained Minisom object
in_data : np.array or list
data matrix
label : np.array or list
the true label of each data point
save : bool, optional
flag, by default False
save_name : str, optional
the name which will be used to save the plot as png file,
by default 'temp'
"""
plt.figure(figsize=(9, 7))
# Plotting the response for each litho-class
plt.pcolor(som.distance_map().T, cmap='bone_r')
# plotting the distance map as background
plt.colorbar()
for t, xx in zip(label, in_data):
w = som.winner(xx) # getting the winner
# palce a marker on the winning position for the sample xx
plt.text(w[0]+.5, w[1]+.5, str(t),
color=plt.cm.rainbow(t/10.))
plt.axis([0, som.get_weights().shape[0], 0, som.get_weights().shape[1]])
if(save):
save_dir = 'SOMs_results/' + save_name + '_plot.png'
plt.savefig(save_dir)
print('Plot saved at:', save_dir)
plt.show()
def save_som_report(som, save_name, it, et, report=None):
param_vals = str(save_name) + '\n---' + \
'\niterations,' + str(it) + \
'\nelapsed time,' + str(et) + '\n\n'
# save report to file
fdir = save_name + '_report.csv'
print('Report saved at', fdir)
mode = 'w'
f1 = open(fdir, mode)
f1.write(param_vals)
if(report):
f1.write(str(report))
f1.write('\n\n--------------------\n\n')
f1.close()
print('Report saved at:', fdir)
def histedges_equalN(in_data, nbin=10):
"""generates a histogram where each bin will contain the same number of
data points
Parameters
----------
in_data : np.array or list
data array
nbin : int
number of bins to populate, by default 10
Returns
-------
np.array
numpy array of all the histogram bins
"""
ttl_dtp = len(in_data)
return np.interp(np.linspace(0, ttl_dtp, nbin + 1),
np.arange(ttl_dtp),
np.sort(in_data))
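# For example, with 1000 data points and nbin=10 the returned array holds the 11
# bin edges such that roughly 100 points fall between consecutive edges.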
def plot_u_matrix(som_u_mat):
"""Plots the distance map / u-matrix of the SOMs
Parameters
----------
    som_u_mat : np.array
        distance map / u-matrix of a trained SOM
Returns
-------
np.array
numpy array of all the histogram bins
"""
f_image = som_u_mat.flatten()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
fig.show()
ax1.pcolor(som_u_mat, cmap='bone_r')
hist = plt.hist(f_image, histedges_equalN(f_image, 10), density=True)
return hist[1]
def gen_e_model(n_map, som_label):
"""generates the Earth model from neuron map"""
som_class = []
for i in range(len(n_map)):
som_class.append(som_label[n_map[i][0]][n_map[i][1]])
return np.array(som_class)
def closest_n(value):
"""Assign cluster number to the mask's border indexes by using the
closest neighbor's value
Parameters
----------
value : np.array
        numpy array of the cluster numbers; note that the borders are marked
        with 0
Returns
-------
np.array
new label with all the border index populated
"""
borders = np.array(np.where(value == 0)).T
new_label = np.array(value)
vals = np.where(value != 0)
vals = np.array(vals).T
for b in borders:
# find index of the closest value
c_idx = distance.cdist([b], vals).argmin()
new_label[b[0], b[1]] = value[vals[c_idx, 0]][vals[c_idx, 1]]
return new_label
def KNN(value, k=5, border_val=0):
"""Assign cluster number to the mask's border indexes by using the
K-nearest neighbor method
Parameters
----------
value : np.array
        numpy array of the cluster numbers; note that the borders are marked
        with 0
k : int, optional
number of neighbor to consider, by default 5
Returns
-------
np.array
new label with all the border index populated
"""
borders = np.array(np.where(value == border_val)).T
new_label = np.array(value)
vals = np.where(value != 0)
if(len(vals[0]) < 5):
logging.info("Not enough labeled neighbor to perform KNN.\n\
Will return the original inputted value.")
return value
vals = np.array(vals).T
for b in borders:
# find index of the closest k neighbors
dist = distance.cdist([b], vals)
c_idx = np.argpartition(dist, k)
c_idx = c_idx[0, :k]
mins_idx = np.array(list(zip(vals[c_idx, 0], vals[c_idx, 1])))
class_counter = Counter()
for idx in mins_idx:
class_counter[value[idx[0], idx[1]]] += 1
cl = class_counter.most_common(1)[0][0]
new_label[b[0], b[1]] = cl
return new_label
def watershed_level(image, bins, border_width=0.1, plot=False, conn=None):
    """Computes and classifies the SOM's u-matrix or total gradient using
watershed classification method
Parameters
----------
image : np.array
u-matrix or total gradient of the SOMs
bins : np.array
numpy array of all the histogram bins
plot : bool, optional
flag whether to plot the watershed level or not, by default False
conn : int, optional
connectivity flag for measure.label, by default None
Returns
-------
np.array
numpy array of predicted cluster labels from each watershed level
"""
ncols = 6
if(plot):
fig, axes = plt.subplots(ncols=ncols, nrows=num_bins,
figsize=(12, num_bins*3),
sharex=True, sharey=True)
ax = axes.ravel()
ws_labels = np.zeros((num_bins * ncols, image.shape[0], image.shape[1]))
for i in range(num_bins):
val = filters.threshold_local(image, block_size=3 + 2*i)
block_mask = (image < val)
markers = measure.label(block_mask, connectivity=conn)
ws_labels[i*ncols] = closest_n(markers) - 1
ws_labels[i*ncols + 1] = KNN(markers) - 1
ws_labels[i*ncols + 2] = random_walker(image, markers)
if(plot):
ax[i*ncols].imshow(ws_labels[i*ncols + 0], origin='lower')
ax[i*ncols].title.set_text('b_cn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 0]))))
ax[i*ncols + 1].imshow(ws_labels[i*ncols + 1], origin='lower')
ax[i*ncols + 1].title.set_text('b_knn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 1]))))
ax[i*ncols + 2].imshow(ws_labels[i*ncols + 2], origin='lower')
ax[i*ncols + 2].title.set_text('b_rw: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 2]))))
thres_mask = (image <= bins[i])
markers = measure.label(thres_mask, connectivity=conn)
ws_labels[i*ncols + 3] = closest_n(markers) - 1
ws_labels[i*ncols + 4] = KNN(markers) - 1
ws_labels[i*ncols + 5] = random_walker(image, markers)
if(plot):
ax[i*ncols + 3].imshow(ws_labels[i*ncols + 3], origin='lower')
ax[i*ncols + 3].title.set_text('b_cn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 3]))))
ax[i*ncols + 4].imshow(ws_labels[i*ncols + 4], origin='lower')
ax[i*ncols + 4].title.set_text('b_knn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 4]))))
ax[i*ncols + 5].imshow(ws_labels[i*ncols + 5], origin='lower')
ax[i*ncols + 5].title.set_text('b_rw: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 5]))))
return ws_labels
def eval_ws(in_data, ws_labels, n_map, label=None, re_all=False):
"""Evaluate and return the best watershed prediction result
Parameters
----------
in_data : np.array or list
data matrix
ws_labels : np.array
predicted cluster labels from watershed segmentation
n_map : np.array
array of the winner neuron
label : np.array or list, optional
the true label of each data point
Returns
-------
np.array
list of best watershed labels, may contain more than one set
"""
len_watershed = ws_labels.shape[0]
cluster_labels = np.zeros((len_watershed, len(in_data)))
avg_sils = np.full(len_watershed, np.nan)
ch_scs = np.full(len_watershed, np.nan)
if(label is not None):
avg_ents = np.full(len_watershed, np.nan)
avg_purs = np.full(len_watershed, np.nan)
for i in range(len_watershed):
param = {'watershed idx': i}
if(len(np.unique(ws_labels[i])) > 1):
cluster_labels[i] = gen_e_model(n_map, ws_labels[i])
avg_sils[i] = mh.int_eval_silhouette(in_data, cluster_labels[i],
method='som_watershed',
param=param)
try:
ch_scs[i] = mh.cal_har_sc(in_data, cluster_labels[i])
            except Exception:  # fall back when the score cannot be computed
ch_scs[i] = -1
if(label is not None):
avg_ents[i], avg_purs[i] = mh.ext_eval_entropy(label,
cluster_labels[i])
best_idx = []
best_idx.append(np.nanargmax(np.array(avg_sils))) # closest to 1
best_idx.append(np.nanargmax(ch_scs)) # higher = better
if(label is not None):
best_idx.append(np.nanargmin(np.array(avg_ents))) # closest to 0
best_idx.append(np.nanargmax(np.array(avg_purs))) # closest to 1
best_idx = np.unique(best_idx)
if(re_all):
return (cluster_labels, avg_sils,
ch_scs, best_idx)
else:
return (cluster_labels[best_idx], avg_sils[best_idx],
ch_scs[best_idx])
def run_SOMs(in_data, dim, iter_cnt, lr, sigma, seed=10):
"""Method to fully run SOMs
Parameters
----------
in_data : np.array or list
data matrix
dim : int
dimension of the SOMs distance matrix
iter_cnt : integer
number of iterations for SOMs to perform
lr : float
learning rate
sigma : float
        spread of the neighborhood function
seed : integer, optional
random seed for reproducibility, by default 10
Returns
-------
minisom
minisom object
np.array
cluster label
"""
som = som_assemble(in_data, seed, dim, lr, sigma)
som.train_random(in_data, iter_cnt, verbose=False)
u_matrix = som.distance_map().T
watershed_bins = histedges_equalN(u_matrix.flatten())
ws_labels = watershed_level(u_matrix, watershed_bins)
n_map = som.neuron_map(in_data)
cluster_labels, _, _ = eval_ws(in_data, ws_labels, n_map)
return som, cluster_labels
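# Minimal usage sketch (parameter values are illustrative; `X` is a data matrix
# with one row per sample):
#   som, labels = run_SOMs(X, dim=compute_dim(len(X)), iter_cnt=1000, lr=0.5, sigma=2.5)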
def gen_param_grid(init_guess):
g_dim, g_it, g_lr, g_sigma = init_guess
min_dim = g_dim - 10 if g_dim - 5 > 10 else 10
max_dim = g_dim + 10 if g_dim + 10 > 10 else 20
param_grid = {
'dim': list(range(min_dim, max_dim+1)),
'iter_cnt': list(range(g_it - 500, g_it + 500, 200)),
'learning_rate': list(np.logspace(np.log10(0.25), np.log10(0.75),
base=10, num=100)),
'sigma': list(np.linspace(g_sigma-1, g_sigma+1, num=30)),
}
return param_grid
def random_search_som(in_data, init_guess, max_eval=20, label=None, seed=10,
re_all=False):
"""perform random search for SOMs best parameters.
Parameters
----------
in_data : np.array or list
data matrix
init_guess : tuple
list of initial guess of the parameters, in order of dimension,
number of iterations, learning rate, and sigma
max_eval : int, optional
number of max iterartion to perform the search, by default 20
label : np.array or list, optional
the true label of each data point, by default None
seed : integer, optional
random seed for reproducibility, by default 10
Returns
-------
All cluster label and its counterpart parameters.
"""
random.seed(seed)
param_grid = gen_param_grid(init_guess)
dims = np.zeros(max_eval)
iters = np.zeros(max_eval)
lrs = np.zeros(max_eval)
sigmas = np.zeros(max_eval)
avg_sils = np.full(max_eval, np.nan)
ch_scs = np.full(max_eval, np.nan)
cluster_labels = np.zeros((max_eval, len(in_data)))
if(label is not None):
avg_ents = np.full(max_eval, np.nan)
avg_purs = np.full(max_eval, np.nan)
i = 0
while i < max_eval:
random_params = {k: random.sample(v, 1)[0]
for k, v in param_grid.items()}
dims[i], iters[i], lrs[i], sigmas[i] = list(random_params.values())
som = som_assemble(in_data, seed, int(dims[i]), lr=lrs[i], sigma=sigmas[i])
som.train_random(in_data, int(iters[i]), verbose=False)
u_matrix = som.distance_map().T
watershed_bins = histedges_equalN(u_matrix.flatten())
ws_labels = watershed_level(u_matrix, watershed_bins)
n_map = som.neuron_map(in_data)
_c, _as, _ch = eval_ws(in_data, ws_labels, n_map)
cluster_labels[i], avg_sils[i], ch_scs[i] = _c[0], _as[0], _ch[0]
n_clusters = len(np.unique(cluster_labels[i]))
if(n_clusters < 5 or n_clusters > 30):
logging.info("Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f\
result to very small / large number of clusters (n_clusters = %d)\
" % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))
continue
logging.info("dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f" % (dims[i], iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))
if(label is not None):
avg_ents[i], avg_purs[i] = mh.ext_eval_entropy(label, cluster_labels[i], init_clus=-1)
logging.info("ent=%.6f, pur=%.6f" % (avg_ents[i], avg_purs[i]))
i += 1
best_idx = []
best_idx.append(np.nanargmax(np.array(avg_sils))) # closest to 1
best_idx.append(np.nanargmax(ch_scs)) # higher = better
if(label is not None):
best_idx.append(np.nanargmin(np.array(avg_ents))) # closest to 0
best_idx.append(np.nanargmax(np.array(avg_purs))) # closest to 1
best_idx = np.unique(best_idx)
if(re_all):
return (cluster_labels, avg_sils,
ch_scs, dims, iters, lrs, sigmas, best_idx)
else:
return (cluster_labels[best_idx], avg_sils[best_idx],
ch_scs[best_idx], dims[best_idx], iters[best_idx],
lrs[best_idx], sigmas[best_idx])
|
[
"random.sample",
"matplotlib.pyplot.cm.rainbow",
"acse_9_irp_wafflescore.MiscHelpers.ext_eval_entropy",
"numpy.shape",
"numpy.argpartition",
"matplotlib.pyplot.figure",
"skimage.measure.label",
"numpy.arange",
"numpy.unique",
"numpy.full",
"matplotlib.pyplot.colorbar",
"random.seed",
"numpy.linspace",
"collections.Counter",
"numpy.log10",
"matplotlib.pyplot.subplots",
"scipy.spatial.distance.cdist",
"matplotlib.pyplot.show",
"skimage.filters.threshold_local",
"numpy.sort",
"numpy.nanargmax",
"acse_9_irp_wafflescore.MiscHelpers.cal_har_sc",
"logging.basicConfig",
"skimage.segmentation.random_walker",
"numpy.zeros",
"acse_9_irp_wafflescore.MiscHelpers.int_eval_silhouette",
"logging.info",
"numpy.where",
"numpy.array",
"minisom.MiniSom",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((468, 582), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s | %(levelname)s : %(message)s"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format='%(asctime)s | %(levelname)s : %(message)s',\n level=logging.INFO, stream=sys.stdout)\n", (487, 582), False, 'import logging\n'), ((1828, 1946), 'minisom.MiniSom', 'MiniSom', (['dim', 'dim', 'num_features'], {'sigma': 'sigma', 'learning_rate': 'lr', 'neighborhood_function': '"""gaussian"""', 'random_seed': 'seed'}), "(dim, dim, num_features, sigma=sigma, learning_rate=lr,\n neighborhood_function='gaussian', random_seed=seed)\n", (1835, 1946), False, 'from minisom import MiniSom, asymptotic_decay\n'), ((2546, 2572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (2556, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2738), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2736, 2738), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3220, 3222), True, 'import matplotlib.pyplot as plt\n'), ((4583, 4618), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 5)'}), '(1, 2, figsize=(12, 5))\n', (4595, 4618), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5002), 'numpy.array', 'np.array', (['som_class'], {}), '(som_class)\n', (4991, 5002), True, 'import numpy as np\n'), ((5437, 5452), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5445, 5452), True, 'import numpy as np\n'), ((5465, 5485), 'numpy.where', 'np.where', (['(value != 0)'], {}), '(value != 0)\n', (5473, 5485), True, 'import numpy as np\n'), ((6251, 6266), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (6259, 6266), True, 'import numpy as np\n'), ((6279, 6299), 'numpy.where', 'np.where', (['(value != 0)'], {}), '(value != 0)\n', (6287, 6299), True, 'import numpy as np\n'), ((7918, 7978), 'numpy.zeros', 'np.zeros', (['(num_bins * ncols, image.shape[0], image.shape[1])'], {}), '((num_bins * ncols, image.shape[0], image.shape[1]))\n', (7926, 7978), True, 'import numpy as np\n'), ((10748, 10778), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10755, 10778), True, 'import numpy as np\n'), ((10792, 10822), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10799, 10822), True, 'import numpy as np\n'), ((12026, 12045), 'numpy.unique', 'np.unique', (['best_idx'], {}), '(best_idx)\n', (12035, 12045), True, 'import numpy as np\n'), ((14588, 14605), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14599, 14605), False, 'import random\n'), ((14671, 14689), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14679, 14689), True, 'import numpy as np\n'), ((14702, 14720), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14710, 14720), True, 'import numpy as np\n'), ((14731, 14749), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14739, 14749), True, 'import numpy as np\n'), ((14763, 14781), 'numpy.zeros', 'np.zeros', (['max_eval'], {}), '(max_eval)\n', (14771, 14781), True, 'import numpy as np\n'), ((14798, 14823), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (14805, 14823), True, 'import numpy as np\n'), ((14837, 14862), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (14844, 14862), True, 'import numpy as np\n'), ((16878, 16897), 'numpy.unique', 'np.unique', (['best_idx'], {}), 
'(best_idx)\n', (16887, 16897), True, 'import numpy as np\n'), ((1052, 1071), 'numpy.sqrt', 'np.sqrt', (['num_sample'], {}), '(num_sample)\n', (1059, 1071), True, 'import numpy as np\n'), ((1089, 1101), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (1096, 1101), True, 'import numpy as np\n'), ((1797, 1814), 'numpy.shape', 'np.shape', (['in_data'], {}), '(in_data)\n', (1805, 1814), True, 'import numpy as np\n'), ((3144, 3165), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_dir'], {}), '(save_dir)\n', (3155, 3165), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4187), 'numpy.linspace', 'np.linspace', (['(0)', 'ttl_dtp', '(nbin + 1)'], {}), '(0, ttl_dtp, nbin + 1)\n', (4165, 4187), True, 'import numpy as np\n'), ((4210, 4228), 'numpy.arange', 'np.arange', (['ttl_dtp'], {}), '(ttl_dtp)\n', (4219, 4228), True, 'import numpy as np\n'), ((4251, 4267), 'numpy.sort', 'np.sort', (['in_data'], {}), '(in_data)\n', (4258, 4267), True, 'import numpy as np\n'), ((5497, 5511), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5505, 5511), True, 'import numpy as np\n'), ((6334, 6470), 'logging.info', 'logging.info', (['"""Not enough labeled neighbor to perform KNN.\n Will return the original inputted value."""'], {}), '(\n """Not enough labeled neighbor to perform KNN.\n Will return the original inputted value."""\n )\n', (6346, 6470), False, 'import logging\n'), ((6492, 6506), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (6500, 6506), True, 'import numpy as np\n'), ((6595, 6620), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[b]', 'vals'], {}), '([b], vals)\n', (6609, 6620), False, 'from scipy.spatial import distance\n'), ((6637, 6661), 'numpy.argpartition', 'np.argpartition', (['dist', 'k'], {}), '(dist, k)\n', (6652, 6661), True, 'import numpy as np\n'), ((6787, 6796), 'collections.Counter', 'Counter', ([], {}), '()\n', (6794, 6796), False, 'from collections import Counter\n'), ((7715, 7814), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'ncols', 'nrows': 'num_bins', 'figsize': '(12, num_bins * 3)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=ncols, nrows=num_bins, figsize=(12, num_bins * 3),\n sharex=True, sharey=True)\n', (7727, 7814), True, 'import matplotlib.pyplot as plt\n'), ((8024, 8076), 'skimage.filters.threshold_local', 'filters.threshold_local', (['image'], {'block_size': '(3 + 2 * i)'}), '(image, block_size=3 + 2 * i)\n', (8047, 8076), False, 'from skimage import filters\n'), ((8128, 8172), 'skimage.measure.label', 'measure.label', (['block_mask'], {'connectivity': 'conn'}), '(block_mask, connectivity=conn)\n', (8141, 8172), False, 'from skimage import measure\n'), ((8308, 8337), 'skimage.segmentation.random_walker', 'random_walker', (['image', 'markers'], {}), '(image, markers)\n', (8321, 8337), False, 'from skimage.segmentation import random_walker\n'), ((9115, 9159), 'skimage.measure.label', 'measure.label', (['thres_mask'], {'connectivity': 'conn'}), '(thres_mask, connectivity=conn)\n', (9128, 9159), False, 'from skimage import measure\n'), ((9299, 9328), 'skimage.segmentation.random_walker', 'random_walker', (['image', 'markers'], {}), '(image, markers)\n', (9312, 9328), False, 'from skimage.segmentation import random_walker\n'), ((10870, 10900), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10877, 10900), True, 'import numpy as np\n'), ((10920, 10950), 'numpy.full', 'np.full', (['len_watershed', 'np.nan'], {}), '(len_watershed, np.nan)\n', (10927, 10950), True, 'import numpy as 
np\n'), ((11776, 11796), 'numpy.nanargmax', 'np.nanargmax', (['ch_scs'], {}), '(ch_scs)\n', (11788, 11796), True, 'import numpy as np\n'), ((14966, 14991), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (14973, 14991), True, 'import numpy as np\n'), ((15011, 15036), 'numpy.full', 'np.full', (['max_eval', 'np.nan'], {}), '(max_eval, np.nan)\n', (15018, 15036), True, 'import numpy as np\n'), ((16129, 16270), 'logging.info', 'logging.info', (["('dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f' % (dims[i],\n iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))"], {}), "('dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f' % (\n dims[i], iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))\n", (16141, 16270), False, 'import logging\n'), ((16628, 16648), 'numpy.nanargmax', 'np.nanargmax', (['ch_scs'], {}), '(ch_scs)\n', (16640, 16648), True, 'import numpy as np\n'), ((5397, 5417), 'numpy.where', 'np.where', (['(value == 0)'], {}), '(value == 0)\n', (5405, 5417), True, 'import numpy as np\n'), ((6202, 6231), 'numpy.where', 'np.where', (['(value == border_val)'], {}), '(value == border_val)\n', (6210, 6231), True, 'import numpy as np\n'), ((11161, 11252), 'acse_9_irp_wafflescore.MiscHelpers.int_eval_silhouette', 'mh.int_eval_silhouette', (['in_data', 'cluster_labels[i]'], {'method': '"""som_watershed"""', 'param': 'param'}), "(in_data, cluster_labels[i], method='som_watershed',\n param=param)\n", (11183, 11252), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((11714, 11732), 'numpy.array', 'np.array', (['avg_sils'], {}), '(avg_sils)\n', (11722, 11732), True, 'import numpy as np\n'), ((13733, 13778), 'numpy.linspace', 'np.linspace', (['(g_sigma - 1)', '(g_sigma + 1)'], {'num': '(30)'}), '(g_sigma - 1, g_sigma + 1, num=30)\n', (13744, 13778), True, 'import numpy as np\n'), ((15773, 15801), 'numpy.unique', 'np.unique', (['cluster_labels[i]'], {}), '(cluster_labels[i])\n', (15782, 15801), True, 'import numpy as np\n'), ((15862, 16096), 'logging.info', 'logging.info', (["('Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f result to very small / large number of clusters (n_clusters = %d) '\n % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))"], {}), "(\n 'Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f result to very small / large number of clusters (n_clusters = %d) '\n % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))\n", (15874, 16096), False, 'import logging\n'), ((16345, 16404), 'acse_9_irp_wafflescore.MiscHelpers.ext_eval_entropy', 'mh.ext_eval_entropy', (['label', 'cluster_labels[i]'], {'init_clus': '(-1)'}), '(label, cluster_labels[i], init_clus=-1)\n', (16364, 16404), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((16418, 16481), 'logging.info', 'logging.info', (["('ent=%.6f, pur=%.6f' % (avg_ents[i], avg_purs[i]))"], {}), "('ent=%.6f, pur=%.6f' % (avg_ents[i], avg_purs[i]))\n", (16430, 16481), False, 'import logging\n'), ((16566, 16584), 'numpy.array', 'np.array', (['avg_sils'], {}), '(avg_sils)\n', (16574, 16584), True, 'import numpy as np\n'), ((2960, 2984), 'matplotlib.pyplot.cm.rainbow', 'plt.cm.rainbow', (['(t / 10.0)'], {}), '(t / 10.0)\n', (2974, 2984), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5620), 'scipy.spatial.distance.cdist', 'distance.cdist', (['[b]', 'vals'], {}), '([b], vals)\n', (5609, 5620), False, 'from scipy.spatial import distance\n'), ((11039, 11062), 'numpy.unique', 'np.unique', (['ws_labels[i]'], {}), '(ws_labels[i])\n', (11048, 11062), True, 'import 
numpy as np\n'), ((11386, 11427), 'acse_9_irp_wafflescore.MiscHelpers.cal_har_sc', 'mh.cal_har_sc', (['in_data', 'cluster_labels[i]'], {}), '(in_data, cluster_labels[i])\n', (11399, 11427), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((11557, 11602), 'acse_9_irp_wafflescore.MiscHelpers.ext_eval_entropy', 'mh.ext_eval_entropy', (['label', 'cluster_labels[i]'], {}), '(label, cluster_labels[i])\n', (11576, 11602), True, 'from acse_9_irp_wafflescore import MiscHelpers as mh\n'), ((11898, 11916), 'numpy.array', 'np.array', (['avg_ents'], {}), '(avg_ents)\n', (11906, 11916), True, 'import numpy as np\n'), ((11973, 11991), 'numpy.array', 'np.array', (['avg_purs'], {}), '(avg_purs)\n', (11981, 11991), True, 'import numpy as np\n'), ((13617, 13631), 'numpy.log10', 'np.log10', (['(0.25)'], {}), '(0.25)\n', (13625, 13631), True, 'import numpy as np\n'), ((13633, 13647), 'numpy.log10', 'np.log10', (['(0.75)'], {}), '(0.75)\n', (13641, 13647), True, 'import numpy as np\n'), ((15104, 15123), 'random.sample', 'random.sample', (['v', '(1)'], {}), '(v, 1)\n', (15117, 15123), False, 'import random\n'), ((16750, 16768), 'numpy.array', 'np.array', (['avg_ents'], {}), '(avg_ents)\n', (16758, 16768), True, 'import numpy as np\n'), ((16825, 16843), 'numpy.array', 'np.array', (['avg_purs'], {}), '(avg_purs)\n', (16833, 16843), True, 'import numpy as np\n'), ((8544, 8579), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 0]'], {}), '(ws_labels[i * ncols + 0])\n', (8553, 8579), True, 'import numpy as np\n'), ((8782, 8817), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 1]'], {}), '(ws_labels[i * ncols + 1])\n', (8791, 8817), True, 'import numpy as np\n'), ((9019, 9054), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 2]'], {}), '(ws_labels[i * ncols + 2])\n', (9028, 9054), True, 'import numpy as np\n'), ((9547, 9582), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 3]'], {}), '(ws_labels[i * ncols + 3])\n', (9556, 9582), True, 'import numpy as np\n'), ((9785, 9820), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 4]'], {}), '(ws_labels[i * ncols + 4])\n', (9794, 9820), True, 'import numpy as np\n'), ((10022, 10057), 'numpy.unique', 'np.unique', (['ws_labels[i * ncols + 5]'], {}), '(ws_labels[i * ncols + 5])\n', (10031, 10057), True, 'import numpy as np\n')]
|
import numpy as np
class DecisionTreeClassifierTranspiler(object):
def __init__(self, model):
self.model = model
self.build_classes()
self.build_feature_idx()
self.build_right_nodes()
self.build_thresholds()
def build_feature_idx(self):
self.features_idx = ','.join(self.model.tree_.feature.astype(str))
def build_classes(self):
class_aux = list(map(lambda x : x[0], self.model.tree_.value))
self.classes = np.argmax(class_aux, axis = 1)
self.classes = ','.join(self.classes.astype(str))
def build_right_nodes(self):
self.right_nodes = ','.join(self.model.tree_.children_right.astype(str)).replace('-1', '0')
def build_thresholds(self):
self.thresholds = ','.join(self.model.tree_.threshold.astype(str))
def generate_code(self):
return """
/*
The following code was generated using Clara.Transpiler. For more information please visit: https://github.com/asergiobranco/clara
*/
#define NO_NODES %s
unsigned char classes[NO_NODES] = {%s};
int FEATURE_IDX_NODE[NO_NODES] = {%s};
int RIGHT_CHILDS[NO_NODES] = {%s};
float THRESHOLDS[NO_NODES] = {%s};
int predict(double * sample){
unsigned int current_node = 0;
int feature_idx = FEATURE_IDX_NODE[0];
while(feature_idx >= 0){
if(sample[feature_idx] <= THRESHOLDS[current_node]){
current_node++;
}
else{
current_node = RIGHT_CHILDS[current_node];
}
feature_idx = FEATURE_IDX_NODE[current_node];
}
return classes[current_node];
}
""" % (self.model.tree_.node_count, self.classes, self.features_idx, self.right_nodes, self.thresholds)
|
[
"numpy.argmax"
] |
[((488, 516), 'numpy.argmax', 'np.argmax', (['class_aux'], {'axis': '(1)'}), '(class_aux, axis=1)\n', (497, 516), True, 'import numpy as np\n')]
|
import numpy as np
import pickle as pkl
def function_generator(init_num):
seq = np.array([], dtype='int')
n = init_num
seq = np.append(seq, n)
while True:
if ((n%2)==0):
next_number = n/2
next_number = np.asarray(next_number, dtype='int')
seq = np.append(seq, next_number)
if next_number==1:
break
else:
next_number = (3*n)+1
next_number = np.asarray(next_number, dtype='int')
seq = np.append(seq, next_number)
n = next_number
return seq
output_seq_data = []
output_seq_length = []
x_train = []
y_train = []
num = 0
for n in range(0,10000):
sequence = function_generator(n+1)
seq_len = len(sequence)
x_training = sequence[:(seq_len-1)]
x_training = np.array(x_training, dtype='int')
y_training = sequence[1:seq_len]
y_training = np.array(y_training, dtype='int')
output_seq_data.append(sequence)
output_seq_length.append(seq_len)
x_train.append(x_training)
y_train.append(y_training)
output_seq_data = np.asarray(output_seq_data)
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
print(y_train[26])
output_seq_length = np.asarray(output_seq_length)
max_length = output_seq_length.max()
# print(max_length)
# print(x_train[26])
# np.save('generated_data.npy', gen_data)
# np.save('x_train.npy', x_train)
# np.save('y_train.npy', y_train)
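# Illustrative check (editor addition): the Collatz sequence starting at 6 is
# 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
assert function_generator(6).tolist() == [6, 3, 10, 5, 16, 8, 4, 2, 1]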
|
[
"numpy.append",
"numpy.asarray",
"numpy.array"
] |
[((1092, 1119), 'numpy.asarray', 'np.asarray', (['output_seq_data'], {}), '(output_seq_data)\n', (1102, 1119), True, 'import numpy as np\n'), ((1130, 1149), 'numpy.asarray', 'np.asarray', (['x_train'], {}), '(x_train)\n', (1140, 1149), True, 'import numpy as np\n'), ((1160, 1179), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (1170, 1179), True, 'import numpy as np\n'), ((1220, 1249), 'numpy.asarray', 'np.asarray', (['output_seq_length'], {}), '(output_seq_length)\n', (1230, 1249), True, 'import numpy as np\n'), ((86, 111), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (94, 111), True, 'import numpy as np\n'), ((139, 156), 'numpy.append', 'np.append', (['seq', 'n'], {}), '(seq, n)\n', (148, 156), True, 'import numpy as np\n'), ((813, 846), 'numpy.array', 'np.array', (['x_training'], {'dtype': '"""int"""'}), "(x_training, dtype='int')\n", (821, 846), True, 'import numpy as np\n'), ((901, 934), 'numpy.array', 'np.array', (['y_training'], {'dtype': '"""int"""'}), "(y_training, dtype='int')\n", (909, 934), True, 'import numpy as np\n'), ((252, 288), 'numpy.asarray', 'np.asarray', (['next_number'], {'dtype': '"""int"""'}), "(next_number, dtype='int')\n", (262, 288), True, 'import numpy as np\n'), ((307, 334), 'numpy.append', 'np.append', (['seq', 'next_number'], {}), '(seq, next_number)\n', (316, 334), True, 'import numpy as np\n'), ((462, 498), 'numpy.asarray', 'np.asarray', (['next_number'], {'dtype': '"""int"""'}), "(next_number, dtype='int')\n", (472, 498), True, 'import numpy as np\n'), ((517, 544), 'numpy.append', 'np.append', (['seq', 'next_number'], {}), '(seq, next_number)\n', (526, 544), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class FullModel(nn.Module):
"""
Distribute the loss on multi-gpu to reduce
the memory cost in the main gpu.
You can check the following discussion.
https://discuss.pytorch.org/t/dataparallel-imbalanced-memory-usage/22551/21
"""
def __init__(self, model, loss):
super(FullModel, self).__init__()
self.model = model
self.loss = loss
def forward(self, inputs, labels):
outputs = self.model(inputs)
loss = self.loss(outputs, labels)
return loss, outputs
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = None
self.avg = None
self.sum = None
self.count = None
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def create_logger(cfg, cfg_name, phase='train'):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print('=> creating {}'.format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split('.')[0]
final_output_dir = root_output_dir / dataset / cfg_name
print('=> creating {}'.format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime('%Y-%m-%d-%H-%M')
log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file),
format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
(cfg_name + '_' + time_str)
print('=> creating {}'.format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
"""
    Calculate the confusion matrix for the given label and pred
"""
output = pred.cpu().numpy().transpose(0, 2, 3, 1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
seg_gt = np.asarray(
label.cpu().numpy()[:, :size[-2], :size[-1]], dtype=np.int)
ignore_index = seg_gt != ignore
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
index = (seg_gt * num_class + seg_pred).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((num_class, num_class))
for i_label in range(num_class):
for i_pred in range(num_class):
cur_index = i_label * num_class + i_pred
if cur_index < len(label_count):
confusion_matrix[i_label,
i_pred] = label_count[cur_index]
return confusion_matrix
def get_optimizer(config, model):
_nwd_keys = ('bias', 'bn', 'norm', 'prelu', 'nwd')
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = config.TRAIN.LR
weight_decay = config.TRAIN.WD
if 'head' in key:
lr *= 10
if any(key.find(sub) != -1 for sub in _nwd_keys):
weight_decay = 0
logger.info(f'Params: {key}, LR: {lr}, Weight_Decay: {weight_decay}')
elif 'base' in key:
if any(key.find(sub) != -1 for sub in _nwd_keys):
weight_decay = 0
logger.info(f'Params: {key}, LR: {lr}, Weight_Decay: {weight_decay}')
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if config.TRAIN.OPTIMIZER == 'sgd':
optimizer = torch.optim.SGD(params,
lr=config.TRAIN.LR,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD,
nesterov=config.TRAIN.NESTEROV,
)
elif config.TRAIN.OPTIMIZER == 'adam':
optimizer = torch.optim.Adam(params,
lr=config.TRAIN.LR,
amsgrad=config.TRAIN.AMSGRAD
)
else:
raise NotImplementedError
return optimizer
def get_group_gn(dim, dim_per_gp, num_groups):
"""get number of groups used by GroupNorm, based on number of channels."""
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0, \
"dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0, \
"dim: {}, num_groups: {}".format(dim, num_groups)
group_gn = num_groups
return group_gn
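# Illustrative sketch (editor addition, not part of the original module): AverageMeter
# keeps a weighted running average; the values below are arbitrary.
if __name__ == "__main__":
    meter = AverageMeter()
    meter.update(0.5)            # value 0.5 with the default weight of 1
    meter.update(1.0, weight=2)  # value 1.0 with weight 2
    assert abs(meter.average() - 2.5 / 3) < 1e-9
    print("weighted running average:", meter.average())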
|
[
"torch.distributed.is_initialized",
"numpy.argmax",
"torch.distributed.get_rank",
"os.path.basename",
"logging.StreamHandler",
"numpy.zeros",
"time.strftime",
"pathlib.Path",
"torch.optim.Adam",
"torch.distributed.get_world_size",
"numpy.bincount",
"logging.getLogger",
"torch.optim.SGD"
] |
[((491, 518), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (508, 518), False, 'import logging\n'), ((1111, 1145), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (1143, 1145), False, 'import torch\n'), ((1239, 1267), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1265, 1267), False, 'import torch\n'), ((2178, 2198), 'pathlib.Path', 'Path', (['cfg.OUTPUT_DIR'], {}), '(cfg.OUTPUT_DIR)\n', (2182, 2198), False, 'from pathlib import Path\n'), ((2649, 2680), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M"""'], {}), "('%Y-%m-%d-%H-%M')\n", (2662, 2680), False, 'import time\n'), ((2938, 2957), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2955, 2957), False, 'import logging\n'), ((3006, 3029), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3027, 3029), False, 'import logging\n'), ((3908, 3926), 'numpy.bincount', 'np.bincount', (['index'], {}), '(index)\n', (3919, 3926), True, 'import numpy as np\n'), ((3950, 3982), 'numpy.zeros', 'np.zeros', (['(num_class, num_class)'], {}), '((num_class, num_class))\n', (3958, 3982), True, 'import numpy as np\n'), ((1047, 1081), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1079, 1081), False, 'import torch\n'), ((1175, 1209), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1207, 1209), False, 'import torch\n'), ((3588, 3613), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(3)'}), '(output, axis=3)\n', (3597, 3613), True, 'import numpy as np\n'), ((5150, 5291), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': 'config.TRAIN.LR', 'momentum': 'config.TRAIN.MOMENTUM', 'weight_decay': 'config.TRAIN.WD', 'nesterov': 'config.TRAIN.NESTEROV'}), '(params, lr=config.TRAIN.LR, momentum=config.TRAIN.MOMENTUM,\n weight_decay=config.TRAIN.WD, nesterov=config.TRAIN.NESTEROV)\n', (5165, 5291), False, 'import torch\n'), ((3034, 3055), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (3051, 3055), False, 'import logging\n'), ((5533, 5607), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'config.TRAIN.LR', 'amsgrad': 'config.TRAIN.AMSGRAD'}), '(params, lr=config.TRAIN.LR, amsgrad=config.TRAIN.AMSGRAD)\n', (5549, 5607), False, 'import torch\n'), ((2421, 2447), 'os.path.basename', 'os.path.basename', (['cfg_name'], {}), '(cfg_name)\n', (2437, 2447), False, 'import os\n'), ((3103, 3120), 'pathlib.Path', 'Path', (['cfg.LOG_DIR'], {}), '(cfg.LOG_DIR)\n', (3107, 3120), False, 'from pathlib import Path\n')]
|
import numpy as np
from openmdao.api import CaseReader
from optigurator.utils import recording_filename
def get_case_reader(data_dir, problem_constants):
return CaseReader(recording_filename(data_dir, problem_constants.id))
def generate_valid_points(problem_constants, crm):
for (i, case_id) in enumerate(crm.list_cases()):
model_case = crm.get_case(case_id)
if (
model_case.outputs["usability.min_max_step_height"][1]
<= problem_constants.step_height.upper
and model_case.outputs["usability.min_max_step_depth"][0]
>= problem_constants.step_depth.lower
and model_case.outputs["usability.min_free_height"][0]
> problem_constants.free_height_lower
):
yield [
model_case.outputs["price_availability.total_price"][0],
model_case.outputs["usability.usability_penalty"][0],
model_case.outputs["price_availability.total_delivery_time"][0],
i,
]
def calculate(inputPoints, dominates):
paretoPoints = set()
candidateRowNr = 0
dominatedPoints = set()
normalizedRowNr = 0
    # creates a copy of the matrix that is normalized later
normalizedPoints = np.array(inputPoints.copy())
sum1 = 0
sum2 = 0
sum3 = 0
sum4 = 0
for i in range(0, len(normalizedPoints)):
        # sums values column-wise into the denominator for normalization
sum1 = sum1 + normalizedPoints[i, 0] ** 2
sum2 = sum2 + normalizedPoints[i, 1] ** 2
sum3 = sum3 + normalizedPoints[i, 2] ** 2
    # defines a vector with normalization values
    myarray_normalize = [sum1 ** 0.5, sum2 ** 0.5, sum3 ** 0.5, 1]
    # Normalizes the matrix
normalizedPoints = np.array(inputPoints) / np.array(myarray_normalize)
while True:
candidateRow = inputPoints[candidateRowNr]
normalized = normalizedPoints[normalizedRowNr]
normalizedPoints = np.delete(normalizedPoints, normalizedRowNr, 0)
inputPoints.remove(candidateRow)
rowNr = 0
nonDominated = True
while len(normalizedPoints) != 0 and rowNr < len(normalizedPoints):
row = normalizedPoints[rowNr]
rowIP = inputPoints[rowNr]
if dominates(
row, normalized
            ): # enters if candidateRow is better than the challenger.
normalizedPoints = np.delete(normalizedPoints, rowNr, 0)
inputPoints.remove(rowIP)
dominatedPoints.add(tuple(rowIP))
elif dominates(
normalized, row
            ): # enters if the challenger is greater than the candidate.
nonDominated = False
dominatedPoints.add(tuple(candidateRow))
rowNr += 1
else:
rowNr += 1
        if nonDominated: # add nondominated points to the pareto set
ID = int(normalized[3])
paretoPoints.add(tuple(candidateRow))
        if len(normalizedPoints) == 0: # Stops when all points have been processed.
break
dp = np.array(list(dominatedPoints))
pp = np.array(list(paretoPoints))
return paretoPoints, dominatedPoints, dp, pp
def dominates(row, normalized): # computes whether the challenger is better than the candidate.
return sum([row[x] >= normalized[x] for x in range(len(row) - 1)]) == len(row) - 1
def WeightPPpoints(pp, my_weights):
Pareto_points = pp
np.size(Pareto_points)
Nrofrows_pareto = np.size(Pareto_points, 0)
    # creates a vector with the IDs
    ID_vektor = np.delete(Pareto_points, [0, 1, 2], 1).tolist()
    # creates a matrix of output values without the ID column
    A = np.delete(Pareto_points, 3, 1)
    np.size(A)
    # defines the size of the matrix returned as the pareto points output
Nrofcolumns = np.size(A, 1)
Nrofrows = np.size(A, 0)
sizeofA = (Nrofrows, Nrofcolumns)
    # Creates a matrix that is then filled with the IDs of the best solutions
IDpoints = []
    # creates a copy of the matrix that is normalized later
B = A.copy()
sum1 = 0
sum2 = 0
sum3 = 0
for i in range(0, Nrofrows):
        # sums values column-wise into the denominator for normalization
sum1 = sum1 + A[i, 0] ** 2
sum2 = sum2 + A[i, 1] ** 2
sum3 = sum3 + A[i, 2] ** 2
    # defines a vector with normalization values
    myarray_normalize = [sum1 ** 0.5, sum2 ** 0.5, sum3 ** 0.5]
    # Normalizes the matrix
    B = A / myarray_normalize
    # copies the matrix and multiplies column-wise with the weightings
    C = B.copy()
    # Loop for 5 different weightings -> 5 optimal pareto points as output
for j in range(0, len(my_weights)):
for i in range(0, Nrofrows):
C[i, 0] = B[i, 0] * my_weights[j, 0]
C[i, 1] = B[i, 1] * my_weights[j, 1]
C[i, 2] = B[i, 2] * my_weights[j, 2]
        # Define ideal values A_positive and non-ideal values A_negative
A_positive = [C[:, 0].min(), C[:, 1].min(), C[:, 2].min()]
A_negative = [C[:, 0].max(), C[:, 1].max(), C[:, 2].max()]
S_positive = np.zeros((Nrofrows, 1))
S_negative = np.zeros((Nrofrows, 1))
C_value = np.zeros((Nrofrows, 1))
# Vektor_ID_optimala=np.zeros((1,5))
for i in range(0, Nrofrows):
S_positive[i] = (
(C[i, 0] - A_positive[0]) ** 2
+ (C[i, 1] - A_positive[1]) ** 2
+ (C[i, 2] - A_positive[2]) ** 2
) ** 0.5
S_negative[i] = (
(C[i, 0] - A_negative[0]) ** 2
+ (C[i, 1] - A_negative[1]) ** 2
+ (C[i, 2] - A_negative[2]) ** 2
) ** 0.5
C_value[i] = S_negative[i] / (S_negative[i] + S_positive[i])
Best_value = C_value.max()
        # find which row in the C vector has the largest value
        Row_best_option = np.argmax(C_value)
        # find which input ID the solution has
Vektor_ID_optimala = np.array(ID_vektor[Row_best_option]).tolist()
IDpoints.append(int(max(Vektor_ID_optimala)))
return IDpoints
def generate_pareto_cases(data_dir, problem_constants):
crm = get_case_reader(data_dir, problem_constants)
input_points = list(generate_valid_points(problem_constants, crm))
pareto_points, dominated_points, dp, pp = calculate(input_points, dominates)
my_weights = np.matrix(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
) # Weights used to pick points from the ParetoFront
pareto_case_ids = WeightPPpoints(pp, my_weights)
for i in pareto_case_ids:
yield crm.get_case(i)
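# Illustrative sketch (editor addition): running the Pareto filter on synthetic points in
# the same [total_price, usability_penalty, total_delivery_time, case_index] format used
# above; the numbers are made up, and the module's openmdao/optigurator imports must be
# installed for the file to import at all.
if __name__ == "__main__":
    candidates = [
        [100.0, 0.2, 5.0, 0],
        [120.0, 0.3, 6.0, 1],  # dominated: worse than point 0 in every objective
        [90.0, 0.5, 4.0, 2],
    ]
    pareto_points, dominated_points, dp, pp = calculate(candidates, dominates)
    print("pareto points:", pareto_points)
    print("dominated points:", dominated_points)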
|
[
"numpy.matrix",
"numpy.size",
"numpy.argmax",
"numpy.zeros",
"numpy.array",
"optigurator.utils.recording_filename",
"numpy.delete"
] |
[((3474, 3496), 'numpy.size', 'np.size', (['Pareto_points'], {}), '(Pareto_points)\n', (3481, 3496), True, 'import numpy as np\n'), ((3519, 3544), 'numpy.size', 'np.size', (['Pareto_points', '(0)'], {}), '(Pareto_points, 0)\n', (3526, 3544), True, 'import numpy as np\n'), ((3701, 3731), 'numpy.delete', 'np.delete', (['Pareto_points', '(3)', '(1)'], {}), '(Pareto_points, 3, 1)\n', (3710, 3731), True, 'import numpy as np\n'), ((3737, 3747), 'numpy.size', 'np.size', (['A'], {}), '(A)\n', (3744, 3747), True, 'import numpy as np\n'), ((3839, 3852), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (3846, 3852), True, 'import numpy as np\n'), ((3868, 3881), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (3875, 3881), True, 'import numpy as np\n'), ((6413, 6457), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (6422, 6457), True, 'import numpy as np\n'), ((179, 229), 'optigurator.utils.recording_filename', 'recording_filename', (['data_dir', 'problem_constants.id'], {}), '(data_dir, problem_constants.id)\n', (197, 229), False, 'from optigurator.utils import recording_filename\n'), ((1777, 1798), 'numpy.array', 'np.array', (['inputPoints'], {}), '(inputPoints)\n', (1785, 1798), True, 'import numpy as np\n'), ((1801, 1828), 'numpy.array', 'np.array', (['myarray_normalize'], {}), '(myarray_normalize)\n', (1809, 1828), True, 'import numpy as np\n'), ((1979, 2026), 'numpy.delete', 'np.delete', (['normalizedPoints', 'normalizedRowNr', '(0)'], {}), '(normalizedPoints, normalizedRowNr, 0)\n', (1988, 2026), True, 'import numpy as np\n'), ((5119, 5142), 'numpy.zeros', 'np.zeros', (['(Nrofrows, 1)'], {}), '((Nrofrows, 1))\n', (5127, 5142), True, 'import numpy as np\n'), ((5164, 5187), 'numpy.zeros', 'np.zeros', (['(Nrofrows, 1)'], {}), '((Nrofrows, 1))\n', (5172, 5187), True, 'import numpy as np\n'), ((5206, 5229), 'numpy.zeros', 'np.zeros', (['(Nrofrows, 1)'], {}), '((Nrofrows, 1))\n', (5214, 5229), True, 'import numpy as np\n'), ((5910, 5928), 'numpy.argmax', 'np.argmax', (['C_value'], {}), '(C_value)\n', (5919, 5928), True, 'import numpy as np\n'), ((3592, 3630), 'numpy.delete', 'np.delete', (['Pareto_points', '[0, 1, 2]', '(1)'], {}), '(Pareto_points, [0, 1, 2], 1)\n', (3601, 3630), True, 'import numpy as np\n'), ((2430, 2467), 'numpy.delete', 'np.delete', (['normalizedPoints', 'rowNr', '(0)'], {}), '(normalizedPoints, rowNr, 0)\n', (2439, 2467), True, 'import numpy as np\n'), ((6010, 6046), 'numpy.array', 'np.array', (['ID_vektor[Row_best_option]'], {}), '(ID_vektor[Row_best_option])\n', (6018, 6046), True, 'import numpy as np\n')]
|
import os
from typing import Dict
from abc import ABC
from easy_sdm.data import ShapefileRegion
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from easy_sdm.configs import configs
from easy_sdm.utils import logger
from typing import Dict, Optional
from pathlib import Path
class GBIFOccurencesRequester:
"""[This class makes request to GBIF]
"""
def __init__(self, taxon_key: int, species_name: str):
self.taxon_key = taxon_key
self.species_name = species_name
self.base_url = "http://api.gbif.org/v1/occurrence/search"
def request(self, offset: int = 0):
"""[ Request GBIF information about an species]
Args:
            offset (int, optional): [Offset sets where to start the
                request in the GBIF database, since each request has a
                limit of 300 rows]. Defaults to 0.
Returns:
[type]: [int]
"""
gbif_configs = configs["gbif"]
params = {
"taxonKey": str(self.taxon_key),
"limit": gbif_configs["one_request_limit"],
"hasCoordinate": True,
"year": f"{gbif_configs['low_year']},{gbif_configs['up_year']}",
"country": gbif_configs["country"],
"offset": offset,
}
r = requests.get(self.base_url, params=params)
status_code = r.status_code
if r.status_code != 200:
logger.logging.info(
f"API call failed at offset {offset} with a status code of {r.status_code}."
)
end_of_records = True
else:
r = r.json()
end_of_records = r["endOfRecords"]
return r, end_of_records, status_code
class Species:
def __init__(self, taxon_key: int, name: str):
self.taxon_key = taxon_key
self.name = name
def __str__(self) -> str:
return "Species {self.name} with taxon key {self.taxon_key}"
class SpeciesDFBuilder:
"""[This class organize data requested to GBIF into pandas dataframes]
"""
def __init__(self, species: Species):
self.gbif_occ_requester = GBIFOccurencesRequester(
species.taxon_key, species.name
)
self.__df_memory = None
def get_specie_df(self):
"""Get species as DataFrame"""
        if self.__df_memory is not None:
df = self.__df_memory
else:
df = self.__request_species_df()
df = self.__clean_species_df(df)
self.__df_memory = df
return df
def __request_species_df(self):
"""[Organizes GBIF information in a dataframe considering offsets ]"""
end_of_records = False
offset = 0
status = 200
df = None
while end_of_records == False and status == 200:
r, end_of_records, status = self.gbif_occ_requester.request(offset)
df = self.__build_species_df(r, df)
offset = len(df) + 1
self.__clean_species_df(df)
return df
def __build_species_df(self, request, df=None):
"""[Create species dataframe with the request data]
Args:
df ([type]): [description]
request ([type]): [description]
Returns:
[df]: [description]
"""
if df is None:
df = pd.DataFrame(
columns=[
"SCIENTIFIC_NAME",
"LONGITUDE",
"LATITUDE",
"COUNTRY",
"STATE_PROVINCE",
"IDENTIFICATION_DATE",
"DAY",
"MONTH",
"YEAR",
]
)
for result in request["results"]:
result = self.__refact_dict(result)
df = df.append(
{
"SCIENTIFIC_NAME": result["scientificName"],
"LONGITUDE": result["decimalLongitude"],
"LATITUDE": result["decimalLatitude"],
"COUNTRY": result["country"],
"STATE_PROVINCE": result["stateProvince"],
"IDENTIFICATION_DATE": result["eventDate"],
"DAY": result["day"],
"MONTH": result["month"],
"YEAR": result["year"],
},
ignore_index=True,
)
return df
def __refact_dict(self, result: Dict):
"""Refact dict placing None in empty cells"""
columns = result.keys()
desired_columns = [
"scientificName",
"decimalLongitude",
"decimalLatitude",
"country",
"stateProvince",
"eventDate",
"day",
"month",
"year",
"occurrenceRemarks",
]
for d_col in desired_columns:
if d_col not in columns:
result[d_col] = None
return result
def __clean_species_df(self, df: pd.DataFrame):
"""[Cleaning Gbif Data]
Args:
df ([pd.DaraFrame]): [description]
Returns:
[pd.DaraFrame]: [description]
"""
# Double check to certify there is no empty lat/long data
df = df[pd.notnull(df["LATITUDE"])]
df = df[pd.notnull(df["LONGITUDE"])]
# Removing duplicate data
df = (
df.drop_duplicates(ignore_index=True)
if configs["gbif"]["drop_duplicates"]
else df
)
# Sorting Data by STATE_PROVINCE
df.sort_values("STATE_PROVINCE", inplace=True, ignore_index=True)
return df
class SpeciesGDFBuilder(SpeciesDFBuilder):
"""[This class organize data requested to GBIF into geopandas geodataframes]
"""
def __init__(
self, species: Species, proposed_region: Optional[ShapefileRegion] = None
):
super().__init__(species)
self.proposed_region = proposed_region
self.__gdf_memory = None
def save_species_gdf(self, output_path: Path):
if not str(output_path).endswith(".shp"):
raise TypeError("output_path must ends with shp")
output_path.parent.mkdir(parents=True, exist_ok=True)
gdf = self.get_species_gdf()
gdf.to_file(output_path)
def get_species_gdf(self):
if not (self.__gdf_memory is None):
gdf = self.__gdf_memory
else:
df = self.get_specie_df()
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df.LONGITUDE, df.LATITUDE)
)
gdf = gdf.set_crs(f"EPSG:{configs['maps']['default_epsg']}")
gdf = (
self.__filter_species_in_region(gdf)
if not (self.proposed_region is None)
else gdf
)
self.__gdf_memory = gdf
return gdf
def __filter_species_in_region(self, gdf: gpd.GeoDataFrame):
return self.proposed_region.get_points_inside(gdf)
class SpeciesInfoExtractor:
"""[A Wrapper to extract relevant information from spescies geodataframes]
"""
def __init__(self, species_geodataframe: gpd.GeoDataFrame) -> None:
self.species_geodataframe = species_geodataframe
def get_coordinates(self,):
coordinates = np.array(
(
np.array(self.species_geodataframe["LATITUDE"]),
np.array(self.species_geodataframe["LONGITUDE"]),
)
).T
return coordinates
def get_longitudes(self,):
coordinates = self.get_coordinates()
return coordinates[:, 1]
def get_latitudes(self,):
coordinates = self.get_coordinates()
return coordinates[:, 0]
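# Hypothetical usage sketch (editor addition): the calls below hit the live GBIF API, so
# they are left commented out; the taxon key, species name and output path are
# placeholders, not verified values.
# species = Species(taxon_key=2435099, name="Puma concolor")
# builder = SpeciesGDFBuilder(species)
# builder.save_species_gdf(Path("data/puma_concolor/puma_concolor.shp"))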
|
[
"pandas.DataFrame",
"pandas.notnull",
"easy_sdm.utils.logger.logging.info",
"geopandas.points_from_xy",
"numpy.array",
"requests.get"
] |
[((1336, 1378), 'requests.get', 'requests.get', (['self.base_url'], {'params': 'params'}), '(self.base_url, params=params)\n', (1348, 1378), False, 'import requests\n'), ((1460, 1567), 'easy_sdm.utils.logger.logging.info', 'logger.logging.info', (['f"""API call failed at offset {offset} with a status code of {r.status_code}."""'], {}), "(\n f'API call failed at offset {offset} with a status code of {r.status_code}.'\n )\n", (1479, 1567), False, 'from easy_sdm.utils import logger\n'), ((3360, 3506), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['SCIENTIFIC_NAME', 'LONGITUDE', 'LATITUDE', 'COUNTRY', 'STATE_PROVINCE',\n 'IDENTIFICATION_DATE', 'DAY', 'MONTH', 'YEAR']"}), "(columns=['SCIENTIFIC_NAME', 'LONGITUDE', 'LATITUDE', 'COUNTRY',\n 'STATE_PROVINCE', 'IDENTIFICATION_DATE', 'DAY', 'MONTH', 'YEAR'])\n", (3372, 3506), True, 'import pandas as pd\n'), ((5316, 5342), 'pandas.notnull', 'pd.notnull', (["df['LATITUDE']"], {}), "(df['LATITUDE'])\n", (5326, 5342), True, 'import pandas as pd\n'), ((5360, 5387), 'pandas.notnull', 'pd.notnull', (["df['LONGITUDE']"], {}), "(df['LONGITUDE'])\n", (5370, 5387), True, 'import pandas as pd\n'), ((6584, 6629), 'geopandas.points_from_xy', 'gpd.points_from_xy', (['df.LONGITUDE', 'df.LATITUDE'], {}), '(df.LONGITUDE, df.LATITUDE)\n', (6602, 6629), True, 'import geopandas as gpd\n'), ((7405, 7452), 'numpy.array', 'np.array', (["self.species_geodataframe['LATITUDE']"], {}), "(self.species_geodataframe['LATITUDE'])\n", (7413, 7452), True, 'import numpy as np\n'), ((7470, 7518), 'numpy.array', 'np.array', (["self.species_geodataframe['LONGITUDE']"], {}), "(self.species_geodataframe['LONGITUDE'])\n", (7478, 7518), True, 'import numpy as np\n')]
|
from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import Head
from chainercv.links.model.fpn import head_loss_post
from chainercv.links.model.fpn import head_loss_pre
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
@testing.parameterize(
{'n_class': 1 + 1},
{'n_class': 5 + 1},
{'n_class': 20 + 1},
)
class TestHead(unittest.TestCase):
def setUp(self):
self.link = Head(n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))
def _check_call(self):
hs = [
chainer.Variable(_random_array(self.link.xp, (2, 64, 32, 32))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 16, 16))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 8, 8))),
]
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
locs, confs = self.link(hs, rois, roi_indices)
self.assertIsInstance(locs, chainer.Variable)
self.assertIsInstance(locs.array, self.link.xp.ndarray)
self.assertEqual(locs.shape, (4, self.n_class, 4))
self.assertIsInstance(confs, chainer.Variable)
self.assertIsInstance(confs.array, self.link.xp.ndarray)
self.assertEqual(confs.shape, (4, self.n_class))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
def _check_distribute(self):
rois = self.link.xp.array((
(0, 0, 10, 10),
(0, 1000, 0, 1000),
(0, 0, 224, 224),
(100, 100, 224, 224),
), dtype=np.float32)
roi_indices = self.link.xp.array((0, 1, 0, 0), dtype=np.int32)
rois, roi_indices = self.link.distribute(rois, roi_indices)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
for l in range(3):
self.assertIsInstance(rois[l], self.link.xp.ndarray)
self.assertIsInstance(roi_indices[l], self.link.xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(sum(rois[l].shape[0] for l in range(3)), 4)
def test_distribute_cpu(self):
self._check_distribute()
@attr.gpu
def test_distribute_gpu(self):
self.link.to_gpu()
self._check_distribute()
def _check_decode(self):
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
locs = chainer.Variable(_random_array(
self.link.xp, (4, self.n_class, 4)))
confs = chainer.Variable(_random_array(
self.link.xp, (4, self.n_class)))
bboxes, labels, scores = self.link.decode(
rois, roi_indices,
locs, confs,
(0.4, 0.2), ((100, 100), (200, 200)),
0.5, 0.1)
self.assertEqual(len(bboxes), 2)
self.assertEqual(len(labels), 2)
self.assertEqual(len(scores), 2)
for n in range(2):
self.assertIsInstance(bboxes[n], self.link.xp.ndarray)
self.assertIsInstance(labels[n], self.link.xp.ndarray)
self.assertIsInstance(scores[n], self.link.xp.ndarray)
self.assertEqual(bboxes[n].shape[0], labels[n].shape[0])
self.assertEqual(bboxes[n].shape[0], scores[n].shape[0])
self.assertEqual(bboxes[n].shape[1:], (4,))
self.assertEqual(labels[n].shape[1:], ())
self.assertEqual(scores[n].shape[1:], ())
def test_decode_cpu(self):
self._check_decode()
@attr.gpu
def test_decode_gpu(self):
self.link.to_gpu()
self._check_decode()
class TestHeadLoss(unittest.TestCase):
def _check_head_loss_pre(self, xp):
rois = [
xp.array(((4, 1, 6, 3),), dtype=np.float32),
xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
xp.array((0,), dtype=np.int32),
xp.array((1, 0), dtype=np.int32),
xp.array((1,), dtype=np.int32),
]
bboxes = [
xp.array(((2, 4, 6, 7), (1, 12, 3, 30)), dtype=np.float32),
xp.array(((10, 2, 12, 12),), dtype=np.float32),
]
labels = [
xp.array((10, 4), dtype=np.float32),
xp.array((1,), dtype=np.float32),
]
rois, roi_indices, gt_locs, gt_labels = head_loss_pre(
rois, roi_indices, (0.1, 0.2), bboxes, labels)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
self.assertEqual(len(gt_locs), 3)
self.assertEqual(len(gt_labels), 3)
for l in range(3):
self.assertIsInstance(rois[l], xp.ndarray)
self.assertIsInstance(roi_indices[l], xp.ndarray)
self.assertIsInstance(gt_locs[l], xp.ndarray)
self.assertIsInstance(gt_labels[l], xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_locs[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_labels[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(gt_locs[l].shape[1:], (4,))
self.assertEqual(gt_labels[l].shape[1:], ())
def test_head_loss_pre_cpu(self):
self._check_head_loss_pre(np)
@attr.gpu
def test_head_loss_pre_gpu(self):
import cupy
self._check_head_loss_pre(cupy)
def _check_head_loss_post(self, xp):
locs = chainer.Variable(_random_array(xp, (20, 81, 4)))
confs = chainer.Variable(_random_array(xp, (20, 81)))
roi_indices = [
xp.random.randint(0, 2, size=5).astype(np.int32),
xp.random.randint(0, 2, size=7).astype(np.int32),
xp.random.randint(0, 2, size=8).astype(np.int32),
]
gt_locs = [
_random_array(xp, (5, 4)),
_random_array(xp, (7, 4)),
_random_array(xp, (8, 4)),
]
gt_labels = [
xp.random.randint(0, 80, size=5).astype(np.int32),
xp.random.randint(0, 80, size=7).astype(np.int32),
xp.random.randint(0, 80, size=8).astype(np.int32),
]
loc_loss, conf_loss = head_loss_post(
locs, confs, roi_indices, gt_locs, gt_labels, 2)
self.assertIsInstance(loc_loss, chainer.Variable)
self.assertIsInstance(loc_loss.array, xp.ndarray)
self.assertEqual(loc_loss.shape, ())
self.assertIsInstance(conf_loss, chainer.Variable)
self.assertIsInstance(conf_loss.array, xp.ndarray)
self.assertEqual(conf_loss.shape, ())
def test_head_loss_post_cpu(self):
self._check_head_loss_post(np)
@attr.gpu
def test_head_loss_post_gpu(self):
import cupy
self._check_head_loss_post(cupy)
testing.run_module(__name__, __file__)
|
[
"numpy.random.uniform",
"chainercv.links.model.fpn.head_loss_pre",
"chainercv.links.model.fpn.Head",
"chainer.testing.parameterize",
"chainercv.links.model.fpn.head_loss_post",
"chainer.testing.run_module"
] |
[((414, 499), 'chainer.testing.parameterize', 'testing.parameterize', (["{'n_class': 1 + 1}", "{'n_class': 5 + 1}", "{'n_class': 20 + 1}"], {}), "({'n_class': 1 + 1}, {'n_class': 5 + 1}, {'n_class': 20 +\n 1})\n", (434, 499), False, 'from chainer import testing\n'), ((8017, 8055), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (8035, 8055), False, 'from chainer import testing\n'), ((355, 391), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'shape'}), '(-1, 1, size=shape)\n', (372, 391), True, 'import numpy as np\n'), ((588, 644), 'chainercv.links.model.fpn.Head', 'Head', ([], {'n_class': 'self.n_class', 'scales': '(1 / 2, 1 / 4, 1 / 8)'}), '(n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))\n', (592, 644), False, 'from chainercv.links.model.fpn import Head\n'), ((5493, 5553), 'chainercv.links.model.fpn.head_loss_pre', 'head_loss_pre', (['rois', 'roi_indices', '(0.1, 0.2)', 'bboxes', 'labels'], {}), '(rois, roi_indices, (0.1, 0.2), bboxes, labels)\n', (5506, 5553), False, 'from chainercv.links.model.fpn import head_loss_pre\n'), ((7417, 7480), 'chainercv.links.model.fpn.head_loss_post', 'head_loss_post', (['locs', 'confs', 'roi_indices', 'gt_locs', 'gt_labels', '(2)'], {}), '(locs, confs, roi_indices, gt_locs, gt_labels, 2)\n', (7431, 7480), False, 'from chainercv.links.model.fpn import head_loss_post\n')]
|
#!/usr/bin/env python
from itertools import izip
import numpy as np
import h5py
from progress.bar import Bar
import sys
import rospy
import rosbag
from sensor_msgs.msg import Imu, Image
def main():
if len(sys.argv) < 2:
print("Usage: {} dataset_name".format(sys.argv[0]))
exit(1)
file_name = sys.argv[1]
log_file = h5py.File('../dataset/log/{}.h5'.format(file_name))
camera_file = h5py.File('../dataset/camera/{}.h5'.format(file_name))
zipped_log = izip(
log_file['times'],
log_file['fiber_accel'],
log_file['fiber_gyro'])
with rosbag.Bag('{}.bag'.format(file_name), 'w') as bag:
bar = Bar('Camera', max=len(camera_file['X']))
for i, img_data in enumerate(camera_file['X']):
m_img = Image()
m_img.header.stamp = rospy.Time.from_sec(0.01 * i)
m_img.height = img_data.shape[1]
m_img.width = img_data.shape[2]
m_img.step = 3 * img_data.shape[2]
m_img.encoding = 'rgb8'
m_img.data = np.transpose(img_data, (1, 2, 0)).flatten().tolist()
bag.write('/camera/image_raw', m_img, m_img.header.stamp)
bar.next()
bar.finish()
bar = Bar('IMU', max=len(log_file['times']))
for time, v_accel, v_gyro in zipped_log:
m_imu = Imu()
m_imu.header.stamp = rospy.Time.from_sec(time)
[setattr(m_imu.linear_acceleration, c, v_accel[i]) for i, c in enumerate('xyz')]
[setattr(m_imu.angular_velocity, c, v_gyro[i]) for i, c in enumerate('xyz')]
bag.write('/fiber_imu', m_imu, m_imu.header.stamp)
bar.next()
bar.finish()
if __name__ == "__main__":
main()
|
[
"sensor_msgs.msg.Image",
"numpy.transpose",
"sensor_msgs.msg.Imu",
"rospy.Time.from_sec",
"itertools.izip"
] |
[((474, 546), 'itertools.izip', 'izip', (["log_file['times']", "log_file['fiber_accel']", "log_file['fiber_gyro']"], {}), "(log_file['times'], log_file['fiber_accel'], log_file['fiber_gyro'])\n", (478, 546), False, 'from itertools import izip\n'), ((737, 744), 'sensor_msgs.msg.Image', 'Image', ([], {}), '()\n', (742, 744), False, 'from sensor_msgs.msg import Imu, Image\n'), ((772, 801), 'rospy.Time.from_sec', 'rospy.Time.from_sec', (['(0.01 * i)'], {}), '(0.01 * i)\n', (791, 801), False, 'import rospy\n'), ((1243, 1248), 'sensor_msgs.msg.Imu', 'Imu', ([], {}), '()\n', (1246, 1248), False, 'from sensor_msgs.msg import Imu, Image\n'), ((1276, 1301), 'rospy.Time.from_sec', 'rospy.Time.from_sec', (['time'], {}), '(time)\n', (1295, 1301), False, 'import rospy\n'), ((969, 1002), 'numpy.transpose', 'np.transpose', (['img_data', '(1, 2, 0)'], {}), '(img_data, (1, 2, 0))\n', (981, 1002), True, 'import numpy as np\n')]
|
"""
Obtain the single photoelectron response for an SiPM. Can be used as an input
to sim_telarray after normalisation with Konrads script
"""
import argparse
from argparse import ArgumentDefaultsHelpFormatter as Formatter
import numpy as np
from scipy.special import binom
from scipy.stats import norm
from IPython import embed
from matplotlib import pyplot as plt
import os
def sipm_enf(x, spe_sigma, opct, pap, dap):
"""
SiPM formula from Gentile 2010
http://adsabs.harvard.edu/abs/2010arXiv1006.3263G
This implementation only considers the case for a 100% probability of a
    single initial fired microcell
Parameters
----------
x : ndarray
X points to evaluate at
spe_sigma : float
Width of the single photoelectron peak
opct : float
Probability of optical crosstalk
pap : float
Probability of afterpulse
dap : float
Distance of afterpulse peak from main peak
"""
n_peaks = 100
N = np.arange(n_peaks)[:, None]
K = np.arange(1, n_peaks)[:, None]
# Probability of n fired microcells due to optical crosstalk
pct = ((1 - opct) * np.power(opct, N - 1) * binom(N - 1, 0))[:, 0]
sap = spe_sigma
papk = np.power(1 - pap, N[:, 0])
p0ap = pct * papk
pap1 = pct * (1-papk) * papk
pe_sigma = np.sqrt(K * spe_sigma ** 2)
ap_sigma = np.sqrt(K * sap ** 2)
signal = p0ap[K] * norm.pdf(x, K, pe_sigma)
signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma)
return signal.sum(0)
def main():
description = ('Obtain the single photoelectron response for an SiPM. '
'Can be used as an input to sim_telarray after '
'normalisation with Konrads script')
parser = argparse.ArgumentParser(description=description,
formatter_class=Formatter)
parser.add_argument('-o', '--output', dest='output_dir', action='store',
required=True,
help='Output directory for the files')
parser.add_argument('--spe_sigma', dest='spe_sigma', action='store',
default=0.1, type=float,
help='Value for the standard deviation of the single '
'photoelectron peak')
parser.add_argument('--opct', dest='opct', action='store', default=0.1,
type = float,
help='Value for optical crosstalk')
parser.add_argument('--pap', dest='pap', action='store', default=0,
type=float,
help='Value for the probability of afterpulses')
parser.add_argument('--dap', dest='dap', action='store', default=0,
type=float,
help='Value for the distance of the afterpulse peak '
'from main peak')
args = parser.parse_args()
output_dir = args.output_dir
spe_sigma = args.spe_sigma
opct = args.opct
pap = args.pap
dap = args.dap
print(
"""
SPE Parameters: spe_sigma = {}
opct = {}
pap = {}
dap = {}
""".format(spe_sigma, opct, pap, dap)
)
x = np.linspace(0, 100, 1000)
y = sipm_enf(x, spe_sigma, opct, pap, dap)
gt = y > 1E-15
x = x[gt]
y = y[gt]
# Resample
x = np.linspace(x.min(), x.max(), 1000)
y = sipm_enf(x, spe_sigma, opct, pap, dap)
if not os.path.exists(output_dir):
print("Creating directory: {}".format(output_dir))
os.makedirs(output_dir)
output_path = os.path.join(output_dir, "checs_spe_spectrum.txt")
np.savetxt(output_path, np.column_stack((x, y, y)))
print("Created config : {}".format(output_path))
output_path = os.path.join(output_dir, "checs_spe_spectrum.pdf")
plt.semilogy(x, y)
plt.savefig(output_path, bbox_inches='tight')
print("Created figure : {}".format(output_path))
if __name__ == '__main__':
main()
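# Illustrative note (editor addition): sipm_enf can also be evaluated directly, e.g.
#   x = np.linspace(0, 10, 500)
#   y = sipm_enf(x, spe_sigma=0.1, opct=0.1, pap=0.02, dap=0.2)
# which returns the (unnormalised) SPE spectrum sampled on x; the parameter values here
# are arbitrary examples.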
|
[
"scipy.special.binom",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.power",
"os.path.exists",
"scipy.stats.norm.pdf",
"numpy.arange",
"numpy.linspace",
"numpy.column_stack",
"matplotlib.pyplot.semilogy",
"os.path.join",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((1223, 1249), 'numpy.power', 'np.power', (['(1 - pap)', 'N[:, 0]'], {}), '(1 - pap, N[:, 0])\n', (1231, 1249), True, 'import numpy as np\n'), ((1321, 1348), 'numpy.sqrt', 'np.sqrt', (['(K * spe_sigma ** 2)'], {}), '(K * spe_sigma ** 2)\n', (1328, 1348), True, 'import numpy as np\n'), ((1364, 1385), 'numpy.sqrt', 'np.sqrt', (['(K * sap ** 2)'], {}), '(K * sap ** 2)\n', (1371, 1385), True, 'import numpy as np\n'), ((1749, 1824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'Formatter'}), '(description=description, formatter_class=Formatter)\n', (1772, 1824), False, 'import argparse\n'), ((3253, 3278), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (3264, 3278), True, 'import numpy as np\n'), ((3630, 3680), 'os.path.join', 'os.path.join', (['output_dir', '"""checs_spe_spectrum.txt"""'], {}), "(output_dir, 'checs_spe_spectrum.txt')\n", (3642, 3680), False, 'import os\n'), ((3809, 3859), 'os.path.join', 'os.path.join', (['output_dir', '"""checs_spe_spectrum.pdf"""'], {}), "(output_dir, 'checs_spe_spectrum.pdf')\n", (3821, 3859), False, 'import os\n'), ((3864, 3882), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['x', 'y'], {}), '(x, y)\n', (3876, 3882), True, 'from matplotlib import pyplot as plt\n'), ((3887, 3932), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'bbox_inches': '"""tight"""'}), "(output_path, bbox_inches='tight')\n", (3898, 3932), True, 'from matplotlib import pyplot as plt\n'), ((986, 1004), 'numpy.arange', 'np.arange', (['n_peaks'], {}), '(n_peaks)\n', (995, 1004), True, 'import numpy as np\n'), ((1022, 1043), 'numpy.arange', 'np.arange', (['(1)', 'n_peaks'], {}), '(1, n_peaks)\n', (1031, 1043), True, 'import numpy as np\n'), ((1410, 1434), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', 'K', 'pe_sigma'], {}), '(x, K, pe_sigma)\n', (1418, 1434), False, 'from scipy.stats import norm\n'), ((1459, 1497), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', '(K * (1.0 - dap))', 'ap_sigma'], {}), '(x, K * (1.0 - dap), ap_sigma)\n', (1467, 1497), False, 'from scipy.stats import norm\n'), ((3492, 3518), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3506, 3518), False, 'import os\n'), ((3587, 3610), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3598, 3610), False, 'import os\n'), ((3709, 3735), 'numpy.column_stack', 'np.column_stack', (['(x, y, y)'], {}), '((x, y, y))\n', (3724, 3735), True, 'import numpy as np\n'), ((1167, 1182), 'scipy.special.binom', 'binom', (['(N - 1)', '(0)'], {}), '(N - 1, 0)\n', (1172, 1182), False, 'from scipy.special import binom\n'), ((1143, 1164), 'numpy.power', 'np.power', (['opct', '(N - 1)'], {}), '(opct, N - 1)\n', (1151, 1164), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
nus_lpf,mu_lpf=np.load("clpf.npz",allow_pickle=True)["arr_0"]
nus_modit,mu_modit=np.load("cmodit4500.npz",allow_pickle=True)["arr_0"]
fig=plt.figure(figsize=(8,4))
plt.plot(nus_modit,mu_modit,label="MODIT",color="C1")
plt.plot(nus_lpf,mu_lpf,label="DIRECT",ls="dashed",color="C0")
plt.xlabel("wavenumber (cm-1)")
plt.ylabel("spectrum")
plt.legend()
plt.savefig("compspec_luhman16A.png")
plt.show()
|
[
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((192, 218), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (202, 218), True, 'import matplotlib.pyplot as plt\n'), ((218, 274), 'matplotlib.pyplot.plot', 'plt.plot', (['nus_modit', 'mu_modit'], {'label': '"""MODIT"""', 'color': '"""C1"""'}), "(nus_modit, mu_modit, label='MODIT', color='C1')\n", (226, 274), True, 'import matplotlib.pyplot as plt\n'), ((272, 338), 'matplotlib.pyplot.plot', 'plt.plot', (['nus_lpf', 'mu_lpf'], {'label': '"""DIRECT"""', 'ls': '"""dashed"""', 'color': '"""C0"""'}), "(nus_lpf, mu_lpf, label='DIRECT', ls='dashed', color='C0')\n", (280, 338), True, 'import matplotlib.pyplot as plt\n'), ((336, 367), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wavenumber (cm-1)"""'], {}), "('wavenumber (cm-1)')\n", (346, 367), True, 'import matplotlib.pyplot as plt\n'), ((368, 390), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""spectrum"""'], {}), "('spectrum')\n", (378, 390), True, 'import matplotlib.pyplot as plt\n'), ((391, 403), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (401, 403), True, 'import matplotlib.pyplot as plt\n'), ((404, 441), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""compspec_luhman16A.png"""'], {}), "('compspec_luhman16A.png')\n", (415, 441), True, 'import matplotlib.pyplot as plt\n'), ((442, 452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (450, 452), True, 'import matplotlib.pyplot as plt\n'), ((68, 106), 'numpy.load', 'np.load', (['"""clpf.npz"""'], {'allow_pickle': '(True)'}), "('clpf.npz', allow_pickle=True)\n", (75, 106), True, 'import numpy as np\n'), ((134, 178), 'numpy.load', 'np.load', (['"""cmodit4500.npz"""'], {'allow_pickle': '(True)'}), "('cmodit4500.npz', allow_pickle=True)\n", (141, 178), True, 'import numpy as np\n')]
|
import numpy as np
from ligo.skymap import kde
import matplotlib
matplotlib.use('Agg')
from matplotlib.colors import to_rgb
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap
#matplotlib.rc('text', usetex=True)
def greedy(density):
i,j = np.shape(density)
idx = np.argsort(density.flatten())[::-1]
c = np.cumsum(density.flatten()[idx])
c = c/c[-1]
np.append(c,1.0)
p = np.zeros(i*j)
p[idx] = c[:]
return p.reshape(i,j)
def plot_sky(pts,contour=True,filled=False,ax=None,trueloc=None,cmap='Reds',col='red'):
cls = kde.Clustered2DSkyKDE
pts[:,0] = pts[:,0] - np.pi
skypost = cls(pts, trials=5, jobs=8)
# make up some data on a regular lat/lon grid.
# nlats = 145; nlons = 291; delta = 2.*np.pi/(nlons-1)
nlats = 145; nlons = 291; delta = 2.*np.pi/(nlons-1)
lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:])
# lons = (delta*np.indices((nlats,nlons))[1,:,:])
lons = (delta*np.indices((nlats,nlons))[1,:,:]-np.pi)
locs = np.column_stack((lons.flatten(),lats.flatten()))
prob = skypost(locs).reshape(nlats,nlons)
p1 = greedy(prob)
# compute mean location of samples
nx = np.cos(pts[:,1])*np.cos(pts[:,0])
ny = np.cos(pts[:,1])*np.sin(pts[:,0])
nz = np.sin(pts[:,1])
mean_n = [np.mean(nx),np.mean(ny),np.mean(nz)]
# bestloc = [np.remainder(np.arctan2(mean_n[1],mean_n[0]),2.0*np.pi),np.arctan2(mean_n[2],np.sqrt(mean_n[0]**2 + mean_n[1]**2))]
bestloc = [trueloc[0],trueloc[1]]
if ax is None:
# map = Basemap(projection='ortho',lon_0=-bestloc[0]*180/np.pi,lat_0=bestloc[1]*180/np.pi,resolution=None,celestial=True)
map = Basemap(projection='moll',lon_0=0,resolution=None,celestial=True)
map.drawmapboundary(fill_color='white')
# draw lat/lon grid lines every 30 degrees.
# map.drawmeridians(np.arange(0,360,30))
meridian = ["-180","-150","-120","-90","-60","-30","0","30","+60","+90","+120","+150"]
map.drawmeridians(np.arange(-180,180,30),labels=[1,1,1,1])
for i in np.arange(len(meridian)):
plt.annotate(r"$\textrm{%s}$" % meridian[i] + u"\u00b0",xy=map(np.arange(-180,180,30)[i],0),xycoords='data')
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
else:
map = ax
# compute native map projection coordinates of lat/lon grid.
# x, y = map(lons*180./np.pi, lats*180./np.pi)
x, y = map(lons*180./np.pi, lats*180./np.pi)
# contour data over the map.
if filled:
base_color = np.array(to_rgb(col))
opp_color = 1.0 - base_color
cs1 = map.contourf(x,y,1.0-p1,levels=[0.0,0.1,0.5,1.0],colors=[base_color+opp_color,base_color+0.8*opp_color,base_color+0.6*opp_color,base_color])
cs2 = map.contour(x,y,p1,levels=[0.5,0.9],linewidths=2.0,colors=col)
if trueloc is not None:
xx, yy = map((trueloc[0]*180./np.pi)-180.0, trueloc[1]*180./np.pi)
map.plot(xx,yy,marker='+',markersize=20,linewidth=5,color='black')
return map
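# Hypothetical usage sketch (editor addition): pts is an (N, 2) array of (RA, dec) samples
# in radians; ligo.skymap and Basemap must be installed, and the sample values below are
# made up.
# samples = np.column_stack((np.random.uniform(0, 2 * np.pi, 500),
#                            np.random.uniform(-0.5, 0.5, 500)))
# m = plot_sky(samples, filled=True, trueloc=(np.pi, 0.0), col='blue')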
|
[
"numpy.zeros",
"matplotlib.colors.to_rgb",
"numpy.shape",
"numpy.append",
"numpy.indices",
"matplotlib.use",
"numpy.sin",
"numpy.mean",
"numpy.cos",
"numpy.arange",
"mpl_toolkits.basemap.Basemap"
] |
[((65, 86), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (79, 86), False, 'import matplotlib\n'), ((270, 287), 'numpy.shape', 'np.shape', (['density'], {}), '(density)\n', (278, 287), True, 'import numpy as np\n'), ((396, 413), 'numpy.append', 'np.append', (['c', '(1.0)'], {}), '(c, 1.0)\n', (405, 413), True, 'import numpy as np\n'), ((421, 436), 'numpy.zeros', 'np.zeros', (['(i * j)'], {}), '(i * j)\n', (429, 436), True, 'import numpy as np\n'), ((1278, 1295), 'numpy.sin', 'np.sin', (['pts[:, 1]'], {}), '(pts[:, 1])\n', (1284, 1295), True, 'import numpy as np\n'), ((1192, 1209), 'numpy.cos', 'np.cos', (['pts[:, 1]'], {}), '(pts[:, 1])\n', (1198, 1209), True, 'import numpy as np\n'), ((1209, 1226), 'numpy.cos', 'np.cos', (['pts[:, 0]'], {}), '(pts[:, 0])\n', (1215, 1226), True, 'import numpy as np\n'), ((1235, 1252), 'numpy.cos', 'np.cos', (['pts[:, 1]'], {}), '(pts[:, 1])\n', (1241, 1252), True, 'import numpy as np\n'), ((1252, 1269), 'numpy.sin', 'np.sin', (['pts[:, 0]'], {}), '(pts[:, 0])\n', (1258, 1269), True, 'import numpy as np\n'), ((1309, 1320), 'numpy.mean', 'np.mean', (['nx'], {}), '(nx)\n', (1316, 1320), True, 'import numpy as np\n'), ((1321, 1332), 'numpy.mean', 'np.mean', (['ny'], {}), '(ny)\n', (1328, 1332), True, 'import numpy as np\n'), ((1333, 1344), 'numpy.mean', 'np.mean', (['nz'], {}), '(nz)\n', (1340, 1344), True, 'import numpy as np\n'), ((1679, 1747), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""moll"""', 'lon_0': '(0)', 'resolution': 'None', 'celestial': '(True)'}), "(projection='moll', lon_0=0, resolution=None, celestial=True)\n", (1686, 1747), False, 'from mpl_toolkits.basemap import Basemap\n'), ((2014, 2038), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(30)'], {}), '(-180, 180, 30)\n', (2023, 2038), True, 'import numpy as np\n'), ((2245, 2267), 'numpy.arange', 'np.arange', (['(-90)', '(90)', '(30)'], {}), '(-90, 90, 30)\n', (2254, 2267), True, 'import numpy as np\n'), ((2560, 2571), 'matplotlib.colors.to_rgb', 'to_rgb', (['col'], {}), '(col)\n', (2566, 2571), False, 'from matplotlib.colors import to_rgb\n'), ((869, 895), 'numpy.indices', 'np.indices', (['(nlats, nlons)'], {}), '((nlats, nlons))\n', (879, 895), True, 'import numpy as np\n'), ((974, 1000), 'numpy.indices', 'np.indices', (['(nlats, nlons)'], {}), '((nlats, nlons))\n', (984, 1000), True, 'import numpy as np\n'), ((2173, 2197), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(30)'], {}), '(-180, 180, 30)\n', (2182, 2197), True, 'import numpy as np\n')]
|
import numpy as np
def align_depth_to_rgb(
depth,
bgr_cameramodel,
depth_cameramodel,
depth_to_rgb_transform):
"""Align depth image to color image.
Parameters
----------
depth : numpy.ndarray
depth image in meter order.
bgr_cameramodel : cameramodels.PinholeCameraModel
bgr cameramodel
depth_cameramodel : cameramodels.PinholeCameraModel
depth cameramodel
depth_to_rgb_transform : numpy.ndarray
4x4 transformation matrix.
Returns
-------
aligned_img : numpy.ndarray
aligned image.
"""
if depth.shape[0] != depth_cameramodel.height \
or depth.shape[1] != depth_cameramodel.width:
raise ValueError
depth = depth.copy()
aligned_img = np.zeros((bgr_cameramodel.height, bgr_cameramodel.width),
dtype=np.float32)
depth[np.isnan(depth)] = 0
v, u = np.array(np.where(depth))
uv = np.array([u, v]).T
rotation = depth_to_rgb_transform[:3, :3]
translation = depth_to_rgb_transform[:3, 3]
xyz_depth_frame = depth_cameramodel.batch_project_pixel_to_3d_ray(
uv, depth=depth[depth > 0])
xyz_rgb_frame = (np.matmul(
rotation.T, xyz_depth_frame.T)
- np.matmul(
rotation.T, translation).reshape(3, -1)).T
rgb_uv, indices = bgr_cameramodel.batch_project3d_to_pixel(
xyz_rgb_frame,
project_valid_depth_only=True,
return_indices=True)
aligned_img.reshape(-1)[bgr_cameramodel.flatten_uv(rgb_uv)] = \
depth[depth > 0][indices]
return aligned_img
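
# A minimal, self-contained sketch of the pinhole projection math that
# align_depth_to_rgb relies on: lift a pixel with known depth to a 3D point,
# move it into the RGB camera frame, and re-project with the RGB intrinsics.
# The intrinsic matrix K and the identity transform below are made-up example values.
def project_point(u, v, z, K_depth, K_rgb, T):
    # back-project pixel (u, v) at depth z into the depth camera frame
    x = (u - K_depth[0, 2]) * z / K_depth[0, 0]
    y = (v - K_depth[1, 2]) * z / K_depth[1, 1]
    p_rgb = T @ np.array([x, y, z, 1.0])   # express the point in the RGB frame
    u_rgb = K_rgb[0, 0] * p_rgb[0] / p_rgb[2] + K_rgb[0, 2]
    v_rgb = K_rgb[1, 1] * p_rgb[1] / p_rgb[2] + K_rgb[1, 2]
    return u_rgb, v_rgb


if __name__ == '__main__':
    K = np.array([[525.0, 0.0, 319.5], [0.0, 525.0, 239.5], [0.0, 0.0, 1.0]])
    print(project_point(100, 200, 1.5, K, K, np.eye(4)))  # (100.0, 200.0) with identity extrinsics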
|
[
"numpy.zeros",
"numpy.isnan",
"numpy.where",
"numpy.array",
"numpy.matmul"
] |
[((780, 855), 'numpy.zeros', 'np.zeros', (['(bgr_cameramodel.height, bgr_cameramodel.width)'], {'dtype': 'np.float32'}), '((bgr_cameramodel.height, bgr_cameramodel.width), dtype=np.float32)\n', (788, 855), True, 'import numpy as np\n'), ((893, 908), 'numpy.isnan', 'np.isnan', (['depth'], {}), '(depth)\n', (901, 908), True, 'import numpy as np\n'), ((934, 949), 'numpy.where', 'np.where', (['depth'], {}), '(depth)\n', (942, 949), True, 'import numpy as np\n'), ((960, 976), 'numpy.array', 'np.array', (['[u, v]'], {}), '([u, v])\n', (968, 976), True, 'import numpy as np\n'), ((1203, 1243), 'numpy.matmul', 'np.matmul', (['rotation.T', 'xyz_depth_frame.T'], {}), '(rotation.T, xyz_depth_frame.T)\n', (1212, 1243), True, 'import numpy as np\n'), ((1276, 1310), 'numpy.matmul', 'np.matmul', (['rotation.T', 'translation'], {}), '(rotation.T, translation)\n', (1285, 1310), True, 'import numpy as np\n')]
|
import numpy as np
import torch
def compute_lid(x, x_train, k, exclude_self=False):
"""
Calculate LID using the estimation from [1]
[1] Ma et al., "Characterizing Adversarial Subspaces Using
Local Intrinsic Dimensionality," ICLR 2018.
"""
with torch.no_grad():
x = x.view((x.size(0), -1))
x_train = x_train.view((x_train.size(0), -1))
lid = torch.zeros((x.size(0), ))
for i, x_cur in enumerate(x):
dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
# `largest` should be True when using cosine distance
if exclude_self:
topk_dist = dist.topk(k + 1, largest=False)[0][1:]
else:
topk_dist = dist.topk(k, largest=False)[0]
mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
lid[i] = -1 / mean_log
return lid
# def cal_class_lid(x, x_train, k, exclude_self=False):
# """
# Calculate LID on sample using the estimation from [1]
# [1] Ma et al., "Characterizing Adversarial Subspaces Using
# Local Intrinsic Dimensionality," ICLR 2018.
# """
# x = x.view((x.size(0), -1))
# x_train = x_train.view((x_train.size(0), -1))
# lid = torch.zeros((x.size(0), ))
# for i, x_cur in enumerate(x):
# dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
# # `largest` should be True when using cosine distance
# if exclude_self:
# topk_dist = dist.topk(k + 1, largest=False)[0][1:]
# else:
# topk_dist = dist.topk(k, largest=False)[0]
# mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
# lid[i] = -1 / mean_log
# return lid
def compute_spnorm(inputs, dknn, layers, batch_size=200):
assert inputs.requires_grad
num_total = inputs.size(0)
norm = np.zeros((num_total, len(layers)))
num_batches = int(np.ceil(num_total / batch_size))
for i in range(num_batches):
begin, end = i * batch_size, (i + 1) * batch_size
x = inputs[begin:end]
reps = dknn.get_activations(x)
for l, layer in enumerate(layers):
y = reps[layer]
norm[begin:end, l] = compute_spnorm_batch(x, y)
return norm
def compute_spnorm_batch(inputs, output):
"""
:param inputs: (batch_size, input_size)
:param output: (batch_size, output_size)
:return: jacobian: (batch_size, output_size, input_size)
"""
batch_size, input_dim = inputs.view(inputs.size(0), -1).size()
output = output.view(batch_size, -1)
jacobian = torch.zeros((batch_size, output.size(1), input_dim))
for i in range(output.size(1)):
grad = torch.autograd.grad(
output[:, i].sum(), inputs, retain_graph=True)[0]
jacobian[:, i, :] = grad.view(batch_size, input_dim)
norm = np.zeros((batch_size, ))
for i in range(batch_size):
norm[i] = np.linalg.norm(jacobian[i].detach().cpu().numpy(), 2)
return norm
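
# A numpy-only sketch of the same maximum-likelihood LID estimator used in
# compute_lid above (Ma et al., ICLR 2018): lid = -1 / mean(log(r_i / r_k))
# over the k nearest-neighbour distances r_i, with r_k the largest of them.
# The Gaussian cloud below is synthetic example data; for points with intrinsic
# dimension d the estimate comes out roughly of the order of d.
def lid_mle(x, x_train, k=20):
    dist = np.linalg.norm(x_train - x, axis=1)
    topk = np.sort(dist)[:k]
    return -1.0 / np.mean(np.log(topk / topk[-1]))


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    cloud = rng.normal(size=(5000, 8))
    print(lid_mle(cloud[0], cloud[1:], k=50))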
|
[
"torch.no_grad",
"numpy.zeros",
"numpy.ceil",
"torch.log"
] |
[((2835, 2858), 'numpy.zeros', 'np.zeros', (['(batch_size,)'], {}), '((batch_size,))\n', (2843, 2858), True, 'import numpy as np\n'), ((276, 291), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (289, 291), False, 'import torch\n'), ((1899, 1930), 'numpy.ceil', 'np.ceil', (['(num_total / batch_size)'], {}), '(num_total / batch_size)\n', (1906, 1930), True, 'import numpy as np\n'), ((785, 821), 'torch.log', 'torch.log', (['(topk_dist / topk_dist[-1])'], {}), '(topk_dist / topk_dist[-1])\n', (794, 821), False, 'import torch\n')]
|
#imports
import haversine as hs
import pandas as pd
import numpy as np
import random
import time
from concurrent import futures
import grpc
import databroker_pb2_grpc
import databroker_pb2
port = 8061
class Databroker(databroker_pb2_grpc.DatabrokerServicer):
def __init__(self):
self.current_row = 0
#load required datasets
self.no2_data = pd.read_csv('./data/no2_testset.csv')
self.pm10_data = pd.read_csv('./data/pm10_testset.csv')
self.pm25_data = pd.read_csv('./data/pm25_testset.csv')
self.gps_data = pd.read_csv('./data/sensor_gps.csv')
self.sensor_gps = pd.read_csv('./data/low_cost_sensors.csv')
def get_next(self, request, context):
response = databroker_pb2.Features()
if self.current_row >= self.no2_data.shape[0]:
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details("all data has been processed")
else:
#load 1 row from each dataset and convert to numpy
# create response format dataframe
no2 = pd.DataFrame(data=None, columns=self.no2_data.columns)
pm10 = pd.DataFrame(data=None, columns=self.pm10_data.columns)
pm25 = pd.DataFrame(data=None, columns=self.pm25_data.columns)
for sensor in range(self.sensor_gps.shape[0]):
id = self.sensor_gps.deviceID[sensor]
counter=1
for i in range(23,0,-1):
lat1 = np.rad2deg(self.sensor_gps.iloc[sensor,4])
lon1 = np.rad2deg(self.sensor_gps.iloc[sensor,5])
lat2 = self.gps_data.iloc[0,i*2+1]
lon2 = self.gps_data.iloc[0,i*2]
distance = hs.haversine((lat2, lon2), (lat1, lon1))
self.no2_data.iloc[self.current_row,counter] = distance
self.pm10_data.iloc[self.current_row,counter] = distance
self.pm25_data.iloc[self.current_row,counter] = distance
counter +=1
no2 = no2.append(self.no2_data.iloc[self.current_row,:])
pm10 = pm10.append(self.pm10_data.iloc[self.current_row,:])
pm25 = pm25.append(self.pm25_data.iloc[self.current_row,:])
no2_input= no2.iloc[:,1:].to_numpy()
pm10_input= pm10.iloc[:,1:].to_numpy()
pm25_input= pm25.iloc[:,1:].to_numpy()
no2_input = np.ndarray.tobytes(no2_input)
pm10_input = np.ndarray.tobytes(pm10_input)
pm25_input = np.ndarray.tobytes(pm25_input)
#add output to response
response.no2_data = no2_input
response.pm10_data = pm10_input
response.pm25_data = pm25_input
            # advance the row counter (could be made cyclical with mod later)
self.current_row += 1
return response
#host server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
databroker_pb2_grpc.add_DatabrokerServicer_to_server(Databroker(), server)
print("Starting server. Listening on port : " + str(port))
server.add_insecure_port("[::]:{}".format(port))
server.start()
try:
while True:
time.sleep(86400)
except KeyboardInterrupt:
server.stop(0)
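
# A standalone restatement of the great-circle (haversine) distance that get_next
# computes between each sensor and the GPS track points, written out in plain math
# rather than through the haversine package; the sample coordinates in the comment
# below are made-up.
def haversine_km(lat1, lon1, lat2, lon2, radius_km=6371.0):
    phi1, phi2 = np.radians(lat1), np.radians(lat2)
    dphi = np.radians(lat2 - lat1)
    dlmb = np.radians(lon2 - lon1)
    a = np.sin(dphi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(dlmb / 2) ** 2
    return 2 * radius_km * np.arcsin(np.sqrt(a))

# e.g. haversine_km(52.37, 4.90, 52.38, 4.92) is roughly 1.8 km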
|
[
"pandas.DataFrame",
"pandas.read_csv",
"haversine.haversine",
"time.sleep",
"numpy.rad2deg",
"databroker_pb2.Features",
"concurrent.futures.ThreadPoolExecutor",
"numpy.ndarray.tobytes"
] |
[((3015, 3057), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(10)'}), '(max_workers=10)\n', (3041, 3057), False, 'from concurrent import futures\n'), ((372, 409), 'pandas.read_csv', 'pd.read_csv', (['"""./data/no2_testset.csv"""'], {}), "('./data/no2_testset.csv')\n", (383, 409), True, 'import pandas as pd\n'), ((435, 473), 'pandas.read_csv', 'pd.read_csv', (['"""./data/pm10_testset.csv"""'], {}), "('./data/pm10_testset.csv')\n", (446, 473), True, 'import pandas as pd\n'), ((499, 537), 'pandas.read_csv', 'pd.read_csv', (['"""./data/pm25_testset.csv"""'], {}), "('./data/pm25_testset.csv')\n", (510, 537), True, 'import pandas as pd\n'), ((562, 598), 'pandas.read_csv', 'pd.read_csv', (['"""./data/sensor_gps.csv"""'], {}), "('./data/sensor_gps.csv')\n", (573, 598), True, 'import pandas as pd\n'), ((625, 667), 'pandas.read_csv', 'pd.read_csv', (['"""./data/low_cost_sensors.csv"""'], {}), "('./data/low_cost_sensors.csv')\n", (636, 667), True, 'import pandas as pd\n'), ((731, 756), 'databroker_pb2.Features', 'databroker_pb2.Features', ([], {}), '()\n', (754, 756), False, 'import databroker_pb2\n'), ((3287, 3304), 'time.sleep', 'time.sleep', (['(86400)'], {}), '(86400)\n', (3297, 3304), False, 'import time\n'), ((1073, 1127), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'None', 'columns': 'self.no2_data.columns'}), '(data=None, columns=self.no2_data.columns)\n', (1085, 1127), True, 'import pandas as pd\n'), ((1147, 1202), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'None', 'columns': 'self.pm10_data.columns'}), '(data=None, columns=self.pm10_data.columns)\n', (1159, 1202), True, 'import pandas as pd\n'), ((1222, 1277), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'None', 'columns': 'self.pm25_data.columns'}), '(data=None, columns=self.pm25_data.columns)\n', (1234, 1277), True, 'import pandas as pd\n'), ((2491, 2520), 'numpy.ndarray.tobytes', 'np.ndarray.tobytes', (['no2_input'], {}), '(no2_input)\n', (2509, 2520), True, 'import numpy as np\n'), ((2550, 2580), 'numpy.ndarray.tobytes', 'np.ndarray.tobytes', (['pm10_input'], {}), '(pm10_input)\n', (2568, 2580), True, 'import numpy as np\n'), ((2610, 2640), 'numpy.ndarray.tobytes', 'np.ndarray.tobytes', (['pm25_input'], {}), '(pm25_input)\n', (2628, 2640), True, 'import numpy as np\n'), ((1493, 1536), 'numpy.rad2deg', 'np.rad2deg', (['self.sensor_gps.iloc[sensor, 4]'], {}), '(self.sensor_gps.iloc[sensor, 4])\n', (1503, 1536), True, 'import numpy as np\n'), ((1563, 1606), 'numpy.rad2deg', 'np.rad2deg', (['self.sensor_gps.iloc[sensor, 5]'], {}), '(self.sensor_gps.iloc[sensor, 5])\n', (1573, 1606), True, 'import numpy as np\n'), ((1745, 1785), 'haversine.haversine', 'hs.haversine', (['(lat2, lon2)', '(lat1, lon1)'], {}), '((lat2, lon2), (lat1, lon1))\n', (1757, 1785), True, 'import haversine as hs\n')]
|
''' imports '''
# filesystem management
import os
# tensors and nn modules
import torch
# array handling
import numpy as np
# midi file import and parse
from mido import MidiFile
class MelodyDataset(torch.utils.data.Dataset):
''' dataset class for midi files '''
def __init__(self, dir_path: str, cache = False, ds: int = 20):
''' init dataset, import midi files '''
super().__init__()
# store downsampling factor
self.ds = ds
# get and store list midi files in directory
self.file_names = [ name for name in os.listdir(dir_path) if 'mid' in name[-4:] ]
# import and store midi files
self.midi_files = [ MidiFile(os.path.join(dir_path, file_name))
for file_name in self.file_names ]
# case filter by key
if False:
# get index for only midi with meta plus [melody, chords, bass] tracks
j = [ i for i in range(len(self.file_names))
if len(self.midi_files[i].tracks) > 3
and "key='{}'".format(key) in str(self.midi_files[i].tracks[0][2]) ]
if False:
# get index for only midi with meta plus [melody, chords, bass] tracks
j = [ i for i in range(len(self.file_names))
if len(self.midi_files[i].tracks) > 3 ]
# filter midi file and file name lists
self.midi_files = [ self.midi_files[i] for i in j ]
self.file_names = [ self.file_names[i] for i in j ]
# init store of import state
self.import_list = [ None for _ in range(len(self.midi_files)) ]
# pre-cache all data
if cache:
# iterate through midi files
for index in range(len(self.file_names)):
# import data to memory
self.import_data(index)
def import_data(self, index):
''' import midi data to memory '''
# get midi by index
midi = self.midi_files[index]
# get midi tracks
tracks = self.midi2tracks(midi)
# get note tracks matrix
matrix = self.tracks2matrix(tracks)
# get melody format from matrix
melody = self.matrix2melody(matrix)
# downsample over time
melody = melody[::self.ds]
# store matrix in import list
self.import_list[index] = melody
def midi2tracks(self, midi):
''' extract tracks from mido.MidiFile '''
# initialise tracks list
tracks = []
if len(midi.tracks) == 1:
ts = [0]
else:
ts = range(len(midi.tracks))[1:4]
# iterate over tracks in midi (excl. meta track, extra), [melody, chords, bass]
#for i in range(len(midi.tracks))[1:4]:
for i in ts:
# store track data as dict for processing
track = []
# iterate messages in track
for msg in midi.tracks[i][:]:
# ensure note data only
if msg.type in ['note_on', 'note_off']:
# init note data dict
note = {}
# store each note data
#note['type'] = msg.type
#note['channel'] = msg.channel
note['note'] = msg.note
note['time'] = msg.time
#note['velocity'] = msg.velocity
note['velocity'] = 0 if msg.type == 'note_off' else 1
# store note data
track.append(note)
# store track notes
tracks.append(track)
# return extracted midi tracks
return tracks
def tracks2matrix(self, tracks: list):
''' convert tracks to matrix '''
# initialise track matricies list
m = []
# iterate tracks
for track in tracks:
# initialise note state vector, 7-bit note depth
N = np.zeros(128, dtype = np.int16)
# initialise track note matrix (zero init column)
M = np.zeros((128, 1), dtype = np.int16)
# iterate messages in track
for msg in track:
# if time step changes, store intermediate notes
if int(msg['time']) != 0:
# extend note state vector over range time step
n = np.stack([ N for _ in range( int(msg['time']) ) ]).T
# append note state vector to track note matrix
M = np.concatenate( [M, n], axis = 1 )
# update value of note vector by index
N[int(msg['note'])] = int(msg['velocity'])
# store track note matrix
m.append(M)
# get max length track
s = max([ track.shape[1] for track in m ])
# pad tracks to max length of time axis, stack on new axis
M = np.stack([ np.pad(track, ((0, 0), (0, s - track.shape[1])))
for track in m ], axis = 2)
# return stacked tracks note matrix
return M
def matrix2melody(self, matrix):
''' extract melody from note matrix '''
# get track note matrix for melody only
M = matrix[:,:,0]
# init zero melody, default negative one
#melody = np.ones(M.shape[1])*-1
melody = np.zeros(M.shape[1])
# get index (note, time) where nonzero
j = np.where( M != 0 )
# set melody note at time by index
melody[j[1]] = j[0]
# return extracted melody
return melody
def __getitem__(self, index):
''' return tracks note matrix '''
# check for import state
if self.import_list[index] is None:
# import data to memory
self.import_data(index)
# return data if already imported
return self.import_list[index]
'''
def linear_quantize(samples, q_levels):
samples = samples.clone()
samples -= samples.min(dim=-1)[0].expand_as(samples)
samples /= samples.max(dim=-1)[0].expand_as(samples)
samples *= q_levels - EPSILON
samples += EPSILON / 2
return samples.long()
def linear_dequantize(samples, q_levels):
return samples.float() / (q_levels / 2) - 1
def q_zero(q_levels):
return q_levels // 2
'''
def __len__(self):
''' return total midi files '''
# return number of midi files
return len(self.file_names)
class MelodyDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size, seq_len, overlap_len,
*args, **kwargs):
super().__init__(dataset, batch_size, *args, **kwargs)
self.seq_len = seq_len
self.overlap_len = overlap_len
def __iter__(self):
for batch in super().__iter__():
(batch_size, n_samples) = batch.size()
reset = True
#print(self.overlap_len, n_samples, self.seq_len)
for seq_begin in range(self.overlap_len, n_samples, self.seq_len)[:-1]:
from_index = seq_begin - self.overlap_len
to_index = seq_begin + self.seq_len
sequences = batch[:, from_index : to_index]
input_sequences = sequences[:, : -1]
#print(input_sequences.shape)
target_sequences = sequences[:, self.overlap_len :].contiguous()
yield (input_sequences, reset, target_sequences)
reset = False
def __len__(self):
raise NotImplementedError()
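
# A toy illustration of the overlapping-window slicing performed in
# MelodyDataLoader.__iter__: each yielded input starts overlap_len samples before
# the stretch it has to predict, so the model keeps some context from the previous
# chunk.  The melody of 12 time steps below is arbitrary example data.
if __name__ == '__main__':
    seq_len, overlap_len = 4, 2
    samples = list(range(12))
    for seq_begin in range(overlap_len, len(samples), seq_len)[:-1]:
        frm, to = seq_begin - overlap_len, seq_begin + seq_len
        window = samples[frm:to]
        inputs, targets = window[:-1], window[overlap_len:]
        print(window, inputs, targets)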
|
[
"numpy.pad",
"numpy.zeros",
"numpy.where",
"os.path.join",
"os.listdir",
"numpy.concatenate"
] |
[((5349, 5369), 'numpy.zeros', 'np.zeros', (['M.shape[1]'], {}), '(M.shape[1])\n', (5357, 5369), True, 'import numpy as np\n'), ((5430, 5446), 'numpy.where', 'np.where', (['(M != 0)'], {}), '(M != 0)\n', (5438, 5446), True, 'import numpy as np\n'), ((3972, 4001), 'numpy.zeros', 'np.zeros', (['(128)'], {'dtype': 'np.int16'}), '(128, dtype=np.int16)\n', (3980, 4001), True, 'import numpy as np\n'), ((4083, 4117), 'numpy.zeros', 'np.zeros', (['(128, 1)'], {'dtype': 'np.int16'}), '((128, 1), dtype=np.int16)\n', (4091, 4117), True, 'import numpy as np\n'), ((579, 599), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (589, 599), False, 'import os\n'), ((700, 733), 'os.path.join', 'os.path.join', (['dir_path', 'file_name'], {}), '(dir_path, file_name)\n', (712, 733), False, 'import os\n'), ((4926, 4974), 'numpy.pad', 'np.pad', (['track', '((0, 0), (0, s - track.shape[1]))'], {}), '(track, ((0, 0), (0, s - track.shape[1])))\n', (4932, 4974), True, 'import numpy as np\n'), ((4538, 4568), 'numpy.concatenate', 'np.concatenate', (['[M, n]'], {'axis': '(1)'}), '([M, n], axis=1)\n', (4552, 4568), True, 'import numpy as np\n')]
|
import cv2
import math
import imutils
import numpy as np
import warnings
from sklearn.cluster import KMeans
from skimage.morphology import *
from skimage.util import *
class OD_CV:
def loadImage(self, filepath):
return cv2.imread(filepath)
def resizeImage(self, image, kar, width, height):
if kar:
return imutils.resize(image, width=width)
else:
return cv2.resize(image, (width, height))
def maskIMG(self, image, pts):
mask = np.zeros(image.shape[:2], np.uint8)
mask = cv2.drawContours(mask, [pts], -1, (255,255,255), -1)
image = cv2.bitwise_and(image.copy(), image.copy(), mask=mask)
return image
def cropIMG(self, image, coords):
return image[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]
def dmntCOLOR(self, image):
image = cv2.resize(image, (0, 0), None, 0.5, 0.5)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clt = KMeans(n_clusters=5, random_state=0).fit(image.reshape(-1, 3))
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
hist, _ = np.histogram(clt.labels_, bins=numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
palette = np.zeros((40, 200, 3), dtype="uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for percent, color in zip(hist, clt.cluster_centers_):
# plot the relative percentage of each cluster
endX = startX + (percent * 200)
cv2.rectangle(palette, (int(startX), 0), (int(endX), 40), color.astype("uint8").tolist(), -1)
startX = endX
return palette
def thinning(self, image, flag):
image = img_as_float(image)
if flag: #live streaming, faster computation
skeleton = skeletonize(image > 0)
else: # upload image mode
skeleton = skeletonize(image > 0, method='lee')
return img_as_ubyte(skeleton)
def thresholding(self, image, auto, lower, max):
if auto:
_, image = cv2.threshold(image.copy(), 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
else:
_, image = cv2.threshold(image.copy(), lower, max, cv2.THRESH_BINARY)
return image
def color_CVT(self, image, flag):
if flag==1:
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
elif flag==2:
return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
def compareIMG(self, image):
h,w = image[0].shape[:2]
bg = np.zeros((h*2+3, w*2+3, 3), np.uint8)
bg[0:h, 0:w] = image[0]
bg[0:h, w+3:w*2+3] = image[1]
bg[h+3:h*2+3, 0:w] = image[2]
bg[h+3:h*2+3, w+3:w*2+3] = image[3]
bg[0:h*2+3, w:w+3] = (255,255,255)
bg[0:h * 2 + 3, w+1:w + 2] = (0,0,0)
bg[h:h+3, 0:w*2+3] = (255,255,255)
bg[h+1:h + 2, 0:w * 2 + 3] = (0,0,0)
return bg
def Color_picker(self, color, size, wid=(10,20)):
image = np.zeros((size[0], size[1], 3), np.uint8)
image[:] = color
if wid[0]>0:
cv2.rectangle(image, (int(size[0]*.01), int(size[1]*.01)), (int(size[0]*.99), int(size[1]*.99)), (0,0,0), wid[0], cv2.LINE_AA)
if wid[1]>0:
cv2.rectangle(image, (int(size[0]*.1), int(size[1]*.1)), (int(size[0]*.9), int(size[1]*.9)), (255,255,255), wid[1], cv2.LINE_AA)
return image
def drawPrimitives(self, image, flag, points, color, thick, width=None, height=None):
if flag==1:
cv2.polylines(image, points, True, color, thick)
elif flag==2:
cv2.rectangle(image, (points[0]-10, points[1]-10), (points[0]+points[2]+10, points[1]+points[3]+10), color, thick)
elif flag==3:
x, y, w, h = points
width_Total = x+int(w*0.05)+width
if width_Total>x+w+10:
width_Total = x+w+10
cv2.rectangle(image, (x+int(w*0.05),y-10-height), (width_Total, y-10-2), color, thick)
elif flag == 4:
x, y, w, h = points
if width!=0:
w = width
cv2.rectangle(image, (x-10,y+10+h), (x+10+w, y+10+h+height), color, thick)
def drawText(self, flag, image, text, coords, fontstyle, color, thick, height=None):
font = None
if fontstyle == 0:
font = cv2.FONT_HERSHEY_COMPLEX
elif fontstyle == 1:
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
elif fontstyle == 2:
font = cv2.FONT_HERSHEY_DUPLEX
elif fontstyle == 3:
font = cv2.FONT_HERSHEY_PLAIN
elif fontstyle == 4:
font = cv2.FONT_HERSHEY_SCRIPT_COMPLEX
elif fontstyle == 5:
font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
elif fontstyle == 6:
font = cv2.FONT_HERSHEY_TRIPLEX
elif fontstyle == 7:
font = cv2.FONT_ITALIC
x, y, w, h = coords
if flag==1:
cv2.putText(image, text, (x+int(w*0.07),y-19), font, thick, color, 1)
elif flag==2:
cv2.putText(image, text, (x-10,y+10+h+height-5), font, thick, color, 1)
def canny(self, image, GK_size, GSigma, DK_size, D_i, EK_size, E_i, cAuto, cThres_L, cThres_H, isDIL, isERO, isThin=None):
imgGray = self.color_CVT(image.copy(), 1)
image = cv2.GaussianBlur(imgGray, (GK_size, GK_size), GSigma)
if cAuto:
sigma = 0.33
v = np.median(image.copy())
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
else:
lower, upper = cThres_L, cThres_H
image = cv2.Canny(image, lower, upper)
        if isThin:
            # thinning() requires the live/upload flag; True selects the faster skeletonize
            image = self.thinning(image, True)
edge = image.copy()
if isDIL:
Dial_K = np.ones((DK_size, DK_size))
image = cv2.dilate(image, Dial_K, iterations=D_i)
if isERO:
Ero_K = np.ones((EK_size, EK_size))
image = cv2.erode(image, Ero_K, iterations=E_i)
return image, edge
def sobel(self, image, GK_size, GSigma, DK_size, D_i, EK_size, E_i, Ksize, isDIL, isERO, isThin, Thres_auto, Thres_L, Thres_H, isThres, live_flag):
imgGray = self.color_CVT(image.copy(), 1)
imgBlur = cv2.GaussianBlur(imgGray, (GK_size, GK_size), GSigma)
Sobel_X = cv2.Sobel(imgBlur.copy(), cv2.CV_64F, 1, 0, ksize=Ksize)
Sobel_Y = cv2.Sobel(imgBlur.copy(), cv2.CV_64F, 0, 1, ksize=Ksize)
sobel_img = cv2.bitwise_or(cv2.convertScaleAbs(Sobel_X), cv2.convertScaleAbs(Sobel_Y))
if isThres:
sobel_img = self.thresholding(sobel_img.copy(), Thres_auto, Thres_L, Thres_H)
if isThin:
sobel_img = self.thinning(sobel_img, live_flag)
image = sobel_img
edge = image.copy()
if isDIL:
Dial_K = np.ones((DK_size, DK_size))
image = cv2.dilate(image, Dial_K, iterations=D_i)
if isERO:
Ero_K = np.ones((EK_size, EK_size))
image = cv2.erode(image, Ero_K, iterations=E_i)
return image, edge
def prewitt(self, image, GK_size, GSigma, DK_size, D_i, EK_size, E_i, isDIL, isERO, isThin, Thres_auto, Thres_L, Thres_H, isThres, live_flag):
imgGray = self.color_CVT(image.copy(), 1)
imgBlur = cv2.GaussianBlur(imgGray, (GK_size, GK_size), GSigma)
kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
kernelx2 = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
kernely2 = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
kernels = [kernelx, kernelx2, kernely, kernely2]
prewitt_img = np.zeros_like(imgGray)
for k in kernels:
prewitt_img = cv2.bitwise_or(prewitt_img, cv2.filter2D(imgBlur.copy(), -1, k))
if isThres:
prewitt_img = self.thresholding(prewitt_img.copy(), Thres_auto, Thres_L, Thres_H)
if isThin:
prewitt_img = self.thinning(prewitt_img, live_flag)
image = prewitt_img
edge = image.copy()
if isDIL:
Dial_K = np.ones((DK_size, DK_size))
image = cv2.dilate(image, Dial_K, iterations=D_i)
if isERO:
Ero_K = np.ones((EK_size, EK_size))
image = cv2.erode(image, Ero_K, iterations=E_i)
return image, edge
def getTarget_Contour(self, image, image_edg, minArea, shapes, circular, color, thick):
contours, _ = cv2.findContours(image_edg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
finalCountours = []
for c in contours:
for i, shape in enumerate(shapes):
if not shape:
continue
area = cv2.contourArea(c)
if area > minArea[i]:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
bbox = cv2.boundingRect(approx)
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
rbox = np.int0(box)
if i==0 and len(approx) == 3: #Shape >>> vertices
finalCountours.append((approx, bbox, c, i, rbox))
elif i==1 and len(approx) == 4:
finalCountours.append((approx, bbox, c, i, rbox))
elif i==2:
if len(approx) < 8:
continue
circularity = 4 * math.pi * (area / (peri*peri))
if circular[0] < circularity < circular[1]:
finalCountours.append((approx, bbox, c, i, rbox))
elif i==3:
finalCountours.append((approx, bbox, c, i, rbox))
finalCountours = sorted(finalCountours, key=lambda x:x[1], reverse=True)
if thick==0:
thick = -1
for cont in finalCountours:
cv2.drawContours(image, [cont[2]], -1, color, thick)
return finalCountours, image
def reorder(self, points):
NewPoints = np.zeros_like(points)
points = points.reshape((4,2))
add = points.sum(1)
NewPoints[0] = points[np.argmin(add)]
NewPoints[2] = points[np.argmax(add)]
d_dx = np.diff(points, axis=1)
NewPoints[1] = points[np.argmin(d_dx)]
NewPoints[3] = points[np.argmax(d_dx)]
return NewPoints
def warpImg(self, image, points, size, pad=3):
points = self.reorder(points)
# if not size:
w, h = points[1][0][0] - points[0][0][0], points[3][0][1]-points[0][0][1]
sw,sh = w/size[0], h/size[1]
# w,h = size
pts1 = np.float32(points)
pts2 = np.float32([[0,0], [w,0], [w,h], [0,h]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgWarp = cv2.warpPerspective(image, matrix, (w,h))
imgWarp = imgWarp[pad:imgWarp.shape[0]-pad, pad:imgWarp.shape[1]-pad] #remove boundary
return imgWarp, (sw,sh)
def findDist(self, flag, pts, scale, unit, deci):
unit_conv = 1
if unit[0]==0:
unit_conv = 1
elif unit[0]==1:
unit_conv = 10
elif unit[0]==2:
unit_conv = 1000
if unit[1]==0:
unit_conv /= 1
elif unit[1]==1:
unit_conv /= 10
elif unit[1]==2:
unit_conv /= 1000
def dist(pt1, pt2):
return ((pt2[0] // scale[0] - pt1[0] // scale[0]) ** 2 + (pt2[1] // scale[1] - pt1[1] // scale[1]) ** 2) ** 0.5
# if flag==1: # rect
pts = self.reorder(pts)
if flag==1: #rect
p1, p2, p3 = pts[0][0], pts[1][0], pts[3][0]
else:
p1, p2, p3 = pts[0], pts[1], pts[3]
if p1[1]==p2[1]:
newW = (p2[0]-p1[0])//scale[0]
else:
newW = dist(p1, p2)
if p1[0]==p3[0]:
newH = (p3[1]-p1[1])//scale[1]
else:
newH = dist(p1, p3)
newW = newW*unit_conv
newH = newH*unit_conv
return "{:.{}f}".format(newW, deci), "{:.{}f}".format(newH, deci)
def deviceList(self):
index = 0
arr, res = [], []
while True:
cap = cv2.VideoCapture(index)
if not cap.read()[0]:
break
else:
arr.append(str(index))
res.append((cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
cap.release()
index += 1
return arr, res
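
# A compact restatement of the automatic Canny threshold rule used in OD_CV.canny:
# with cAuto enabled, both thresholds are derived from the median intensity of the
# blurred grayscale image with sigma = 0.33.  The comment at the end only gives a
# rough example value for a uniformly random uint8 image.
def auto_canny_thresholds(gray, sigma=0.33):
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return lower, upper

# e.g. for a uniformly random uint8 image (median ~127) this gives roughly (85, 169)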
|
[
"cv2.GaussianBlur",
"numpy.argmax",
"cv2.getPerspectiveTransform",
"cv2.arcLength",
"cv2.approxPolyDP",
"numpy.ones",
"numpy.argmin",
"numpy.histogram",
"cv2.boxPoints",
"cv2.rectangle",
"imutils.resize",
"cv2.erode",
"cv2.minAreaRect",
"numpy.unique",
"cv2.warpPerspective",
"numpy.zeros_like",
"cv2.contourArea",
"warnings.simplefilter",
"cv2.dilate",
"cv2.cvtColor",
"sklearn.cluster.KMeans",
"warnings.catch_warnings",
"cv2.convertScaleAbs",
"cv2.drawContours",
"cv2.boundingRect",
"cv2.resize",
"cv2.Canny",
"numpy.int0",
"cv2.putText",
"cv2.polylines",
"numpy.float32",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.imread",
"numpy.diff",
"numpy.array"
] |
[((245, 265), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (255, 265), False, 'import cv2\n'), ((519, 554), 'numpy.zeros', 'np.zeros', (['image.shape[:2]', 'np.uint8'], {}), '(image.shape[:2], np.uint8)\n', (527, 554), True, 'import numpy as np\n'), ((571, 625), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[pts]', '(-1)', '(255, 255, 255)', '(-1)'], {}), '(mask, [pts], -1, (255, 255, 255), -1)\n', (587, 625), False, 'import cv2\n'), ((895, 936), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)', 'None', '(0.5)', '(0.5)'], {}), '(image, (0, 0), None, 0.5, 0.5)\n', (905, 936), False, 'import cv2\n'), ((1191, 1232), 'numpy.histogram', 'np.histogram', (['clt.labels_'], {'bins': 'numLabels'}), '(clt.labels_, bins=numLabels)\n', (1203, 1232), True, 'import numpy as np\n'), ((1378, 1415), 'numpy.zeros', 'np.zeros', (['(40, 200, 3)'], {'dtype': '"""uint8"""'}), "((40, 200, 3), dtype='uint8')\n", (1386, 1415), True, 'import numpy as np\n'), ((2752, 2797), 'numpy.zeros', 'np.zeros', (['(h * 2 + 3, w * 2 + 3, 3)', 'np.uint8'], {}), '((h * 2 + 3, w * 2 + 3, 3), np.uint8)\n', (2760, 2797), True, 'import numpy as np\n'), ((3219, 3260), 'numpy.zeros', 'np.zeros', (['(size[0], size[1], 3)', 'np.uint8'], {}), '((size[0], size[1], 3), np.uint8)\n', (3227, 3260), True, 'import numpy as np\n'), ((5596, 5649), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['imgGray', '(GK_size, GK_size)', 'GSigma'], {}), '(imgGray, (GK_size, GK_size), GSigma)\n', (5612, 5649), False, 'import cv2\n'), ((5999, 6029), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (6008, 6029), False, 'import cv2\n'), ((6635, 6688), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['imgGray', '(GK_size, GK_size)', 'GSigma'], {}), '(imgGray, (GK_size, GK_size), GSigma)\n', (6651, 6688), False, 'import cv2\n'), ((7695, 7748), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['imgGray', '(GK_size, GK_size)', 'GSigma'], {}), '(imgGray, (GK_size, GK_size), GSigma)\n', (7711, 7748), False, 'import cv2\n'), ((7768, 7814), 'numpy.array', 'np.array', (['[[1, 1, 1], [0, 0, 0], [-1, -1, -1]]'], {}), '([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n', (7776, 7814), True, 'import numpy as np\n'), ((7835, 7881), 'numpy.array', 'np.array', (['[[-1, -1, -1], [0, 0, 0], [1, 1, 1]]'], {}), '([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])\n', (7843, 7881), True, 'import numpy as np\n'), ((7901, 7947), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n', (7909, 7947), True, 'import numpy as np\n'), ((7968, 8014), 'numpy.array', 'np.array', (['[[1, 0, -1], [1, 0, -1], [1, 0, -1]]'], {}), '([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n', (7976, 8014), True, 'import numpy as np\n'), ((8096, 8118), 'numpy.zeros_like', 'np.zeros_like', (['imgGray'], {}), '(imgGray)\n', (8109, 8118), True, 'import numpy as np\n'), ((10586, 10607), 'numpy.zeros_like', 'np.zeros_like', (['points'], {}), '(points)\n', (10599, 10607), True, 'import numpy as np\n'), ((10787, 10810), 'numpy.diff', 'np.diff', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (10794, 10810), True, 'import numpy as np\n'), ((11209, 11227), 'numpy.float32', 'np.float32', (['points'], {}), '(points)\n', (11219, 11227), True, 'import numpy as np\n'), ((11244, 11288), 'numpy.float32', 'np.float32', (['[[0, 0], [w, 0], [w, h], [0, h]]'], {}), '([[0, 0], [w, 0], [w, h], [0, h]])\n', (11254, 11288), True, 'import numpy as np\n'), ((11303, 11342), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), 
'(pts1, pts2)\n', (11330, 11342), False, 'import cv2\n'), ((11362, 11404), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'matrix', '(w, h)'], {}), '(image, matrix, (w, h))\n', (11381, 11404), False, 'import cv2\n'), ((360, 394), 'imutils.resize', 'imutils.resize', (['image'], {'width': 'width'}), '(image, width=width)\n', (374, 394), False, 'import imutils\n'), ((430, 464), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {}), '(image, (width, height))\n', (440, 464), False, 'import cv2\n'), ((951, 976), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (974, 976), False, 'import warnings\n'), ((991, 1022), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1012, 1022), False, 'import warnings\n'), ((2545, 2584), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2557, 2584), False, 'import cv2\n'), ((3762, 3810), 'cv2.polylines', 'cv2.polylines', (['image', 'points', '(True)', 'color', 'thick'], {}), '(image, points, True, color, thick)\n', (3775, 3810), False, 'import cv2\n'), ((6162, 6189), 'numpy.ones', 'np.ones', (['(DK_size, DK_size)'], {}), '((DK_size, DK_size))\n', (6169, 6189), True, 'import numpy as np\n'), ((6211, 6252), 'cv2.dilate', 'cv2.dilate', (['image', 'Dial_K'], {'iterations': 'D_i'}), '(image, Dial_K, iterations=D_i)\n', (6221, 6252), False, 'import cv2\n'), ((6293, 6320), 'numpy.ones', 'np.ones', (['(EK_size, EK_size)'], {}), '((EK_size, EK_size))\n', (6300, 6320), True, 'import numpy as np\n'), ((6342, 6381), 'cv2.erode', 'cv2.erode', (['image', 'Ero_K'], {'iterations': 'E_i'}), '(image, Ero_K, iterations=E_i)\n', (6351, 6381), False, 'import cv2\n'), ((6877, 6905), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['Sobel_X'], {}), '(Sobel_X)\n', (6896, 6905), False, 'import cv2\n'), ((6907, 6935), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['Sobel_Y'], {}), '(Sobel_Y)\n', (6926, 6935), False, 'import cv2\n'), ((7227, 7254), 'numpy.ones', 'np.ones', (['(DK_size, DK_size)'], {}), '((DK_size, DK_size))\n', (7234, 7254), True, 'import numpy as np\n'), ((7276, 7317), 'cv2.dilate', 'cv2.dilate', (['image', 'Dial_K'], {'iterations': 'D_i'}), '(image, Dial_K, iterations=D_i)\n', (7286, 7317), False, 'import cv2\n'), ((7358, 7385), 'numpy.ones', 'np.ones', (['(EK_size, EK_size)'], {}), '((EK_size, EK_size))\n', (7365, 7385), True, 'import numpy as np\n'), ((7407, 7446), 'cv2.erode', 'cv2.erode', (['image', 'Ero_K'], {'iterations': 'E_i'}), '(image, Ero_K, iterations=E_i)\n', (7416, 7446), False, 'import cv2\n'), ((8538, 8565), 'numpy.ones', 'np.ones', (['(DK_size, DK_size)'], {}), '((DK_size, DK_size))\n', (8545, 8565), True, 'import numpy as np\n'), ((8587, 8628), 'cv2.dilate', 'cv2.dilate', (['image', 'Dial_K'], {'iterations': 'D_i'}), '(image, Dial_K, iterations=D_i)\n', (8597, 8628), False, 'import cv2\n'), ((8669, 8696), 'numpy.ones', 'np.ones', (['(EK_size, EK_size)'], {}), '((EK_size, EK_size))\n', (8676, 8696), True, 'import numpy as np\n'), ((8718, 8757), 'cv2.erode', 'cv2.erode', (['image', 'Ero_K'], {'iterations': 'E_i'}), '(image, Ero_K, iterations=E_i)\n', (8727, 8757), False, 'import cv2\n'), ((10440, 10492), 'cv2.drawContours', 'cv2.drawContours', (['image', '[cont[2]]', '(-1)', 'color', 'thick'], {}), '(image, [cont[2]], -1, color, thick)\n', (10456, 10492), False, 'import cv2\n'), ((10708, 10722), 'numpy.argmin', 'np.argmin', (['add'], {}), '(add)\n', (10717, 10722), True, 'import numpy as np\n'), ((10755, 10769), 
'numpy.argmax', 'np.argmax', (['add'], {}), '(add)\n', (10764, 10769), True, 'import numpy as np\n'), ((10842, 10857), 'numpy.argmin', 'np.argmin', (['d_dx'], {}), '(d_dx)\n', (10851, 10857), True, 'import numpy as np\n'), ((10890, 10905), 'numpy.argmax', 'np.argmax', (['d_dx'], {}), '(d_dx)\n', (10899, 10905), True, 'import numpy as np\n'), ((12808, 12831), 'cv2.VideoCapture', 'cv2.VideoCapture', (['index'], {}), '(index)\n', (12824, 12831), False, 'import cv2\n'), ((2628, 2667), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2BGR'], {}), '(image, cv2.COLOR_GRAY2BGR)\n', (2640, 2667), False, 'import cv2\n'), ((3847, 3978), 'cv2.rectangle', 'cv2.rectangle', (['image', '(points[0] - 10, points[1] - 10)', '(points[0] + points[2] + 10, points[1] + points[3] + 10)', 'color', 'thick'], {}), '(image, (points[0] - 10, points[1] - 10), (points[0] + points[\n 2] + 10, points[1] + points[3] + 10), color, thick)\n', (3860, 3978), False, 'import cv2\n'), ((5326, 5412), 'cv2.putText', 'cv2.putText', (['image', 'text', '(x - 10, y + 10 + h + height - 5)', 'font', 'thick', 'color', '(1)'], {}), '(image, text, (x - 10, y + 10 + h + height - 5), font, thick,\n color, 1)\n', (5337, 5412), False, 'import cv2\n'), ((9173, 9191), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (9188, 9191), False, 'import cv2\n'), ((1042, 1078), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(5)', 'random_state': '(0)'}), '(n_clusters=5, random_state=0)\n', (1048, 1078), False, 'from sklearn.cluster import KMeans\n'), ((1143, 1165), 'numpy.unique', 'np.unique', (['clt.labels_'], {}), '(clt.labels_)\n', (1152, 1165), True, 'import numpy as np\n'), ((9259, 9281), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (9272, 9281), False, 'import cv2\n'), ((9312, 9350), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.02 * peri)', '(True)'], {}), '(c, 0.02 * peri, True)\n', (9328, 9350), False, 'import cv2\n'), ((9379, 9403), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (9395, 9403), False, 'import cv2\n'), ((9432, 9450), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (9447, 9450), False, 'import cv2\n'), ((9478, 9497), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (9491, 9497), False, 'import cv2\n'), ((9526, 9538), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (9533, 9538), True, 'import numpy as np\n'), ((4363, 4459), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x - 10, y + 10 + h)', '(x + 10 + w, y + 10 + h + height)', 'color', 'thick'], {}), '(image, (x - 10, y + 10 + h), (x + 10 + w, y + 10 + h + height\n ), color, thick)\n', (4376, 4459), False, 'import cv2\n')]
|
import glob
import cv2
import numpy as np
def globimgs(path, globs:list):
"""returns a list of files with path with globing with more than one extensions"""
imgs = []
for i in globs:
imgs.extend(glob.glob(path + i))
paths = []
for path in imgs:
paths.append(path.replace("\\", "/"))
return paths
def scaneffects(img):
dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8))
bg_img = cv2.medianBlur(dilated_img, 15)
diff_img = 255 - cv2.absdiff(img, bg_img)
norm_img = diff_img.copy()
cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
_, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC)
cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
return thr_img
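
# A hypothetical usage sketch for the two helpers above: collect jpg/png scans from a
# folder and write a background-flattened copy next to each one.  The './scans/'
# directory and the '_clean' suffix are made-up for illustration.
if __name__ == '__main__':
    for p in globimgs('./scans/', ['*.jpg', '*.png']):
        gray = cv2.imread(p, cv2.IMREAD_GRAYSCALE)
        cv2.imwrite(p.rsplit('.', 1)[0] + '_clean.png', scaneffects(gray))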
|
[
"cv2.medianBlur",
"cv2.threshold",
"numpy.ones",
"glob.glob",
"cv2.normalize",
"cv2.absdiff"
] |
[((408, 439), 'cv2.medianBlur', 'cv2.medianBlur', (['dilated_img', '(15)'], {}), '(dilated_img, 15)\n', (422, 439), False, 'import cv2\n'), ((522, 625), 'cv2.normalize', 'cv2.normalize', (['diff_img', 'norm_img'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8UC1'}), '(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.\n NORM_MINMAX, dtype=cv2.CV_8UC1)\n', (535, 625), False, 'import cv2\n'), ((638, 687), 'cv2.threshold', 'cv2.threshold', (['norm_img', '(230)', '(0)', 'cv2.THRESH_TRUNC'], {}), '(norm_img, 230, 0, cv2.THRESH_TRUNC)\n', (651, 687), False, 'import cv2\n'), ((692, 793), 'cv2.normalize', 'cv2.normalize', (['thr_img', 'thr_img'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8UC1'}), '(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.\n NORM_MINMAX, dtype=cv2.CV_8UC1)\n', (705, 793), False, 'import cv2\n'), ((368, 393), 'numpy.ones', 'np.ones', (['(7, 7)', 'np.uint8'], {}), '((7, 7), np.uint8)\n', (375, 393), True, 'import numpy as np\n'), ((461, 485), 'cv2.absdiff', 'cv2.absdiff', (['img', 'bg_img'], {}), '(img, bg_img)\n', (472, 485), False, 'import cv2\n'), ((203, 222), 'glob.glob', 'glob.glob', (['(path + i)'], {}), '(path + i)\n', (212, 222), False, 'import glob\n')]
|
#!/usr/bin/env python
import getopt, sys, os
import numpy as np
import pyfits
from pylab import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid.inset_locator import mark_inset
#fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits'
fname_ext = sys.argv[1]
fname = fname_ext.split('.')[0]
out_fname = fname + '.png'
print('displaying ' + fname)
title_str = fname.split(os.sep)[-1]
t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T)
fig = plt.figure(1, [5,4])
ax = fig.add_subplot(111)
#imshow(t , interpolation="nearest")
#imshow((t - t.min())) ** .25, interpolation="nearest")
tt = t ** .25
tt[np.isnan(tt)] = 0
extent = [0., 192., 0., 192.]
ax.imshow(tt, extent=extent, interpolation="nearest")
tzoom = tt[135:155, 80:100,]
axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6
extent = [80., 100., 192. - 155., 192. - 135, ]
im = axins.imshow(tzoom, extent=extent, interpolation="nearest")
im.set_clim([tt.min(), tt.max()])
plt.xticks(visible=False)
plt.yticks(visible=False)
#x1, x2, y1, y2 = 80., 100., 135., 155.,
#axins.set_xlim(x1, x2)
#axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
#plt.title(title_str)
#plt.colorbar()
#plt.xlabel('Right Ascension')
#plt.ylabel('Declination')
plt.show()
fig.savefig(out_fname)
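
# A self-contained version of the zoom-inset pattern used above, drawn on synthetic
# data so it runs without a FITS file; note that newer matplotlib ships the same
# helpers under mpl_toolkits.axes_grid1.inset_locator.  Calling _inset_demo() writes
# 'inset_example.png'.
def _inset_demo():
    from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes as zia
    from mpl_toolkits.axes_grid1.inset_locator import mark_inset as mi
    data = np.random.default_rng(0).random((192, 192))
    f, ax2 = plt.subplots()
    ax2.imshow(data, interpolation="nearest")
    axins2 = zia(ax2, 2, loc=3)
    axins2.imshow(data, interpolation="nearest")
    axins2.set_xlim(80, 100)
    axins2.set_ylim(155, 135)  # inverted to match imshow's top-down y axis
    axins2.set_xticks([])
    axins2.set_yticks([])
    mi(ax2, axins2, loc1=2, loc2=4, fc="none", ec="0.5")
    f.savefig('inset_example.png')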
|
[
"pyfits.fitsopen",
"matplotlib.pyplot.show",
"mpl_toolkits.axes_grid.inset_locator.mark_inset",
"matplotlib.pyplot.yticks",
"numpy.isnan",
"matplotlib.pyplot.figure",
"mpl_toolkits.axes_grid.inset_locator.zoomed_inset_axes",
"matplotlib.pyplot.xticks"
] |
[((544, 565), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '[5, 4]'], {}), '(1, [5, 4])\n', (554, 565), True, 'import matplotlib.pyplot as plt\n'), ((842, 873), 'mpl_toolkits.axes_grid.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax', '(2)'], {'loc': '(3)'}), '(ax, 2, loc=3)\n', (859, 873), False, 'from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes\n'), ((1032, 1057), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'visible': '(False)'}), '(visible=False)\n', (1042, 1057), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1083), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'visible': '(False)'}), '(visible=False)\n', (1068, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1231), 'mpl_toolkits.axes_grid.inset_locator.mark_inset', 'mark_inset', (['ax', 'axins'], {'loc1': '(2)', 'loc2': '(4)', 'fc': '"""none"""', 'ec': '"""0.5"""'}), "(ax, axins, loc1=2, loc2=4, fc='none', ec='0.5')\n", (1183, 1231), False, 'from mpl_toolkits.axes_grid.inset_locator import mark_inset\n'), ((1329, 1339), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1337, 1339), True, 'import matplotlib.pyplot as plt\n'), ((702, 714), 'numpy.isnan', 'np.isnan', (['tt'], {}), '(tt)\n', (710, 714), True, 'import numpy as np\n'), ((500, 526), 'pyfits.fitsopen', 'pyfits.fitsopen', (['fname_ext'], {}), '(fname_ext)\n', (515, 526), False, 'import pyfits\n')]
|
# rough copy of https://github.com/geohot/tinygrad/blob/master/examples/mnist_gan.py
from simplegrad import Tensor, Device, Adam
import numpy as np
import itertools as it
from torchvision.utils import make_grid, save_image
import torch
from abc import abstractmethod
import os
def leakyrelu(x, neg_slope=0.2):
    # leaky_relu(x) = relu(x) - neg_slope * relu(-x), composed from the Tensor primitives
    return x.relu().sub(x.fork().mul(Tensor(neg_slope).mul(Tensor(-1.0))).relu())
    # torch equivalent, kept here only for reference:
    # torch.functional.F.leaky_relu(torch.tensor(x.val), negative_slope=0.2)
def random_uniform(*shape):
return np.random.uniform(-1., 1., size=shape)/np.sqrt(np.prod(shape)).astype(np.float32)
class nn:
@abstractmethod
def forward(self, x):
raise NotImplementedError
@property
def params(self):
return tuple(v for k,v in self.__dict__.items() if isinstance(v, Tensor))
class LinearGen(nn):
def __init__(self):
self.l1 = Tensor(random_uniform(128,256))
self.l2 = Tensor(random_uniform(256, 512))
self.l3 = Tensor(random_uniform(512, 1024))
self.l4 = Tensor(random_uniform(1024, 784))
def forward(self, x):
for layer in [self.l1, self.l2, self.l3]:
leakyrelu(x.dot(layer))
return x.dot(self.l4).tanh()
class LinearDisc(nn):
def __init__(self):
self.l1 = Tensor(random_uniform(784, 1024))
self.l2 = Tensor(random_uniform(1024, 512))
self.l3 = Tensor(random_uniform(512, 256))
self.l4 = Tensor(random_uniform(256, 2))
def forward(self, x):
for layer in [self.l1, self.l2, self.l3]:
leakyrelu(x.dot(layer))
return x.dot(self.l4).logsoftmax()
import gzip
def fetch(url):
import requests, tempfile, os
fp = os.path.join(tempfile.gettempdir(), url.encode()[-10:].hex())
if os.path.isfile(fp) and os.stat(fp).st_size:
with open(fp, 'rb') as f:
return f.read()
dat = requests.get(url).content
with open(fp + '.tmp', 'wb') as f:
f.write(dat)
os.rename(fp+'.tmp', fp)
return dat
def test_minst_gan():
generator = LinearGen()
discriminator = LinearDisc()
parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
x_train = parse(fetch(url = "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28*28)).astype(np.float32)
# Hyperparameters
epochs = 10
batch_size = 512
n_batches = int(len(x_train) / batch_size)
output_folder = "outputs"
ds_noise = np.random.randn(64,128).astype(np.float32)
optim_g = Adam(generator.params, learning_rate=0.0002, beta1=0.5)
optim_d = Adam(discriminator.params, learning_rate=0.0002, beta1=0.5)
def batches_generator():
batch_nr = 0
while batch_nr < n_batches:
idx = np.random.randint(0, x_train.shape[0], size=(batch_size))
image_b = x_train[idx].reshape(-1, 28*28).astype(np.float32)/255.
image_b = (image_b - 0.5)/0.5
yield image_b
batch_nr += 1
def real_label(bs):
y = np.zeros((bs,2), np.float32)
y[range(bs), [1]*bs] = -2.0
real_labels = Tensor(y)
return real_labels
def fake_label(bs):
y = np.zeros((bs,2), np.float32)
y[range(bs), [0]*bs] = -2.0
fake_labels = Tensor(y)
return fake_labels
def train_discriminator(optim, data_real, data_fake):
real_labels = real_label(batch_size)
fake_labels = fake_label(batch_size)
optim.zero_grad()
output_real = discriminator.forward(data_real)
loss_real = real_labels.mul(output_real).mean(axis=(0,1))
output_fake = discriminator.forward(data_fake)
loss_fake = fake_labels.mul(output_fake).mean(axis=(0,1))
loss_real.backward()
loss_fake.backward()
optim.step()
return loss_fake.val + loss_real.val
def train_generator(optim, data_fake):
real_labels = real_label(batch_size)
optim.zero_grad()
output = discriminator.forward(data_fake)
loss = real_labels.mul(output).mean(axis=(0,1))
loss.backward()
optim.step()
return loss.val
for epoch in range(epochs):
batches = tuple(batches_generator())
for data_real in batches:
data_real = Tensor(data_real)
noise = Tensor(np.random.randn(batch_size, 128))
data_fake = generator.forward(noise)
data_fake = Tensor(data_fake.val)
loss_d = train_discriminator(optim_d, data_real, data_fake).item()
noise = Tensor(np.random.randn(batch_size, 128))
data_fake = generator.forward(noise)
loss_g = train_generator(optim_g, data_fake).item()
# generate images after each epoch
fake_images = generator.forward(Tensor(ds_noise)).val
fake_images = (fake_images.reshape(-1, 1, 28, 28)+ 1) / 2
fake_images = make_grid(torch.tensor(fake_images))
save_image(fake_images, os.path.join(output_folder, f'image_{epoch}.jpg'))
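
# A quick numpy check of the identity that leakyrelu() above is built on:
# leaky_relu(x) = relu(x) - neg_slope * relu(-x), which is exactly what the
# relu / sub / mul chain of Tensor operations computes.
def _leakyrelu_identity_check(neg_slope=0.2):
    x = np.linspace(-3, 3, 7)
    relu = lambda a: np.maximum(a, 0.0)
    lhs = np.where(x > 0, x, neg_slope * x)    # textbook leaky ReLU
    rhs = relu(x) - neg_slope * relu(-x)         # composed form
    return np.allclose(lhs, rhs)                # True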
|
[
"numpy.random.uniform",
"os.stat",
"numpy.random.randn",
"os.rename",
"tempfile.gettempdir",
"numpy.zeros",
"gzip.decompress",
"numpy.prod",
"os.path.isfile",
"numpy.random.randint",
"requests.get",
"simplegrad.Tensor",
"os.path.join",
"simplegrad.Adam",
"torch.tensor"
] |
[((1961, 1987), 'os.rename', 'os.rename', (["(fp + '.tmp')", 'fp'], {}), "(fp + '.tmp', fp)\n", (1970, 1987), False, 'import requests, tempfile, os\n'), ((2523, 2578), 'simplegrad.Adam', 'Adam', (['generator.params'], {'learning_rate': '(0.0002)', 'beta1': '(0.5)'}), '(generator.params, learning_rate=0.0002, beta1=0.5)\n', (2527, 2578), False, 'from simplegrad import Tensor, Device, Adam\n'), ((2593, 2652), 'simplegrad.Adam', 'Adam', (['discriminator.params'], {'learning_rate': '(0.0002)', 'beta1': '(0.5)'}), '(discriminator.params, learning_rate=0.0002, beta1=0.5)\n', (2597, 2652), False, 'from simplegrad import Tensor, Device, Adam\n'), ((428, 447), 'torch.tensor', 'torch.tensor', (['x.val'], {}), '(x.val)\n', (440, 447), False, 'import torch\n'), ((509, 549), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': 'shape'}), '(-1.0, 1.0, size=shape)\n', (526, 549), True, 'import numpy as np\n'), ((1698, 1719), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1717, 1719), False, 'import requests, tempfile, os\n'), ((1754, 1772), 'os.path.isfile', 'os.path.isfile', (['fp'], {}), '(fp)\n', (1768, 1772), False, 'import requests, tempfile, os\n'), ((1871, 1888), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1883, 1888), False, 'import requests, tempfile, os\n'), ((3025, 3054), 'numpy.zeros', 'np.zeros', (['(bs, 2)', 'np.float32'], {}), '((bs, 2), np.float32)\n', (3033, 3054), True, 'import numpy as np\n'), ((3112, 3121), 'simplegrad.Tensor', 'Tensor', (['y'], {}), '(y)\n', (3118, 3121), False, 'from simplegrad import Tensor, Device, Adam\n'), ((3186, 3215), 'numpy.zeros', 'np.zeros', (['(bs, 2)', 'np.float32'], {}), '((bs, 2), np.float32)\n', (3194, 3215), True, 'import numpy as np\n'), ((3273, 3282), 'simplegrad.Tensor', 'Tensor', (['y'], {}), '(y)\n', (3279, 3282), False, 'from simplegrad import Tensor, Device, Adam\n'), ((1777, 1788), 'os.stat', 'os.stat', (['fp'], {}), '(fp)\n', (1784, 1788), False, 'import requests, tempfile, os\n'), ((2465, 2489), 'numpy.random.randn', 'np.random.randn', (['(64)', '(128)'], {}), '(64, 128)\n', (2480, 2489), True, 'import numpy as np\n'), ((2758, 2813), 'numpy.random.randint', 'np.random.randint', (['(0)', 'x_train.shape[0]'], {'size': 'batch_size'}), '(0, x_train.shape[0], size=batch_size)\n', (2775, 2813), True, 'import numpy as np\n'), ((4281, 4298), 'simplegrad.Tensor', 'Tensor', (['data_real'], {}), '(data_real)\n', (4287, 4298), False, 'from simplegrad import Tensor, Device, Adam\n'), ((4433, 4454), 'simplegrad.Tensor', 'Tensor', (['data_fake.val'], {}), '(data_fake.val)\n', (4439, 4454), False, 'from simplegrad import Tensor, Device, Adam\n'), ((4913, 4938), 'torch.tensor', 'torch.tensor', (['fake_images'], {}), '(fake_images)\n', (4925, 4938), False, 'import torch\n'), ((4972, 5021), 'os.path.join', 'os.path.join', (['output_folder', 'f"""image_{epoch}.jpg"""'], {}), "(output_folder, f'image_{epoch}.jpg')\n", (4984, 5021), False, 'import requests, tempfile, os\n'), ((4326, 4358), 'numpy.random.randn', 'np.random.randn', (['batch_size', '(128)'], {}), '(batch_size, 128)\n', (4341, 4358), True, 'import numpy as np\n'), ((4562, 4594), 'numpy.random.randn', 'np.random.randn', (['batch_size', '(128)'], {}), '(batch_size, 128)\n', (4577, 4594), True, 'import numpy as np\n'), ((4793, 4809), 'simplegrad.Tensor', 'Tensor', (['ds_noise'], {}), '(ds_noise)\n', (4799, 4809), False, 'from simplegrad import Tensor, Device, Adam\n'), ((556, 570), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (563, 570), 
True, 'import numpy as np\n'), ((2124, 2144), 'gzip.decompress', 'gzip.decompress', (['dat'], {}), '(dat)\n', (2139, 2144), False, 'import gzip\n'), ((371, 383), 'simplegrad.Tensor', 'Tensor', (['(-1.0)'], {}), '(-1.0)\n', (377, 383), False, 'from simplegrad import Tensor, Device, Adam\n'), ((349, 366), 'simplegrad.Tensor', 'Tensor', (['neg_slope'], {}), '(neg_slope)\n', (355, 366), False, 'from simplegrad import Tensor, Device, Adam\n')]
|
import numpy as np
import torch
import math
import ray
import copy
import networks
import global_config
def play_one_game(model, env_func, config, temperature, save=False, filename = ''):
game_history = GameHistory()
game = env_func(max_steps = config.max_moves, window_size = config.observation_shape[1])
observation = game.reset()
game_history.action_history.append(0)
game_history.observation_history.append(observation)
game_history.reward_history.append(0)
done = False
with torch.no_grad():
while (not done and len(game_history.action_history) <= config.max_moves):
root = MCTS(config).run(model, observation, game.actions,
False if temperature == 0 else True)
action = select_action(root, temperature
if len(game_history.action_history) < config.temperature_threshold else 0)
observation, reward, done, _ = game.step(action)
game_history.store_search_statistics(root, [i for i in range(config.action_space_size)])
game_history.action_history.append(action)
game_history.observation_history.append(observation)
game_history.reward_history.append(reward)
if save:
game.plot_toolpath(save = True, folder = config.logdir, filename = filename)
game.close()
return game_history
def select_action(node, temperature):
visit_counts = np.array(
[child.visit_count for child in node.children.values()]
)
actions = [action for action in node.children.keys()]
if temperature == 0:
action = actions[np.argmax(visit_counts)]
elif temperature == float("inf"):
action = np.random.choice(actions)
else:
visit_count_distribution = visit_counts ** (1 / temperature)
visit_count_distribution = visit_count_distribution / sum(
visit_count_distribution
)
action = np.random.choice(actions, p=visit_count_distribution)
return action
class MCTS:
def __init__(self, config):
self.config = config
def run(self, model, observation, legal_actions, add_exploration_noise):
root = Node(0)
observation = (torch.tensor(observation).float().unsqueeze(0).to(next(model.parameters()).device))
_, reward, policy_logits, hidden_state = model.initial_inference(observation)
reward = reward.item()
root.expand(legal_actions, reward, policy_logits, hidden_state)
if add_exploration_noise:
root.add_exploration_noise(
dirichlet_alpha=self.config.root_dirichlet_alpha,
exploration_fraction=self.config.root_exploration_fraction,
)
min_max_stats = MinMaxStats()
for _ in range(self.config.num_simulations):
node = root
search_path = [node]
while node.expanded():
action, node = self.select_child(node, min_max_stats)
search_path.append(node)
parent = search_path[-2]
value, reward, policy_logits, hidden_state = model.recurrent_inference(
parent.hidden_state,
torch.tensor([[action]]).to(parent.hidden_state.device),
)
value = networks.support_to_scalar(value).item()
reward = reward.item()
node.expand(
[i for i in range(self.config.action_space_size)],
reward,
policy_logits,
hidden_state,
)
self.backpropagate(
search_path, value, min_max_stats
)
return root
def select_child(self, node, min_max_stats):
max_ucb = max(self.ucb_score(node, child, min_max_stats) for action, child in node.children.items())
action = np.random.choice([action for action, child in node.children.items() if self.ucb_score(node, child, min_max_stats) == max_ucb])
return action, node.children[action]
def ucb_score(self, parent, child, min_max_stats):
pb_c = (
math.log(
(parent.visit_count + self.config.pb_c_base + 1) / self.config.pb_c_base
)
+ self.config.pb_c_init
)
pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
prior_score = pb_c * child.prior
if child.visit_count > 0:
value_score = min_max_stats.normalize(
child.reward + self.config.discount * child.value()
)
else:
value_score = 0
return prior_score + value_score
def backpropagate(self, search_path, value, min_max_stats):
for node in reversed(search_path):
node.value_sum += value #if node.to_play == to_play else -value
node.visit_count += 1
min_max_stats.update(node.reward + self.config.discount * node.value())
value = node.reward + self.config.discount * value
class Node:
def __init__(self, prior):
self.visit_count = 0
self.prior = prior
self.value_sum = 0
self.children = {}
self.hidden_state = None
self.reward = 0
def expanded(self):
return len(self.children) > 0
def value(self):
if self.visit_count == 0:
return 0
return self.value_sum / self.visit_count
def expand(self, actions, reward, policy_logits, hidden_state):
self.reward = reward
self.hidden_state = hidden_state
policy = {}
for a in actions:
try:
policy[a] = 1/sum(torch.exp(policy_logits[0] - policy_logits[0][a]))
except OverflowError:
print("Warning: prior has been approximated")
policy[a] = 0.0
for action, p in policy.items():
self.children[action] = Node(p)
def add_exploration_noise(self, dirichlet_alpha, exploration_fraction):
actions = list(self.children.keys())
noise = np.random.dirichlet([dirichlet_alpha] * len(actions))
frac = exploration_fraction
for a, n in zip(actions, noise):
self.children[a].prior = self.children[a].prior * (1 - frac) + n * frac
class GameHistory:
def __init__(self):
self.observation_history = []
self.action_history = []
self.reward_history = []
self.child_visits = []
self.root_values = []
def store_search_statistics(self, root, action_space):
if root is not None:
sum_visits = sum(child.visit_count for child in root.children.values())
self.child_visits.append([root.children[a].visit_count / sum_visits
if a in root.children else 0 for a in action_space])
self.root_values.append(root.value())
else:
self.root_values.append(None)
class MinMaxStats:
def __init__(self):
self.maximum = -float("inf")
self.minimum = float("inf")
def update(self, value):
self.maximum = max(self.maximum, value)
self.minimum = min(self.minimum, value)
def normalize(self, value):
if self.maximum > self.minimum:
return (value - self.minimum) / (self.maximum - self.minimum)
return value
if global_config.use_ray:
play_one_game = ray.remote(play_one_game)
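
# A numpy sketch of the temperature-scaled visit-count sampling done in
# select_action above: temperature 0 is a greedy argmax, higher temperatures
# flatten the distribution.  The visit counts below are arbitrary example numbers.
def _select_from_visits(visit_counts, temperature, seed=0):
    if temperature == 0:
        return int(np.argmax(visit_counts))
    dist = np.asarray(visit_counts, dtype=float) ** (1.0 / temperature)
    dist /= dist.sum()
    return int(np.random.default_rng(seed).choice(len(visit_counts), p=dist))

# e.g. _select_from_visits([40, 10, 3, 1], 0) is always 0, while temperature 1.0
# picks action 0 about 40/54 of the time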
|
[
"ray.remote",
"torch.tensor",
"math.sqrt",
"numpy.argmax",
"torch.exp",
"numpy.random.choice",
"math.log",
"torch.no_grad",
"networks.support_to_scalar"
] |
[((7362, 7387), 'ray.remote', 'ray.remote', (['play_one_game'], {}), '(play_one_game)\n', (7372, 7387), False, 'import ray\n'), ((518, 533), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (531, 533), False, 'import torch\n'), ((1616, 1639), 'numpy.argmax', 'np.argmax', (['visit_counts'], {}), '(visit_counts)\n', (1625, 1639), True, 'import numpy as np\n'), ((1696, 1721), 'numpy.random.choice', 'np.random.choice', (['actions'], {}), '(actions)\n', (1712, 1721), True, 'import numpy as np\n'), ((1932, 1985), 'numpy.random.choice', 'np.random.choice', (['actions'], {'p': 'visit_count_distribution'}), '(actions, p=visit_count_distribution)\n', (1948, 1985), True, 'import numpy as np\n'), ((4088, 4175), 'math.log', 'math.log', (['((parent.visit_count + self.config.pb_c_base + 1) / self.config.pb_c_base)'], {}), '((parent.visit_count + self.config.pb_c_base + 1) / self.config.\n pb_c_base)\n', (4096, 4175), False, 'import math\n'), ((4263, 4292), 'math.sqrt', 'math.sqrt', (['parent.visit_count'], {}), '(parent.visit_count)\n', (4272, 4292), False, 'import math\n'), ((3270, 3303), 'networks.support_to_scalar', 'networks.support_to_scalar', (['value'], {}), '(value)\n', (3296, 3303), False, 'import networks\n'), ((3179, 3203), 'torch.tensor', 'torch.tensor', (['[[action]]'], {}), '([[action]])\n', (3191, 3203), False, 'import torch\n'), ((5615, 5664), 'torch.exp', 'torch.exp', (['(policy_logits[0] - policy_logits[0][a])'], {}), '(policy_logits[0] - policy_logits[0][a])\n', (5624, 5664), False, 'import torch\n'), ((2204, 2229), 'torch.tensor', 'torch.tensor', (['observation'], {}), '(observation)\n', (2216, 2229), False, 'import torch\n')]
|
import numpy
import sys
import scipy
from scipy.signal import find_peaks_cwt
import matplotlib.pyplot as plt
from headbang.params import DEFAULTS
from headbang.util import find_closest
openpose_install_path = "/home/sevagh/thirdparty-repos/openpose"
openpose_dir = openpose_install_path
sys.path.append(openpose_dir + "/build/python/openpose")
import pyopenpose as op
class OpenposeDetector:
undef_coord_default = numpy.nan
object_limit = 3
min_confidence = 0.5
def __init__(
self,
n_frames,
frame_duration,
keypoints=DEFAULTS["pose_keypoints"],
):
config = {}
config["logging_level"] = 3
config["net_resolution"] = "320x320"
config["model_pose"] = "BODY_25"
config["alpha_pose"] = 0.6
config["scale_gap"] = 0.3
config["scale_number"] = 1
config["render_threshold"] = 0.05
config["num_gpu_start"] = 0
config["disable_blending"] = False
config["model_folder"] = openpose_dir + "/models/"
self.opWrapper = op.WrapperPython()
self.opWrapper.configure(config)
self.opWrapper.start()
self.keypoints = [int(i) for i in keypoints.split(",")]
self.n_frames = int(n_frames)
self.all_y_coords = [OpenposeDetector.undef_coord_default] * self.n_frames
self.frame_idx = 0
self.frame_duration = frame_duration
self.total_duration = self.frame_duration * self.n_frames
print("Started OpenposeDetector for keypoints {0}".format(self.keypoints))
def detect_pose(self, image):
datum = op.Datum()
datum.cvInputData = image
ret = self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))
if not ret:
raise ValueError("couldn't emplaceAndPop")
return datum.poseKeypoints, datum.cvOutputData
def process_frame(self, frame):
multiple_detected_poses, outframe = self.detect_pose(frame)
if multiple_detected_poses is not None:
poses_of_interest = []
# collect (x, y) coordinates of the head, median across the first object_limit objects
for detected_poses in multiple_detected_poses[
: OpenposeDetector.object_limit
]:
for keypoint, d in enumerate(detected_poses):
if (
keypoint in self.keypoints
and d[2] > OpenposeDetector.min_confidence
):
poses_of_interest.append((d[0], d[1]))
poses_of_interest = numpy.asarray(poses_of_interest)
median_coords = numpy.median(poses_of_interest, axis=0)
if not numpy.any(numpy.isnan(median_coords)):
median_y = median_coords[1]
y_norm = median_y / frame.shape[0]
self.all_y_coords[self.frame_idx] = y_norm
self.frame_idx += 1
return outframe
def find_peaks(self):
min_coord = numpy.nanmin(self.all_y_coords)
adjusted_y_coords = numpy.nan_to_num(self.all_y_coords, nan=min_coord)
# wavelets are good for peaks
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2631518/
peaks = find_peaks_cwt(adjusted_y_coords, numpy.arange(2, 4))
peaks = peaks[numpy.where(numpy.diff(peaks) > 11)[0]]
return peaks
def plot_ycoords(
self, bop_bpm_plot_history, debug_bpm=False, debug_bpm_frame_skip=30
):
plt.figure(1)
plt.title("normalized median y coordinate motion")
plt.xlabel("time (s)")
plt.ylabel("normalized y coordinate")
frame_times = numpy.arange(0.0, self.total_duration, self.frame_duration)
peaks = self.find_peaks()
y_coords = numpy.asarray(self.all_y_coords)
plt.plot(
frame_times,
y_coords,
"-D",
markevery=peaks,
mec="black",
)
if debug_bpm:
# only annotate the bpm every debug_bpm_frame_skip frames
for i, bop_bpm_hist in enumerate(
bop_bpm_plot_history[:-debug_bpm_frame_skip]
):
if i % debug_bpm_frame_skip != 0:
continue
bop_times, bpm = bop_bpm_hist
x = find_closest(frame_times, bop_times)
if x.size > 2:
text_x = (
frame_times[x[-2]]
+ (frame_times[x[-1]] - frame_times[x[-2]]) / 2
)
y = y_coords[x]
text_y = max(y) + 0.03
plt.plot(frame_times[x], y, "r")
plt.text(text_x, text_y, "{0}".format(int(round(bpm))))
plt.grid()
plt.show()
def bpm_from_beats(beats):
if beats.size == 0:
return 0
m_res = scipy.stats.linregress(numpy.arange(len(beats)), beats)
beat_step = m_res.slope
return 60 / beat_step
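# --- Hedged illustration (not part of the original file) ---
# bpm_from_beats fits beat index -> beat time with a straight line; the slope is
# the mean inter-beat interval in seconds, so 60 / slope is the tempo in BPM.
def _demo_bpm_from_beats():
    beats = numpy.array([0.0, 0.5, 1.0, 1.5])  # beats 0.5 s apart
    return bpm_from_beats(beats)  # -> 120.0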
def align_beats_motion(beats, motion, thresh):
i = 0
j = 0
aligned_beats = []
while i < len(motion) and j < len(beats):
curr_motion = motion[i]
curr_beat = beats[j]
if numpy.abs(curr_motion - curr_beat) <= thresh:
aligned_beats.append(min(curr_motion, curr_beat))
i += 1
j += 1
continue
if curr_beat < curr_motion:
# increment beats
j += 1
elif curr_beat > curr_motion:
i += 1
return aligned_beats
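# --- Hedged usage sketch (not part of the original file) ---
# align_beats_motion keeps only the events where an audio beat and a head-motion
# peak fall within `thresh` seconds of each other, e.g.:
#   align_beats_motion(beats=[0.52, 1.4, 2.05], motion=[0.5, 1.0, 2.0], thresh=0.1)
#   -> [0.5, 2.0]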
|
[
"matplotlib.pyplot.title",
"numpy.abs",
"numpy.nan_to_num",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.arange",
"sys.path.append",
"pyopenpose.WrapperPython",
"matplotlib.pyplot.show",
"numpy.median",
"numpy.asarray",
"pyopenpose.VectorDatum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"pyopenpose.Datum",
"headbang.util.find_closest",
"numpy.nanmin",
"numpy.diff",
"matplotlib.pyplot.xlabel"
] |
[((289, 345), 'sys.path.append', 'sys.path.append', (["(openpose_dir + '/build/python/openpose')"], {}), "(openpose_dir + '/build/python/openpose')\n", (304, 345), False, 'import sys\n'), ((1059, 1077), 'pyopenpose.WrapperPython', 'op.WrapperPython', ([], {}), '()\n', (1075, 1077), True, 'import pyopenpose as op\n'), ((1610, 1620), 'pyopenpose.Datum', 'op.Datum', ([], {}), '()\n', (1618, 1620), True, 'import pyopenpose as op\n'), ((3002, 3033), 'numpy.nanmin', 'numpy.nanmin', (['self.all_y_coords'], {}), '(self.all_y_coords)\n', (3014, 3033), False, 'import numpy\n'), ((3062, 3112), 'numpy.nan_to_num', 'numpy.nan_to_num', (['self.all_y_coords'], {'nan': 'min_coord'}), '(self.all_y_coords, nan=min_coord)\n', (3078, 3112), False, 'import numpy\n'), ((3484, 3497), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3494, 3497), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3556), 'matplotlib.pyplot.title', 'plt.title', (['"""normalized median y coordinate motion"""'], {}), "('normalized median y coordinate motion')\n", (3515, 3556), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3588), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (3576, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3597, 3634), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""normalized y coordinate"""'], {}), "('normalized y coordinate')\n", (3607, 3634), True, 'import matplotlib.pyplot as plt\n'), ((3658, 3717), 'numpy.arange', 'numpy.arange', (['(0.0)', 'self.total_duration', 'self.frame_duration'], {}), '(0.0, self.total_duration, self.frame_duration)\n', (3670, 3717), False, 'import numpy\n'), ((3772, 3804), 'numpy.asarray', 'numpy.asarray', (['self.all_y_coords'], {}), '(self.all_y_coords)\n', (3785, 3804), False, 'import numpy\n'), ((3814, 3881), 'matplotlib.pyplot.plot', 'plt.plot', (['frame_times', 'y_coords', '"""-D"""'], {'markevery': 'peaks', 'mec': '"""black"""'}), "(frame_times, y_coords, '-D', markevery=peaks, mec='black')\n", (3822, 3881), True, 'import matplotlib.pyplot as plt\n'), ((4746, 4756), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4754, 4756), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4773, 4775), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1721), 'pyopenpose.VectorDatum', 'op.VectorDatum', (['[datum]'], {}), '([datum])\n', (1712, 1721), True, 'import pyopenpose as op\n'), ((2588, 2620), 'numpy.asarray', 'numpy.asarray', (['poses_of_interest'], {}), '(poses_of_interest)\n', (2601, 2620), False, 'import numpy\n'), ((2649, 2688), 'numpy.median', 'numpy.median', (['poses_of_interest'], {'axis': '(0)'}), '(poses_of_interest, axis=0)\n', (2661, 2688), False, 'import numpy\n'), ((3266, 3284), 'numpy.arange', 'numpy.arange', (['(2)', '(4)'], {}), '(2, 4)\n', (3278, 3284), False, 'import numpy\n'), ((5181, 5215), 'numpy.abs', 'numpy.abs', (['(curr_motion - curr_beat)'], {}), '(curr_motion - curr_beat)\n', (5190, 5215), False, 'import numpy\n'), ((4291, 4327), 'headbang.util.find_closest', 'find_closest', (['frame_times', 'bop_times'], {}), '(frame_times, bop_times)\n', (4303, 4327), False, 'from headbang.util import find_closest\n'), ((2719, 2745), 'numpy.isnan', 'numpy.isnan', (['median_coords'], {}), '(median_coords)\n', (2730, 2745), False, 'import numpy\n'), ((4628, 4660), 'matplotlib.pyplot.plot', 'plt.plot', (['frame_times[x]', 'y', '"""r"""'], {}), "(frame_times[x], y, 'r')\n", (4636, 4660), True, 'import matplotlib.pyplot as plt\n'), 
((3320, 3337), 'numpy.diff', 'numpy.diff', (['peaks'], {}), '(peaks)\n', (3330, 3337), False, 'import numpy\n')]
|
import sys
cmd_folder = "../../../vis" # nopep8
if cmd_folder not in sys.path: # nopep8
sys.path.insert(0, cmd_folder)
from tile_mov import tile_movie
from make_mov import make_all, get_particle_trajectories
import pylab as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
# ==============================================================================
# MAKE MOVIES
# ==============================================================================
def smooth_limits(vmin, vmax):
from scipy.signal import savgol_filter
vmin = savgol_filter(vmin, 11, 3)
vmax = savgol_filter(vmax, 11, 3)
return vmin, vmax
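# --- Hedged note (not part of the original script) ---
# smooth_limits runs a Savitzky-Golay filter (window length 11, polynomial order 3)
# over the per-frame colour limits, presumably so the movie's colour scale varies
# smoothly instead of flickering from frame to frame.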
def get_number_density(ds, c):
x, r = ds.get("rho-%s"%c["component"])
x, m = ds.get("mass-%s"%c["component"], grid='node')
return {"x":x[0], "y":x[1], "value":r/m}
def get_D_mag(ds, c):
x, Dx = ds.get("x_D-field")
x, Dy = ds.get("y_D-field")
return {"x":x[0], "y":x[1], "value":np.sqrt(Dx**2 + Dy**2)}
def get_Bz(ds, c):
x, Bz = ds.get("z_B-field")
return {"x":x[0], "y":x[1], "value":Bz}
def plot(frame, data, output_name):
xn = data["nd-ion"]["x"][()]
yn = data["nd-ion"]["y"][()]
ni = data["nd-ion"]["value"][()]
ni_min = frame["nd-ion"]["min"]
ni_max = frame["nd-ion"]["max"]
ne = data["nd-electron"]["value"][()]
ne_min = frame["nd-electron"]["min"]
ne_max = frame["nd-electron"]["max"]
D = data["D"]["value"][()]
D_min = frame["D"]["min"]
D_max = frame["D"]["max"]
B = data["B"]["value"][()]
B_min = frame["B"]["min"]
B_max = frame["B"]["max"]
x = np.concatenate((-xn[::-1][0:-1], xn))
y = np.concatenate((-yn[::-1][0:-1], yn))
y, x = np.meshgrid(y, x)
axes = []
# join the data
nx = xn.size - 1
ny = yn.size - 1
fig = plt.figure(figsize=(3,3))
gs = gridspec.GridSpec(ncols=1, nrows=1, hspace=0.01, wspace=0.01)
ax = fig.add_subplot(gs[0,0]); axes.append(ax)
# number densities
J = np.zeros((2*nx, 2*ny))*np.nan
# J[0:nx, 0:ny] = np.rot90(ne.T,2)
J[0:nx, ny::] = np.rot90(ne)
# J[nx::, 0:ny] = np.rot90(ni.T,3)
J[nx::, ny::] = ni
vmin = min(ne_min, ni_min)
vmax = max(ne_max, ni_max)
pcm = ax.pcolormesh(x, y, J, vmin=vmin, vmax=vmax)
ax.text(0.025, 0.975, r'$n_e$', horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes, fontsize=10)
ax.text(0.975, 0.975, r'$n_i$', horizontalalignment='right',
verticalalignment='top', transform=ax.transAxes, fontsize=10)
# fields
J = np.zeros((2*nx, 2*ny))*np.nan
J[0:nx, 0:ny] = np.rot90(D.T,2)
pcm = ax.pcolormesh(x, y, J, vmin=D_min, vmax=D_max)
J = np.zeros((2*nx, 2*ny))*np.nan
J[nx::, 0:ny] = np.rot90(B.T,3)
big = max(abs(B_max), abs(B_min))
pcm = ax.pcolormesh(x, y, J, vmin=-big, vmax=big, cmap="bwr")
ax.text(0.025, 0.025, r'$\left|\vec{D}\right|$', horizontalalignment='left',
verticalalignment='bottom', transform=ax.transAxes, fontsize=10)
ax.text(0.975, 0.025, r'$B_z$', horizontalalignment='right',
verticalalignment='bottom', transform=ax.transAxes, fontsize=10)
for ax in axes:
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect(1)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
# fig.tight_layout()
fig.savefig(output_name, dpi=300, bbox_inches="tight")
plt.close(fig)
return
if 1:
Q = []
q = {}
q["files_dir"] = "."
q["level"] = -1
q["get"] = [
{"func":get_number_density, "tag":"nd-ion", "component":"ion"},
{"func":get_number_density, "tag":"nd-electron", "component":"electron"},
{"func":get_D_mag, "tag":"D"},
{"func":get_Bz, "tag":"B"}
]
q["plot"] = plot
q["name"] = "movie"
dt = 0.005
##
q["framerate"] = 20
q["mov_save"] = q["files_dir"] + "/mov"
q["offset"] = [0.0, 0.0]
q["xy_limits"] = [[0,0], [4,4]]
q["file_include"] = ["TRMI.plt"]
q["file_exclude"] = []
q["cores"] = 11
q["time_span"] = [] #np.arange(1.95,2+dt, dt)
q["force_data"] = False
q["force_frames"] = True
q["only_frames"] = False
q["redo_streaks"] = False
q["dpi"] = 300
q["normalize"] = "none" #{"smooth":smooth_limits}
Q.append(q)
make_all(Q)
print("DONE")
|
[
"pylab.close",
"scipy.signal.savgol_filter",
"numpy.meshgrid",
"pylab.rcParams.update",
"numpy.zeros",
"sys.path.insert",
"make_mov.make_all",
"numpy.rot90",
"pylab.figure",
"matplotlib.gridspec.GridSpec",
"numpy.concatenate",
"numpy.sqrt"
] |
[((349, 458), 'pylab.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.family': 'sans-serif', 'font.sans-serif': [\n 'Helvetica']}"], {}), "({'text.usetex': True, 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica']})\n", (368, 458), True, 'import pylab as plt\n'), ((94, 124), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_folder'], {}), '(0, cmd_folder)\n', (109, 124), False, 'import sys\n'), ((733, 759), 'scipy.signal.savgol_filter', 'savgol_filter', (['vmin', '(11)', '(3)'], {}), '(vmin, 11, 3)\n', (746, 759), False, 'from scipy.signal import savgol_filter\n'), ((771, 797), 'scipy.signal.savgol_filter', 'savgol_filter', (['vmax', '(11)', '(3)'], {}), '(vmax, 11, 3)\n', (784, 797), False, 'from scipy.signal import savgol_filter\n'), ((1781, 1818), 'numpy.concatenate', 'np.concatenate', (['(-xn[::-1][0:-1], xn)'], {}), '((-xn[::-1][0:-1], xn))\n', (1795, 1818), True, 'import numpy as np\n'), ((1827, 1864), 'numpy.concatenate', 'np.concatenate', (['(-yn[::-1][0:-1], yn)'], {}), '((-yn[::-1][0:-1], yn))\n', (1841, 1864), True, 'import numpy as np\n'), ((1877, 1894), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (1888, 1894), True, 'import numpy as np\n'), ((1985, 2011), 'pylab.figure', 'plt.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (1995, 2011), True, 'import pylab as plt\n'), ((2020, 2081), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(1)', 'nrows': '(1)', 'hspace': '(0.01)', 'wspace': '(0.01)'}), '(ncols=1, nrows=1, hspace=0.01, wspace=0.01)\n', (2037, 2081), True, 'import matplotlib.gridspec as gridspec\n'), ((2255, 2267), 'numpy.rot90', 'np.rot90', (['ne'], {}), '(ne)\n', (2263, 2267), True, 'import numpy as np\n'), ((2787, 2803), 'numpy.rot90', 'np.rot90', (['D.T', '(2)'], {}), '(D.T, 2)\n', (2795, 2803), True, 'import numpy as np\n'), ((2920, 2936), 'numpy.rot90', 'np.rot90', (['B.T', '(3)'], {}), '(B.T, 3)\n', (2928, 2936), True, 'import numpy as np\n'), ((3600, 3614), 'pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (3609, 3614), True, 'import pylab as plt\n'), ((4509, 4520), 'make_mov.make_all', 'make_all', (['Q'], {}), '(Q)\n', (4517, 4520), False, 'from make_mov import make_all, get_particle_trajectories\n'), ((1127, 1153), 'numpy.sqrt', 'np.sqrt', (['(Dx ** 2 + Dy ** 2)'], {}), '(Dx ** 2 + Dy ** 2)\n', (1134, 1153), True, 'import numpy as np\n'), ((2165, 2191), 'numpy.zeros', 'np.zeros', (['(2 * nx, 2 * ny)'], {}), '((2 * nx, 2 * ny))\n', (2173, 2191), True, 'import numpy as np\n'), ((2737, 2763), 'numpy.zeros', 'np.zeros', (['(2 * nx, 2 * ny)'], {}), '((2 * nx, 2 * ny))\n', (2745, 2763), True, 'import numpy as np\n'), ((2870, 2896), 'numpy.zeros', 'np.zeros', (['(2 * nx, 2 * ny)'], {}), '((2 * nx, 2 * ny))\n', (2878, 2896), True, 'import numpy as np\n')]
|
# Copyright 2020 Yalfoosh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Optional, Tuple, Union
import numpy as np
from . import constants
from .function import Function
def clean_nelder_mead_simplex_search_arguments(
function: Function,
alpha: float,
beta: float,
gamma: float,
sigma: float,
use_jakobovic_expand: bool,
epsilon: float,
max_iterations: int,
verbosity: Optional[str],
decimal_precision: int,
) -> Tuple[Function, float, float, float, float, bool, float, int, int, int]:
"""
Checks the Nelder Mead Simplex Search arguments and returns them prepared for work.
Args:
function (Function): A Function representing the loss function.
alpha (float): A float used in point reflection.
beta (float): A float used in point contraction.
gamma (float): A float used in point expansion.
sigma (float): A float used when moving points to the optimum.
use_jakobovic_expand (bool): A bool determining whether or not to use the
__expand_jakobovic method instead of the __expand method for point expansion.
Defaults to False.
epsilon (float): A float representing the error threshold.
max_iterations (int): An int representing the maximum number of iterations
before the algorithm times out and returns the last found optimum.
verbosity (Optional[str]): A str representing the verbosity of the output during
algorithm execution.
decimal_precision (int): An int representing the number of decimal digits to
round numbers outputted during algorithm execution.
Raises:
TypeError: Raised if argument function is not a Function.
TypeError: Raised if argument alpha is not a float.
TypeError: Raised if argument beta is not a float.
TypeError: Raised if argument gamma is not a float.
TypeError: Raised if argument sigma is not a float.
TypeError: Raised if argument use_jakobovic_expand is not a bool.
TypeError: Raised if argument epsilon is not a float.
ValueError: Raised if argument epsilon is a negative number.
TypeError: Raised if argument max_iterations is not an int.
ValueError: Raised if argument max_iterations is not a positive integer.
TypeError: Raised if argument verbosity is not a str.
KeyError: Raised if argument verbosity is an invalid key.
TypeError: Raised if argument decimal_precision is not an int.
ValueError: Raised if argument decimal_precision is not a positive integer.
Returns:
Tuple[Function, float, float, float, float, bool, float, int, int, int]: Cleaned
arguments.
"""
if not isinstance(function, Function):
raise TypeError(
"Expected argument function to be a Function, instead it is "
f"{type(function)}."
)
if isinstance(alpha, int):
alpha = float(alpha)
if not isinstance(alpha, float):
raise TypeError(
"Expected argument alpha to be a float, instead it is " f"{type(alpha)}."
)
if isinstance(beta, int):
beta = float(beta)
if not isinstance(beta, float):
raise TypeError(
"Expected argument beta to be a float, instead it is " f"{type(beta)}."
)
if isinstance(gamma, int):
gamma = float(gamma)
if not isinstance(gamma, float):
raise TypeError(
"Expected argument gamma to be a float, instead it is " f"{type(gamma)}."
)
if isinstance(sigma, int):
sigma = float(sigma)
if not isinstance(sigma, float):
raise TypeError(
"Expected argument sigma to be a float, instead it is " f"{type(sigma)}."
)
if not isinstance(use_jakobovic_expand, bool):
raise TypeError(
"Expected argument use_jakobovic_expand to be a bool, instead it is "
f"{type(use_jakobovic_expand)}."
)
if not isinstance(epsilon, float):
raise TypeError(
"Expected argument epsilon to be a float, instead it is "
f"{type(epsilon)}."
)
if epsilon < 0:
raise ValueError(
"Expected argument epsilon to be a positive float, instead it is "
f"{epsilon}."
)
if not isinstance(max_iterations, int):
raise TypeError(
"Expected argument max_interations to be an int, instead it is "
f"{type(max_iterations)}."
)
if max_iterations < 1:
raise ValueError(
"Expected argument max_interations to be a positive integer, instead it is "
f"{max_iterations}."
)
if verbosity is None:
verbosity = "none"
if not isinstance(verbosity, str):
raise TypeError(
f"Expected argument verbosity to be a str, instead it is {type(verbosity)}."
)
if verbosity not in constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT:
verbosity_dict_length = len(constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT)
if verbosity_dict_length == 0:
verbosity_string = "There are no keys available."
elif verbosity_dict_length == 1:
_key = list(constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT.keys())[0]
verbosity_string = f'The only available key is "{_key}".'
else:
_keys = list(sorted(constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT.keys()))
verbosity_string = "The available keys are "
verbosity_string += ", ".join([str(f'"{x}"') for x in _keys[:-1]])
verbosity_string += f' and "{_keys[-1]}".'
raise KeyError(
f'Verbosity key "{verbosity}" is not in the Nelder Mead Simplex Verbosity '
f"dictionary. {verbosity_string}"
)
verbosity = constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT[verbosity]
if not isinstance(decimal_precision, int):
raise TypeError(
"Expected argument decimal_precision to be an int, instead it is "
f"{type(decimal_precision)}."
)
if decimal_precision < 1:
raise ValueError(
"Expected argument decimal_precision to be a positive int, instead it is"
f"{decimal_precision}."
)
return (
function,
alpha,
beta,
gamma,
sigma,
use_jakobovic_expand,
epsilon,
max_iterations,
verbosity,
decimal_precision,
)
def clean_get_simplex_points(
start: np.ndarray, stride: Union[float, int]
) -> Tuple[np.ndarray, float]:
"""
Checks the __get_simplex_points arguments and returns them prepared for work.
Args:
start (np.ndarray): A numpy.ndarray representing the starting point for simplex
generation.
stride (Union[float, int]): A float or int representing the stride.
Raises:
TypeError: Raised if argument start is not a numpy.ndarray.
ValueError: Raised if argument start is a zero-length vector.
TypeError: Raised if argument stride is not a float or int.
Returns:
Tuple[np.ndarray, float]: Cleaned arguments.
"""
if not isinstance(start, np.ndarray):
raise TypeError(
"Expected argument start to be a numpy.ndarray, instead it is "
f"{type(start)}."
)
start = np.reshape(start, -1)
if start.shape[0] == 0:
raise ValueError(
"Expected argument starting point to be a vector with at least one "
"element, instead it is empty."
)
if not isinstance(stride, (float, int)):
raise TypeError(
"Expected argument stride to be a float or int, instead it is "
f"{type(stride)}."
)
stride = float(stride)
return start, stride
def __get_simplex_points(start: np.ndarray, stride: float) -> np.ndarray:
"""
Generates simplex points for a starting point.
Args:
start (np.ndarray): A numpy.ndarray representing the starting point for simplex
generation.
stride (float): A float representing the stride.
Returns:
np.ndarray: A matrix with each row representing a point of the simplex.
"""
points = np.tile(start, reps=(start.shape[0], 1))
points = points + stride * np.eye(points.shape[0])
return np.vstack([start, points])
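# --- Hedged illustration (not part of the original module) ---
# For start = [0, 0] and stride = 1 the generated simplex is
#   [[0, 0],
#    [1, 0],
#    [0, 1]]
# i.e. the starting point plus one point offset by `stride` along each axis.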
def __reflect(
centroid: np.ndarray, maximum_point: np.ndarray, alpha: float
) -> np.ndarray:
"""
Reflects argument maximum_point wrt centroid by argument alpha.
Args:
centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.
maximum_point (np.ndarray): A numpy.ndarray representing the worst point of a
simplex.
alpha (float): A float representing the amount a point will be reflected.
Returns:
np.ndarray: A numpy.ndarray representing the reflected point.
"""
return (1 + alpha) * centroid - alpha * maximum_point
def __contract(
centroid: np.ndarray, maximum_point: np.ndarray, beta: float
) -> np.ndarray:
"""
Contracts argument maximum_point wrt centroid by argument beta.
Args:
centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.
maximum_point (np.ndarray): A numpy.ndarray representing the worst point of a
simplex.
beta (float): A float representing the amount a point will be contracted.
Returns:
np.ndarray: A numpy.ndarray representing the contracted point.
"""
return (1 - beta) * centroid + beta * maximum_point
def __expand(
centroid: np.ndarray, reflected_point: np.ndarray, gamma: float
) -> np.ndarray:
"""
Expands argument reflected_point wrt centroid by argument gamma.
Args:
centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.
reflected_point (np.ndarray): A numpy.ndarray representing the reflected worst
point of a simplex.
gamma (float): A float representing the amount a point will be expanded.
Returns:
np.ndarray: A numpy.ndarray representing the expanded point.
"""
return (1 - gamma) * centroid + gamma * reflected_point
def __expand_jakobovic(
centroid: np.ndarray, reflected_point: np.ndarray, gamma: float
) -> np.ndarray:
"""
Expands argument reflected_point wrt centroid by argument gamma. This is a modified
version which is supposedly the correct one, as said by prof. Jakobović.
Args:
centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.
reflected_point (np.ndarray): A numpy.ndarray representing the reflected worst
point of a simplex.
gamma (float): A float representing the amount a point will be expanded.
Returns:
np.ndarray: A numpy.ndarray representing the expanded point.
"""
return (1 - gamma) * centroid - gamma * reflected_point
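# --- Hedged illustration (not part of the original module) ---
# Numeric sketch of the three simplex moves, with centroid c = [0, 0] and worst
# point x_h = [2, 0] (reflected point x_r = [-2, 0]):
#   __reflect(c, x_h, alpha=1.0)   -> [-2, 0]   # (1 + alpha) * c - alpha * x_h
#   __contract(c, x_h, beta=0.5)   -> [ 1, 0]   # (1 - beta) * c + beta * x_h
#   __expand(c, x_r, gamma=2.0)    -> [-4, 0]   # (1 - gamma) * c + gamma * x_r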
def __time_to_stop(
simplex_values: np.ndarray, centroid_value: float, epsilon: float
) -> bool:
"""
Checks if it's time to stop Nelder Mead Simplex Search.
Args:
simplex_values (np.ndarray): A numpy.ndarray representing the vector of simplex
values.
centroid_value (float): A float representing the value of the simplex centroid.
epsilon (float): A float representing the error threshold.
Returns:
bool: True if the stopping condition of Nelder Mead Simplex Search has been met,
False otherwise.
"""
difference_in_values = simplex_values - centroid_value
squared_difference_in_values = np.square(difference_in_values)
mean_squared_difference_in_values = np.mean(squared_difference_in_values)
return np.sqrt(mean_squared_difference_in_values) <= epsilon
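# --- Hedged note (not part of the original module) ---
# The stopping rule is the root-mean-square spread of the simplex values around
# the centroid value: sqrt(mean((F(x_i) - F(c)) ** 2)) <= epsilon.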
def __print_nmss_values(
function: Function,
centroid: np.ndarray,
verbosity: int,
decimal_precision: int,
):
"""
Prints the Nelder Mead Simplex Search values.
Args:
function (Function): A Function representing the loss function.
centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.
verbosity (int): An int representing the level of verbosity of the output during
algorithm execution.
decimal_precision (int): An int representing the number of decimal digits to
round numbers outputted during algorithm execution.
"""
if verbosity == 1:
print(f"c = {np.around(centroid, decimal_precision)}")
elif verbosity > 1:
result = function(centroid, dont_count=True)
result = (
np.around(result, 3)
if isinstance(result, np.ndarray)
else f"{result:.0{decimal_precision}f}"
)
print(f"F(c = {np.around(centroid, decimal_precision)}) = {result}")
def nelder_mead_simplex_search(
function: Function,
start: np.ndarray,
stride: Union[float, int] = 1,
alpha: float = 1.0,
beta: float = 0.5,
gamma: float = 2.0,
sigma: float = 0.5,
use_jakobovic_expand: bool = False,
epsilon: float = 1e-6,
max_iterations: int = 100000,
verbosity: Optional[str] = None,
decimal_precision: int = 3,
) -> np.ndarray:
"""
Uses Nelder Mead Simplex Search to find an n-D optimum of a function.
Args:
function (Function): A Function representing the loss function.
start (np.ndarray): A numpy.ndarray representing the starting point of the
search.
stride (Union[float, int], optional): A float or int representing the stride for
simplex generation. Defaults to 1.
alpha (float, optional): A float used in point reflection. Defaults to 1.0.
beta (float, optional): A float used in point contraction. Defaults to 0.5.
gamma (float, optional): A float used in point expansion. Defaults to 2.0.
sigma (float, optional): A float used when moving points to the optimum.
Defaults to 0.5.
use_jakobovic_expand (bool, optional): A bool determining whether or not to use
the __expand_jakobovic method instead of the __expand method for point
expansion. Defaults to False.
epsilon (float, optional): A float representing the error threshold. Defaults to
1e-6.
max_iterations (int, optional): An int representing the maximum number of
iterations before the algorithm times out and returns the last found optimum.
Defaults to 100000.
verbosity (Optional[str], optional): A str representing the verbosity of the
output during algorithm execution. Defaults to None (no output during algorithm
execution).
decimal_precision (int, optional): An int representing the number of decimal
digits to round numbers outputted during algorithm execution. Defaults to 3.
Returns:
np.ndarray: A numpy.ndarray representing the last found optimum.
"""
(
function,
alpha,
beta,
gamma,
sigma,
use_jakobovic_expand,
epsilon,
max_iterations,
verbosity,
decimal_precision,
) = clean_nelder_mead_simplex_search_arguments(
function=function,
alpha=alpha,
beta=beta,
gamma=gamma,
sigma=sigma,
use_jakobovic_expand=use_jakobovic_expand,
epsilon=epsilon,
max_iterations=max_iterations,
verbosity=verbosity,
decimal_precision=decimal_precision,
)
start, stride = clean_get_simplex_points(start=start, stride=stride)
simplex_points = __get_simplex_points(start=start, stride=stride)
simplex_values = np.array([function(x) for x in simplex_points])
timed_out = True
expansion_method = __expand_jakobovic if use_jakobovic_expand else __expand
for _ in range(max_iterations):
minimum_index = np.argmin(simplex_values)
maximum_index = np.argmax(simplex_values)
centroid = np.mean(np.delete(simplex_points, maximum_index, axis=0), axis=0)
__print_nmss_values(
function=function,
centroid=centroid,
verbosity=verbosity,
decimal_precision=decimal_precision,
)
reflected_point = __reflect(
centroid=centroid, maximum_point=simplex_points[maximum_index], alpha=alpha
)
reflected_value = function(reflected_point)
minimum_value = simplex_values[minimum_index]
if reflected_value < minimum_value:
expanded_point = expansion_method(
centroid=centroid, reflected_point=reflected_point, gamma=gamma
)
expanded_value = function(expanded_point)
if expanded_value < minimum_value:
simplex_points[maximum_index] = expanded_point
simplex_values[maximum_index] = expanded_value
else:
simplex_points[maximum_index] = reflected_point
simplex_values[maximum_index] = reflected_value
else:
maximum_value = simplex_values[maximum_index]
if all(np.delete(simplex_values, maximum_index, axis=0) < reflected_value):
if reflected_value < maximum_value:
simplex_points[maximum_index] = reflected_point
simplex_values[maximum_index] = reflected_value
# We need this here since we're introducing a new point and value
minimum_index = np.argmin(simplex_values)
maximum_index = np.argmax(simplex_values)
# We need to do this since the maximum value has potentially changed
maximum_value = simplex_values[maximum_index]
contracted_point = __contract(
centroid=centroid,
maximum_point=simplex_points[maximum_index],
beta=beta,
)
contracted_value = function(contracted_point)
if contracted_value < maximum_value:
simplex_points[maximum_index] = contracted_point
simplex_values[maximum_index] = contracted_value
else:
for i, simplex_point in enumerate(simplex_points):
if i == minimum_index:
continue
simplex_points[i] += (
simplex_points[minimum_index] - simplex_points[i]
) * sigma
simplex_values[i] = function(simplex_points[i])
else:
simplex_points[maximum_index] = reflected_point
simplex_values[maximum_index] = reflected_value
if __time_to_stop(
simplex_values=simplex_values,
centroid_value=function(centroid),
epsilon=epsilon,
):
timed_out = False
break
if timed_out:
print(
f"WARNING: Nelder Mead Simplex Search timed out after {max_iterations} "
"iterations - result might not be a minimum.",
file=sys.stderr,
)
# Do this to get a more precise result
maximum_index = np.argmax(simplex_values)
centroid = np.mean(np.delete(simplex_points, maximum_index, axis=0), axis=0)
return centroid
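# --- Hedged usage sketch (not part of the original module) ---
# Assuming Function simply wraps a callable loss (its actual constructor lives in
# .function and may differ), a typical call could look like:
#   f = Function(lambda x: (x[0] - 1) ** 2 + (x[1] + 2) ** 2)
#   optimum = nelder_mead_simplex_search(f, start=np.array([0.0, 0.0]))
#   # optimum is expected to approach [1, -2]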
|
[
"numpy.argmax",
"numpy.square",
"numpy.argmin",
"numpy.around",
"numpy.mean",
"numpy.tile",
"numpy.reshape",
"numpy.eye",
"numpy.delete",
"numpy.vstack",
"numpy.sqrt"
] |
[((7922, 7943), 'numpy.reshape', 'np.reshape', (['start', '(-1)'], {}), '(start, -1)\n', (7932, 7943), True, 'import numpy as np\n'), ((8803, 8843), 'numpy.tile', 'np.tile', (['start'], {'reps': '(start.shape[0], 1)'}), '(start, reps=(start.shape[0], 1))\n', (8810, 8843), True, 'import numpy as np\n'), ((8911, 8937), 'numpy.vstack', 'np.vstack', (['[start, points]'], {}), '([start, points])\n', (8920, 8937), True, 'import numpy as np\n'), ((12136, 12167), 'numpy.square', 'np.square', (['difference_in_values'], {}), '(difference_in_values)\n', (12145, 12167), True, 'import numpy as np\n'), ((12208, 12245), 'numpy.mean', 'np.mean', (['squared_difference_in_values'], {}), '(squared_difference_in_values)\n', (12215, 12245), True, 'import numpy as np\n'), ((19743, 19768), 'numpy.argmax', 'np.argmax', (['simplex_values'], {}), '(simplex_values)\n', (19752, 19768), True, 'import numpy as np\n'), ((12258, 12300), 'numpy.sqrt', 'np.sqrt', (['mean_squared_difference_in_values'], {}), '(mean_squared_difference_in_values)\n', (12265, 12300), True, 'import numpy as np\n'), ((16382, 16407), 'numpy.argmin', 'np.argmin', (['simplex_values'], {}), '(simplex_values)\n', (16391, 16407), True, 'import numpy as np\n'), ((16432, 16457), 'numpy.argmax', 'np.argmax', (['simplex_values'], {}), '(simplex_values)\n', (16441, 16457), True, 'import numpy as np\n'), ((19792, 19840), 'numpy.delete', 'np.delete', (['simplex_points', 'maximum_index'], {'axis': '(0)'}), '(simplex_points, maximum_index, axis=0)\n', (19801, 19840), True, 'import numpy as np\n'), ((8875, 8898), 'numpy.eye', 'np.eye', (['points.shape[0]'], {}), '(points.shape[0])\n', (8881, 8898), True, 'import numpy as np\n'), ((16485, 16533), 'numpy.delete', 'np.delete', (['simplex_points', 'maximum_index'], {'axis': '(0)'}), '(simplex_points, maximum_index, axis=0)\n', (16494, 16533), True, 'import numpy as np\n'), ((13128, 13148), 'numpy.around', 'np.around', (['result', '(3)'], {}), '(result, 3)\n', (13137, 13148), True, 'import numpy as np\n'), ((12978, 13016), 'numpy.around', 'np.around', (['centroid', 'decimal_precision'], {}), '(centroid, decimal_precision)\n', (12987, 13016), True, 'import numpy as np\n'), ((17622, 17670), 'numpy.delete', 'np.delete', (['simplex_values', 'maximum_index'], {'axis': '(0)'}), '(simplex_values, maximum_index, axis=0)\n', (17631, 17670), True, 'import numpy as np\n'), ((18002, 18027), 'numpy.argmin', 'np.argmin', (['simplex_values'], {}), '(simplex_values)\n', (18011, 18027), True, 'import numpy as np\n'), ((18064, 18089), 'numpy.argmax', 'np.argmax', (['simplex_values'], {}), '(simplex_values)\n', (18073, 18089), True, 'import numpy as np\n'), ((13281, 13319), 'numpy.around', 'np.around', (['centroid', 'decimal_precision'], {}), '(centroid, decimal_precision)\n', (13290, 13319), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
from multiprocessing import Pool, Manager, cpu_count
from functools import partial
import numpy as np
from bs4 import BeautifulSoup
from colour import Color
import copy
import math
import re
import time
from consts import QWERTY, THUMBS, COORDS
CACHE = {}
def cleanhtml(raw_html):
soup = BeautifulSoup(raw_html, "lxml")
spans = soup.find_all('span')
lowercase = ''.join([i.text.replace('Пользователь 2: ', '').replace('Пользователь 1: ', '') for i in spans]).lower()
return re.sub('[^а-я]+', '', lowercase)
def generate_strokes(sample, QWERTY):
zones = {}
for idr, row in enumerate(QWERTY):
for idk, key in enumerate(row):
zones[key] = THUMBS[idr][idk]
strokes = {}
stroke = ''
for idx, char in enumerate(sample):
current_zone = zones[char]
stroke += char
if idx + 1 < len(sample) and zones[sample[idx + 1]] != current_zone:
r_stroke = stroke[::-1]
if stroke in strokes:
strokes[stroke]["count"] += 1
elif r_stroke in strokes:
strokes[r_stroke]["count"] += 1
else:
strokes[stroke] = {"zone": current_zone, "count": 1}
stroke = ''
if idx + 1 == len(sample):
r_stroke = stroke[::-1]
if stroke in strokes:
strokes[stroke]["count"] += 1
elif r_stroke in strokes:
strokes[r_stroke]["count"] += 1
else:
strokes[stroke] = {"zone": current_zone, "count": 1}
return strokes
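# --- Hedged note (not part of the original file) ---
# generate_strokes splits the text into maximal runs typed within a single finger
# zone (looked up from THUMBS) and counts how often each run occurs, treating a
# run and its reverse as the same stroke.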
def calculateDistance(x1,y1,x2,y2):
global CACHE
if f"{x1}{y1}{x2}{y2}" in CACHE:
return CACHE[f"{x1}{y1}{x2}{y2}"]
if f"{x2}{y2}{x1}{y1}" in CACHE:
return CACHE[f"{x2}{y2}{x1}{y1}"]
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
CACHE[f"{x1}{y1}{x2}{y2}"] = dist
return dist
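# --- Hedged note (not part of the original file) ---
# The cache key concatenates coordinates without a separator, so distinct inputs
# such as (1, 23, 4, 5) and (12, 3, 4, 5) both map to the key "12345". A tuple
# key, e.g. CACHE[(x1, y1, x2, y2)], would avoid such collisions; the original
# behaviour is kept above.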
def finger_heatmap(finger_distances):
return [[
finger_distances['ЛМ'],
finger_distances['ЛБ'],
finger_distances['ЛС'],
finger_distances['ЛУ'],
finger_distances['ПУ'],
finger_distances['ПС'],
finger_distances['ПБ'],
finger_distances['ПМ']
]]
def shift_row(c, row_num, value):
new_coords = copy.deepcopy(c)
for idx, cell in enumerate(new_coords[row_num]):
new_coords[row_num][idx][0] = new_coords[row_num][idx][0] + value
return new_coords
def shift_col(c, col_num, value):
new_coords = copy.deepcopy(c)
for idx, row in enumerate(new_coords):
new_coords[idx][col_num][1] = new_coords[idx][col_num][1] + value
return new_coords
def get_mapper(c, k):
text_mapper = {
item: {
'x': c[idx][idy][0],
'y': c[idx][idy][1],
'thumb': THUMBS[idx][idy]
} for idx, sublist in enumerate(k) for idy, item in enumerate(sublist)
}
# print(json.dumps(text_mapper, indent=2, ensure_ascii=False))
return text_mapper
def draw_keyboard(coords, QWERTY):
x = [i[0] for i in [item for sublist in coords for item in sublist]]
y = [i[1] for i in [item for sublist in coords for item in sublist]]
n = [item for sublist in QWERTY for item in sublist]
fig, ax = plt.subplots()
ax.scatter(x, y, marker=",", s=620, color=(0.5, 0.5, 0.5))
ax.set_title('Key coordinates', fontsize=10)
ax.set_aspect('equal', 'box')
# Or if you want different settings for the grids:
major_ticks = np.arange(-20, 210, 20)
minor_ticks = np.arange(-20, 210, 5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_yticks(major_ticks)
ax.set_yticks(minor_ticks, minor=True)
# And a corresponding grid
ax.grid(which='both')
# Or if you want different settings for the grids:
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
ax.axis([-12, 210, -12, 48])
for i, txt in enumerate(n):
ax.annotate(txt, (x[i], y[i]), color=(1, 1, 1))
def get_keyboard(coords, QWERTY):
x = [i[0] for i in [item for sublist in coords for item in sublist]]
y = [i[1] for i in [item for sublist in coords for item in sublist]]
n = [item for sublist in QWERTY for item in sublist]
fig, ax = plt.subplots()
ax.scatter(x, y, marker=",", s=620, color=(0.5, 0.5, 0.5))
ax.set_title('Key coordinates', fontsize=10)
ax.set_aspect('equal', 'box')
# Or if you want different settings for the grids:
major_ticks = np.arange(-20, 210, 20)
minor_ticks = np.arange(-20, 210, 5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_yticks(major_ticks)
ax.set_yticks(minor_ticks, minor=True)
# And a corresponding grid
ax.grid(which='both')
# Or if you want different settings for the grids:
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
ax.axis([-12, 210, -12, 48])
for i, txt in enumerate(n):
ax.annotate(txt, (x[i], y[i]), color=(1, 1, 1))
return ax
def count_presses(text):
press_count = {}
for idx, char in enumerate(text):
if char not in press_count:
press_count[char] = 1
else:
press_count[char] += 1
return press_count
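# --- Hedged usage sketch (not part of the original file) ---
# count_presses("abba") -> {"a": 2, "b": 2}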
def press_heatmap(presses_counts, QWERTY):
return [[presses_counts[item] if item in presses_counts else 0 for item in row] for row in QWERTY]
def zone_distances(zone, press_count):
keys = []
default_position = {
'ЛМ': COORDS[1][0],
'ЛБ': COORDS[1][1],
'ЛС': COORDS[1][2],
'ЛУ': COORDS[1][3],
'ПУ': COORDS[1][6],
'ПС': COORDS[1][7],
'ПБ': COORDS[1][8],
'ПМ': COORDS[1][9],
}
for idr, row in enumerate(QWERTY):
for idk, key in enumerate(row):
if THUMBS[idr][idk] == zone and len(QWERTY[idr][idk]) > 0:
x1, y1 = default_position[zone][0], default_position[zone][1]
x2, y2 = COORDS[idr][idk][0], COORDS[idr][idk][1]
distance = calculateDistance(x1, y1, x2, y2)
keys.append({
"symbol": QWERTY[idr][idk],
"distance": distance,
"press_count": press_count[QWERTY[idr][idk]]
})
return sorted(keys, key=lambda i: i["press_count"], reverse=True)
def distance_deltas(distance, distance_1):
sum = 0
for k, v in distance.items():
delta = v - distance_1[k]
sum += delta
print(f"{k}: {distance_1[k] / 1000:.2f} м - меньше на {delta / 1000:.2f} м ({(1 - (distance_1[k] / v)) * 100:.2f}%)")
print(f"\nОбщая дистанция уменшилась на {sum / 1000:.2f} м")
def count_stroke_distance(default_position, default_keys, mapper, stroke):
text = stroke["stroke"]
zone = stroke["zone"]
count = stroke["count"]
pairs = []
total_distance = 0
if len(text) <= 1:
return
for idx, char in enumerate(text):
if idx + 1 == len(text):
char_1 = char
x1 = default_position[mapper[char]['thumb']][0]
y1 = default_position[mapper[char]['thumb']][1]
char_2 = default_keys[zone]
x2 = mapper[char]['x']
y2 = mapper[char]['y']
distance = calculateDistance(x1, y1, x2, y2)
total_distance += distance
pair = f"{char_1}{char_2}"
pairs.append({
"pair": pair,
"distance": distance
})
if idx == 0:
char_1 = default_keys[zone]
x1 = default_position[mapper[char]['thumb']][0]
y1 = default_position[mapper[char]['thumb']][1]
char_2 = char
x2 = mapper[char]['x']
y2 = mapper[char]['y']
distance = calculateDistance(x1, y1, x2, y2)
total_distance += distance
pair = f"{char_1}{char_2}"
pairs.append({
"pair": pair,
"distance": distance
})
else:
char_1 = text[idx - 1]
x1 = mapper[char_1]['x']
y1 = mapper[char_1]['y']
char_2 = char
x2 = mapper[char_2]['x']
y2 = mapper[char_2]['y']
distance = calculateDistance(x1, y1, x2, y2)
total_distance += distance
pair = f"{char_1}{char_2}"
pairs.append({
"pair": pair,
"distance": distance
})
return {
"pairs": pairs,
"count": count,
"total_distance": total_distance,
"zone": zone
}
def draw_stroke_lines(pairs, COORDS, QWERTY, row_count, max_value, max_line_width):
ax = get_keyboard(COORDS, QWERTY)
mapper = get_mapper(COORDS, QWERTY)
red = Color("green")
colors = list(red.range_to(Color("red"),100))
for pair, distance in pairs.items():
stroke_a, stroke_b = pair[0], pair[1]
x1 = mapper[stroke_a]['x']
y1 = mapper[stroke_a]['y']
x2 = mapper[stroke_b]['x']
y2 = mapper[stroke_b]['y']
linewidth = (max_line_width / max_value) * distance
color_hue = (100 / max_value) * distance
color_hue = int(round(color_hue))
r, g, b = colors[color_hue - 1].rgb
ax.plot([x1,x2],[y1,y2], linewidth=linewidth, color=(r, g, b, 1))
def process_strokes(strokes, coords, qwerty):
distances = {
'ЛМ': 0,
'ЛБ': 0,
'ЛС': 0,
'ЛУ': 0,
'ПУ': 0,
'ПС': 0,
'ПБ': 0,
'ПМ': 0,
}
default_keys = {
'ЛМ': qwerty[1][0],
'ЛБ': qwerty[1][1],
'ЛС': qwerty[1][2],
'ЛУ': qwerty[1][3],
'ПУ': qwerty[1][6],
'ПС': qwerty[1][7],
'ПБ': qwerty[1][8],
'ПМ': qwerty[1][9],
}
default_position = {
'ЛМ': coords[1][0],
'ЛБ': coords[1][1],
'ЛС': coords[1][2],
'ЛУ': coords[1][3],
'ПУ': coords[1][6],
'ПС': coords[1][7],
'ПБ': coords[1][8],
'ПМ': coords[1][9],
}
start_time = time.time()
mapper = get_mapper(coords, qwerty)
pairs = {}
num_workers = cpu_count()
p = Pool(num_workers)
manager = Manager()
func = partial(count_stroke_distance, default_position, default_keys, mapper)
results = p.map_async(func, strokes).get()
p.close()
p.join()
for stroke_distance in results:
if stroke_distance is None:
continue
# stroke_distance = count_stroke_distance(COORDS, QWERTY, THUMBS, default_position, default_keys, stroke)
distances[stroke_distance["zone"]] += stroke_distance["total_distance"] * stroke_distance["count"]
for pair in stroke_distance["pairs"]:
if pair["pair"] in pairs:
pairs[pair["pair"]] += pair["distance"] * stroke_distance["count"]
elif f'{pair["pair"][1]}{pair["pair"][0]}' in pairs:
pairs[f'{pair["pair"][1]}{pair["pair"][0]}'] += pair["distance"] * stroke_distance["count"]
else:
pairs[pair["pair"]] = pair["distance"] * stroke_distance["count"]
print("--- %s seconds ---" % (time.time() - start_time))
return {
"pairs": pairs,
"distances": distances
}
|
[
"colour.Color",
"functools.partial",
"copy.deepcopy",
"math.sqrt",
"multiprocessing.Manager",
"time.time",
"numpy.arange",
"multiprocessing.Pool",
"bs4.BeautifulSoup",
"matplotlib.pyplot.subplots",
"re.sub",
"multiprocessing.cpu_count"
] |
[((328, 359), 'bs4.BeautifulSoup', 'BeautifulSoup', (['raw_html', '"""lxml"""'], {}), "(raw_html, 'lxml')\n", (341, 359), False, 'from bs4 import BeautifulSoup\n'), ((526, 558), 're.sub', 're.sub', (['"""[^а-я]+"""', '""""""', 'lowercase'], {}), "('[^а-я]+', '', lowercase)\n", (532, 558), False, 'import re\n'), ((1829, 1871), 'math.sqrt', 'math.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (1838, 1871), False, 'import math\n'), ((2295, 2311), 'copy.deepcopy', 'copy.deepcopy', (['c'], {}), '(c)\n', (2308, 2311), False, 'import copy\n'), ((2514, 2530), 'copy.deepcopy', 'copy.deepcopy', (['c'], {}), '(c)\n', (2527, 2530), False, 'import copy\n'), ((3264, 3278), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3276, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3501, 3524), 'numpy.arange', 'np.arange', (['(-20)', '(210)', '(20)'], {}), '(-20, 210, 20)\n', (3510, 3524), True, 'import numpy as np\n'), ((3543, 3565), 'numpy.arange', 'np.arange', (['(-20)', '(210)', '(5)'], {}), '(-20, 210, 5)\n', (3552, 3565), True, 'import numpy as np\n'), ((4282, 4296), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4294, 4296), True, 'import matplotlib.pyplot as plt\n'), ((4519, 4542), 'numpy.arange', 'np.arange', (['(-20)', '(210)', '(20)'], {}), '(-20, 210, 20)\n', (4528, 4542), True, 'import numpy as np\n'), ((4561, 4583), 'numpy.arange', 'np.arange', (['(-20)', '(210)', '(5)'], {}), '(-20, 210, 5)\n', (4570, 4583), True, 'import numpy as np\n'), ((8824, 8838), 'colour.Color', 'Color', (['"""green"""'], {}), "('green')\n", (8829, 8838), False, 'from colour import Color\n'), ((10123, 10134), 'time.time', 'time.time', ([], {}), '()\n', (10132, 10134), False, 'import time\n'), ((10208, 10219), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (10217, 10219), False, 'from multiprocessing import Pool, Manager, cpu_count\n'), ((10228, 10245), 'multiprocessing.Pool', 'Pool', (['num_workers'], {}), '(num_workers)\n', (10232, 10245), False, 'from multiprocessing import Pool, Manager, cpu_count\n'), ((10260, 10269), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (10267, 10269), False, 'from multiprocessing import Pool, Manager, cpu_count\n'), ((10281, 10351), 'functools.partial', 'partial', (['count_stroke_distance', 'default_position', 'default_keys', 'mapper'], {}), '(count_stroke_distance, default_position, default_keys, mapper)\n', (10288, 10351), False, 'from functools import partial\n'), ((8870, 8882), 'colour.Color', 'Color', (['"""red"""'], {}), "('red')\n", (8875, 8882), False, 'from colour import Color\n'), ((11215, 11226), 'time.time', 'time.time', ([], {}), '()\n', (11224, 11226), False, 'import time\n')]
|
import numpy as np
import pandas as pd
def get_bootstrap_indices(data, cluster_by=None, seed=None, n_draws=1000):
"""Draw positional indices for the construction of bootstrap samples.
Storing the positional indices instead of the full bootstrap samples saves a lot
of memory for datasets with many variables.
Args:
data (pandas.DataFrame): original dataset.
cluster_by (str): column name of the variable to cluster by.
seed (int): Random seed.
n_draws (int): number of bootstrap draws.
Returns:
list: list of numpy arrays with positional indices
"""
np.random.seed(seed)
n_obs = len(data)
if cluster_by is None:
bootstrap_indices = list(np.random.randint(0, n_obs, size=(n_draws, n_obs)))
else:
clusters = data[cluster_by].unique()
drawn_clusters = np.random.choice(
clusters, size=(n_draws, len(clusters)), replace=True
)
bootstrap_indices = _convert_cluster_ids_to_indices(
data[cluster_by], drawn_clusters
)
return bootstrap_indices
def _convert_cluster_ids_to_indices(cluster_col, drawn_clusters):
"""Convert the drawn clusters to positional indices of individual observations.
Args:
cluster_col (pandas.Series): cluster id of each observation in the data.
drawn_clusters (numpy.ndarray): matrix of drawn cluster ids, one row per bootstrap draw.
"""
bootstrap_indices = []
cluster_to_locs = pd.Series(np.arange(len(cluster_col)), index=cluster_col)
for draw in drawn_clusters:
bootstrap_indices.append(cluster_to_locs[draw].to_numpy())
return bootstrap_indices
def get_bootstrap_samples(data, cluster_by=None, seed=None, n_draws=1000):
"""Draw bootstrap samples.
If you have memory issues you should use get_bootstrap_indices instead and construct
the full samples only as needed.
Args:
data (pandas.DataFrame): original dataset.
cluster_by (str): column name of the variable to cluster by.
seed (int): Random seed.
n_draws (int): number of bootstrap draws.
Returns:
list: list of resampled datasets.
"""
indices = get_bootstrap_indices(
data=data,
cluster_by=cluster_by,
seed=seed,
n_draws=n_draws,
)
datasets = _get_bootstrap_samples_from_indices(data=data, bootstrap_indices=indices)
return datasets
def _get_bootstrap_samples_from_indices(data, bootstrap_indices):
"""convert bootstrap indices into actual bootstrap samples.
Args:
data (pandas.DataFrame): original dataset.
bootstrap_indices (list): List with numpy arrays containing positional indices
of observations in data.
Returns:
list: list of DataFrames
"""
out = [data.iloc[idx] for idx in bootstrap_indices]
return out
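# --- Hedged usage sketch (not part of the original module) ---
# Drawing clustered bootstrap samples from a toy DataFrame; whole "id" clusters
# are resampled together, so observations from the same cluster always travel
# as a group.
def _demo_clustered_bootstrap():
    df = pd.DataFrame({"id": [1, 1, 2, 2, 3, 3], "y": [0.1, 0.4, 0.2, 0.3, 0.9, 0.7]})
    return get_bootstrap_samples(df, cluster_by="id", seed=0, n_draws=3)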
|
[
"numpy.random.randint",
"numpy.random.seed"
] |
[((646, 666), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (660, 666), True, 'import numpy as np\n'), ((750, 800), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_obs'], {'size': '(n_draws, n_obs)'}), '(0, n_obs, size=(n_draws, n_obs))\n', (767, 800), True, 'import numpy as np\n')]
|