code | apis | extract_api |
---|---|---|
import torch
from torch.nn.functional import leaky_relu
from rational.torch import Rational
import numpy as np
t = torch.tensor([-2., -1, 0., 1., 2.])
expected_res = np.array(leaky_relu(t))
inp = torch.from_numpy(np.array(t)).reshape(-1)
cuda_inp = torch.tensor(np.array(t), dtype=torch.float, device="cuda").reshape(-1)
rationalA_lrelu_gpu = Rational(version='A', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalB_lrelu_gpu = Rational(version='B', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalC_lrelu_gpu = Rational(version='C', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalD_lrelu_gpu = Rational(version='D', cuda=True, trainable=False)(cuda_inp).clone().detach().cpu().numpy()
# Tests on GPU
def test_rationalA_gpu_lrelu():
assert np.all(np.isclose(rationalA_lrelu_gpu, expected_res, atol=5e-02))
def test_rationalB_gpu_lrelu():
assert np.all(np.isclose(rationalB_lrelu_gpu, expected_res, atol=5e-02))
def test_rationalC_gpu_lrelu():
assert np.all(np.isclose(rationalC_lrelu_gpu, expected_res, atol=5e-02))
def test_rationalD_gpu_lrelu():
assert np.all(np.isclose(rationalD_lrelu_gpu, expected_res, atol=5e-02))
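# CPU sanity check (added sketch, not part of the original tests); reuses the
# otherwise-unused `inp` tensor and assumes `cuda=False` keeps the module on CPU.
def test_rationalA_cpu_lrelu():
    rationalA_lrelu_cpu = Rational(version='A', cuda=False)(inp).clone().detach().numpy()
    assert np.all(np.isclose(rationalA_lrelu_cpu, expected_res, atol=5e-02))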
|
[
"torch.nn.functional.leaky_relu",
"numpy.isclose",
"torch.tensor",
"numpy.array",
"rational.torch.Rational"
] |
[((117, 156), 'torch.tensor', 'torch.tensor', (['[-2.0, -1, 0.0, 1.0, 2.0]'], {}), '([-2.0, -1, 0.0, 1.0, 2.0])\n', (129, 156), False, 'import torch\n'), ((177, 190), 'torch.nn.functional.leaky_relu', 'leaky_relu', (['t'], {}), '(t)\n', (187, 190), False, 'from torch.nn.functional import leaky_relu\n'), ((793, 849), 'numpy.isclose', 'np.isclose', (['rationalA_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalA_lrelu_gpu, expected_res, atol=0.05)\n', (803, 849), True, 'import numpy as np\n'), ((904, 960), 'numpy.isclose', 'np.isclose', (['rationalB_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalB_lrelu_gpu, expected_res, atol=0.05)\n', (914, 960), True, 'import numpy as np\n'), ((1015, 1071), 'numpy.isclose', 'np.isclose', (['rationalC_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalC_lrelu_gpu, expected_res, atol=0.05)\n', (1025, 1071), True, 'import numpy as np\n'), ((1126, 1182), 'numpy.isclose', 'np.isclose', (['rationalD_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalD_lrelu_gpu, expected_res, atol=0.05)\n', (1136, 1182), True, 'import numpy as np\n'), ((215, 226), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (223, 226), True, 'import numpy as np\n'), ((264, 275), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (272, 275), True, 'import numpy as np\n'), ((347, 379), 'rational.torch.Rational', 'Rational', ([], {'version': '"""A"""', 'cuda': '(True)'}), "(version='A', cuda=True)\n", (355, 379), False, 'from rational.torch import Rational\n'), ((443, 475), 'rational.torch.Rational', 'Rational', ([], {'version': '"""B"""', 'cuda': '(True)'}), "(version='B', cuda=True)\n", (451, 475), False, 'from rational.torch import Rational\n'), ((539, 571), 'rational.torch.Rational', 'Rational', ([], {'version': '"""C"""', 'cuda': '(True)'}), "(version='C', cuda=True)\n", (547, 571), False, 'from rational.torch import Rational\n'), ((635, 684), 'rational.torch.Rational', 'Rational', ([], {'version': '"""D"""', 'cuda': '(True)', 'trainable': '(False)'}), "(version='D', cuda=True, trainable=False)\n", (643, 684), False, 'from rational.torch import Rational\n')]
|
"""
The :mod:`sportsbed.datasets._soccer` module includes functions
to fetch historical soccer data and fixtures.
"""
import numpy as np
HOME_WIN = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] > offset
AWAY_WIN = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] < -offset
DRAW = lambda outputs, col1, col2, offset: np.abs(outputs[col1] - outputs[col2]) <= offset
OVER = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] > offset
UNDER = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] < offset
TARGETS = [
('home_win__full_time_goals', lambda outputs: HOME_WIN(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 0.0)),
('away_win__full_time_goals', lambda outputs: AWAY_WIN(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 0.0)),
('draw__full_time_goals', lambda outputs: DRAW(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 0.0)),
('over_1.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 1.5)),
('over_2.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 2.5)),
('over_3.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 3.5)),
('over_4.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 4.5)),
('under_1.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 1.5)),
('under_2.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 2.5)),
('under_3.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 3.5)),
('under_4.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 4.5)),
('home_win__full_time_adjusted_goals', lambda outputs: HOME_WIN(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 0.5)),
('away_win__full_time_adjusted_goals', lambda outputs: AWAY_WIN(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 0.5)),
('draw__full_time_adjusted_goals', lambda outputs: DRAW(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 0.5)),
('over_1.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 1.5)),
('over_2.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 2.5)),
('over_3.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 3.5)),
('over_4.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 4.5)),
('under_1.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 1.5)),
('under_2.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 2.5)),
('under_3.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 3.5)),
('under_4.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 4.5))
]
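# Minimal usage sketch (added, not part of the module): applying a target
# extractor to a pandas DataFrame of full-time scores; the column names follow
# the keys used above and the example data is made up.
if __name__ == '__main__':
    import pandas as pd
    outputs = pd.DataFrame({
        'home_team__full_time_goals': [2, 0, 1],
        'away_team__full_time_goals': [1, 0, 3],
    })
    name, extractor = TARGETS[0]  # 'home_win__full_time_goals'
    print(name, extractor(outputs).tolist())  # [True, False, False]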
|
[
"numpy.abs"
] |
[((348, 385), 'numpy.abs', 'np.abs', (['(outputs[col1] - outputs[col2])'], {}), '(outputs[col1] - outputs[col2])\n', (354, 385), True, 'import numpy as np\n')]
|
import face_recognition
import cv2
import numpy as np
# getMouthImage (from TLR Teeth Appearance Calculation.ipynb)
def getMouthImage(faceImage,margin=0):
# face_locations = face_recognition.face_locations(faceImage)
face_landmarks_list = face_recognition.face_landmarks(faceImage)
if len(face_landmarks_list) == 0:
return None
minx = miny = float('inf')
maxx = maxy = float('-inf')
for x,y in face_landmarks_list[0]['top_lip']:
minx = min(minx,x)
miny = min(miny,y)
for x,y in face_landmarks_list[0]['bottom_lip']:
maxx = max(maxx,x)
maxy = max(maxy,y)
mouthImage = faceImage[miny-margin:maxy+margin,minx-margin:maxx+margin]
# lip_landmarks must be translate to origin (0,0) by minx, miny
lip_landmarks = {
'top_lip': [],
'bottom_lip': []
}
for p in face_landmarks_list[0]['top_lip']:
p2 = (p[0] - minx, p[1] - miny)
lip_landmarks['top_lip'].append(p2)
for p in face_landmarks_list[0]['bottom_lip']:
p2 = (p[0] - minx, p[1] - miny)
lip_landmarks['bottom_lip'].append(p2)
return mouthImage,lip_landmarks
# Ray tracing (from TLR Teeth Appearance Calculation.ipynb)
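# Added note: this is the classic crossing-number (ray casting) point-in-polygon
# test; it counts how many polygon edges a horizontal ray from (x, y) crosses
# and returns True when the count is odd.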
def ray_tracing_method(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# isin_inner_mouth (from TLR Teeth Appearance Calculation.ipynb)
def isin_inner_mouth(lip_boundary,x,y):
top_lip = lip_boundary['top_lip']
bottom_lip = lip_boundary['bottom_lip']
bounds = np.concatenate((top_lip[6:], bottom_lip[6:]),axis=0)
isin = ray_tracing_method(x,y,bounds)
return isin
# findCavity (from TLR Teeth Appearance Calculation.ipynb)
def findCavity(top_lip,bottom_lip):
return np.concatenate((top_lip[6:], bottom_lip[6:]),axis=0)
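# PolyArea is called by cavityArea below but is not defined in this excerpt;
# a standard shoelace-formula implementation is assumed here.
def PolyArea(x, y):
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))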
# cavityArea (from TLR Teeth Appearance Calculation.ipynb)
def cavityArea(top_lip,bottom_lip):
cavity = findCavity(top_lip,bottom_lip)
# cavity = np.concatenate((top_lip[6:], bottom_lip[6:]),axis=0)
x = cavity[:,0]
y = cavity[:,1]
return PolyArea(x,y)
# getTeethScore (from TLR Teeth Appearance Calculation.ipynb)
def getTeethScore(mouthImage,lip_landmarks=None):
height, width, channels = mouthImage.shape
area = height * width
    # Color-space note: frames passed in from extract_features are RGB, so the
    # RGB2Lab / RGB2Luv conversions are used here (the BGR variants are kept
    # commented out below); an original note reported RGB2Lab giving an
    # all-white region when working with video frames.
lab = cv2.cvtColor(mouthImage, cv2.COLOR_RGB2Lab)
luv = cv2.cvtColor(mouthImage, cv2.COLOR_RGB2Luv)
# lab = cv2.cvtColor(mouthImage, cv2.COLOR_BGR2Lab)
# luv = cv2.cvtColor(mouthImage, cv2.COLOR_BGR2Luv)
lab_ud = lab[:,:,1].mean() - lab[:,:,1].std()
ta = lab_ud # From THESIS (LAB, LUV)
luv_ud = luv[:,:,1].mean() - luv[:,:,1].std()
tu = luv_ud # from thesis
    # Copies of the Lab/Luv images for per-pixel lookups (the copy may be unnecessary)
lab2 = np.copy(lab)
luv2 = np.copy(luv)
# Copy for teeth hilight
hilightedMouthImage = np.copy(mouthImage)
# Pixel-wise operation
# TODO make it faster?
lab_c = luv_c = 0 # Counters
for y in range(len(hilightedMouthImage)):
row = hilightedMouthImage[y]
for x in range(len(row)):
inMouth = False
            if lip_landmarks is None:
                # isin_mouth is defined elsewhere in the original notebook
                inMouth = isin_mouth(hilightedMouthImage,x,y)
            else:
                inMouth = isin_inner_mouth(lip_landmarks,x,y)
if inMouth:
p = row[x]
lab_a = lab2[y,x,1]
luv_a = luv2[y,x,1]
if lab_a <= ta:
p[0] = 255 # L
p[1] = 255 # L
p[2] = 255 # L
lab_c += 1
if luv_a <= tu:
p[0] = 255 # L
p[1] = 255 # L
p[2] = 255 # L
luv_c += 1
return (hilightedMouthImage,lab,luv,lab_c,luv_c)
# draw_bounary
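# Added note: draw_bounary relies on `face_landmarks` and `frame` being defined
# in the enclosing notebook scope; it is not called in this excerpt.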
def draw_bounary(facial_feature):
# print(type(face_landmarks[facial_feature]),face_landmarks[facial_feature])
points = face_landmarks[facial_feature]
points = np.array(points, np.int32)
points = points.reshape((-1,1,2))
    cv2.polylines(frame,[points],True,(255,255,255),thickness=4)
def extract_features(image):
frame = image
rgb_frame = frame[:, :, ::-1]
face_landmarks_list = face_recognition.face_landmarks(rgb_frame)
if len(face_landmarks_list) == 0:
return None
face_landmarks = face_landmarks_list[0]
mouthImage,lip_landmarks = getMouthImage(rgb_frame)
score = getTeethScore(mouthImage,lip_landmarks)
markedMouthImage = score[0]
lab_c = score[3]
luv_c = score[4]
lip_features = {
# "frame_id": frame_number,
"top_lip": face_landmarks_list[0]['top_lip'],
"bottom_lip": face_landmarks_list[0]['bottom_lip'],
"teeth_appearance": {
"LAB": lab_c,
"LUV": luv_c
}
}
x_offset = y_offset = float('inf')
for x,y in face_landmarks_list[0]['top_lip']:
x_offset = min(x_offset,x)
y_offset = min(y_offset,y)
markedMouthImage = markedMouthImage[:, :, ::-1]
frame[y_offset:y_offset+markedMouthImage.shape[0], x_offset:x_offset+markedMouthImage.shape[1]] = markedMouthImage
return frame,lip_features
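# Minimal usage sketch (added, not part of the original notebook); 'face.jpg'
# is a hypothetical input path.
if __name__ == '__main__':
    img = cv2.imread('face.jpg')
    result = extract_features(img) if img is not None else None
    if result is not None:
        annotated_frame, lip_features = result
        print(lip_features['teeth_appearance'])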
|
[
"numpy.copy",
"cv2.polylines",
"face_recognition.face_landmarks",
"numpy.array",
"cv2.cvtColor",
"numpy.concatenate"
] |
[((244, 286), 'face_recognition.face_landmarks', 'face_recognition.face_landmarks', (['faceImage'], {}), '(faceImage)\n', (275, 286), False, 'import face_recognition\n'), ((1845, 1898), 'numpy.concatenate', 'np.concatenate', (['(top_lip[6:], bottom_lip[6:])'], {'axis': '(0)'}), '((top_lip[6:], bottom_lip[6:]), axis=0)\n', (1859, 1898), True, 'import numpy as np\n'), ((2059, 2112), 'numpy.concatenate', 'np.concatenate', (['(top_lip[6:], bottom_lip[6:])'], {'axis': '(0)'}), '((top_lip[6:], bottom_lip[6:]), axis=0)\n', (2073, 2112), True, 'import numpy as np\n'), ((2693, 2736), 'cv2.cvtColor', 'cv2.cvtColor', (['mouthImage', 'cv2.COLOR_RGB2Lab'], {}), '(mouthImage, cv2.COLOR_RGB2Lab)\n', (2705, 2736), False, 'import cv2\n'), ((2745, 2788), 'cv2.cvtColor', 'cv2.cvtColor', (['mouthImage', 'cv2.COLOR_RGB2Luv'], {}), '(mouthImage, cv2.COLOR_RGB2Luv)\n', (2757, 2788), False, 'import cv2\n'), ((3103, 3115), 'numpy.copy', 'np.copy', (['lab'], {}), '(lab)\n', (3110, 3115), True, 'import numpy as np\n'), ((3125, 3137), 'numpy.copy', 'np.copy', (['luv'], {}), '(luv)\n', (3132, 3137), True, 'import numpy as np\n'), ((3193, 3212), 'numpy.copy', 'np.copy', (['mouthImage'], {}), '(mouthImage)\n', (3200, 3212), True, 'import numpy as np\n'), ((4171, 4197), 'numpy.array', 'np.array', (['points', 'np.int32'], {}), '(points, np.int32)\n', (4179, 4197), True, 'import numpy as np\n'), ((4237, 4301), 'cv2.polylines', 'cv2.polylines', (['frame', 'points', '(True)', '(255, 255, 255)'], {'thickness': '(4)'}), '(frame, points, True, (255, 255, 255), thickness=4)\n', (4250, 4301), False, 'import cv2\n'), ((4405, 4447), 'face_recognition.face_landmarks', 'face_recognition.face_landmarks', (['rgb_frame'], {}), '(rgb_frame)\n', (4436, 4447), False, 'import face_recognition\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 08 21:24:18 2014
@author: Derrick
Module containing import detex classes
"""
# python 2 and 3 compatibility imports
from __future__ import print_function, absolute_import, unicode_literals, division
import json
import numbers
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import obspy
import pandas as pd
import scipy
from six import string_types
import detex
try: # python 2/3 compat
import cPickle
except ImportError:
import pickle as cPickle
import itertools
import copy
import colorsys
from struct import pack
import PyQt4
import sys
from scipy.cluster.hierarchy import dendrogram, fcluster
from detex.detect import _SSDetex
pd.options.mode.chained_assignment = None # mute setting copy warning
# warnings.filterwarnings('error') #uncomment this to make all warnings errors
# lines for backward compat.
class ClusterStream(object):
"""
A container for multiple cluster objects, should only be called with
detex.construct.createCluster
"""
def __init__(self, trdf, temkey, stakey, fetcher, eventList, ccReq, filt,
decimate, trim, fileName, eventsOnAllStations, enforceOrigin):
self.__dict__.update(locals()) # Instantiate all input variables
self.ccReq = None # set to None because it can vary between stations
self.clusters = [0] * len(trdf)
self.stalist = trdf.Station.values.tolist() # station lists
self.stalist2 = [x.split('.')[1] for x in self.stalist]
self.filename = fileName
self.eventCodes = self._makeCodes()
for num, row in trdf.iterrows():
if not eventsOnAllStations:
evlist = row.Events
else:
evlist = eventList
self.clusters[num] = Cluster(self, row.Station, temkey, evlist,
row.Link, ccReq, filt, decimate, trim,
row.CCs)
def writeSimpleHypoDDInput(self, fileName='dt.cc', coef=1, minCC=.35):
"""
Create a hypoDD cross correlation file (EG dt.cc), assuming the lag
times are pure S times (should be true if S amplitude is dominant)
Parameters
----------
fileName : str
            The path to the new file to be created
coef : float or int
            The exponential coefficient to apply to the correlation
            coefficient when creating the file; useful to down-weight lower
            cc values
"""
if not self.enforceOrigin:
msg = ('Sample Lags are not meaningful unless origin times are '
'enforced on each waveform. re-run detex.subspace.'
'createCluster with enforceOrigin=True')
detex.log(__name__, msg, level='error')
        fil = open(fileName, 'w')  # text mode: header/observation lines are str
# required number of zeros for numbering all events
reqZeros = int(np.ceil(np.log10(len(self.temkey))))
for num1, everow1 in self.temkey.iterrows():
for num2, everow2 in self.temkey.iterrows():
if num1 >= num2: # if autocors or redundant pair then skip
continue
ev1, ev2 = everow1.NAME, everow2.NAME
header = self._makeHeader(num1, num2, reqZeros)
count = 0
for sta in self.stalist: # iter through each station
Clu = self[sta]
try:
# find station specific index for event1
ind1 = np.where(np.array(Clu.key) == ev1)[0][0]
ind2 = np.where(np.array(Clu.key) == ev2)[0][0]
except IndexError: # if either event is not in index
msg = ('%s or %s not found on station %s' %
(ev1, ev2, sta))
detex.log(__name__, msg, level='warning', pri=True)
continue
# get data specific to this station
trdf = self.trdf[self.trdf.Station == sta].iloc[0]
sr1 = trdf.Stats[ev1]['sampling_rate']
sr2 = trdf.Stats[ev2]['sampling_rate']
if sr1 != sr2:
msg = 'Samp. rates not equal on %s and %s' % (ev1, ev2)
detex.log(__name__, msg, level='error')
else:
sr = sr1
Nc1, Nc2 = trdf.Stats[ev1]['Nc'], trdf.Stats[ev2]['Nc']
if Nc1 != Nc2:
msg = ('Num. of channels not equal for %s and %s on %s'
% (ev1, ev2))
detex.log(__name__, msg, level='warning', pri=True)
continue
else:
Nc = Nc1
cc = trdf.CCs[ind2][ind1] # get cc value
                    if np.isnan(cc):  # get other part of symmetric matrix
try:
cc = trdf.CCs[ind1][ind2]
except KeyError:
msg = ('%s - %s pair not in CCs matrix' %
(ev1, ev2))
detex.log(__name__, msg, level='warning', pri=True)
continue
if np.isnan(cc): # second pass required
msg = ('%s - %s pair returning NaN' %
(ev1, ev2))
detex.log(__name__, msg, level='error', pri=True)
continue
if cc < minCC:
continue
lagsamps = trdf.Lags[ind2][ind1]
subsamps = trdf.Subsamp[ind2][ind1]
if np.isnan(lagsamps): # if lag from other end of mat
lagsamps = -trdf.Lags[ind1][ind2]
subsamps = trdf.Subsamp[ind1][ind2]
lags = lagsamps / (sr * Nc) + subsamps
obsline = self._makeObsLine(sta, lags, cc ** coef)
if isinstance(obsline, string_types):
count += 1
if count == 1:
fil.write(header + '\n')
fil.write(obsline + '\n')
fil.close()
def _makeObsLine(self, sta, dt, cc, pha='S'):
line = '%s %0.4f %0.4f %s' % (sta, dt, cc, pha)
return line
def _makeHeader(self, num1, num2, reqZeros):
fomatstr = '{:0' + "{:d}".format(reqZeros) + 'd}'
# assume cross corr and cat origins are identical
head = '# ' + fomatstr.format(num1) + \
' ' + fomatstr.format(num2) + ' ' + '0.0'
return head
def _makeCodes(self):
evcodes = {}
for num, row in self.temkey.iterrows():
evcodes[num] = row.NAME
return evcodes
def updateReqCC(self, reqCC):
"""
Updates the required correlation coefficient for clusters to form on
all stations or individual stations.
Parameters
--------------
reqCC : float (between 0 and 1), or dict of reference keys and floats
            If reqCC is a float, the required correlation coefficient for
            clusters to form will be set to reqCC on all stations.
            If a dict, keys must be indices for each cluster object (i.e. net.sta,
sta, or int index) and values are the reqCC for that station.
Notes
---------------
        The Cluster class also has a similar method that can be more
        intuitive to use, as shown in the tutorial
"""
if isinstance(reqCC, float):
if reqCC < 0 or reqCC > 1:
msg = 'reqCC must be between 0 and 1'
detex.log(__name__, msg, level='error')
for cl in self.clusters:
cl.updateReqCC(reqCC)
elif isinstance(reqCC, dict):
for key in reqCC.keys():
self[key].updateReqCC(reqCC[key])
elif isinstance(reqCC, list):
for num, cc in enumerate(reqCC):
self[num].updateReqCC(cc)
def printAtr(self): # print out basic attributes used to make cluster
for cl in self.clusters:
cl.printAtr()
def dendro(self, **kwargs):
"""
Create dendrograms for each station
"""
for cl in self.clusters:
cl.dendro(**kwargs)
def simMatrix(self, groupClusts=False, savename=False, returnMat=False,
**kwargs):
"""
Function to create similarity matrix of each event pair
Parameters
-------
groupClusts : bool
            If True, order the similarity matrix by cluster, with the
            singletons coming last
savename : str or False
If not False, a path used by plt.savefig to save the current
            figure. The extension is necessary for specifying format.
See plt.savefig for details
returnMat : bool
If true return the similarity matrix
"""
out = []
for cl in self.clusters:
dout = cl.simMatrix(groupClusts, savename, returnMat, **kwargs)
out.append(dout)
def plotEvents(self, projection='merc', plotSingles=True, **kwargs):
"""
Plot the event locations for each station using basemap. Calls the
plotEvents method of the Cluster class, see its docs for accepted
kwargs.
Parameters
---------
projection : str
            The projection type to pass to basemap
plotSingles : bool
            If True also plot the singletons (events that don't cluster)
Notes
-------
kwargs are passed to basemap
If no working installation of basemap is found an ImportError will
be raised. See the following URL for tips on installing it:
http://matplotlib.org/basemap/users/installing.html, good luck!
"""
for cl in self.clusters:
cl.plotEvents(projection, plotSingles, **kwargs)
def write(self): # uses pickle to write class to disk
"""
Write instance to file (name is the filename attribute)
"""
msg = 'writing ClusterStream instance as %s' % self.filename
detex.log(__name__, msg, level='info', pri=True)
cPickle.dump(self, open(self.filename, 'wb'))
def __getitem__(self, key): # allows indexing of children Cluster objects
if isinstance(key, int):
return self.clusters[key]
elif isinstance(key, string_types):
if len(key.split('.')) == 1:
return self.clusters[self.stalist2.index(key)]
elif len(key.split('.')) == 2:
return self.clusters[self.stalist.index(key)]
else:
msg = ('indexer must either be an int or str of sta.net or sta'
' you passed %s' % key)
detex.log(__name__, msg, level='error')
def __len__(self):
return len(self.clusters)
def __repr__(self):
outstr = 'SSClusterStream with %d stations ' % (len(self.stalist))
return outstr
class Cluster(object):
def __init__(self, clustStream, station, temkey, eventList, link, ccReq,
filt, decimate, trim, DFcc):
        # instantiate a few needed variables (not all, to save space)
self.link = link
self.DFcc = DFcc
self.station = station
self.temkey = temkey
self.key = eventList
self.updateReqCC(ccReq)
self.trim = trim
self.decimate = decimate
self.nonClustColor = '0.6' # use a grey of 0.6 for singletons
def updateReqCC(self, newccReq):
"""
        Function to update the required correlation coefficient for
this station
Parameters
-------------
newccReq : float (between 0 and 1)
Required correlation coef
"""
if newccReq < 0. or newccReq > 1.:
msg = 'Parameter ccReq must be between 0 and 1'
detex.log(__name__, msg, level='error')
self.ccReq = newccReq
self.dflink, serclus = self._makeDFLINK(truncate=False)
# get events that actually cluster (filter out singletons)
dfcl = self.dflink[self.dflink.disSim <= 1 - self.ccReq]
# sort putting highest links in cluster on top
dfcl.sort_values(by='disSim', inplace=True, ascending=False)
dfcl.reset_index(inplace=True, drop=True)
dftemp = dfcl.copy()
clustlinks = {}
clustEvents = {}
clnum = 0
while len(dftemp) > 0:
ser = dftemp.iloc[0]
ndf = dftemp[[set(x).issubset(ser.II) for x in dftemp.II]]
clustlinks[clnum] = ndf.clust
valset = set([y for x in ndf.II.values for y in x])
clustEvents[clnum] = list(valset)
dftemp = dftemp[~dftemp.index.isin(ndf.index)]
clnum += 1
self.clustlinks = clustlinks
self.clusts = [[self.key[y] for y in clustEvents[x]]
for x in clustEvents.keys()]
keyset = set(self.key)
clustset = set([y for x in self.clusts for y in x])
self.singles = list(keyset.difference(clustset))
self.clustcount = np.sum([len(x) for x in self.clusts])
self.clustColors = self._getColors(len(self.clusts))
msg = ('ccReq for station %s updated to ccReq=%1.3f' %
(self.station, newccReq))
detex.log(__name__, msg, level='info', pri=True)
def _getColors(self, numClusts):
"""
        See if there are enough default colors for the clusters; if not,
        generate N unique colors (that probably don't look good together)
"""
clustColorsDefault = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
# if there are enough default python colors use them
if numClusts <= len(clustColorsDefault):
return clustColorsDefault[:numClusts]
        else:  # if not, generate N unique colors
colors = []
for i in np.arange(0., 360., 360. / numClusts):
hue = i / 360.
lightness = (50 + np.random.rand() * 10) / 100.
saturation = (90 + np.random.rand() * 10) / 100.
cvect = colorsys.hls_to_rgb(hue, lightness, saturation)
rgb = [int(x * 255) for x in cvect]
                # convert to hex code (py2/3-compatible formatting)
                colors.append('#%02x%02x%02x' % tuple(rgb))
return colors
def _makeColorDict(self, clustColors, nonClustColor):
if len(self.clusts) < 1:
colorsequence = clustColors
# if not enough colors repeat color matrix
elif float(len(clustColors)) / len(self.clusts) < 1:
colorsequence = clustColors * \
int(np.ceil((float(len(self.clusts)) / len(clustColors))))
else:
colorsequence = clustColors
        # initialize color list with default color
color_list = [nonClustColor] * 3 * len(self.dflink)
for a in range(len(self.clusts)):
for b in self.clustlinks[a]:
color_list[int(b)] = colorsequence[a]
return color_list
def _makeDFLINK(self, truncate=True): # make the link dataframe
N = len(self.link)
# append cluster numbers to link array
link = np.append(self.link, np.arange(N + 1, N + N + 1).reshape(N, 1), 1)
        if truncate:  # truncate after required coefficient
linkup = link[link[:, 2] <= 1 - self.ccReq]
else:
linkup = link
T = fcluster(link[:, 0:4], 1 - self.ccReq, criterion='distance')
serclus = pd.Series(T)
clusdict = pd.Series([np.array([x]) for x in np.arange(
0, N + 1)], index=np.arange(0, N + 1))
for a in range(len(linkup)):
clusdict[int(linkup[a, 4])] = np.append(
clusdict[int(linkup[a, 0])], clusdict[int(linkup[a, 1])])
columns = ['i1', 'i2', 'disSim', 'num', 'clust']
dflink = pd.DataFrame(linkup, columns=columns)
if len(dflink) > 0:
dflink['II'] = list
else:
msg = 'No events cluster with corr coef = %1.3f' % self.ccReq
detex.log(__name__, msg, level='info', pri=True)
for a in dflink.iterrows(): # enumerate cluster contents
ar1 = list(np.array(clusdict[int(a[1].i1)]))
ar2 = list(np.array(clusdict[int(a[1].i2)]))
dflink['II'][a[0]] = ar1 + ar2
return dflink, serclus
# creates a basic dendrogram plot
def dendro(self, hideEventLabels=True, show=True, saveName=False,
legend=True, **kwargs):
"""
Function to plot dendrograms of the clusters
Parameters
-----
hideEventLabels : bool
            turns x-axis labeling on/off. Better left as True if many events
            are in the event pool
show : bool
If true call plt.show
saveName : str or False
            path to save figure. Extension denotes format. See plt.savefig
for details
legend : bool
If true plot a legend on the side of the dendrogram
Note
----------
kwargs are passed to scipy.cluster.hierarchy.dendrogram, see docs
for acceptable arguments and descriptions
"""
# Get color schemes
color_list = self._makeColorDict(self.clustColors, self.nonClustColor)
for a in range(len(self.clusts)):
plt.plot([], [], '-', color=self.clustColors[a])
plt.plot([], [], '-', color=self.nonClustColor)
dendrogram(self.link, color_threshold=1 - self.ccReq, count_sort=True,
link_color_func=lambda x: color_list[x], **kwargs)
ax = plt.gca()
if legend:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend([str(x) for x in range(1, len(self.clusts) + 1)] +
['N/A'], loc='center left', bbox_to_anchor=(1, .5),
title='Clusters')
ax.set_ylim([0, 1])
if hideEventLabels:
ax.set_xticks([])
plt.xlabel('Events')
plt.ylabel('Dissimilarity')
plt.title(self.station)
if saveName:
plt.savefig(saveName, **kwargs)
if show:
plt.show()
def plotEvents(self, projection='merc', plotSingles=True, **kwargs):
"""
        Plot the event locations for this station using basemap.
Parameters
---------
projection : str
The pojection type to pass to basemap
plotSingles : bool
            If True also plot the singletons (events that don't cluster)
Notes
-------
kwargs are passed to basemap
If no working installation of basemap is found an ImportError will
be raised. See the following URL for tips on installing it:
http://matplotlib.org/basemap/users/installing.html, good luck!
"""
# TODO make dot size scale with magnitudes
# make sure basemap is installed
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
            msg = 'mpl_toolkits basemap not installed, cannot plot'
detex.log(__name__, msg, level='error', e=ImportError)
# init figures and get limits
fig_map, emap, horrange = self._init_map(Basemap, projection, kwargs)
zmin, zmax, zscale = self._get_z_scaling(horrange)
fig_lat = self._init_profile_figs(zmin, zmax, zscale)
fig_lon = self._init_profile_figs(zmin, zmax, zscale)
        # separate singletons from clustered events
cl_dfs, sing_df = self._get_singletons_and_clusters()
self._plot_map_view(emap, fig_map, horrange, cl_dfs, sing_df)
self._plot_profile_view(zmin, zmax, zscale, fig_lat, fig_lon, cl_dfs,
sing_df, emap)
def _init_map(self, Basemap, projection, kwargs):
"""
Function to setup the map figure with basemap returns the
figure instance and basemap instance and horizontal range of plot
"""
map_fig = plt.figure()
# get map bounds
latmin = self.temkey.LAT.min()
latmax = self.temkey.LAT.max()
lonmin = self.temkey.LON.min()
lonmax = self.temkey.LON.max()
# create buffers so there is a slight border with no events around map
latbuff = abs((latmax - latmin) * 0.1)
lonbuff = abs((lonmax - lonmin) * 0.1)
# get the total horizontal distance of plot in km
totalxdist = obspy.core.util.geodetics.gps2DistAzimuth(
latmin, lonmin, latmin, lonmax)[0] / 1000
# init projection
emap = Basemap(projection=projection,
lat_0=np.mean([latmin, latmax]),
lon_0=np.mean([lonmin, lonmax]),
resolution='h',
area_thresh=0.1,
llcrnrlon=lonmin - lonbuff,
llcrnrlat=latmin - latbuff,
urcrnrlon=lonmax + lonbuff,
urcrnrlat=latmax + latbuff,
**kwargs)
# draw scale
emap.drawmapscale(lonmin, latmin, lonmin, latmin, totalxdist / 4.5)
# get limits in projection
xmax, xmin, ymax, ymin = emap.xmax, emap.xmin, emap.ymax, emap.ymin
horrange = max((xmax - xmin), (ymax - ymin)) # horizontal range
# get maximum degree distance for setting scalable ticks
latdi, londi = [abs(latmax - latmin), abs(lonmax - lonmin)]
maxdeg = max(latdi, londi)
parallels = np.arange(0., 80, maxdeg / 4)
emap.drawparallels(parallels, labels=[1, 0, 0, 1])
meridians = np.arange(10., 360., maxdeg / 4)
mers = emap.drawmeridians(meridians, labels=[1, 0, 0, 1])
for m in mers: # rotate meridian labels
try:
mers[m][1][0].set_rotation(90)
except:
pass
plt.title('Clusters on %s' % self.station)
return map_fig, emap, horrange
def _init_profile_figs(self, zmin, zmax, zscale):
"""
init figs for plotting the profiles of the events
"""
# init profile figures
profile_fig = plt.figure()
z1 = zmin * zscale
z2 = zmax * zscale
tickfor = ['%0.1f' % x1 for x1 in np.linspace(zmin, zmax, 10)]
plt.yticks(np.linspace(z1, z2, 10), tickfor)
plt.gca().invert_yaxis()
plt.xticks([])
plt.ylabel('Depth (km)')
return profile_fig
def _get_z_scaling(self, horrange):
"""
Return z limits and scale factors
"""
zmin, zmax = self.temkey.DEPTH.min(), self.temkey.DEPTH.max()
zscale = horrange / (zmax - zmin)
return zmin, zmax, zscale
def _get_singletons_and_clusters(self):
"""
get dataframes of clustered events and singletons
Note: cl_dfs is a list of dfs whereas sing_df is just a df
"""
cl_dfs = [self.temkey[self.temkey.NAME.isin(x)] for x in self.clusts]
sing_df = self.temkey[self.temkey.NAME.isin([x for x in self.singles])]
return cl_dfs, sing_df
def _plot_map_view(self, emap, map_fig, horrange, cl_dfs, sing_df):
"""
plot the map figure
"""
plt.figure(map_fig.number) # set to map figure
# plot singles
x, y = emap(sing_df.LON.values, sing_df.LAT.values)
emap.plot(x, y, '.', color=self.nonClustColor, ms=6.0)
for clnum, cl in enumerate(cl_dfs):
x, y = emap(cl.LON.values, cl.LAT.values)
emap.plot(x, y, '.', color=self.clustColors[clnum])
def _plot_profile_view(self, zmin, zmax, zscale, fig_lat, fig_lon, cl_df,
sing_df, emap):
"""
plot the profile view
"""
x_sing, y_sing = emap(sing_df.LON.values, sing_df.LAT.values)
# plot singletons
nccolor = self.nonClustColor
plt.figure(fig_lon.number)
plt.plot(x_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)
plt.xlabel('Longitude')
plt.figure(fig_lat.number)
plt.plot(y_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)
plt.xlabel('Latitude')
# plot clusters
for clnum, cl in enumerate(cl_df):
ccolor = self.clustColors[clnum]
x, y = emap(cl.LON.values, cl.LAT.values)
plt.figure(fig_lon.number)
plt.plot(x, cl.DEPTH * zscale, '.', color=ccolor)
plt.figure(fig_lat.number)
plt.plot(y, cl.DEPTH * zscale, '.', color=ccolor)
# set buffers so nothing plots right on edge
for fig in [fig_lat, fig_lon]:
plt.figure(fig.number)
xlim = plt.xlim()
xdist = abs(max(xlim) - min(xlim))
plt.xlim(xlim[0] - xdist * .1, xlim[1] + xdist * .1)
ylim = plt.ylim()
            ydist = abs(max(ylim) - min(ylim))
plt.ylim(ylim[0] - ydist * .1, ylim[1] + ydist * .1)
def simMatrix(self, groupClusts=False, savename=False, returnMat=False,
**kwargs):
"""
Function to create basic similarity matrix of the values
in the cluster object
Parameters
-------
groupClusts : boolean
            If True, order the similarity matrix by cluster, with the
            singletons coming last
savename : str or False
If not False, a path used by plt.savefig to save the current
            figure. The extension is necessary for specifying format. See
plt.savefig for details
returnMat : boolean
If true return the similarity matrix
"""
if groupClusts: # if grouping clusters together
clusts = copy.deepcopy(self.clusts) # get cluster list
clusts.append(self.singles) # add singles list at end
eveOrder = list(itertools.chain.from_iterable(clusts))
indmask = {
num: list(self.key).index(eve) for num,
                eve in enumerate(eveOrder)}  # create a mask for the order
else:
# blank index mask if not
indmask = {x: x for x in range(len(self.key))}
plt.figure()
le = self.DFcc.columns.values.max()
mat = np.zeros((le + 1, le + 1))
# deb([le,indmask,self.DFcc])
for a in range(le + 1):
for b in range(le + 1):
if a == b:
mat[a, b] = 1
else:
# new a and b coords based on mask
a1, b1 = indmask[a], indmask[b]
gi = max(a1, b1)
li = min(a1, b1)
mat[a, b] = self.DFcc.loc[li, gi]
mat[b, a] = self.DFcc.loc[li, gi]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
'my_colormap', ['blue', 'red'], 256)
img = plt.imshow(
mat,
interpolation='nearest',
cmap=cmap,
origin='upper',
vmin=0,
vmax=1)
plt.clim(0, 1)
plt.grid(True, color='white')
plt.colorbar(img, cmap=cmap)
plt.title(self.station)
if savename:
plt.savefig(savename, **kwargs)
if returnMat:
return mat
def write(self): # uses pickle to write class to disk
cPickle.dump(self, open(self.filename, 'wb'))
def printAtr(self): # print out basic attributes used to make cluster
print('%s Cluster' % self.station)
print('%d Events cluster out of %d' %
(self.clustcount, len(self.singles) + self.clustcount))
print('Total number of clusters = %d' % len(self.clusts))
        print('Required Cross Correlation Coefficient = %.3f' % self.ccReq)
def __getitem__(self, index): # allow indexing
return self.clusts[index]
def __iter__(self): # make class iterable
return iter(self.clusts)
def __len__(self):
return len(self.clusts)
# def __repr__(self):
# self.printAtr()
# return ''
class SubSpace(object):
""" Class used to hold subspaces for detector
Holds both subspaces (as defined from the SScluster object) and
single event clusters, or singles
"""
def __init__(self, singlesDict, subSpaceDict, cl, dtype, Pf, cfetcher):
self.cfetcher = cfetcher
self.clusters = cl
self.subspaces = subSpaceDict
self.singles = singlesDict
self.singletons = singlesDict
self.dtype = dtype
self.Pf = Pf
self.ssStations = self.subspaces.keys()
self.singStations = self.singles.keys()
self.Stations = list(set(self.ssStations) | set(self.singStations))
self.Stations.sort()
self._stakey2 = {x: x for x in self.ssStations}
self._stakey1 = {x.split('.')[1]: x for x in self.ssStations}
################################ Validate Cluster functions
def validateClusters(self):
"""
Method to check for misaligned waveforms and discard those that no
        longer meet the required correlation coefficient for each cluster.
See Issue 25 (www.github.com/d-chambers/detex) for why this might
be useful.
"""
msg = 'Validating aligned (and trimmed) waveforms in each cluster'
detex.log(__name__, msg, level='info', pri=True)
for sta in self.subspaces.keys():
subs = self.subspaces[sta]
c = self.clusters[sta]
ccreq = c.ccReq
for clustNum, row in subs.iterrows():
stKeys = row.SampleTrims.keys()
# get trim times if defined
if 'Starttime' in stKeys and 'Endtime' in stKeys:
start = row.SampleTrims['Starttime']
stop = row.SampleTrims['Endtime']
else:
start = 0
stop = -1
for ev1num, ev1 in enumerate(row.Events[:-1]):
ccs = [] # blank list for storing ccs of aligned WFs
for ev2 in row.Events[ev1num + 1:]:
t = row.AlignedTD[ev1][start: stop]
s = row.AlignedTD[ev2][start: stop]
maxcc = detex.construct.fast_normcorr(t, s)
ccs.append(maxcc)
if len(ccs) > 0 and max(ccs) < ccreq:
msg = (('%s fails validation check or is ill-aligned '
'on station %s, removing') % (ev1, row.Station))
detex.log(__name__, msg, pri=True)
self._removeEvent(sta, ev1, clustNum)
msg = 'Finished validateCluster call'
detex.log(__name__, msg, level='info', pri=True)
def _removeEvent(self, sta, event, clustNum):
"""
Function to remove an event from a SubSpace instance
"""
# remove from eventList
srow = self.subspaces[sta].loc[clustNum]
srow.Events.remove(event)
srow.AlignedTD.pop(event, None)
################################ SVD Functions
def SVD(self, selectCriteria=2, selectValue=0.9, conDatNum=100,
threshold=None, normalize=False, useSingles=True,
validateWaveforms=True, backupThreshold=None, **kwargs):
"""
        Function to perform SVD on the aligned waveforms and select which
        of the SVD basis vectors are to be used in event detection. Also assigns
a detection threshold to each subspace-station pair.
Parameters
----------------
        selectCriteria : int, selectValue : number
selectCriteria is the method for selecting which basis vectors
will be used as detectors. selectValue depends on selectCriteria
Valid options are:
0 - using the given Pf, find number of dimensions to maximize
                detection probability !!! NOT YET IMPLEMENTED!!!
selectValue - Not used
(Need to find a way to use the doubly-non central F
distribution in python)
1 - Failed implementation, not supported
2 - select basis number based on an average fractional signal
energy captured (see Figure 8 of Harris 2006). Then calculate
an empirical distribution of the detection statistic by running
each subspace over random continuous data with no high amplitude
signals (see getFAS method). A beta distribution is then fit to
the data and the DS value that sets the probability of false
detection to the Pf defined in the subspace instance is selected
as the threshold.
selectValue - Average fractional energy captured,
can range from 0 (use no basis vectors) to 1
(use all basis vectors). A value between 0.75 and 0.95
is recommended.
3 - select basis number based on an average fractional signal
energy captured (see Figure 8 of Harris 2006).
Then set detection threshold to a percentage of the minimum
fractional energy captured. This method is a bit quick and dirty
but ensures all events in the waveform pool will be detected.
                selectValue - a fraction of the minimum fractional
                energy captured (between 0 and 1).
4 - use a user defined number of basis vectors, beginning with the
most significant (Barrett and Beroza 2014 use first two basis
vectors as an "empirical" subspace detector). Then use the same
technique in method one to set threshold
selectValue - can range from 0 to number of events in
subspace, if selectValue is greater than number of events
all events are used
conDatNum : int
The number of continuous data chunks to use to estimate the
effective dimension of the signal space or to estimate the null
distribution. Used if selectCriteria == 1,2,4
threshold : float or None
Used to set each subspace at a user defined threshold. If any
value is set it overrides any of the previously defined methods and
avoids estimating the effective dimension of representation or
distribution of the null space. Can be useful if problems arise
in the false alarm statistic calculation
normalize : bool
If true normalize the amplitude of all the training events before
            performing the SVD. Keeps higher amplitude events from dominating
            the SVD vectors but can over-emphasize noise. Harris 2006 recommends
using normalization but the personal experience of the author has
found normalization can increase the detector's propensity to
return false detections.
useSingles : bool
If True also calculate the thresholds for singles
validateWaveforms : bool
            If True call the validateClusters method before performing the SVD
            to make sure each trimmed aligned waveform still meets the
            required correlation coefficient. Any waveforms that do not will
be discarded.
backupThreshold : None or float
A backup threshold to use if approximation fails. Typically,
using the default detex settings, a reasonable value would be
0.25
kwargs are passed to the getFAS call (if used)
"""
# make sure user defined options are kosher
self._checkSelection(selectCriteria, selectValue, threshold)
# Iterate through all subspaces defined by stations
for station in self.ssStations:
for ind, row in self.subspaces[station].iterrows():
self.subspaces[station].UsedSVDKeys[ind] = []
svdDict = {} # initialize dict to put SVD vectors in
keys = sorted(row.Events)
arr, basisLength = self._trimGroups(ind, row, keys, station)
if basisLength == 0:
msg = (('subspace %d on %s is failing alignment and '
'trimming, deleting it') % (ind, station))
detex.log(__name__, msg, level='warn')
self._drop_subspace(station, ind)
continue
if normalize:
arr = np.array([x / np.linalg.norm(x) for x in arr])
tparr = np.transpose(arr)
# perform SVD
U, s, Vh = scipy.linalg.svd(tparr, full_matrices=False)
# make dict with sing. value as key and sing. vector as value
for einum, eival in enumerate(s):
svdDict[eival] = U[:, einum]
# asign Parameters back to subspace dataframes
self.subspaces[station].SVD[ind] = svdDict # assign SVD
fracEnergy = self._getFracEnergy(ind, row, svdDict, U)
usedBasis = self._getUsedBasis(ind, row, svdDict, fracEnergy,
selectCriteria, selectValue)
# Add fracEnergy and SVD keys (sing. vals) to main DataFrames
self.subspaces[station].FracEnergy[ind] = fracEnergy
self.subspaces[station].UsedSVDKeys[ind] = usedBasis
self.subspaces[station].SVDdefined[ind] = True
numBas = len(self.subspaces[station].UsedSVDKeys[ind])
self.subspaces[station].NumBasis[ind] = numBas
if len(self.ssStations) > 0:
self._setThresholds(selectCriteria, selectValue, conDatNum,
threshold, basisLength, backupThreshold, kwargs)
if len(self.singStations) > 0 and useSingles:
self.setSinglesThresholds(conDatNum=conDatNum, threshold=threshold,
backupThreshold=backupThreshold,
kwargs=kwargs)
def _drop_subspace(self, station, ssnum):
"""
Drop a subspace that is misbehaving
"""
space = self.subspaces[station]
self.subspaces[station] = space[space.index != int(ssnum)]
def _trimGroups(self, ind, row, keys, station):
"""
        function to get trimmed subspaces if trim times are defined, and
return an array of the aligned waveforms for the SVD to act on
"""
stkeys = row.SampleTrims.keys()
aliTD = row.AlignedTD
if 'Starttime' in stkeys and 'Endtime' in stkeys:
stim = row.SampleTrims['Starttime']
etim = row.SampleTrims['Endtime']
if stim < 0: # make sure stim is not less than 0
stim = 0
Arr = np.vstack([aliTD[x][stim:etim] -
np.mean(aliTD[x][stim:etim]) for x in keys])
basisLength = Arr.shape[1]
else:
msg = ('No trim times for %s and station %s, try running '
'pickTimes or attachPickTimes' % (row.Name, station))
detex.log(__name__, msg, level='warn', pri=True)
Arr = np.vstack([aliTD[x] - np.mean(aliTD[x]) for x in keys])
basisLength = Arr.shape[1]
return Arr, basisLength
def _checkSelection(self, selectCriteria, selectValue, threshold):
"""
Make sure all user defined values are kosher for SVD call
"""
if selectCriteria in [1, 2, 3]:
if selectValue > 1 or selectValue < 0:
msg = ('When selectCriteria==%d selectValue must be a float'
' between 0 and 1' % selectCriteria)
detex.log(__name__, msg, level='error', e=ValueError)
elif selectCriteria == 4:
if selectValue < 0 or not isinstance(selectValue, int):
                msg = ('When selectCriteria==4 selectValue must be an '
                       'integer greater than 0')
detex.log(__name__, msg, level='error', e=ValueError)
else:
msg = 'selectCriteria of %s is not supported' % selectCriteria
detex.log(__name__, msg, level='error')
if threshold is not None:
if not isinstance(threshold, numbers.Number) or threshold < 0:
msg = 'Unsupported type for threshold, must be None or float'
detex.log(__name__, msg, level='error', e=ValueError)
def _getFracEnergy(self, ind, row, svdDict, U):
"""
        calculates the % energy capture for each subspace for each possible
dimension of rep. (up to # of events that go into the subspace)
"""
fracDict = {}
keys = row.Events
        svales = sorted(svdDict.keys(), reverse=True)  # py3: dict_keys has no .sort()
stkeys = row.SampleTrims.keys() # dict defining sample trims
for key in keys:
aliTD = row.AlignedTD[key] # aligned waveform for event key
if 'Starttime' in stkeys and 'Endtime' in stkeys:
start = row.SampleTrims['Starttime'] # start of trim in samps
end = row.SampleTrims['Endtime'] # end of trim in samps
aliwf = aliTD[start: end]
else:
aliwf = aliTD
Ut = np.transpose(U) # transpose of basis vects
# normalized dot product (mat. mult.)
normUtAliwf = scipy.dot(Ut, aliwf) / scipy.linalg.norm(aliwf)
# add 0% energy capture for dim of 0
repvect = np.insert(np.square(normUtAliwf), 0, 0)
# cumul. energy captured for increasing dim. reps
cumrepvect = [np.sum(repvect[:x + 1]) for x in range(len(repvect))]
fracDict[key] = cumrepvect # add cumul. to keys
# get average and min energy capture, append value to dict
fracDict['Average'] = np.average([fracDict[x] for x in keys], axis=0)
fracDict['Minimum'] = np.min([fracDict[x] for x in keys], axis=0)
return (fracDict)
def _getUsedBasis(self, ind, row, svdDict, cumFracEnergy,
selectCriteria, selectValue):
"""
function to populate the keys of the selected SVD basis vectors
"""
        keys = sorted(svdDict.keys(), reverse=True)  # py3: need a list for slicing below
if selectCriteria in [1, 2, 3]:
# make sure last element is exactly 1
cumFracEnergy['Average'][-1] = 1.00
ndim = np.argmax(cumFracEnergy['Average'] >= selectValue)
selKeys = keys[:ndim] # selected keys
if selectCriteria == 4:
selKeys = keys[:selectValue + 1]
return selKeys
def _setThresholds(self, selectCriteria, selectValue, conDatNum,
threshold, basisLength, backupThreshold, kwargs={}):
        if threshold is not None and threshold > 0:  # avoid None > 0 on py3
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
self.subspaces[station].Threshold[ind] = threshold
elif selectCriteria == 1:
msg = 'selectCriteria 1 currently not supported'
detex.log(__name__, msg, level='error', e=ValueError)
elif selectCriteria in [2, 4]:
# call getFAS to estimate null space dist.
self.getFAS(conDatNum, **kwargs)
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
beta_a, beta_b = row.FAS['betadist'][0:2]
# get threshold from beta dist.
# TODO consider implementing other dist. options as well
th = scipy.stats.beta.isf(self.Pf, beta_a, beta_b, 0, 1)
if th > .9:
th, Pftemp = self._approxThld(beta_a, beta_b, station,
row, self.Pf, 1000, 3,
backupThreshold)
msg = ('Scipy.stats.beta.isf failed with pf=%e, '
'approximated threshold to %f with a Pf of %e '
'for station %s %s using forward grid search' %
(self.Pf, th, Pftemp, station, row.Name))
detex.log(__name__, msg, level='warning')
self.subspaces[station].Threshold[ind] = th
elif selectCriteria == 3:
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
th = row.FracEnergy['Minimum'][row.NumBasis] * selectValue
self.subspaces[station].Threshold[ind] = th
def setSinglesThresholds(self, conDatNum=50, recalc=False,
threshold=None, backupThreshold=None, **kwargs):
"""
Set thresholds for the singletons (unclustered events) by fitting
a beta distribution to estimation of null space
Parameters
----------
        conDatNum : int
The number of continuous data chunks to use to fit PDF
recalc : boolean
            If true, recalculate the False Alarm Statistics
threshold : None or float between 0 and 1
If number, don't call getFAS simply use given threshold
backupThreshold : None or float
            If approximating a threshold fails then use backupThreshold. If None
then raise.
Note
----------
Any singles without pick times will not be used. In this way singles
can be rejected
"""
for sta in self.singStations:
sing = self.singles[sta] # singles on station
sampTrims = self.singles[sta].SampleTrims
self.singles[sta].Name = ['SG%d' % x for x in range(len(sing))]
# get singles that have phase picks
singsAccepted = sing[[len(x.keys()) > 0 for x in sampTrims]]
self.singles[sta] = singsAccepted
self.singles[sta].reset_index(inplace=True, drop=True)
if threshold is None:
# get empirical dist unless manual threshold is passed
self.getFAS(conDatNum, useSingles=True,
useSubSpaces=False, **kwargs)
for sta in self.singStations:
for ind, row in self.singles[sta].iterrows():
if len(row.SampleTrims.keys()) < 1: # skip singles with no pick times
continue
if threshold:
th = threshold
else:
beta_a, beta_b = row.FAS[0]['betadist'][0:2]
th = scipy.stats.beta.isf(self.Pf, beta_a, beta_b, 0, 1)
if th > .9:
th, Pftemp = self._approxThld(beta_a, beta_b, sta,
row, self.Pf, 1000, 3,
backupThreshold)
msg = ('Scipy.stats.beta.isf failed with pf=%e, '
'approximated threshold to %f with a Pf of %e '
'for station %s %s using forward grid search' %
(self.Pf, th, Pftemp, sta, row.Name))
detex.log(__name__, msg, level='warning')
self.singles[sta]['Threshold'][ind] = th
def _approxThld(self, beta_a, beta_b, sta, row, target, numint, numloops,
backupThreshold):
"""
        Because scipy.stats.beta.isf can break, if it returns a value near 1
        when this is obviously wrong, initialize a grid-search algorithm to get
        close to the desired threshold using the forward problem, which seems
        to work where the inverse fails. See this bug report:
https://github.com/scipy/scipy/issues/4677
"""
startVal, stopVal = 0, 1
loops = 0
while loops < numloops:
Xs = np.linspace(startVal, stopVal, numint)
pfs = np.array([scipy.stats.beta.sf(x, beta_a, beta_b) for x in Xs])
resids = abs(pfs - target)
minind = resids.argmin()
if minind == 0 or minind == numint - 1:
msg1 = (('Grid search for threshold failing for %s on %s, '
'set it manually or use default') % (sta, row.name))
msg2 = (('Grid search for threshold failing for %s on %s, '
'using backup %.2f') % (sta, row.name, backupThreshold))
if backupThreshold is None:
detex.log(__name__, msg1, level='error', e=ValueError)
else:
detex.log(__name__, msg2, level='warn', pri=True)
return backupThreshold, target
bestPf = pfs[minind]
bestX = Xs[minind]
startVal, stopVal = Xs[minind - 1], Xs[minind + 1]
loops += 1
return bestX, bestPf
########################### Visualization Methods
def plotThresholds(self, conDatNum, xlim=[-.01, .5], **kwargs):
"""
Function to sample the continuous data and plot the thresholds
calculated with the SVD call with a histogram of detex's best
estimate of the null space (see getFAS for more details)
Parameters
------
conDatNum : int
The number of continuous data chunks to use in the sampling,
duration of chunks defined in data fetcher
xlim : list (number, number)
The x limits on the plot (often it is useful to zoom in around 0)
**kwargs are passed to the getFAS call
"""
self.getFAS(conDatNum, **kwargs)
count = 0
for station in self.ssStations:
for ind, row in self.subspaces[station].iterrows():
beta_a, beta_b = row.FAS['betadist'][0:2]
plt.figure(count)
plt.subplot(2, 1, 1)
bins = np.mean(
[row.FAS['bins'][1:], row.FAS['bins'][:-1]], axis=0)
plt.plot(bins, row.FAS['hist'])
plt.title('Station %s %s' % (station, row.Name))
plt.axvline(row.Threshold, color='g')
beta = scipy.stats.beta.pdf(bins, beta_a, beta_b)
plt.plot(bins, beta * (max(row.FAS['hist']) / max(beta)), 'k')
plt.title('%s station %s' % (row.Name, row.Station))
plt.xlim(xlim)
plt.ylabel('Count')
plt.subplot(2, 1, 2)
bins = np.mean(
[row.FAS['bins'][1:], row.FAS['bins'][:-1]], axis=0)
plt.plot(bins, row.FAS['hist'])
plt.axvline(row.Threshold, color='g')
plt.plot(bins, beta * (max(row.FAS['hist']) / max(beta)), 'k')
plt.xlabel('Detection Statistic')
plt.ylabel('Count')
plt.semilogy()
plt.ylim(ymin=10 ** -1)
plt.xlim(xlim)
count += 1
def plotFracEnergy(self):
"""
        Method to plot the fractional energy captured by the subspace for
various dimensions of rep. Each event is plotted as a grey dotted
line, the average as a red solid line, and the chosen degree of rep.
is plotted as a solid green vertical line.
Similar to Harris 2006 Fig 8
"""
for a, station in enumerate(self.ssStations):
f = plt.figure(a + 1)
f.set_figheight(1.85 * len(self.subspaces[station]))
for ind, row in self.subspaces[station].iterrows():
if not isinstance(row.FracEnergy, dict):
                    msg = 'fractional energy not defined, call SVD'
detex.log(__name__, msg, level='error')
plt.subplot(len(self.subspaces[station]), 1, ind + 1)
for event in row.Events:
plt.plot(row.FracEnergy[event], '--', color='0.6')
plt.plot(row.FracEnergy['Average'], 'r')
plt.axvline(row.NumBasis, 0, 1, color='g')
plt.ylim([0, 1.1])
plt.title('Station %s, %s' % (row.Station, row.Name))
f.subplots_adjust(hspace=.4)
f.text(0.5, 0.06, 'Dimension of Representation', ha='center')
f.text(0.04, 0.5, 'Fraction of Energy Captured',
va='center', rotation='vertical')
plt.show()
def plotAlignedEvents(self): # plot aligned subspaces in SubSpaces object
"""
Plots the aligned events for each station in each cluster.
Will trim waveforms if trim times (by pickTimes or attachPickTimes)
are defined.
"""
for a, station in enumerate(self.ssStations):
for ind, row in self.subspaces[station].iterrows():
plt.figure(figsize=[10, .9 * len(row.Events)])
# f.set_figheight(1.85 * len(row.Events))
# plt.subplot(len(self.subspaces[station]), 1, ind + 1)
events = row.Events
stKeys = row.SampleTrims.keys() # sample trim keys
for evenum, eve in enumerate(events):
# plt.subplot(len(self.subspaces[station]), 1, evenum + 1)
aliTD = row.AlignedTD[eve] # aligned wf for event eve
if 'Starttime' in stKeys and 'Endtime' in stKeys:
start = row.SampleTrims['Starttime']
stop = row.SampleTrims['Endtime']
aliwf = aliTD[start: stop]
else:
aliwf = row.AlignedTD[eve]
plt.plot(aliwf / (2 * max(aliwf)) + 1.5 * evenum, c='k')
plt.xlim([0, len(aliwf)])
plt.ylim(-1, 1.5 * evenum + 1)
plt.xticks([])
plt.yticks([])
plt.title('Station %s, %s, %d events' % (station, row.Name, len(events)))
plt.show()
def plotBasisVectors(self, onlyused=False):
"""
Plots the basis vectors selected after performing the SVD
        If SVD has not been called an error will be raised
Parameters
------------
        onlyused : bool
If true only the selected basis vectors will be plotted. See
SVD for how detex selects basis vectors.
If false all will be plotted (used in blue, unused in red)
"""
        if not list(self.subspaces.values())[0].iloc[0].SVDdefined:  # py3-safe indexing
msg = 'SVD not performed, call SVD before plotting basis vectors'
detex.log(__name__, msg, level='error')
for subnum, station in enumerate(self.ssStations):
subsp = self.subspaces[station]
for ind, row in subsp.iterrows():
num_wfs = len(row.UsedSVDKeys) if onlyused else len(row.SVD)
                keyz = sorted(row.SVD.keys(), reverse=True)  # py3-safe sort
keyz = keyz[:num_wfs]
plt.figure(figsize=[10, .9 * num_wfs])
for keynum, key in enumerate(keyz):
wf = row.SVD[key] / (2 * max(row.SVD[key])) - 1.5 * keynum
c = 'b' if keynum < len(row.UsedSVDKeys) else '.5'
plt.plot(wf, c=c)
plt.ylim(-1.5 * keynum - 1, 1)
plt.yticks([])
plt.xticks([])
plt.title('%s station %s' % (row.Name, row.Station))
def plotOffsetTimes(self):
"""
Function to loop through each station/subspace pair and make
histograms of offset times
"""
count = 1
for station in self.ssStations:
for ind, row in self.subspaces[station].iterrows():
if len(row.SampleTrims.keys()) < 1:
msg = 'subspaces must be trimmed before plotting offsets'
detex.log(__name__, msg, level='error')
plt.figure(count)
keys = row.Events
offsets = [row.Stats[x]['offset'] for x in keys]
plt.hist(offsets)
plt.title('%s %s' % (row.Station, row.Name))
plt.figure(count + 1)
numEvs = len(row.Events)
ranmin = np.zeros(numEvs)
ranmax = np.zeros(numEvs)
orsamps = np.zeros(numEvs)
for evenum, eve in enumerate(row.Events):
tem = self.clusters.temkey[
self.clusters.temkey.NAME == eve].iloc[0]
condat = row.AlignedTD[
eve] / max(2 * abs(row.AlignedTD[eve])) + evenum + 1
Nc, Sr = row.Stats[eve]['Nc'], row.Stats[
eve]['sampling_rate']
starTime = row.Stats[eve]['starttime']
ortime = obspy.core.UTCDateTime(tem.TIME).timestamp
orsamps[evenum] = row.SampleTrims[
'Starttime'] - (starTime - ortime) * Nc * Sr
plt.plot(condat, 'k')
plt.axvline(row.SampleTrims['Starttime'], c='g')
plt.plot(orsamps[evenum], evenum + 1, 'r*')
ran = row.SampleTrims['Endtime'] - orsamps[evenum]
ranmin[evenum] = orsamps[evenum] - ran * .1
ranmax[evenum] = row.SampleTrims['Endtime'] + ran * .1
plt.xlim(int(min(ranmin)), int(max(ranmax)))
plt.axvline(min(orsamps), c='r')
plt.axvline(max(orsamps), c='r')
count += 2
############################# Pick Times functions
def pickTimes(self, duration=30, traceLimit=15, repick=False,
subspace=True, singles=True):
"""
        Calls a modified version of obspyck (https://github.com/megies/obspyck),
        a GUI for picking phases, so the user can manually select start times
        (trim) of unclustered and clustered events.
        Trimming down each waveform group to only include event phases,
        and not pre and post event noise, will significantly decrease the
        runtime for the subspace detection (called with the detex method).
        Trimming is required for singletons; any singletons without trim
        times will not be used as detectors.
Parameters
--------------
duration : real number
the time after the first pick (in seconds) to trim waveforms.
The fact that the streams are multiplexed is taken into account.
If None is passed then the last pick will be used as the end time
for truncating waveforms.
traceLimit : int
Limits the number of traces that will show up to be manually
picked to traceLimit events. Avoids bogging down and/or killing
the GUI with too many events.
repick : boolean
If true repick times that already have sample trim times, else
only pick those that do not.
subspace : boolean
If true pick subspaces
singles : boolean
If true pick singletons
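
        Example (illustrative sketch; ``ss`` is assumed to be an existing
        SubSpace instance)::

            # interactively pick arrivals and keep 30 s of signal after the
            # first pick for both subspaces and singletons
            ss.pickTimes(duration=30, subspace=True, singles=True)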
"""
qApp = PyQt4.QtGui.QApplication(sys.argv)
if subspace:
self._pickTimes(self.subspaces, duration, traceLimit,
qApp, repick=repick)
if singles:
self._pickTimes(self.singles, duration, traceLimit, qApp,
issubspace=False, repick=repick)
def _pickTimes(self, trdfDict, duration, traceLimit, qApp,
issubspace=True, repick=False):
"""
        Function to initiate GUI for picking, called by pickTimes
"""
for sta in trdfDict.keys():
for ind, row in trdfDict[sta].iterrows():
if not row.SampleTrims or repick: # if not picked or repick
# Make a modified obspy stream to pass to streamPick
st = self._makeOpStream(ind, row, traceLimit)
Pks = None # This is needed or it crashes OS X
Pks = detex.streamPick.streamPick(st, ap=qApp)
d1 = {}
for b in Pks._picks:
if b: # if any picks made
d1[b.phase_hint] = b.time.timestamp
if len(d1.keys()) > 0: # if any picks made
# get sample rate and number of chans
sr = row.Stats[row.Events[0]]['sampling_rate']
Nc = row.Stats[row.Events[0]]['Nc']
# get sample divisible by NC to keep traces aligned
fp = int(min(d1.values())) # first picked phase
d1['Starttime'] = fp - fp % Nc
                        # if duration parameter is defined (it is usually
# better to leave it defined)
stime = d1['Starttime']
if duration:
etime = stime + int(duration * sr * Nc)
d1['Endtime'] = etime
d1['DurationSeconds'] = duration
else:
etime = int(max(d1.values()))
d1['Endtime'] = etime
dursecs = (etime - stime) / (sr * Nc)
d1['DurationSeconds'] = dursecs
trdfDict[sta].SampleTrims[ind] = d1
for event in row.Events: # update starttimes
sspa = trdfDict[sta]
stimeOld = sspa.Stats[ind][event]['starttime']
# get updated start time
stN = stimeOld + d1['Starttime'] / (Nc * sr)
ot = trdfDict[sta].Stats[ind][event]['origintime']
offset = stN - ot
trdfDict[sta].Stats[ind][event]['starttime'] = stN
trdfDict[sta].Stats[ind][event]['offset'] = offset
if not Pks.KeepGoing:
msg = 'aborting picking, progress saved'
detex.log(__name__, msg, pri=1)
return None
self._updateOffsets()
def _makeOpStream(self, ind, row, traceLimit):
"""
Make an obspy stream of the multiplexed data stored in main detex
DataFrame
"""
st = obspy.core.Stream()
count = 0
if 'AlignedTD' in row: # if this is a subspace
for key in row.Events:
if count < traceLimit:
tr = obspy.core.Trace(data=row.AlignedTD[key])
tr.stats.channel = key
tr.stats.network = row.Name
tr.stats.station = row.Station
st += tr
count += 1
return st
else: # if this is a single event
for key in row.Events:
tr = obspy.core.Trace(data=row.MPtd[key])
tr.stats.channel = key
tr.stats.station = row.Station
st += tr
return st
def _updateOffsets(self):
"""
Calculate offset (predicted origin times), throw out extreme
outliers using median and median scaling
"""
for sta in self.subspaces.keys():
for num, row in self.subspaces[sta].iterrows():
keys = row.Stats.keys()
offsets = [row.Stats[x]['offset'] for x in keys]
self.subspaces[sta].Offsets[
num] = self._getOffsets(np.array(offsets))
for sta in self.singles.keys():
for num, row in self.singles[sta].iterrows():
keys = row.Stats.keys()
offsets = [row.Stats[x]['offset'] for x in keys]
self.singles[sta].Offsets[
num] = self._getOffsets(np.array(offsets))
def attachPickTimes(self, pksFile='PhasePicks.csv',
function='median', defaultDuration=30):
"""
        Rather than picking times manually, attach a file (either a csv or a
        pickled pandas DataFrame) of pick times. The pick time file must have
        the following fields: TimeStamp, Station, Event, Phase.
This file can be created by detex.util.pickPhases. If trims are
already defined attachPickTimes will not override.
        Parameters
        ----------
pksFile : str
Path to the input file (either csv or pickle)
        function : str ('mean', 'median', 'max', or 'min')
Describes how to handle selecting a common pick time for
subspace groups (each event in a subspace cannot be treated
independently as the entire group is aligned to maximize
similarity). Does not apply for singles.
mean - Trims the group to the sample corresponding to the
average of the first arriving phase
median - Trims the group to the sample corresponding to the
median of the first arriving phase
max - trim to max value of start times for group
min - trim to min value of end times for group
defaultDuration : int or None
if Int, the default duration (in seconds) to trim the signal to
starting from the first arrival in pksFile for each event or
subspace group. If None, then durations are defined by first
arriving phase (start) and last arriving phase (stop) for each
event
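
        Example (illustrative sketch; the file name is a placeholder and
        ``ss`` is assumed to be an existing SubSpace instance)::

            # picks created beforehand, e.g. with detex.util.pickPhases
            ss.attachPickTimes(pksFile='PhasePicks.csv', function='median',
                               defaultDuration=30)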
"""
try: # read pksFile
pks = pd.read_csv(pksFile)
except Exception:
try:
pks = pd.read_pickle(pksFile)
except Exception:
msg = ('%s does not exist, or it is not a pkl or csv file'
% pksFile)
detex.log(__name__, msg, level='error')
        # get appropriate function according to the function parameter
if function == 'mean':
fun = np.mean
elif function == 'max':
fun = np.max
elif function == 'min':
fun = np.min
elif function == 'median':
fun = np.median
else:
msg = ('function %s not supported, options are: mean, median, min,'
' max' % function)
detex.log(__name__, msg, level='error')
# loop through each station in cluster, get singles and subspaces
for cl in self.clusters:
sta = cl.station # current station
# Attach singles
if sta in self.singles.keys():
for ind, row in self.singles[sta].iterrows():
if len(row.SampleTrims.keys()) > 0:
continue # skip if sampletrims already defined
# get phases that apply to current event and station
con1 = pks.Event.isin(row.Events)
con2 = pks.Station == sta
pk = pks[(con1) & (con2)]
eves, starttimes, Nc, Sr = self._getStats(row)
if len(pk) > 0:
trims = self._getSampTrim(eves, starttimes, Nc, Sr, pk,
defaultDuration, fun, sta,
ind, self.singles[sta], row)
if isinstance(trims, dict):
self.singles[sta].SampleTrims[ind] = trims
self._updateOffsets()
# Attach Subspaces
if sta in self.subspaces.keys():
for ind, row in self.subspaces[sta].iterrows():
if len(row.SampleTrims.keys()) > 0:
continue # skip if sampletrims already defined
# phases that apply to current event and station
con1 = pks.Event.isin(row.Events)
con2 = pks.Station == sta
pk = pks[(con1) & (con2)]
eves, starttimes, Nc, Sr = self._getStats(row)
if len(pk) > 0:
trims = self._getSampTrim(eves, starttimes, Nc, Sr, pk,
defaultDuration, fun, sta,
ind, self.subspaces[sta], row)
if isinstance(trims, dict):
self.subspaces[sta].SampleTrims[ind] = trims
self._updateOffsets()
def _getSampTrim(self, eves, starttimes, Nc, Sr, pk, defaultDuration,
fun, sta, num, DF, row):
"""
Determine sample trims for each single or subspace
"""
# stdict={}#intialize sample trim dict
startsamps = []
stopsamps = []
secduration = []
for ev in eves: # loop through each event
p = pk[pk.Event == ev]
if len(p) < 1: # if event is not recorded skip
continue
start = p.TimeStamp.min()
startsampsEve = (start - starttimes[ev]) * (Nc * Sr)
# see if any of the samples would be trimmed too much
try: # assume is single
len_test = len(row.MPtd[ev]) < startsampsEve
except AttributeError: # this is really a subspace
len_test = len(row.AlignedTD[ev]) < startsampsEve
if len_test:
utc_start = obspy.UTCDateTime(start)
                msg = (('Start samples for %s on %s exceed available data, '
                        'check waveform quality and ensure phase pick is for '
                        'the correct event. The origin time is %s and the '
                        'pick time is %s. Skipping attaching pick. '
) % (ev, sta, ev, str(utc_start)))
detex.log(__name__, msg, level='warn')
return
# make sure starting time is not less than 0 else set to zero
if startsampsEve < 0:
startsampsEve = 0
start = starttimes[ev]
msg = 'Start time in phase file < 0 for event %s' % ev
detex.log(__name__, msg, level='warning', pri=False)
if defaultDuration:
stop = start + defaultDuration
secduration.append(defaultDuration)
else:
stop = p.TimeStamp.max()
secduration.append(stop - start)
assert stop > start # Make sure stop is greater than start
assert stop > starttimes[ev]
endsampsEve = (stop - starttimes[ev]) * (Nc * Sr)
startsamps.append(startsampsEve)
stopsamps.append(endsampsEve)
# update stats attached to each event to reflect new start time
otime = DF.Stats[num][ev]['origintime'] # origin time
DF.Stats[num][ev]['Starttime'] = start
DF.Stats[num][ev]['offset'] = start - otime
if len(startsamps) > 0:
sSamps = int(fun(startsamps))
rSSamps = sSamps - sSamps % Nc
eSamps = int(fun(stopsamps))
rESamps = eSamps - eSamps % Nc
dursec = int(fun(secduration))
outdict = {'Starttime': rSSamps, 'Endtime': rESamps,
'DurationSeconds': dursec}
return outdict
else:
return
def _getStats(self, row):
"""
Get the sampling rate, starttime, and number of channels for
each event group
"""
eves = row.Events
sr = [np.round(row.Stats[x]['sampling_rate']) for x in eves]
if len(set(sr)) != 1:
msg = ('Events %s on Station %s have different sampling rates or '
                   'no sampling rates' % (row.Events, row.Station))
detex.log(__name__, msg, level='error')
Nc = [row.Stats[x]['Nc'] for x in eves]
if len(set(Nc)) != 1:
msg = (('Events %s on Station %s do not have the same channels or'
                    ' have no channels') % (row.Events, row.Station))
detex.log(__name__, msg, level='error')
starttimes = {x: row.Stats[x]['starttime'] for x in eves}
return eves, starttimes, list(set(Nc))[0], list(set(sr))[0]
def _getOffsets(self, offsets, m=25.):
"""
        Get offsets, reject outliers based on median values (accounts
for possible mismatch in events and origin times)
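
        For example, with offsets [1.0, 1.1, 0.9, 50.0] the median is 1.05
        and the median absolute deviation is 0.1, so 50.0 sits several
        hundred scaled deviations away (far above m) and is rejected;
        [0.9, 1.0, 1.1] is returned as [min, median, max].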
"""
if len(offsets) == 1:
            return [offsets[0], offsets[0], offsets[0]]
d = np.abs(offsets - np.median(offsets))
mdev = np.median(d)
s = d / mdev if mdev else 0.
if isinstance(s, float):
offs = offsets
else:
offs = offsets[s < m]
return [np.min(offs), np.median(offs), np.max(offs)]
def getFAS(
self,
conDatNum,
LTATime=5,
STATime=0.5,
staltalimit=8.0,
useSubSpaces=True,
useSingles=False,
numBins=401,
recalc=False,
**kwargs):
"""
Function to initialize a FAS (false alarm statistic) instance, used
primarily for sampling and characterizing the null space of the
subspaces and singletons. Random samples of the continuous data are
loaded, examined for high amplitude signals with a basic STA/LTA
method, and any traces with STA/LTA ratios higher than the
staltalimit parameter are rejected. The continuous DataFetcher
already attached to the SubSpace instance will be used to get
the continuous data.
Parameters
-------------
        conDatNum : int
The number of continuous data files (by default in hour chunks)
to use.
LTATime : float
The long term average time window in seconds used for
checking continuous data
STATime : float
The short term average time window in seconds for checking
continuous data
staltalimit : int or float
            The value at which continuous data gets rejected as too noisy
            (i.e. transient signals are present)
useSubSpaces : bool
If True calculate FAS for subspaces
useSingles : bool
If True calculate FAS for singles
numBins : int
Number of bins for binning distributions (so distribution can be
loaded and plotted later)
Note
---------
The results are stored in a DataFrame for each subspace/singleton
under the "FAS" column of the main DataFrame
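
        Example (illustrative sketch; parameter values are placeholders and
        ``ss`` is assumed to be an existing SubSpace instance with trimmed
        waveforms and an attached continuous DataFetcher)::

            ss.getFAS(conDatNum=200, useSubSpaces=True, useSingles=False)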
"""
if useSubSpaces:
self._updateOffsets() # make sure offset times are up to date
for sta in self.subspaces.keys():
# check if FAS already calculated, only recalc if recalc
fas1 = self.subspaces[sta]['FAS'][0]
if isinstance(fas1, dict) and not recalc:
msg = ('FAS for station %s already calculated, to '
'recalculate pass True to the parameter recalc' %
sta)
detex.log(__name__, msg, pri=True)
else:
self.subspaces[sta]['FAS'] = detex.fas._initFAS(
self.subspaces[sta],
conDatNum,
self.clusters,
self.cfetcher,
LTATime=LTATime,
STATime=STATime,
staltalimit=staltalimit,
numBins=numBins,
dtype=self.dtype)
if useSingles:
for sta in self.singles.keys():
for a in range(len(self.singles[sta])):
fas1 = self.singles[sta]['FAS'][a]
if isinstance(fas1, dict) and not recalc:
msg = (('FAS for singleton %d already calculated on '
'station %s, to recalculate pass True to the '
'parameter recalc') % (a, sta))
detex.log(__name__, msg, pri=True)
# skip any events that have not been trimmed
elif len(self.singles[sta]['SampleTrims'][a].keys()) < 1:
continue
else:
self.singles[sta]['FAS'][a] = detex.fas._initFAS(
self.singles[sta][a:a + 1],
conDatNum,
self.clusters,
self.cfetcher,
LTATime=LTATime,
STATime=STATime,
staltalimit=staltalimit,
numBins=numBins,
dtype=self.dtype,
issubspace=False)
def detex(self,
utcStart=None,
utcEnd=None,
subspaceDB='SubSpace.db',
trigCon=0,
triggerLTATime=5,
triggerSTATime=0,
multiprocess=False,
delOldCorrs=True,
calcHist=True,
useSubSpaces=True,
useSingles=False,
estimateMags=True,
classifyEvents=None,
eventCorFile='EventCors',
utcSaves=None,
fillZeros=False):
"""
        Function to run subspace detection over continuous data and store
        results in the SQLite database subspaceDB
Parameters
------------
utcStart : str or num
An obspy.core.UTCDateTime readable object defining the start time
            of the correlations if not all available data are to be used
        utcEnd : str or num
An obspy.core.UTCDateTime readable object defining the end time
of the correlations
subspaceDB : str
Path to the SQLite database to store detections in. If it already
exists delOldCorrs parameters governs if it will be deleted before
running new detections, or appended to.
        trigCon : int (0 or 1)
            The condition for which detections should trigger. Once the
            condition is set the variable minCoef is used:
            0 is based on the detection statistic threshold,
            1 is based on the STA/LTA of the detection statistic threshold
            (only 0 is currently supported).
triggerLTATime : number
The long term average for the STA/LTA calculations in seconds.
triggerSTATime : number
The short term average for the STA/LTA calculations in seconds.
If ==0 then one sample is used.
multiprocess : bool
Determine if each station should be forked into its own process
for potential speed ups. Currently not implemented.
delOldCorrs : bool
Determines if subspaceDB should be deleted before performing
detections. If False old database is appended to.
calcHist : boolean
            If True calculates the histogram for every point of the detection
            statistic vectors (all hours, stations and subspaces) by keeping
            a cumulative bin count. Only slows the detections down slightly
            and can be useful for threshold sanity checks. The histograms are
            then returned to the main DataFrame in the SubSpace instance
            as the column histSubSpaces, and saved in the subspaceDB under the
            ss_hist and sg_hist tables for subspaces and singletons.
        useSubSpaces : bool
If True the subspaces will be used as detectors to scan
continuous data
useSingles : bool
If True the singles (events that did not cluster) will be used as
detectors to scan continuous data
estimateMags : bool
If True, magnitudes will be estimated for each detection by using
two methods. The first is using standard deviation ratios, and the
            second uses projected energy ratios (see Chambers et al. 2015 for
details).
classifyEvents : None, str, or DataFrame
If None subspace detectors will be run over continuous data.
Else, detex will be run over event waveforms in order to classify
            events into groups based on which subspace they are most similar
to. In the latter case the classifyEvents argument must be a
str (path to template key like csv) or DataFrame (loaded template
key file). The same event DataFetcher attached to the cluster
object will be used to get the data. This feature is Experimental.
eventCorFile : str
A path to a new pickled DataFrame created when the eventDir option
is used. Records the highest detection statistic in the file
for each event, station, and subspace. Useful when trying to
characterize events.
utcSaves : None or list of obspy DateTime readable objects
            Either None (not used) or an iterable of objects readable by
            obspy.UTCDateTime. When the detections are run, if the continuous
            data cover a time indicated in utcSaves then the continuous data
            and detection statistic vectors are saved to a pickled dataframe
of the name "UTCsaves.pkl". This can be useful for debugging, or
extracting the DS vector for a time of interest.
fillZeros : bool
If true fill the gaps in continuous data with 0s. If True
STA/LTA of detection statistic cannot be calculated in order to
avoid dividing by 0.
Notes
----------
The same filter and decimation parameters that were used in the
ClusterStream instance will be applied.
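
        Example (illustrative sketch; the date strings and database name are
        placeholders and ``ss`` is assumed to be an existing SubSpace
        instance on which SVD has been called)::

            ss.detex(utcStart='2012-01-01', utcEnd='2012-02-01',
                     subspaceDB='SubSpace.db', useSubSpaces=True,
                     useSingles=False)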
"""
# make sure no parameters that dont work yet are selected
if multiprocess or trigCon != 0:
msg = 'multiprocessing and trigcon other than 0 not supported'
detex.log(__name__, msg, level='error')
if os.path.exists(subspaceDB):
if delOldCorrs:
os.remove(subspaceDB)
msg = 'Deleting old subspace database %s' % subspaceDB
detex.log(__name__, msg, pri=True)
else:
msg = 'Not deleting old subspace database %s' % subspaceDB
detex.log(__name__, msg, pri=True)
if useSubSpaces: # run subspaces
TRDF = self.subspaces
# determine if subspaces are defined (ie SVD has been called)
stas = self.subspaces.keys()
sv = [all(TRDF[sta].SVDdefined) for sta in stas]
if not all(sv):
msg = 'call SVD before running subspace detectors'
detex.log(__name__, msg, level='error')
Det = _SSDetex(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters,
subspaceDB, trigCon, triggerLTATime, triggerSTATime,
multiprocess, calcHist, self.dtype, estimateMags,
classifyEvents, eventCorFile, utcSaves, fillZeros)
self.histSubSpaces = Det.hist
if useSingles: # run singletons
            # make sure thresholds are calculated
self.setSinglesThresholds()
TRDF = self.singles
Det = _SSDetex(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters,
subspaceDB, trigCon, triggerLTATime, triggerSTATime,
multiprocess, calcHist, self.dtype, estimateMags,
classifyEvents, eventCorFile, utcSaves, fillZeros,
issubspace=False)
self.histSingles = Det.hist
        # save additional info to sql database
if useSubSpaces or useSingles:
cols = ['FREQMIN', 'FREQMAX', 'CORNERS', 'ZEROPHASE']
dffil = pd.DataFrame([self.clusters.filt], columns=cols, index=[0])
detex.util.saveSQLite(dffil, subspaceDB, 'filt_params')
# get general info on each singleton/subspace and save
ssinfo, sginfo = self._getInfoDF()
sshists, sghists = self._getHistograms(useSubSpaces, useSingles)
if useSubSpaces and ssinfo is not None:
# save subspace info
detex.util.saveSQLite(ssinfo, subspaceDB, 'ss_info')
if useSingles and sginfo is not None:
# save singles info
detex.util.saveSQLite(sginfo, subspaceDB, 'sg_info')
if useSubSpaces and sshists is not None:
# save subspace histograms
detex.util.saveSQLite(sshists, subspaceDB, 'ss_hist')
if useSingles and sghists is not None:
# save singles histograms
detex.util.saveSQLite(sghists, subspaceDB, 'sg_hist')
def _getInfoDF(self):
"""
get dataframes that have info about each subspace and single
"""
sslist = [] # list in which to put DFs for each subspace/station pair
sglist = [] # list in which to put DFs for each single/station pair
for sta in self.Stations:
if sta not in self.ssStations:
msg = 'No subspaces on station %s' % sta
detex.log(__name__, msg, pri=True)
continue
for num, ss in self.subspaces[sta].iterrows(): # write ss info
name = ss.Name
station = ss.Station
events = ','.join(ss.Events)
numbasis = ss.NumBasis
thresh = ss.Threshold
if isinstance(ss.FAS, dict) and len(ss.FAS.keys()) > 1:
b1, b2 = ss.FAS['betadist'][0], ss.FAS['betadist'][1]
else:
b1, b2 = np.nan, np.nan
cols = ['Name', 'Sta', 'Events', 'Threshold', 'NumBasisUsed',
'beta1', 'beta2']
dat = [[name, station, events, thresh, numbasis, b1, b2]]
sslist.append(pd.DataFrame(dat, columns=cols))
for sta in self.Stations:
if sta not in self.singStations:
msg = 'No singletons on station %s' % sta
detex.log(__name__, msg, pri=True)
continue
for num, ss in self.singles[sta].iterrows(): # write singles info
name = ss.Name
station = ss.Station
events = ','.join(ss.Events)
thresh = ss.Threshold
if isinstance(ss.FAS, list) and len(ss.FAS[0].keys()) > 1:
b1, b2 = ss.FAS[0]['betadist'][0], ss.FAS[0]['betadist'][1]
else:
b1, b2 = np.nan, np.nan
cols = ['Name', 'Sta', 'Events', 'Threshold', 'beta1', 'beta2']
dat = [[name, station, events, thresh, b1, b2]]
sglist.append(pd.DataFrame(dat, columns=cols))
if len(sslist) > 0:
ssinfo = pd.concat(sslist, ignore_index=True)
else:
ssinfo = None
if len(sglist) > 0:
sginfo = pd.concat(sglist, ignore_index=True)
else:
sginfo = None
return ssinfo, sginfo
def _getHistograms(self, useSubSpaces, useSingles):
"""
Pull out the histogram info for saving to database
"""
cols = ['Name', 'Sta', 'Value']
if useSubSpaces:
bins = json.dumps(self.histSubSpaces['Bins'].tolist())
dat = [['Bins', 'Bins', bins]]
sshists = [pd.DataFrame(dat, columns=cols)]
for sta in self.Stations:
if sta in self.histSubSpaces.keys():
for skey in self.histSubSpaces[sta]:
try:
vl = json.dumps(self.histSubSpaces[sta][skey].tolist())
except AttributeError:
continue
dat = [[skey, sta, vl]]
sshists.append(pd.DataFrame(dat, columns=cols))
sshist = pd.concat(sshists, ignore_index=True)
else:
sshist = None
if useSingles:
bins = json.dumps(self.histSingles['Bins'].tolist())
dat = [['Bins', 'Bins', bins]]
sghists = [pd.DataFrame(dat, columns=cols)]
for sta in self.Stations:
if sta in self.histSingles.keys():
for skey in self.histSingles[sta]:
try:
vl = json.dumps(self.histSingles[sta][skey].tolist())
except AttributeError:
                            continue
dat = [[skey, sta, vl]]
sghists.append(pd.DataFrame(dat, columns=cols))
sghist = pd.concat(sghists, ignore_index=True)
else:
sghist = None
return sshist, sghist
########################### Python Class Attributes
def __getitem__(self, key): # make object indexable
if isinstance(key, int):
return self.subspaces[self.ssStations[key]]
elif isinstance(key, string_types):
if len(key.split('.')) == 2:
return self.subspaces[self._stakey2[key]]
elif len(key.split('.')) == 1:
return self.subspaces[self._stakey1[key]]
else:
msg = '%s is not a station in this cluster object' % key
detex.log(__name__, msg, level='error')
else:
msg = '%s must either be a int or str of station name' % key
detex.log(__name__, msg, level='error')
def __len__(self):
return len(self.subspaces)
############ MISC
def write(self, filename='subspace.pkl'):
"""
        Pickle the SubSpace instance to disk
Parameters
-------------
filename : str
Path of the file to be created
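
        Example (illustrative sketch; the file name is a placeholder)::

            ss.write('subspace.pkl')
            # the instance can later be restored with cPickle, e.g.
            # ss = cPickle.load(open('subspace.pkl', 'rb'))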
"""
cPickle.dump(self, open(filename, 'wb'))
def printOffsets(self):
"""
        Function to print out the offset min, max, and range for each
        station/subspace pair
"""
for station in self.ssStations:
for num, row in self.subspaces[station].iterrows():
print('%s, %s, min=%3f, max=%3f, range=%3f' %
(row.Station, row.Name, row.Offsets[0], row.Offsets[2],
row.Offsets[2] - row.Offsets[0]))
|
from torchvision import models
import numpy as np
import torch
import os
from moviepy.editor import VideoFileClip
SKIP_FRAME_RATE = 10
MINIMAX_FRAME = 4
# see the function documentation
model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def extract_boxes(reference_clip, compare_clip):
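    # For each clip: sample every SKIP_FRAME_RATE-th frame, run the detector, keep person
    # detections (COCO label 1) with score >= 0.95, and store their box centres sorted by x.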
clips = [reference_clip, compare_clip]
clips_frame_info = []
for clip in clips:
i = 0
every_frame_info = []
# loop over the frames from the video stream
while True:
            i+=SKIP_FRAME_RATE # there are ~60 fps per second, so skipping a few frames should be fine!
if (i*1.0/clip.fps)> clip.duration:
break
frame = clip.get_frame(i*1.0/clip.fps)
frame = frame/255 # image, and should be in ``0-1`` range.
            frame = np.transpose(frame, (2,0,1)) # HWC -> CHW (which source axis goes into each target position?)
x = [torch.from_numpy(frame).float()]
# label list https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt
predictions = model(x)
prediction= predictions[0]
each_box_list = zip(prediction['boxes'].tolist(), prediction['labels'].tolist(), prediction['scores'].tolist())
            # the threshold needs to be about 0.95 before the dark-looking spectators disappear!
filtered_box_list = filter(lambda x: x[1]==1 and x[2] >= 0.95, each_box_list)
filtered_center_dot_list = list(map(lambda x: [(x[0][0]+x[0][2])/2, (x[0][1]+x[0][3])/2], filtered_box_list))
            # sort by x coordinate (assuming the formation is spread out horizontally)
sorted_dot_list = sorted(filtered_center_dot_list, key = lambda x: x[0])
            every_frame_info.append(sorted_dot_list) # per-frame info
        clips_frame_info.append(np.array(every_frame_info)) # append the result for each clip
return clips_frame_info
def calculate_pose_distance(reference_clip, compare_clip):
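    # Compare the clips frame by frame: sum distances between matched centre points (plus a
    # penalty when the person counts differ), then slide a MINIMAX_FRAME window and return the
    # window whose worst frame distance is smallest, together with its start time in seconds.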
    clips_frame_info = extract_boxes(reference_clip, compare_clip) # box info computed for every sampled frame
min_size = min(len(clips_frame_info[0]),len(clips_frame_info[1]))
dist_arr = list()
# Calculate distance (by frame)
for i in range(min_size):
        if len(clips_frame_info[0][i])>0 and len(clips_frame_info[1][i])>0: # only when both clips have detections
            # match points by their x position and compare them (the formation is what matters anyway)
            ref_frame_dots = clips_frame_info[0][i] # info for this frame
            compare_frame_dots = clips_frame_info[1][i] # info for this frame
            min_dot_num = min(len(ref_frame_dots), len(compare_frame_dots)) # computed relative to the reference
            penalty = ((reference_clip.w **2 + reference_clip.h**2)**0.5) * abs(len(ref_frame_dots)-len(compare_frame_dots)) # penalty applied when the person counts differ
total_diff = penalty
for dot_idx in range(min_dot_num):
total_diff += ((ref_frame_dots[dot_idx][0] - compare_frame_dots[dot_idx][0])**2 + (ref_frame_dots[dot_idx][1] - compare_frame_dots[dot_idx][1])**2)**0.5
dist_arr.append(total_diff)
else:
dist_arr.append(None)
# Minimize max distance in (minimax_frames) frames
min_diff = np.float('Inf')
min_idx = 0
max_dist = []
for i in range(min_size-(MINIMAX_FRAME-1)):
if None in dist_arr[i:i+MINIMAX_FRAME]:
max_dist.append(None)
else:
tmp_max = np.max(dist_arr[i:i+MINIMAX_FRAME])
max_dist.append(tmp_max)
if min_diff > tmp_max:
min_diff = tmp_max
min_idx = i
# return distance, second, additional_info
return min_diff, (min_idx*SKIP_FRAME_RATE)/reference_clip.fps, {}
|
[
"numpy.float",
"torch.from_numpy",
"numpy.max",
"numpy.array",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"numpy.transpose"
] |
[((186, 243), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (226, 243), False, 'from torchvision import models\n'), ((3155, 3170), 'numpy.float', 'np.float', (['"""Inf"""'], {}), "('Inf')\n", (3163, 3170), True, 'import numpy as np\n'), ((839, 869), 'numpy.transpose', 'np.transpose', (['frame', '(2, 0, 1)'], {}), '(frame, (2, 0, 1))\n', (851, 869), True, 'import numpy as np\n'), ((1776, 1802), 'numpy.array', 'np.array', (['every_frame_info'], {}), '(every_frame_info)\n', (1784, 1802), True, 'import numpy as np\n'), ((3372, 3409), 'numpy.max', 'np.max', (['dist_arr[i:i + MINIMAX_FRAME]'], {}), '(dist_arr[i:i + MINIMAX_FRAME])\n', (3378, 3409), True, 'import numpy as np\n'), ((919, 942), 'torch.from_numpy', 'torch.from_numpy', (['frame'], {}), '(frame)\n', (935, 942), False, 'import torch\n')]
|
import numpy
#Variables
PLAYERS= 2
boardW = 5
boardH = 5
board = numpy.zeros((boardW,boardH))
step = 0
winLength = 3
#Functions
def drawBoard():
global step
print("\n Step:", step, "\n")
for i in range(0,len(board)):
for j in numpy.flipud(board)[i]:
print('{:>4}'.format(getSym(j)), end = "")
print("\n")
step+=1;
symbols="■XOABCDEFGHIJKLMNOPQRSTUVWXZ"
def getSym(n):
return symbols[int(n)]
def move(player):
while(True):
row, column = eval(input("Player "+str(player)+" Move, Enter coordinates: "))
try:
if board[column-1][row-1]==0:
board[column-1][row-1]=player
break;
else:
print("You can't move there! Choose a blank spot!")
except:
print("Coordinates Out of Bounds, Try again!")
def contains(small, big):
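    # Return (start, end) indices if `small` occurs as a contiguous run inside `big`, else False.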
for i in range(len(big)-len(small)+1):
for j in range(len(small)):
if big[i+j] != small[j]:
break
else:
return i, i+len(small)
return False
def getState():
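    # Return the winning player's number, "Tied" when the board is full, or 0 if play continues.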
#checks columns
for r in range(board.shape[0]):
for p in range(1, PLAYERS+1):
#if all(board[w,:] == numpy.full((board.shape[1]),p)):
            if contains(numpy.full(winLength,p), board[r,:]):
return p
#checks rows
for c in range(board.shape[1]):
for p in range(1, PLAYERS+1):
#if all(board[:,h] == numpy.full((board.shape[0]),p)):
if contains(numpy.full(winLength,p), board[:,c]):
return p
#check diagonals
maxDiagonalOffset=max(board.shape[0], board.shape[1])-(winLength-1)
for o in range(-maxDiagonalOffset+1,maxDiagonalOffset):
for p in range(1, PLAYERS+1):
for i in [-1,1]:
if contains(numpy.full(winLength,p), numpy.diagonal(board[::i],o)):
return p
#check for no more blanks
if 0 not in board:
return "Tied"
return 0
#Main loop
while(True):
step = 0
board = numpy.zeros((5,5))
print(" ======= EXTREME TIC TAC TOE ======= ")
#Variables
PLAYERS=int(input("How many players?: "))
boardW = int(input("What's the board's width?: "))
boardH = int(input("What's the board's height?: "))
board = numpy.zeros((boardW,boardH))
step = 0
winLength = int(input("How many in a row to win?: "))
print(" ======= GAME STARTING... ======= ")
while(True):
drawBoard()
if getState()=="Tied":
print("The game tied!")
break;
elif getState()>0:
print("Player", getState(), "Won!")
break;
move((step-1)%PLAYERS+1)
if input("Keep playing?(press y): ").lower() != 'y':
break
|
[
"numpy.full",
"numpy.zeros",
"numpy.diagonal",
"numpy.flipud"
] |
[((66, 95), 'numpy.zeros', 'numpy.zeros', (['(boardW, boardH)'], {}), '((boardW, boardH))\n', (77, 95), False, 'import numpy\n'), ((2058, 2077), 'numpy.zeros', 'numpy.zeros', (['(5, 5)'], {}), '((5, 5))\n', (2069, 2077), False, 'import numpy\n'), ((2313, 2342), 'numpy.zeros', 'numpy.zeros', (['(boardW, boardH)'], {}), '((boardW, boardH))\n', (2324, 2342), False, 'import numpy\n'), ((248, 267), 'numpy.flipud', 'numpy.flipud', (['board'], {}), '(board)\n', (260, 267), False, 'import numpy\n'), ((1283, 1299), 'numpy.full', 'numpy.full', (['(3)', 'p'], {}), '(3, p)\n', (1293, 1299), False, 'import numpy\n'), ((1520, 1544), 'numpy.full', 'numpy.full', (['winLength', 'p'], {}), '(winLength, p)\n', (1530, 1544), False, 'import numpy\n'), ((1831, 1855), 'numpy.full', 'numpy.full', (['winLength', 'p'], {}), '(winLength, p)\n', (1841, 1855), False, 'import numpy\n'), ((1856, 1885), 'numpy.diagonal', 'numpy.diagonal', (['board[::i]', 'o'], {}), '(board[::i], o)\n', (1870, 1885), False, 'import numpy\n')]
|
"""
Author: <NAME>
Created: 3/11/2020 9:04 AM
"""
from Climate_Shocks.vcsn_pull import vcsn_pull_single_site
from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly
from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record
from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly
import ksl_env
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import itertools
import sys
event_def_dir = sys.argv[1] # the path to the directory
print(event_def_dir)
vcsn_version = sys.argv[2] # 'trended', 'detrended2'
print(vcsn_version)
if vcsn_version not in ['trended', 'detrended2']:
raise ValueError('incorrect value for vcsn_version: {}'.format(vcsn_version, ))
if not os.path.exists(event_def_dir):
os.makedirs(event_def_dir)
irrigated_pga = calc_past_pasture_growth_anomaly('irrigated', site='eyrewell').reset_index()
irrigated_pga.loc[:, 'year'] = irrigated_pga.date.dt.year
irrigated_pga = irrigated_pga.set_index(['month', 'year'])
dryland_pga = calc_past_pasture_growth_anomaly('dryland').reset_index()
dryland_pga.loc[:, 'year'] = dryland_pga.date.dt.year
dryland_pga = dryland_pga.set_index(['month', 'year'])
def prob(x):
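    # fraction of entries in x that are True/non-zero (NaNs add nothing to the numerator)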
out = np.nansum(x) / len(x)
return out
def add_pga_from_idx(idx):
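    # Describe the irrigated and dryland pasture-growth anomaly (pga_norm) by month for the
    # given (month, year) index and return the two sets of statistics side by side.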
idx = idx.dropna()
irr_temp = irrigated_pga.loc[idx].reset_index()
irr_temp2 = irr_temp.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
dry_temp = dryland_pga.loc[idx].reset_index()
dry_temp2 = dry_temp.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
temp3 = pd.merge(irr_temp2, dry_temp2, left_index=True, right_index=True, suffixes=('_irr', '_dry'))
return pd.DataFrame(temp3)
def add_pga(grouped_data, sim_keys, outdata):
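    # For each event column in sim_keys, find the (month, year) combinations where the event
    # occurred, attach monthly pga_norm statistics to outdata, and return those years as well.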
grouped_data = grouped_data.set_index(['month', 'year'])
years = {}
for k in sim_keys:
idx = grouped_data.loc[grouped_data.loc[:, k], k]
assert idx.all()
idx = idx.index
years[k] = idx.values
temp_irr = irrigated_pga.loc[idx].reset_index()
temp_irr2 = temp_irr.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
temp_dry = dryland_pga.loc[idx].reset_index()
temp_dry2 = temp_dry.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
for k2 in temp_irr2:
outdata.loc[:, (k, 'pga_irr_{}'.format(k2))] = temp_irr2.loc[:, k2]
outdata.loc[:, (k, 'pga_dry_{}'.format(k2))] = temp_dry2.loc[:, k2]
mx_years = 48 * 12 + 1
out_years = pd.DataFrame(index=range(mx_years), columns=sim_keys)
for k in sim_keys:
missing_len = mx_years - len(years[k])
out_years.loc[:, k] = np.concatenate((years[k], np.zeros(missing_len) * np.nan))
outdata = outdata.sort_index(axis=1, level=0, sort_remaining=False)
return outdata, out_years
def calc_dry_recurance_monthly_smd():
data = get_vcsn_record(vcsn_version)
t = calc_smd_monthly(rain=data.rain, pet=data.pet, dates=data.index)
data.loc[:, 'smd'] = t
t = data.loc[:, ['doy', 'smd']].groupby('doy').mean().to_dict()
data.loc[:, 'sma'] = data.loc[:, 'smd'] - data.loc[:, 'doy'].replace(t['smd'])
data.reset_index(inplace=True)
data.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_raw.csv'))
smd_thresholds = [0]
sma_thresholds = [-5, -10, -12, -15, -17, -20]
ndays = [5, 7, 10, 14]
out_keys = []
for smd_t, sma_t in itertools.product(smd_thresholds, sma_thresholds):
k = 'd_smd{:03d}_sma{:02d}'.format(smd_t, sma_t)
data.loc[:, k] = (data.loc[:, 'smd'] <= smd_t) & (data.loc[:, 'sma'] <= sma_t)
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year',
'smd', 'sma'] + out_keys].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'monthly_smd_dry_monthly_data_desc.csv'))
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_years.csv'))
def calc_dry_recurance():
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.to_csv(os.path.join(event_def_dir, 'dry_raw.csv'))
smd_thresholds = [0, -110, -110]
sma_thresholds = [-20, 0, -20]
ndays = [5, 7, 10, 14]
out_keys = []
for smd_t, sma_t in zip(smd_thresholds, sma_thresholds):
k = 'd_smd{:03d}_sma{:02d}'.format(smd_t, sma_t)
data.loc[:, k] = (data.loc[:, 'smd'] <= smd_t) & (data.loc[:, 'sma'] <= sma_t)
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year',
'smd', 'sma'] + out_keys].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'dry_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'dry_monthly_data_desc.csv'))
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'dry_prob_only_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'dry_years.csv'))
def calc_wet_recurance():
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
temp = False
if temp: # just to look at some plots
fig, (ax, ax2, ax3) = plt.subplots(3, sharex=True)
ax.plot(data.date, data.smd)
ax2.plot(data.date, data.drain)
ax3.plot(data.date, data.rain)
plt.show()
data.to_csv(os.path.join(event_def_dir, 'smd_wet_raw.csv'))
thresholds_rain = [5, 3, 1, 0]
thresholds_smd = [0, -5, -10]
ndays = [7, 10, 14]
out_keys = []
for t_r, t_smd in itertools.product(thresholds_rain, thresholds_smd):
k = 'd_r{}_smd{}'.format(t_r, t_smd)
data.loc[:, k] = (data.loc[:, 'rain'] >= t_r) & (data.loc[:, 'smd'] >= t_smd)
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year', 'rain'] + out_keys].groupby(['month', 'year']).sum().reset_index()
    # make monthly rain anomaly - mean
temp = grouped_data.groupby('month').mean().loc[:, 'rain'].to_dict()
grouped_data.loc[:, 'rain_an_mean'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'rain_an_mean': temp})
grouped_data.loc[:, 'rain_an_mean'] = grouped_data.loc[:, 'rain'] - grouped_data.loc[:, 'rain_an_mean']
    # make monthly rain anomaly - median
temp = grouped_data.groupby('month').median().loc[:, 'rain'].to_dict()
grouped_data.loc[:, 'rain_an_med'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'rain_an_med': temp})
grouped_data.loc[:, 'rain_an_med'] = grouped_data.loc[:, 'rain'] - grouped_data.loc[:, 'rain_an_med']
grouped_data.to_csv(os.path.join(event_def_dir, 'smd_wet_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'smd_wet_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'smd_wet_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'smd_wet_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'smd_wet_years.csv'))
def calc_wet_recurance_ndays():
ndays = {
'org': { # this is the best value!
5: 14,
6: 11,
7: 11,
8: 13,
9: 13,
}
}
for v in ndays.values():
v.update({
1: 99,
2: 99,
3: 99,
4: 99,
10: 99,
11: 99,
12: 99,
})
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.loc[:, 'ndays_rain'] = (data.loc[:, 'rain'] > 0.01).astype(float)
data.to_csv(os.path.join(event_def_dir, 'ndays_wet_raw.csv'))
grouped_data = data.loc[:, ['month', 'year', 'rain', 'ndays_rain']].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'ndays_wet_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'ndays_wet_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for k, val in ndays.items():
ok = '{}'.format(k)
out_keys2.append(ok)
grouped_data.loc[:, 'limit'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'limit': val})
grouped_data.loc[:, ok] = grouped_data.loc[:, 'ndays_rain'] >= grouped_data.loc[:, 'limit']
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'ndays_wet_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'ndays_wet_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'ndays_wet_years.csv'))
def calc_dry_rolling():
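    # Flag months where the 10-day rolling rain total stays at or below a month-specific
    # threshold on at least n days, then summarise the monthly probability of each definition.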
bulk_ndays = [5, 10, 15, 20]
ndays = {}
for bnd in bulk_ndays:
ndays['ndays{}'.format(bnd)] = {k: bnd for k in range(1, 13)}
thresholds = { # this did not end up getting used
'first': {
4: 15,
5: 10,
8: 5,
9: 10,
},
'first-3': {
4: 15 - 3,
5: 10 - 3,
8: 5 - 3,
9: 10 - 3,
},
'first-5': {
4: 15 - 5,
5: 10 - 5,
8: 5 - 5,
9: 10 - 5,
},
'first-10': {
4: 15 - 10,
5: 10 - 10,
8: 5 - 10,
9: 10 - 10,
},
'zero': {
4: 0,
5: 0,
8: 0,
9: 0,
},
'one': {
4: 1,
5: 1,
8: 1,
9: 1,
},
'first-7': {
4: 15 - 7,
5: 10 - 7,
8: 5 - 7,
9: 10 - 7,
},
}
for v in thresholds.values():
v.update({
1: -1,
2: -1,
3: -1,
6: -1,
7: -1,
10: -1,
11: -1,
12: -1,
})
data = get_vcsn_record(vcsn_version).reset_index()
data.loc[:, 'roll_rain_10'] = data.loc[:, 'rain'].rolling(10).sum()
out_keys = []
outdata = pd.DataFrame(
index=pd.MultiIndex.from_product([range(1, 13), range(1972, 2020)], names=['month', 'year']))
for nd, thresh in itertools.product(ndays.keys(), thresholds.keys()):
temp_data = data.copy(deep=True)
ok = '{}_{}'.format(thresh, nd)
out_keys.append(ok)
for m in range(1, 13):
idx = data.month == m
temp_data.loc[idx, ok] = temp_data.loc[idx, 'roll_rain_10'] <= thresholds[thresh][m]
temp_data.loc[idx, 'ndays'] = ndays[nd][m]
temp_data = temp_data.groupby(['month', 'year']).agg({ok: 'sum', 'ndays': 'mean'})
outdata.loc[:, ok] = temp_data.loc[:, ok] >= temp_data.loc[:, 'ndays']
outdata.to_csv(os.path.join(event_def_dir, 'rolling_dry_monthly.csv'))
outdata = outdata.reset_index()
out = outdata.loc[:, ['month'] + out_keys].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(outdata, set(out_keys) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'rolling_dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'variable_hot_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'rolling_dry_years.csv'))
return list(set(out_keys) - set(drop_keys)), out
def calc_dry_recurance_ndays():
ndays = { # happy with this value other than middle ones; this did not end up getting used
'lower_q': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
            4: 46, # lower quartile of normal, pair with 'hot' as pet is important in this month
            5: 37, # lower quartile of normal, pair with 'hot' as pet is important in this month
            8: 35, # lower quartile of normal, pair with 'hot' as pet is important in this month
            9: 30, # lower quartile of normal, pair with 'hot' as pet is important in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
'up_5': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
            4: 46 + 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
            5: 37 + 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
            8: 35 + 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
            9: 30 + 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
'down_5': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
            4: 46 - 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
            5: 37 - 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
            8: 35 - 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
            9: 30 - 5, # lower quartile of normal, pair with 'hot' as pet is important in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
'down_7': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
            4: 46 - 7, # lower quartile of normal, pair with 'hot' as pet is important in this month
            5: 37 - 7, # lower quartile of normal, pair with 'hot' as pet is important in this month
            8: 35 - 7, # lower quartile of normal, pair with 'hot' as pet is important in this month
            9: 30 - 7, # lower quartile of normal, pair with 'hot' as pet is important in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
}
for v in ndays.values():
v.update({
6: -1,
7: -1,
})
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.loc[:, 'ndays_rain'] = (data.loc[:, 'rain'] > 0.01).astype(float)
data.to_csv(os.path.join(event_def_dir, 'ndays_dry_raw.csv'))
grouped_data = data.loc[:, ['month', 'year', 'rain', 'ndays_rain']].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'ndays_dry_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'ndays_dry_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for k, val in ndays.items():
ok = '{}'.format(k)
out_keys2.append(ok)
grouped_data.loc[:, 'limit'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'limit': val})
grouped_data.loc[:, ok] = grouped_data.loc[:, 'rain'] <= grouped_data.loc[:, 'limit']
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'ndays_dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'ndays_dry_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'ndays_dry_years.csv'))
def calc_hot_recurance_variable():
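    # Flag months where the month-specific temperature variable (tmax or tmean) meets a
    # threshold on at least the required number of days, for several threshold/duration variants.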
var_to_use = {
1: 'tmax',
2: 'tmax',
3: 'tmax',
        4: 'tmean', # to use in conjunction with dry to get actual dry
        5: 'tmean', # to use in conjunction with dry to get actual dry
6: 'tmax',
7: 'tmax',
        8: 'tmean', # to use in conjunction with dry to get actual dry
        9: 'tmean', # to use in conjunction with dry to get actual dry
10: 'tmax',
11: 'tmax',
12: 'tmax',
}
ndays = {
'5day': {
4: 5,
5: 5,
8: 5,
9: 5,
},
'7day': {
4: 7,
5: 7,
8: 7,
9: 7,
},
'10day': {
4: 10,
5: 10,
8: 10,
9: 10,
},
'15day': {
4: 15,
5: 15,
8: 15,
9: 15,
}
}
thresholds = {
'upper_q': { # based on the sma -20 10days
            4: 18, # upper quartile of normal, pair with 'hot' as pet is important in this month
            5: 15, # upper quartile of normal, pair with 'hot' as pet is important in this month
            8: 13, # upper quartile of normal, pair with 'hot' as pet is important in this month
            9: 15, # upper quartile of normal, pair with 'hot' as pet is important in this month
},
'2_less': { # based on the sma -20 10days
            4: 18 - 2, # upper quartile of normal, pair with 'hot' as pet is important in this month
            5: 15 - 2, # upper quartile of normal, pair with 'hot' as pet is important in this month
            8: 13 - 2, # upper quartile of normal, pair with 'hot' as pet is important in this month
            9: 15 - 2, # upper quartile of normal, pair with 'hot' as pet is important in this month
},
'5_less': { # based on the sma -20 10days
            4: 18 - 5, # upper quartile of normal, pair with 'hot' as pet is important in this month
            5: 15 - 5, # upper quartile of normal, pair with 'hot' as pet is important in this month
            8: 13 - 5, # upper quartile of normal, pair with 'hot' as pet is important in this month
            9: 15 - 5, # upper quartile of normal, pair with 'hot' as pet is important in this month
},
'7_less': { # based on the sma -20 10days
            4: 18 - 7, # upper quartile of normal, pair with 'hot' as pet is important in this month
            5: 15 - 7, # upper quartile of normal, pair with 'hot' as pet is important in this month
            8: 13 - 7, # upper quartile of normal, pair with 'hot' as pet is important in this month
            9: 15 - 7, # upper quartile of normal, pair with 'hot' as pet is important in this month
}
}
for v in thresholds.values(): # set for actual hot events
v.update({
1: 25,
2: 25,
3: 25,
6: 25,
7: 25,
10: 25,
11: 25,
12: 25,
})
for v in ndays.values(): # set for actual hot events
v.update({
1: 7,
2: 7,
3: 7,
6: 7,
7: 7,
10: 7,
11: 7,
12: 7,
})
data = get_vcsn_record(vcsn_version).reset_index()
data.loc[:, 'tmean'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
out_keys = []
outdata = pd.DataFrame(index=pd.MultiIndex.from_product([range(1, 13), range(1972, 2020)], names=['month', 'year']))
for thresh, nd in itertools.product(thresholds.keys(), ndays.keys()):
temp_data = data.copy(deep=True)
ok = '{}_{}'.format(thresh, nd)
out_keys.append(ok)
for m in range(1, 13):
idx = data.month == m
temp_data.loc[idx, ok] = temp_data.loc[idx, var_to_use[m]] >= thresholds[thresh][m]
temp_data.loc[idx, 'ndays'] = ndays[nd][m]
temp_data = temp_data.groupby(['month', 'year']).agg({ok: 'sum', 'ndays': 'mean'})
outdata.loc[:, ok] = temp_data.loc[:, ok] >= temp_data.loc[:, 'ndays']
outdata.to_csv(os.path.join(event_def_dir, 'variable_hot_monthly.csv'))
outdata = outdata.reset_index()
out = outdata.loc[:, ['month'] + out_keys].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(outdata, set(out_keys) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'variable_hot_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'variable_hot_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'variable_hot_years.csv'))
def joint_hot_dry():
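    # Combine the saved hot and dry event years and report, per month, the probability of both
    # occurring together plus pasture-growth anomaly statistics for those joint events.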
hot = pd.read_csv(os.path.join(event_def_dir, 'variable_hot_years.csv'), index_col=0)
hot_keys = list(hot.keys())
dry = pd.read_csv(os.path.join(event_def_dir, 'rolling_dry_years.csv'), index_col=0)
dry_keys = list(dry.keys())
data = pd.merge(hot, dry, left_index=True, right_index=True)
use_data = []
for d in data.keys():
use_data.append(
pd.Series([np.nan if isinstance(t, float) else tuple(int(e) for e in t.strip('()').split(',')) for t in
data.loc[:, d]]))
use_data = pd.concat(use_data, axis=1)
use_data.columns = data.columns
_org_describe_names = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
_describe_names = []
for e in _org_describe_names:
_describe_names.extend(['{}_irr'.format(e), '{}_dry'.format(e)])
full_event_names = ['hot:{}_dry:{}'.format(h, d) for h, d in itertools.product(hot_keys, dry_keys)]
outdata = pd.DataFrame(index=pd.Series(range(1, 13), name='month'),
columns=pd.MultiIndex.from_product((full_event_names,
(['prob'] + _describe_names))
, names=['event', 'pga_desc']), dtype=float)
# make base data
print('making base data')
for hot_nm, dry_nm in itertools.product(hot_keys, dry_keys):
en = 'hot:{}_dry:{}'.format(hot_nm, dry_nm)
joint_event = pd.Series(list(set(use_data.loc[:, hot_nm]).intersection(set(use_data.loc[:, dry_nm]))))
if joint_event.dropna().empty:
continue
temp = make_prob(joint_event)
outdata.loc[temp.index, (en, 'prob')] = temp.values[:, 0]
temp = add_pga_from_idx(joint_event)
outdata.loc[temp.index, (en, _describe_names)] = temp.loc[:, _describe_names].values
t = pd.Series([' '.join(e) for e in outdata.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
outdata.loc[:, outdata.columns[idx]] *= 100
outdata = outdata.sort_index(axis=1, level=0, sort_remaining=False)
outdata.to_csv(os.path.join(event_def_dir, 'joint_hot_dry_prob.csv'), float_format='%.1f%%')
idx = t.str.contains('prob')
outdata.loc[:, outdata.columns[idx]].to_csv(os.path.join(event_def_dir, 'joint_hot_dry_prob_only_prob.csv'),
float_format='%.1f%%')
idx = t.str.contains('mean')
outdata.loc[:, outdata.columns[idx]].to_csv(os.path.join(event_def_dir, 'joint_hot_dry_mean_impact.csv'),
float_format='%.1f%%')
return full_event_names, outdata
def make_prob(in_series):
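    # Convert a series of (month, year) tuples into the per-month fraction of the 48-year record.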
in_series = in_series.dropna()
data = pd.DataFrame(np.atleast_2d(list(in_series.values)), columns=['month', 'year'])
out_series = data.groupby('month').count() / 48
return pd.DataFrame(out_series)
def old_calc_restrict_recurance():
data = get_restriction_record()
thresholds = [0.5, 0.75, 1]
tnames = ['half', '3/4', 'full']
ndays = [1, 5, 7, 10, 14]
out_keys = []
for thresh, tname in zip(thresholds, tnames):
k = 'd_>{}_rest'.format(tname)
data.loc[:, k] = data.loc[:, 'f_rest'] >= thresh
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year', 'f_rest'] + out_keys].groupby(['month', 'year']).sum().reset_index()
    # make monthly restriction anomaly - mean
temp = grouped_data.groupby('month').mean().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_mean': temp})
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_mean']
    # make monthly restriction anomaly - median
temp = grouped_data.groupby('month').median().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_med': temp})
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_med']
grouped_data.to_csv(os.path.join(event_def_dir, 'rest_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'rest_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
out_years.to_csv(os.path.join(event_def_dir, 'rest_years.csv'))
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'old_rest_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'old_rest_prob_only_prob.csv'),
float_format='%.1f%%')
def calc_restrict_cumulative_recurance():
data = get_restriction_record()
ndays = [1, 5, 7, 10, 14, 21, 25, 29]
ndays = {'{:02d}'.format(e): e for e in ndays}
temp = {1: 10,
2: 17,
3: 17,
4: 10,
5: 7,
6: 10,
7: 10,
8: 10,
9: 7,
10: 5,
11: 5,
12: 7,
}
ndays['eqlikly'] = temp # note don't use 'prob' in this name!
grouped_data = data.loc[:, ['month', 'year', 'f_rest']].groupby(['month', 'year']).sum().reset_index()
    # make monthly restriction anomaly - mean
temp = grouped_data.groupby('month').mean().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_mean': temp})
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_mean']
    # make monthly restriction anomaly - median
temp = grouped_data.groupby('month').median().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_med': temp})
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_med']
grouped_data.to_csv(os.path.join(event_def_dir, 'rest_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'rest_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for k, nd in ndays.items():
ok = '{}d_rest'.format(k)
out_keys2.append(ok)
if isinstance(nd, int):
grouped_data.loc[:, ok] = grouped_data.loc[:, 'f_rest'] >= nd
elif isinstance(nd, dict):
grouped_data.loc[:, ok] = grouped_data.loc[:, 'f_rest'] >= grouped_data.loc[:, 'month'].replace(nd)
else:
raise ValueError('unexpected type for nd: {}'.format(type(nd)))
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
out_years.to_csv(os.path.join(event_def_dir, 'rest_years.csv'))
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'rest_prob.csv'), float_format='%.1f%%')
idx = (t.str.contains('prob') | t.str.contains('sum'))
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'rest_prob_only_prob.csv'), float_format='%.1f%%')
def calc_restrict_recurance():
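    # Identify runs of consecutive restriction days at several severity thresholds, summarise
    # the run statistics per month, and estimate probabilities for run-length/total-day events.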
data = get_restriction_record()
thresholds = [0.001, 0.5, 0.75, 1]
tnames = ['any', 'half', '75rest', 'full']
con_days = [5, 7, 10]
ndays = [5, 7, 10, 15, 20]
consecutive_data = {}
for tnm, t in zip(tnames, thresholds):
test_value = tnm
data.loc[:, test_value] = data.loc[:, 'f_rest'] >= t
data.loc[:, 'con_id'] = (data.loc[:, ['year',
'month',
test_value]].diff(1) != 0).any(axis=1).astype('int').cumsum().values
temp = data.loc[data[test_value]].groupby('con_id')
consecutive_data[tnm] = temp.agg({'year': 'mean', 'month': 'mean', test_value: 'size'}).reset_index()
out_columns = ['total_rest_days', 'num_per', 'mean_per_len', 'min_per_len', 'max_per_len']
rename_mapper = {'sum': 'total_rest_days', 'count': 'num_per',
'mean': 'mean_per_len', 'min': 'min_per_len', 'max': 'max_per_len'}
all_data = pd.DataFrame(
index=pd.MultiIndex.from_product([set(data.year), set(data.month)], names=['year', 'month']),
columns=pd.MultiIndex.from_product([tnames, out_columns]))
all_data.loc[:] = np.nan
for k, v in consecutive_data.items():
v.to_csv(os.path.join(event_def_dir, 'len_rest_{}_raw.csv'.format(k)))
temp = v.groupby(['year', 'month']).agg({k: ['sum', 'count',
'mean', 'min', 'max']})
temp = temp.rename(columns=rename_mapper, level=1)
all_data = all_data.combine_first(temp)
all_data = all_data.loc[:, (tnames, out_columns)]
all_data.reset_index().astype(float).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'len_rest_month_desc_no_zeros.csv'))
t = all_data['any']['num_per'].isna().reset_index().groupby('month').agg({'num_per': ['sum', prob]})
t.to_csv(os.path.join(event_def_dir, 'len_rest_prob_no_rest.csv'))
all_data = all_data.fillna(0)
all_data.to_csv(os.path.join(event_def_dir, 'len_rest_monthly.csv'))
all_data.reset_index().groupby('month').describe().to_csv(
os.path.join(event_def_dir, 'len_rest_month_desc_with_zeros.csv'))
prob_data = pd.DataFrame(index=all_data.index)
for rt, l, nd in itertools.product(tnames, con_days, ndays):
prob_data.loc[:, '{}d_{}_{}tot'.format(l, rt, nd)] = ((all_data.loc[:, (rt, 'max_per_len')] >= l) &
(all_data.loc[:, (rt, 'total_rest_days')] >= nd))
out = prob_data.reset_index().groupby('month').agg(['sum', prob])
out_keys2 = set(out.columns.levels[0]) - {'year'}
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(prob_data.reset_index(), set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'len_rest_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'len_rest_years.csv'))
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'len_rest_prob_only_prob.csv'),
float_format='%.1f%%')
def calc_cold_recurance():
data = get_vcsn_record(vcsn_version)
data.loc[:, 'tmean'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
data.loc[:, 'tmean_raw'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
data.loc[:, 'tmean'] = data.loc[:, 'tmean'].rolling(3).mean()
data.to_csv(os.path.join(event_def_dir, 'rolling_cold_raw.csv'))
thresholds = [0, 5, 7, 10, 12]
vars = ['tmean']
ndays = [3, 5, 7, 10, 14]
out_keys = []
for thresh, v in itertools.product(thresholds, vars):
k = 'd_{}_{:02d}'.format(v, thresh)
data.loc[:, k] = data.loc[:, v] <= thresh
out_keys.append(k)
aggs = {e: 'sum' for e in out_keys}
aggs.update({e: 'mean' for e in vars})
grouped_data = data.loc[:, ['month', 'year'] + vars + out_keys].groupby(['month', 'year'])
grouped_data = grouped_data.aggregate(aggs).reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'rolling_cold_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'rolling_cold_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'rolling_cold_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'rolling_cold_years.csv'))
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'rolling_cold_prob_only_prob.csv'),
float_format='%.1f%%')
def calc_hot_recurance():
data = get_vcsn_record(vcsn_version)
data.loc[:, 'tmean'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
data.to_csv(os.path.join(event_def_dir, 'temp_raw.csv'))
thresholds = [20, 25, 28, 30, 35]
vars = ['tmax', 'tmean']
ndays = [3, 5, 7, 10, 14]
out_keys = []
for thresh, v in itertools.product(thresholds, vars):
k = 'd_{}_{:02d}'.format(v, thresh)
data.loc[:, k] = data.loc[:, v] >= thresh
out_keys.append(k)
aggs = {e: 'sum' for e in out_keys}
aggs.update({e: 'mean' for e in vars})
grouped_data = data.loc[:, ['month', 'year'] + vars + out_keys].groupby(['month', 'year'])
grouped_data = grouped_data.aggregate(aggs).reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'hot_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'hot_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'hot_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'hot_prob_only_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'hot_years.csv'))
def plot_vcsn_smd():
data, use_cords = vcsn_pull_single_site(
lat=-43.358,
lon=172.301,
year_min=1972,
year_max=2019,
use_vars=('evspsblpot', 'pr'))
print(use_cords)
temp = calc_sma_smd_historical(data['pr'], data['evspsblpot'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.set_index('date', inplace=True)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, sharex=True)
ax1.plot(data.index, data['evspsblpot'], label='pet')
ax1.plot(data.index, data['aet_out'], label='aet')
ax2.plot(data.index, data['pr'], label='rain')
ax3.plot(data.index, data['smd'], label='smd')
ax3.plot(data.index, data['mean_doy_smd'], label='daily_mean_smd')
ax4.plot(data.index, data['sma'], label='sma')
ax4.axhline(ls='--', c='k')
for ax in (ax1, ax2, ax3, ax4):
ax.legend()
plt.show()
def check_vcns_data():
data, use_cords = vcsn_pull_single_site(
lat=-43.358,
lon=172.301,
year_min=1972,
year_max=2019,
use_vars='all')
print(use_cords)
data.set_index('date', inplace=True)
for v in data.keys():
fix, (ax) = plt.subplots()
ax.plot(data.index, data[v])
ax.set_title(v)
plt.show()
def plot_restriction_record():
data = get_restriction_record()
fix, (ax) = plt.subplots()
ax.plot(pd.to_datetime(data['date']), data['f_rest'])
plt.show()
if __name__ == '__main__':
# final run set up
calc_dry_recurance_monthly_smd()
calc_dry_recurance()
calc_hot_recurance()
calc_cold_recurance()
calc_wet_recurance_ndays()
calc_restrict_cumulative_recurance()
|
[
"Climate_Shocks.get_past_record.get_restriction_record",
"pandas.to_datetime",
"os.path.exists",
"Climate_Shocks.get_past_record.get_vcsn_record",
"pandas.MultiIndex.from_product",
"itertools.product",
"pandas.DataFrame",
"Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical",
"Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_smd_monthly",
"pandas.merge",
"Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit.calc_past_pasture_growth_anomaly",
"numpy.nansum",
"matplotlib.pyplot.show",
"os.makedirs",
"os.path.join",
"Climate_Shocks.vcsn_pull.vcsn_pull_single_site",
"numpy.zeros",
"pandas.concat",
"matplotlib.pyplot.subplots"
] |
[((844, 873), 'os.path.exists', 'os.path.exists', (['event_def_dir'], {}), '(event_def_dir)\n', (858, 873), False, 'import os\n'), ((879, 905), 'os.makedirs', 'os.makedirs', (['event_def_dir'], {}), '(event_def_dir)\n', (890, 905), False, 'import os\n'), ((1731, 1828), 'pandas.merge', 'pd.merge', (['irr_temp2', 'dry_temp2'], {'left_index': '(True)', 'right_index': '(True)', 'suffixes': "('_irr', '_dry')"}), "(irr_temp2, dry_temp2, left_index=True, right_index=True, suffixes=\n ('_irr', '_dry'))\n", (1739, 1828), True, 'import pandas as pd\n'), ((1835, 1854), 'pandas.DataFrame', 'pd.DataFrame', (['temp3'], {}), '(temp3)\n', (1847, 1854), True, 'import pandas as pd\n'), ((3062, 3091), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (3077, 3091), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((3100, 3164), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_smd_monthly', 'calc_smd_monthly', ([], {'rain': 'data.rain', 'pet': 'data.pet', 'dates': 'data.index'}), '(rain=data.rain, pet=data.pet, dates=data.index)\n', (3116, 3164), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((3597, 3646), 'itertools.product', 'itertools.product', (['smd_thresholds', 'sma_thresholds'], {}), '(smd_thresholds, sma_thresholds)\n', (3614, 3646), False, 'import itertools\n'), ((5519, 5588), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (5542, 5588), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((7721, 7790), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (7744, 7790), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((8368, 8418), 'itertools.product', 'itertools.product', (['thresholds_rain', 'thresholds_smd'], {}), '(thresholds_rain, thresholds_smd)\n', (8385, 8418), False, 'import itertools\n'), ((11323, 11392), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (11346, 11392), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((19803, 19872), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (19826, 19872), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((27246, 27299), 'pandas.merge', 'pd.merge', (['hot', 'dry'], {'left_index': '(True)', 'right_index': '(True)'}), '(hot, dry, left_index=True, right_index=True)\n', (27254, 27299), True, 'import pandas as pd\n'), ((27542, 27569), 'pandas.concat', 'pd.concat', (['use_data'], {'axis': '(1)'}), '(use_data, axis=1)\n', (27551, 27569), 
True, 'import pandas as pd\n'), ((28358, 28395), 'itertools.product', 'itertools.product', (['hot_keys', 'dry_keys'], {}), '(hot_keys, dry_keys)\n', (28375, 28395), False, 'import itertools\n'), ((29887, 29911), 'pandas.DataFrame', 'pd.DataFrame', (['out_series'], {}), '(out_series)\n', (29899, 29911), True, 'import pandas as pd\n'), ((29960, 29984), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (29982, 29984), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((32635, 32659), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (32657, 32659), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((35665, 35689), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (35687, 35689), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((37964, 37998), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'all_data.index'}), '(index=all_data.index)\n', (37976, 37998), True, 'import pandas as pd\n'), ((38021, 38063), 'itertools.product', 'itertools.product', (['tnames', 'con_days', 'ndays'], {}), '(tnames, con_days, ndays)\n', (38038, 38063), False, 'import itertools\n'), ((39273, 39302), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (39288, 39302), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((39718, 39753), 'itertools.product', 'itertools.product', (['thresholds', 'vars'], {}), '(thresholds, vars)\n', (39735, 39753), False, 'import itertools\n'), ((41633, 41662), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (41648, 41662), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((41936, 41971), 'itertools.product', 'itertools.product', (['thresholds', 'vars'], {}), '(thresholds, vars)\n', (41953, 41971), False, 'import itertools\n'), ((43772, 43885), 'Climate_Shocks.vcsn_pull.vcsn_pull_single_site', 'vcsn_pull_single_site', ([], {'lat': '(-43.358)', 'lon': '(172.301)', 'year_min': '(1972)', 'year_max': '(2019)', 'use_vars': "('evspsblpot', 'pr')"}), "(lat=-43.358, lon=172.301, year_min=1972, year_max=\n 2019, use_vars=('evspsblpot', 'pr'))\n", (43793, 43885), False, 'from Climate_Shocks.vcsn_pull import vcsn_pull_single_site\n'), ((43955, 44029), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['pr']", "data['evspsblpot']", 'data.date', '(150)', '(1)'], {}), "(data['pr'], data['evspsblpot'], data.date, 150, 1)\n", (43978, 44029), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((44227, 44258), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)'}), '(4, 1, sharex=True)\n', (44239, 44258), True, 'import matplotlib.pyplot as plt\n'), ((44690, 44700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44698, 44700), True, 'import matplotlib.pyplot as plt\n'), ((44748, 44846), 'Climate_Shocks.vcsn_pull.vcsn_pull_single_site', 'vcsn_pull_single_site', ([], {'lat': '(-43.358)', 'lon': '(172.301)', 'year_min': '(1972)', 'year_max': '(2019)', 'use_vars': '"""all"""'}), "(lat=-43.358, lon=172.301, year_min=1972, 
year_max=\n 2019, use_vars='all')\n", (44769, 44846), False, 'from Climate_Shocks.vcsn_pull import vcsn_pull_single_site\n'), ((45072, 45082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45080, 45082), True, 'import matplotlib.pyplot as plt\n'), ((45127, 45151), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (45149, 45151), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((45168, 45182), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (45180, 45182), True, 'import matplotlib.pyplot as plt\n'), ((45245, 45255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45253, 45255), True, 'import matplotlib.pyplot as plt\n'), ((923, 985), 'Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit.calc_past_pasture_growth_anomaly', 'calc_past_pasture_growth_anomaly', (['"""irrigated"""'], {'site': '"""eyrewell"""'}), "('irrigated', site='eyrewell')\n", (955, 985), False, 'from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly\n'), ((1131, 1174), 'Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit.calc_past_pasture_growth_anomaly', 'calc_past_pasture_growth_anomaly', (['"""dryland"""'], {}), "('dryland')\n", (1163, 1174), False, 'from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly\n'), ((1323, 1335), 'numpy.nansum', 'np.nansum', (['x'], {}), '(x)\n', (1332, 1335), True, 'import numpy as np\n'), ((3395, 3449), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_raw.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_raw.csv')\n", (3407, 3449), False, 'import os\n'), ((3999, 4062), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_monthly_data.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_monthly_data.csv')\n", (4011, 4062), False, 'import os\n'), ((4139, 4207), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_monthly_data_desc.csv')\n", (4151, 4207), False, 'import os\n'), ((5094, 5149), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_prob.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_prob.csv')\n", (5106, 5149), False, 'import os\n'), ((5214, 5279), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_prob_only_prob.csv')\n", (5226, 5279), False, 'import os\n'), ((5366, 5422), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_years.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_years.csv')\n", (5378, 5422), False, 'import os\n'), ((5728, 5770), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_raw.csv"""'], {}), "(event_def_dir, 'dry_raw.csv')\n", (5740, 5770), False, 'import os\n'), ((6302, 6353), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_monthly_data.csv"""'], {}), "(event_def_dir, 'dry_monthly_data.csv')\n", (6314, 6353), False, 'import os\n'), ((6430, 6486), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'dry_monthly_data_desc.csv')\n", (6442, 6486), False, 'import os\n'), ((7373, 7416), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_prob.csv"""'], {}), "(event_def_dir, 'dry_prob.csv')\n", (7385, 7416), False, 'import os\n'), ((7481, 7534), 
'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'dry_prob_only_prob.csv')\n", (7493, 7534), False, 'import os\n'), ((7581, 7625), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_years.csv"""'], {}), "(event_def_dir, 'dry_years.csv')\n", (7593, 7625), False, 'import os\n'), ((8005, 8033), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)'}), '(3, sharex=True)\n', (8017, 8033), True, 'import matplotlib.pyplot as plt\n'), ((8158, 8168), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8166, 8168), True, 'import matplotlib.pyplot as plt\n'), ((8186, 8232), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_raw.csv"""'], {}), "(event_def_dir, 'smd_wet_raw.csv')\n", (8198, 8232), False, 'import os\n'), ((9448, 9503), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_monthly_data.csv"""'], {}), "(event_def_dir, 'smd_wet_monthly_data.csv')\n", (9460, 9503), False, 'import os\n'), ((9581, 9641), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'smd_wet_monthly_data_desc.csv')\n", (9593, 9641), False, 'import os\n'), ((10552, 10599), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_prob.csv"""'], {}), "(event_def_dir, 'smd_wet_prob.csv')\n", (10564, 10599), False, 'import os\n'), ((10664, 10721), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_prob_only_prob.csv"""'], {}), "(event_def_dir, 'smd_wet_prob_only_prob.csv')\n", (10676, 10721), False, 'import os\n'), ((10808, 10856), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_years.csv"""'], {}), "(event_def_dir, 'smd_wet_years.csv')\n", (10820, 10856), False, 'import os\n'), ((11607, 11655), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_raw.csv"""'], {}), "(event_def_dir, 'ndays_wet_raw.csv')\n", (11619, 11655), False, 'import os\n'), ((11802, 11859), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_monthly_data.csv"""'], {}), "(event_def_dir, 'ndays_wet_monthly_data.csv')\n", (11814, 11859), False, 'import os\n'), ((11937, 11999), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'ndays_wet_monthly_data_desc.csv')\n", (11949, 11999), False, 'import os\n'), ((13036, 13085), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_prob.csv"""'], {}), "(event_def_dir, 'ndays_wet_prob.csv')\n", (13048, 13085), False, 'import os\n'), ((13150, 13209), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_prob_only_prob.csv"""'], {}), "(event_def_dir, 'ndays_wet_prob_only_prob.csv')\n", (13162, 13209), False, 'import os\n'), ((13296, 13346), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_years.csv"""'], {}), "(event_def_dir, 'ndays_wet_years.csv')\n", (13308, 13346), False, 'import os\n'), ((15471, 15525), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_monthly.csv"""'], {}), "(event_def_dir, 'rolling_dry_monthly.csv')\n", (15483, 15525), False, 'import os\n'), ((16136, 16187), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_prob.csv"""'], {}), "(event_def_dir, 'rolling_dry_prob.csv')\n", (16148, 16187), False, 'import os\n'), ((16252, 16314), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_prob_only_prob.csv"""'], {}), "(event_def_dir, 'variable_hot_prob_only_prob.csv')\n", (16264, 16314), False, 'import os\n'), ((16401, 16453), 
'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_years.csv"""'], {}), "(event_def_dir, 'rolling_dry_years.csv')\n", (16413, 16453), False, 'import os\n'), ((20087, 20135), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_raw.csv"""'], {}), "(event_def_dir, 'ndays_dry_raw.csv')\n", (20099, 20135), False, 'import os\n'), ((20282, 20339), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_monthly_data.csv"""'], {}), "(event_def_dir, 'ndays_dry_monthly_data.csv')\n", (20294, 20339), False, 'import os\n'), ((20417, 20479), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'ndays_dry_monthly_data_desc.csv')\n", (20429, 20479), False, 'import os\n'), ((21510, 21559), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_prob.csv"""'], {}), "(event_def_dir, 'ndays_dry_prob.csv')\n", (21522, 21559), False, 'import os\n'), ((21624, 21683), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'ndays_dry_prob_only_prob.csv')\n", (21636, 21683), False, 'import os\n'), ((21770, 21820), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_years.csv"""'], {}), "(event_def_dir, 'ndays_dry_years.csv')\n", (21782, 21820), False, 'import os\n'), ((25982, 26037), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_monthly.csv"""'], {}), "(event_def_dir, 'variable_hot_monthly.csv')\n", (25994, 26037), False, 'import os\n'), ((26648, 26700), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_prob.csv"""'], {}), "(event_def_dir, 'variable_hot_prob.csv')\n", (26660, 26700), False, 'import os\n'), ((26765, 26827), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_prob_only_prob.csv"""'], {}), "(event_def_dir, 'variable_hot_prob_only_prob.csv')\n", (26777, 26827), False, 'import os\n'), ((26914, 26967), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_years.csv"""'], {}), "(event_def_dir, 'variable_hot_years.csv')\n", (26926, 26967), False, 'import os\n'), ((27014, 27067), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_years.csv"""'], {}), "(event_def_dir, 'variable_hot_years.csv')\n", (27026, 27067), False, 'import os\n'), ((27136, 27188), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_years.csv"""'], {}), "(event_def_dir, 'rolling_dry_years.csv')\n", (27148, 27188), False, 'import os\n'), ((29124, 29177), 'os.path.join', 'os.path.join', (['event_def_dir', '"""joint_hot_dry_prob.csv"""'], {}), "(event_def_dir, 'joint_hot_dry_prob.csv')\n", (29136, 29177), False, 'import os\n'), ((29284, 29347), 'os.path.join', 'os.path.join', (['event_def_dir', '"""joint_hot_dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'joint_hot_dry_prob_only_prob.csv')\n", (29296, 29347), False, 'import os\n'), ((29501, 29561), 'os.path.join', 'os.path.join', (['event_def_dir', '"""joint_hot_dry_mean_impact.csv"""'], {}), "(event_def_dir, 'joint_hot_dry_mean_impact.csv')\n", (29513, 29561), False, 'import os\n'), ((31163, 31215), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data.csv"""'], {}), "(event_def_dir, 'rest_monthly_data.csv')\n", (31175, 31215), False, 'import os\n'), ((31292, 31349), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'rest_monthly_data_desc.csv')\n", (31304, 31349), False, 'import os\n'), ((32122, 32167), 'os.path.join', 
'os.path.join', (['event_def_dir', '"""rest_years.csv"""'], {}), "(event_def_dir, 'rest_years.csv')\n", (32134, 32167), False, 'import os\n'), ((32344, 32392), 'os.path.join', 'os.path.join', (['event_def_dir', '"""old_rest_prob.csv"""'], {}), "(event_def_dir, 'old_rest_prob.csv')\n", (32356, 32392), False, 'import os\n'), ((32457, 32515), 'os.path.join', 'os.path.join', (['event_def_dir', '"""old_rest_prob_only_prob.csv"""'], {}), "(event_def_dir, 'old_rest_prob_only_prob.csv')\n", (32469, 32515), False, 'import os\n'), ((33946, 33998), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data.csv"""'], {}), "(event_def_dir, 'rest_monthly_data.csv')\n", (33958, 33998), False, 'import os\n'), ((34075, 34132), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'rest_monthly_data_desc.csv')\n", (34087, 34132), False, 'import os\n'), ((35152, 35197), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_years.csv"""'], {}), "(event_def_dir, 'rest_years.csv')\n", (35164, 35197), False, 'import os\n'), ((35374, 35418), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_prob.csv"""'], {}), "(event_def_dir, 'rest_prob.csv')\n", (35386, 35418), False, 'import os\n'), ((35542, 35596), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_prob_only_prob.csv"""'], {}), "(event_def_dir, 'rest_prob_only_prob.csv')\n", (35554, 35596), False, 'import os\n'), ((37371, 37434), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_month_desc_no_zeros.csv"""'], {}), "(event_def_dir, 'len_rest_month_desc_no_zeros.csv')\n", (37383, 37434), False, 'import os\n'), ((37643, 37699), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_prob_no_rest.csv"""'], {}), "(event_def_dir, 'len_rest_prob_no_rest.csv')\n", (37655, 37699), False, 'import os\n'), ((37755, 37806), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_monthly.csv"""'], {}), "(event_def_dir, 'len_rest_monthly.csv')\n", (37767, 37806), False, 'import os\n'), ((37880, 37945), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_month_desc_with_zeros.csv"""'], {}), "(event_def_dir, 'len_rest_month_desc_with_zeros.csv')\n", (37892, 37945), False, 'import os\n'), ((38925, 38973), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_prob.csv"""'], {}), "(event_def_dir, 'len_rest_prob.csv')\n", (38937, 38973), False, 'import os\n'), ((39019, 39068), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_years.csv"""'], {}), "(event_def_dir, 'len_rest_years.csv')\n", (39031, 39068), False, 'import os\n'), ((39110, 39168), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_prob_only_prob.csv"""'], {}), "(event_def_dir, 'len_rest_prob_only_prob.csv')\n", (39122, 39168), False, 'import os\n'), ((39539, 39590), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_raw.csv"""'], {}), "(event_def_dir, 'rolling_cold_raw.csv')\n", (39551, 39590), False, 'import os\n'), ((40142, 40202), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_monthly_data.csv"""'], {}), "(event_def_dir, 'rolling_cold_monthly_data.csv')\n", (40154, 40202), False, 'import os\n'), ((40280, 40345), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'rolling_cold_monthly_data_desc.csv')\n", (40292, 40345), False, 'import os\n'), ((41274, 41326), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_prob.csv"""'], 
{}), "(event_def_dir, 'rolling_cold_prob.csv')\n", (41286, 41326), False, 'import os\n'), ((41372, 41425), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_years.csv"""'], {}), "(event_def_dir, 'rolling_cold_years.csv')\n", (41384, 41425), False, 'import os\n'), ((41467, 41529), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_prob_only_prob.csv"""'], {}), "(event_def_dir, 'rolling_cold_prob_only_prob.csv')\n", (41479, 41529), False, 'import os\n'), ((41754, 41797), 'os.path.join', 'os.path.join', (['event_def_dir', '"""temp_raw.csv"""'], {}), "(event_def_dir, 'temp_raw.csv')\n", (41766, 41797), False, 'import os\n'), ((42360, 42411), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_monthly_data.csv"""'], {}), "(event_def_dir, 'hot_monthly_data.csv')\n", (42372, 42411), False, 'import os\n'), ((42489, 42545), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'hot_monthly_data_desc.csv')\n", (42501, 42545), False, 'import os\n'), ((43473, 43516), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_prob.csv"""'], {}), "(event_def_dir, 'hot_prob.csv')\n", (43485, 43516), False, 'import os\n'), ((43581, 43634), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_prob_only_prob.csv"""'], {}), "(event_def_dir, 'hot_prob_only_prob.csv')\n", (43593, 43634), False, 'import os\n'), ((43681, 43725), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_years.csv"""'], {}), "(event_def_dir, 'hot_years.csv')\n", (43693, 43725), False, 'import os\n'), ((44992, 45006), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (45004, 45006), True, 'import matplotlib.pyplot as plt\n'), ((45195, 45223), 'pandas.to_datetime', 'pd.to_datetime', (["data['date']"], {}), "(data['date'])\n", (45209, 45223), True, 'import pandas as pd\n'), ((5463, 5492), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (5478, 5492), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((7666, 7695), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (7681, 7695), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((11268, 11297), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (11283, 11297), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((14616, 14645), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (14631, 14645), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((19748, 19777), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (19763, 19777), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((25134, 25163), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (25149, 25163), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((27889, 27926), 'itertools.product', 'itertools.product', (['hot_keys', 'dry_keys'], {}), '(hot_keys, dry_keys)\n', (27906, 27926), False, 'import itertools\n'), ((28035, 28142), 'pandas.MultiIndex.from_product', 
'pd.MultiIndex.from_product', (["(full_event_names, ['prob'] + _describe_names)"], {'names': "['event', 'pga_desc']"}), "((full_event_names, ['prob'] + _describe_names),\n names=['event', 'pga_desc'])\n", (28061, 28142), True, 'import pandas as pd\n'), ((36786, 36835), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[tnames, out_columns]'], {}), '([tnames, out_columns])\n', (36812, 36835), True, 'import pandas as pd\n'), ((2874, 2895), 'numpy.zeros', 'np.zeros', (['missing_len'], {}), '(missing_len)\n', (2882, 2895), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
import numpy_net as npn
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, help='Learning rate', default=0.1)
parser.add_argument('--epochs', type=int, help='Number of epochs', default=10)
parser.add_argument('--batch-size', type=int, help='Batch size', default=50)
parser.add_argument('--model',
type=str,
help="Model type",
choices=['dense', 'conv'],
default='conv')
args = parser.parse_args()
N_CLASSES = 10
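# MEAN/STD of 127.5 rescale raw pixel values from [0, 255] to [-1, 1] in normalize()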
MEAN = 127.5
STD = 127.5
CONV_SHAPE = (-1, 28, 28, 1)
def to_onehot(y, n_classes):
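    # np.eye(n_classes)[y] picks the one-hot row for every integer label in y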
return np.eye(n_classes)[y]
def normalize(x):
    # Note: this is a crude but simple normalization that rescales pixels to [-1, 1]
    # For a more precise version, subtract the dataset's actual mean
    # and divide by its standard deviation
return (x - MEAN) / STD
def get_data():
# Data
train_x, train_y, val_x, val_y = npn.load_mnist()
# One hot encoding
train_y = to_onehot(train_y, val_y.max() + 1)
val_y = to_onehot(val_y, val_y.max() + 1)
# Normalizing
train_x = normalize(train_x)
val_x = normalize(val_x)
# Reshape
if args.model == 'conv':
train_x = train_x.reshape(*CONV_SHAPE)
val_x = val_x.reshape(*CONV_SHAPE)
return train_x, train_y, val_x, val_y
def get_model(inp_channels):
# Model
model_f = npn.DenseModel if args.model == 'dense' else npn.ConvModel
return model_f(inp_channels, N_CLASSES)
# Shuffle the data
def shuffle(x, y):
i = np.arange(len(y))
np.random.shuffle(i)
return x[i], y[i]
# Run a single epoch
def run_epoch(model, loss, X, Y, backprop=True, name='Train'):
# Shuffle data
if name == 'Train':
X, Y = shuffle(X, Y)
losses, hits = [], 0
for start in range(0, len(Y), args.batch_size):
# Get batch
x = X[start:start + args.batch_size]
y = Y[start:start + args.batch_size]
# Predict
y_hat = model(x)
# Metrics
losses.append(loss(y_hat, y))
hits += (y_hat.argmax(axis=1) == y.argmax(axis=1)).sum()
# Backprop if needed
if backprop:
model.update(loss.backward(y_hat, y), lr=args.lr)
    # Calculate total loss and accuracy
total_loss = np.mean(losses)
total_acc = hits / len(Y)
# Print results to standard output
print(f"{name} loss: {(total_loss):.3f} | acc: {total_acc*100:2.2f}%")
if __name__ == "__main__":
# Loss
loss_fn = npn.CrossEntropy()
# Data
train_x, train_y, val_x, val_y = get_data()
# Model
model = get_model(train_x.shape[-1])
# TRAIN
for epoch in range(args.epochs):
print(f"Epoch {epoch+1}/{args.epochs}")
run_epoch(model, loss_fn, train_x, train_y)
run_epoch(model, loss_fn, val_x, val_y, backprop=False, name='Val')
print()
|
[
"numpy.mean",
"numpy.eye",
"argparse.ArgumentParser",
"numpy_net.load_mnist",
"numpy_net.CrossEntropy",
"numpy.random.shuffle"
] |
[((69, 94), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (92, 94), False, 'import argparse\n'), ((927, 943), 'numpy_net.load_mnist', 'npn.load_mnist', ([], {}), '()\n', (941, 943), True, 'import numpy_net as npn\n'), ((1552, 1572), 'numpy.random.shuffle', 'np.random.shuffle', (['i'], {}), '(i)\n', (1569, 1572), True, 'import numpy as np\n'), ((2281, 2296), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2288, 2296), True, 'import numpy as np\n'), ((2497, 2515), 'numpy_net.CrossEntropy', 'npn.CrossEntropy', ([], {}), '()\n', (2513, 2515), True, 'import numpy_net as npn\n'), ((648, 665), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (654, 665), True, 'import numpy as np\n')]
|
import os
def configuration(parent_package="", top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration("simulator", parent_package, top_path)
libraries = []
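    # link the C math library (libm) on POSIX platforms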
if os.name == "posix":
libraries.append("m")
# cpp_args = ['-stdlib=libc++', '-mmacosx-version-min=10.7']
config.add_extension(
"_simulatorc",
sources=["_simulatorc.pyx", "simulator.cpp"],
include_dirs=numpy.get_include(),
libraries=libraries,
language="c++",
# extra_compile_args = cpp_args,
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
|
[
"numpy.distutils.misc_util.Configuration",
"numpy.get_include"
] |
[((152, 204), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""simulator"""', 'parent_package', 'top_path'], {}), "('simulator', parent_package, top_path)\n", (165, 204), False, 'from numpy.distutils.misc_util import Configuration\n'), ((473, 492), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (490, 492), False, 'import numpy\n')]
|
import numpy as np
import pandas as pd
import skfuzzy as fuzz
from skfuzzy import control as ctrl
x = ctrl.Antecedent(np.arange(0.0, 2.0), "X")
y = ctrl.Consequent(np.arange(0.0, 2), "Y")
x.automf(names=["pequeno", "médio", "grande"])
y.automf(names=["baixo", "alto"])
regra_1 = ctrl.Rule(antecedent=x["pequeno"], consequent=y["baixo"], label="regra_1")
regra_2 = ctrl.Rule(antecedent=x["médio"], consequent=y["baixo"], label="regra_2")
regra_3 = ctrl.Rule(antecedent=x["médio"], consequent=y["alto"], label="regra_3") ####
regra_4 = ctrl.Rule(antecedent=x["grande"], consequent=y["alto"], label="regra_4") ####
controlador = ctrl.ControlSystem(rules=[regra_1, regra_2, regra_3, regra_4])
simulador = ctrl.ControlSystemSimulation(control_system=controlador)
# -----------------------------------------------------------------------------
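# gerador: draw n random x values in [0, 1) and pair each with y = x**2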
def gerador(n=50):
amostras = []
for amostra in range(n):
x = np.random.random()
y = x ** 2
amostras.append([x, y])
return amostras
def main(amostras, valores, verboso=False):
soma_dos_erros = 0
for i, amostra in enumerate(amostras.values):
print("---------------------") if verboso else None
simulador.input["X"] = amostra
simulador.compute()
if verboso:
print(f"AMOSTRA {i}\nX={amostra:.4f}\nY={simulador.output['Y']:.4f}\n")
        soma_dos_erros += (valores[i] - simulador.output["Y"]) ** 2  # squared error between the target and the fuzzy output
erro_total = soma_dos_erros / len(amostras)
print("---------------------") if verboso else None
print(f"ERRO TOTAL: {erro_total:.4f}")
# x.view(sim=simulador)
# y.view(sim=simulador)
if __name__ == "__main__":
# df = pd.read_csv('dados.csv', header=None)
df = pd.DataFrame(gerador(50))
A = df[0]
B = df[1]
main(A, B)
input()
|
[
"skfuzzy.control.ControlSystemSimulation",
"numpy.random.random",
"skfuzzy.control.ControlSystem",
"skfuzzy.control.Rule",
"numpy.arange"
] |
[((282, 356), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['pequeno']", 'consequent': "y['baixo']", 'label': '"""regra_1"""'}), "(antecedent=x['pequeno'], consequent=y['baixo'], label='regra_1')\n", (291, 356), True, 'from skfuzzy import control as ctrl\n'), ((367, 439), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['médio']", 'consequent': "y['baixo']", 'label': '"""regra_2"""'}), "(antecedent=x['médio'], consequent=y['baixo'], label='regra_2')\n", (376, 439), True, 'from skfuzzy import control as ctrl\n'), ((450, 521), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['médio']", 'consequent': "y['alto']", 'label': '"""regra_3"""'}), "(antecedent=x['médio'], consequent=y['alto'], label='regra_3')\n", (459, 521), True, 'from skfuzzy import control as ctrl\n'), ((538, 610), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['grande']", 'consequent': "y['alto']", 'label': '"""regra_4"""'}), "(antecedent=x['grande'], consequent=y['alto'], label='regra_4')\n", (547, 610), True, 'from skfuzzy import control as ctrl\n'), ((633, 695), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[regra_1, regra_2, regra_3, regra_4]'}), '(rules=[regra_1, regra_2, regra_3, regra_4])\n', (651, 695), True, 'from skfuzzy import control as ctrl\n'), ((708, 764), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', ([], {'control_system': 'controlador'}), '(control_system=controlador)\n', (736, 764), True, 'from skfuzzy import control as ctrl\n'), ((119, 138), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)'], {}), '(0.0, 2.0)\n', (128, 138), True, 'import numpy as np\n'), ((165, 182), 'numpy.arange', 'np.arange', (['(0.0)', '(2)'], {}), '(0.0, 2)\n', (174, 182), True, 'import numpy as np\n'), ((926, 944), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (942, 944), True, 'import numpy as np\n')]
|
import glob, os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def quick_plot(results_file, gauss_width, start, stop, step):
with open(results_file, "r") as results:
results = results.read().split('\n')
results = [float(res) for res in results[:-1]]
eigenenergies = results
D_E = 0
E = np.arange(start, stop, step)
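    # broaden every eigenenergy with a Gaussian of width gauss_width and accumulate the density of states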
for eigenenergy in eigenenergies:
D_E = D_E + np.exp(-(E - eigenenergy)**2 / (2 * gauss_width**2)) / (np.pi * gauss_width * np.sqrt(2))
font = {'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': 16}
plt.figure(figsize=(13.66, 7.68))
plt.plot(E, D_E)
plt.xlabel('\nEnergy [a.u.]', fontsize=15,fontdict=font)
section = np.arange(-1, 1, 1/20.)
plt.fill_between(E, D_E, color='blue', alpha=0.3)
plt.ylabel('DOS\n', fontsize=15,fontdict=font)
plt.title('Density of states\n', fontsize=15,fontdict=font)
plt.xlim(start, stop)
plt.ylim(bottom=0)
plt.subplots_adjust(left=0.15)
plt.xticks(fontsize=11)
plt.yticks(fontsize=11)
#plt.gca().spines['right'].set_position(('data',0))
#plt.gca().spines['top'].set_position(('data',0))
plt.savefig(results_file + '.png', dpi=400)
plt.grid(False)
plt.close()
return
def main():
sns.set()
start = [-7,-6,-1.1,-6]#-7,-5.5,-5,-7,-0.1,-7,-5.,-6.6,-7,-0.5,-6.5,-7,-5,-7,-6,-7,-7,-7,0.1,0.5,-6,-0.5,-7,-7,-0.6,-7,-5.5,-6,-7,-7,-7,-7,-7,-6.5,-7,-7,-7
stop = [7,6,10.1,6] #7,14.5,5,7,14.5,7,13.5,6.5,7,15.5,15,7,14.,7,6,7,7,7,14.5,14.5,6,10,7,7,15.5,7,13.7,6,7,7,7,7,7,6.5,7,7,7
step = 0.01
gauss_width = 0.06
path = "/home/przemek/Documents/Modeling/tight_binding/results_diploma"
results = []
print(len(start), len(stop))
os.chdir(path)
for file in glob.glob("*.txt"):
input_file = path + '/' + file
ready_input_file = open(input_file, 'r')
num_list = [float(num) for num in ready_input_file.read().split()]
max_val = max(num_list)
min_val = min(num_list)
results.append([max_val, min_val, file])
for num, result in enumerate(results):
print(result[2])
print(start[num], stop[num])
quick_plot(path + '/' + result[2], gauss_width, start[num], stop[num], step)
return
if __name__ == '__main__':
exit(main())
|
[
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.arange",
"seaborn.set",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"glob.glob",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplots_adjust",
"os.chdir",
"matplotlib.pyplot.figure"
] |
[((360, 388), 'numpy.arange', 'np.arange', (['start', 'stop', 'step'], {}), '(start, stop, step)\n', (369, 388), True, 'import numpy as np\n'), ((640, 673), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13.66, 7.68)'}), '(figsize=(13.66, 7.68))\n', (650, 673), True, 'import matplotlib.pyplot as plt\n'), ((676, 692), 'matplotlib.pyplot.plot', 'plt.plot', (['E', 'D_E'], {}), '(E, D_E)\n', (684, 692), True, 'import matplotlib.pyplot as plt\n'), ((695, 752), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\nEnergy [a.u.]"""'], {'fontsize': '(15)', 'fontdict': 'font'}), "('\\nEnergy [a.u.]', fontsize=15, fontdict=font)\n", (705, 752), True, 'import matplotlib.pyplot as plt\n'), ((764, 790), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(1 / 20.0)'], {}), '(-1, 1, 1 / 20.0)\n', (773, 790), True, 'import numpy as np\n'), ((790, 839), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['E', 'D_E'], {'color': '"""blue"""', 'alpha': '(0.3)'}), "(E, D_E, color='blue', alpha=0.3)\n", (806, 839), True, 'import matplotlib.pyplot as plt\n'), ((842, 889), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""DOS\n"""'], {'fontsize': '(15)', 'fontdict': 'font'}), "('DOS\\n', fontsize=15, fontdict=font)\n", (852, 889), True, 'import matplotlib.pyplot as plt\n'), ((891, 951), 'matplotlib.pyplot.title', 'plt.title', (['"""Density of states\n"""'], {'fontsize': '(15)', 'fontdict': 'font'}), "('Density of states\\n', fontsize=15, fontdict=font)\n", (900, 951), True, 'import matplotlib.pyplot as plt\n'), ((953, 974), 'matplotlib.pyplot.xlim', 'plt.xlim', (['start', 'stop'], {}), '(start, stop)\n', (961, 974), True, 'import matplotlib.pyplot as plt\n'), ((977, 995), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (985, 995), True, 'import matplotlib.pyplot as plt\n'), ((998, 1028), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)'}), '(left=0.15)\n', (1017, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1054), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(11)'}), '(fontsize=11)\n', (1041, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1080), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(11)'}), '(fontsize=11)\n', (1067, 1080), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1232), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_file + '.png')"], {'dpi': '(400)'}), "(results_file + '.png', dpi=400)\n", (1200, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1250), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1243, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1264), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1262, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1305), 'seaborn.set', 'sns.set', ([], {}), '()\n', (1303, 1305), True, 'import seaborn as sns\n'), ((1752, 1766), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1760, 1766), False, 'import glob, os\n'), ((1781, 1799), 'glob.glob', 'glob.glob', (['"""*.txt"""'], {}), "('*.txt')\n", (1790, 1799), False, 'import glob, os\n'), ((443, 499), 'numpy.exp', 'np.exp', (['(-(E - eigenenergy) ** 2 / (2 * gauss_width ** 2))'], {}), '(-(E - eigenenergy) ** 2 / (2 * gauss_width ** 2))\n', (449, 499), True, 'import numpy as np\n'), ((521, 531), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (528, 531), True, 'import numpy as np\n')]
|
from ..tweet_sentiment_classifier import Classifier, tokenizer_filter
import pickle as pkl
import numpy as np
import json
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
class BoW_Model(Classifier):
def __init__(self, vocab_size=100000, max_iter=10000, validation_split=0.2, accuracy=0, bootstrap=1,
remove_stopwords=True, remove_punctuation=True, lemmatize=True, **kwargs):
"""
Constructor for BoW_Model
Be sure to add additional parameters to export()
:param vocab_size: (int) Maximum vocabulary size. Default 1E6
:param max_iter: (int) Maximum number of fit iterations
:param remove_punctuation: (Bool) Remove punctuation. Recommended.
:param remove_stopwords: (Bool) Remove stopwords. Recommended.
:param lemmatize: (Bool) Lemmatize words. Recommended.
"""
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.bow_models'
self.type = 'BoW_Model'
self.vectorizer = None
self.classifier = None
self.vocab_size = vocab_size
self.max_iter = max_iter
self.validation_split = validation_split
self.accuracy = accuracy
self.bootstrap = bootstrap
self.remove_punctuation = remove_punctuation
self.remove_stopwords = remove_stopwords
self.lemmatize = lemmatize
def fit(self, train_data, y, weights=None, custom_vocabulary=None):
"""
Fit the model (from scratch)
:param train_data: (List-like) List of strings to train on
:param y: (vector) Targets
:param weights: (vector) Training weights. Optional
:param custom_vocabulary: (List of Strings) Custom vocabulary. Not recommended
"""
if weights is not None:
try:
y = np.hstack(y, weights)
except:
print('Weights not accepted')
if 1 < self.bootstrap < len(y):
train_data, y = resample(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y = resample(train_data, y, n_samples=n_samples, stratify=y, replace=False)
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize)
self.vectorizer = TfidfVectorizer(analyzer=str.split, max_features=self.vocab_size)
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = self.vectorizer.fit_transform(cleaned_data)
trainX, testX, trainY, testY = train_test_split(X, y, test_size=self.validation_split, stratify=y)
print('Fitting BoW model')
self.classifier = LogisticRegression(max_iter=self.max_iter).fit(trainX, trainY)
self.accuracy = accuracy_score(testY, self.classifier.predict(testX))
def refine(self, train_data, y, bootstrap=True, weights=None, max_iter=500, preprocess=True):
"""
Train the models further on new data. Note that it is not possible to increase the vocabulary
:param train_data: (List-like of Strings) List of strings to train on
:param y: (vector) Targets
:param max_iter: (int) Maximum number of fit iterations. Default: 500
"""
if weights is not None:
try:
y = np.hstack(y, weights)
except:
print('Weights not accepted')
if bootstrap and 1 < self.bootstrap < len(y):
train_data, y = resample(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)
elif bootstrap and self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y = resample(train_data, y, n_samples=n_samples, stratify=y, replace=False)
if preprocess:
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize)
print('\n Filtered data')
else:
filtered_data = train_data
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = self.vectorizer.transform(cleaned_data)
self.classifier = LogisticRegression(random_state=0, max_iter=max_iter).fit(X, y)
def predict(self, data, **kwargs):
"""
Predict the binary sentiment of a list of tweets
:param data: (list of Strings) Input tweets
:param kwargs: Keywords for predict_proba
:return: (list of bool) Predictions
"""
return np.round(self.predict_proba(data, **kwargs))
def predict_proba(self, data):
"""
Makes predictions
:param data: (List-like) List of strings to predict sentiment
:return: (vector) Un-binarized Predictions
"""
if self.classifier is None:
raise ValueError('Model has not been trained!')
filtered_data = tokenizer_filter(data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
verbose=False)
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = self.vectorizer.transform(cleaned_data)
        # probability of the positive class; predict() binarizes this with np.round
        return self.classifier.predict_proba(X)[:, 1]
def export(self, filename):
"""
Saves the model to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'vocab_size': int(self.vocab_size),
'max_iter': int(self.max_iter),
'validation_split': float(self.validation_split),
'accuracy': float(self.accuracy),
'remove_punctuation': self.remove_punctuation,
'remove_stopwords': self.remove_stopwords,
'lemmatize': self.lemmatize,
'bootstrap': self.bootstrap
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
with open(filename + '/bow_vectorizer.pkl', 'wb+') as outfile:
pkl.dump(self.vectorizer, outfile)
with open(filename + '/bow_classifier.pkl', 'wb+') as outfile:
pkl.dump(self.classifier, outfile)
def load_model(self, filename):
"""
        Load a previously exported model from disk
        :param filename: (String) Path to file
        """
        with open(filename + '/bow_vectorizer.pkl', 'rb') as infile:
            self.vectorizer = pkl.load(infile)
        with open(filename + '/bow_classifier.pkl', 'rb') as infile:
            self.classifier = pkl.load(infile)
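# Example usage (hypothetical data: `tweets` would be a list of strings and
# `labels` a matching 0/1 vector; neither is defined in this module):
#   model = BoW_Model(vocab_size=50000)
#   model.fit(tweets, labels)
#   predictions = model.predict(["what a great game today"])
#   model.export('bow_model_v1')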
|
[
"pickle.dump",
"os.makedirs",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.utils.resample",
"json.dump"
] |
[((2678, 2743), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': 'str.split', 'max_features': 'self.vocab_size'}), '(analyzer=str.split, max_features=self.vocab_size)\n', (2693, 2743), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2908, 2975), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'self.validation_split', 'stratify': 'y'}), '(X, y, test_size=self.validation_split, stratify=y)\n', (2924, 2975), False, 'from sklearn.model_selection import train_test_split\n'), ((6662, 6698), 'os.makedirs', 'os.makedirs', (['filename'], {'exist_ok': '(True)'}), '(filename, exist_ok=True)\n', (6673, 6698), False, 'import os\n'), ((2183, 2259), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'self.bootstrap', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)\n', (2191, 2259), False, 'from sklearn.utils import resample\n'), ((3836, 3912), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'self.bootstrap', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)\n', (3844, 3912), False, 'from sklearn.utils import resample\n'), ((6773, 6803), 'json.dump', 'json.dump', (['parameters', 'outfile'], {}), '(parameters, outfile)\n', (6782, 6803), False, 'import json\n'), ((6887, 6921), 'pickle.dump', 'pkl.dump', (['self.vectorizer', 'outfile'], {}), '(self.vectorizer, outfile)\n', (6895, 6921), True, 'import pickle as pkl\n'), ((7005, 7039), 'pickle.dump', 'pkl.dump', (['self.classifier', 'outfile'], {}), '(self.classifier, outfile)\n', (7013, 7039), True, 'import pickle as pkl\n'), ((2026, 2047), 'numpy.hstack', 'np.hstack', (['y', 'weights'], {}), '(y, weights)\n', (2035, 2047), True, 'import numpy as np\n'), ((2374, 2445), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'n_samples', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=n_samples, stratify=y, replace=False)\n', (2382, 2445), False, 'from sklearn.utils import resample\n'), ((3038, 3080), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': 'self.max_iter'}), '(max_iter=self.max_iter)\n', (3056, 3080), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3665, 3686), 'numpy.hstack', 'np.hstack', (['y', 'weights'], {}), '(y, weights)\n', (3674, 3686), True, 'import numpy as np\n'), ((4041, 4112), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'n_samples', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=n_samples, stratify=y, replace=False)\n', (4049, 4112), False, 'from sklearn.utils import resample\n'), ((4586, 4639), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'max_iter': 'max_iter'}), '(random_state=0, max_iter=max_iter)\n', (4604, 4639), False, 'from sklearn.linear_model import LogisticRegression\n')]
|
# Owen's experiment to convert a CSDS to the HF data structure
import datasets
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
from datasets import Dataset, DatasetDict, ClassLabel, load_metric
# create a CSDS as dict
# First create a mapping from string labels to integers
c2l = ClassLabel(num_classes=3, names=['CB', 'NCB', 'NA'])
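# str2int maps the names in order: 'CB' -> 0, 'NCB' -> 1, 'NA' -> 2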
csds_train_dict = {'text': ["John said he * likes * beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary maybe likes beets."
],
'label': map(c2l.str2int, ["CB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB"])}
csds_eval_dict = {'text': ["Peter said he likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan maybe likes beets."
],
'label': map(c2l.str2int, ["CB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB"])}
csds_train_dataset = Dataset.from_dict(csds_train_dict)
csds_eval_dataset = Dataset.from_dict(csds_eval_dict)
csds_datasets = DatasetDict({'train': csds_train_dataset,
'eval': csds_eval_dataset})
def notify(string):
print(">>>> ", string, " <<<<")
notify("Created datset, now tokenizing dataset")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
def tokenize_function(examples):
return tokenizer(examples["text"], padding="max_length", truncation=True)
tokenized_csds_datasets = csds_datasets.map(tokenize_function, batched=True)
notify("Done tokenizing dataset")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=3)
metric = load_metric("accuracy")
# For experimentation with a small subset of the input data and a shorter
# running time, pass reduced versions of the tokenized 'train' and 'eval'
# splits to the Trainer below instead of the full datasets.
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
notify("Starting training")
training_args = TrainingArguments("../CSDS/test_trainer")
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_csds_datasets['train'],
eval_dataset=tokenized_csds_datasets['eval'],
compute_metrics=compute_metrics,
)
trainer.train()
notify("Done training")
results = trainer.evaluate()
print(results)
|
[
"datasets.load_metric",
"transformers.TrainingArguments",
"datasets.Dataset.from_dict",
"numpy.argmax",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"datasets.DatasetDict",
"datasets.ClassLabel",
"transformers.AutoTokenizer.from_pretrained",
"transformers.Trainer"
] |
[((357, 409), 'datasets.ClassLabel', 'ClassLabel', ([], {'num_classes': '(3)', 'names': "['CB', 'NCB', 'NA']"}), "(num_classes=3, names=['CB', 'NCB', 'NA'])\n", (367, 409), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3639, 3673), 'datasets.Dataset.from_dict', 'Dataset.from_dict', (['csds_train_dict'], {}), '(csds_train_dict)\n', (3656, 3673), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3694, 3727), 'datasets.Dataset.from_dict', 'Dataset.from_dict', (['csds_eval_dict'], {}), '(csds_eval_dict)\n', (3711, 3727), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3744, 3813), 'datasets.DatasetDict', 'DatasetDict', (["{'train': csds_train_dataset, 'eval': csds_eval_dataset}"], {}), "({'train': csds_train_dataset, 'eval': csds_eval_dataset})\n", (3755, 3813), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3969, 4017), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""bert-base-cased"""'], {}), "('bert-base-cased')\n", (3998, 4017), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4254, 4341), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['"""bert-base-cased"""'], {'num_labels': '(3)'}), "('bert-base-cased',\n num_labels=3)\n", (4304, 4341), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4347, 4370), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (4358, 4370), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((4834, 4875), 'transformers.TrainingArguments', 'TrainingArguments', (['"""../CSDS/test_trainer"""'], {}), "('../CSDS/test_trainer')\n", (4851, 4875), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4886, 5063), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'training_args', 'train_dataset': "tokenized_csds_datasets['train']", 'eval_dataset': "tokenized_csds_datasets['eval']", 'compute_metrics': 'compute_metrics'}), "(model=model, args=training_args, train_dataset=\n tokenized_csds_datasets['train'], eval_dataset=tokenized_csds_datasets[\n 'eval'], compute_metrics=compute_metrics)\n", (4893, 5063), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4690, 4716), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (4699, 4716), True, 'import numpy as np\n')]
|
"""
Test integrators with simple ODE
dy/dx = 3x^2y given x0 = 1, y0 = 2
ANALYTIC SOLUTION:
y = e^{x^3 + c}, c = ln(2) - 1
y(1,1.1,1.2,1.3,1.4) = [2,2.78471958461639,4.141869187709196,6.6203429951303265,11.440356871885081]
"""
# Import package, test suite, and other packages as needed
import numpy as np
from pycc.rt import integrators as ints
def f(x,y):
"""dy/dx = f(x,y) = 3x^2y"""
Y = 3.*x**2. * y
return Y
def chk_ode(ode):
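    # advance four steps of size h = 0.1 from (t0, y0) = (1, 2) with the given integrator and return all five points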
h = 0.1
ODE = ode(h)
t0 = 1
y0 = 2
y1 = ODE(f,t0,y0)
y2 = ODE(f,t0+h,y1)
y3 = ODE(f,t0+2*h,y2)
y4 = ODE(f,t0+3*h,y3)
return np.array([y0,y1,y2,y3,y4])
def test_rk4():
"""Test 4th-order Runge-Kutta"""
rk4 = chk_ode(ints.rk4)
ref = np.array([2,2.7846419118859376,4.141490537335979,6.618844434974082,11.434686303979237])
assert np.allclose(rk4,ref)
def test_rk38():
"""Test "corrected" 3rd-order Runge-Kutta"""
rk38 = chk_ode(ints.rk38)
ref = np.array([2,2.7846719015333337,4.141594947022453,6.619134913159302,11.435455703714204])
assert np.allclose(rk38,ref)
def test_rk3():
"""Test 3rd-order Runge-Kutta"""
rk3 = chk_ode(ints.rk3)
ref = np.array([2,2.783897725,4.137908208354427,6.60545045860959,11.38808439342214])
assert np.allclose(rk3,ref)
def test_rk2():
"""Test 2nd-order Runge-Kutta"""
rk2 = chk_ode(ints.rk2)
ref = np.array([2,2.7643999999999997,4.066743395,6.396857224546359,10.804576512405294])
assert np.allclose(rk2,ref)
def test_gl6():
"""Test 6th-order Gauss-Legendre"""
gl6 = chk_ode(ints.gl6)
ref = np.array([2,2.78364923694925,4.1371512621094695,6.603613786914487,11.383853535021142])
assert np.allclose(gl6,ref)
|
[
"numpy.array",
"numpy.allclose"
] |
[((611, 641), 'numpy.array', 'np.array', (['[y0, y1, y2, y3, y4]'], {}), '([y0, y1, y2, y3, y4])\n', (619, 641), True, 'import numpy as np\n'), ((730, 826), 'numpy.array', 'np.array', (['[2, 2.7846419118859376, 4.141490537335979, 6.618844434974082, \n 11.434686303979237]'], {}), '([2, 2.7846419118859376, 4.141490537335979, 6.618844434974082, \n 11.434686303979237])\n', (738, 826), True, 'import numpy as np\n'), ((830, 851), 'numpy.allclose', 'np.allclose', (['rk4', 'ref'], {}), '(rk4, ref)\n', (841, 851), True, 'import numpy as np\n'), ((958, 1054), 'numpy.array', 'np.array', (['[2, 2.7846719015333337, 4.141594947022453, 6.619134913159302, \n 11.435455703714204]'], {}), '([2, 2.7846719015333337, 4.141594947022453, 6.619134913159302, \n 11.435455703714204])\n', (966, 1054), True, 'import numpy as np\n'), ((1058, 1080), 'numpy.allclose', 'np.allclose', (['rk38', 'ref'], {}), '(rk38, ref)\n', (1069, 1080), True, 'import numpy as np\n'), ((1172, 1259), 'numpy.array', 'np.array', (['[2, 2.783897725, 4.137908208354427, 6.60545045860959, 11.38808439342214]'], {}), '([2, 2.783897725, 4.137908208354427, 6.60545045860959, \n 11.38808439342214])\n', (1180, 1259), True, 'import numpy as np\n'), ((1263, 1284), 'numpy.allclose', 'np.allclose', (['rk3', 'ref'], {}), '(rk3, ref)\n', (1274, 1284), True, 'import numpy as np\n'), ((1376, 1466), 'numpy.array', 'np.array', (['[2, 2.7643999999999997, 4.066743395, 6.396857224546359, 10.804576512405294]'], {}), '([2, 2.7643999999999997, 4.066743395, 6.396857224546359, \n 10.804576512405294])\n', (1384, 1466), True, 'import numpy as np\n'), ((1470, 1491), 'numpy.allclose', 'np.allclose', (['rk2', 'ref'], {}), '(rk2, ref)\n', (1481, 1491), True, 'import numpy as np\n'), ((1586, 1681), 'numpy.array', 'np.array', (['[2, 2.78364923694925, 4.1371512621094695, 6.603613786914487, 11.383853535021142\n ]'], {}), '([2, 2.78364923694925, 4.1371512621094695, 6.603613786914487, \n 11.383853535021142])\n', (1594, 1681), True, 'import numpy as np\n'), ((1685, 1706), 'numpy.allclose', 'np.allclose', (['gl6', 'ref'], {}), '(gl6, ref)\n', (1696, 1706), True, 'import numpy as np\n')]
|
# system
import os
from enum import Enum
# lib
import numpy as np
class GloVeSize(Enum):
tiny = 50
small = 100
medium = 200
large = 300
__DEFAULT_SIZE = GloVeSize.small
def get_pretrained_embedding_matrix(word_to_index,
vocab_size=10000,
glove_dir="./bin/GloVe",
use_cache_if_present=True,
cache_if_computed=True,
cache_dir='./bin/cache',
size=__DEFAULT_SIZE,
verbose=1):
"""
get pre-trained word embeddings from GloVe: https://github.com/stanfordnlp/GloVe
:param word_to_index: a word to index map of the corpus
:param vocab_size: the vocab size
:param glove_dir: the dir of glove
:param use_cache_if_present: whether to use a cached weight file if present
:param cache_if_computed: whether to cache the result if re-computed
:param cache_dir: the directory of the project's cache
:param size: an enumerated choice of GloVeSize
:param verbose: the verbosity level of logging
:return: a matrix of the embeddings
"""
def vprint(*args, with_arrow=True):
if verbose > 0:
if with_arrow:
print(">>", *args)
else:
print(*args)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_path = os.path.join(cache_dir, 'glove_%d_embedding_matrix.npy' % size.value)
if use_cache_if_present and os.path.isfile(cache_path):
return np.load(cache_path)
else:
vprint('computing embeddings', with_arrow=True)
embeddings_index = {}
size_value = size.value
f = open(os.path.join(glove_dir, 'glove.6B.' + str(size_value) + 'd.txt'),
encoding="ascii", errors='ignore')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
vprint('Found', len(embeddings_index), 'word vectors.')
embedding_matrix = np.random.normal(size=(vocab_size, size.value))
non = 0
for word, index in word_to_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
else:
non += 1
vprint(non, "words did not have mappings")
vprint(with_arrow=False)
if cache_if_computed:
np.save(cache_path, embedding_matrix)
return embedding_matrix
|
[
"numpy.random.normal",
"os.path.exists",
"os.makedirs",
"os.path.join",
"numpy.asarray",
"os.path.isfile",
"numpy.load",
"numpy.save"
] |
[((1503, 1572), 'os.path.join', 'os.path.join', (['cache_dir', "('glove_%d_embedding_matrix.npy' % size.value)"], {}), "(cache_dir, 'glove_%d_embedding_matrix.npy' % size.value)\n", (1515, 1572), False, 'import os\n'), ((1427, 1452), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (1441, 1452), False, 'import os\n'), ((1462, 1484), 'os.makedirs', 'os.makedirs', (['cache_dir'], {}), '(cache_dir)\n', (1473, 1484), False, 'import os\n'), ((1605, 1631), 'os.path.isfile', 'os.path.isfile', (['cache_path'], {}), '(cache_path)\n', (1619, 1631), False, 'import os\n'), ((1648, 1667), 'numpy.load', 'np.load', (['cache_path'], {}), '(cache_path)\n', (1655, 1667), True, 'import numpy as np\n'), ((2232, 2279), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(vocab_size, size.value)'}), '(size=(vocab_size, size.value))\n', (2248, 2279), True, 'import numpy as np\n'), ((2038, 2077), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (2048, 2077), True, 'import numpy as np\n'), ((2680, 2717), 'numpy.save', 'np.save', (['cache_path', 'embedding_matrix'], {}), '(cache_path, embedding_matrix)\n', (2687, 2717), True, 'import numpy as np\n')]
|
import numpy as np
arr = np.array([[2, 5], [1, 3]])
arr_inv = np.linalg.inv(arr)
print(arr_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat = np.matrix([[2, 5], [1, 3]])
mat_inv = np.linalg.inv(mat)
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat**-1
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat.I
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
result = mat * mat.I
print(result)
# [[1. 0.]
# [0. 1.]]
# print(arr.I)
# AttributeError: 'numpy.ndarray' object has no attribute 'I'
arr_s = np.array([[0, 0], [1, 3]])
# print(np.linalg.inv(arr_s))
# LinAlgError: Singular matrix
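# np.linalg.pinv computes the Moore-Penrose pseudo-inverse, which is defined even for singular matrices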
arr_pinv = np.linalg.pinv(arr_s)
print(arr_pinv)
# [[0. 0.1]
# [0. 0.3]]
print(arr_s @ arr_pinv)
# [[0. 0.]
# [0. 1.]]
print(np.linalg.pinv(arr_pinv))
# [[0. 0.]
# [1. 3.]]
print(np.linalg.inv(arr))
# [[ 3. -5.]
# [-1. 2.]]
print(np.linalg.pinv(arr))
# [[ 3. -5.]
# [-1. 2.]]
mat_s = np.mat([[0, 0], [1, 3]])
# print(np.linalg.inv(mat_s))
# LinAlgError: Singular matrix
# print(mat_s**-1)
# LinAlgError: Singular matrix
# print(mat_s.I)
# LinAlgError: Singular matrix
print(np.linalg.pinv(mat_s))
# [[0. 0.1]
# [0. 0.3]]
|
[
"numpy.mat",
"numpy.linalg.pinv",
"numpy.array",
"numpy.linalg.inv",
"numpy.matrix"
] |
[((26, 52), 'numpy.array', 'np.array', (['[[2, 5], [1, 3]]'], {}), '([[2, 5], [1, 3]])\n', (34, 52), True, 'import numpy as np\n'), ((64, 82), 'numpy.linalg.inv', 'np.linalg.inv', (['arr'], {}), '(arr)\n', (77, 82), True, 'import numpy as np\n'), ((132, 159), 'numpy.matrix', 'np.matrix', (['[[2, 5], [1, 3]]'], {}), '([[2, 5], [1, 3]])\n', (141, 159), True, 'import numpy as np\n'), ((171, 189), 'numpy.linalg.inv', 'np.linalg.inv', (['mat'], {}), '(mat)\n', (184, 189), True, 'import numpy as np\n'), ((498, 524), 'numpy.array', 'np.array', (['[[0, 0], [1, 3]]'], {}), '([[0, 0], [1, 3]])\n', (506, 524), True, 'import numpy as np\n'), ((599, 620), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr_s'], {}), '(arr_s)\n', (613, 620), True, 'import numpy as np\n'), ((885, 909), 'numpy.mat', 'np.mat', (['[[0, 0], [1, 3]]'], {}), '([[0, 0], [1, 3]])\n', (891, 909), True, 'import numpy as np\n'), ((718, 742), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr_pinv'], {}), '(arr_pinv)\n', (732, 742), True, 'import numpy as np\n'), ((774, 792), 'numpy.linalg.inv', 'np.linalg.inv', (['arr'], {}), '(arr)\n', (787, 792), True, 'import numpy as np\n'), ((828, 847), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr'], {}), '(arr)\n', (842, 847), True, 'import numpy as np\n'), ((1079, 1100), 'numpy.linalg.pinv', 'np.linalg.pinv', (['mat_s'], {}), '(mat_s)\n', (1093, 1100), True, 'import numpy as np\n')]
|
"""
This script was modified from https://github.com/ZhaoJ9014/face.evoLVe.PyTorch
"""
import os
import cv2
import bcolz
import numpy as np
import tqdm
from sklearn.model_selection import KFold
from scipy import interpolate
import math
from .utils import l2_norm
def get_val_pair(path, name):
carray = bcolz.carray(rootdir=os.path.join(path, name), mode='r')
issame = np.load('{}/{}_list.npy'.format(path, name))
return carray, issame
def get_val_data(lfw_data_path=None, agedb_path=None, cfp_path=None):
    lfw, lfw_issame, agedb_30, agedb_30_issame, cfp_fp, cfp_fp_issame = None, None, None, None, None, None
if lfw_data_path:
lfw, lfw_issame = get_val_pair(lfw_data_path, 'lfw')
if agedb_path:
agedb_30, agedb_30_issame = get_val_pair(agedb_path, 'agedb_30')
if cfp_path:
cfp_fp, cfp_fp_issame = get_val_pair(cfp_path, 'cfp_fp')
return lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame
def ccrop_batch(imgs):
assert len(imgs.shape) == 4
resized_imgs = np.array([cv2.resize(img, (128, 128)) for img in imgs])
ccropped_imgs = resized_imgs[:, 8:-8, 8:-8, :]
return ccropped_imgs
def hflip_batch(imgs):
assert len(imgs.shape) == 4
return imgs[:, :, ::-1, :]
def distance(embeddings1, embeddings2, distance_metric=0):
if distance_metric==0:
        # Euclidean distance
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff),1)
elif distance_metric==1:
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
else:
        raise ValueError('Undefined distance metric %d' % distance_metric)
return dist
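# A minimal sketch of the two metrics on toy embeddings (assumed shape (N, D)):
# e1 = np.array([[1.0, 0.0]]); e2 = np.array([[0.0, 1.0]])
# distance(e1, e2, distance_metric=0)  # squared Euclidean distance -> [2.]
# distance(e1, e2, distance_metric=1)  # angular distance scaled to [0, 1] -> [0.5]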
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame),
np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
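# A small worked sketch: with dist = np.array([0.3, 0.9]) and
# actual_issame = np.array([True, False]), a threshold of 0.5 gives
# tp=1, fp=0, tn=1, fn=0, hence tpr=1.0, fpr=0.0 and acc=1.0.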
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame,
nrof_folds=10, distance_metric=0):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
best_thresholds = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
dist = distance(embeddings1, embeddings2, distance_metric)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
best_thresholds[fold_idx] = thresholds[best_threshold_index]
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = \
calculate_accuracy(threshold,
dist[test_set],
actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index],
dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy, best_thresholds
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
dist = distance(embeddings1, embeddings2, distance_metric)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def evaluate(embeddings, actual_issame, nrof_folds=10, distance_metric=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, best_thresholds = calculate_roc(
thresholds, embeddings1, embeddings2, np.asarray(actual_issame),
nrof_folds=nrof_folds, distance_metric=distance_metric)
val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds, distance_metric=distance_metric)
return tpr, fpr, accuracy, best_thresholds, val, val_std, far
def perform_val(embedding_size, batch_size, model,
carray, issame, nrof_folds=10, is_ccrop=False, is_flip=True):
"""perform val"""
embeddings = np.zeros([len(carray), embedding_size])
for idx in tqdm.tqdm(range(0, len(carray), batch_size)):
batch = carray[idx:idx + batch_size]
batch = np.transpose(batch, [0, 2, 3, 1]) * 0.5 + 0.5
batch = batch[:, :, :, ::-1] # convert BGR to RGB
if is_ccrop:
batch = ccrop_batch(batch)
if is_flip:
fliped = hflip_batch(batch)
emb_batch = model(batch) + model(fliped)
embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
else:
emb_batch = model(batch)
embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
tpr, fpr, accuracy, best_thresholds, val, val_std, far = evaluate(
embeddings, issame, nrof_folds)
return accuracy.mean(), best_thresholds.mean(), accuracy.std(), val, val_std, far
|
[
"numpy.arccos",
"numpy.logical_not",
"scipy.interpolate.interp1d",
"numpy.linalg.norm",
"sklearn.model_selection.KFold",
"numpy.arange",
"numpy.mean",
"numpy.less",
"numpy.multiply",
"numpy.asarray",
"numpy.subtract",
"numpy.max",
"numpy.argmax",
"numpy.square",
"numpy.std",
"cv2.resize",
"numpy.transpose",
"numpy.logical_and",
"os.path.join",
"numpy.sum",
"numpy.zeros"
] |
[((1952, 1976), 'numpy.less', 'np.less', (['dist', 'threshold'], {}), '(dist, threshold)\n', (1959, 1976), True, 'import numpy as np\n'), ((2867, 2908), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nrof_folds', 'shuffle': '(False)'}), '(n_splits=nrof_folds, shuffle=False)\n', (2872, 2908), False, 'from sklearn.model_selection import KFold\n'), ((2921, 2960), 'numpy.zeros', 'np.zeros', (['(nrof_folds, nrof_thresholds)'], {}), '((nrof_folds, nrof_thresholds))\n', (2929, 2960), True, 'import numpy as np\n'), ((2972, 3011), 'numpy.zeros', 'np.zeros', (['(nrof_folds, nrof_thresholds)'], {}), '((nrof_folds, nrof_thresholds))\n', (2980, 3011), True, 'import numpy as np\n'), ((3027, 3047), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (3035, 3047), True, 'import numpy as np\n'), ((3072, 3092), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (3080, 3092), True, 'import numpy as np\n'), ((3109, 3130), 'numpy.arange', 'np.arange', (['nrof_pairs'], {}), '(nrof_pairs)\n', (3118, 3130), True, 'import numpy as np\n'), ((4165, 4181), 'numpy.mean', 'np.mean', (['tprs', '(0)'], {}), '(tprs, 0)\n', (4172, 4181), True, 'import numpy as np\n'), ((4192, 4208), 'numpy.mean', 'np.mean', (['fprs', '(0)'], {}), '(fprs, 0)\n', (4199, 4208), True, 'import numpy as np\n'), ((4333, 4357), 'numpy.less', 'np.less', (['dist', 'threshold'], {}), '(dist, threshold)\n', (4340, 4357), True, 'import numpy as np\n'), ((4532, 4553), 'numpy.sum', 'np.sum', (['actual_issame'], {}), '(actual_issame)\n', (4538, 4553), True, 'import numpy as np\n'), ((5085, 5126), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nrof_folds', 'shuffle': '(False)'}), '(n_splits=nrof_folds, shuffle=False)\n', (5090, 5126), False, 'from sklearn.model_selection import KFold\n'), ((5142, 5162), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (5150, 5162), True, 'import numpy as np\n'), ((5173, 5193), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (5181, 5193), True, 'import numpy as np\n'), ((5213, 5234), 'numpy.arange', 'np.arange', (['nrof_pairs'], {}), '(nrof_pairs)\n', (5222, 5234), True, 'import numpy as np\n'), ((5998, 6010), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (6005, 6010), True, 'import numpy as np\n'), ((6026, 6038), 'numpy.mean', 'np.mean', (['far'], {}), '(far)\n', (6033, 6038), True, 'import numpy as np\n'), ((6053, 6064), 'numpy.std', 'np.std', (['val'], {}), '(val)\n', (6059, 6064), True, 'import numpy as np\n'), ((6232, 6253), 'numpy.arange', 'np.arange', (['(0)', '(4)', '(0.01)'], {}), '(0, 4, 0.01)\n', (6241, 6253), True, 'import numpy as np\n'), ((1385, 1422), 'numpy.subtract', 'np.subtract', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (1396, 1422), True, 'import numpy as np\n'), ((1993, 2038), 'numpy.logical_and', 'np.logical_and', (['predict_issame', 'actual_issame'], {}), '(predict_issame, actual_issame)\n', (2007, 2038), True, 'import numpy as np\n'), ((3340, 3365), 'numpy.zeros', 'np.zeros', (['nrof_thresholds'], {}), '(nrof_thresholds)\n', (3348, 3365), True, 'import numpy as np\n'), ((3597, 3617), 'numpy.argmax', 'np.argmax', (['acc_train'], {}), '(acc_train)\n', (3606, 3617), True, 'import numpy as np\n'), ((4383, 4428), 'numpy.logical_and', 'np.logical_and', (['predict_issame', 'actual_issame'], {}), '(predict_issame, actual_issame)\n', (4397, 4428), True, 'import numpy as np\n'), ((4574, 4603), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', 
(4588, 4603), True, 'import numpy as np\n'), ((5465, 5490), 'numpy.zeros', 'np.zeros', (['nrof_thresholds'], {}), '(nrof_thresholds)\n', (5473, 5490), True, 'import numpy as np\n'), ((6427, 6452), 'numpy.asarray', 'np.asarray', (['actual_issame'], {}), '(actual_issame)\n', (6437, 6452), True, 'import numpy as np\n'), ((6602, 6627), 'numpy.asarray', 'np.asarray', (['actual_issame'], {}), '(actual_issame)\n', (6612, 6627), True, 'import numpy as np\n'), ((329, 353), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (341, 353), False, 'import os\n'), ((1043, 1070), 'cv2.resize', 'cv2.resize', (['img', '(128, 128)'], {}), '(img, (128, 128))\n', (1053, 1070), False, 'import cv2\n'), ((1445, 1460), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (1454, 1460), True, 'import numpy as np\n'), ((2087, 2116), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (2101, 2116), True, 'import numpy as np\n'), ((2150, 2180), 'numpy.logical_not', 'np.logical_not', (['predict_issame'], {}), '(predict_issame)\n', (2164, 2180), True, 'import numpy as np\n'), ((2213, 2242), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (2227, 2242), True, 'import numpy as np\n'), ((2276, 2306), 'numpy.logical_not', 'np.logical_not', (['predict_issame'], {}), '(predict_issame)\n', (2290, 2306), True, 'import numpy as np\n'), ((4487, 4516), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (4501, 4516), True, 'import numpy as np\n'), ((5679, 5696), 'numpy.max', 'np.max', (['far_train'], {}), '(far_train)\n', (5685, 5696), True, 'import numpy as np\n'), ((5726, 5785), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['far_train', 'thresholds'], {'kind': '"""slinear"""'}), "(far_train, thresholds, kind='slinear')\n", (5746, 5785), False, 'from scipy import interpolate\n'), ((1560, 1597), 'numpy.multiply', 'np.multiply', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (1571, 1597), True, 'import numpy as np\n'), ((1622, 1657), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings1'], {'axis': '(1)'}), '(embeddings1, axis=1)\n', (1636, 1657), True, 'import numpy as np\n'), ((1660, 1695), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings2'], {'axis': '(1)'}), '(embeddings2, axis=1)\n', (1674, 1695), True, 'import numpy as np\n'), ((1743, 1764), 'numpy.arccos', 'np.arccos', (['similarity'], {}), '(similarity)\n', (1752, 1764), True, 'import numpy as np\n'), ((7090, 7123), 'numpy.transpose', 'np.transpose', (['batch', '[0, 2, 3, 1]'], {}), '(batch, [0, 2, 3, 1])\n', (7102, 7123), True, 'import numpy as np\n')]
|
import os, random, numpy as np, copy
from torch.utils.data import Dataset
import torch
def seq_collate(data):
(past_traj, future_traj) = zip(*data)
past_traj = torch.stack(past_traj,dim=0)
future_traj = torch.stack(future_traj,dim=0)
data = {
'past_traj': past_traj,
'future_traj': future_traj,
'seq': 'nba',
}
return data
class NBADataset(Dataset):
"""Dataloder for the Trajectory datasets"""
def __init__(
self, obs_len=5, pred_len=10, training=True
):
super(NBADataset, self).__init__()
self.obs_len = obs_len
self.pred_len = pred_len
self.seq_len = self.obs_len + self.pred_len
if training:
data_root = 'datasets/nba/train.npy'
else:
data_root = 'datasets/nba/test.npy'
self.trajs = np.load(data_root)
self.trajs /= (94/28) # Turn to meters
if training:
self.trajs = self.trajs[:32500]
else:
self.trajs = self.trajs[:12500]
self.batch_len = len(self.trajs)
print(self.batch_len)
self.traj_abs = torch.from_numpy(self.trajs).type(torch.float)
self.traj_norm = torch.from_numpy(self.trajs-self.trajs[:,self.obs_len-1:self.obs_len]).type(torch.float)
self.traj_abs = self.traj_abs.permute(0,2,1,3)
self.traj_norm = self.traj_norm.permute(0,2,1,3)
# print(self.traj_abs.shape)
def __len__(self):
return self.batch_len
def __getitem__(self, index):
# print(self.traj_abs.shape)
past_traj = self.traj_abs[index, :, :self.obs_len, :]
future_traj = self.traj_abs[index, :, self.obs_len:, :]
out = [past_traj, future_traj]
return out
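# A minimal usage sketch, assuming datasets/nba/train.npy exists and holds
# trajectories shaped (num_sequences, seq_len, num_agents, 2):
# from torch.utils.data import DataLoader
# dataset = NBADataset(obs_len=5, pred_len=10, training=True)
# loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=seq_collate)
# batch = next(iter(loader))  # batch['past_traj'] has shape (32, num_agents, 5, 2)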
|
[
"torch.stack",
"torch.from_numpy",
"numpy.load"
] |
[((172, 201), 'torch.stack', 'torch.stack', (['past_traj'], {'dim': '(0)'}), '(past_traj, dim=0)\n', (183, 201), False, 'import torch\n'), ((219, 250), 'torch.stack', 'torch.stack', (['future_traj'], {'dim': '(0)'}), '(future_traj, dim=0)\n', (230, 250), False, 'import torch\n'), ((843, 861), 'numpy.load', 'np.load', (['data_root'], {}), '(data_root)\n', (850, 861), True, 'import os, random, numpy as np, copy\n'), ((1131, 1159), 'torch.from_numpy', 'torch.from_numpy', (['self.trajs'], {}), '(self.trajs)\n', (1147, 1159), False, 'import torch\n'), ((1203, 1278), 'torch.from_numpy', 'torch.from_numpy', (['(self.trajs - self.trajs[:, self.obs_len - 1:self.obs_len])'], {}), '(self.trajs - self.trajs[:, self.obs_len - 1:self.obs_len])\n', (1219, 1278), False, 'import torch\n')]
|
#! /usr/bin/python
# -*- coding: utf8 -*-
import os, time, random
import numpy as np
import scipy
import tensorflow as tf
import tensorlayer as tl
from model import *
from utils import *
from config import *
###====================== HYPER-PARAMETERS ===========================###
batch_size = config.train.batch_size
patch_size = config.train.in_patch_size
ni = int(np.sqrt(config.train.batch_size))
def compute_charbonnier_loss(tensor1, tensor2, is_mean=True):
epsilon = 1e-6
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.sqrt(tf.square(tf.subtract(tensor1,tensor2))+epsilon), [1, 2, 3]))
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.sqrt(tf.square(tf.subtract(tensor1,tensor2))+epsilon), [1, 2, 3]))
return loss
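# Note (sketch): the Charbonnier penalty sqrt((x - y)^2 + epsilon) is a smooth,
# differentiable approximation of the L1 loss; an equivalent per-pixel numpy form:
# charbonnier = lambda x, y, eps=1e-6: np.sqrt(np.square(x - y) + eps)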
def load_file_list():
train_hr_file_list = []
train_lr_file_list = []
valid_hr_file_list = []
valid_lr_file_list = []
directory = config.train.hr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
train_hr_file_list.append("%s%s"%(directory,filename))
directory = config.train.lr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
train_lr_file_list.append("%s%s"%(directory,filename))
directory = config.valid.hr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
valid_hr_file_list.append("%s%s"%(directory,filename))
directory = config.valid.lr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
valid_lr_file_list.append("%s%s"%(directory,filename))
return sorted(train_hr_file_list),sorted(train_lr_file_list),sorted(valid_hr_file_list),sorted(valid_lr_file_list)
def prepare_nn_data(hr_img_list, lr_img_list, idx_img=None):
i = np.random.randint(len(hr_img_list)) if (idx_img is None) else idx_img
input_image = get_imgs_fn(lr_img_list[i])
output_image = get_imgs_fn(hr_img_list[i])
scale = int(output_image.shape[0] / input_image.shape[0])
assert scale == config.model.scale
out_patch_size = patch_size * scale
input_batch = np.empty([batch_size,patch_size,patch_size,3])
output_batch = np.empty([batch_size,out_patch_size,out_patch_size,3])
for idx in range(batch_size):
in_row_ind = random.randint(0,input_image.shape[0]-patch_size)
in_col_ind = random.randint(0,input_image.shape[1]-patch_size)
input_cropped = augment_imgs_fn(input_image[in_row_ind:in_row_ind+patch_size,
in_col_ind:in_col_ind+patch_size])
input_cropped = normalize_imgs_fn(input_cropped)
input_cropped = np.expand_dims(input_cropped,axis=0)
input_batch[idx] = input_cropped
out_row_ind = in_row_ind * scale
out_col_ind = in_col_ind * scale
output_cropped = output_image[out_row_ind:out_row_ind+out_patch_size,
out_col_ind:out_col_ind+out_patch_size]
output_cropped = normalize_imgs_fn(output_cropped)
output_cropped = np.expand_dims(output_cropped,axis=0)
output_batch[idx] = output_cropped
return input_batch,output_batch
def train():
save_dir = "%s/%s_train"%(config.model.result_path,tl.global_flag['mode'])
checkpoint_dir = "%s"%(config.model.checkpoint_path)
tl.files.exists_or_mkdir(save_dir)
tl.files.exists_or_mkdir(checkpoint_dir)
###========================== DEFINE MODEL ============================###
t_image = tf.placeholder('float32', [batch_size, patch_size, patch_size, 3], name='t_image_input')
t_target_image = tf.placeholder('float32', [batch_size, patch_size*config.model.scale, patch_size*config.model.scale, 3], name='t_target_image')
t_target_image_down = tf.image.resize_images(t_target_image, size=[patch_size*2, patch_size*2], method=0, align_corners=False)
net_image2, net_grad2, net_image1, net_grad1 = LapSRN(t_image, is_train=True, reuse=False)
net_image2.print_params(False)
## test inference
net_image_test, net_grad_test, _, _ = LapSRN(t_image, is_train=False, reuse=True)
###========================== DEFINE TRAIN OPS ==========================###
loss2 = compute_charbonnier_loss(net_image2.outputs, t_target_image, is_mean=True)
loss1 = compute_charbonnier_loss(net_image1.outputs, t_target_image_down, is_mean=True)
g_loss = loss1 + loss2 * 4
g_vars = tl.layers.get_variables_with_name('LapSRN', True, True)
with tf.variable_scope('learning_rate'):
lr_v = tf.Variable(config.train.lr_init, trainable=False)
g_optim = tf.train.AdamOptimizer(lr_v, beta1=config.train.beta1).minimize(g_loss, var_list=g_vars)
###========================== RESTORE MODEL =============================###
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/params_{}.npz'.format(tl.global_flag['mode']), network=net_image2)
###========================== PRE-LOAD DATA ===========================###
train_hr_list,train_lr_list,valid_hr_list,valid_lr_list = load_file_list()
###========================== INTERMEDIATE RESULT ===============================###
sample_ind = 37
sample_input_imgs,sample_output_imgs = prepare_nn_data(valid_hr_list,valid_lr_list,sample_ind)
tl.vis.save_images(truncate_imgs_fn(sample_input_imgs), [ni, ni], save_dir+'/train_sample_input.png')
tl.vis.save_images(truncate_imgs_fn(sample_output_imgs), [ni, ni], save_dir+'/train_sample_output.png')
###========================== TRAINING ====================###
sess.run(tf.assign(lr_v, config.train.lr_init))
print(" ** learning rate: %f" % config.train.lr_init)
for epoch in range(config.train.n_epoch):
## update learning rate
if epoch != 0 and (epoch % config.train.decay_iter == 0):
lr_decay = config.train.lr_decay ** (epoch // config.train.decay_iter)
lr = config.train.lr_init * lr_decay
sess.run(tf.assign(lr_v, lr))
print(" ** learning rate: %f" % (lr))
epoch_time = time.time()
total_g_loss, n_iter = 0, 0
## load image data
idx_list = np.random.permutation(len(train_hr_list))
for idx_file in range(len(idx_list)):
step_time = time.time()
batch_input_imgs,batch_output_imgs = prepare_nn_data(train_hr_list,train_lr_list,idx_file)
errM, _ = sess.run([g_loss, g_optim], {t_image: batch_input_imgs, t_target_image: batch_output_imgs})
total_g_loss += errM
n_iter += 1
print("[*] Epoch: [%2d/%2d] time: %4.4fs, loss: %.8f" % (epoch, config.train.n_epoch, time.time() - epoch_time, total_g_loss/n_iter))
## save model and evaluation on sample set
if (epoch >= 0):
tl.files.save_npz(net_image2.all_params, name=checkpoint_dir+'/params_{}.npz'.format(tl.global_flag['mode']), sess=sess)
if config.train.dump_intermediate_result is True:
sample_out, sample_grad_out = sess.run([net_image_test.outputs,net_grad_test.outputs], {t_image: sample_input_imgs})#; print('gen sub-image:', out.shape, out.min(), out.max())
tl.vis.save_images(truncate_imgs_fn(sample_out), [ni, ni], save_dir+'/train_predict_%d.png' % epoch)
tl.vis.save_images(truncate_imgs_fn(np.abs(sample_grad_out)), [ni, ni], save_dir+'/train_grad_predict_%d.png' % epoch)
def test(file):
try:
img = get_imgs_fn(file)
except IOError:
print('cannot open %s'%(file))
else:
checkpoint_dir = config.model.checkpoint_path
save_dir = "%s/%s"%(config.model.result_path,tl.global_flag['mode'])
input_image = normalize_imgs_fn(img)
size = input_image.shape
print('Input size: %s,%s,%s'%(size[0],size[1],size[2]))
t_image = tf.placeholder('float32', [None,size[0],size[1],size[2]], name='input_image')
net_g, _, _, _ = LapSRN(t_image, is_train=False, reuse=False)
###========================== RESTORE G =============================###
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/params_train.npz', network=net_g)
###======================= TEST =============================###
start_time = time.time()
out = sess.run(net_g.outputs, {t_image: [input_image]})
print("took: %4.4fs" % (time.time() - start_time))
tl.files.exists_or_mkdir(save_dir)
tl.vis.save_image(truncate_imgs_fn(out[0,:,:,:]), save_dir+'/test_out.png')
tl.vis.save_image(input_image, save_dir+'/test_input.png')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', choices=['train','test'], default='train', help='select mode')
parser.add_argument('-f','--file', help='input file')
args = parser.parse_args()
tl.global_flag['mode'] = args.mode
if tl.global_flag['mode'] == 'train':
train()
elif tl.global_flag['mode'] == 'test':
if (args.file is None):
raise Exception("Please enter input file name for test mode")
test(args.file)
else:
raise Exception("Unknow --mode")
|
[
"tensorflow.image.resize_images",
"numpy.sqrt",
"tensorlayer.layers.initialize_global_variables",
"tensorlayer.files.load_and_assign_npz",
"os.listdir",
"argparse.ArgumentParser",
"tensorflow.placeholder",
"tensorlayer.layers.get_variables_with_name",
"tensorflow.assign",
"tensorlayer.vis.save_image",
"numpy.empty",
"tensorflow.ConfigProto",
"tensorflow.train.AdamOptimizer",
"random.randint",
"numpy.abs",
"tensorflow.variable_scope",
"tensorflow.Variable",
"tensorlayer.files.exists_or_mkdir",
"tensorflow.subtract",
"time.time",
"os.path.join",
"numpy.expand_dims"
] |
[((371, 403), 'numpy.sqrt', 'np.sqrt', (['config.train.batch_size'], {}), '(config.train.batch_size)\n', (378, 403), True, 'import numpy as np\n'), ((2259, 2308), 'numpy.empty', 'np.empty', (['[batch_size, patch_size, patch_size, 3]'], {}), '([batch_size, patch_size, patch_size, 3])\n', (2267, 2308), True, 'import numpy as np\n'), ((2325, 2382), 'numpy.empty', 'np.empty', (['[batch_size, out_patch_size, out_patch_size, 3]'], {}), '([batch_size, out_patch_size, out_patch_size, 3])\n', (2333, 2382), True, 'import numpy as np\n'), ((3499, 3533), 'tensorlayer.files.exists_or_mkdir', 'tl.files.exists_or_mkdir', (['save_dir'], {}), '(save_dir)\n', (3523, 3533), True, 'import tensorlayer as tl\n'), ((3538, 3578), 'tensorlayer.files.exists_or_mkdir', 'tl.files.exists_or_mkdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3562, 3578), True, 'import tensorlayer as tl\n'), ((3673, 3766), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[batch_size, patch_size, patch_size, 3]'], {'name': '"""t_image_input"""'}), "('float32', [batch_size, patch_size, patch_size, 3], name=\n 't_image_input')\n", (3687, 3766), True, 'import tensorflow as tf\n'), ((3783, 3919), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[batch_size, patch_size * config.model.scale, patch_size * config.model.\n scale, 3]'], {'name': '"""t_target_image"""'}), "('float32', [batch_size, patch_size * config.model.scale, \n patch_size * config.model.scale, 3], name='t_target_image')\n", (3797, 3919), True, 'import tensorflow as tf\n'), ((3937, 4050), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['t_target_image'], {'size': '[patch_size * 2, patch_size * 2]', 'method': '(0)', 'align_corners': '(False)'}), '(t_target_image, size=[patch_size * 2, patch_size * 2\n ], method=0, align_corners=False)\n', (3959, 4050), True, 'import tensorflow as tf\n'), ((4594, 4649), 'tensorlayer.layers.get_variables_with_name', 'tl.layers.get_variables_with_name', (['"""LapSRN"""', '(True)', '(True)'], {}), "('LapSRN', True, True)\n", (4627, 4649), True, 'import tensorlayer as tl\n'), ((5060, 5103), 'tensorlayer.layers.initialize_global_variables', 'tl.layers.initialize_global_variables', (['sess'], {}), '(sess)\n', (5097, 5103), True, 'import tensorlayer as tl\n'), ((9180, 9205), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9203, 9205), False, 'import argparse\n'), ((2438, 2490), 'random.randint', 'random.randint', (['(0)', '(input_image.shape[0] - patch_size)'], {}), '(0, input_image.shape[0] - patch_size)\n', (2452, 2490), False, 'import os, time, random\n'), ((2511, 2563), 'random.randint', 'random.randint', (['(0)', '(input_image.shape[1] - patch_size)'], {}), '(0, input_image.shape[1] - patch_size)\n', (2525, 2563), False, 'import os, time, random\n'), ((2816, 2853), 'numpy.expand_dims', 'np.expand_dims', (['input_cropped'], {'axis': '(0)'}), '(input_cropped, axis=0)\n', (2830, 2853), True, 'import numpy as np\n'), ((3225, 3263), 'numpy.expand_dims', 'np.expand_dims', (['output_cropped'], {'axis': '(0)'}), '(output_cropped, axis=0)\n', (3239, 3263), True, 'import numpy as np\n'), ((4664, 4698), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""learning_rate"""'], {}), "('learning_rate')\n", (4681, 4698), True, 'import tensorflow as tf\n'), ((4715, 4765), 'tensorflow.Variable', 'tf.Variable', (['config.train.lr_init'], {'trainable': '(False)'}), '(config.train.lr_init, trainable=False)\n', (4726, 4765), True, 'import tensorflow as tf\n'), ((5903, 5940), 
'tensorflow.assign', 'tf.assign', (['lr_v', 'config.train.lr_init'], {}), '(lr_v, config.train.lr_init)\n', (5912, 5940), True, 'import tensorflow as tf\n'), ((6391, 6402), 'time.time', 'time.time', ([], {}), '()\n', (6400, 6402), False, 'import os, time, random\n'), ((8198, 8283), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, size[0], size[1], size[2]]'], {'name': '"""input_image"""'}), "('float32', [None, size[0], size[1], size[2]], name='input_image'\n )\n", (8212, 8283), True, 'import tensorflow as tf\n'), ((8540, 8583), 'tensorlayer.layers.initialize_global_variables', 'tl.layers.initialize_global_variables', (['sess'], {}), '(sess)\n', (8577, 8583), True, 'import tensorlayer as tl\n'), ((8592, 8693), 'tensorlayer.files.load_and_assign_npz', 'tl.files.load_and_assign_npz', ([], {'sess': 'sess', 'name': "(checkpoint_dir + '/params_train.npz')", 'network': 'net_g'}), "(sess=sess, name=checkpoint_dir +\n '/params_train.npz', network=net_g)\n", (8620, 8693), True, 'import tensorlayer as tl\n'), ((8783, 8794), 'time.time', 'time.time', ([], {}), '()\n', (8792, 8794), False, 'import os, time, random\n'), ((8931, 8965), 'tensorlayer.files.exists_or_mkdir', 'tl.files.exists_or_mkdir', (['save_dir'], {}), '(save_dir)\n', (8955, 8965), True, 'import tensorlayer as tl\n'), ((9058, 9118), 'tensorlayer.vis.save_image', 'tl.vis.save_image', (['input_image', "(save_dir + '/test_input.png')"], {}), "(input_image, save_dir + '/test_input.png')\n", (9075, 9118), True, 'import tensorlayer as tl\n'), ((975, 996), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (985, 996), False, 'import os, time, random\n'), ((1184, 1205), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1194, 1205), False, 'import os, time, random\n'), ((1393, 1414), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1403, 1414), False, 'import os, time, random\n'), ((1602, 1623), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1612, 1623), False, 'import os, time, random\n'), ((4781, 4835), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr_v'], {'beta1': 'config.train.beta1'}), '(lr_v, beta1=config.train.beta1)\n', (4803, 4835), True, 'import tensorflow as tf\n'), ((4985, 5054), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (4999, 5054), True, 'import tensorflow as tf\n'), ((6598, 6609), 'time.time', 'time.time', ([], {}), '()\n', (6607, 6609), False, 'import os, time, random\n'), ((1015, 1041), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1027, 1041), False, 'import os, time, random\n'), ((1224, 1250), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1236, 1250), False, 'import os, time, random\n'), ((1433, 1459), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1445, 1459), False, 'import os, time, random\n'), ((1642, 1668), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1654, 1668), False, 'import os, time, random\n'), ((6298, 6317), 'tensorflow.assign', 'tf.assign', (['lr_v', 'lr'], {}), '(lr_v, lr)\n', (6307, 6317), True, 'import tensorflow as tf\n'), ((8461, 8530), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (8475, 8530), True, 
'import tensorflow as tf\n'), ((8891, 8902), 'time.time', 'time.time', ([], {}), '()\n', (8900, 8902), False, 'import os, time, random\n'), ((6987, 6998), 'time.time', 'time.time', ([], {}), '()\n', (6996, 6998), False, 'import os, time, random\n'), ((7682, 7705), 'numpy.abs', 'np.abs', (['sample_grad_out'], {}), '(sample_grad_out)\n', (7688, 7705), True, 'import numpy as np\n'), ((568, 597), 'tensorflow.subtract', 'tf.subtract', (['tensor1', 'tensor2'], {}), '(tensor1, tensor2)\n', (579, 597), True, 'import tensorflow as tf\n'), ((692, 721), 'tensorflow.subtract', 'tf.subtract', (['tensor1', 'tensor2'], {}), '(tensor1, tensor2)\n', (703, 721), True, 'import tensorflow as tf\n')]
|
import pandas as pd
import numpy as np
from typing import Tuple
from itertools import product
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
"""
Using cross validation on the random forest and returning
the average and standard deviation of the results.
"""
def rf_cross_val(train_data: pd.DataFrame, tags: pd.Series, rfparams: dict, fold=5):
f1_mac_lst = []
f1_mic_lst = []
# running the random forest "fold" times.
for i in range(fold):
train_and_val = train_test_split(train_data, tags, test_size=1 / fold)
f1_mac, f1_mic = rf_single_hyperparams(*train_and_val, rfparams=rfparams)
f1_mac_lst.append(f1_mac)
f1_mic_lst.append(f1_mic)
# computing average and std.
f1_mac_lst = np.array(f1_mac_lst)
mean_mac_f1 = f1_mac_lst.mean(axis=0)
std_mac_f1 = f1_mac_lst.std(axis=0)
f1_mic_lst = np.array(f1_mic_lst)
mean_mic_f1 = f1_mic_lst.mean(axis=0)
std_mic_f1 = f1_mic_lst.std(axis=0)
return mean_mac_f1, std_mac_f1, mean_mic_f1, std_mic_f1
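# A minimal usage sketch with hypothetical data (X: feature DataFrame, y: Series of labels in 0..8):
# params = {"n_estimators": 300, "max_features": "sqrt", "max_depth": 20,
#           "min_samples_split": 2, "min_samples_leaf": 1}
# mean_mac, std_mac, mean_mic, std_mic = rf_cross_val(X, y, params, fold=5)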
"""
A single run of the random forest.
"""
def rf_single_hyperparams(train_data: pd.DataFrame, test: pd.DataFrame, train_tag: pd.Series,
test_tag: pd.Series, rfparams: dict) -> Tuple[np.ndarray, np.ndarray]:
# first creating the singular model, then using it in a one VS rest model.
rf_single_model = RandomForestClassifier(bootstrap=True, n_jobs=-1, **rfparams)
rf_model = OneVsRestClassifier(rf_single_model).fit(train_data, train_tag)
# predicting and evaluating the results.
prediction = rf_model.predict(test)
return evaluate(prediction, test_tag)
"""
Calculating f1 scores, both macro and micro, as evaluation.
"""
def evaluate(prediction, tag) -> Tuple[np.ndarray, np.ndarray]:
f1_mac = f1_score(prediction, tag, labels=range(9), average="macro")
f1_mic = f1_score(prediction, tag, labels=range(9), average="micro")
return f1_mac, f1_mic
"""
Choosing the optimal parameters for the random forest using grid search.
"""
def choose_rf_params(df: pd.DataFrame, tags: pd.Series):
# the five parameters we are using to maximize.
n_estimators_lst = [int(x) for x in np.linspace(start=100, stop=1300, num=7)]
max_features_lst = ['log2', 'sqrt']
max_depth_lst = [int(x) for x in np.linspace(10, 100, num=10)]
min_split_lst = [2, 5, 10]
min_leaf_lst = [1, 2, 4]
maxmacf1 = 0
maxmicf1 = 0
# running on all possible combinations.
for n_est, max_feat, max_depth, min_splt, min_leaf in tqdm(product(n_estimators_lst,
max_features_lst, max_depth_lst, min_split_lst, min_leaf_lst), total=1260):
paramsgrid = {"n_estimators": n_est, "max_features": max_feat, "max_depth": max_depth,
"min_samples_split": min_splt, "min_samples_leaf": min_leaf}
# running the model with cross validation, to get a more accurate score.
mean_mac_f1, std_mac_f1, mean_mic_f1, std_mic_f1 = rf_cross_val(df, tags, paramsgrid)
# saving the best parameters and their score.
if mean_mic_f1 > maxmicf1:
maxmicf1 = mean_mic_f1
micf1std = std_mic_f1
f1_mic_params = paramsgrid
if mean_mac_f1 > maxmacf1:
maxmacf1 = mean_mac_f1
macf1std = std_mac_f1
f1_mac_params = paramsgrid
    # returning both the best f1 micro and f1 macro scores, and their respective parameters.
return maxmicf1, micf1std, f1_mic_params, maxmacf1, macf1std, f1_mac_params
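# A minimal end-to-end sketch on hypothetical toy data, for illustration only:
# X = pd.DataFrame(np.random.rand(200, 10))
# y = pd.Series(np.random.randint(0, 9, size=200))
# best = choose_rf_params(X, y)  # grid-searches all 1260 parameter combinations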
|
[
"sklearn.model_selection.train_test_split",
"itertools.product",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"numpy.linspace",
"sklearn.multiclass.OneVsRestClassifier"
] |
[((924, 944), 'numpy.array', 'np.array', (['f1_mac_lst'], {}), '(f1_mac_lst)\n', (932, 944), True, 'import numpy as np\n'), ((1045, 1065), 'numpy.array', 'np.array', (['f1_mic_lst'], {}), '(f1_mic_lst)\n', (1053, 1065), True, 'import numpy as np\n'), ((1544, 1605), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'bootstrap': '(True)', 'n_jobs': '(-1)'}), '(bootstrap=True, n_jobs=-1, **rfparams)\n', (1566, 1605), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((668, 722), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_data', 'tags'], {'test_size': '(1 / fold)'}), '(train_data, tags, test_size=1 / fold)\n', (684, 722), False, 'from sklearn.model_selection import train_test_split\n'), ((2700, 2791), 'itertools.product', 'product', (['n_estimators_lst', 'max_features_lst', 'max_depth_lst', 'min_split_lst', 'min_leaf_lst'], {}), '(n_estimators_lst, max_features_lst, max_depth_lst, min_split_lst,\n min_leaf_lst)\n', (2707, 2791), False, 'from itertools import product\n'), ((1621, 1657), 'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['rf_single_model'], {}), '(rf_single_model)\n', (1640, 1657), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((2349, 2389), 'numpy.linspace', 'np.linspace', ([], {'start': '(100)', 'stop': '(1300)', 'num': '(7)'}), '(start=100, stop=1300, num=7)\n', (2360, 2389), True, 'import numpy as np\n'), ((2468, 2496), 'numpy.linspace', 'np.linspace', (['(10)', '(100)'], {'num': '(10)'}), '(10, 100, num=10)\n', (2479, 2496), True, 'import numpy as np\n')]
|
"""
`icclim.models.frequency` wraps the concept of pandas frequency in order to resample
time series. `slice_mode` parameter of `icclim.index` is always converted to a
`Frequency`.
"""
import datetime
from enum import Enum
from typing import Any, Callable, List, Optional, Tuple, Union
import cftime
import numpy as np
import pandas as pd
import xarray as xr
from xarray.core.dataarray import DataArray
from icclim.icclim_exceptions import InvalidIcclimArgumentError
SliceMode = Union[Any, str, List[Union[str, Tuple, int]]]
def seasons_resampler(
month_list: List[int],
) -> Callable[[DataArray], Tuple[DataArray, DataArray]]:
"""
Seasonal resampling method generator.
Returns a callable of DataArray which will resample the data to
    a season composed of the given months.
    It also attaches the corresponding time_bounds.
Parameters
----------
month_list : List[int]
        List of months identified by `{1..12}`.
Returns
-------
function: Callable[[DataArray], DataArray]
function resampling the input da to the wanted season.
"""
def resampler(da: DataArray) -> Tuple[DataArray, DataArray]:
da_years = np.unique(da.time.dt.year)
seasons_acc: List[DataArray] = []
time_bounds = []
new_time_axis = []
start_month = month_list[0]
end_month = month_list[-1]
filtered_da = month_filter(da, month_list)
        # TODO, maybe raise a warning if the month_list is not made of consecutive months
# (case of user error)
for year in da_years:
if start_month > end_month:
int_year = year - 1
else:
int_year = year
first_time = filtered_da.time.values[0]
if isinstance(first_time, cftime.datetime):
start = cftime.datetime(
year, start_month, 1, calendar=first_time.calendar
)
end = cftime.datetime(
year, end_month + 1, 1, calendar=first_time.calendar
)
else:
start = pd.to_datetime(f"{int_year}-{start_month}")
end = pd.to_datetime(f"{year}-{end_month + 1}")
end = end - datetime.timedelta(days=1)
season = filtered_da.sel(time=slice(start, end)).sum("time")
new_time_axis.append(start + (end - start) / 2)
time_bounds.append([start, end])
seasons_acc.append(season)
seasons = xr.concat(seasons_acc, "time")
seasons.coords["time"] = ("time", new_time_axis)
time_bounds_da = DataArray(
data=time_bounds,
dims=["time", "bounds"],
coords=[("time", seasons.time.values), ("bounds", [0, 1])],
)
return seasons, time_bounds_da
return resampler
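# A minimal sketch of the generated resampler, assuming `da` is a daily DataArray
# with a "time" coordinate:
# resample_djf = seasons_resampler([12, 1, 2])
# djf_sums, djf_bounds = resample_djf(da)  # one summed value per Dec-Feb season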
def month_filter(da: DataArray, month_list: List[int]) -> DataArray:
return da.sel(time=da.time.dt.month.isin(month_list))
def _add_time_bounds(freq: str) -> Callable[[DataArray], Tuple[DataArray, DataArray]]:
def add_bounds(da: DataArray) -> Tuple[DataArray, DataArray]:
# da should already be resampled to freq
if isinstance(da.indexes.get("time"), xr.CFTimeIndex):
offset = xr.coding.cftime_offsets.to_offset(freq)
start = np.array(
[
cftime.datetime(
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
calendar=date.calendar,
)
for date in da.indexes.get("time")
]
)
end = start + offset
end = end - datetime.timedelta(days=1)
else:
offset = pd.tseries.frequencies.to_offset(freq)
start = pd.to_datetime(da.time.dt.floor("D"))
end = start + offset
end = end - pd.Timedelta(days=1)
da["time"] = start + (end - start) / 2
time_bounds_da = DataArray(
data=list(zip(start, end)),
dims=["time", "bounds"],
coords=[("time", da.time.values), ("bounds", [0, 1])],
)
return da, time_bounds_da
return add_bounds
class Frequency(Enum):
"""
The sampling frequency of the resulting dataset.
"""
MONTH = ("MS", ["month", "MS"], "monthly time series", _add_time_bounds("MS"))
""" Resample to monthly values"""
AMJJAS = (
"MS",
["AMJJAS"],
"summer half-year time series",
seasons_resampler([*range(4, 9)]),
)
""" Resample to summer half-year, from April to September included."""
ONDJFM = (
"MS",
["ONDJFM"],
"winter half-year time series",
seasons_resampler([10, 11, 12, 1, 2, 3]),
)
""" Resample to winter half-year, from October to March included."""
DJF = ("MS", ["DJF"], "winter time series", seasons_resampler([12, 1, 2]))
""" Resample to winter season, from December to February included."""
MAM = ("MS", ["MAM"], "spring time series", seasons_resampler([*range(3, 6)]))
""" Resample to spring season, from March to May included."""
JJA = ("MS", ["JJA"], "summer time series", seasons_resampler([*range(6, 9)]))
""" Resample to summer season, from June to Agust included."""
SON = ("MS", ["SON"], "autumn time series", seasons_resampler([*range(9, 12)]))
""" Resample to fall season, from September to November included."""
CUSTOM = ("MS", [], None, None)
""" Resample to custom values. Do not use as is, use `slice_mode` with month or season
keywords instead.
"""
YEAR = ("YS", ["year", "YS"], "annual time series", _add_time_bounds("YS"))
""" Resample to yearly values."""
def __init__(
self,
panda_time: str,
accepted_values: List[str],
description: Optional[str] = None,
post_processing: Optional[
Callable[[DataArray], Tuple[DataArray, DataArray]]
] = None,
):
self.panda_freq: str = panda_time
self.accepted_values: List[str] = accepted_values
self.description = description
self.post_processing = post_processing
@staticmethod
def lookup(slice_mode: SliceMode) -> Any:
if isinstance(slice_mode, Frequency):
return slice_mode
if isinstance(slice_mode, str):
return _get_frequency_from_string(slice_mode)
if isinstance(slice_mode, list):
return _get_frequency_from_list(slice_mode)
raise InvalidIcclimArgumentError(
f"Unknown frequency {slice_mode}."
f"Use a Frequency from {[f for f in Frequency]}"
)
def _get_frequency_from_string(slice_mode: str) -> Frequency:
for freq in Frequency:
if freq.name == slice_mode.upper() or slice_mode.upper() in map(
str.upper, freq.accepted_values
):
return freq
raise InvalidIcclimArgumentError(f"Unknown frequency {slice_mode}.")
def _get_frequency_from_list(slice_mode_list: List) -> Frequency:
if len(slice_mode_list) < 2:
raise InvalidIcclimArgumentError(
f"The given slice list {slice_mode_list}"
f" has a length of {len(slice_mode_list)}."
f" The maximum length here is 2."
)
sampling_freq = slice_mode_list[0]
months = slice_mode_list[1]
custom_freq = Frequency.CUSTOM
if sampling_freq == "month":
custom_freq.post_processing = lambda da: month_filter(da, months)
custom_freq.description = f"monthly time series (months: {months})"
elif sampling_freq == "season":
        if isinstance(months, tuple):
rearranged_months = months[1] + months[0]
custom_freq.post_processing = seasons_resampler(rearranged_months)
custom_freq.description = (
f"seasonal time series (season: {rearranged_months})"
)
else:
custom_freq.post_processing = seasons_resampler(months)
custom_freq.description = f"seasonal time series (season: {months})"
else:
raise InvalidIcclimArgumentError(
f"Unknown frequency {slice_mode_list}. "
"The sampling frequency must be one of {'season', 'month'}"
)
return custom_freq
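# A minimal lookup sketch covering the accepted slice_mode forms:
# Frequency.lookup("DJF") is Frequency.DJF                    # string keyword
# Frequency.lookup(["month", [11, 12, 1]])                    # Frequency.CUSTOM with a month filter
# Frequency.lookup(Frequency.YEAR) is Frequency.YEAR          # already a Frequency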
|
[
"xarray.coding.cftime_offsets.to_offset",
"xarray.core.dataarray.DataArray",
"numpy.unique",
"pandas.Timedelta",
"xarray.concat",
"pandas.tseries.frequencies.to_offset",
"cftime.datetime",
"icclim.icclim_exceptions.InvalidIcclimArgumentError",
"datetime.timedelta",
"pandas.to_datetime"
] |
[((7101, 7163), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode}."""'], {}), "(f'Unknown frequency {slice_mode}.')\n", (7127, 7163), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((1198, 1224), 'numpy.unique', 'np.unique', (['da.time.dt.year'], {}), '(da.time.dt.year)\n', (1207, 1224), True, 'import numpy as np\n'), ((2527, 2557), 'xarray.concat', 'xr.concat', (['seasons_acc', '"""time"""'], {}), "(seasons_acc, 'time')\n", (2536, 2557), True, 'import xarray as xr\n'), ((2640, 2756), 'xarray.core.dataarray.DataArray', 'DataArray', ([], {'data': 'time_bounds', 'dims': "['time', 'bounds']", 'coords': "[('time', seasons.time.values), ('bounds', [0, 1])]"}), "(data=time_bounds, dims=['time', 'bounds'], coords=[('time',\n seasons.time.values), ('bounds', [0, 1])])\n", (2649, 2756), False, 'from xarray.core.dataarray import DataArray\n'), ((6702, 6819), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode}.Use a Frequency from {[f for f in Frequency]}"""'], {}), "(\n f'Unknown frequency {slice_mode}.Use a Frequency from {[f for f in Frequency]}'\n )\n", (6728, 6819), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((3278, 3318), 'xarray.coding.cftime_offsets.to_offset', 'xr.coding.cftime_offsets.to_offset', (['freq'], {}), '(freq)\n', (3312, 3318), True, 'import xarray as xr\n'), ((3894, 3932), 'pandas.tseries.frequencies.to_offset', 'pd.tseries.frequencies.to_offset', (['freq'], {}), '(freq)\n', (3926, 3932), True, 'import pandas as pd\n'), ((8270, 8407), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode_list}. The sampling frequency must be one of {{\'season\', \'month\'}}"""'], {}), '(\n f"Unknown frequency {slice_mode_list}. 
The sampling frequency must be one of {{\'season\', \'month\'}}"\n )\n', (8296, 8407), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((1855, 1922), 'cftime.datetime', 'cftime.datetime', (['year', 'start_month', '(1)'], {'calendar': 'first_time.calendar'}), '(year, start_month, 1, calendar=first_time.calendar)\n', (1870, 1922), False, 'import cftime\n'), ((1983, 2052), 'cftime.datetime', 'cftime.datetime', (['year', '(end_month + 1)', '(1)'], {'calendar': 'first_time.calendar'}), '(year, end_month + 1, 1, calendar=first_time.calendar)\n', (1998, 2052), False, 'import cftime\n'), ((2133, 2176), 'pandas.to_datetime', 'pd.to_datetime', (['f"""{int_year}-{start_month}"""'], {}), "(f'{int_year}-{start_month}')\n", (2147, 2176), True, 'import pandas as pd\n'), ((2199, 2240), 'pandas.to_datetime', 'pd.to_datetime', (['f"""{year}-{end_month + 1}"""'], {}), "(f'{year}-{end_month + 1}')\n", (2213, 2240), True, 'import pandas as pd\n'), ((2265, 2291), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2283, 2291), False, 'import datetime\n'), ((3832, 3858), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3850, 3858), False, 'import datetime\n'), ((4048, 4068), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4060, 4068), True, 'import pandas as pd\n'), ((3387, 3500), 'cftime.datetime', 'cftime.datetime', (['date.year', 'date.month', 'date.day', 'date.hour', 'date.minute', 'date.second'], {'calendar': 'date.calendar'}), '(date.year, date.month, date.day, date.hour, date.minute,\n date.second, calendar=date.calendar)\n', (3402, 3500), False, 'import cftime\n')]
|
# coding=utf8
"""
@author: <NAME>
@date: 09/26/2019
@code description: It is a Python3 file to implement cosine similarity with TF-IDF and Word Embedding methods.
"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
glove_path = "../../data/glove.6B.50d.txt"
punctuation_list = ['.', ',', '!', '?', '@', '#', '$', '%', '^', '&', '*', '(', ')']
def tf_idf(tfidf_vectorizer, corpus):
"""
It is a function to use TF-IDF to vectorize corpus.
:param corpus: corpus to fit
:return: vector
"""
X = tfidf_vectorizer.fit_transform(corpus)
return X.toarray()
def calculate_cosine_similarity(vec, *args):
"""
It is a function to calculate cosine similarity.
:param vec: vector
:return: cosine similarity result
"""
return cosine_similarity(vec, args)
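# A worked sketch of the underlying formula cos(a, b) = a.b / (||a|| * ||b||):
# a = np.array([[1.0, 2.0]]); b = np.array([[2.0, 4.0]])
# cosine_similarity(a, b)  # -> [[1.]] because the two vectors are parallel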
def load_GloVe_model(path):
"""
It is a function to load GloVe model
:param path: model path
:return: model array
"""
print("Load GloVe Model.")
with open(path, 'r') as f:
content = f.readlines()
model = {}
for line in content:
splitLine = line.split()
word = splitLine[0]
embedding = np.array((splitLine[1:]))
model[word] = embedding
print("Done.", len(model), " words loaded!\n")
return model
def process(sentence, lemmatizer):
"""
It is a function to
:param sentence:
:return:
"""
res = []
# 1. Split
splitSentence = sentence.split()
# 2. To lower case
for word in splitSentence:
word = word.lower()
# 3. Lemmatize
word = lemmatizer.lemmatize(word)
# 4. Remove stop words
stopword_set = set(stopwords.words("english"))
if word in stopword_set:
continue
# 5. Remove punctuation
if word in punctuation_list:
continue
res.append(word)
return res
def get_glove_vec(sentence, lemmatizer, glove_model):
"""
It is a function to get glove vector for cosine similarity calculation.
    :param sentence: raw sentence to embed
    :param lemmatizer: WordNetLemmatizer instance used for preprocessing
    :param glove_model: GloVe model
    :return: list of per-word mean embedding values
"""
res = []
for word in process(sentence, lemmatizer):
try:
vec = glove_model.get(word).astype(float)
res.append(np.mean(vec))
except Exception:
continue
return res
def calculate_glove_cosine_similarity(s1, s2, lemmatizer, glove_model):
"""
It is a function to calculate GloVe embedding cosine similarity.
:param glove_model: GloVe model
:return: GloVe cosine similarity
"""
# 1. Get GloVe Vector
s1_vec = get_glove_vec(s1, lemmatizer, glove_model) # <List> object
s2_vec = get_glove_vec(s2, lemmatizer, glove_model)
# 2. Measure the length of vector
try:
if len(s1_vec) == len(s2_vec):
s1_array = np.array((s1_vec)).reshape(1, -1)
s2_array = np.array((s2_vec)).reshape(1, -1)
elif len(s1_vec) > len(s2_vec):
s1_array = np.array((s1_vec)).reshape(1, -1)
s2_array = np.zeros(shape=(1, len(s1_vec)))
s2_array[0, :len(s2_vec)] = s2_vec
else:
s2_array = np.array((s2_vec)).reshape(1, -1)
s1_array = np.zeros(shape=(1, len(s2_vec)))
s1_array[0, :len(s1_vec)] = s1_vec
assert s1_array.shape == s2_array.shape
s1_mean = np.mean(s1_array, axis=0).reshape(1, -1)
s2_mean = np.mean(s2_array, axis=0).reshape(1, -1)
return cosine_similarity(s1_mean, s2_mean)[0][0]
except Exception as e:
print(e)
def main():
corpus = ['The president greets the press in Chicago',
'Obama speaks to the media in Illinois']
s1 = 'The president greets the press in Chicago'
s2 = 'Obama speaks to the media in Illinois'
s3 = 'I love you'
s4 = 'We went to Starbucks to buy hazelnut lattee yesterday'
s5 = 'We often go to Starbucks to buy coffee and chat with each other.!!!!!!'
############## 1. TF-IDF ###############
tf_idf_vectorizer = TfidfVectorizer()
tf_idf_vec = tf_idf(tfidf_vectorizer=tf_idf_vectorizer,
corpus=corpus)
print("tf_idf_vec = ", tf_idf_vec)
print("tf_idf_vec.shape = ", tf_idf_vec.shape)
print("tf_idf_vectorizer.get_feature_names() = ", tf_idf_vectorizer.get_feature_names())
##### 2. TF-IDF Cosine Similarity ######
tfidf_cosine_res = cosine_similarity(tf_idf_vec)[0][1]
print("tfidf_cosine_res = ", tfidf_cosine_res)
print("\n")
########### 3. Lemmatization ###########
lemmatizer = WordNetLemmatizer()
########## 4. Load GloVe Model #########
glove_model = load_GloVe_model(glove_path) # len(glove_model) = 400000
###### 5. GloVe Cosine Similarity ######
res = calculate_glove_cosine_similarity(s1, s3, lemmatizer, glove_model)
print("res = ", res)
res1 = calculate_glove_cosine_similarity(s1, s2, lemmatizer, glove_model)
print("res1 = ", res1)
res2 = calculate_glove_cosine_similarity(s2, s3, lemmatizer, glove_model)
print("res2 = ", res2)
res3 = calculate_glove_cosine_similarity(s5, s4, lemmatizer, glove_model)
print("res3 = ", res3)
if __name__ == '__main__':
main()
|
[
"numpy.mean",
"nltk.corpus.stopwords.words",
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.stem.WordNetLemmatizer",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer"
] |
[((1062, 1090), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['vec', 'args'], {}), '(vec, args)\n', (1079, 1090), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((4364, 4381), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (4379, 4381), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4901, 4920), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (4918, 4920), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1446, 1469), 'numpy.array', 'np.array', (['splitLine[1:]'], {}), '(splitLine[1:])\n', (1454, 1469), True, 'import numpy as np\n'), ((1956, 1982), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1971, 1982), False, 'from nltk.corpus import stopwords\n'), ((4734, 4763), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tf_idf_vec'], {}), '(tf_idf_vec)\n', (4751, 4763), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2581, 2593), 'numpy.mean', 'np.mean', (['vec'], {}), '(vec)\n', (2588, 2593), True, 'import numpy as np\n'), ((3691, 3716), 'numpy.mean', 'np.mean', (['s1_array'], {'axis': '(0)'}), '(s1_array, axis=0)\n', (3698, 3716), True, 'import numpy as np\n'), ((3750, 3775), 'numpy.mean', 'np.mean', (['s2_array'], {'axis': '(0)'}), '(s2_array, axis=0)\n', (3757, 3775), True, 'import numpy as np\n'), ((3806, 3841), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['s1_mean', 's2_mean'], {}), '(s1_mean, s2_mean)\n', (3823, 3841), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3158, 3174), 'numpy.array', 'np.array', (['s1_vec'], {}), '(s1_vec)\n', (3166, 3174), True, 'import numpy as np\n'), ((3215, 3231), 'numpy.array', 'np.array', (['s2_vec'], {}), '(s2_vec)\n', (3223, 3231), True, 'import numpy as np\n'), ((3313, 3329), 'numpy.array', 'np.array', (['s1_vec'], {}), '(s1_vec)\n', (3321, 3329), True, 'import numpy as np\n'), ((3487, 3503), 'numpy.array', 'np.array', (['s2_vec'], {}), '(s2_vec)\n', (3495, 3503), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
This file contains implementations of the functions used to train a CNN model:
    train_cnn - Function used to facilitate the training of the Convolutional Neural Network model.
    test_cnn - Function used to facilitate the testing of the Convolutional Neural Network model.
"""
# Built-in/Generic Imports
import os
import time
# Library Imports
import torch
import numpy as np
import pandas as pd
from torch.cuda import amp
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.optim import SGD, LBFGS, lr_scheduler
from torch.utils.tensorboard import SummaryWriter
# Own Modules
from utils import log
from model import Classifier
from dataset import get_datasets
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, Selective Dermatology"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "3.0.0"
__maintainer = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def train_cnn(arguments, device):
"""
Function for training of the Convolutional neural network.
:param arguments: ArgumentParser Namespace object with arguments used for training.
:param device: PyTorch device that will be used for training.
:return: Lists of training and validation losses and an integer for the best performing epoch.
"""
# Loads a TensorBoard Summary Writer.
if arguments.tensorboard_dir != "":
writer = SummaryWriter(os.path.join(arguments.tensorboard_dir, arguments.task, arguments.experiment))
# Loads the training and validation data.
train_data, val_data, _ = get_datasets(arguments)
# Creates the training data loader using the dataset objects.
training_data_loader = DataLoader(train_data, batch_size=arguments.batch_size,
shuffle=True, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
# Creates the validation data loader using the dataset objects.
validation_data_loader = DataLoader(val_data, batch_size=arguments.batch_size,
shuffle=False, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
log(arguments, "Loaded Datasets\n")
# Initialises the classifier model.
classifier = Classifier(arguments.efficient_net)
# Sets the classifier to training mode.
classifier.train()
# Moves the classifier to the selected device.
classifier.to(device)
# Initialises the optimiser used to optimise the parameters of the model.
optimiser = SGD(params=classifier.parameters(), lr=arguments.starting_lr)
# Initialises the learning rate scheduler to adjust the learning rate during training.
scheduler = lr_scheduler.CyclicLR(optimiser, base_lr=arguments.starting_lr, max_lr=arguments.maximum_lr)
    # Initialises the gradient scaler used for 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
scaler = amp.GradScaler()
log(arguments, "Models Initialised")
# Declares the main logging variables for the training.
start_time = time.time()
losses, validation_losses, temperatures = [], [], []
best_loss, best_epoch, total_batches = 1e10, 0, 0
log(arguments, "Training Timer Started\n")
# The beginning of the main training loop.
for epoch in range(1, arguments.max_epochs + 1):
# Declares the logging variables for the epoch.
        epoch_acc, epoch_loss, epoch_risk, epoch_coverage, num_batches = 0, 0, 0, 0, 0
# Loops through the training data batches.
for images, labels in training_data_loader:
# Moves the images and labels to the selected device.
images = images.to(device)
labels = labels.to(device)
# Resets the gradients in the model.
optimiser.zero_grad()
# Perform training with 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Using the gradient scaler performs backward propagation.
scaler.scale(loss).backward()
# Update the weights of the model using the optimiser.
scaler.step(optimiser)
# Updates the scale factor of the gradient scaler.
scaler.update()
# Performs training with 32 bit precision.
else:
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Performs backward propagation.
loss.backward()
# Update the weights of the model using the optimiser.
optimiser.step()
# Updates the learning rate scheduler.
scheduler.step()
# Calculates the accuracy of the batch.
batch_accuracy = (logits.max(dim=1)[1] == labels).sum().double() / labels.shape[0]
# Calculates the selection scores for the validation predictions.
            selections = torch.max(F.softmax(logits, dim=1), 1)[0]
# Calculates the coverage for the batch.
batch_coverage = selections.mean()
# Calculates the log probability for the predictions and selections.
log_prob = -1. * F.log_softmax(logits, 1) * selections.view([labels.shape[0], 1])
# Calculates the selective risk for the batch using the selections and predictions.
batch_risk = log_prob.gather(1, labels.unsqueeze(1)).mean() / batch_coverage
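            # Note (assumption): this corresponds to the usual empirical selective risk, i.e. the
            # confidence-weighted cross-entropy of the batch normalised by the coverage (the mean selection
            # score), so samples the model is unsure about contribute less to the risk but lower the coverage.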
# Adds the number of batches, loss and accuracy to epoch sum.
num_batches += 1
epoch_loss += loss.item()
epoch_acc += batch_accuracy
epoch_coverage += batch_coverage
epoch_risk += batch_risk
# Writes the batch loss and accuracy to TensorBoard logger.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/batch", loss.item(), num_batches + total_batches)
writer.add_scalar("Accuracy/batch", batch_accuracy, num_batches + total_batches)
# Logs the details of the epoch progress.
if num_batches % arguments.log_interval == 0:
log(arguments, "Time: {}s\tTrain Epoch: {} [{}/{}] ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.6f}".format(
str(int(time.time() - start_time)).rjust(6, '0'), str(epoch).rjust(2, '0'),
str(num_batches * arguments.batch_size).rjust(len(str(len(train_data))), '0'),
len(train_data), 100. * num_batches / (len(train_data) / arguments.batch_size),
epoch_loss / num_batches, epoch_acc / num_batches))
# If the number of batches have been reached end epoch.
if num_batches == arguments.batches_per_epoch:
break
# Updates the total number of batches (used for logging).
total_batches += num_batches
# Writes epoch loss and accuracy to TensorBoard.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/train", epoch_loss / num_batches, epoch)
writer.add_scalar("Accuracy/train", epoch_acc / num_batches, epoch)
writer.add_scalar("Coverage/train", epoch_coverage / num_batches, epoch)
writer.add_scalar("Selective Risk/train", epoch_risk / num_batches, epoch)
# Declares the logging variables for validation.
validation_acc, validation_loss, validation_risk, validation_coverage, validation_batches = 0, 0, 0, 0, 0
logit_list, label_list = [], []
temperature = torch.nn.Parameter(torch.ones(1, device=device))
temp_optimiser = LBFGS([temperature], lr=0.01, max_iter=1000, line_search_fn="strong_wolfe")
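        # Note (assumption): this temperature parameter implements temperature scaling for confidence
        # calibration; after the validation pass below it is fitted with LBFGS by minimising the cross-entropy
        # of the temperature-divided validation logits, e.g. calibrated probabilities would then be obtained
        # as F.softmax(logits / temperature, dim=1). train_cnn returns the temperature of the best epoch.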
# Performs the validation epoch with no gradient calculations.
with torch.no_grad():
# Loops through the training data batches.
for images, labels in validation_data_loader:
# Moves the images and labels to the selected device.
images = images.to(device)
labels = labels.to(device)
# Performs forward propagation using 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Performs forward propagation using 32 bit precision.
else:
# Performs forward propagation with the model.
                    logits = classifier(images, dropout=False)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
logit_list.append(logits)
label_list.append(labels)
# Calculates the accuracy of the batch.
batch_accuracy = (logits.max(dim=1)[1] == labels).sum().double() / labels.shape[0]
# Calculates the selection scores for the validation predictions.
                selections = torch.max(F.softmax(logits, dim=1), 1)[0]
# Calculates the coverage for the batch.
batch_coverage = selections.mean()
# Calculates the log probability for the predictions and selections.
log_prob = -1. * F.log_softmax(logits, 1) * selections.view([labels.shape[0], 1])
# Calculates the selective risk for the batch using the selections and predictions.
batch_risk = log_prob.gather(1, labels.unsqueeze(1)).mean() / batch_coverage
# Adds the number of batches, loss and accuracy to validation sum.
validation_batches += 1
validation_loss += loss.item()
validation_acc += batch_accuracy
validation_coverage += batch_coverage
validation_risk += batch_risk
# If the number of batches have been reached end validation.
if validation_batches == arguments.batches_per_epoch:
break
logit_list = torch.cat(logit_list).to(device)
label_list = torch.cat(label_list).to(device)
        def _eval():
            temp_optimiser.zero_grad()
            temp_loss = F.cross_entropy(torch.div(logit_list, temperature), label_list)
            temp_loss.backward()
            return temp_loss
temp_optimiser.step(_eval)
temperatures.append(temperature.item())
# Writes validation loss and accuracy to TensorBoard.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/validation", validation_loss / validation_batches, epoch)
writer.add_scalar("Accuracy/validation", validation_acc / validation_batches, epoch)
writer.add_scalar("Coverage/validation", validation_coverage / validation_batches, epoch)
writer.add_scalar("Selective Risk/validation", validation_risk / validation_batches, epoch)
# Adds the training and validation losses to their respective lists.
losses.append(epoch_loss / num_batches)
validation_losses.append(validation_loss / validation_batches)
# Logs the details of the training epoch.
        log(arguments, "\nEpoch: {}\tTraining Loss: {:.6f}\tTraining Accuracy: {:.6f}\t"
"Training Coverage: {:.6f}\tTraining Selective Risk: {:.6f}\n"
"Validation Loss: {:.6f}\tValidation Accuracy: {:.6f}\t"
"Validation Coverage: {:.6f}\tValidation Selective Risk: {:.6f}\n".
format(epoch, losses[-1], epoch_acc / num_batches, epoch_coverage / num_batches, epoch_risk / num_batches,
validation_losses[-1], validation_acc / validation_batches,
validation_coverage / validation_batches, validation_risk / validation_batches))
# If the current epoch has the best validation loss then save the model with the prefix best.
if validation_losses[-1] < best_loss:
best_loss = validation_losses[-1]
best_epoch = epoch
classifier.save_model(arguments.model_dir, arguments.experiment)
# Saves the model with the current epoch as the prefix.
classifier.save_model(arguments.model_dir, arguments.experiment, str(epoch))
# Checks if the training has performed the minimum number of epochs.
if epoch >= arguments.min_epochs:
# Calculates the generalised validation loss.
            g_loss = 100 * ((validation_losses[-1] / min(validation_losses[:-1])) - 1)
# Calculates the training progress using a window over the training losses.
t_progress = 1000 * ((sum(losses[-(arguments.window + 1): - 1]) /
(arguments.window * min(losses[-(arguments.window + 1): - 1]))) - 1)
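            # Note (assumption): g_loss and t_progress correspond to the generalisation loss GL(t) and the
            # training progress P_k(t) of Prechelt's early-stopping criteria; training stops once the relative
            # rise of the validation loss is no longer justified by the progress still made on the training loss.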
# Compares the generalised loss and training progress against a selected target value.
if g_loss / t_progress > arguments.stop_target:
break
# Logs the final training information.
log(arguments, f"\nTraining finished after {epoch} epochs in {int(time.time() - start_time)}s")
log(arguments, f"Best Epoch {best_epoch} with a temperature of {temperatures[best_epoch - 1]}")
    # Returns the calibration temperature of the best performing epoch.
return temperatures[best_epoch - 1]
def test_cnn(arguments, device):
"""
    Function for testing the Convolutional neural network and generating csv files with all predictions.
    :param arguments: ArgumentParser Namespace object with arguments used for testing.
    :param device: PyTorch device that will be used for testing.
    :return: None. The predictions and selection scores are written to csv files in the output directory.
"""
# Loads the training and validation data.
_, _, test_data = get_datasets(arguments)
# Creates the validation data loader using the dataset objects.
testing_data_loader = DataLoader(test_data, batch_size=arguments.batch_size,
shuffle=False, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
log(arguments, "Loaded Datasets\n")
# Initialises the classifier model.
classifier = Classifier(arguments.efficient_net, pretrained=False)
# Loads the trained model.
classifier.load_state_dict(torch.load(os.path.join(arguments.model_dir, f"{arguments.experiment}_cnn_best.pt")))
# Sets the classifier to evaluation mode.
classifier.eval()
# Moves the classifier to the selected device.
classifier.to(device)
test_labels, testing_batches = [], 0
test_sr_mal, test_sr_ben, test_sr_selections = [], [], []
test_tmp_mal, test_tmp_ben, test_tmp_selections = [], [], []
test_mc_mal, test_mc_ben, test_mc_selections = [], [], []
with torch.no_grad():
for images, labels in testing_data_loader:
images = images.to(device)
labels = labels.cpu().numpy()
# Performs forward propagation using 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
# Performs forward propagation using 32 bit precision.
else:
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
sr_predictions = F.softmax(logits, dim=1).cpu().numpy()
sr_selections = np.amax(sr_predictions, axis=1)
test_sr_mal += sr_predictions[:, 0].tolist()
test_sr_ben += sr_predictions[:, 1].tolist()
test_sr_selections += sr_selections.tolist()
tmp_predictions = F.softmax(torch.div(logits, arguments.temperature), dim=1).cpu().numpy()
tmp_selections = np.amax(tmp_predictions, axis=1)
test_tmp_mal += tmp_predictions[:, 0].tolist()
test_tmp_ben += tmp_predictions[:, 1].tolist()
test_tmp_selections += tmp_selections.tolist()
mc_predictions = []
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
for _ in range(arguments.drop_iterations):
mc_predictions.append(classifier(images, dropout=True))
else:
for _ in range(arguments.drop_iterations):
mc_predictions.append(classifier(images, dropout=True))
mc_predictions = torch.stack(mc_predictions)
mc_predictions = F.softmax(mc_predictions, dim=2).cpu().numpy()
mc_selections = [np.var(mc_predictions[:, i, 0]) for i in range(mc_predictions.shape[1])]
mc_predictions = np.mean(mc_predictions, 0)
test_mc_mal += mc_predictions[:, 0].tolist()
test_mc_ben += mc_predictions[:, 1].tolist()
test_mc_selections += mc_selections
test_labels += labels.tolist()
testing_batches += 1
# If the number of batches have been reached end validation.
if testing_batches == arguments.batches_per_epoch:
break
filenames = [os.path.basename(file_path)[:-4] for file_path in test_data.filenames]
sr_output = pd.DataFrame({"image": filenames[:len(test_labels)],
"label": test_labels,
"mal": test_sr_mal,
"ben": test_sr_ben,
"sel": test_sr_selections})
tmp_output = pd.DataFrame({"image": filenames[:len(test_labels)],
"label": test_labels,
"mal": test_tmp_mal,
"ben": test_tmp_ben,
"sel": test_tmp_selections})
mc_output = pd.DataFrame({"image": filenames[:len(test_labels)],
"label": test_labels,
"mal": test_mc_mal,
"ben": test_mc_ben,
"sel": test_mc_selections})
os.makedirs(arguments.output_dir, exist_ok=True)
sr_output.to_csv(os.path.join(arguments.output_dir, f"{arguments.experiment}_sr_output.csv"), index=False)
tmp_output.to_csv(os.path.join(arguments.output_dir, f"{arguments.experiment}_tmp_output.csv"), index=False)
mc_output.to_csv(os.path.join(arguments.output_dir, f"{arguments.experiment}_mc_output.csv"), index=False)
|
[
"model.Classifier",
"torch.nn.functional.softmax",
"numpy.mean",
"torch.cuda.amp.GradScaler",
"torch.optim.lr_scheduler.CyclicLR",
"torch.cuda.amp.autocast",
"utils.log",
"torch.nn.functional.log_softmax",
"time.time",
"torch.cat",
"torch.device",
"os.makedirs",
"torch.stack",
"os.path.join",
"dataset.get_datasets",
"torch.optim.LBFGS",
"os.path.basename",
"torch.utils.data.DataLoader",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"torch.div",
"numpy.amax",
"numpy.var",
"torch.ones"
] |
[((1630, 1653), 'dataset.get_datasets', 'get_datasets', (['arguments'], {}), '(arguments)\n', (1642, 1653), False, 'from dataset import get_datasets\n'), ((1748, 1892), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(True)', 'num_workers': 'arguments.data_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(train_data, batch_size=arguments.batch_size, shuffle=True,\n num_workers=arguments.data_workers, pin_memory=False, drop_last=False)\n', (1758, 1892), False, 'from torch.utils.data import DataLoader\n'), ((2063, 2206), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(False)', 'num_workers': 'arguments.data_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(val_data, batch_size=arguments.batch_size, shuffle=False,\n num_workers=arguments.data_workers, pin_memory=False, drop_last=False)\n', (2073, 2206), False, 'from torch.utils.data import DataLoader\n'), ((2288, 2323), 'utils.log', 'log', (['arguments', '"""Loaded Datasets\n"""'], {}), "(arguments, 'Loaded Datasets\\n')\n", (2291, 2323), False, 'from utils import log\n'), ((2382, 2417), 'model.Classifier', 'Classifier', (['arguments.efficient_net'], {}), '(arguments.efficient_net)\n', (2392, 2417), False, 'from model import Classifier\n'), ((2829, 2926), 'torch.optim.lr_scheduler.CyclicLR', 'lr_scheduler.CyclicLR', (['optimiser'], {'base_lr': 'arguments.starting_lr', 'max_lr': 'arguments.maximum_lr'}), '(optimiser, base_lr=arguments.starting_lr, max_lr=\n arguments.maximum_lr)\n', (2850, 2926), False, 'from torch.optim import SGD, LBFGS, lr_scheduler\n'), ((3095, 3131), 'utils.log', 'log', (['arguments', '"""Models Initialised"""'], {}), "(arguments, 'Models Initialised')\n", (3098, 3131), False, 'from utils import log\n'), ((3210, 3221), 'time.time', 'time.time', ([], {}), '()\n', (3219, 3221), False, 'import time\n'), ((3338, 3380), 'utils.log', 'log', (['arguments', '"""Training Timer Started\n"""'], {}), "(arguments, 'Training Timer Started\\n')\n", (3341, 3380), False, 'from utils import log\n'), ((13832, 13936), 'utils.log', 'log', (['arguments', 'f"""Best Epoch {best_epoch} with a temperature of {temperatures[best_epoch - 1]}"""'], {}), "(arguments,\n f'Best Epoch {best_epoch} with a temperature of {temperatures[best_epoch - 1]}'\n )\n", (13835, 13936), False, 'from utils import log\n'), ((14551, 14574), 'dataset.get_datasets', 'get_datasets', (['arguments'], {}), '(arguments)\n', (14563, 14574), False, 'from dataset import get_datasets\n'), ((14670, 14814), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(False)', 'num_workers': 'arguments.data_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(test_data, batch_size=arguments.batch_size, shuffle=False,\n num_workers=arguments.data_workers, pin_memory=False, drop_last=False)\n', (14680, 14814), False, 'from torch.utils.data import DataLoader\n'), ((14890, 14925), 'utils.log', 'log', (['arguments', '"""Loaded Datasets\n"""'], {}), "(arguments, 'Loaded Datasets\\n')\n", (14893, 14925), False, 'from utils import log\n'), ((14984, 15037), 'model.Classifier', 'Classifier', (['arguments.efficient_net'], {'pretrained': '(False)'}), '(arguments.efficient_net, pretrained=False)\n', (14994, 15037), False, 'from model import Classifier\n'), ((18974, 19022), 'os.makedirs', 'os.makedirs', (['arguments.output_dir'], {'exist_ok': '(True)'}), '(arguments.output_dir, 
exist_ok=True)\n', (18985, 19022), False, 'import os\n'), ((3073, 3089), 'torch.cuda.amp.GradScaler', 'amp.GradScaler', ([], {}), '()\n', (3087, 3089), False, 'from torch.cuda import amp\n'), ((8166, 8241), 'torch.optim.LBFGS', 'LBFGS', (['[temperature]'], {'lr': '(0.01)', 'max_iter': '(1000)', 'line_search_fn': '"""strong_wolfe"""'}), "([temperature], lr=0.01, max_iter=1000, line_search_fn='strong_wolfe')\n", (8171, 8241), False, 'from torch.optim import SGD, LBFGS, lr_scheduler\n'), ((15575, 15590), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15588, 15590), False, 'import torch\n'), ((19045, 19120), 'os.path.join', 'os.path.join', (['arguments.output_dir', 'f"""{arguments.experiment}_sr_output.csv"""'], {}), "(arguments.output_dir, f'{arguments.experiment}_sr_output.csv')\n", (19057, 19120), False, 'import os\n'), ((19157, 19233), 'os.path.join', 'os.path.join', (['arguments.output_dir', 'f"""{arguments.experiment}_tmp_output.csv"""'], {}), "(arguments.output_dir, f'{arguments.experiment}_tmp_output.csv')\n", (19169, 19233), False, 'import os\n'), ((19269, 19344), 'os.path.join', 'os.path.join', (['arguments.output_dir', 'f"""{arguments.experiment}_mc_output.csv"""'], {}), "(arguments.output_dir, f'{arguments.experiment}_mc_output.csv')\n", (19281, 19344), False, 'import os\n'), ((1474, 1551), 'os.path.join', 'os.path.join', (['arguments.tensorboard_dir', 'arguments.task', 'arguments.experiment'], {}), '(arguments.tensorboard_dir, arguments.task, arguments.experiment)\n', (1486, 1551), False, 'import os\n'), ((3035, 3054), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3047, 3054), False, 'import torch\n'), ((8111, 8139), 'torch.ones', 'torch.ones', (['(1)'], {'device': 'device'}), '(1, device=device)\n', (8121, 8139), False, 'import torch\n'), ((8327, 8342), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8340, 8342), False, 'import torch\n'), ((15112, 15184), 'os.path.join', 'os.path.join', (['arguments.model_dir', 'f"""{arguments.experiment}_cnn_best.pt"""'], {}), "(arguments.model_dir, f'{arguments.experiment}_cnn_best.pt')\n", (15124, 15184), False, 'import os\n'), ((16346, 16377), 'numpy.amax', 'np.amax', (['sr_predictions'], {'axis': '(1)'}), '(sr_predictions, axis=1)\n', (16353, 16377), True, 'import numpy as np\n'), ((16684, 16716), 'numpy.amax', 'np.amax', (['tmp_predictions'], {'axis': '(1)'}), '(tmp_predictions, axis=1)\n', (16691, 16716), True, 'import numpy as np\n'), ((17368, 17395), 'torch.stack', 'torch.stack', (['mc_predictions'], {}), '(mc_predictions)\n', (17379, 17395), False, 'import torch\n'), ((17606, 17632), 'numpy.mean', 'np.mean', (['mc_predictions', '(0)'], {}), '(mc_predictions, 0)\n', (17613, 17632), True, 'import numpy as np\n'), ((18050, 18077), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (18066, 18077), False, 'import os\n'), ((4950, 4981), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (4965, 4981), True, 'from torch.nn import functional as F\n'), ((10776, 10797), 'torch.cat', 'torch.cat', (['logit_list'], {}), '(logit_list)\n', (10785, 10797), False, 'import torch\n'), ((10830, 10851), 'torch.cat', 'torch.cat', (['label_list'], {}), '(label_list)\n', (10839, 10851), False, 'import torch\n'), ((10925, 10959), 'torch.div', 'torch.div', (['logit_list', 'temperature'], {}), '(logit_list, temperature)\n', (10934, 10959), False, 'import torch\n'), ((17503, 17534), 'numpy.var', 'np.var', (['mc_predictions[:, i, 0]'], {}), 
'(mc_predictions[:, i, 0])\n', (17509, 17534), True, 'import numpy as np\n'), ((4068, 4087), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4080, 4087), False, 'import torch\n'), ((4110, 4124), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (4122, 4124), False, 'from torch.cuda import amp\n'), ((4327, 4358), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (4342, 4358), True, 'from torch.nn import functional as F\n'), ((5512, 5529), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {}), '(logits)\n', (5521, 5529), True, 'from torch.nn import functional as F\n'), ((5749, 5773), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (5762, 5773), True, 'from torch.nn import functional as F\n'), ((9353, 9384), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (9368, 9384), True, 'from torch.nn import functional as F\n'), ((15850, 15869), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (15862, 15869), False, 'import torch\n'), ((15892, 15906), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (15904, 15906), False, 'from torch.cuda import amp\n'), ((16984, 17003), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16996, 17003), False, 'import torch\n'), ((17026, 17040), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (17038, 17040), False, 'from torch.cuda import amp\n'), ((8746, 8765), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8758, 8765), False, 'import torch\n'), ((8792, 8806), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (8804, 8806), False, 'from torch.cuda import amp\n'), ((9026, 9057), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (9041, 9057), True, 'from torch.nn import functional as F\n'), ((9748, 9765), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {}), '(logits)\n', (9757, 9765), True, 'from torch.nn import functional as F\n'), ((10001, 10025), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (10014, 10025), True, 'from torch.nn import functional as F\n'), ((13798, 13809), 'time.time', 'time.time', ([], {}), '()\n', (13807, 13809), False, 'import time\n'), ((16278, 16302), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (16287, 16302), True, 'from torch.nn import functional as F\n'), ((17426, 17458), 'torch.nn.functional.softmax', 'F.softmax', (['mc_predictions'], {'dim': '(2)'}), '(mc_predictions, dim=2)\n', (17435, 17458), True, 'from torch.nn import functional as F\n'), ((16591, 16631), 'torch.div', 'torch.div', (['logits', 'arguments.temperature'], {}), '(logits, arguments.temperature)\n', (16600, 16631), False, 'import torch\n'), ((6832, 6843), 'time.time', 'time.time', ([], {}), '()\n', (6841, 6843), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 12:47:00 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import f1_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.preprocessing import LabelEncoder
def ap_cluster_k(x, K, preference_init=-1.0, max_iter=30,
c=None, iter_finetune=10):
'''
    Clustering of x by affinity propagation such that the number of clusters is K.
args:
x (ndarray):
Data matrix.
K (int):
Target number of clusters.
max_iter (int):
Number of trials for bisection search.
c (ndarray, optional):
Class labels of x. If this parameter is specified, the function
            tries to find a better solution by random search.
iter_finetune (int):
Number of steps for the random search.
'''
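    # Note (assumption): affinity propagation tends to return more clusters as the (negative) preference moves
    # towards zero, so the number of clusters is treated as (roughly) monotone in the preference and a bisection
    # search over the preference can home in on a value that yields exactly K clusters.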
# first, search rough lower bound of the preference
assert preference_init < 0, "preference_init must be negative."
p = float(preference_init) # preference parameter
p_upper = 0
for i in range(5):
        ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
if k_current > K:
p_upper = p
k_upper = k_current
p *= 10
else:
p_lower = p
k_lower = k_current
break
else:
        raise RuntimeError("Can't find initial lower bound for preference."
                           " Try another value of preference_init.")
# search the preference by bisection method
for i in range(max_iter):
p = (p_lower + p_upper) / 2
        ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
k_upper, p_upper))
# if the current k goes out of bounds then retry with perturbed p
while k_current < k_lower or k_current > k_upper:
print("retry")
p += np.random.uniform(p_lower, p_upper) / 10
            ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
k_upper, p_upper))
if k_current < K:
p_lower = p
k_lower = k_current
elif k_current > K:
p_upper = p
k_upper = k_current
else:
break
else:
        raise RuntimeError("Can't find a preference to form K clusters."
                           " Try another value of preference_init.")
if c is None:
return ap
# Search further better preference in terms of NMI score by random search
p_best = p
    score_best = normalized_mutual_info_score(c, ap.predict(x))
print('initial score:', score_best)
print()
for i in range(iter_finetune):
p = np.random.normal(p_best, (p_upper - p_lower) / 2)
if p < p_lower or p > p_upper: # where p is rejected
print('reject')
continue
        ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
if k_current < K and p > p_lower:
p_lower = p
elif k_current > K and p < p_upper:
p_upper = p
        else:  # where k_current is K
            score = normalized_mutual_info_score(c, ap.predict(x))
if score > score_best:
print("update p {} -> {}".format(p_best, p))
p_best = p
score_best = score
print('p: {}, {}, {}'.format(p_lower, p, p_upper))
print('score: {}'.format(score_best))
print()
    return AffinityPropagation(preference=p_best).fit(x)
if __name__ == '__main__':
y_train = np.load('y_train.npy')
c_train = np.load('c_train.npy').ravel()
y_test = np.load('y_test.npy')
c_test = np.load('c_test.npy').ravel()
c_train = LabelEncoder().fit_transform(c_train)
c_test = LabelEncoder().fit_transform(c_test)
K = 40
# K = len(np.unique(c_train))
y = y_train[c_train.ravel() < K]
c = c_train[c_train < K]
# y = y_test[c_test.ravel() < K]
# c = c_test[c_test < K]
ap = ap_cluster_k(y, K, preference_init=-1.0, c=c, iter_finetune=30)
c_pred = ap.predict(y)
print(normalized_mutual_info_score(c, c_pred))
plt.plot(np.vstack((c_pred, c)).T)
plt.show()
    # print(f1_score(c, c_pred))
|
[
"numpy.random.normal",
"sklearn.preprocessing.LabelEncoder",
"sklearn.cluster.AffinityPropagation",
"numpy.vstack",
"numpy.random.uniform",
"sklearn.metrics.normalized_mutual_info_score",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((4137, 4159), 'numpy.load', 'np.load', (['"""y_train.npy"""'], {}), "('y_train.npy')\n", (4144, 4159), True, 'import numpy as np\n'), ((4218, 4239), 'numpy.load', 'np.load', (['"""y_test.npy"""'], {}), "('y_test.npy')\n", (4225, 4239), True, 'import numpy as np\n'), ((4757, 4767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4765, 4767), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3300), 'numpy.random.normal', 'np.random.normal', (['p_best', '((p_upper - p_lower) / 2)'], {}), '(p_best, (p_upper - p_lower) / 2)\n', (3267, 3300), True, 'import numpy as np\n'), ((4673, 4712), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['c', 'c_pred'], {}), '(c, c_pred)\n', (4701, 4712), False, 'from sklearn.metrics import normalized_mutual_info_score\n'), ((4048, 4086), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p_best'}), '(preference=p_best)\n', (4067, 4086), False, 'from sklearn.cluster import AffinityPropagation\n'), ((4174, 4196), 'numpy.load', 'np.load', (['"""c_train.npy"""'], {}), "('c_train.npy')\n", (4181, 4196), True, 'import numpy as np\n'), ((4253, 4274), 'numpy.load', 'np.load', (['"""c_test.npy"""'], {}), "('c_test.npy')\n", (4260, 4274), True, 'import numpy as np\n'), ((4298, 4312), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4310, 4312), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4349, 4363), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4361, 4363), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4727, 4749), 'numpy.vstack', 'np.vstack', (['(c_pred, c)'], {}), '((c_pred, c))\n', (4736, 4749), True, 'import numpy as np\n'), ((1194, 1227), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (1213, 1227), False, 'from sklearn.cluster import AffinityPropagation\n'), ((1755, 1788), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (1774, 1788), False, 'from sklearn.cluster import AffinityPropagation\n'), ((2235, 2270), 'numpy.random.uniform', 'np.random.uniform', (['p_lower', 'p_upper'], {}), '(p_lower, p_upper)\n', (2252, 2270), True, 'import numpy as np\n'), ((3425, 3458), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (3444, 3458), False, 'from sklearn.cluster import AffinityPropagation\n'), ((2293, 2326), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (2312, 2326), False, 'from sklearn.cluster import AffinityPropagation\n')]
|
import sys
sys.path.append('../../')
import keras2caffe
DATA_DIR='../../data/'
import caffe
import cv2
import numpy as np
sys.path.append('/media/toshiba_ml/models/keras-models/keras-squeezenet')
from keras_squeezenet import SqueezeNet
# TensorFlow backend uses all GPU memory by default, so we need to limit it
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
#converting
keras_model = SqueezeNet()
keras2caffe.convert(keras_model, 'deploy.prototxt', 'SqueezeNet.caffemodel')
#testing the model
caffe.set_mode_gpu()
net = caffe.Net('deploy.prototxt', 'SqueezeNet.caffemodel', caffe.TEST)
img = cv2.imread(DATA_DIR+'bear.jpg')
img = cv2.resize(img, (227, 227))
img = img[..., ::-1]  # BGR (as loaded by OpenCV) to RGB
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1,) + data.shape
data -= 128
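# Note (assumption): subtracting a single scalar (128) is only a rough stand-in for the per-channel ImageNet
# mean subtraction normally used with SqueezeNet, so the predicted probabilities may differ slightly from the
# original Keras model.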
net.blobs['data'].data[...] = data
out = net.forward()
preds = out['global_average_pooling2d_1']
classes = eval(open(DATA_DIR+'class_names.txt', 'r').read())
print("Class is: " + classes[np.argmax(preds)])
print("Certainty is: " + str(preds[0][np.argmax(preds)]))
|
[
"keras_squeezenet.SqueezeNet",
"tensorflow.Session",
"caffe.set_mode_gpu",
"keras2caffe.convert",
"numpy.argmax",
"numpy.array",
"caffe.Net",
"tensorflow.ConfigProto",
"cv2.resize",
"sys.path.append",
"cv2.imread"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((136, 209), 'sys.path.append', 'sys.path.append', (['"""/media/toshiba_ml/models/keras-models/keras-squeezenet"""'], {}), "('/media/toshiba_ml/models/keras-models/keras-squeezenet')\n", (151, 209), False, 'import sys\n'), ((411, 427), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (425, 427), True, 'import tensorflow as tf\n'), ((551, 563), 'keras_squeezenet.SqueezeNet', 'SqueezeNet', ([], {}), '()\n', (561, 563), False, 'from keras_squeezenet import SqueezeNet\n'), ((565, 641), 'keras2caffe.convert', 'keras2caffe.convert', (['keras_model', '"""deploy.prototxt"""', '"""SqueezeNet.caffemodel"""'], {}), "(keras_model, 'deploy.prototxt', 'SqueezeNet.caffemodel')\n", (584, 641), False, 'import keras2caffe\n'), ((663, 683), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (681, 683), False, 'import caffe\n'), ((691, 756), 'caffe.Net', 'caffe.Net', (['"""deploy.prototxt"""', '"""SqueezeNet.caffemodel"""', 'caffe.TEST'], {}), "('deploy.prototxt', 'SqueezeNet.caffemodel', caffe.TEST)\n", (700, 756), False, 'import caffe\n'), ((764, 797), 'cv2.imread', 'cv2.imread', (["(DATA_DIR + 'bear.jpg')"], {}), "(DATA_DIR + 'bear.jpg')\n", (774, 797), False, 'import cv2\n'), ((803, 830), 'cv2.resize', 'cv2.resize', (['img', '(227, 227)'], {}), '(img, (227, 227))\n', (813, 830), False, 'import cv2\n'), ((871, 902), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (879, 902), True, 'import numpy as np\n'), ((497, 522), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (507, 522), True, 'import tensorflow as tf\n'), ((1170, 1186), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (1179, 1186), True, 'import numpy as np\n'), ((1227, 1243), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (1236, 1243), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
from scipy import sparse
from scipy.optimize import linprog
import matplotlib.pyplot as plt
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import TensorDataset, DataLoader
from captum.attr import IntegratedGradients
def temp_make_energy():
    """ I tried to formulate the energy problem as an LP formulation, i.e. defining c, A, and b. I am almost entirely
    certain that this is in fact possible but I had too many problems and did not want to invest too much time in this.
So I stopped trying. """
time = 8760
# inputs:
c_pv = 1
c_bat = 1
c_buy = 1
demand = np.genfromtxt("data/TS_Demand.csv").reshape((time, 1))
avail_pv = np.genfromtxt("data/TS_PVAvail.csv").reshape((time, 1))
# cost vector
c_buy = np.full(time, c_buy)
    c_zeros = np.zeros(time * 4)  # zero cost for p_pv, p_bat_out, p_bat_in and p_bat_s (for the right dimensionality)
c = np.concatenate((np.array([c_pv]), np.array([c_bat]), c_buy, c_zeros))
x_dim = c.shape[0]
    # what does x look like? (everything with "(T)" contains T (=time) elements, one for each time step)
# c_ap_pv, c_ap_bat_s, p_buy (T), p_pv (T), p_bat_out (T), p_bat_in (T), p_bat_s (T)
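    # Note: the layout described here gives 2 + 5 * T decision variables (T = 8760 hourly steps) and the
    # constraint blocks below are stacked row-wise into a single inequality system A x <= b for scipy.optimize.linprog.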
# constraints
    # energy balance (maybe remove the equality and make it an inequality (>= Demand, or <= -Demand only))
a_energy_balance = sparse.lil_matrix((2*time, x_dim))
b_energy_balance = sparse.lil_matrix((2*time, 1))
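    # Note: the energy balance equality (purchase + PV production + battery discharge - battery charge = demand)
    # is emulated by a pair of opposite inequalities (<= demand and <= -demand) because only the A_ub x <= b_ub
    # form is assembled here; linprog's A_eq / b_eq arguments could express the equality directly.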
for t in range(time): # this can definitely be written more efficiently, for now I just want it correct though
a_energy_balance[t * 2, 2 + t] = 1 # p_buy(t)
a_energy_balance[t * 2, 2 + time + t] = 1 # p_pv(t)
a_energy_balance[t * 2, 2 + 2 * time + t] = 1 # p_bat_out(t)
a_energy_balance[t * 2, 2 + 3 * time + t] = -1 # p_bat_in(t)
b_energy_balance[t * 2] = demand[t]
a_energy_balance[t * 2 + 1, 2 + t] = -1 # p_buy(t)
a_energy_balance[t * 2 + 1, 2 + time + t] = -1 # p_pv(t)
a_energy_balance[t * 2 + 1, 2 + 2 * time + t] = -1 # p_bat_out(t)
a_energy_balance[t * 2 + 1, 2 + 3 * time + t] = 1 # p_bat_in(t)
b_energy_balance[t * 2 + 1] = -demand[t]
# battery equation
a_battery_equation = sparse.lil_matrix((2 * (time - 1), x_dim))
b_battery_equation = sparse.lil_matrix((2 * (time - 1), 1)) # just stays zero, so that is fine
for t in range(1, time):
a_battery_equation[(t - 1) * 2, 2 + 4 * time + t] = 1 # p_bat_s (t)
a_battery_equation[(t - 1) * 2, 2 + 4 * time + t - 1] = -1 # p_bat_s (t - 1)
a_battery_equation[(t - 1) * 2, 2 + 3 * time + t] = -1 # p_bat_in (t)
a_battery_equation[(t - 1) * 2, 2 + 2 * time + t] = 1 # p_bat_out (t)
a_battery_equation[(t - 1) * 2 + 1, 2 + 4 * time + t] = -1 # p_bat_s (t)
a_battery_equation[(t - 1) * 2 + 1, 2 + 4 * time + t - 1] = 1 # p_bat_s (t - 1)
a_battery_equation[(t - 1) * 2 + 1, 2 + 3 * time + t] = 1 # p_bat_in (t)
a_battery_equation[(t - 1) * 2 + 1, 2 + 2 * time + t] = -1 # p_bat_out (t)
# pv production limit (0 <= p_pv (t) always given per LP definition (x >= 0))
# lifetime missing (delta t I think) --> but why not for battery (per slides: not, per code: yes)
a_pv_production_limit = sparse.lil_matrix((time, x_dim))
b_pv_production_limit = sparse.lil_matrix((time, 1)) # just stays zero, so that is fine
for t in range(time):
a_pv_production_limit[t, 2 + time + t] = 1
a_pv_production_limit[t, 0] = -avail_pv[t]
# battery charge limit (0 <= p_bat_in (t) always given per LP definition (x >= 0))
a_battery_charge_limit = sparse.lil_matrix((time, x_dim))
b_battery_charge_limit = sparse.lil_matrix((time, 1)) # just stays zero, so that is fine
for t in range(time):
a_battery_charge_limit[t, 2 + 2 * time + t] = 1
a_battery_charge_limit[t, 1] = -1
# battery initial state
a_battery_initial_state = sparse.lil_matrix((2, x_dim))
b_battery_initial_state = sparse.lil_matrix((2, 1)) # just stays zero, so that is fine
a_battery_initial_state[0, 2 + 4 * time] = 1
    a_battery_initial_state[1, 2 + 4 * time] = -1  # maybe not necessary because of x >= 0
# power purchase limit (0 <= p_buy (t) always given per LP definition (x >= 0))
# concatenate for constraint matrix a and vector b
a = sparse.vstack(
(a_energy_balance, a_battery_equation, a_pv_production_limit, a_battery_charge_limit, a_battery_initial_state))
b = sparse.vstack(
(b_energy_balance, b_battery_equation, b_pv_production_limit, b_battery_charge_limit, b_battery_initial_state))
# time with sparse matrices: csr: 92.63453531265259, lil: 1.463003396987915
# the following calculation of the solution (linprog): took like two hours, did not finish I stopped it, maybe I
# could use more linprog options, or I just don't use this (better I think)
res = linprog(c, a, b.toarray(), method="highs")
sol = res.x
print("Finished")
def load_energy_data(seed=0, with_new=True):
""" Load data points for the energy system. Currently contains 10000 data points. """
# data has 9 columns: cost_pv, cost_bat, cost_buy, demand, cap_pv, cap_bat, own_gen, totex, capex
orig_data = np.load("data/energy_data.npy")
orig_data_plus = None
if with_new:
# "data_plus.npy" is the combination of "data_values_interval.npy", data_values_around_min.npy", and
# "data_values_around_max.npy", generated by "energy_data.py"
orig_data_plus = np.load("data/data_plus.npy")
orig_data = np.concatenate((orig_data, orig_data_plus))
np.random.seed(seed)
np.random.shuffle(orig_data)
# this manually set mean is really (!) close to the calculated mean anyway (with infinite data, it would be exactly
    # the same) but using this manually set mean means we have data for +1 and -1 (normalized) for all input values
# when also using the newly generated data
if with_new:
# orig_data_plus[100] is exactly the mean data with corresponding outputs
data_mean = orig_data_plus[100] # np.mean(orig_data, axis=0)
else:
data_mean = np.mean(orig_data, axis=0)
# normalize data and save variables to be able to reverse that
data = orig_data - data_mean
data_max = np.max(np.abs(data), axis=0)
data = data / data_max
e_input = data[:, :4]
e_output = data[:, 4:]
return e_input, e_output, data_mean, data_max, orig_data
def vis_energy(attributions, values=None, edge_labels=False, vis_real=False):
""" Visualizes attribution for the energy neural network, from inputs to outputs. """
inp_params, output_vals, pred_outputs = None, None, None
if values is not None:
inp_params, output_vals, pred_outputs = values
g = nx.DiGraph()
# index to name (input/output)
itni = {0: "Photovoltaik", 1: "Batteriespeicher", 2: "Stromnetz", 3: "Energiebedarf"}
itno = {0: "Kapazität PV", 1: "Kapazität Batterie", 2: "Eigenerzeugung", 3: "TOTEX", 4: "CAPEX"}
# define nodes
g.add_node("Photovoltaik", pos=(0, 7))
g.add_node("Batteriespeicher", pos=(0, 5))
g.add_node("Stromnetz", pos=(0, 3))
g.add_node("Energiebedarf", pos=(0, 1))
g.add_node("Kapazität PV", pos=(5, 8))
g.add_node("Kapazität Batterie", pos=(5, 6))
g.add_node("Eigenerzeugung", pos=(5, 4))
g.add_node("TOTEX", pos=(5, 2))
g.add_node("CAPEX", pos=(5, 0))
labeldict = {}
if values is not None:
labeldict["Photovoltaik"] = f"Photovoltaik\n{inp_params[0, 0]:.2f}"
labeldict["Batteriespeicher"] = f"Batteriespeicher\n{inp_params[0, 1]:.2f}"
labeldict["Stromnetz"] = f"Stromnetz\n{inp_params[0, 2]:.2f}"
labeldict["Energiebedarf"] = f"Energiebedarf\n{inp_params[0, 3]:.2f}"
if output_vals is None:
labeldict["Kapazität PV"] = f"Kapazität PV\n{pred_outputs[0, 0]:.2f}"
labeldict["Kapazität Batterie"] = f"Kapazität Batterie\n{pred_outputs[0, 1]:.2f}"
labeldict["Eigenerzeugung"] = f"Eigenerzeugung\n{pred_outputs[0, 2]:.2f}"
labeldict["TOTEX"] = f"TOTEX\n{pred_outputs[0, 3]:.2f}"
labeldict["CAPEX"] = f"CAPEX\n{pred_outputs[0, 4]:.2f}"
else:
labeldict["Kapazität PV"] = f"Kapazität PV\n{pred_outputs[0, 0]:.2f} ({output_vals[0, 0]:.2f})"
labeldict["Kapazität Batterie"] = f"Kapazität Batterie\n{pred_outputs[0, 1]:.2f} ({output_vals[0, 1]:.2f})"
labeldict["Eigenerzeugung"] = f"Eigenerzeugung\n{pred_outputs[0, 2]:.2f} ({output_vals[0, 2]:.2f})"
labeldict["TOTEX"] = f"TOTEX\n{pred_outputs[0, 3]:.2f} ({output_vals[0, 3]:.2f})"
labeldict["CAPEX"] = f"CAPEX\n{pred_outputs[0, 4]:.2f} ({output_vals[0, 4]:.2f})"
if vis_real:
_, _, data_mean, data_max, _ = load_energy_data()
str_add = "\n" + r"$\rightarrow$"
input_diffs = inp_params[0, :] * data_max[:4]
real_inputs = input_diffs + data_mean[:4]
labeldict["Photovoltaik"] += str_add + f"{real_inputs[0]:.0f} ({'+' if input_diffs[0] > 0 else ''}{input_diffs[0]:.0f})"
labeldict["Batteriespeicher"] += str_add + f"{real_inputs[1]:.0f} ({'+' if input_diffs[1] > 0 else ''}{input_diffs[1]:.0f})"
labeldict["Stromnetz"] += str_add + f"{real_inputs[2]:.0f} ({'+' if input_diffs[2] > 0 else ''}{input_diffs[2]:.3f})"
labeldict["Energiebedarf"] += str_add + f"{real_inputs[3]:.0f} ({'+' if input_diffs[3] > 0 else ''}{input_diffs[3]:.0f})"
output_diffs = pred_outputs[0, :] * data_max[4:]
real_outputs = output_diffs + data_mean[4:]
labeldict["Kapazität PV"] += str_add + f"{real_outputs[0]:.2f} ({'+' if output_diffs[0] > 0 else ''}{output_diffs[0]:.2f})"
labeldict["Kapazität Batterie"] += str_add + f"{real_outputs[1]:.2f} ({'+' if output_diffs[1] > 0 else ''}{output_diffs[1]:.2f})"
labeldict["Eigenerzeugung"] += str_add + f"{real_outputs[2]:.3f} ({'+' if output_diffs[2] > 0 else ''}{output_diffs[2]:.3f})"
labeldict["TOTEX"] += str_add + f"{real_outputs[3]:.0f} ({'+' if output_diffs[3] > 0 else ''}{output_diffs[3]:.0f})"
labeldict["CAPEX"] += str_add + f"{real_outputs[4]:.0f} ({'+' if output_diffs[4] > 0 else ''}{output_diffs[4]:.0f})"
else:
labeldict["Photovoltaik"] = "Photovoltaik"
labeldict["Batteriespeicher"] = "Batteriespeicher"
labeldict["Stromnetz"] = "Stromnetz"
labeldict["Energiebedarf"] = "Energiebedarf"
labeldict["Kapazität PV"] = "Kapazität PV"
labeldict["Kapazität Batterie"] = "Kapazität Batterie"
labeldict["Eigenerzeugung"] = "Eigenerzeugung"
labeldict["TOTEX"] = "TOTEX"
labeldict["CAPEX"] = "CAPEX"
edge_list = []
edge_attr = []
for i, o in attributions:
edge_list.append((itni[i], itno[o]))
edge_attr.append(attributions[i, o])
color_bounds = np.max(np.abs(edge_attr))
cmap = plt.cm.RdBu
pos = nx.get_node_attributes(g, "pos")
nx.draw(g, pos, labels=labeldict, with_labels=True, node_color="w")
nx.draw_networkx_edges(g, pos, edgelist=edge_list, edge_color=edge_attr, edge_cmap=cmap, edge_vmin=-color_bounds,
edge_vmax=color_bounds)
if edge_labels:
e_labels = {(itni[key[0]], itno[key[1]]): f"{attributions[key]:.4f}" for key in attributions.keys()}
nx.draw_networkx_edge_labels(g, pos=pos, edge_labels=e_labels, label_pos=0.5)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=-color_bounds, vmax=color_bounds))
sm.set_array([])
plt.colorbar(sm)
plt.show()
def visualize_loss(train_loss, test_loss):
""" Plot the train and test loss of a neural network after learning. One graph shows the loss progress over all
iterations, another one for only the last 10 iterations (can see whether it is still improving)."""
nr_epochs = list(range(len(train_loss)+1))[1:]
print(train_loss)
print(test_loss)
ax1 = plt.subplot(2, 1, 1)
ax1.plot(nr_epochs, train_loss, label="Train")
ax1.plot(nr_epochs, test_loss, label="Test")
ax1.set_ylabel("Loss")
ax1.set_xlabel("Epoch")
ax1.set_xticks(nr_epochs)
ax1.set_xticklabels(str(epoch) for epoch in nr_epochs)
ax1.set_title("Loss over all epochs")
ax1.grid(True)
ax1.legend()
ax2 = plt.subplot(2, 1, 2)
ax2.plot(nr_epochs[-10:], train_loss[-10:], label="Train")
ax2.plot(nr_epochs[-10:], test_loss[-10:], label="Test")
ax2.set_ylabel("Loss")
ax2.set_xlabel("Epoch")
ax2.set_xticks(nr_epochs[-10:])
ax2.set_xticklabels(str(epoch) for epoch in nr_epochs[-10:])
    ax2.set_title("Loss over the last 10 epochs")
ax2.grid(True)
ax2.legend()
plt.subplots_adjust(hspace=0.6)
plt.show()
class EnergyNet(nn.Module):
""" Neural network learning the relationship of the input and output values as loaded by load_energy_data. """
def __init__(self, dim_input, dim_output):
""" Initialize the neural network. """
super(EnergyNet, self).__init__()
factor = 80 * 2 * 2 * 2 * 2
self.fc1 = nn.Linear(dim_input, factor * 2)
self.fc2 = nn.Linear(factor * 2, factor * 2)
self.fc2b = nn.Linear(factor * 2, factor * 2)
self.fc3 = nn.Linear(factor * 2, factor)
self.fc4 = nn.Linear(factor, dim_output)
def forward(self, param):
""" Forward pass. """
h = self.fc1(param)
h = func.relu(h)
h = self.fc2(h)
h = func.relu(h)
h = self.fc2b(h)
h = func.relu(h)
h = self.fc3(h)
h = func.relu(h)
output = self.fc4(h)
return output
def train(args, model, device, train_loader, optimizer, epoch):
""" Train the model. """
model.train()
# mean squared error loss for output
criterion = torch.nn.MSELoss()
for batch_idx, (e_input, e_output) in enumerate(train_loader):
e_input, e_output = e_input.to(device), e_output.to(device)
optimizer.zero_grad()
prediction = model(e_input)
loss = criterion(prediction, e_output)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(epoch, batch_idx * len(e_input),
len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
""" Test the model. """
model.eval()
test_loss = 0
# mean squared error loss for output
criterion = torch.nn.MSELoss(reduction="sum")
with torch.no_grad():
for (e_input, e_output) in test_loader:
e_input, e_output = e_input.to(device), e_output.to(device)
prediction = model(e_input)
test_loss += criterion(prediction, e_output).item()
test_loss /= len(test_loader.batch_sampler)
print("\nTest set: Average loss: {:.4f}\n".format(test_loss))
return test_loss
def train_model(args):
""" Get model parameters, data and train a model. """
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
torch.manual_seed(args.seed)
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1,
"pin_memory": True,
"shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
e_input, e_output, _, _, _ = load_energy_data(args.seed)
# parameter
input_train = e_input[:e_input.shape[0] // 2]
input_test = e_input[e_input.shape[0] // 2:]
# use half of the data for training and testing each
output_train = e_output[:e_output.shape[0] // 2]
output_test = e_output[e_output.shape[0] // 2:]
tensor_input_train = torch.Tensor(input_train)
tensor_input_test = torch.Tensor(input_test)
tensor_output_train = torch.Tensor(output_train)
tensor_output_test = torch.Tensor(output_test)
dataset_train = TensorDataset(tensor_input_train, tensor_output_train)
dataset_test = TensorDataset(tensor_input_test, tensor_output_test)
dataloader_train = DataLoader(dataset_train, **train_kwargs)
dataloader_train_for_test = DataLoader(dataset_train, **test_kwargs)
dataloader_test = DataLoader(dataset_test, **test_kwargs)
model = EnergyNet(e_input.shape[1], e_output.shape[1]).to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
train_loss = []
test_loss = []
for epoch in range(1, args.epochs + 1):
train(args, model, device, dataloader_train, optimizer, epoch)
train_loss.append(test(model, device, dataloader_train_for_test))
test_loss.append(test(model, device, dataloader_test))
scheduler.step()
visualize_loss(train_loss, test_loss)
print(train_loss)
print(test_loss)
if args.save_model:
# save the model
save_path = f"models/Energy_{args.save_name}.pt"
torch.save(model.state_dict(), save_path)
return model
def prepare_model(args):
""" Define the model and load the state in the specified path. """
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
e_input, e_output, _, _, _ = load_energy_data(args.seed)
model = EnergyNet(e_input.shape[1], e_output.shape[1]).to(device)
# load the model state
save_path = f"models/Energy_{args.save_name}.pt"
model.load_state_dict(torch.load(save_path))
return model
def apply_visualization(model, args):
""" Set all necessary parameters and call the right visualization method. """
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
e_input, e_output, _, _, _ = load_energy_data(args.seed)
input_test = e_input[e_input.shape[0] // 2:]
output_test = e_output[e_output.shape[0] // 2:]
start_index = args.num_vis * args.vis_next
if start_index + args.num_vis > input_test.shape[0]:
raise ValueError(f"There are not enough test instances to visualize with respect to \"args.num_vis\": "
f"{args.num_vis} and \"args.vis_next\": {args.vis_next}")
input_test = input_test[start_index:start_index + args.num_vis]
tensor_input_vis = torch.Tensor(input_test).to(device)
output_test = output_test[start_index:start_index + args.num_vis]
attributions = {}
for i in range(args.num_vis):
# custom input
if args.vis_input:
vis_input_str = input("Enter input values ([-1, 1], 4 values, whitespace separated):")
for index, value in enumerate(vis_input_str.split()):
tensor_input_vis[i, index] = float(value)
for output_index in range(5):
ig = IntegratedGradients(model)
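            # Note: Captum's IntegratedGradients attributes the selected output (target=output_index) to the
            # four inputs by integrating gradients along the straight path from the chosen baseline to the
            # input; the attributions approximately sum to model(input) - model(baseline) for that output.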
# this following code or the block after? What is better, and for what purpose? Both seem very similar
# for input_index in range(4):
# if args.vis_only_input != -1 and args.vis_only_input != input_index:
# continue
# # baseline
# bl = tensor_input_vis[i:i + 1].detach().clone()
# bl[0, input_index] = 0
# attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
# attr = attr.detach().cpu().numpy()[0]
# if (input_index, output_index) in attributions:
# attributions[(input_index, output_index)] += attr[input_index]
# else:
# attributions[(input_index, output_index)] = attr[input_index]
# which baseline to use
choose_baseline = args.baseline
# baseline for all smallest and all largest inputs
if choose_baseline == "edges":
bl = tensor_input_vis[i:i + 1].detach().clone()
bl[0, :] = -1
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
bl[0, :] = 1
attr2 = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr += attr2.detach().cpu().numpy()[0]
# random: multiple baselines, uniformly distributed within [-1, 1], average for final attribution
elif choose_baseline == "random":
all_attr = None
for bls in range(10):
bl = ((torch.rand(1, 4) * 2) - 1).to(device)
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
if bls == 0:
all_attr = attr
else:
all_attr += attr
attr = all_attr / 10
# gaussian: multiple baselines, gaussian distributed around 0, average for final attribution
elif choose_baseline == "gaussian":
all_attr = None
for bls in range(10):
std = 0.25 # pretty close to the underlying data std
bl = torch.normal(torch.tensor([[0.0, 0.0, 0.0, 0.0]]), std).to(device)
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
if bls == 0:
all_attr = attr
else:
all_attr += attr
attr = all_attr / 10
# baseline as specified in args
else:
bl = tensor_input_vis[i:i + 1].detach().clone()
bl[0, :] = float(choose_baseline)
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
for input_index in range(4):
if args.vis_only_input == -1 or args.vis_only_input == input_index:
attributions[(input_index, output_index)] = attr[input_index]
if not args.vis_agg:
pred = model(tensor_input_vis[i:i + 1]).detach().cpu().numpy()
if args.vis_input:
out_label = None
else:
out_label = output_test[i:i + 1]
vis_energy(attributions, values=(tensor_input_vis[i:i + 1].detach().cpu().numpy(), out_label, pred),
edge_labels=args.vis_only_input != -1, vis_real=args.vis_real_values)
attributions = {}
if args.vis_agg:
# pred = model(tensor_input_vis).detach().cpu().numpy()
vis_energy(attributions, edge_labels=args.vis_only_input != -1, vis_real=args.vis_real_values)
return
def prepare_arguments():
""" Define and return arguments. """
parser = argparse.ArgumentParser(description="PyTorch Energy Experiment v0_1")
# model training
parser.add_argument("--batch-size", type=int, default=64, metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument("--test-batch-size", type=int, default=1000, metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument("--epochs", type=int, default=5, metavar="N", help="number of epochs to train (default: 5)")
parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)")
parser.add_argument("--gamma", type=float, default=0.7, metavar="M", help="Learning rate step gamma (default: 0.7)")
parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training")
parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass")
parser.add_argument("--seed", type=int, default=0, metavar="S", help="random seed (default: 0)")
parser.add_argument("--log-interval", type=int, default=10, metavar="N",
help="how many batches to wait before logging training status")
# model saving / loading
parser.add_argument("--save-model", action="store_true", default=False, help="save the current model")
parser.add_argument("--load-model", action="store_true", default=False, help="load a model")
parser.add_argument("--save-name", type=str, default="0", metavar="NAME",
help="name with which the model will be saved or loaded")
# visualization
parser.add_argument("--vis", action="store_true", default=False, help="visualize model performance and attribution")
parser.add_argument("--num-vis", type=int, default=10, metavar="N", help="number of instanced to be visualized")
parser.add_argument("--vis-agg", action="store_true", default=False,
help="aggregate the attribution of all \"num-vis\" instances before the visualization)")
parser.add_argument("--vis-next", type=int, default=0, metavar="N",
help="skips the first vis_next * num_vis instances, can visualize other instances that way")
parser.add_argument("--vis-save", action="store_true", default=False,
help="save the visualization, otherwise simply show it")
parser.add_argument("--vis-input", action="store_true", default=False,
help="enter own inputs for the visualization")
parser.add_argument("--baseline", type=str, default="0", metavar="NAME OR NUMBER",
help="which baseline to use (\"edges\", \"random\", or a number as the baseline)")
parser.add_argument("--vis-real-values", action="store_true", default=False,
help="also show the unnormalized values on the visualization")
parser.add_argument("--vis-only-input", type=int, default=-1, metavar="N", help="only visualize for specific input")
args = parser.parse_args()
return args
def main():
""" Run the neural network with the specified arguments. """
# get arguments
args = prepare_arguments()
# get the model
if not args.load_model:
# train the model
model = train_model(args)
else:
# load the model
model = prepare_model(args)
# obtain and visualize attributions
if args.vis:
apply_visualization(model, args)
if __name__ == "__main__":
main()
|
[
"networkx.draw_networkx_edge_labels",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"numpy.genfromtxt",
"networkx.draw_networkx_edges",
"numpy.mean",
"scipy.sparse.lil_matrix",
"argparse.ArgumentParser",
"matplotlib.pyplot.Normalize",
"networkx.DiGraph",
"captum.attr.IntegratedGradients",
"numpy.random.seed",
"numpy.concatenate",
"numpy.abs",
"torch.Tensor",
"torch.utils.data.TensorDataset",
"torch.nn.functional.relu",
"torch.no_grad",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"torch.device",
"torch.manual_seed",
"networkx.draw",
"torch.load",
"matplotlib.pyplot.colorbar",
"torch.optim.lr_scheduler.StepLR",
"torch.tensor",
"numpy.zeros",
"networkx.get_node_attributes",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"numpy.full",
"scipy.sparse.vstack",
"numpy.load",
"matplotlib.pyplot.subplot",
"torch.rand",
"numpy.random.shuffle"
] |
[((924, 944), 'numpy.full', 'np.full', (['time', 'c_buy'], {}), '(time, c_buy)\n', (931, 944), True, 'import numpy as np\n'), ((959, 977), 'numpy.zeros', 'np.zeros', (['(time * 5)'], {}), '(time * 5)\n', (967, 977), True, 'import numpy as np\n'), ((1453, 1489), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * time, x_dim)'], {}), '((2 * time, x_dim))\n', (1470, 1489), False, 'from scipy import sparse\n'), ((1511, 1543), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * time, 1)'], {}), '((2 * time, 1))\n', (1528, 1543), False, 'from scipy import sparse\n'), ((2331, 2373), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * (time - 1), x_dim)'], {}), '((2 * (time - 1), x_dim))\n', (2348, 2373), False, 'from scipy import sparse\n'), ((2399, 2437), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * (time - 1), 1)'], {}), '((2 * (time - 1), 1))\n', (2416, 2437), False, 'from scipy import sparse\n'), ((3375, 3407), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, x_dim)'], {}), '((time, x_dim))\n', (3392, 3407), False, 'from scipy import sparse\n'), ((3436, 3464), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, 1)'], {}), '((time, 1))\n', (3453, 3464), False, 'from scipy import sparse\n'), ((3746, 3778), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, x_dim)'], {}), '((time, x_dim))\n', (3763, 3778), False, 'from scipy import sparse\n'), ((3808, 3836), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, 1)'], {}), '((time, 1))\n', (3825, 3836), False, 'from scipy import sparse\n'), ((4056, 4085), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2, x_dim)'], {}), '((2, x_dim))\n', (4073, 4085), False, 'from scipy import sparse\n'), ((4116, 4141), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2, 1)'], {}), '((2, 1))\n', (4133, 4141), False, 'from scipy import sparse\n'), ((4467, 4596), 'scipy.sparse.vstack', 'sparse.vstack', (['(a_energy_balance, a_battery_equation, a_pv_production_limit,\n a_battery_charge_limit, a_battery_initial_state)'], {}), '((a_energy_balance, a_battery_equation, a_pv_production_limit,\n a_battery_charge_limit, a_battery_initial_state))\n', (4480, 4596), False, 'from scipy import sparse\n'), ((4610, 4739), 'scipy.sparse.vstack', 'sparse.vstack', (['(b_energy_balance, b_battery_equation, b_pv_production_limit,\n b_battery_charge_limit, b_battery_initial_state)'], {}), '((b_energy_balance, b_battery_equation, b_pv_production_limit,\n b_battery_charge_limit, b_battery_initial_state))\n', (4623, 4739), False, 'from scipy import sparse\n'), ((5370, 5401), 'numpy.load', 'np.load', (['"""data/energy_data.npy"""'], {}), "('data/energy_data.npy')\n", (5377, 5401), True, 'import numpy as np\n'), ((5749, 5769), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5763, 5769), True, 'import numpy as np\n'), ((5774, 5802), 'numpy.random.shuffle', 'np.random.shuffle', (['orig_data'], {}), '(orig_data)\n', (5791, 5802), True, 'import numpy as np\n'), ((6922, 6934), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (6932, 6934), True, 'import networkx as nx\n'), ((11159, 11191), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['g', '"""pos"""'], {}), "(g, 'pos')\n", (11181, 11191), True, 'import networkx as nx\n'), ((11197, 11264), 'networkx.draw', 'nx.draw', (['g', 'pos'], {'labels': 'labeldict', 'with_labels': '(True)', 'node_color': '"""w"""'}), "(g, pos, labels=labeldict, with_labels=True, node_color='w')\n", (11204, 11264), True, 'import networkx as nx\n'), ((11270, 11411), 
'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['g', 'pos'], {'edgelist': 'edge_list', 'edge_color': 'edge_attr', 'edge_cmap': 'cmap', 'edge_vmin': '(-color_bounds)', 'edge_vmax': 'color_bounds'}), '(g, pos, edgelist=edge_list, edge_color=edge_attr,\n edge_cmap=cmap, edge_vmin=-color_bounds, edge_vmax=color_bounds)\n', (11292, 11411), True, 'import networkx as nx\n'), ((11778, 11794), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {}), '(sm)\n', (11790, 11794), True, 'import matplotlib.pyplot as plt\n'), ((11800, 11810), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11808, 11810), True, 'import matplotlib.pyplot as plt\n'), ((12181, 12201), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (12192, 12201), True, 'import matplotlib.pyplot as plt\n'), ((12535, 12555), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (12546, 12555), True, 'import matplotlib.pyplot as plt\n'), ((12969, 13000), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.6)'}), '(hspace=0.6)\n', (12988, 13000), True, 'import matplotlib.pyplot as plt\n'), ((13006, 13016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13014, 13016), True, 'import matplotlib.pyplot as plt\n'), ((14076, 14094), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (14092, 14094), False, 'import torch\n'), ((15062, 15095), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (15078, 15095), False, 'import torch\n'), ((15641, 15684), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (15653, 15684), False, 'import torch\n'), ((15690, 15718), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15707, 15718), False, 'import torch\n'), ((16415, 16440), 'torch.Tensor', 'torch.Tensor', (['input_train'], {}), '(input_train)\n', (16427, 16440), False, 'import torch\n'), ((16465, 16489), 'torch.Tensor', 'torch.Tensor', (['input_test'], {}), '(input_test)\n', (16477, 16489), False, 'import torch\n'), ((16517, 16543), 'torch.Tensor', 'torch.Tensor', (['output_train'], {}), '(output_train)\n', (16529, 16543), False, 'import torch\n'), ((16569, 16594), 'torch.Tensor', 'torch.Tensor', (['output_test'], {}), '(output_test)\n', (16581, 16594), False, 'import torch\n'), ((16616, 16670), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tensor_input_train', 'tensor_output_train'], {}), '(tensor_input_train, tensor_output_train)\n', (16629, 16670), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16690, 16742), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tensor_input_test', 'tensor_output_test'], {}), '(tensor_input_test, tensor_output_test)\n', (16703, 16742), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16767, 16808), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {}), '(dataset_train, **train_kwargs)\n', (16777, 16808), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16841, 16881), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {}), '(dataset_train, **test_kwargs)\n', (16851, 16881), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16904, 16943), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_test'], {}), '(dataset_test, **test_kwargs)\n', (16914, 16943), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((17096, 
17144), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': 'args.gamma'}), '(optimizer, step_size=1, gamma=args.gamma)\n', (17102, 17144), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((17894, 17937), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (17906, 17937), False, 'import torch\n'), ((18414, 18457), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (18426, 18457), False, 'import torch\n'), ((23621, 23690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Energy Experiment v0_1"""'}), "(description='PyTorch Energy Experiment v0_1')\n", (23644, 23690), False, 'import argparse\n'), ((5650, 5679), 'numpy.load', 'np.load', (['"""data/data_plus.npy"""'], {}), "('data/data_plus.npy')\n", (5657, 5679), True, 'import numpy as np\n'), ((5700, 5743), 'numpy.concatenate', 'np.concatenate', (['(orig_data, orig_data_plus)'], {}), '((orig_data, orig_data_plus))\n', (5714, 5743), True, 'import numpy as np\n'), ((6287, 6313), 'numpy.mean', 'np.mean', (['orig_data'], {'axis': '(0)'}), '(orig_data, axis=0)\n', (6294, 6313), True, 'import numpy as np\n'), ((6436, 6448), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (6442, 6448), True, 'import numpy as np\n'), ((11106, 11123), 'numpy.abs', 'np.abs', (['edge_attr'], {}), '(edge_attr)\n', (11112, 11123), True, 'import numpy as np\n'), ((11573, 11650), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['g'], {'pos': 'pos', 'edge_labels': 'e_labels', 'label_pos': '(0.5)'}), '(g, pos=pos, edge_labels=e_labels, label_pos=0.5)\n', (11601, 11650), True, 'import networkx as nx\n'), ((13354, 13386), 'torch.nn.Linear', 'nn.Linear', (['dim_input', '(factor * 2)'], {}), '(dim_input, factor * 2)\n', (13363, 13386), True, 'import torch.nn as nn\n'), ((13406, 13439), 'torch.nn.Linear', 'nn.Linear', (['(factor * 2)', '(factor * 2)'], {}), '(factor * 2, factor * 2)\n', (13415, 13439), True, 'import torch.nn as nn\n'), ((13460, 13493), 'torch.nn.Linear', 'nn.Linear', (['(factor * 2)', '(factor * 2)'], {}), '(factor * 2, factor * 2)\n', (13469, 13493), True, 'import torch.nn as nn\n'), ((13513, 13542), 'torch.nn.Linear', 'nn.Linear', (['(factor * 2)', 'factor'], {}), '(factor * 2, factor)\n', (13522, 13542), True, 'import torch.nn as nn\n'), ((13562, 13591), 'torch.nn.Linear', 'nn.Linear', (['factor', 'dim_output'], {}), '(factor, dim_output)\n', (13571, 13591), True, 'import torch.nn as nn\n'), ((13693, 13705), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13702, 13705), True, 'import torch.nn.functional as func\n'), ((13742, 13754), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13751, 13754), True, 'import torch.nn.functional as func\n'), ((13792, 13804), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13801, 13804), True, 'import torch.nn.functional as func\n'), ((13841, 13853), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13850, 13853), True, 'import torch.nn.functional as func\n'), ((15105, 15120), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15118, 15120), False, 'import torch\n'), ((15602, 15627), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15625, 15627), False, 'import torch\n'), ((17855, 17880), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17878, 17880), False, 'import torch\n'), ((18177, 
18198), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (18187, 18198), False, 'import torch\n'), ((18375, 18400), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18398, 18400), False, 'import torch\n'), ((767, 802), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/TS_Demand.csv"""'], {}), "('data/TS_Demand.csv')\n", (780, 802), True, 'import numpy as np\n'), ((837, 873), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/TS_PVAvail.csv"""'], {}), "('data/TS_PVAvail.csv')\n", (850, 873), True, 'import numpy as np\n'), ((1032, 1048), 'numpy.array', 'np.array', (['[c_pv]'], {}), '([c_pv])\n', (1040, 1048), True, 'import numpy as np\n'), ((1050, 1067), 'numpy.array', 'np.array', (['[c_bat]'], {}), '([c_bat])\n', (1058, 1067), True, 'import numpy as np\n'), ((11699, 11751), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': '(-color_bounds)', 'vmax': 'color_bounds'}), '(vmin=-color_bounds, vmax=color_bounds)\n', (11712, 11751), True, 'import matplotlib.pyplot as plt\n'), ((19014, 19038), 'torch.Tensor', 'torch.Tensor', (['input_test'], {}), '(input_test)\n', (19026, 19038), False, 'import torch\n'), ((19507, 19533), 'captum.attr.IntegratedGradients', 'IntegratedGradients', (['model'], {}), '(model)\n', (19526, 19533), False, 'from captum.attr import IntegratedGradients\n'), ((21232, 21248), 'torch.rand', 'torch.rand', (['(1)', '(4)'], {}), '(1, 4)\n', (21242, 21248), False, 'import torch\n'), ((21942, 21978), 'torch.tensor', 'torch.tensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (21954, 21978), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
For Citibike rebalancing simulation
"""
import simpy
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
order_threshold = 2.0
order_up_to = 5.0
delivery_delay = 20 # in minutes
SIM_RUN = 1000 #number of simulation runs
initial_bikes = 15
operation_cost = 2 # USD per bike for operation
oil_gas = 3 # USD per 1 refillment
service_fee = 3 # USD per bike per ride
PENALTY = 3 # USD for cost of loss of business oportunity
loss_profit = 0.1 # USD per bike per minute for loss of business opportunity
def arrival(env, _lambda ,requirement):
global num_bikes, revenue, penalty, Loss_profit
interarrival = np.random.exponential(1./_lambda)
yield env.timeout(interarrival)
Loss_profit += loss_profit * num_bikes * interarrival
# print 'Arrival @ t={}, require# {}'.format(env.now, requirement)
if requirement == 'Rent a bike':
if num_bikes > 0:
num_bikes -= 1
revenue += service_fee
else:
penalty += PENALTY
else:
num_bikes += 1
# print ('current num of bikes = {}'.format(num_bikes))
def rebalancing(env, quantity):
global num_bikes, num_ordered, revenue , cost, Loss_profit
num_ordered = quantity
cost += (num_ordered * operation_cost) + oil_gas
yield env.timeout(delivery_delay)
num_bikes += num_ordered
# print (" Fill bikes up to ={}".format(num_bikes))
num_ordered = 0
def citibike_run(env, _lambda ,requirement, order_up_to, order_threshold):
global num_bikes, quantity, num_ordered, revenue , cost, penalty, Loss_profit
num_ordered = 0.0
quantity = 0.0
while True:
yield env.process(arrival(env, _lambda ,requirement))
get_bikes.append(num_bikes)
if num_bikes <= order_threshold and num_ordered == 0:
quantity = order_up_to - num_bikes
env.process(rebalancing(env, quantity))
def observe(env):
while True:
obs_time.append(env.now)
obs_bikes.append(num_bikes)
obs_balance.append(revenue - cost - penalty - Loss_profit)
yield env.timeout(0.2)
avg_bikes = []
avg_balance = []
get_balance = []
for i in range(SIM_RUN):
np.random.seed(i)
num_bikes = initial_bikes
revenue = 0
cost = 0
penalty = 0
Loss_profit = 0
obs_time = []
obs_bikes = []
obs_balance = []
get_bikes = []
env = simpy.Environment()
env.process(citibike_run(env, 1.572 ,'Rent a bike', order_up_to, order_threshold))
env.process(citibike_run(env, 1.183 ,'Return a bike', order_up_to, order_threshold))
env.process(observe(env))
env.run(until=180.0) # during 5pm to 8am
avg_bikes.append(np.mean(get_bikes))
avg_balance.append(revenue - cost - penalty - Loss_profit)
if SIM_RUN > 1:
print ("The average number of available bikes during the interval = ", np.mean(avg_bikes))
plt.figure()
plt.scatter(range(len(avg_bikes)), avg_bikes, c='b', alpha=0.4)
plt.xlabel('Simulation runs')
plt.ylabel('Bike Level')
plt.title('Average Bike levels at each runs (Threshold= {:.0f}, order-up-to= {:.0f})'.format(order_threshold, order_up_to))
plt.savefig('Average bike level.png')
plt.figure()
plt.hist( avg_bikes, color='g')
plt.xlabel('X Bin')
plt.ylabel('Count')
plt.title(' Histogram (average number of bike)(Threshold= {:.0f}, order-up-to= {:.0f})'.format(order_threshold, order_up_to))
plt.legend(loc='best')
plt.savefig('Histogram Average bike level.png')
if SIM_RUN <= 1:
plt.figure()
plt.step(obs_time, obs_bikes, where='post' , color = 'g')
plt.xlabel('Time (Minutes)')
plt.ylabel('Bike Level')
plt.title(' Simulation (Initial bikes = {:.0f}, Threshold = {:.0f}, order-up-to = {:.0f})'.format(initial_bikes, order_threshold, order_up_to))
plt.savefig('Bikes level (Tshold = {:.0f}, orderut = {:.0f}).png'.format(order_threshold, order_up_to))
plt.show()
plt.figure()
plt.step(obs_time, obs_balance, where='post', color = 'r')
plt.xlabel('Time (Minutes)')
plt.ylabel('Balance ($)')
plt.title('Balance (Threshold = {:.0f}, order-up-to = {:.0f})'.format(order_threshold, order_up_to))
plt.savefig('balance level (Tshold = {:.0f}, orderut = {:.0f}).png'.format(order_threshold, order_up_to))
plt.show()
confidence_level = 0.05
z_crit = stats.norm.ppf(1-confidence_level/2)
print ('{} simulation runs = {:.3f} +/- {:.3f} (95% CI)'.format(SIM_RUN, np.mean(avg_balance), z_crit*stats.sem(avg_balance)))
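# Illustrative only (not part of the original script): the confidence interval
# printed above is the usual normal-approximation interval, mean +/- z * SEM.
# The helper below simply restates that formula.
def mean_confidence_interval(samples, alpha=0.05):
    z = stats.norm.ppf(1 - alpha / 2)
    return np.mean(samples), z * stats.sem(samples)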
|
[
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.stats.norm.ppf",
"numpy.random.exponential",
"simpy.Environment",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"scipy.stats.sem",
"matplotlib.pyplot.step",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((4533, 4573), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['(1 - confidence_level / 2)'], {}), '(1 - confidence_level / 2)\n', (4547, 4573), True, 'import scipy.stats as stats\n'), ((684, 720), 'numpy.random.exponential', 'np.random.exponential', (['(1.0 / _lambda)'], {}), '(1.0 / _lambda)\n', (705, 720), True, 'import numpy as np\n'), ((2310, 2327), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (2324, 2327), True, 'import numpy as np\n'), ((2527, 2546), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (2544, 2546), False, 'import simpy\n'), ((3043, 3055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3053, 3055), True, 'import matplotlib.pyplot as plt\n'), ((3132, 3161), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Simulation runs"""'], {}), "('Simulation runs')\n", (3142, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3190), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bike Level"""'], {}), "('Bike Level')\n", (3176, 3190), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3360), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Average bike level.png"""'], {}), "('Average bike level.png')\n", (3334, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3370, 3382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3380, 3382), True, 'import matplotlib.pyplot as plt\n'), ((3387, 3417), 'matplotlib.pyplot.hist', 'plt.hist', (['avg_bikes'], {'color': '"""g"""'}), "(avg_bikes, color='g')\n", (3395, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3423, 3442), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Bin"""'], {}), "('X Bin')\n", (3433, 3442), True, 'import matplotlib.pyplot as plt\n'), ((3447, 3466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (3457, 3466), True, 'import matplotlib.pyplot as plt\n'), ((3601, 3623), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3611, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3628, 3675), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Histogram Average bike level.png"""'], {}), "('Histogram Average bike level.png')\n", (3639, 3675), True, 'import matplotlib.pyplot as plt\n'), ((3709, 3721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3719, 3721), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3780), 'matplotlib.pyplot.step', 'plt.step', (['obs_time', 'obs_bikes'], {'where': '"""post"""', 'color': '"""g"""'}), "(obs_time, obs_bikes, where='post', color='g')\n", (3734, 3780), True, 'import matplotlib.pyplot as plt\n'), ((3788, 3816), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Minutes)"""'], {}), "('Time (Minutes)')\n", (3798, 3816), True, 'import matplotlib.pyplot as plt\n'), ((3821, 3845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bike Level"""'], {}), "('Bike Level')\n", (3831, 3845), True, 'import matplotlib.pyplot as plt\n'), ((4106, 4116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4114, 4116), True, 'import matplotlib.pyplot as plt\n'), ((4126, 4138), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4136, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4199), 'matplotlib.pyplot.step', 'plt.step', (['obs_time', 'obs_balance'], {'where': '"""post"""', 'color': '"""r"""'}), "(obs_time, obs_balance, where='post', color='r')\n", (4151, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Minutes)"""'], {}), "('Time (Minutes)')\n", (4216, 4234), 
True, 'import matplotlib.pyplot as plt\n'), ((4239, 4264), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Balance ($)"""'], {}), "('Balance ($)')\n", (4249, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4484, 4494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4492, 4494), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2845), 'numpy.mean', 'np.mean', (['get_bikes'], {}), '(get_bikes)\n', (2834, 2845), True, 'import numpy as np\n'), ((3019, 3037), 'numpy.mean', 'np.mean', (['avg_bikes'], {}), '(avg_bikes)\n', (3026, 3037), True, 'import numpy as np\n'), ((4637, 4657), 'numpy.mean', 'np.mean', (['avg_balance'], {}), '(avg_balance)\n', (4644, 4657), True, 'import numpy as np\n'), ((4666, 4688), 'scipy.stats.sem', 'stats.sem', (['avg_balance'], {}), '(avg_balance)\n', (4675, 4688), True, 'import scipy.stats as stats\n')]
|
import itertools
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV, train_test_split, cross_val_predict, cross_val_score
from sklearn.metrics import f1_score
from sklearn.neural_network import MLPClassifier
np.random.seed(42)
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
num_attribs = ["Pclass", "Age", "SibSp", "Parch", "Fare"]
cat_attribs = ["Sex", "Embarked"]
# Create a class to select numerical or categorical columns
# since Scikit-Learn doesn't handle DataFrames yet
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
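# Quick illustration (added, not part of the original notebook):
# DataFrameSelector just pulls the named columns out of a DataFrame and
# returns them as a plain numpy array, which is what the downstream sklearn
# transformers expect. The demo values below are arbitrary.
_demo_df = pd.DataFrame({"Age": [22.0, 38.0], "Fare": [7.25, 71.28]})
_demo_selected = DataFrameSelector(["Age", "Fare"]).fit_transform(_demo_df)
assert _demo_selected.shape == (2, 2)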
num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)),
('imputer', Imputer(strategy="median")),
('std_scaler', StandardScaler()),
])
cat_pipeline = Pipeline([
('selector', DataFrameSelector(["Sex"])),
('label_binarizer', LabelBinarizer()),
])
cat_pipeline_emb = Pipeline([
('selector', DataFrameSelector(["Embarked"])),
('label_binarizer', LabelBinarizer()),
])
full_pipeline = FeatureUnion(transformer_list=[
("num_pipeline", num_pipeline),
#("cat_pipeline_emb", cat_pipeline_emb),
("cat_pipeline", cat_pipeline),
])
def predict_labels(clf, features, target):
y_pred = clf.predict(features)
return f1_score(target, y_pred), sum(target == y_pred) / float(len(y_pred))
#train = pd.read_csv("train.csv")
train = pd.read_csv("train.csv", index_col = "PassengerId")
train_set, test_set, train_label, test_label = train_test_split(train, train["Survived"], random_state=42, train_size=0.8)
train_prepared = full_pipeline.fit_transform(train_set)
test_prepared = full_pipeline.fit_transform(test_set)
from sklearn.linear_model import LogisticRegression
param_grid ={
'max_iter': range(5, 100, 10),
'C' : [0.2, 0.4, 0.6, 0.8, 1.0],
'solver' : ['newton-cg', 'lbfgs', 'liblinear', 'sag']
}
log_clf = LogisticRegression(random_state=42)
log_clf.fit(train_prepared, train_label)
f1_score(log_clf.predict(train_prepared), train_label)
f1_score(log_clf.predict(test_prepared), test_label)
neu_clf = MLPClassifier(random_state=42)
neu_clf.fit(train_prepared, train_label)
f1_score(neu_clf.predict(train_prepared), train_label)
f1_score(neu_clf.predict(test_prepared), test_label)
parameters={
'learning_rate': ["constant", "invscaling", "adaptive"],
'hidden_layer_sizes': [x for x in itertools.product((10,20,30,40,50,100),repeat=3)],
    'activation': ["logistic", "relu", "tanh"]
}
parameters={
'alpha': 10.0 ** -np.arange(1, 7),
'max_iter' : range(100, 5000, 100)
}
grid_search = GridSearchCV(MLPClassifier(random_state=42), parameters, cv=5, scoring='f1')
grid_search.fit(train_prepared, train_label)
neu_clf = grid_search.best_estimator_
neu_clf
f1_score(neu_clf.predict(train_prepared), train_label)
f1_score(neu_clf.predict(test_prepared), test_label)
MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
beta_2=0.999, early_stopping=False, epsilon=1e-08,
hidden_layer_sizes=(100,), learning_rate='constant',
learning_rate_init=0.001, max_iter=300, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
neu_clf = MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,
beta_2=0.999, early_stopping=False, epsilon=1e-08,
hidden_layer_sizes=(100,), learning_rate='constant',
learning_rate_init=0.001, max_iter=300, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
neu_clf.fit(full_pipeline.fit_transform(train), train["Survived"])
test = pd.read_csv("test.csv", index_col = "PassengerId")
test["Survived"] = neu_clf.predict(full_pipeline.fit_transform(test))
test['Survived'].to_csv("result_2.csv")
parameters={
'alpha': [0.00008, 0.0001, 0.00012],
'max_iter' : range(100, 1000, 100),
'hidden_layer_sizes': [x for x in itertools.product((10,20,30,40,50,100),repeat=3)]
}
grid_search = GridSearchCV(MLPClassifier(random_state=42), parameters, cv=5, scoring='f1')
grid_search.fit(train_prepared, train_label)
neu_clf = grid_search.best_estimator_
neu_clf
f1_score(neu_clf.predict(train_prepared), train_label)
f1_score(neu_clf.predict(test_prepared), test_label)
|
[
"sklearn.pipeline.FeatureUnion",
"sklearn.preprocessing.LabelBinarizer",
"sklearn.metrics.f1_score",
"sklearn.neural_network.MLPClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.Imputer",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.random.seed",
"numpy.arange"
] |
[((445, 463), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (459, 463), True, 'import numpy as np\n'), ((1583, 1683), 'sklearn.pipeline.FeatureUnion', 'FeatureUnion', ([], {'transformer_list': "[('num_pipeline', num_pipeline), ('cat_pipeline', cat_pipeline)]"}), "(transformer_list=[('num_pipeline', num_pipeline), (\n 'cat_pipeline', cat_pipeline)])\n", (1595, 1683), False, 'from sklearn.pipeline import Pipeline, FeatureUnion\n'), ((1966, 2015), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {'index_col': '"""PassengerId"""'}), "('train.csv', index_col='PassengerId')\n", (1977, 2015), True, 'import pandas as pd\n'), ((2068, 2143), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train', "train['Survived']"], {'random_state': '(42)', 'train_size': '(0.8)'}), "(train, train['Survived'], random_state=42, train_size=0.8)\n", (2084, 2143), False, 'from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV, train_test_split, cross_val_predict, cross_val_score\n'), ((2532, 2567), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2550, 2567), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2733, 2763), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2746, 2763), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3515, 3927), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': '"""relu"""', 'alpha': '(0.0001)', 'batch_size': '"""auto"""', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'early_stopping': '(False)', 'epsilon': '(1e-08)', 'hidden_layer_sizes': '(100,)', 'learning_rate': '"""constant"""', 'learning_rate_init': '(0.001)', 'max_iter': '(300)', 'momentum': '(0.9)', 'nesterovs_momentum': '(True)', 'power_t': '(0.5)', 'random_state': '(42)', 'shuffle': '(True)', 'solver': '"""adam"""', 'tol': '(0.0001)', 'validation_fraction': '(0.1)', 'verbose': '(False)', 'warm_start': '(False)'}), "(activation='relu', alpha=0.0001, batch_size='auto', beta_1=\n 0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant', learning_rate_init\n =0.001, max_iter=300, momentum=0.9, nesterovs_momentum=True, power_t=\n 0.5, random_state=42, shuffle=True, solver='adam', tol=0.0001,\n validation_fraction=0.1, verbose=False, warm_start=False)\n", (3528, 3927), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3966, 4378), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': '"""relu"""', 'alpha': '(0.0001)', 'batch_size': '"""auto"""', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'early_stopping': '(False)', 'epsilon': '(1e-08)', 'hidden_layer_sizes': '(100,)', 'learning_rate': '"""constant"""', 'learning_rate_init': '(0.001)', 'max_iter': '(300)', 'momentum': '(0.9)', 'nesterovs_momentum': '(True)', 'power_t': '(0.5)', 'random_state': '(42)', 'shuffle': '(True)', 'solver': '"""adam"""', 'tol': '(0.0001)', 'validation_fraction': '(0.1)', 'verbose': '(False)', 'warm_start': '(False)'}), "(activation='relu', alpha=0.0001, batch_size='auto', beta_1=\n 0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,\n hidden_layer_sizes=(100,), learning_rate='constant', learning_rate_init\n =0.001, max_iter=300, momentum=0.9, nesterovs_momentum=True, power_t=\n 0.5, random_state=42, shuffle=True, solver='adam', tol=0.0001,\n validation_fraction=0.1, verbose=False, warm_start=False)\n", (3979, 4378), False, 
'from sklearn.neural_network import MLPClassifier\n'), ((4491, 4539), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {'index_col': '"""PassengerId"""'}), "('test.csv', index_col='PassengerId')\n", (4502, 4539), True, 'import pandas as pd\n'), ((3244, 3274), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (3257, 3274), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4863, 4893), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (4876, 4893), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1851, 1875), 'sklearn.metrics.f1_score', 'f1_score', (['target', 'y_pred'], {}), '(target, y_pred)\n', (1859, 1875), False, 'from sklearn.metrics import f1_score\n'), ((1202, 1228), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (1209, 1228), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((1255, 1271), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1269, 1271), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((1391, 1407), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1405, 1407), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((1536, 1552), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1550, 1552), False, 'from sklearn.preprocessing import Imputer, LabelBinarizer, StandardScaler\n'), ((3158, 3173), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (3167, 3173), True, 'import numpy as np\n')]
|
import warnings

import numpy as np

import librosa
from librosa import cqt, icqt
from librosa.util.exceptions import ParameterError
def gl_cqt(S, n_iter=32, sr=22050, hop_length=512, bins_per_octave=12, fmin=None, window='hann',
dtype=np.float32, length=None, momentum=0.99, random_state=None, res_type='kaiser_fast'):
if fmin is None:
fmin = librosa.note_to_hz('C1')
if random_state is None:
rng = np.random
elif isinstance(random_state, int):
rng = np.random.RandomState(seed=random_state)
elif isinstance(random_state, np.random.RandomState):
rng = random_state
if momentum > 1:
warnings.warn('Griffin-Lim with momentum={} > 1 can be unstable. Proceed with caution!'.format(momentum))
elif momentum < 0:
raise ParameterError('griffinlim() called with momentum={} < 0'.format(momentum))
# randomly initialize the phase
angles = np.exp(2j * np.pi * rng.rand(*S.shape))
# And initialize the previous iterate to 0
rebuilt = 0.
for _ in range(n_iter):
# Store the previous iterate
tprev = rebuilt
# Invert with our current estimate of the phases
inverse = icqt(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=bins_per_octave, fmin=fmin,
#window=window, length=length, res_type=res_type)
window=window)
# Rebuild the spectrogram
rebuilt = cqt(inverse, sr=sr, bins_per_octave=bins_per_octave, n_bins=S.shape[0],
hop_length=hop_length, fmin=fmin,
window=window, res_type=res_type)
# Update our phase estimates
angles[:] = rebuilt - (momentum / (1 + momentum)) * tprev
angles[:] /= np.abs(angles) + 1e-16
# Return the final phase estimates
return icqt(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=bins_per_octave, fmin=fmin,
window=window,length=length, res_type=res_type)
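# Hedged usage sketch (added, not part of the original snippet): reconstruct a
# 440 Hz test tone from its CQT magnitude. All parameter values here are
# arbitrary examples, and n_iter is kept small just to keep the demo quick.
if __name__ == '__main__':
    sr = 22050
    y = np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr).astype(np.float32)
    C = np.abs(cqt(y, sr=sr, hop_length=512, bins_per_octave=12))
    y_rec = gl_cqt(C, n_iter=4, sr=sr, hop_length=512, bins_per_octave=12)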
|
[
"numpy.abs",
"librosa.cqt",
"numpy.random.RandomState",
"librosa.icqt"
] |
[((1807, 1956), 'librosa.icqt', 'icqt', (['(S * angles)'], {'sr': 'sr', 'hop_length': 'hop_length', 'bins_per_octave': 'bins_per_octave', 'fmin': 'fmin', 'window': 'window', 'length': 'length', 'res_type': 'res_type'}), '(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=\n bins_per_octave, fmin=fmin, window=window, length=length, res_type=res_type\n )\n', (1811, 1956), False, 'from librosa import cqt, icqt\n'), ((1165, 1275), 'librosa.icqt', 'icqt', (['(S * angles)'], {'sr': 'sr', 'hop_length': 'hop_length', 'bins_per_octave': 'bins_per_octave', 'fmin': 'fmin', 'window': 'window'}), '(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=\n bins_per_octave, fmin=fmin, window=window)\n', (1169, 1275), False, 'from librosa import cqt, icqt\n'), ((1422, 1565), 'librosa.cqt', 'cqt', (['inverse'], {'sr': 'sr', 'bins_per_octave': 'bins_per_octave', 'n_bins': 'S.shape[0]', 'hop_length': 'hop_length', 'fmin': 'fmin', 'window': 'window', 'res_type': 'res_type'}), '(inverse, sr=sr, bins_per_octave=bins_per_octave, n_bins=S.shape[0],\n hop_length=hop_length, fmin=fmin, window=window, res_type=res_type)\n', (1425, 1565), False, 'from librosa import cqt, icqt\n'), ((430, 470), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (451, 470), True, 'import numpy as np\n'), ((1733, 1747), 'numpy.abs', 'np.abs', (['angles'], {}), '(angles)\n', (1739, 1747), True, 'import numpy as np\n')]
|
import sys
sys.path.append('C:/Python37/Lib/site-packages')
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import random
from pyOpenBCI import OpenBCICyton
import threading
import time
import numpy as np
from scipy import signal
from PIL import Image
img = Image.open('heart_1.png').convert('RGBA')
arr = np.array(img)
img2 = Image.open('heart_2.png').convert('RGBA')
arr2 = np.array(img2)
SCALE_FACTOR = (4500000)/24/(2**23-1) #From the pyOpenBCI repo
colors = 'rgbycmwr'
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title='Python OpenBCI GUI')
# title_graph = win.addPlot(row=0, col=0, colspan=4,title='Python OpenBCI GUI')
ts_plots = win.addPlot(row=0, col=0, colspan=4, title='Channel %d' % 1, labels={'left': 'uV'})
fft_plot = win.addPlot(row=2, col=0, rowspan=2, colspan=2, title='Filtered Plot', labels={'left': 'uV', 'bottom': 'Hz'})
fft_plot.setLimits(xMin=1,xMax=125, yMin=0, yMax=1e7)
ss_plot = win.addPlot(row=4, col=0, rowspan=2, colspan=2, title='signal',labels={'left':'Is beat'})
heart_im = win.addViewBox(lockAspect=True)
imv = pg.ImageItem()
heart_im.addItem(imv)
imv.setImage(arr)
data= [0]
def save_data(sample):
global data
data.append(sample.channels_data[0]*SCALE_FACTOR)
def updater():
global data, plots, colors
fs = 250 #Hz
disp_sec = 3 #Seconds to display
t_data = np.array(data[-(fs*disp_sec + 100):]).T #transpose data
#Notch Filter at 60 Hz
def notch_filter(val, data, fs=250, b=5):
notch_freq_Hz = np.array([float(val)])
for freq_Hz in np.nditer(notch_freq_Hz):
bp_stop_Hz = freq_Hz + 3.0 * np.array([-1, 1])
b, a = signal.butter(b, bp_stop_Hz / (fs / 2.0), 'bandstop')
fin = data = signal.lfilter(b, a, data)
return fin
def bandpass(start, stop, data, fs = 250):
bp_Hz = np.array([start, stop])
b, a = signal.butter(1, bp_Hz / (fs / 2.0), btype='bandpass')
return signal.lfilter(b, a, data, axis=0)
nf_data = np.array(notch_filter(60, t_data, b = 10))
nf_data = np.array(notch_filter(50, nf_data, b = 10))
bp_nf_data = np.array(bandpass(2, 50, nf_data))
ts_plots.clear()
ts_plots.plot(pen='r').setData(bp_nf_data[100:])
#fft of data
fft_plot.clear()
sp = np.absolute(np.fft.fft(bp_nf_data))
freq = np.fft.fftfreq(bp_nf_data.shape[-1], 1.0/fs)
fft_plot.plot(pen='y').setData(freq, sp)
one_beat = nf_data[100:300]
filt = one_beat[::-1]
ss_plot.clear()
new_arr = bp_nf_data > np.average(bp_nf_data) + np.std(bp_nf_data)
ss_plot.plot(pen='g').setData(new_arr[100:]*1)
if sum(new_arr[-100:]*1):
imv.setImage(arr2)
else:
imv.setImage(arr)
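# Illustrative only (added, not part of the original GUI): the same notch /
# band-pass recipe used inside updater(), applied to a synthetic 10 Hz sine
# with 60 Hz mains interference. The helper name and signal are assumptions.
def _demo_filters():
    fs = 250
    t = np.arange(fs * 3) / fs
    raw = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
    # 60 Hz band-stop (notch), 3 Hz wide on each side
    b, a = signal.butter(5, (np.array([-3.0, 3.0]) + 60.0) / (fs / 2.0), 'bandstop')
    notched = signal.lfilter(b, a, raw)
    # 2-50 Hz band-pass, as in the bandpass() helper above
    b, a = signal.butter(1, np.array([2.0, 50.0]) / (fs / 2.0), btype='bandpass')
    return signal.lfilter(b, a, notched)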
def start_board():
board = OpenBCICyton(port='COM5', daisy=False)
board.start_stream(save_data)
if __name__ == '__main__':
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
x = threading.Thread(target=start_board)
x.daemon = True
x.start()
timer = QtCore.QTimer()
timer.timeout.connect(updater)
timer.start(0)
QtGui.QApplication.instance().exec_()
|
[
"pyOpenBCI.OpenBCICyton",
"PIL.Image.open",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.average",
"numpy.fft.fftfreq",
"numpy.nditer",
"pyqtgraph.ImageItem",
"scipy.signal.butter",
"numpy.fft.fft",
"numpy.array",
"pyqtgraph.Qt.QtGui.QApplication",
"scipy.signal.lfilter",
"numpy.std",
"pyqtgraph.GraphicsWindow",
"sys.path.append",
"threading.Thread",
"pyqtgraph.Qt.QtCore.QTimer"
] |
[((12, 60), 'sys.path.append', 'sys.path.append', (['"""C:/Python37/Lib/site-packages"""'], {}), "('C:/Python37/Lib/site-packages')\n", (27, 60), False, 'import sys\n'), ((373, 386), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (381, 386), True, 'import numpy as np\n'), ((447, 461), 'numpy.array', 'np.array', (['img2'], {}), '(img2)\n', (455, 461), True, 'import numpy as np\n'), ((560, 582), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (578, 582), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((590, 635), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {'title': '"""Python OpenBCI GUI"""'}), "(title='Python OpenBCI GUI')\n", (607, 635), True, 'import pyqtgraph as pg\n'), ((1146, 1160), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (1158, 1160), True, 'import pyqtgraph as pg\n'), ((2443, 2489), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['bp_nf_data.shape[-1]', '(1.0 / fs)'], {}), '(bp_nf_data.shape[-1], 1.0 / fs)\n', (2457, 2489), True, 'import numpy as np\n'), ((2886, 2924), 'pyOpenBCI.OpenBCICyton', 'OpenBCICyton', ([], {'port': '"""COM5"""', 'daisy': '(False)'}), "(port='COM5', daisy=False)\n", (2898, 2924), False, 'from pyOpenBCI import OpenBCICyton\n'), ((324, 349), 'PIL.Image.open', 'Image.open', (['"""heart_1.png"""'], {}), "('heart_1.png')\n", (334, 349), False, 'from PIL import Image\n'), ((397, 422), 'PIL.Image.open', 'Image.open', (['"""heart_2.png"""'], {}), "('heart_2.png')\n", (407, 422), False, 'from PIL import Image\n'), ((1438, 1477), 'numpy.array', 'np.array', (['data[-(fs * disp_sec + 100):]'], {}), '(data[-(fs * disp_sec + 100):])\n', (1446, 1477), True, 'import numpy as np\n'), ((1643, 1667), 'numpy.nditer', 'np.nditer', (['notch_freq_Hz'], {}), '(notch_freq_Hz)\n', (1652, 1667), True, 'import numpy as np\n'), ((1945, 1968), 'numpy.array', 'np.array', (['[start, stop]'], {}), '([start, stop])\n', (1953, 1968), True, 'import numpy as np\n'), ((1985, 2039), 'scipy.signal.butter', 'signal.butter', (['(1)', '(bp_Hz / (fs / 2.0))'], {'btype': '"""bandpass"""'}), "(1, bp_Hz / (fs / 2.0), btype='bandpass')\n", (1998, 2039), False, 'from scipy import signal\n'), ((2056, 2090), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'data'], {'axis': '(0)'}), '(b, a, data, axis=0)\n', (2070, 2090), False, 'from scipy import signal\n'), ((2407, 2429), 'numpy.fft.fft', 'np.fft.fft', (['bp_nf_data'], {}), '(bp_nf_data)\n', (2417, 2429), True, 'import numpy as np\n'), ((3080, 3116), 'threading.Thread', 'threading.Thread', ([], {'target': 'start_board'}), '(target=start_board)\n', (3096, 3116), False, 'import threading\n'), ((3180, 3195), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (3193, 3195), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1749, 1802), 'scipy.signal.butter', 'signal.butter', (['b', '(bp_stop_Hz / (fs / 2.0))', '"""bandstop"""'], {}), "(b, bp_stop_Hz / (fs / 2.0), 'bandstop')\n", (1762, 1802), False, 'from scipy import signal\n'), ((1829, 1855), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (1843, 1855), False, 'from scipy import signal\n'), ((2648, 2670), 'numpy.average', 'np.average', (['bp_nf_data'], {}), '(bp_nf_data)\n', (2658, 2670), True, 'import numpy as np\n'), ((2673, 2691), 'numpy.std', 'np.std', (['bp_nf_data'], {}), '(bp_nf_data)\n', (2679, 2691), True, 'import numpy as np\n'), ((3273, 3302), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (3300, 3302), False, 'from pyqtgraph.Qt 
import QtGui, QtCore\n'), ((1711, 1728), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1719, 1728), True, 'import numpy as np\n')]
|
# [Round 1] Friends 4 Block
import numpy as np
def new_board(m, n, board):
remove = np.array([[True for _ in range(m)] for _ in range(n)])
count = 0
for i in range(n - 1):
for j in range(m - 1):
cur = board[i,j]
if cur == "-1":
break
if cur == board[i,j+1] and cur == board[i+1,j] and cur == board[i+1,j+1]:
remove[i,j] = False
remove[i,j+1] = False
remove[i+1,j] = False
remove[i+1,j+1] = False
count += 1
new_map = []
remove_count = 0
for i in range(n):
tmp = board[i][remove[i]].tolist()
while len(tmp) < m:
tmp.append("-1")
remove_count += 1
new_map.append(tmp)
return new_map, count, remove_count
def solution(m, n, board):
answer = 0
b = np.array(list(map(lambda x:list(x), board)))
b_t = np.transpose(b)
new_b = b_t[...,::-1]
count = -1
while count != 0:
        new_b, count, remove_count = new_board(m, n, new_b)
answer += remove_count
new_b = np.array(new_b)
return answer
'''
Grading started.
Accuracy tests
Test 1  > Passed (0.13ms, 27.9MB)
Test 2  > Passed (0.19ms, 27.8MB)
Test 3  > Passed (0.26ms, 27.7MB)
Test 4  > Passed (1.92ms, 28MB)
Test 5  > Passed (103.88ms, 28MB)
Test 6  > Passed (8.26ms, 28MB)
Test 7  > Passed (1.22ms, 27.6MB)
Test 8  > Passed (2.04ms, 27.7MB)
Test 9  > Passed (0.15ms, 27.6MB)
Test 10 > Passed (0.99ms, 27.6MB)
Test 11 > Passed (2.55ms, 28MB)
Grading result
Accuracy: 100.0
Total: 100.0 / 100.0
'''
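# Example usage (added for illustration): the well-known sample board from the
# problem statement. The expected answer of 14 is an assumption based on that
# published example, not part of the original submission.
if __name__ == "__main__":
    print(solution(4, 5, ["CCBDE", "AAADE", "AAABF", "CCBBF"]))  # expected 14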
|
[
"numpy.array",
"numpy.transpose"
] |
[((915, 930), 'numpy.transpose', 'np.transpose', (['b'], {}), '(b)\n', (927, 930), True, 'import numpy as np\n'), ((1101, 1116), 'numpy.array', 'np.array', (['new_b'], {}), '(new_b)\n', (1109, 1116), True, 'import numpy as np\n')]
|
import math
import numpy
def hill_chart_parametrisation(h, turbine_specs):
"""
Calculates power and flow rate through bulb turbines based on Aggidis and Feather (2012)
f_g = grid frequency, g_p = generator poles,
t_cap = Turbine capacity, h = head difference, dens = water density
"""
turb_sp = 2 * 60 * turbine_specs["f_g"] / turbine_specs["g_p"]
# Step 1: Calculate Hill Chart based on empirical equations
n_11 = turb_sp * turbine_specs["t_d"] / math.sqrt(h)
if n_11 < 255:
q_11 = 0.0166 * n_11 + 0.4861
else:
q_11 = 4.75
q = q_11 * (turbine_specs["t_d"] ** 2) * math.sqrt(h)
h_efficiency = -0.0019 * n_11 + 1.2461
# h_efficiency = 1
p1 = turbine_specs["dens"] * turbine_specs["g"] * q * h / (10 ** 6)
# Step 2 - Adjust Curve according to capacity
if p1 * h_efficiency < turbine_specs["t_cap"]: # 97.25% Gearbox efficiency
p2 = p1 * 0.9725 * h_efficiency
else:
p2 = turbine_specs["t_cap"] * 0.9725
p1 = p2 / (h_efficiency * 0.9725)
q = p1 * (10 ** 6) / (turbine_specs["dens"] * turbine_specs["g"] * h)
return p2, q
def ideal_turbine_parametrisation(h, turbine_specs):
"""
Calculates power and flow through a bulb turbine excluding efficiency loses
"""
q = math.pi * ((turbine_specs["t_d"] / 2)**2) * math.sqrt(2 * turbine_specs["g"] * h)
p1 = turbine_specs["dens"] * turbine_specs["g"] * q * h / (10 ** 6)
if p1 < turbine_specs["t_cap"]:
p2 = p1
else:
p2 = turbine_specs["t_cap"]
q = p2 * (10 ** 6) / (turbine_specs["dens"] * turbine_specs["g"] * h)
return p2, q
def turbine_parametrisation(h, turbine_specs):
"""
Chooses between hill chart or idealised turbine parameterisation.
"""
if turbine_specs["options"] == 0:
p, q = hill_chart_parametrisation(h, turbine_specs)
else:
p, q = ideal_turbine_parametrisation(h, turbine_specs)
return p, q
def gate_sluicing(h, ramp_f, N_s, q_s0, sluice_specs, flux_limiter=0.2):
"""
Calculates overall flow through power plant sluice gates given the status of the operation
"""
temp = ramp_f ** 2 * N_s * sluice_specs["c_d"] * sluice_specs["a_s"] * math.sqrt(2 * sluice_specs["g"] * abs(h))
if ramp_f >= 0.5 and abs(temp) >= abs(q_s0) > 0.:
q_s = -numpy.sign(h) * min(abs((1 + flux_limiter) * q_s0), abs(temp))
elif ramp_f >= 0.5 and abs(q_s0) >= abs(temp):
q_s = -numpy.sign(h) * max(abs((1 - flux_limiter) * q_s0), abs(temp))
else:
q_s = -numpy.sign(h) * temp
return q_s
def turbine_sluicing(h, ramp_f, N_t, q_t0, sluice_specs, turbine_specs, flux_limiter=0.2):
"""
Calculates flow through turbines operating in sluicing mode
"""
temp = ramp_f ** 2 * N_t * sluice_specs["c_t"] * (math.pi * (turbine_specs["t_d"] / 2) ** 2) *\
math.sqrt(2 * sluice_specs["g"] * abs(h))
if ramp_f >= 0.5 and abs(temp) >= abs(q_t0):
q_t = -numpy.sign(h) * min(abs((1 + flux_limiter) * q_t0), abs(temp))
elif ramp_f >= 0.5 and abs(q_t0) >= abs(temp):
q_t = -numpy.sign(h) * max(abs((1 - flux_limiter) * q_t0), abs(temp))
else:
q_t = -numpy.sign(h) * temp
if abs(h) != 0.0 and ramp_f >= 0.95 and q_t == 0.:
q_t = -numpy.sign(h) * temp
return q_t
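# Hedged usage sketch (added, not part of the original module): an example
# turbine specification dictionary and a single head difference. All numbers
# below are illustrative assumptions, not recommended design values.
if __name__ == "__main__":
    example_specs = {
        "f_g": 50.0,      # grid frequency [Hz]
        "g_p": 96,        # number of generator poles
        "t_d": 7.35,      # turbine diameter [m]
        "t_cap": 20.0,    # turbine capacity [MW]
        "dens": 1025.0,   # water density [kg/m^3]
        "g": 9.81,        # gravitational acceleration [m/s^2]
        "options": 0,     # 0 = hill chart, otherwise idealised turbine
    }
    power_mw, flow_m3s = turbine_parametrisation(4.0, example_specs)
    print(power_mw, flow_m3s)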
|
[
"math.sqrt",
"numpy.sign"
] |
[((484, 496), 'math.sqrt', 'math.sqrt', (['h'], {}), '(h)\n', (493, 496), False, 'import math\n'), ((631, 643), 'math.sqrt', 'math.sqrt', (['h'], {}), '(h)\n', (640, 643), False, 'import math\n'), ((1346, 1383), 'math.sqrt', 'math.sqrt', (["(2 * turbine_specs['g'] * h)"], {}), "(2 * turbine_specs['g'] * h)\n", (1355, 1383), False, 'import math\n'), ((2344, 2357), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2354, 2357), False, 'import numpy\n'), ((2984, 2997), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2994, 2997), False, 'import numpy\n'), ((3293, 3306), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (3303, 3306), False, 'import numpy\n'), ((2473, 2486), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2483, 2486), False, 'import numpy\n'), ((2561, 2574), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (2571, 2574), False, 'import numpy\n'), ((3113, 3126), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (3123, 3126), False, 'import numpy\n'), ((3201, 3214), 'numpy.sign', 'numpy.sign', (['h'], {}), '(h)\n', (3211, 3214), False, 'import numpy\n')]
|
# standard library imports
import io
import logging
import struct
import warnings
# 3rd party library imports
import numpy as np
from uuid import UUID
# local imports
from glymur import Jp2k
from .lib import tiff as libtiff
from .jp2box import UUIDBox
# Map the numeric TIFF datatypes to the format string used by the struct module
# and keep track of how wide they are.
tag_dtype = {
1: {'format': 'B', 'nbytes': 1},
2: {'format': 'B', 'nbytes': 1},
3: {'format': 'H', 'nbytes': 2},
4: {'format': 'I', 'nbytes': 4},
5: {'format': 'II', 'nbytes': 8},
7: {'format': 'B', 'nbytes': 1},
9: {'format': 'i', 'nbytes': 4},
10: {'format': 'ii', 'nbytes': 8},
11: {'format': 'f', 'nbytes': 4},
12: {'format': 'd', 'nbytes': 8},
13: {'format': 'I', 'nbytes': 4},
16: {'format': 'Q', 'nbytes': 8},
17: {'format': 'q', 'nbytes': 8},
18: {'format': 'Q', 'nbytes': 8}
}
# Mnemonics for the two TIFF format version numbers.
_TIFF = 42
_BIGTIFF = 43
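# Hedged usage sketch (added, not part of the original module): Tiff2Jp2k is
# designed to be used as a context manager, converting the image data first
# and optionally appending the TIFF IFD as a UUID box. The paths below are
# placeholders, not real files.
#
#     from pathlib import Path
#     with Tiff2Jp2k(Path('input.tif'), Path('output.jp2'),
#                    tilesize=(512, 512)) as converter:
#         converter.run()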
class Tiff2Jp2k(object):
"""
Attributes
----------
found_geotiff_tags : bool
If true, then this TIFF must be a GEOTIFF
tiff_filename : path or str
Path to TIFF file.
jp2_filename : path or str
Path to JPEG 2000 file to be written.
tilesize : tuple
The dimensions of a tile in the JP2K file.
create_uuid : bool
Create a UUIDBox for the TIFF IFD metadata.
version : int
Identifies the TIFF as 32-bit TIFF or 64-bit TIFF.
"""
def __init__(
self, tiff_filename, jp2_filename, tilesize=None,
verbosity=logging.CRITICAL, create_uuid=True, **kwargs
):
self.tiff_filename = tiff_filename
if not self.tiff_filename.exists():
raise FileNotFoundError(f'{tiff_filename} does not exist')
self.jp2_filename = jp2_filename
self.tilesize = tilesize
self.create_uuid = create_uuid
self.kwargs = kwargs
self.logger = logging.getLogger('tiff2jp2')
self.logger.setLevel(verbosity)
ch = logging.StreamHandler()
ch.setLevel(verbosity)
self.logger.addHandler(ch)
def __enter__(self):
self.tiff_fp = libtiff.open(self.tiff_filename)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
libtiff.close(self.tiff_fp)
def run(self):
self.copy_image()
if self.create_uuid:
self.copy_metadata()
def copy_metadata(self):
"""
Copy over the TIFF IFD. Place it in a UUID box. Append to the JPEG
2000 file.
"""
# create a bytesio object for the IFD
b = io.BytesIO()
with open(self.tiff_filename, 'rb') as tfp:
endian = self._process_header(b, tfp)
self._process_tags(b, tfp, endian)
if self.found_geotiff_tags:
# geotiff UUID
uuid = UUID('b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03')
payload = b.getvalue()
else:
# Make it an exif UUID.
uuid = UUID(bytes=b'JpgTiffExif->JP2')
payload = b'EXIF\0\0' + b.getvalue()
# the length of the box is the length of the payload plus 8 bytes
# to store the length of the box and the box ID
box_length = len(payload) + 8
uuid_box = UUIDBox(uuid, payload, box_length)
with open(self.jp2_filename, mode='ab') as f:
uuid_box.write(f)
def _process_tags(self, b, tfp, endian):
self.found_geotiff_tags = False
tag_length = 20 if self.version == _BIGTIFF else 12
# keep this for writing to the UUID, which will always be 32-bit
little_tiff_tag_length = 12
# how many tags?
if self.version == _BIGTIFF:
buffer = tfp.read(8)
num_tags, = struct.unpack(endian + 'Q', buffer)
else:
buffer = tfp.read(2)
num_tags, = struct.unpack(endian + 'H', buffer)
write_buffer = struct.pack('<H', num_tags)
b.write(write_buffer)
        # Ok, so now we have the IFD main body, but following that we have
        # the tag payloads that cannot fit into 4 bytes.
        #
        # Read the IFD main body from the TIFF. As it might be big endian,
        # we cannot just process it as one big chunk.
buffer = tfp.read(num_tags * tag_length)
start_of_tags_position = b.tell()
after_ifd_position = start_of_tags_position + len(buffer)
if self.version == _BIGTIFF:
tag_format_str = endian + 'HHQQ'
tag_payload_offset = 12
max_tag_payload_length = 8
else:
tag_format_str = endian + 'HHII'
tag_payload_offset = 8
max_tag_payload_length = 4
for idx in range(num_tags):
self.logger.debug(f'tag #: {idx}')
b.seek(start_of_tags_position + idx * little_tiff_tag_length)
tag_data = buffer[idx * tag_length:(idx + 1) * tag_length]
tag, dtype, nvalues, offset = struct.unpack(tag_format_str, tag_data) # noqa : E501
if tag == 34735:
self.found_geotiff_tags = True
payload_length = tag_dtype[dtype]['nbytes'] * nvalues
if payload_length > max_tag_payload_length:
# the payload does not fit into the tag entry, so use the
# offset to seek to that position
current_position = tfp.tell()
tfp.seek(offset)
payload_buffer = tfp.read(payload_length)
tfp.seek(current_position)
# read the payload from the TIFF
payload_format = tag_dtype[dtype]['format'] * nvalues
payload = struct.unpack(
endian + payload_format, payload_buffer
)
# write the tag entry to the UUID
new_offset = after_ifd_position
outbuffer = struct.pack(
'<HHII', tag, dtype, nvalues, new_offset
)
b.write(outbuffer)
# now write the payload at the outlying position and then come
# back to the same position in the file stream
cpos = b.tell()
b.seek(new_offset)
out_format = '<' + tag_dtype[dtype]['format'] * nvalues
outbuffer = struct.pack(out_format, *payload)
b.write(outbuffer)
# keep track of the next position to write out-of-IFD data
after_ifd_position = b.tell()
b.seek(cpos)
else:
# the payload DOES fit into the TIFF tag entry
payload_buffer = tag_data[tag_payload_offset:]
# read ALL of the payload buffer
payload_format = (
tag_dtype[dtype]['format']
* int(max_tag_payload_length / tag_dtype[dtype]['nbytes'])
)
payload = struct.unpack(
endian + payload_format, payload_buffer
)
# Extract the actual payload. Two things going on here. First
# of all, not all of the items may be used. For example, if
# the payload length is 4 bytes but the format string was HHH,
# the that last 16 bit value is not wanted, so we should
# discard it. Second thing is that the signed and unsigned
# rational datatypes effectively have twice the number of
# values so we need to account for that.
if dtype in [5, 10]:
payload = payload[:2 * nvalues]
else:
payload = payload[:nvalues]
# Does it fit into the UUID tag entry (4 bytes)?
if payload_length <= 4:
# so write it back into the tag entry in the UUID
outbuffer = struct.pack('<HHI', tag, dtype, nvalues)
b.write(outbuffer)
payload_format = tag_dtype[dtype]['format'] * nvalues
# we may need to alter the output format
if payload_format in ['H', 'B', 'I']:
# just write it as an integer
payload_format = 'I'
outbuffer = struct.pack('<' + payload_format, *payload)
b.write(outbuffer)
else:
# UUID: write the tag entry after the IFD
new_offset = after_ifd_position
outbuffer = struct.pack(
'<HHII', tag, dtype, nvalues, new_offset
)
b.write(outbuffer)
# now write the payload at the outlying position and then
# come back to the same position in the file stream
cpos = b.tell()
b.seek(new_offset)
out_format = '<' + tag_dtype[dtype]['format'] * nvalues
outbuffer = struct.pack(out_format, *payload)
b.write(outbuffer)
# keep track of the next position to write out-of-IFD data
after_ifd_position = b.tell()
b.seek(cpos)
def _process_header(self, b, tfp):
buffer = tfp.read(4)
data = struct.unpack('BB', buffer[:2])
# big endian or little endian?
if data[0] == 73 and data[1] == 73:
# little endian
endian = '<'
elif data[0] == 77 and data[1] == 77:
# big endian
endian = '>'
else:
msg = (
f"The byte order indication in the TIFF header "
f"({data}) is invalid. It should be either "
f"{bytes([73, 73])} or {bytes([77, 77])}."
)
raise RuntimeError(msg)
# version number and offset to the first IFD
version, = struct.unpack(endian + 'H', buffer[2:4])
self.version = _TIFF if version == 42 else _BIGTIFF
if self.version == _BIGTIFF:
buffer = tfp.read(12)
_, _, offset = struct.unpack(endian + 'HHQ', buffer)
else:
buffer = tfp.read(4)
offset, = struct.unpack(endian + 'I', buffer)
tfp.seek(offset)
# write this 32-bit header into the UUID, no matter if we had bigtiff
# or regular tiff or big endian
data = struct.pack('<BBHI', 73, 73, 42, 8)
b.write(data)
return endian
def copy_image(self):
"""
Transfer the image data from the TIFF to the JPEG 2000 file. If the
TIFF has a stripped configuration, this may be somewhat inefficient.
"""
if libtiff.isTiled(self.tiff_fp):
isTiled = True
else:
isTiled = False
photo = libtiff.getFieldDefaulted(self.tiff_fp, 'Photometric')
imagewidth = libtiff.getFieldDefaulted(self.tiff_fp, 'ImageWidth')
imageheight = libtiff.getFieldDefaulted(self.tiff_fp, 'ImageLength')
spp = libtiff.getFieldDefaulted(self.tiff_fp, 'SamplesPerPixel')
sf = libtiff.getFieldDefaulted(self.tiff_fp, 'SampleFormat')
bps = libtiff.getFieldDefaulted(self.tiff_fp, 'BitsPerSample')
planar = libtiff.getFieldDefaulted(self.tiff_fp, 'PlanarConfig')
if sf not in [libtiff.SampleFormat.UINT, libtiff.SampleFormat.VOID]:
sampleformat_str = self.tagvalue2str(libtiff.SampleFormat, sf)
msg = (
f"The TIFF SampleFormat is {sampleformat_str}. Only UINT "
"and VOID are supported."
)
raise RuntimeError(msg)
if bps not in [8, 16]:
msg = (
f"The TIFF BitsPerSample is {bps}. Only 8 and 16 bits per "
"sample are supported."
)
raise RuntimeError(msg)
if bps == 8 and sf == libtiff.SampleFormat.UINT:
dtype = np.uint8
if bps == 16 and sf == libtiff.SampleFormat.UINT:
dtype = np.uint16
if (
planar == libtiff.PlanarConfig.SEPARATE
and self.tilesize is not None
):
msg = (
"A separated planar configuration is not supported when a "
"tile size is specified."
)
raise RuntimeError(msg)
if libtiff.isTiled(self.tiff_fp):
tw = libtiff.getFieldDefaulted(self.tiff_fp, 'TileWidth')
th = libtiff.getFieldDefaulted(self.tiff_fp, 'TileLength')
else:
tw = imagewidth
rps = libtiff.getFieldDefaulted(self.tiff_fp, 'RowsPerStrip')
num_strips = libtiff.numberOfStrips(self.tiff_fp)
if self.tilesize is not None:
jth, jtw = self.tilesize
            num_jp2k_tile_rows = int(np.ceil(imageheight / jth))
num_jp2k_tile_cols = int(np.ceil(imagewidth / jtw))
if photo == libtiff.Photometric.YCBCR:
# Using the RGBA interface is the only reasonable way to deal with
# this.
use_rgba_interface = True
elif photo == libtiff.Photometric.PALETTE:
# Using the RGBA interface is the only reasonable way to deal with
# this. The single plane gets turned into RGB.
use_rgba_interface = True
spp = 3
else:
use_rgba_interface = False
jp2 = Jp2k(
self.jp2_filename,
shape=(imageheight, imagewidth, spp),
tilesize=self.tilesize,
**self.kwargs
)
if not libtiff.RGBAImageOK(self.tiff_fp):
photometric_string = self.tagvalue2str(libtiff.Photometric, photo)
msg = (
f"The TIFF Photometric tag is {photometric_string} and is "
"not supported."
)
raise RuntimeError(msg)
elif self.tilesize is None and libtiff.RGBAImageOK(self.tiff_fp):
# if no jp2k tiling was specified and if the image is ok to read
# via the RGBA interface, then just do that.
msg = (
"Reading using the RGBA interface, writing as a single tile "
"image."
)
self.logger.info(msg)
if photo not in [
libtiff.Photometric.MINISWHITE,
libtiff.Photometric.MINISBLACK,
libtiff.Photometric.PALETTE,
libtiff.Photometric.YCBCR,
libtiff.Photometric.RGB
]:
photostr = self.tagvalue2str(libtiff.Photometric, photo)
msg = (
"Beware, the RGBA interface to attempt to read this TIFF "
f"when it has a PhotometricInterpretation of {photostr}."
)
warnings.warn(msg)
image = libtiff.readRGBAImageOriented(self.tiff_fp)
if spp < 4:
image = image[:, :, :3]
jp2[:] = image
elif isTiled and self.tilesize is not None:
num_tiff_tile_cols = int(np.ceil(imagewidth / tw))
partial_jp2_tile_rows = (imageheight / jth) != (imageheight // jth)
partial_jp2_tile_cols = (imagewidth / jtw) != (imagewidth // jtw)
rgba_tile = np.zeros((th, tw, 4), dtype=np.uint8)
self.logger.debug(f'image: {imageheight} x {imagewidth}')
self.logger.debug(f'jptile: {jth} x {jtw}')
self.logger.debug(f'ttile: {th} x {tw}')
for idx, tilewriter in enumerate(jp2.get_tilewriters()):
# populate the jp2k tile with tiff tiles
self.logger.info(f'Tile: #{idx}')
self.logger.debug(f'J tile row: #{idx // num_jp2k_tile_cols}')
self.logger.debug(f'J tile col: #{idx % num_jp2k_tile_cols}')
jp2k_tile = np.zeros((jth, jtw, spp), dtype=dtype)
tiff_tile = np.zeros((th, tw, spp), dtype=dtype)
jp2k_tile_row = int(np.ceil(idx // num_jp2k_tile_cols))
jp2k_tile_col = int(np.ceil(idx % num_jp2k_tile_cols))
# the coordinates of the upper left pixel of the jp2k tile
julr, julc = jp2k_tile_row * jth, jp2k_tile_col * jtw
# loop while the upper left corner of the current tiff file is
# less than the lower left corner of the jp2k tile
r = julr
while (r // th) * th < min(julr + jth, imageheight):
c = julc
tilenum = libtiff.computeTile(self.tiff_fp, c, r, 0, 0)
self.logger.debug(f'TIFF tile # {tilenum}')
tiff_tile_row = int(np.ceil(tilenum // num_tiff_tile_cols))
tiff_tile_col = int(np.ceil(tilenum % num_tiff_tile_cols))
# the coordinates of the upper left pixel of the TIFF tile
tulr = tiff_tile_row * th
tulc = tiff_tile_col * tw
# loop while the left corner of the current tiff tile is
# less than the right hand corner of the jp2k tile
while ((c // tw) * tw) < min(julc + jtw, imagewidth):
if use_rgba_interface:
libtiff.readRGBATile(
self.tiff_fp, tulc, tulr, rgba_tile
)
# flip the tile upside down!!
tiff_tile = np.flipud(rgba_tile[:, :, :3])
else:
libtiff.readEncodedTile(
self.tiff_fp, tilenum, tiff_tile
)
# determine how to fit this tiff tile into the jp2k
# tile
#
# these are the section coordinates in image space
ulr = max(julr, tulr)
llr = min(julr + jth, tulr + th)
ulc = max(julc, tulc)
urc = min(julc + jtw, tulc + tw)
# convert to JP2K tile coordinates
jrows = slice(ulr % jth, (llr - 1) % jth + 1)
jcols = slice(ulc % jtw, (urc - 1) % jtw + 1)
# convert to TIFF tile coordinates
trows = slice(ulr % th, (llr - 1) % th + 1)
tcols = slice(ulc % tw, (urc - 1) % tw + 1)
jp2k_tile[jrows, jcols, :] = tiff_tile[trows, tcols, :]
# move exactly one tiff tile over
c += tw
tilenum = libtiff.computeTile(self.tiff_fp, c, r, 0, 0)
tiff_tile_row = int(
np.ceil(tilenum // num_tiff_tile_cols)
)
tiff_tile_col = int(
np.ceil(tilenum % num_tiff_tile_cols)
)
# the coordinates of the upper left pixel of the TIFF
# tile
tulr = tiff_tile_row * th
tulc = tiff_tile_col * tw
r += th
# last tile column? If so, we may have a partial tile.
if (
partial_jp2_tile_cols
and jp2k_tile_col == num_jp2k_tile_cols - 1
):
last_j2k_cols = slice(0, imagewidth - julc)
jp2k_tile = jp2k_tile[:, last_j2k_cols, :].copy()
if (
partial_jp2_tile_rows
and jp2k_tile_row == num_jp2k_tile_rows - 1
):
last_j2k_rows = slice(0, imageheight - julr)
jp2k_tile = jp2k_tile[last_j2k_rows, :, :].copy()
tilewriter[:] = jp2k_tile
elif not isTiled and self.tilesize is not None:
num_strips = libtiff.numberOfStrips(self.tiff_fp)
num_jp2k_tile_cols = int(np.ceil(imagewidth / jtw))
partial_jp2_tile_rows = (imageheight / jth) != (imageheight // jth)
partial_jp2_tile_cols = (imagewidth / jtw) != (imagewidth // jtw)
tiff_strip = np.zeros((rps, imagewidth, spp), dtype=dtype)
rgba_strip = np.zeros((rps, imagewidth, 4), dtype=np.uint8)
for idx, tilewriter in enumerate(jp2.get_tilewriters()):
self.logger.info(f'Tile: #{idx}')
jp2k_tile = np.zeros((jth, jtw, spp), dtype=dtype)
jp2k_tile_row = idx // num_jp2k_tile_cols
jp2k_tile_col = idx % num_jp2k_tile_cols
# the coordinates of the upper left pixel of the jp2k tile
julr, julc = jp2k_tile_row * jth, jp2k_tile_col * jtw
# Populate the jp2k tile with tiff strips.
# Move by strips from the start of the jp2k tile to the bottom
# of the jp2k tile. That last strip may be partially empty,
# worry about that later.
#
# loop while the upper left corner of the current tiff file is
# less than the lower left corner of the jp2k tile
r = julr
while (r // rps) * rps < min(julr + jth, imageheight):
stripnum = libtiff.computeStrip(self.tiff_fp, r, 0)
if stripnum >= num_strips:
# we've moved past the end of the tiff
break
if use_rgba_interface:
# must use the first row in the strip
libtiff.readRGBAStrip(
self.tiff_fp, stripnum * rps, rgba_strip
)
# must flip the rows (!!) and get rid of the alpha
# plane
tiff_strip = np.flipud(rgba_strip[:, :, :spp])
else:
libtiff.readEncodedStrip(
self.tiff_fp, stripnum, tiff_strip
)
# the coordinates of the upper left pixel of the TIFF
# strip
tulr = stripnum * rps
tulc = 0
# determine how to fit this tiff strip into the jp2k
# tile
#
# these are the section coordinates in image space
ulr = max(julr, tulr)
llr = min(julr + jth, tulr + rps)
ulc = max(julc, tulc)
urc = min(julc + jtw, tulc + tw)
# convert to JP2K tile coordinates
jrows = slice(ulr % jth, (llr - 1) % jth + 1)
jcols = slice(ulc % jtw, (urc - 1) % jtw + 1)
# convert to TIFF strip coordinates
trows = slice(ulr % rps, (llr - 1) % rps + 1)
tcols = slice(ulc % tw, (urc - 1) % tw + 1)
jp2k_tile[jrows, jcols, :] = tiff_strip[trows, tcols, :]
r += rps
# last tile column? If so, we may have a partial tile.
                # j2k_cols alone is not sufficient here; the slice must be
                # shortened to the actual remaining image width
if (
partial_jp2_tile_cols
and jp2k_tile_col == num_jp2k_tile_cols - 1
):
# decrease the number of columns by however many it sticks
# over the image width
last_j2k_cols = slice(0, imagewidth - julc)
jp2k_tile = jp2k_tile[:, last_j2k_cols, :].copy()
if (
partial_jp2_tile_rows
and stripnum == num_strips - 1
):
# decrease the number of rows by however many it sticks
# over the image height
last_j2k_rows = slice(0, imageheight - julr)
jp2k_tile = jp2k_tile[last_j2k_rows, :, :].copy()
tilewriter[:] = jp2k_tile
def tagvalue2str(self, cls, tag_value):
"""
Take a class that encompasses all of a tag's allowed values and find
the name of that value.
"""
tag_value_string = [
key for key in dir(cls) if getattr(cls, key) == tag_value
][0]
return tag_value_string
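# A minimal standalone sketch (not used by the converter methods above) of how a
# single 12-byte classic-TIFF IFD tag entry is unpacked, assuming little-endian
# byte order and the same '<HHII' layout written above: a 2-byte tag ID, a 2-byte
# datatype code, a 4-byte value count and a 4-byte offset/inline-payload field.
def _demo_parse_classic_tag_entry(entry):
    tag, dtype, nvalues = struct.unpack('<HHI', entry[:8])
    raw_payload = entry[8:12]  # interpreted elsewhere as an offset or an inline value
    return tag, dtype, nvalues, raw_payload
if __name__ == '__main__':
    # tag 34735 is the GeoTIFF GeoKeyDirectoryTag that the code above checks for
    demo_entry = struct.pack('<HHII', 34735, 3, 4, 1024)
    print(_demo_parse_classic_tag_entry(demo_entry))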
|
[
"logging.getLogger",
"numpy.ceil",
"logging.StreamHandler",
"uuid.UUID",
"numpy.flipud",
"glymur.Jp2k",
"io.BytesIO",
"struct.pack",
"numpy.zeros",
"struct.unpack",
"warnings.warn"
] |
[((1982, 2011), 'logging.getLogger', 'logging.getLogger', (['"""tiff2jp2"""'], {}), "('tiff2jp2')\n", (1999, 2011), False, 'import logging\n'), ((2065, 2088), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2086, 2088), False, 'import logging\n'), ((2672, 2684), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2682, 2684), False, 'import io\n'), ((4002, 4029), 'struct.pack', 'struct.pack', (['"""<H"""', 'num_tags'], {}), "('<H', num_tags)\n", (4013, 4029), False, 'import struct\n'), ((9460, 9491), 'struct.unpack', 'struct.unpack', (['"""BB"""', 'buffer[:2]'], {}), "('BB', buffer[:2])\n", (9473, 9491), False, 'import struct\n'), ((10068, 10108), 'struct.unpack', 'struct.unpack', (["(endian + 'H')", 'buffer[2:4]'], {}), "(endian + 'H', buffer[2:4])\n", (10081, 10108), False, 'import struct\n'), ((10570, 10605), 'struct.pack', 'struct.pack', (['"""<BBHI"""', '(73)', '(73)', '(42)', '(8)'], {}), "('<BBHI', 73, 73, 42, 8)\n", (10581, 10605), False, 'import struct\n'), ((13589, 13694), 'glymur.Jp2k', 'Jp2k', (['self.jp2_filename'], {'shape': '(imageheight, imagewidth, spp)', 'tilesize': 'self.tilesize'}), '(self.jp2_filename, shape=(imageheight, imagewidth, spp), tilesize=self\n .tilesize, **self.kwargs)\n', (13593, 13694), False, 'from glymur import Jp2k\n'), ((2919, 2963), 'uuid.UUID', 'UUID', (['"""b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03"""'], {}), "('b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03')\n", (2923, 2963), False, 'from uuid import UUID\n'), ((3068, 3099), 'uuid.UUID', 'UUID', ([], {'bytes': "b'JpgTiffExif->JP2'"}), "(bytes=b'JpgTiffExif->JP2')\n", (3072, 3099), False, 'from uuid import UUID\n'), ((3835, 3870), 'struct.unpack', 'struct.unpack', (["(endian + 'Q')", 'buffer'], {}), "(endian + 'Q', buffer)\n", (3848, 3870), False, 'import struct\n'), ((3942, 3977), 'struct.unpack', 'struct.unpack', (["(endian + 'H')", 'buffer'], {}), "(endian + 'H', buffer)\n", (3955, 3977), False, 'import struct\n'), ((5041, 5080), 'struct.unpack', 'struct.unpack', (['tag_format_str', 'tag_data'], {}), '(tag_format_str, tag_data)\n', (5054, 5080), False, 'import struct\n'), ((10268, 10305), 'struct.unpack', 'struct.unpack', (["(endian + 'HHQ')", 'buffer'], {}), "(endian + 'HHQ', buffer)\n", (10281, 10305), False, 'import struct\n'), ((10375, 10410), 'struct.unpack', 'struct.unpack', (["(endian + 'I')", 'buffer'], {}), "(endian + 'I', buffer)\n", (10388, 10410), False, 'import struct\n'), ((5747, 5801), 'struct.unpack', 'struct.unpack', (['(endian + payload_format)', 'payload_buffer'], {}), '(endian + payload_format, payload_buffer)\n', (5760, 5801), False, 'import struct\n'), ((5967, 6020), 'struct.pack', 'struct.pack', (['"""<HHII"""', 'tag', 'dtype', 'nvalues', 'new_offset'], {}), "('<HHII', tag, dtype, nvalues, new_offset)\n", (5978, 6020), False, 'import struct\n'), ((6405, 6438), 'struct.pack', 'struct.pack', (['out_format', '*payload'], {}), '(out_format, *payload)\n', (6416, 6438), False, 'import struct\n'), ((7026, 7080), 'struct.unpack', 'struct.unpack', (['(endian + payload_format)', 'payload_buffer'], {}), '(endian + payload_format, payload_buffer)\n', (7039, 7080), False, 'import struct\n'), ((12997, 13022), 'numpy.ceil', 'np.ceil', (['(imagewidth / jtw)'], {}), '(imagewidth / jtw)\n', (13004, 13022), True, 'import numpy as np\n'), ((13061, 13086), 'numpy.ceil', 'np.ceil', (['(imagewidth / jtw)'], {}), '(imagewidth / jtw)\n', (13068, 13086), True, 'import numpy as np\n'), ((8004, 8044), 'struct.pack', 'struct.pack', (['"""<HHI"""', 'tag', 'dtype', 'nvalues'], {}), "('<HHI', tag, 
dtype, nvalues)\n", (8015, 8044), False, 'import struct\n'), ((8411, 8454), 'struct.pack', 'struct.pack', (["('<' + payload_format)", '*payload'], {}), "('<' + payload_format, *payload)\n", (8422, 8454), False, 'import struct\n'), ((8665, 8718), 'struct.pack', 'struct.pack', (['"""<HHII"""', 'tag', 'dtype', 'nvalues', 'new_offset'], {}), "('<HHII', tag, dtype, nvalues, new_offset)\n", (8676, 8718), False, 'import struct\n'), ((9139, 9172), 'struct.pack', 'struct.pack', (['out_format', '*payload'], {}), '(out_format, *payload)\n', (9150, 9172), False, 'import struct\n'), ((14996, 15014), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (15009, 15014), False, 'import warnings\n'), ((15474, 15511), 'numpy.zeros', 'np.zeros', (['(th, tw, 4)'], {'dtype': 'np.uint8'}), '((th, tw, 4), dtype=np.uint8)\n', (15482, 15511), True, 'import numpy as np\n'), ((15264, 15288), 'numpy.ceil', 'np.ceil', (['(imagewidth / tw)'], {}), '(imagewidth / tw)\n', (15271, 15288), True, 'import numpy as np\n'), ((16061, 16099), 'numpy.zeros', 'np.zeros', (['(jth, jtw, spp)'], {'dtype': 'dtype'}), '((jth, jtw, spp), dtype=dtype)\n', (16069, 16099), True, 'import numpy as np\n'), ((16128, 16164), 'numpy.zeros', 'np.zeros', (['(th, tw, spp)'], {'dtype': 'dtype'}), '((th, tw, spp), dtype=dtype)\n', (16136, 16164), True, 'import numpy as np\n'), ((20552, 20597), 'numpy.zeros', 'np.zeros', (['(rps, imagewidth, spp)'], {'dtype': 'dtype'}), '((rps, imagewidth, spp), dtype=dtype)\n', (20560, 20597), True, 'import numpy as np\n'), ((20623, 20669), 'numpy.zeros', 'np.zeros', (['(rps, imagewidth, 4)'], {'dtype': 'np.uint8'}), '((rps, imagewidth, 4), dtype=np.uint8)\n', (20631, 20669), True, 'import numpy as np\n'), ((16202, 16236), 'numpy.ceil', 'np.ceil', (['(idx // num_jp2k_tile_cols)'], {}), '(idx // num_jp2k_tile_cols)\n', (16209, 16236), True, 'import numpy as np\n'), ((16274, 16307), 'numpy.ceil', 'np.ceil', (['(idx % num_jp2k_tile_cols)'], {}), '(idx % num_jp2k_tile_cols)\n', (16281, 16307), True, 'import numpy as np\n'), ((20340, 20365), 'numpy.ceil', 'np.ceil', (['(imagewidth / jtw)'], {}), '(imagewidth / jtw)\n', (20347, 20365), True, 'import numpy as np\n'), ((20819, 20857), 'numpy.zeros', 'np.zeros', (['(jth, jtw, spp)'], {'dtype': 'dtype'}), '((jth, jtw, spp), dtype=dtype)\n', (20827, 20857), True, 'import numpy as np\n'), ((16907, 16945), 'numpy.ceil', 'np.ceil', (['(tilenum // num_tiff_tile_cols)'], {}), '(tilenum // num_tiff_tile_cols)\n', (16914, 16945), True, 'import numpy as np\n'), ((16987, 17024), 'numpy.ceil', 'np.ceil', (['(tilenum % num_tiff_tile_cols)'], {}), '(tilenum % num_tiff_tile_cols)\n', (16994, 17024), True, 'import numpy as np\n'), ((17716, 17746), 'numpy.flipud', 'np.flipud', (['rgba_tile[:, :, :3]'], {}), '(rgba_tile[:, :, :3])\n', (17725, 17746), True, 'import numpy as np\n'), ((19064, 19102), 'numpy.ceil', 'np.ceil', (['(tilenum // num_tiff_tile_cols)'], {}), '(tilenum // num_tiff_tile_cols)\n', (19071, 19102), True, 'import numpy as np\n'), ((19202, 19239), 'numpy.ceil', 'np.ceil', (['(tilenum % num_tiff_tile_cols)'], {}), '(tilenum % num_tiff_tile_cols)\n', (19209, 19239), True, 'import numpy as np\n'), ((22245, 22278), 'numpy.flipud', 'np.flipud', (['rgba_strip[:, :, :spp]'], {}), '(rgba_strip[:, :, :spp])\n', (22254, 22278), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from config_settings import Args
# %load_ext autoreload
# %autoreload 2
args=Args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device=",device)
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=args.layer_sizes[0], fc2_units=args.layer_sizes[1]): #States[0] (33,)
"""Initialize parameters and build model.
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer """
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
#self.bn0 = nn.BatchNorm1d(state_size).to(device)
self.fc1 = nn.Linear(state_size, fc1_units).to(device) #33
#self.bn1 = nn.BatchNorm1d(fc1_units).to(device)
self.fc2 = nn.Linear(fc1_units, fc2_units).to(device)
#self.bn2 = nn.BatchNorm1d(fc2_units).to(device)
self.fc3 = nn.Linear(fc2_units, action_size).to(device) #4
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
        if isinstance(state, np.ndarray):
            state = torch.from_numpy(state).float().to(device)
#x = self.bn0(state).to(device)
x = F.relu(self.fc1(state)) #x = F.relu(self.bn1(self.fc1(x))) #
x = F.relu(self.fc2(x)) #x = F.relu(self.bn2(self.fc2(x)))
return F.tanh(self.fc3(x))
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=args.layer_sizes[0], fc2_units=args.layer_sizes[1]):
"""Initialize parameters and build model.
Params
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer """
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
#self.bn0 = nn.BatchNorm1d(state_size).to(device)
self.fcs1 = nn.Linear(state_size, fcs1_units).to(device)
self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units).to(device)
self.fc3 = nn.Linear(fc2_units, 1).to(device)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        if isinstance(state, np.ndarray):
state = torch.from_numpy(state).float().to(device)
#state = self.bn0(state)
xs = F.relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.relu(self.fc2(x))
return self.fc3(x)
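# A minimal usage sketch, assuming state_size=33 and action_size=4 as hinted by
# the inline comments above (any seed works): instantiate the two networks and
# run one forward pass on a random batch of states.
if __name__ == "__main__":
    state_size, action_size, seed = 33, 4, 0
    actor = Actor(state_size, action_size, seed)
    critic = Critic(state_size, action_size, seed)
    state = torch.rand(8, state_size, device=device)  # batch of 8 random states
    action = actor(state)                             # values in [-1, 1] via tanh
    q_value = critic(state, action)                   # shape (8, 1)
    print(action.shape, q_value.shape)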
|
[
"torch.manual_seed",
"numpy.sqrt",
"config_settings.Args",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.cat"
] |
[((165, 171), 'config_settings.Args', 'Args', ([], {}), '()\n', (169, 171), False, 'from config_settings import Args\n'), ((206, 231), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (229, 231), False, 'import torch\n'), ((349, 364), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (356, 364), True, 'import numpy as np\n'), ((972, 995), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (989, 995), False, 'import torch\n'), ((2644, 2667), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2661, 2667), False, 'import torch\n'), ((3496, 3526), 'torch.cat', 'torch.cat', (['(xs, action)'], {'dim': '(1)'}), '((xs, action), dim=1)\n', (3505, 3526), False, 'import torch\n'), ((1074, 1106), 'torch.nn.Linear', 'nn.Linear', (['state_size', 'fc1_units'], {}), '(state_size, fc1_units)\n', (1083, 1106), True, 'import torch.nn as nn\n'), ((1198, 1229), 'torch.nn.Linear', 'nn.Linear', (['fc1_units', 'fc2_units'], {}), '(fc1_units, fc2_units)\n', (1207, 1229), True, 'import torch.nn as nn\n'), ((1317, 1350), 'torch.nn.Linear', 'nn.Linear', (['fc2_units', 'action_size'], {}), '(fc2_units, action_size)\n', (1326, 1350), True, 'import torch.nn as nn\n'), ((2746, 2779), 'torch.nn.Linear', 'nn.Linear', (['state_size', 'fcs1_units'], {}), '(state_size, fcs1_units)\n', (2755, 2779), True, 'import torch.nn as nn\n'), ((2810, 2856), 'torch.nn.Linear', 'nn.Linear', (['(fcs1_units + action_size)', 'fc2_units'], {}), '(fcs1_units + action_size, fc2_units)\n', (2819, 2856), True, 'import torch.nn as nn\n'), ((2885, 2908), 'torch.nn.Linear', 'nn.Linear', (['fc2_units', '(1)'], {}), '(fc2_units, 1)\n', (2894, 2908), True, 'import torch.nn as nn\n'), ((1790, 1814), 'torch.from_numpy', 'torch.from_numpy', (['states'], {}), '(states)\n', (1806, 1814), False, 'import torch\n'), ((3370, 3393), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3386, 3393), False, 'import torch\n')]
|
from math import sqrt
from numpy import arange
from universal_constants import MARS_RADIUS
from universal_functions import mars_density
class Simulation:
@property
def xs(self):
return [v.x for v in self.ps]
@property
def ys(self):
return [v.y for v in self.ps]
@property
def zs(self):
return [v.z for v in self.ps]
@property
def rs(self):
return [p.module for p in self.ps]
@property
def hs(self):
return [r - MARS_RADIUS for r in self.rs]
def __init__(self, body, forces):
self.body = body
self.forces = forces
self.ps = []
self.vs = []
self.gs = []
self.duration = 0
self.delta_v = 0
def run(self, time, dt, condition=lambda b: False):
duration = 0
initial_speed = self.body.speed
for _ in arange(0, time, dt):
duration += dt
self.step(dt)
if condition(self.body):
break
self.duration = duration
self.delta_v = self.body.speed - initial_speed
def step(self, dt):
force = self.forces(self.body)
self.body.velocity += dt * force / self.body.mass
self.body.position += dt * self.body.velocity
self.ps.append(self.body.position)
self.vs.append(self.body.velocity)
self.gs.append(force.module / self.body.mass / 9.81 / dt)
class ThrustSimulation(Simulation):
@property
def engine_mass(self):
return 0.0014 * self.mass_0 * abs(self.delta_v) / self.duration + 49.6
def __init__(self, body, delta_mass, *forces):
super().__init__(body, *forces)
self.delta_mass = delta_mass
self.propellant_mass = 0
self.mass_0 = body.mass
def step(self, dt):
super().step(dt)
self.body.mass -= self.delta_mass * dt
self.propellant_mass += self.delta_mass * dt
def save_data(self, filename):
import csv
with open(filename + '.csv', 'a', newline='') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(['Engine mass', str(self.engine_mass)])
spamwriter.writerow(['Propellant mass', str(self.propellant_mass)])
spamwriter.writerow(['Max gs', str(max(self.gs))])
class AerobreakingSimulation(Simulation):
@property
def shield_mass(self):
return self.body.mass * 0.00091 * (self.Q * 1e-4) ** 0.51575
@property
def structure_mass(self):
return self.body.mass * 0.0232 * max(self.pressures) ** -0.1708
@property
def back_shield_mass(self):
return 0.14 * self.body.mass
@property
def heat_shield_mass(self):
return self.shield_mass + self.structure_mass + self.back_shield_mass
def __init__(self, body, *forces):
super().__init__(body, *forces)
self.qs = []
self.pressures = []
self.Q = 0
        self.k = 1.9027e-4  # [SI] Sutton-Graves-type heating constant; here be dragons
def q(self, b):
return self.k * sqrt(mars_density(b.radius) / b.r_nose) * b.speed ** 3
def p(self, b):
return mars_density(b.radius) * b.speed2 / 2
def run(self, time, dt=1, condition=lambda b: False):
super().run(time, dt, condition)
self.Q = sum(self.qs) * dt
def step(self, dt):
super().step(dt)
self.qs.append(self.q(self.body))
self.pressures.append(self.p(self.body))
def save_data(self, filename):
import csv
with open(filename + '.csv', 'a', newline='') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(['Shield mass', str(self.heat_shield_mass)])
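# A standalone worked example (kept outside the classes above) of the two
# aerothermal quantities computed in AerobreakingSimulation.q and .p:
#   convective heat flux   q = k * sqrt(rho / r_nose) * v**3
#   dynamic pressure       p = rho * v**2 / 2
# The density, nose radius and speed below are made-up placeholders; no call to
# mars_density or to a real body object is made here.
if __name__ == '__main__':
    k = 1.9027e-4           # same constant as in the class above [SI]
    rho = 1e-4              # hypothetical atmospheric density, kg/m^3
    r_nose = 1.0            # hypothetical nose radius, m
    v = 5000.0              # hypothetical entry speed, m/s
    q_dot = k * sqrt(rho / r_nose) * v ** 3   # about 2.4e5 W/m^2
    p_dyn = rho * v ** 2 / 2                  # 1250 Pa
    print(q_dot, p_dyn)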
|
[
"csv.writer",
"universal_functions.mars_density",
"numpy.arange"
] |
[((871, 890), 'numpy.arange', 'arange', (['(0)', 'time', 'dt'], {}), '(0, time, dt)\n', (877, 890), False, 'from numpy import arange\n'), ((2073, 2092), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (2083, 2092), False, 'import csv\n'), ((3587, 3606), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3597, 3606), False, 'import csv\n'), ((3127, 3149), 'universal_functions.mars_density', 'mars_density', (['b.radius'], {}), '(b.radius)\n', (3139, 3149), False, 'from universal_functions import mars_density\n'), ((3041, 3063), 'universal_functions.mars_density', 'mars_density', (['b.radius'], {}), '(b.radius)\n', (3053, 3063), False, 'from universal_functions import mars_density\n')]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from collections import deque
# Import configuration parameters
import config as cfg
# Define a class to receive the characteristics of each line detection
class Line:
def __init__(self, buf_len = 5):
# x values of the last n fits of the line
self.recent_xfitted = deque(maxlen=buf_len)
#average x values of the fitted line over the last n iterations
self.bestx = None
#polynomial coefficients averaged over the last n iterations
self.best_fit = deque(maxlen=buf_len) # circular buffer
#polynomial coefficients for the most recent fit
self.current_fit = [np.array([False]), np.array([False]), np.array([False])]
# This function is a reuse from the lecture. Finds lane pixels
def find_lane_pixels(binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0] // 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0] // cfg.nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(cfg.nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_xleft_low = leftx_current - cfg.margin
win_xleft_high = leftx_current + cfg.margin
win_xright_low = rightx_current - cfg.margin
win_xright_high = rightx_current + cfg.margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low),
(win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low),
(win_xright_high, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window #
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > cfg.minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > cfg.minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
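# A small self-contained sketch (synthetic data, not a real warped frame) of the
# histogram trick used in find_lane_pixels above: sum the bottom half of the
# binary image column-wise, then take the argmax of each half as a lane base x.
def _demo_histogram_bases():
    demo = np.zeros((720, 1280), dtype=np.uint8)
    demo[:, 300:320] = 1  # fake left lane line
    demo[:, 950:970] = 1  # fake right lane line
    hist = np.sum(demo[demo.shape[0] // 2:, :], axis=0)
    mid = hist.shape[0] // 2
    return np.argmax(hist[:mid]), np.argmax(hist[mid:]) + mid  # roughly (300, 950)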
# This function is a reuse from the lecture with minor modification to pass challenge video.
# Fits a second order polynomial.
def fit_polynomial(binary_warped, left_line, right_line):
# Find our lane pixels first
leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
# Fit a second order polynomial to each using `np.polyfit`
try:
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
# Checks that the lines are separated by approximately the same distance horizontally and
# compares poly coefficients with previous fits. If it fails this frame is discarded
# Perform this check only for the challenge video, don't do it for project video or test images
is_a_good_frame = ((np.abs(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0])) < cfg.th) & \
(np.abs(left_fit[0] - left_line.current_fit[0]) < cfg.th1) \
& (np.abs(left_fit[1] - left_line.current_fit[1]) < cfg.th2) & \
(np.abs(left_fit[2] - left_line.current_fit[2]) < cfg.th3))
# Check against maximum lane width
is_a_good_frame &= (np.abs(left_fitx[-1] - right_fitx[-1]) < cfg.lane_max_width) & \
(np.abs(left_fitx[0] - right_fitx[0]) < cfg.lane_max_width)
#if (0 == cfg.video_mode) | (cfg.video_file_name == '../project_video') | is_a_good_frame:
if (0 == cfg.video_mode) | is_a_good_frame:
## Visualization ##
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
left_line.recent_xfitted.append(left_fitx)
left_line.best_fit.append(left_fit)
right_line.recent_xfitted.append(right_fitx)
right_line.best_fit.append(right_fit)
else:
print('bad frame')
#pass # skip this 'bad' frame
except:
print('bad frame')
#pass
return out_img, left_line, right_line
# Sets the poly coefficients to the last coefficients computed
def long_term_filter_init(left_line, right_line):
left_line.bestx = left_line.recent_xfitted[-1]
right_line.bestx = right_line.recent_xfitted[-1]
left_line.current_fit = left_line.best_fit[-1]
right_line.current_fit = right_line.best_fit[-1]
return left_line, right_line
# Takes a mean over accumulated over time poly coefficients
def long_term_filter(left_line, right_line):
left_line.bestx = np.mean(left_line.recent_xfitted, axis=0)
right_line.bestx = np.mean(right_line.recent_xfitted, axis=0)
left_line.current_fit = np.mean(left_line.best_fit, axis=0)
right_line.current_fit = np.mean(right_line.best_fit, axis=0)
return left_line, right_line
# Calculate the radius of curvature in meters for both lane lines
def measure_curvature(left_fit_cr, right_fit_cr, ploty):
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve in meters (radius of curvature)
left_curverad = ((1 + (left_fit_cr[0] * 2 * y_eval * cfg.ym_per_pix + left_fit_cr[1]) ** 2) ** (3 / 2)) / np.abs(
2 * left_fit_cr[0])
    # Calculation of R_curve for the right line
right_curverad = ((1 + (right_fit_cr[0] * 2 * y_eval * cfg.ym_per_pix + right_fit_cr[1]) ** 2) ** (3 / 2)) / np.abs(
2 * right_fit_cr[0])
return left_curverad, right_curverad
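# A worked numeric check of the radius-of-curvature formula applied above,
# R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, using made-up fit coefficients:
# A = 1e-4, B = 0.01 and y = 700 give 2*A*y + B = 0.15, hence R is about 5170
# (in whatever units the polynomial was fitted in).
def _demo_curvature_check():
    A, B, y = 1e-4, 0.01, 700
    return (1 + (2 * A * y + B) ** 2) ** 1.5 / abs(2 * A)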
# Calculate vehicle center offset in meters
def vehicle_offset_calc(undist, bottom_x_left, bottom_x_right):
# Calculate vehicle center offset in pixels
vehicle_offset = undist.shape[1]/2 - (bottom_x_left + bottom_x_right)/2
# Convert pixel offset to meters
vehicle_offset *= cfg.xm_per_pix
return vehicle_offset
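# A quick standalone check of the offset arithmetic above, assuming a 1280-pixel
# wide frame and xm_per_pix = 3.7/700 m per pixel (an assumed calibration, not
# necessarily the value set in config.py): lane bases at x = 320 and x = 980 put
# the lane center at 650, so the offset is (640 - 650) * 3.7/700, about -0.053 m.
def _demo_vehicle_offset_check():
    xm_per_pix = 3.7 / 700
    return (1280 / 2 - (320 + 980) / 2) * xm_per_pix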
# Fits a second order polynomial to each line. Reuse from a lecture
def fit_poly(img_shape, leftx, lefty, rightx, righty):
#Fit a second order polynomial to each with np.polyfit()
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
# Calc both polynomials using ploty, left_fit and right_fit #
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
return left_fitx, right_fitx, ploty, left_fit, right_fit
# Search for the new line within +/- some margin around the old line center.
def search_around_poly(binary_warped, left_line, right_line):
margin = cfg.search_around_poly
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
left_fit = left_line.current_fit
right_fit = right_line.current_fit
# Set the area of search based on activated x-values
# within the +/- margin of our polynomial function
left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0] * (nonzeroy ** 2) +
left_fit[1] * nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0] * (nonzeroy ** 2) +
right_fit[1] * nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
## Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
# Fit new polynomials
try:
left_fitx, right_fitx, ploty, left_fit, right_fit = \
fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
# Checks that the lines are separated by approximately the same distance horizontally and
# compares poly coefficients with previous fits. If it fails this frame is discarded
# Perform this check only for the challenge video, don't do it for project video or test images
is_a_good_frame = ((np.abs(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0])) < cfg.th) & \
(np.abs(left_fit[0] - left_line.current_fit[0]) < cfg.th1) \
& (np.abs(left_fit[1] - left_line.current_fit[1]) < cfg.th2) & \
(np.abs(left_fit[2] - left_line.current_fit[2]) < cfg.th3))
# Check against maximum lane width
is_a_good_frame &= (np.abs(left_fitx[-1] - right_fitx[-1]) < cfg.lane_max_width) & \
(np.abs(left_fitx[0] - right_fitx[0]) < cfg.lane_max_width)
if is_a_good_frame:
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
# Store coefficients into a circular buffer
left_line.recent_xfitted.append(left_fitx)
right_line.recent_xfitted.append(right_fitx)
left_line.best_fit.append(left_fit)
right_line.best_fit.append(right_fit)
else:
print('bad frame')
#pass # skip this 'bad' frame
except:
print('bad frame')
#pass
return out_img, left_line, right_line
|
[
"cv2.rectangle",
"numpy.dstack",
"numpy.mean",
"numpy.abs",
"collections.deque",
"numpy.polyfit",
"numpy.hstack",
"numpy.zeros_like",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.array",
"numpy.linspace",
"cv2.addWeighted",
"numpy.vstack",
"numpy.concatenate",
"numpy.int_",
"numpy.int"
] |
[((917, 979), 'numpy.sum', 'np.sum', (['binary_warped[binary_warped.shape[0] // 2:, :]'], {'axis': '(0)'}), '(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)\n', (923, 979), True, 'import numpy as np\n'), ((1059, 1115), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (1068, 1115), True, 'import numpy as np\n'), ((1265, 1296), 'numpy.int', 'np.int', (['(histogram.shape[0] // 2)'], {}), '(histogram.shape[0] // 2)\n', (1271, 1296), True, 'import numpy as np\n'), ((1314, 1345), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (1323, 1345), True, 'import numpy as np\n'), ((1498, 1544), 'numpy.int', 'np.int', (['(binary_warped.shape[0] // cfg.nwindows)'], {}), '(binary_warped.shape[0] // cfg.nwindows)\n', (1504, 1544), True, 'import numpy as np\n'), ((1670, 1690), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (1678, 1690), True, 'import numpy as np\n'), ((1706, 1726), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (1714, 1726), True, 'import numpy as np\n'), ((7067, 7108), 'numpy.mean', 'np.mean', (['left_line.recent_xfitted'], {'axis': '(0)'}), '(left_line.recent_xfitted, axis=0)\n', (7074, 7108), True, 'import numpy as np\n'), ((7132, 7174), 'numpy.mean', 'np.mean', (['right_line.recent_xfitted'], {'axis': '(0)'}), '(right_line.recent_xfitted, axis=0)\n', (7139, 7174), True, 'import numpy as np\n'), ((7203, 7238), 'numpy.mean', 'np.mean', (['left_line.best_fit'], {'axis': '(0)'}), '(left_line.best_fit, axis=0)\n', (7210, 7238), True, 'import numpy as np\n'), ((7268, 7304), 'numpy.mean', 'np.mean', (['right_line.best_fit'], {'axis': '(0)'}), '(right_line.best_fit, axis=0)\n', (7275, 7304), True, 'import numpy as np\n'), ((7611, 7624), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (7617, 7624), True, 'import numpy as np\n'), ((8600, 8627), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (8610, 8627), True, 'import numpy as np\n'), ((8644, 8673), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (8654, 8673), True, 'import numpy as np\n'), ((8729, 8775), 'numpy.linspace', 'np.linspace', (['(0)', '(img_shape[0] - 1)', 'img_shape[0]'], {}), '(0, img_shape[0] - 1, img_shape[0])\n', (8740, 8775), True, 'import numpy as np\n'), ((9320, 9340), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (9328, 9340), True, 'import numpy as np\n'), ((9356, 9376), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (9364, 9376), True, 'import numpy as np\n'), ((350, 371), 'collections.deque', 'deque', ([], {'maxlen': 'buf_len'}), '(maxlen=buf_len)\n', (355, 371), False, 'from collections import deque\n'), ((563, 584), 'collections.deque', 'deque', ([], {'maxlen': 'buf_len'}), '(maxlen=buf_len)\n', (568, 584), False, 'from collections import deque\n'), ((1364, 1395), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (1373, 1395), True, 'import numpy as np\n'), ((2550, 2650), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (2563, 2650), False, 'import cv2\n'), ((2677, 2779), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], 
{}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (2690, 2779), False, 'import cv2\n'), ((3799, 3829), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (3813, 3829), True, 'import numpy as np\n'), ((3856, 3887), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (3870, 3887), True, 'import numpy as np\n'), ((4629, 4656), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (4639, 4656), True, 'import numpy as np\n'), ((4677, 4706), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (4687, 4706), True, 'import numpy as np\n'), ((4771, 4837), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (4782, 4837), True, 'import numpy as np\n'), ((7797, 7823), 'numpy.abs', 'np.abs', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (7803, 7823), True, 'import numpy as np\n'), ((7987, 8014), 'numpy.abs', 'np.abs', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (7993, 8014), True, 'import numpy as np\n'), ((10463, 10519), 'numpy.dstack', 'np.dstack', (['(binary_warped, binary_warped, binary_warped)'], {}), '((binary_warped, binary_warped, binary_warped))\n', (10472, 10519), True, 'import numpy as np\n'), ((688, 705), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (696, 705), True, 'import numpy as np\n'), ((707, 724), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (715, 724), True, 'import numpy as np\n'), ((726, 743), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (734, 743), True, 'import numpy as np\n'), ((11647, 11669), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (11660, 11669), True, 'import numpy as np\n'), ((12356, 12405), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (12365, 12405), True, 'import numpy as np\n'), ((12722, 12773), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (12731, 12773), True, 'import numpy as np\n'), ((13006, 13053), 'cv2.addWeighted', 'cv2.addWeighted', (['out_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(out_img, 1, window_img, 0.3, 0)\n', (13021, 13053), False, 'import cv2\n'), ((3528, 3561), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (3535, 3561), True, 'import numpy as np\n'), ((3645, 3679), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (3652, 3679), True, 'import numpy as np\n'), ((5569, 5615), 'numpy.abs', 'np.abs', (['(left_fit[2] - left_line.current_fit[2])'], {}), '(left_fit[2] - left_line.current_fit[2])\n', (5575, 5615), True, 'import numpy as np\n'), ((5699, 5737), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1])'], {}), '(left_fitx[-1] - right_fitx[-1])\n', (5705, 5737), True, 'import numpy as np\n'), ((5792, 5828), 'numpy.abs', 'np.abs', (['(left_fitx[0] - right_fitx[0])'], {}), '(left_fitx[0] - right_fitx[0])\n', (5798, 5828), True, 'import numpy as np\n'), ((11314, 11360), 'numpy.abs', 'np.abs', (['(left_fit[2] - left_line.current_fit[2])'], {}), '(left_fit[2] - left_line.current_fit[2])\n', (11320, 11360), True, 'import numpy as np\n'), ((11444, 11482), 'numpy.abs', 'np.abs', 
(['(left_fitx[-1] - right_fitx[-1])'], {}), '(left_fitx[-1] - right_fitx[-1])\n', (11450, 11482), True, 'import numpy as np\n'), ((11533, 11569), 'numpy.abs', 'np.abs', (['(left_fitx[0] - right_fitx[0])'], {}), '(left_fitx[0] - right_fitx[0])\n', (11539, 11569), True, 'import numpy as np\n'), ((12868, 12892), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (12875, 12892), True, 'import numpy as np\n'), ((12944, 12969), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (12951, 12969), True, 'import numpy as np\n'), ((5497, 5543), 'numpy.abs', 'np.abs', (['(left_fit[1] - left_line.current_fit[1])'], {}), '(left_fit[1] - left_line.current_fit[1])\n', (5503, 5543), True, 'import numpy as np\n'), ((11224, 11270), 'numpy.abs', 'np.abs', (['(left_fit[1] - left_line.current_fit[1])'], {}), '(left_fit[1] - left_line.current_fit[1])\n', (11230, 11270), True, 'import numpy as np\n'), ((5329, 5400), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))'], {}), '(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))\n', (5335, 5400), True, 'import numpy as np\n'), ((5425, 5471), 'numpy.abs', 'np.abs', (['(left_fit[0] - left_line.current_fit[0])'], {}), '(left_fit[0] - left_line.current_fit[0])\n', (5431, 5471), True, 'import numpy as np\n'), ((11020, 11091), 'numpy.abs', 'np.abs', (['(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))'], {}), '(left_fitx[-1] - right_fitx[-1] - (left_fitx[0] - right_fitx[0]))\n', (11026, 11091), True, 'import numpy as np\n'), ((11134, 11180), 'numpy.abs', 'np.abs', (['(left_fit[0] - left_line.current_fit[0])'], {}), '(left_fit[0] - left_line.current_fit[0])\n', (11140, 11180), True, 'import numpy as np\n'), ((12102, 12140), 'numpy.vstack', 'np.vstack', (['[left_fitx - margin, ploty]'], {}), '([left_fitx - margin, ploty])\n', (12111, 12140), True, 'import numpy as np\n'), ((12463, 12502), 'numpy.vstack', 'np.vstack', (['[right_fitx - margin, ploty]'], {}), '([right_fitx - margin, ploty])\n', (12472, 12502), True, 'import numpy as np\n'), ((12209, 12247), 'numpy.vstack', 'np.vstack', (['[left_fitx + margin, ploty]'], {}), '([left_fitx + margin, ploty])\n', (12218, 12247), True, 'import numpy as np\n'), ((12572, 12611), 'numpy.vstack', 'np.vstack', (['[right_fitx + margin, ploty]'], {}), '([right_fitx + margin, ploty])\n', (12581, 12611), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
...
"""
import LibraryTT.txt2array as conversion
import numpy as np
from numpy import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import random
import math
from mpl_toolkits.mplot3d import Axes3D
# import open3d as o3d
# %matplotlib inline
D = conversion.txt2array()
DD = np.copy(D) # Make a copy of the data so the originals are not affected
Epsilon = 30
MinPts = 75 #78
# result = DBSCAN(DD,Epsilon,MinPts)
chch = conversion.RObjetos(DD,Epsilon,MinPts)
TN = conversion.usar(chch)
# Plot one data capture
conversion.imprimir3D(D)
# conversion.imprimir3D(DD)
# Plot without the noise
conversion.imprimirObjetos(TN,chch,0,0)
# Plot with the noise
conversion.imprimirObjetos(TN,chch,1,0)
# conversion.imprimirObjetos(TN,chch,2,1)
# (objects, object sizes, 2, which object)
# RANSAC
# vectors to store the results.
abcd = np.array([[0,0,0,0]])
ldps = np.array([])
gplns = np.array([])
abcd,ldps,gplns = conversion.rnsc(TN,chch,abcd,ldps,gplns)
abcd = np.delete(abcd,0,axis=0)
# FIND the plane centers, although something more should be done for small planes.
cplns = 0 # steps through each abcd value to run the test
# p1 = 0
# sc = 0
# p2 = gplns[sc]
cplanos = np.array([[0,0,0]])
Dists = np.array([])
cplanos,Dists = conversion.centros(cplanos,Dists,TN,ldps,gplns)
dext = 100
dint = 50
tvdext = np.array([])
tvdint = np.array([])
# Check which objects fall within the inner (int) and outer (ext) ranges
# np.append(datosx,[[xmin,xmax]],axis=0)
# The positions are stored
for ima in range(0,len(Dists)):
    if (Dists[ima] <= dext):
        tvdext = np.append(tvdext,ima)
        # print("there is an obstacle in the far zone")
    if (Dists[ima] <= dint):
        tvdint = np.append(tvdint,ima)
        # print("There is a nearby obstacle, stop and change position")
# To better identify which are inner rather than outer, since those matter more
if (len(tvdext) > 0) and (len(tvdint) > 0):
for ixt in range(0,len(tvdint)):
for ixtt in range(0,len(tvdext)):
if (tvdint[ixt] == tvdext[ixtt]):
                tvdext = np.delete(tvdext, ixtt)
if (len(tvdext) <= 0):
break
prac = 0
if (len(tvdext) > 0) or (len(tvdint) > 0):
if (len(tvdint)>0):
for din in range(0,len(tvdint)):
xd = cplanos[int(tvdint[din]),0]
yd = cplanos[int(tvdint[din]),1]
angulo = math.atan2(xd,yd)
angulo = math.degrees(angulo)
            # Turn on the corresponding vibration motor for each case
            if (angulo >= 120):
                print("quick, take a step to the right")
                prac += 1
            if (angulo <= 60):
                print("quick, take a step to the left")
                prac += 1
            if ((angulo > 60)and(angulo < 120)):
                print("Stop your carriage")
prac += 1
    # The vibration motors are switched off here
if (prac == 0) and (len(tvdext)>0):
for din in range(0,len(tvdext)):
xd = cplanos[int(tvdext[din]),0]
yd = cplanos[int(tvdext[din]),1]
angulo = math.atan2(xd,yd)
angulo = math.degrees(angulo)
            # Turn on the corresponding vibration motor for each case
            if (angulo >= 120):
                print("take a step to the right")
            if (angulo <= 60):
                print("take a step to the left")
            if ((angulo > 60)and(angulo < 120)):
                print("There is something ahead")
|
[
"numpy.copy",
"LibraryTT.txt2array.usar",
"LibraryTT.txt2array.imprimir3D",
"LibraryTT.txt2array.centros",
"numpy.delete",
"LibraryTT.txt2array.imprimirObjetos",
"math.degrees",
"numpy.append",
"numpy.array",
"math.atan2",
"LibraryTT.txt2array.rnsc",
"LibraryTT.txt2array.RObjetos",
"LibraryTT.txt2array.txt2array"
] |
[((305, 327), 'LibraryTT.txt2array.txt2array', 'conversion.txt2array', ([], {}), '()\n', (325, 327), True, 'import LibraryTT.txt2array as conversion\n'), ((336, 346), 'numpy.copy', 'np.copy', (['D'], {}), '(D)\n', (343, 346), True, 'import numpy as np\n'), ((486, 526), 'LibraryTT.txt2array.RObjetos', 'conversion.RObjetos', (['DD', 'Epsilon', 'MinPts'], {}), '(DD, Epsilon, MinPts)\n', (505, 526), True, 'import LibraryTT.txt2array as conversion\n'), ((533, 554), 'LibraryTT.txt2array.usar', 'conversion.usar', (['chch'], {}), '(chch)\n', (548, 554), True, 'import LibraryTT.txt2array as conversion\n'), ((578, 602), 'LibraryTT.txt2array.imprimir3D', 'conversion.imprimir3D', (['D'], {}), '(D)\n', (599, 602), True, 'import LibraryTT.txt2array as conversion\n'), ((669, 711), 'LibraryTT.txt2array.imprimirObjetos', 'conversion.imprimirObjetos', (['TN', 'chch', '(0)', '(0)'], {}), '(TN, chch, 0, 0)\n', (695, 711), True, 'import LibraryTT.txt2array as conversion\n'), ((733, 775), 'LibraryTT.txt2array.imprimirObjetos', 'conversion.imprimirObjetos', (['TN', 'chch', '(1)', '(0)'], {}), '(TN, chch, 1, 0)\n', (759, 775), True, 'import LibraryTT.txt2array as conversion\n'), ((913, 937), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (921, 937), True, 'import numpy as np\n'), ((943, 955), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (951, 955), True, 'import numpy as np\n'), ((965, 977), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (973, 977), True, 'import numpy as np\n'), ((999, 1043), 'LibraryTT.txt2array.rnsc', 'conversion.rnsc', (['TN', 'chch', 'abcd', 'ldps', 'gplns'], {}), '(TN, chch, abcd, ldps, gplns)\n', (1014, 1043), True, 'import LibraryTT.txt2array as conversion\n'), ((1048, 1074), 'numpy.delete', 'np.delete', (['abcd', '(0)'], {'axis': '(0)'}), '(abcd, 0, axis=0)\n', (1057, 1074), True, 'import numpy as np\n'), ((1269, 1290), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (1277, 1290), True, 'import numpy as np\n'), ((1298, 1310), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1306, 1310), True, 'import numpy as np\n'), ((1328, 1379), 'LibraryTT.txt2array.centros', 'conversion.centros', (['cplanos', 'Dists', 'TN', 'ldps', 'gplns'], {}), '(cplanos, Dists, TN, ldps, gplns)\n', (1346, 1379), True, 'import LibraryTT.txt2array as conversion\n'), ((1414, 1426), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1422, 1426), True, 'import numpy as np\n'), ((1437, 1449), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1445, 1449), True, 'import numpy as np\n'), ((1663, 1685), 'numpy.append', 'np.append', (['tvdext', 'ima'], {}), '(tvdext, ima)\n', (1672, 1685), True, 'import numpy as np\n'), ((1791, 1813), 'numpy.append', 'np.append', (['tvdint', 'ima'], {}), '(tvdint, ima)\n', (1800, 1813), True, 'import numpy as np\n'), ((2496, 2514), 'math.atan2', 'math.atan2', (['xd', 'yd'], {}), '(xd, yd)\n', (2506, 2514), False, 'import math\n'), ((2536, 2556), 'math.degrees', 'math.degrees', (['angulo'], {}), '(angulo)\n', (2548, 2556), False, 'import math\n'), ((3250, 3268), 'math.atan2', 'math.atan2', (['xd', 'yd'], {}), '(xd, yd)\n', (3260, 3268), False, 'import math\n'), ((3290, 3310), 'math.degrees', 'math.degrees', (['angulo'], {}), '(angulo)\n', (3302, 3310), False, 'import math\n'), ((2160, 2183), 'numpy.delete', 'np.delete', (['tvdext[ixtt]'], {}), '(tvdext[ixtt])\n', (2169, 2183), True, 'import numpy as np\n')]
|
import sys
import os
import numpy as np
import pandas as pd
from Globals import *
#-------- Create directories ------------
os.makedirs(dir_data,exist_ok=True)
os.makedirs(dir_chain,exist_ok=True)
os.makedirs(dir_plots,exist_ok=True)
os.makedirs(dir_outs,exist_ok=True)
#------------- Load data ----------------------------------
df = pd.read_csv(file_data,usecols=columns_data,nrows=n_sources)
df.replace(to_replace=nan_values,value=np.nan,inplace=True)
df.set_index(identifier,inplace=True)
n_init = len(df)
print("The data set contains {0} sources.".format(n_init))
#-----------------------------------------------------------
#+++++++++++++++++++ Filter data ++++++++++++++++++++++++++
#---- Set as NaN the BP values larger than limit_BP -------
idx = np.where(df[label_BP] > limit_BP)[0]
if len(idx) > 0:
df.loc[df.iloc[idx].index,label_BP] = np.nan
#----------------------------------------------------------
#---- Set uncertainty as missing if band is missing ---
for ob,un in zip(phot_obs,phot_unc):
mask = np.isnan(df.loc[:,ob])
df.loc[mask,un] = np.nan
#----------------------------------------------------------
#- Set uncertainty to nan_unc if band is observed ---
for ob,un in zip(phot_obs,phot_unc):
mask = np.isnan(df.loc[:,un]) & np.isfinite(df.loc[:,ob])
df.loc[mask,un] = nan_unc
#----------------------------------------------------------
#--- Remove objects with less than n_obs_min bands --------
df.dropna(thresh=n_obs_min,subset=phot_obs,inplace=True)
#----------------------------------------------------------
#---- Minimum uncertainty --------------------------------
for un in phot_unc:
df.loc[:,un] += add_unc
#----------------------------------------------------------
print("After filtering {0} sources were removed.".format(n_init - len(df)))
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++ Split data frame +++++++++++++++++++++++++++++++++
n_sources = len(df)
group_size = int(np.floor(n_sources/size))
reminder = n_sources % size
group_size = np.repeat(group_size,size)
group_size[-1] += reminder
groups = []
for g,gs in enumerate(group_size):
groups.append(np.repeat(g+1,gs))
groups = np.concatenate(groups)
df.insert(loc=0,column="Groups",value=groups)
grouped_df = df.groupby("Groups")
#--- Write each chunk -----
for g in range(1,size+1):
grouped_df.get_group(g).to_csv(dir_data + "data_{0}_of_{1}.csv".format(g,size))
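# A tiny standalone check of the chunking arithmetic above, with made-up numbers:
# 10 sources split across size = 3 output files give a base chunk of 3 rows, and
# the remainder (1) is added to the last chunk -> sizes [3, 3, 4], labels 1..3.
demo_n, demo_size = 10, 3
demo_sizes = np.repeat(demo_n // demo_size, demo_size)
demo_sizes[-1] += demo_n % demo_size
demo_labels = np.concatenate([np.repeat(g + 1, gs) for g, gs in enumerate(demo_sizes)])
print(demo_sizes, demo_labels)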
|
[
"numpy.repeat",
"os.makedirs",
"pandas.read_csv",
"numpy.where",
"numpy.floor",
"numpy.isnan",
"numpy.isfinite",
"numpy.concatenate"
] |
[((125, 161), 'os.makedirs', 'os.makedirs', (['dir_data'], {'exist_ok': '(True)'}), '(dir_data, exist_ok=True)\n', (136, 161), False, 'import os\n'), ((161, 198), 'os.makedirs', 'os.makedirs', (['dir_chain'], {'exist_ok': '(True)'}), '(dir_chain, exist_ok=True)\n', (172, 198), False, 'import os\n'), ((198, 235), 'os.makedirs', 'os.makedirs', (['dir_plots'], {'exist_ok': '(True)'}), '(dir_plots, exist_ok=True)\n', (209, 235), False, 'import os\n'), ((235, 271), 'os.makedirs', 'os.makedirs', (['dir_outs'], {'exist_ok': '(True)'}), '(dir_outs, exist_ok=True)\n', (246, 271), False, 'import os\n'), ((337, 398), 'pandas.read_csv', 'pd.read_csv', (['file_data'], {'usecols': 'columns_data', 'nrows': 'n_sources'}), '(file_data, usecols=columns_data, nrows=n_sources)\n', (348, 398), True, 'import pandas as pd\n'), ((2019, 2046), 'numpy.repeat', 'np.repeat', (['group_size', 'size'], {}), '(group_size, size)\n', (2028, 2046), True, 'import numpy as np\n'), ((2164, 2186), 'numpy.concatenate', 'np.concatenate', (['groups'], {}), '(groups)\n', (2178, 2186), True, 'import numpy as np\n'), ((759, 792), 'numpy.where', 'np.where', (['(df[label_BP] > limit_BP)'], {}), '(df[label_BP] > limit_BP)\n', (767, 792), True, 'import numpy as np\n'), ((1021, 1044), 'numpy.isnan', 'np.isnan', (['df.loc[:, ob]'], {}), '(df.loc[:, ob])\n', (1029, 1044), True, 'import numpy as np\n'), ((1952, 1978), 'numpy.floor', 'np.floor', (['(n_sources / size)'], {}), '(n_sources / size)\n', (1960, 1978), True, 'import numpy as np\n'), ((1230, 1253), 'numpy.isnan', 'np.isnan', (['df.loc[:, un]'], {}), '(df.loc[:, un])\n', (1238, 1253), True, 'import numpy as np\n'), ((1255, 1281), 'numpy.isfinite', 'np.isfinite', (['df.loc[:, ob]'], {}), '(df.loc[:, ob])\n', (1266, 1281), True, 'import numpy as np\n'), ((2135, 2155), 'numpy.repeat', 'np.repeat', (['(g + 1)', 'gs'], {}), '(g + 1, gs)\n', (2144, 2155), True, 'import numpy as np\n')]
|
from estimator_adaptative import EstimatorAdaptative
from mpl_toolkits.mplot3d import Axes3D
from grid_search import GridSearch
from sklearn import metrics
import matplotlib.pyplot as plt
import matplotlib as mpl
from utils import *
import numpy as np
import os
import sys
data_path = '../../databases'
PlotsDirectory = '../plots/Week2/task2/'
if not os.path.exists(PlotsDirectory):
os.makedirs(PlotsDirectory)
Pr = list()
Re = list()
names = ['highway', 'fall', 'traffic']
estimation_range = [np.array([1050, 1200]), np.array([1460, 1510]), np.array([950, 1000])]
prediction_range = [np.array([1201, 1350]), np.array([1511, 1560]), np.array([1001, 1050])]
a = [{'min':0, 'max':40, 'step':1}, {'min':0, 'max':40, 'step':1},{'min':0, 'max':40, 'step':1}]
rho = [0.599, 0.004,0]
for i in range(len(names)):
if len(sys.argv) > 1:
i = names.index(str(sys.argv[1]))
print('computing ' + names[i] +' ...')
[X_est, y_est] = load_data(data_path, names[i], estimation_range[i], grayscale=True)
[X_pred, y_pred] = load_data(data_path, names[i], prediction_range[i], grayscale=True)
alpha_range = np.arange(a[i].get('min'), a[i].get('max'), a[i].get('step'))
for idx, alpha in enumerate(alpha_range):
print(str(idx) + "/" + str(len(alpha_range)) + " " + str(alpha))
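        # For each alpha, fit one EstimatorAdaptative on the estimation frames and
        # score it twice on the prediction frames -- once with metric="precision"
        # and once with metric="recall" -- so that sweeping alpha traces out the
        # precision-recall curve plotted below.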
estPrecision = EstimatorAdaptative(alpha=alpha, rho=rho[i], metric="precision")
estRecall = EstimatorAdaptative(alpha=alpha, rho=rho[i], metric="recall")
estPrecision.fit(X_est)
estRecall.fit(X_est)
Pr.append(estPrecision.score(X_pred, y_pred))
Re.append(estRecall.score(X_pred, y_pred))
plt.figure()
plt.plot(np.array(Re), np.array(Pr), 'b', label='Precision-Recall')
plt.title("Precision vs Recall curve [AUC = " + str(round(metrics.auc(Re, Pr, True), 4)) + "] [" + names[i] + " sequence]")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.savefig(PlotsDirectory + names[i] + '_PRcurve_AUC.png', bbox_inches='tight')
plt.close()
if len(sys.argv) > 1:
break
#Empty lists
Pr[:] = []
Re[:] = []
|
[
"os.path.exists",
"matplotlib.pyplot.savefig",
"estimator_adaptative.EstimatorAdaptative",
"os.makedirs",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure"
] |
[((353, 383), 'os.path.exists', 'os.path.exists', (['PlotsDirectory'], {}), '(PlotsDirectory)\n', (367, 383), False, 'import os\n'), ((389, 416), 'os.makedirs', 'os.makedirs', (['PlotsDirectory'], {}), '(PlotsDirectory)\n', (400, 416), False, 'import os\n'), ((501, 523), 'numpy.array', 'np.array', (['[1050, 1200]'], {}), '([1050, 1200])\n', (509, 523), True, 'import numpy as np\n'), ((525, 547), 'numpy.array', 'np.array', (['[1460, 1510]'], {}), '([1460, 1510])\n', (533, 547), True, 'import numpy as np\n'), ((549, 570), 'numpy.array', 'np.array', (['[950, 1000]'], {}), '([950, 1000])\n', (557, 570), True, 'import numpy as np\n'), ((592, 614), 'numpy.array', 'np.array', (['[1201, 1350]'], {}), '([1201, 1350])\n', (600, 614), True, 'import numpy as np\n'), ((616, 638), 'numpy.array', 'np.array', (['[1511, 1560]'], {}), '([1511, 1560])\n', (624, 638), True, 'import numpy as np\n'), ((640, 662), 'numpy.array', 'np.array', (['[1001, 1050]'], {}), '([1001, 1050])\n', (648, 662), True, 'import numpy as np\n'), ((1648, 1660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1658, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1885), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (1875, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1913), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (1900, 1913), True, 'import matplotlib.pyplot as plt\n'), ((1919, 2004), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(PlotsDirectory + names[i] + '_PRcurve_AUC.png')"], {'bbox_inches': '"""tight"""'}), "(PlotsDirectory + names[i] + '_PRcurve_AUC.png', bbox_inches='tight'\n )\n", (1930, 2004), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2015), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2013, 2015), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1394), 'estimator_adaptative.EstimatorAdaptative', 'EstimatorAdaptative', ([], {'alpha': 'alpha', 'rho': 'rho[i]', 'metric': '"""precision"""'}), "(alpha=alpha, rho=rho[i], metric='precision')\n", (1349, 1394), False, 'from estimator_adaptative import EstimatorAdaptative\n'), ((1415, 1476), 'estimator_adaptative.EstimatorAdaptative', 'EstimatorAdaptative', ([], {'alpha': 'alpha', 'rho': 'rho[i]', 'metric': '"""recall"""'}), "(alpha=alpha, rho=rho[i], metric='recall')\n", (1434, 1476), False, 'from estimator_adaptative import EstimatorAdaptative\n'), ((1674, 1686), 'numpy.array', 'np.array', (['Re'], {}), '(Re)\n', (1682, 1686), True, 'import numpy as np\n'), ((1688, 1700), 'numpy.array', 'np.array', (['Pr'], {}), '(Pr)\n', (1696, 1700), True, 'import numpy as np\n'), ((1795, 1820), 'sklearn.metrics.auc', 'metrics.auc', (['Re', 'Pr', '(True)'], {}), '(Re, Pr, True)\n', (1806, 1820), False, 'from sklearn import metrics\n')]
|
import numpy as np
a = np.arange(15).reshape(3,5)
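# the flat range 0..14 is reshaped into a 3x5 matrix, so a.shape prints (3, 5)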
print(a)
print(a.shape)
|
[
"numpy.arange"
] |
[((23, 36), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (32, 36), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
############################################################
## <NAME> ##
## Copyright (C) 2019-2020 Lauro Sumoy Lab, IGTP, Spain ##
############################################################
"""
Get frequence of reads for each type, variant, etc
"""
## import useful modules
import os
import sys
import re
import time
from io import open
import pandas as pd
from collections import defaultdict
import numpy as np
import random
import argparse
## import my modules
from HCGB import functions
from HCGB.functions import fasta_functions
## get frequencies
def get_freq(given_df, col_list):
df_freq = pd.DataFrame()
for miRNA, row in given_df.iterrows():
for col in col_list:
if row[col]==0:
df_freq.loc[miRNA, col] = 0
else:
df_freq.loc[miRNA, col] = row[col]/row['total']
return (df_freq)
#####################################################
parser = argparse.ArgumentParser(prog='mod_freq.py', formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
mod_freq.py: Modified given frequencies and select isomiRs
Version: 0.1
License: GPLv3
USAGE: python mod_freq.py --freq table.freq.csv --out out_name [--debug]
''', epilog="Original code: JFSanchezHerrero")
#####################################################
parser.add_argument('-f', '--freq', action='store', help='Table with original variant frequencies to modify', required=True)
parser.add_argument('-o', '--out', action='store', help='Output names', required=True)
parser.add_argument('--debug', action='store_true', default=False, help='Developer messages')
parser.add_argument('--random_rows', action='store', type=int, help='Numbers of miRNA to subset', default=100)
args = parser.parse_args()
#####################################################
## original counts
print ("# Read original frequency table")
original_counts = functions.main_functions.get_data(args.freq, ',', 'index_col=0')
col_list = list(original_counts) ## get columns
## drop NAs
print ("# Remove any rows containing NAs from frequency table")
original_counts = original_counts.dropna()
## subset 100 rows
print ("# Randomly subsetting rows")
subset_df = original_counts.sample(n=args.random_rows)
## add missing data
print ("# Adding missing information")
modified_counts = subset_df.copy(deep=True)
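# Blank out roughly 35% of the rows in every column (an independent random subset
# per column) so the table mimics missing observations before shuffling.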
for col in col_list:
    modified_counts.loc[modified_counts.sample(frac=0.35).index, col] = np.nan  # pd.np was removed from pandas; use numpy directly
## randomize
print ("# Shuffling information")
random_counts = modified_counts.apply(np.random.permutation, axis=1, result_type='broadcast')
random_counts[np.isnan(random_counts)] = 0
random_counts['total'] = random_counts.sum(axis=1)
## get frequencies
print ("# Get frequence")
random_freqs = get_freq(random_counts, col_list)
if (args.debug):
print ('##########')
print ('Random original Counts')
print (subset_df)
print ('##########')
print ('')
print ('##########')
print ('Random original Frequence')
subset_df['total'] = subset_df.sum(axis=1)
original_freq = get_freq(subset_df, col_list)
print (original_freq)
print ('##########')
## print randomize counts & frequencies
print ('##########')
print ('Random Counts')
print (random_counts)
print ('##########')
print ('')
print ('##########')
print ('Frequence')
print (random_freqs)
print ('##########')
## adjust to 100
print ("# Adjusting to 100 counts")
new_random = pd.DataFrame(columns=col_list)
for miRNA, row in random_freqs.iterrows():
for col in col_list:
if row[col]==0:
new_random.loc[miRNA, col] = 0
else:
new_random.loc[miRNA, col] = int(row[col]*100)
new_random['total'] = new_random.sum(axis=1)
for miRNA, row in new_random.iterrows():
if row['total']!=100:
        diff = 100 - int(row['total'])
        rnd = random.sample(col_list, 1)
        # rows yielded by iterrows() are copies, so write the correction back
        # through .loc (this also avoids shadowing the built-in sum)
        new_random.loc[miRNA, rnd] += diff
new_random = new_random.drop(columns=['total'])
new_random['total'] = new_random.sum(axis=1)
print ('##########')
print ('Counts')
print (subset_df)
print ('##########')
print ('')
## print randomize counts & frequencies
print ('##########')
print ('Counts adjusted')
print (new_random)
print ('##########')
print ('')
print ("Printing frequencies in table: " + args.out)
#print (df_miRNA)
new_random.to_csv(args.out + ".csv", ',')
|
[
"random.sample",
"argparse.ArgumentParser",
"numpy.isnan",
"HCGB.functions.main_functions.get_data",
"pandas.DataFrame"
] |
[((997, 1336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""mod_freq.py"""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': '"""\n\nmod_freq.py: Modified given frequencies and select isomiRs\n\nVersion: 0.1\nLicense: GPLv3\n\nUSAGE: python mod_freq.py --freq table.freq.csv --out out_name [--debug] \n"""', 'epilog': '"""Original code: JFSanchezHerrero"""'}), '(prog=\'mod_freq.py\', formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n """\n\nmod_freq.py: Modified given frequencies and select isomiRs\n\nVersion: 0.1\nLicense: GPLv3\n\nUSAGE: python mod_freq.py --freq table.freq.csv --out out_name [--debug] \n"""\n , epilog=\'Original code: JFSanchezHerrero\')\n', (1020, 1336), False, 'import argparse\n'), ((1989, 2053), 'HCGB.functions.main_functions.get_data', 'functions.main_functions.get_data', (['args.freq', '""","""', '"""index_col=0"""'], {}), "(args.freq, ',', 'index_col=0')\n", (2022, 2053), False, 'from HCGB import functions\n'), ((3560, 3590), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col_list'}), '(columns=col_list)\n', (3572, 3590), True, 'import pandas as pd\n'), ((670, 684), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (682, 684), True, 'import pandas as pd\n'), ((2698, 2721), 'numpy.isnan', 'np.isnan', (['random_counts'], {}), '(random_counts)\n', (2706, 2721), True, 'import numpy as np\n'), ((3966, 3992), 'random.sample', 'random.sample', (['col_list', '(1)'], {}), '(col_list, 1)\n', (3979, 3992), False, 'import random\n')]
|
import os
import argparse
import torch
import numpy
import random
from datetime import datetime
def format_time():
now = datetime.now() # current date and time
date_time = now.strftime("%m-%d-%H:%M:%S")
return date_time
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def str2bool(v):
if isinstance(v, bool):
return v
    # Note the trailing commas: ('true') without one is just a string, and `in`
    # would then perform substring matching (e.g. 'rue' would be accepted).
    if v.lower() in ('true',):
        return True
    elif v.lower() in ('false',):
        return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def set_seed(seed):
# torch.backends.cudnn.deterministic = True ## this one is controversial
torch.manual_seed(seed)
numpy.random.seed(seed)
random.seed(seed)
torch.cuda.manual_seed(seed)
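    # note: torch.cuda.manual_seed only seeds the current device; multi-GPU runs
    # would additionally need torch.cuda.manual_seed_all(seed)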
|
[
"torch.manual_seed",
"os.path.exists",
"os.makedirs",
"random.seed",
"argparse.ArgumentTypeError",
"datetime.datetime.now",
"numpy.random.seed",
"torch.cuda.manual_seed"
] |
[((127, 141), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (139, 141), False, 'from datetime import datetime\n'), ((668, 691), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (685, 691), False, 'import torch\n'), ((696, 719), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (713, 719), False, 'import numpy\n'), ((724, 741), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (735, 741), False, 'import random\n'), ((746, 774), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (768, 774), False, 'import torch\n'), ((270, 290), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (284, 290), False, 'import os\n'), ((300, 317), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (311, 317), False, 'import os\n'), ((510, 563), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (536, 563), False, 'import argparse\n')]
|
###############################################################################
# Copyright (c) 2019 Uber Technologies, Inc. #
# #
# Licensed under the Uber Non-Commercial License (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at the root directory of this project. #
# #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import math
import sys
from copy import deepcopy
import gpytorch
import numpy as np
import torch
from .gp import train_gp
from .turbo_1 import Turbo1
from .utils import from_unit_cube, latin_hypercube, to_unit_cube
class TurboM(Turbo1):
"""The TuRBO-m algorithm.
Parameters
----------
f : function handle
lb : Lower variable bounds, numpy.array, shape (d,).
ub : Upper variable bounds, numpy.array, shape (d,).
n_init : Number of initial points *FOR EACH TRUST REGION* (2*dim is recommended), int.
max_evals : Total evaluation budget, int.
n_trust_regions : Number of trust regions
batch_size : Number of points in each batch, int.
verbose : If you want to print information about the optimization progress, bool.
use_ard : If you want to use ARD for the GP kernel.
max_cholesky_size : Largest number of training points where we use Cholesky, int
n_training_steps : Number of training steps for learning the GP hypers, int
min_cuda : We use float64 on the CPU if we have this or fewer datapoints
device : Device to use for GP fitting ("cpu" or "cuda")
dtype : Dtype to use for GP fitting ("float32" or "float64")
Example usage:
turbo5 = TurboM(f=f, lb=lb, ub=ub, n_init=n_init, max_evals=max_evals, n_trust_regions=5)
turbo5.optimize() # Run optimization
X, fX = turbo5.X, turbo5.fX # Evaluated points
"""
def __init__(
self,
f,
lb,
ub,
n_init,
max_evals,
n_trust_regions,
batch_size=1,
verbose=True,
use_ard=True,
max_cholesky_size=2000,
n_training_steps=50,
min_cuda=1024,
device="cpu",
dtype="float64",
):
self.n_trust_regions = n_trust_regions
super().__init__(
f=f,
lb=lb,
ub=ub,
n_init=n_init,
max_evals=max_evals,
batch_size=batch_size,
verbose=verbose,
use_ard=use_ard,
max_cholesky_size=max_cholesky_size,
n_training_steps=n_training_steps,
min_cuda=min_cuda,
device=device,
dtype=dtype,
)
self.succtol = 3
self.failtol = max(5, self.dim)
# Very basic input checks
assert n_trust_regions > 1 and isinstance(max_evals, int)
assert max_evals > n_trust_regions * n_init, "Not enough trust regions to do initial evaluations"
assert max_evals > batch_size, "Not enough evaluations to do a single batch"
# Remember the hypers for trust regions we don't sample from
self.hypers = [{} for _ in range(self.n_trust_regions)]
# Initialize parameters
self._restart()
def _restart(self):
self._idx = np.zeros((0, 1), dtype=int) # Track what trust region proposed what using an index vector
self.failcount = np.zeros(self.n_trust_regions, dtype=int)
self.succcount = np.zeros(self.n_trust_regions, dtype=int)
self.length = self.length_init * np.ones(self.n_trust_regions)
def _adjust_length(self, fX_next, i):
assert i >= 0 and i <= self.n_trust_regions - 1
fX_min = self.fX[self._idx[:, 0] == i, 0].min() # Target value
if fX_next.min() < fX_min - 1e-3 * math.fabs(fX_min):
self.succcount[i] += 1
self.failcount[i] = 0
else:
self.succcount[i] = 0
self.failcount[i] += len(fX_next) # NOTE: Add size of the batch for this TR
if self.succcount[i] == self.succtol: # Expand trust region
self.length[i] = min([2.0 * self.length[i], self.length_max])
self.succcount[i] = 0
elif self.failcount[i] >= self.failtol: # Shrink trust region (we may have exceeded the failtol)
self.length[i] /= 2.0
self.failcount[i] = 0
def _select_candidates(self, X_cand, y_cand):
"""Select candidates from samples from all trust regions."""
assert X_cand.shape == (self.n_trust_regions, self.n_cand, self.dim)
assert y_cand.shape == (self.n_trust_regions, self.n_cand, self.batch_size)
assert X_cand.min() >= 0.0 and X_cand.max() <= 1.0 and np.all(np.isfinite(y_cand))
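        # Greedy batch selection: for every slot in the batch, pick the single best
        # (lowest) Thompson sample across all trust regions and candidates, then mark
        # it as taken so it cannot be selected again.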
X_next = np.zeros((self.batch_size, self.dim))
idx_next = np.zeros((self.batch_size, 1), dtype=int)
for k in range(self.batch_size):
i, j = np.unravel_index(np.argmin(y_cand[:, :, k]), (self.n_trust_regions, self.n_cand))
assert y_cand[:, :, k].min() == y_cand[i, j, k]
X_next[k, :] = deepcopy(X_cand[i, j, :])
idx_next[k, 0] = i
assert np.isfinite(y_cand[i, j, k]) # Just to make sure we never select nan or inf
# Make sure we never pick this point again
y_cand[i, j, :] = np.inf
return X_next, idx_next
def optimize(self):
"""Run the full optimization process."""
# Create initial points for each TR
for i in range(self.n_trust_regions):
X_init = latin_hypercube(self.n_init, self.dim)
X_init = from_unit_cube(X_init, self.lb, self.ub)
fX_init = np.array([[self.f(x)] for x in X_init])
# Update budget and set as initial data for this TR
self.X = np.vstack((self.X, X_init))
self.fX = np.vstack((self.fX, fX_init))
self._idx = np.vstack((self._idx, i * np.ones((self.n_init, 1), dtype=int)))
self.n_evals += self.n_init
if self.verbose:
fbest = fX_init.min()
print(f"TR-{i} starting from: {fbest:.4}")
sys.stdout.flush()
# Thompson sample to get next suggestions
while self.n_evals < self.max_evals:
# Generate candidates from each TR
X_cand = np.zeros((self.n_trust_regions, self.n_cand, self.dim))
y_cand = np.inf * np.ones((self.n_trust_regions, self.n_cand, self.batch_size))
for i in range(self.n_trust_regions):
idx = np.where(self._idx == i)[0] # Extract all "active" indices
# Get the points, values the active values
X = deepcopy(self.X[idx, :])
X = to_unit_cube(X, self.lb, self.ub)
# Get the values from the standardized data
fX = deepcopy(self.fX[idx, 0].ravel())
# Don't retrain the model if the training data hasn't changed
n_training_steps = 0 if self.hypers[i] else self.n_training_steps
# Create new candidates
X_cand[i, :, :], y_cand[i, :, :], self.hypers[i] = self._create_candidates(
X, fX, length=self.length[i], n_training_steps=n_training_steps, hypers=self.hypers[i]
)
# Select the next candidates
X_next, idx_next = self._select_candidates(X_cand, y_cand)
assert X_next.min() >= 0.0 and X_next.max() <= 1.0
# Undo the warping
X_next = from_unit_cube(X_next, self.lb, self.ub)
# Evaluate batch
fX_next = np.array([[self.f(x)] for x in X_next])
# Update trust regions
for i in range(self.n_trust_regions):
idx_i = np.where(idx_next == i)[0]
if len(idx_i) > 0:
self.hypers[i] = {} # Remove model hypers
fX_i = fX_next[idx_i]
if self.verbose and fX_i.min() < self.fX.min() - 1e-3 * math.fabs(self.fX.min()):
n_evals, fbest = self.n_evals, fX_i.min()
print(f"{n_evals}) New best @ TR-{i}: {fbest:.4}")
sys.stdout.flush()
self._adjust_length(fX_i, i)
# Update budget and append data
self.n_evals += self.batch_size
self.X = np.vstack((self.X, deepcopy(X_next)))
self.fX = np.vstack((self.fX, deepcopy(fX_next)))
self._idx = np.vstack((self._idx, deepcopy(idx_next)))
# Check if any TR needs to be restarted
for i in range(self.n_trust_regions):
if self.length[i] < self.length_min: # Restart trust region if converged
idx_i = self._idx[:, 0] == i
if self.verbose:
n_evals, fbest = self.n_evals, self.fX[idx_i, 0].min()
print(f"{n_evals}) TR-{i} converged to: : {fbest:.4}")
sys.stdout.flush()
# Reset length and counters, remove old data from trust region
self.length[i] = self.length_init
self.succcount[i] = 0
self.failcount[i] = 0
self._idx[idx_i, 0] = -1 # Remove points from trust region
self.hypers[i] = {} # Remove model hypers
# Create a new initial design
X_init = latin_hypercube(self.n_init, self.dim)
X_init = from_unit_cube(X_init, self.lb, self.ub)
fX_init = np.array([[self.f(x)] for x in X_init])
# Print progress
if self.verbose:
n_evals, fbest = self.n_evals, fX_init.min()
print(f"{n_evals}) TR-{i} is restarting from: : {fbest:.4}")
sys.stdout.flush()
# Append data to local history
self.X = np.vstack((self.X, X_init))
self.fX = np.vstack((self.fX, fX_init))
self._idx = np.vstack((self._idx, i * np.ones((self.n_init, 1), dtype=int)))
self.n_evals += self.n_init
|
[
"numpy.ones",
"numpy.where",
"numpy.zeros",
"numpy.isfinite",
"numpy.vstack",
"math.fabs",
"copy.deepcopy",
"numpy.argmin",
"sys.stdout.flush"
] |
[((3611, 3638), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {'dtype': 'int'}), '((0, 1), dtype=int)\n', (3619, 3638), True, 'import numpy as np\n'), ((3727, 3768), 'numpy.zeros', 'np.zeros', (['self.n_trust_regions'], {'dtype': 'int'}), '(self.n_trust_regions, dtype=int)\n', (3735, 3768), True, 'import numpy as np\n'), ((3794, 3835), 'numpy.zeros', 'np.zeros', (['self.n_trust_regions'], {'dtype': 'int'}), '(self.n_trust_regions, dtype=int)\n', (3802, 3835), True, 'import numpy as np\n'), ((5089, 5126), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.dim)'], {}), '((self.batch_size, self.dim))\n', (5097, 5126), True, 'import numpy as np\n'), ((5146, 5187), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1)'], {'dtype': 'int'}), '((self.batch_size, 1), dtype=int)\n', (5154, 5187), True, 'import numpy as np\n'), ((3877, 3906), 'numpy.ones', 'np.ones', (['self.n_trust_regions'], {}), '(self.n_trust_regions)\n', (3884, 3906), True, 'import numpy as np\n'), ((5417, 5442), 'copy.deepcopy', 'deepcopy', (['X_cand[i, j, :]'], {}), '(X_cand[i, j, :])\n', (5425, 5442), False, 'from copy import deepcopy\n'), ((5493, 5521), 'numpy.isfinite', 'np.isfinite', (['y_cand[i, j, k]'], {}), '(y_cand[i, j, k])\n', (5504, 5521), True, 'import numpy as np\n'), ((6130, 6157), 'numpy.vstack', 'np.vstack', (['(self.X, X_init)'], {}), '((self.X, X_init))\n', (6139, 6157), True, 'import numpy as np\n'), ((6180, 6209), 'numpy.vstack', 'np.vstack', (['(self.fX, fX_init)'], {}), '((self.fX, fX_init))\n', (6189, 6209), True, 'import numpy as np\n'), ((6666, 6721), 'numpy.zeros', 'np.zeros', (['(self.n_trust_regions, self.n_cand, self.dim)'], {}), '((self.n_trust_regions, self.n_cand, self.dim))\n', (6674, 6721), True, 'import numpy as np\n'), ((5050, 5069), 'numpy.isfinite', 'np.isfinite', (['y_cand'], {}), '(y_cand)\n', (5061, 5069), True, 'import numpy as np\n'), ((5265, 5291), 'numpy.argmin', 'np.argmin', (['y_cand[:, :, k]'], {}), '(y_cand[:, :, k])\n', (5274, 5291), True, 'import numpy as np\n'), ((6482, 6500), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6498, 6500), False, 'import sys\n'), ((6752, 6813), 'numpy.ones', 'np.ones', (['(self.n_trust_regions, self.n_cand, self.batch_size)'], {}), '((self.n_trust_regions, self.n_cand, self.batch_size))\n', (6759, 6813), True, 'import numpy as np\n'), ((7026, 7050), 'copy.deepcopy', 'deepcopy', (['self.X[idx, :]'], {}), '(self.X[idx, :])\n', (7034, 7050), False, 'from copy import deepcopy\n'), ((4122, 4139), 'math.fabs', 'math.fabs', (['fX_min'], {}), '(fX_min)\n', (4131, 4139), False, 'import math\n'), ((6886, 6910), 'numpy.where', 'np.where', (['(self._idx == i)'], {}), '(self._idx == i)\n', (6894, 6910), True, 'import numpy as np\n'), ((8112, 8135), 'numpy.where', 'np.where', (['(idx_next == i)'], {}), '(idx_next == i)\n', (8120, 8135), True, 'import numpy as np\n'), ((8744, 8760), 'copy.deepcopy', 'deepcopy', (['X_next'], {}), '(X_next)\n', (8752, 8760), False, 'from copy import deepcopy\n'), ((8805, 8822), 'copy.deepcopy', 'deepcopy', (['fX_next'], {}), '(fX_next)\n', (8813, 8822), False, 'from copy import deepcopy\n'), ((8871, 8889), 'copy.deepcopy', 'deepcopy', (['idx_next'], {}), '(idx_next)\n', (8879, 8889), False, 'from copy import deepcopy\n'), ((10350, 10377), 'numpy.vstack', 'np.vstack', (['(self.X, X_init)'], {}), '((self.X, X_init))\n', (10359, 10377), True, 'import numpy as np\n'), ((10408, 10437), 'numpy.vstack', 'np.vstack', (['(self.fX, fX_init)'], {}), '((self.fX, fX_init))\n', (10417, 10437), True, 'import numpy as np\n'), ((6260, 6296), 
'numpy.ones', 'np.ones', (['(self.n_init, 1)'], {'dtype': 'int'}), '((self.n_init, 1), dtype=int)\n', (6267, 6296), True, 'import numpy as np\n'), ((8547, 8565), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8563, 8565), False, 'import sys\n'), ((9354, 9372), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9370, 9372), False, 'import sys\n'), ((10250, 10268), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10266, 10268), False, 'import sys\n'), ((10496, 10532), 'numpy.ones', 'np.ones', (['(self.n_init, 1)'], {'dtype': 'int'}), '((self.n_init, 1), dtype=int)\n', (10503, 10532), True, 'import numpy as np\n')]
|
#
# file: gd_1d.py
#
# 1D example of GD
#
# RTK, 14-Feb-2021
# Last update: 14-Feb-2021
#
################################################################
import sys
import os
import numpy as np
import matplotlib.pylab as plt
# The function and its derivative
def f(x):
return 6*x**2 - 12*x + 3
def d(x):
return 12*x - 12
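# Analytic check: f'(x) = 12x - 12 = 0 gives x = 1 and f(1) = 6 - 12 + 3 = -3,
# so gradient descent should converge to the point (1, -3) marked below.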
# Show the function, derivative, and minimum
x = np.linspace(-1,3,1000)
y = f(x)
plt.plot(x,y,color='#1f77b4')
x = np.linspace(0,3,10)
z = d(x)
plt.plot(x,z,color='#ff7f0e')
plt.plot([-1,3],[0,0],linestyle=(0,(1,1)),color='k')
plt.plot([1,1],[-10,25],linestyle=(0,(1,1)),color='k')
plt.plot([1,1],[f(1),f(1)],marker='o',color='#1f77b4')
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("gd_1d_plot.png", dpi=300)
#plt.show()
plt.close()
# Show a series of gradient descent steps
x = np.linspace(-1,3,1000)
plt.plot(x,f(x))
x = -0.9
eta = 0.03
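# each update multiplies the distance to the minimum by (1 - 12*eta) = 0.64,
# so 15 steps shrink the initial error by roughly 0.64**15 ~ 1e-3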
for i in range(15):
plt.plot(x, f(x), marker='o', color='r')
x = x - eta * d(x)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("gd_1d_steps.png", dpi=300)
#plt.show()
plt.close()
print("Minimum at (%0.6f, %0.6f)" % (x, f(x)))
# Show oscillation if step size too large
x = np.linspace(0.75,1.25,1000)
plt.plot(x,f(x))
x = xold = 0.75
for i in range(14):
plt.plot([xold,x], [f(xold),f(x)], marker='o', linestyle='dotted', color='r')
xold = x
x = x - 0.15 * d(x)
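# with step size 0.15 the error is multiplied by (1 - 12*0.15) = -0.8 per step:
# the iterates overshoot to alternating sides of the minimum yet still converge slowly.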
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("gd_1d_oscillating.png", dpi=300)
#plt.show()
|
[
"matplotlib.pylab.savefig",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.xlabel",
"numpy.linspace",
"matplotlib.pylab.plot",
"matplotlib.pylab.close",
"matplotlib.pylab.ylabel"
] |
[((392, 416), 'numpy.linspace', 'np.linspace', (['(-1)', '(3)', '(1000)'], {}), '(-1, 3, 1000)\n', (403, 416), True, 'import numpy as np\n'), ((424, 455), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {'color': '"""#1f77b4"""'}), "(x, y, color='#1f77b4')\n", (432, 455), True, 'import matplotlib.pylab as plt\n'), ((458, 479), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(10)'], {}), '(0, 3, 10)\n', (469, 479), True, 'import numpy as np\n'), ((487, 518), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'z'], {'color': '"""#ff7f0e"""'}), "(x, z, color='#ff7f0e')\n", (495, 518), True, 'import matplotlib.pylab as plt\n'), ((517, 576), 'matplotlib.pylab.plot', 'plt.plot', (['[-1, 3]', '[0, 0]'], {'linestyle': '(0, (1, 1))', 'color': '"""k"""'}), "([-1, 3], [0, 0], linestyle=(0, (1, 1)), color='k')\n", (525, 576), True, 'import matplotlib.pylab as plt\n'), ((570, 631), 'matplotlib.pylab.plot', 'plt.plot', (['[1, 1]', '[-10, 25]'], {'linestyle': '(0, (1, 1))', 'color': '"""k"""'}), "([1, 1], [-10, 25], linestyle=(0, (1, 1)), color='k')\n", (578, 631), True, 'import matplotlib.pylab as plt\n'), ((680, 697), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (690, 697), True, 'import matplotlib.pylab as plt\n'), ((698, 715), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (708, 715), True, 'import matplotlib.pylab as plt\n'), ((716, 757), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)', 'w_pad': '(0)', 'h_pad': '(0)'}), '(pad=0, w_pad=0, h_pad=0)\n', (732, 757), True, 'import matplotlib.pylab as plt\n'), ((758, 796), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""gd_1d_plot.png"""'], {'dpi': '(300)'}), "('gd_1d_plot.png', dpi=300)\n", (769, 796), True, 'import matplotlib.pylab as plt\n'), ((809, 820), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (818, 820), True, 'import matplotlib.pylab as plt\n'), ((869, 893), 'numpy.linspace', 'np.linspace', (['(-1)', '(3)', '(1000)'], {}), '(-1, 3, 1000)\n', (880, 893), True, 'import numpy as np\n'), ((1019, 1036), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1029, 1036), True, 'import matplotlib.pylab as plt\n'), ((1037, 1054), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1047, 1054), True, 'import matplotlib.pylab as plt\n'), ((1055, 1096), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)', 'w_pad': '(0)', 'h_pad': '(0)'}), '(pad=0, w_pad=0, h_pad=0)\n', (1071, 1096), True, 'import matplotlib.pylab as plt\n'), ((1097, 1136), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""gd_1d_steps.png"""'], {'dpi': '(300)'}), "('gd_1d_steps.png', dpi=300)\n", (1108, 1136), True, 'import matplotlib.pylab as plt\n'), ((1149, 1160), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (1158, 1160), True, 'import matplotlib.pylab as plt\n'), ((1256, 1285), 'numpy.linspace', 'np.linspace', (['(0.75)', '(1.25)', '(1000)'], {}), '(0.75, 1.25, 1000)\n', (1267, 1285), True, 'import numpy as np\n'), ((1457, 1474), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1467, 1474), True, 'import matplotlib.pylab as plt\n'), ((1475, 1492), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1485, 1492), True, 'import matplotlib.pylab as plt\n'), ((1493, 1534), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)', 'w_pad': '(0)', 'h_pad': '(0)'}), '(pad=0, w_pad=0, h_pad=0)\n', (1509, 1534), True, 'import matplotlib.pylab as plt\n'), ((1535, 
1580), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""gd_1d_oscillating.png"""'], {'dpi': '(300)'}), "('gd_1d_oscillating.png', dpi=300)\n", (1546, 1580), True, 'import matplotlib.pylab as plt\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>, <NAME>
# *****************************************************************************/
from __future__ import absolute_import, division, print_function, unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations
import tensorflow as tf
from tensorflow import keras
import numpy as np
import os, json, random
import matplotlib.pyplot as plt
def main():
#fashion_mnist = keras.datasets.fashion_mnist
#(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
if os.path.exists("QDist_2.txt") and os.path.exists("labels_1.txt"):
with open("QDist_2.txt") as jF:
shapelets = json.load(jF)
with open("labels_1.txt") as jF:
labels = json.load(jF)
dists = []
for key in shapelets:
dists.append(shapelets[key])
shuffArr = [i for i in zip(dists, labels)]
random.shuffle(shuffArr)
#print(shuffArr)
dists = np.array([i[0] for i in shuffArr])
labels = np.array([i[1] for i in shuffArr])
print(labels)
test = np.array(dists[0:1])
train = dists[1:]
test_labels = np.array(labels[0:1])
train_labels = labels[1:]
print(train.shape)
#print(train_images)
#class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
# 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
#train_images = train_images / 255.0
#
#test_images = test_images / 255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(11, 11)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dense(3, activation=tf.nn.softmax)
])
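    # The 11x11 grid of (shapelet) distance features is flattened to 121 inputs and
    # passed through two hidden ReLU layers (128 and 64 units) to a 3-way softmax.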
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
checkpoint_path = "training_2/cp_1.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create checkpoint callback
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
model.fit(train, train_labels, epochs=50, callbacks = [cp_callback])
test_loss, test_acc = model.evaluate(dists, labels)
#test_loss, test_acc = model.evaluate(test, test_labels)
print(test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test)
model.save('my_model_new_data.h5')
if __name__ == '__main__':
main()
|
[
"os.path.exists",
"random.shuffle",
"os.path.dirname",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"json.load",
"tensorflow.keras.layers.Flatten"
] |
[((703, 732), 'os.path.exists', 'os.path.exists', (['"""QDist_2.txt"""'], {}), "('QDist_2.txt')\n", (717, 732), False, 'import os, json, random\n'), ((737, 767), 'os.path.exists', 'os.path.exists', (['"""labels_1.txt"""'], {}), "('labels_1.txt')\n", (751, 767), False, 'import os, json, random\n'), ((1089, 1113), 'random.shuffle', 'random.shuffle', (['shuffArr'], {}), '(shuffArr)\n', (1103, 1113), False, 'import os, json, random\n'), ((1161, 1195), 'numpy.array', 'np.array', (['[i[0] for i in shuffArr]'], {}), '([i[0] for i in shuffArr])\n', (1169, 1195), True, 'import numpy as np\n'), ((1214, 1248), 'numpy.array', 'np.array', (['[i[1] for i in shuffArr]'], {}), '([i[1] for i in shuffArr])\n', (1222, 1248), True, 'import numpy as np\n'), ((1290, 1310), 'numpy.array', 'np.array', (['dists[0:1]'], {}), '(dists[0:1])\n', (1298, 1310), True, 'import numpy as np\n'), ((1361, 1382), 'numpy.array', 'np.array', (['labels[0:1]'], {}), '(labels[0:1])\n', (1369, 1382), True, 'import numpy as np\n'), ((2261, 2293), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2276, 2293), False, 'import os, json, random\n'), ((2357, 2447), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['checkpoint_path'], {'save_weights_only': '(True)', 'verbose': '(1)'}), '(checkpoint_path, save_weights_only=True,\n verbose=1)\n', (2391, 2447), True, 'import tensorflow as tf\n'), ((835, 848), 'json.load', 'json.load', (['jF'], {}), '(jF)\n', (844, 848), False, 'import os, json, random\n'), ((915, 928), 'json.load', 'json.load', (['jF'], {}), '(jF)\n', (924, 928), False, 'import os, json, random\n'), ((1794, 1836), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(11, 11)'}), '(input_shape=(11, 11))\n', (1814, 1836), False, 'from tensorflow import keras\n'), ((1851, 1897), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.relu'}), '(128, activation=tf.nn.relu)\n', (1869, 1897), False, 'from tensorflow import keras\n'), ((1912, 1957), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(64)'], {'activation': 'tf.nn.relu'}), '(64, activation=tf.nn.relu)\n', (1930, 1957), False, 'from tensorflow import keras\n'), ((1972, 2019), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(3)'], {'activation': 'tf.nn.softmax'}), '(3, activation=tf.nn.softmax)\n', (1990, 2019), False, 'from tensorflow import keras\n')]
|
import sep
import numpy as np
import scarlet
from scarlet.wavelet import mad_wavelet, Starlet
from .utils import extract_obj, image_gaia_stars
from astropy.table import Table, Column
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import SkyCoord
from kuaizi.mock import Data
def interpolate(data_lr, data_hr):
''' Interpolate low resolution data to high resolution
Parameters
----------
data_lr: Data
low resolution Data
data_hr: Data
high resolution Data
Result
------
interp: numpy array
the images in data_lr interpolated to the grid of data_hr
'''
frame_lr = scarlet.Frame(data_lr.images.shape,
wcs=data_lr.wcs, channels=data_lr.channels)
frame_hr = scarlet.Frame(data_hr.images.shape,
wcs=data_hr.wcs, channels=data_hr.channels)
coord_lr0 = (np.arange(data_lr.images.shape[1]), np.arange(
data_lr.images.shape[1]))
coord_hr = (np.arange(data_hr.images.shape[1]), np.arange(
data_hr.images.shape[1]))
coord_lr = scarlet.resampling.convert_coordinates(
coord_lr0, frame_lr, frame_hr)
interp = []
for image in data_lr.images:
interp.append(scarlet.interpolation.sinc_interp(
image[None, :, :], coord_hr, coord_lr, angle=None)[0].T)
return np.array(interp)
# Vanilla detection: SEP
def vanilla_detection(detect_image, mask=None, sigma=3, b=64, f=3, minarea=5, deblend_nthresh=30,
deblend_cont=0.001, sky_subtract=True, show_fig=True, **kwargs):
'''
Source detection using Source Extractor (actually SEP).
Parameters
----------
detect_image: 2-D numpy array
image
mask: numpy 2-D array
image mask
sigma: float
detection threshold
b: float
box size
f: float
kernel size
minarea: float
minimum area for a source
sky_subtract: bool
whether subtract the estimated sky from the input image, then detect sources
show_fig: bool
whether plot a figure showing objects and segmentation map
**kwargs: see `utils.extract_obj`.
Result
------
obj_cat: `astropy.table.Table` object
catalog of detected sources
segmap: numpy array
segmentation map
fig: `matplotlib.pyplot.figure` object
'''
result = extract_obj(
detect_image,
mask=mask,
b=b,
f=f,
sigma=sigma,
minarea=minarea,
deblend_nthresh=deblend_nthresh,
deblend_cont=deblend_cont,
sky_subtract=sky_subtract,
show_fig=show_fig,
**kwargs)
obj_cat = result[0]
arg_ind = obj_cat.argsort('flux', reverse=True)
obj_cat.sort('flux', reverse=True)
obj_cat['index'] = np.arange(len(obj_cat))
segmap = result[1]
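    # Relabel the segmentation map so segment IDs follow the new flux-sorted order of
    # obj_cat; the prepended -1 and the trailing +1 keep 0 as the background label.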
segmap = np.append(-1, np.argsort(arg_ind))[segmap] + 1
if show_fig is True:
fig = result[2]
return obj_cat, segmap, fig
else:
return obj_cat, segmap
def wavelet_detection(detect_image, mask=None, wavelet_lvl=4, low_freq_lvl=0, high_freq_lvl=1,
sigma=3, b=64, f=3, minarea=5, deblend_nthresh=30,
deblend_cont=0.001, sky_subtract=True, show_fig=True, **kwargs):
'''
Perform wavelet transform before detecting sources. This enable us to emphasize features with high frequency or low frequency.
Parameters
----------
detect_image: 2-D numpy array
image
mask: numpy 2-D array
image mask
wavelet_lvl: int
the number of wavelet decompositions
high_freq_lvl: int
this parameter controls how much low-frequency features are wiped away. It should be smaller than `wavelet_lvl - 1`.
`high_freq_lvl=0` means no low-freq features are wiped (equivalent to vanilla), higher number yields a image with less low-freq features.
sigma: float
detection threshold
b: float
box size
f: float
kernel size
minarea: float
minimum area for a source
sky_subtract: bool
whether subtract the estimated sky from the input image, then detect sources
show_fig: bool
whether plot a figure showing objects and segmentation map
**kwargs: see `utils.extract_obj`.
Result
------
obj_cat: `astropy.table.Table` object
catalog of detected sources
segmap: numpy array
segmentation map
fig: `matplotlib.pyplot.figure` object
'''
Sw = Starlet(detect_image, lvl=wavelet_lvl) # wavelet decomposition
w = Sw.coefficients
iw = Sw.image
if high_freq_lvl != 0:
w[:, (high_freq_lvl):, :, :] = 0 # remove low frequency features
# w: from high to low
if low_freq_lvl != 0:
w[:, :(low_freq_lvl), :, :] = 0 # remove high frequency features
# image with high-frequency features highlighted
high_freq_image = Starlet(coefficients=w).image[0]
result = vanilla_detection(
high_freq_image,
mask=mask,
sigma=sigma,
b=b,
f=f,
minarea=minarea,
deblend_nthresh=deblend_nthresh,
deblend_cont=deblend_cont,
sky_subtract=sky_subtract,
show_fig=show_fig,
**kwargs)
if show_fig is True:
obj_cat, segmap, fig = result
return obj_cat, segmap, fig
else:
obj_cat, segmap = result
return obj_cat, segmap
def makeCatalog(datas, mask=None, lvl=3, method='wavelet', convolve=False, conv_radius=5,
match_gaia=True, show_fig=True, visual_gaia=True, **kwargs):
''' Creates a detection catalog by combining low and high resolution data.
This function is used for detection before running scarlet.
It is particularly useful for stellar crowded fields and for detecting high frequency features.
Parameters
----------
datas: array
array of Data objects
mask: numpy 2-D array
image mask
lvl: int
detection lvl, i.e., sigma in SEP
method: str
Options:
"wavelet" uses wavelet decomposition of images before combination, emphasizes high-frequency features
"vanilla" directly detect objects using SEP
match_gaia: bool
whether matching the detection catalog with Gaia dataset
show_fig: bool
whether show the detection catalog as a figure
visual_gaia: bool
whether mark Gaia stars in the figure
kwargs:
See the arguments of 'utils.extract_obj'.
Returns
-------
obj_cat: `astropy.table.Table` object
catalog of detected sources
segmap: numpy array
segmentation map
bg_rms: array
background level for each dataset
'''
if len(datas) == 1:
hr_images = datas[0].images / \
np.sum(datas[0].images, axis=(1, 2))[:, None, None]
# Detection image as the sum over all images
detect_image = np.sum(hr_images, axis=0)
else:
data_lr, data_hr = datas
# Create observations for each image
# Interpolate low resolution to high resolution
interp = interpolate(data_lr, data_hr)
# Normalisation of the interpolate low res images
interp = interp / np.sum(interp, axis=(1, 2))[:, None, None]
# Normalisation of the high res data
hr_images = data_hr.images / \
np.sum(data_hr.images, axis=(1, 2))[:, None, None]
# Detection image as the sum over all images
detect_image = np.sum(interp, axis=0) + np.sum(hr_images, axis=0)
detect_image *= np.sum(data_hr.images)
if np.size(detect_image.shape) == 3:
detect = detect_image.mean(axis=0)
else:
detect = detect_image
if convolve:
from astropy.convolution import convolve, Box2DKernel, Gaussian2DKernel
detect = convolve(detect.astype(float), Gaussian2DKernel(conv_radius))
if method == 'wavelet':
result = wavelet_detection(
detect, mask=mask, sigma=lvl, show_fig=show_fig, **kwargs)
else:
result = vanilla_detection(
detect, mask=mask, sigma=lvl, show_fig=show_fig, **kwargs)
obj_cat = result[0]
segmap = result[1]
## RA and Dec
if len(datas) == 1:
ra, dec = datas[0].wcs.wcs_pix2world(obj_cat['x'], obj_cat['y'], 0)
obj_cat.add_columns([Column(data=ra, name='ra'),
Column(data=dec, name='dec')])
else:
ra_lr, dec_lr = data_lr.wcs.wcs_pix2world(
obj_cat['x'], obj_cat['y'], 0)
ra_hr, dec_hr = data_hr.wcs.wcs_pix2world(
obj_cat['x'], obj_cat['y'], 0)
obj_cat.add_columns(
[Column(data=ra_lr, name='ra_lr'), Column(data=dec_lr, name='dec_lr')])
obj_cat.add_columns(
[Column(data=ra_hr, name='ra_hr'), Column(data=dec_lr, name='dec_hr')])
# Reorder columns
colnames = obj_cat.colnames
for item in ['dec', 'ra', 'y', 'x', 'index']:
if item in colnames:
colnames.remove(item)
colnames.insert(0, item)
obj_cat = obj_cat[colnames]
obj_cat.add_column(
Column(data=[None] * len(obj_cat), name='obj_type'), index=0)
if len(datas) == 1:
bg_rms = mad_wavelet(datas[0].images)
else:
bg_rms = []
for data in datas:
bg_rms.append(mad_wavelet(data.images))
if match_gaia:
obj_cat.add_column(
Column(data=[None] * len(obj_cat), name='gaia_coord'))
if len(datas) == 1:
w = datas[0].wcs
pixel_scale = w.to_header()['PC2_2'] * 3600
else:
w = data_hr.wcs
pixel_scale = w.to_header()['PC2_2'] * 3600
# Retrieve GAIA catalog
gaia_stars = image_gaia_stars(
detect, w, pixel_scale=pixel_scale,
verbose=True, visual=visual_gaia)
# Cross-match with SExtractor catalog
from astropy.coordinates import SkyCoord, match_coordinates_sky
temp, dist, _ = match_coordinates_sky(SkyCoord(ra=gaia_stars['ra'], dec=gaia_stars['dec'], unit='deg'),
SkyCoord(ra=obj_cat['ra'], dec=obj_cat['dec'], unit='deg'), nthneighbor=1)
flag = dist < 5 * u.arcsec
star_mag = gaia_stars['phot_g_mean_mag'].data
psf_ind = temp[flag]
star_mag = star_mag[flag]
bright_star_flag = star_mag < 19.0
        obj_cat['obj_type'][psf_ind[bright_star_flag]] = scarlet.source.ExtendedSource
        obj_cat['obj_type'][psf_ind[~bright_star_flag]] = scarlet.source.PointSource
# we also use the coordinates from Gaia for bright stars
obj_cat['gaia_coord'][psf_ind] = np.array(
gaia_stars[['ra', 'dec']])[flag]
# Cross-match for a second time: to deal with splitted bright stars
temp_cat = obj_cat.copy(copy_data=True)
temp_cat.remove_rows(psf_ind)
temp2, dist2, _ = match_coordinates_sky(SkyCoord(ra=gaia_stars['ra'], dec=gaia_stars['dec'], unit='deg'),
SkyCoord(ra=temp_cat['ra'], dec=temp_cat['dec'], unit='deg'), nthneighbor=1)
flag2 = dist2 < 1 * u.arcsec
psf_ind2 = temp_cat[temp2[flag2]]['index'].data
# we also use the coordinates from Gaia for bright stars
obj_cat.remove_rows(psf_ind2)
#obj_cat['gaia_coord'][psf_ind2] = np.array(gaia_stars[['ra', 'dec']])[flag2]
#obj_cat['obj_type'][psf_ind2] = scarlet.source.PointSource
print(f'# Matched {len(psf_ind)} stars from GAIA')
obj_cat['index'] = np.arange(len(obj_cat))
# Visualize the results
if show_fig and match_gaia:
from matplotlib.patches import Ellipse as mpl_ellip
from .display import ORG, GRN
fig = result[2]
ax1 = fig.get_axes()[0]
xlim = ax1.get_xlim()
ylim = ax1.get_ylim()
# Plot an ellipse for each object
for star in gaia_stars[flag]:
smask = mpl_ellip(
xy=(star['x_pix'], star['y_pix']),
width=(2.0 * star['rmask_arcsec'] / pixel_scale),
height=(2.0 * star['rmask_arcsec'] / pixel_scale),
angle=0.0)
smask.set_facecolor(ORG(0.2))
smask.set_edgecolor(ORG(1.0))
smask.set_alpha(0.3)
ax1.add_artist(smask)
# Show stars
ax1.scatter(
gaia_stars['x_pix'],
gaia_stars['y_pix'],
color=GRN(1.0),
s=100,
alpha=0.9,
marker='+')
ax1.set_xlim(xlim)
ax1.set_ylim(ylim)
return obj_cat, segmap, bg_rms
|
[
"scarlet.Frame",
"scarlet.interpolation.sinc_interp",
"numpy.size",
"astropy.coordinates.SkyCoord",
"numpy.argsort",
"scarlet.resampling.convert_coordinates",
"numpy.array",
"numpy.sum",
"scarlet.wavelet.mad_wavelet",
"astropy.table.Column",
"astropy.convolution.Gaussian2DKernel",
"scarlet.wavelet.Starlet",
"matplotlib.patches.Ellipse",
"numpy.arange"
] |
[((677, 756), 'scarlet.Frame', 'scarlet.Frame', (['data_lr.images.shape'], {'wcs': 'data_lr.wcs', 'channels': 'data_lr.channels'}), '(data_lr.images.shape, wcs=data_lr.wcs, channels=data_lr.channels)\n', (690, 756), False, 'import scarlet\n'), ((801, 880), 'scarlet.Frame', 'scarlet.Frame', (['data_hr.images.shape'], {'wcs': 'data_hr.wcs', 'channels': 'data_hr.channels'}), '(data_hr.images.shape, wcs=data_hr.wcs, channels=data_hr.channels)\n', (814, 880), False, 'import scarlet\n'), ((1121, 1190), 'scarlet.resampling.convert_coordinates', 'scarlet.resampling.convert_coordinates', (['coord_lr0', 'frame_lr', 'frame_hr'], {}), '(coord_lr0, frame_lr, frame_hr)\n', (1159, 1190), False, 'import scarlet\n'), ((1387, 1403), 'numpy.array', 'np.array', (['interp'], {}), '(interp)\n', (1395, 1403), True, 'import numpy as np\n'), ((4560, 4598), 'scarlet.wavelet.Starlet', 'Starlet', (['detect_image'], {'lvl': 'wavelet_lvl'}), '(detect_image, lvl=wavelet_lvl)\n', (4567, 4598), False, 'from scarlet.wavelet import mad_wavelet, Starlet\n'), ((928, 962), 'numpy.arange', 'np.arange', (['data_lr.images.shape[1]'], {}), '(data_lr.images.shape[1])\n', (937, 962), True, 'import numpy as np\n'), ((964, 998), 'numpy.arange', 'np.arange', (['data_lr.images.shape[1]'], {}), '(data_lr.images.shape[1])\n', (973, 998), True, 'import numpy as np\n'), ((1025, 1059), 'numpy.arange', 'np.arange', (['data_hr.images.shape[1]'], {}), '(data_hr.images.shape[1])\n', (1034, 1059), True, 'import numpy as np\n'), ((1061, 1095), 'numpy.arange', 'np.arange', (['data_hr.images.shape[1]'], {}), '(data_hr.images.shape[1])\n', (1070, 1095), True, 'import numpy as np\n'), ((6997, 7022), 'numpy.sum', 'np.sum', (['hr_images'], {'axis': '(0)'}), '(hr_images, axis=0)\n', (7003, 7022), True, 'import numpy as np\n'), ((7639, 7661), 'numpy.sum', 'np.sum', (['data_hr.images'], {}), '(data_hr.images)\n', (7645, 7661), True, 'import numpy as np\n'), ((7670, 7697), 'numpy.size', 'np.size', (['detect_image.shape'], {}), '(detect_image.shape)\n', (7677, 7697), True, 'import numpy as np\n'), ((9298, 9326), 'scarlet.wavelet.mad_wavelet', 'mad_wavelet', (['datas[0].images'], {}), '(datas[0].images)\n', (9309, 9326), False, 'from scarlet.wavelet import mad_wavelet, Starlet\n'), ((4975, 4998), 'scarlet.wavelet.Starlet', 'Starlet', ([], {'coefficients': 'w'}), '(coefficients=w)\n', (4982, 4998), False, 'from scarlet.wavelet import mad_wavelet, Starlet\n'), ((7564, 7586), 'numpy.sum', 'np.sum', (['interp'], {'axis': '(0)'}), '(interp, axis=0)\n', (7570, 7586), True, 'import numpy as np\n'), ((7589, 7614), 'numpy.sum', 'np.sum', (['hr_images'], {'axis': '(0)'}), '(hr_images, axis=0)\n', (7595, 7614), True, 'import numpy as np\n'), ((7933, 7962), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['conv_radius'], {}), '(conv_radius)\n', (7949, 7962), False, 'from astropy.convolution import convolve, Box2DKernel, Gaussian2DKernel\n'), ((10092, 10156), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "gaia_stars['ra']", 'dec': "gaia_stars['dec']", 'unit': '"""deg"""'}), "(ra=gaia_stars['ra'], dec=gaia_stars['dec'], unit='deg')\n", (10100, 10156), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((10204, 10262), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "obj_cat['ra']", 'dec': "obj_cat['dec']", 'unit': '"""deg"""'}), "(ra=obj_cat['ra'], dec=obj_cat['dec'], unit='deg')\n", (10212, 10262), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((10810, 10845), 'numpy.array', 
'np.array', (["gaia_stars[['ra', 'dec']]"], {}), "(gaia_stars[['ra', 'dec']])\n", (10818, 10845), True, 'import numpy as np\n'), ((11076, 11140), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "gaia_stars['ra']", 'dec': "gaia_stars['dec']", 'unit': '"""deg"""'}), "(ra=gaia_stars['ra'], dec=gaia_stars['dec'], unit='deg')\n", (11084, 11140), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((11190, 11250), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "temp_cat['ra']", 'dec': "temp_cat['dec']", 'unit': '"""deg"""'}), "(ra=temp_cat['ra'], dec=temp_cat['dec'], unit='deg')\n", (11198, 11250), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((12100, 12261), 'matplotlib.patches.Ellipse', 'mpl_ellip', ([], {'xy': "(star['x_pix'], star['y_pix'])", 'width': "(2.0 * star['rmask_arcsec'] / pixel_scale)", 'height': "(2.0 * star['rmask_arcsec'] / pixel_scale)", 'angle': '(0.0)'}), "(xy=(star['x_pix'], star['y_pix']), width=2.0 * star[\n 'rmask_arcsec'] / pixel_scale, height=2.0 * star['rmask_arcsec'] /\n pixel_scale, angle=0.0)\n", (12109, 12261), True, 'from matplotlib.patches import Ellipse as mpl_ellip\n'), ((2913, 2932), 'numpy.argsort', 'np.argsort', (['arg_ind'], {}), '(arg_ind)\n', (2923, 2932), True, 'import numpy as np\n'), ((6869, 6905), 'numpy.sum', 'np.sum', (['datas[0].images'], {'axis': '(1, 2)'}), '(datas[0].images, axis=(1, 2))\n', (6875, 6905), True, 'import numpy as np\n'), ((7298, 7325), 'numpy.sum', 'np.sum', (['interp'], {'axis': '(1, 2)'}), '(interp, axis=(1, 2))\n', (7304, 7325), True, 'import numpy as np\n'), ((7437, 7472), 'numpy.sum', 'np.sum', (['data_hr.images'], {'axis': '(1, 2)'}), '(data_hr.images, axis=(1, 2))\n', (7443, 7472), True, 'import numpy as np\n'), ((8413, 8439), 'astropy.table.Column', 'Column', ([], {'data': 'ra', 'name': '"""ra"""'}), "(data=ra, name='ra')\n", (8419, 8439), False, 'from astropy.table import Table, Column\n'), ((8470, 8498), 'astropy.table.Column', 'Column', ([], {'data': 'dec', 'name': '"""dec"""'}), "(data=dec, name='dec')\n", (8476, 8498), False, 'from astropy.table import Table, Column\n'), ((8741, 8773), 'astropy.table.Column', 'Column', ([], {'data': 'ra_lr', 'name': '"""ra_lr"""'}), "(data=ra_lr, name='ra_lr')\n", (8747, 8773), False, 'from astropy.table import Table, Column\n'), ((8775, 8809), 'astropy.table.Column', 'Column', ([], {'data': 'dec_lr', 'name': '"""dec_lr"""'}), "(data=dec_lr, name='dec_lr')\n", (8781, 8809), False, 'from astropy.table import Table, Column\n'), ((8854, 8886), 'astropy.table.Column', 'Column', ([], {'data': 'ra_hr', 'name': '"""ra_hr"""'}), "(data=ra_hr, name='ra_hr')\n", (8860, 8886), False, 'from astropy.table import Table, Column\n'), ((8888, 8922), 'astropy.table.Column', 'Column', ([], {'data': 'dec_lr', 'name': '"""dec_hr"""'}), "(data=dec_lr, name='dec_hr')\n", (8894, 8922), False, 'from astropy.table import Table, Column\n'), ((9410, 9434), 'scarlet.wavelet.mad_wavelet', 'mad_wavelet', (['data.images'], {}), '(data.images)\n', (9421, 9434), False, 'from scarlet.wavelet import mad_wavelet, Starlet\n'), ((1272, 1360), 'scarlet.interpolation.sinc_interp', 'scarlet.interpolation.sinc_interp', (['image[None, :, :]', 'coord_hr', 'coord_lr'], {'angle': 'None'}), '(image[None, :, :], coord_hr, coord_lr,\n angle=None)\n', (1305, 1360), False, 'import scarlet\n')]
|
import numpy as np
import cv2
from keras.layers import Input
from keras.models import Model
from keras.models import load_model
decoder = load_model('roses_decoder.h5')
perceptron = load_model('decoder-perceptron.h5')
path = 'dataset/rose'
id=25 # sample code
param0 = np.loadtxt(path+'{:04d}.txt'.format(id))
id=26 # sample code
param1 = np.loadtxt(path+'{:04d}.txt'.format(id))
id=2 # sample code
param2 = np.loadtxt(path+'{:04d}.txt'.format(id))
id=235 # sample code
param3 = np.loadtxt(path+'{:04d}.txt'.format(id))
param = np.copy(param0)
last_value = -1
cv2.namedWindow("generator")
cv2.imshow('generator',np.zeros((112*5,112*5),np.uint8))
value = int(param[7])
def update_value( *args ):
global value
print(args[0])
value = float(args[0])
cv2.createTrackbar("value", "generator", value, 90, update_value)
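# The trackbar drives entry 7 of the loaded parameter vector; every change is pushed
# through the perceptron (parameters -> latent code) and the decoder (latent code ->
# image), and the result is resized to 560x560 for display.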
id = 0
while True:
if last_value != value:
last_value = value
param[7] = value
coded = perceptron.predict(param.reshape(1,-1))
decoded = decoder.predict(coded)
decoded = np.asarray(decoded[0]*255,np.uint8)
decoded = cv2.resize(decoded,(112*5,112*5))
cv2.imshow('generator',decoded)
key = cv2.waitKey(10)
if key == 27:
break
elif key == ord('r') or key == ord('0'):
param = np.copy(param0)
update_value(int(param[7]))
elif key == ord('1'):
param = np.copy(param1)
update_value(int(param[7]))
elif key == ord('2'):
param = np.copy(param2)
update_value(int(param[7]))
elif key == ord('3'):
param = np.copy(param3)
update_value(int(param[7]))
elif key == ord('s'):
cv2.imwrite('generator/final'+str(id)+'.png',decoded)
np.savetxt('generator/final'+str(id)+'.txt',param)
id += 1
cv2.destroyAllWindows()
|
[
"numpy.copy",
"keras.models.load_model",
"numpy.asarray",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.createTrackbar",
"cv2.namedWindow"
] |
[((139, 169), 'keras.models.load_model', 'load_model', (['"""roses_decoder.h5"""'], {}), "('roses_decoder.h5')\n", (149, 169), False, 'from keras.models import load_model\n'), ((183, 218), 'keras.models.load_model', 'load_model', (['"""decoder-perceptron.h5"""'], {}), "('decoder-perceptron.h5')\n", (193, 218), False, 'from keras.models import load_model\n'), ((532, 547), 'numpy.copy', 'np.copy', (['param0'], {}), '(param0)\n', (539, 547), True, 'import numpy as np\n'), ((566, 594), 'cv2.namedWindow', 'cv2.namedWindow', (['"""generator"""'], {}), "('generator')\n", (581, 594), False, 'import cv2\n'), ((767, 832), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""value"""', '"""generator"""', 'value', '(90)', 'update_value'], {}), "('value', 'generator', value, 90, update_value)\n", (785, 832), False, 'import cv2\n'), ((1803, 1826), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1824, 1826), False, 'import cv2\n'), ((618, 656), 'numpy.zeros', 'np.zeros', (['(112 * 5, 112 * 5)', 'np.uint8'], {}), '((112 * 5, 112 * 5), np.uint8)\n', (626, 656), True, 'import numpy as np\n'), ((1196, 1211), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1207, 1211), False, 'import cv2\n'), ((1049, 1087), 'numpy.asarray', 'np.asarray', (['(decoded[0] * 255)', 'np.uint8'], {}), '(decoded[0] * 255, np.uint8)\n', (1059, 1087), True, 'import numpy as np\n'), ((1103, 1142), 'cv2.resize', 'cv2.resize', (['decoded', '(112 * 5, 112 * 5)'], {}), '(decoded, (112 * 5, 112 * 5))\n', (1113, 1142), False, 'import cv2\n'), ((1145, 1177), 'cv2.imshow', 'cv2.imshow', (['"""generator"""', 'decoded'], {}), "('generator', decoded)\n", (1155, 1177), False, 'import cv2\n'), ((1305, 1320), 'numpy.copy', 'np.copy', (['param0'], {}), '(param0)\n', (1312, 1320), True, 'import numpy as np\n'), ((1399, 1414), 'numpy.copy', 'np.copy', (['param1'], {}), '(param1)\n', (1406, 1414), True, 'import numpy as np\n'), ((1493, 1508), 'numpy.copy', 'np.copy', (['param2'], {}), '(param2)\n', (1500, 1508), True, 'import numpy as np\n'), ((1587, 1602), 'numpy.copy', 'np.copy', (['param3'], {}), '(param3)\n', (1594, 1602), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# distrib.py
import itertools
import pandas as pd
import numpy as np
import scipy.integrate
import scipy.interpolate
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from .utils import grouper
from .plotting import plotVertBar
def integrate(xvec, yvec):
return abs(scipy.integrate.simps(yvec, x=xvec))
def normalizeDistrib(x, y, u=None):
x = x.values if isinstance(x, pd.Series) else x
y = y.values if isinstance(y, pd.Series) else y
# normalize the distribution to area of 1
norm = integrate(x, y)
#print("CONTINs norm", norm)
y /= norm
if u is not None:
u /= norm
return x, y, u
def area(xvec, yvec, showArea = True):
"""Returns a string with the area value of the given discrete curve points."""
return " $\int${:.3g}".format(integrate(xvec, yvec)) if showArea else ""
def findPeakRanges(x, y, tol=1e-16):
"""Returns the location of data/peak above a base line.
Assumes it touches the baseline before and after. For distributions.
*tol*: Multiplied by Y to produce a threshold to distinguish noise/artifacts from peaks."""
x = x.values if isinstance(x, pd.Series) else x
y = y.values if isinstance(y, pd.Series) else y
# look at all data above zero, get their array indices
indices = np.where(y > tol*y.max())[0]
    # segmentation: look where continuous groups of indices start and end
indexGroups = np.where(np.diff(indices) > 1)[0]
ranges = []
istart = indices[0]
def appendPeakRange(start, end):
#print("appending", start, end, end-start)
start, end = max(start-1, 0), min(end+1, len(x)-1)
monotony = np.sign(np.diff(y[start:end+1]))
if not all(monotony == monotony[0]):
# avoid monotonously increasing/decreasing peaks -> unwanted artefacts
ranges.append((start, end))
for idx in indexGroups:
appendPeakRange(istart, indices[idx]) # add the new range to the list
istart = indices[idx+1] # start new range
appendPeakRange(istart, indices[-1])
#print("findPeakRanges", ranges)
return ranges
def findLocalMinima(peakRanges, xarr, yarr, doPlot=False, verbose=False):
"""Identify local (non-zero) minima within given peak ranges and separate those
bimodal ranges into monomodal ranges, thus splitting up the peak range if it contains
maxima connected by non-zero minima. Returns a list of index tuples indicating the
start and end of each peak. Uses 4th order spline fitting and its derivative
for finding positions of local minima."""
#print("findLocalMinima", peakRanges)
newRanges = []
if doPlot:
plt.figure(figsize=(15,5))
for ip, (istart, iend) in enumerate(peakRanges):
if verbose: print((istart, iend), xarr[istart], xarr[iend])
if iend-istart < 5: # skip this, can't be fitted and no sub-peaks are likely
newRanges.append((istart, iend))
continue
while yarr[istart] <= 0. and istart < iend:
istart += 1 # exclude leading zero
while yarr[iend] <= 0. and istart < iend:
iend -= 1 # exclude trailing zero
if istart == iend:
continue
if verbose: print((istart, iend))
x, y = xarr[istart:iend+1], yarr[istart:iend+1]
try:
spline = scipy.interpolate.InterpolatedUnivariateSpline(x, y, k=4)
except:
print(f"Warning: Could not findLocalMinima() within {(istart, iend)}!")
newRanges.append((istart, iend))
continue
#if verbose: print(spline(x))
deriv = spline.derivative()
#if verbose: print(deriv(x))
roots = deriv.roots()
# get indices of roots and ignore any duplicate indices
rootIdx = set(np.argmin(np.abs(xarr[:,np.newaxis]-roots[np.newaxis,:]), axis=0))
rootIdx.add(istart); rootIdx.add(iend)
rootIdx = sorted(rootIdx)
#if rootIdx[0] == istart: # omit the first root at the beginning
# rootIdx = rootIdx[1:]
if verbose: print((istart, iend), len(roots), roots, rootIdx)
if doPlot:
plt.subplot(1,len(peakRanges), ip+1)
radGrid = np.linspace(x[0], x[-1], 200)
plt.plot(x, y, label="data")
            plt.plot(radGrid, spline(radGrid), label="spline")
plt.ylabel("data & spline approx.")
handles1, labels1 = plt.gca().get_legend_handles_labels()
[plotVertBar(plt, xarr[i], spline(radGrid).max(), color="blue", ls=":") for i in rootIdx]
plt.gca().twinx()
plt.plot(radGrid, deriv(radGrid), label="deriv. spline", color="green")
plt.ylabel("1st derivative")
handles2, labels2 = plt.gca().get_legend_handles_labels()
plt.grid(); plt.legend(handles1+handles2, labels1+labels2)
peakBoundaries = rootIdx[::2]
if verbose: print(peakBoundaries)
newRanges += [tuple(peakBoundaries[i:i+2]) for i in range(len(peakBoundaries)-1)]
if verbose: print(newRanges)
return newRanges
def getLargestPeaks(peakRanges, xarr, yarr, count=1):
def peakRangeArea(peakRange):
return integrate(xarr[peakRange[0]:peakRange[1]+1], yarr[peakRange[0]:peakRange[1]+1])
return sorted(peakRanges, key=peakRangeArea, reverse=True)[:count]
class Moments(dict):
@staticmethod
def nthMoment(x, weights, n):
"""Calculates the nth moment of the given distribution weights."""
center = 0
if n > 0: # calculate the mean first
center = np.average(x, weights=weights) if sum(weights) else 0.
# np.sqrt(u**2)/len(u) # center uncertainty
if n == 1:
return center # the mean
var = 1.
if n > 1:
var = np.sum(weights*(x-center)**2) / np.sum(weights)
if n == 2:
return var # the variance
        # standardized moment: normalize by sigma**n, i.e. var**(n/2)
        return np.sum(weights*(x-center)**n) / np.sum(weights) / np.sqrt(var)**n
@classmethod
def fromData(cls, x, y):
store = cls()
mean, var, skew, kurt = [cls.nthMoment(x, y, i) for i in range(1,5)]
store['area'] = integrate(x, y)
store['mean'] = mean
store['var'] = var
store['skew'] = skew
store['kurt'] = kurt
return store
@property
def area(self):
return self['area']
@property
def mean(self):
return self['mean']
@property
def var(self):
return self['var']
@property
def skew(self):
return self['skew']
@property
def kurt(self):
return self['kurt']
def __str__(self):
return "\n".join(
["{: <4s}: {: 9.2g}".format(k, self[k])
for k in ("area", "mean", "var", "skew", "kurt")]
)
def distrParFromDistrib(mean, var, N=1.):
# SASfit manual, 6.4. Log-Normal distribution
median = mean**2/np.sqrt(var + mean**2)
sigma = np.sqrt(np.log(mean**2/median**2))
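    # e.g. mean = 40e-9 m with var = (8e-9 m)**2 gives median ~ 3.92e-8 m, sigma ~ 0.198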
#print("momentToDistrPar", mean, var, "->", median, sigma)
return N, sigma, median # return in the order used elsewhere for distrPar
class Distribution:
x, y, u = None, None, None
peaks = None # list of peak (start, end) indices pointing into x,y,u
color = None
plotAxes, plotAxisIdx = None, 0
def __init__(self, xvec, yvec, uvec, maxPeakCount=None):
xvec = xvec.values if isinstance(xvec, pd.Series) else xvec
yvec = yvec.values if isinstance(yvec, pd.Series) else yvec
uvec = uvec.values if isinstance(uvec, pd.Series) else uvec
self.x, self.y, self.u = normalizeDistrib(xvec, yvec, uvec)
self.peaks = findPeakRanges(self.x, self.y, tol=1e-6)
# refine the peak ranges containing multiple maxima
self.peaks = findLocalMinima(self.peaks, self.x, self.y)
# For a given list of peaks (by start/end indices) return only those
# whose ratio of amount to uncertainty ratio is always below the given max. ratio
#maxRatio = 1.5
#self.peakRanges = [(istart, iend) for istart, iend in self.peakRanges
# if maxRatio > 1/np.median(self.y[istart:iend+1]/self.u[istart:iend+1])]
# Sort the peaks by area and use the largest (last) only, assuming monomodal distributions
if maxPeakCount:
self.peaks = getLargestPeaks(self.peaks, self.x, self.y, count=maxPeakCount)
def peakData(self, peakRange):
return (self.x[peakRange[0]:peakRange[1]+1],
self.y[peakRange[0]:peakRange[1]+1],
self.u[peakRange[0]:peakRange[1]+1])
def uncertRatioMedian(self, peakRange):
_, y, u = self.peakData(peakRange)
return 1./np.median(y/u)
@staticmethod
def getBarWidth(xvec):
return np.concatenate((np.diff(xvec)[:1], np.diff(xvec)))
def plotPeak(self, peakRange, moments, distrPar, showFullRange=False, ax=None):
"""*showFullRange*: Set the x range to cover the whole distribution instead of the peak only."""
x, y, u = self.peakData(peakRange)
if not ax:
ax = plt.gca()
mom, momLo, momHi = moments
dp, dpLo, dpHi = distrPar
#ax.plot(x, y, 'o', color=cls.color)
lbl, fmt = [], "{: <7s} {: 9.2g} ±{: 9.2g}"
for k in "area", "median", "var", "skew", "kurt":
if k == "median":
lbl.append(fmt.format("median:", dp[-1], max(abs(dp[-1]-dpLo[-1]), abs(dpHi[-1]-dp[-1]))))
else:
lbl.append(fmt.format(k+':', mom[k], max(abs(mom[k]-momLo[k]), abs(momHi[k]-mom[k]))))
lbl.append("LogNorm: "+distrParToText(dp)[0])
ax.bar(x, y, width=self.getBarWidth(x), color=self.color, alpha=0.5, label="\n".join(lbl))
ax.fill_between(x, np.maximum(0, y-u), y+u,
color='red', lw=0, alpha=0.1,
label=f"uncertainties (lvl: {self.uncertRatioMedian(peakRange):.3g})")
if showFullRange:
ax.set_xlim((self.x.min(), self.x.max()))
ax.set_xlabel(f"Radius (m)")
ax.legend(prop=font_manager.FontProperties(family='monospace')); ax.grid(True);
def plot(self, ax, distPar, name=""):
"""plot complete distribution as loaded from file"""
lbl = ("from file, " + name
+ area(self.x, self.y, showArea=True)
+"\n"+distrParLatex(distPar[0]))
ax.fill_between(self.x, self.y,
#width=GenericResult.getBarWidth(self.x),
color=self.color, alpha=0.5, label=lbl)
#ax.errorbar(self.x, self.y, yerr=self.u, lw=lineWidth()*2, label=lbl)
ax.fill_between(self.x, np.maximum(0, self.y-self.u), self.y+self.u,
color='red', lw=0, alpha=0.1, label="uncertainties")
ax.set_xlabel(f"Radius (m)")
ax.legend(); ax.grid(); ax.set_xscale("log")
def peakDistrPar(self, plotAxes=None, plotAxisStart=0, **plotPeakKwargs):
distrPar = []
moments = []
for i, peakRange in enumerate(self.peaks): # for each peak
x, y, u = self.peakData(peakRange)
N = integrate(x, y)
mom = Moments.fromData(x, y)
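            # propagate the uncertainty band: recompute moments on the envelopes y-u (clipped at 0) and y+u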
momLo = Moments.fromData(x, np.maximum(0, y-u))
momHi = Moments.fromData(x, y+u)
dptmp = distrParFromDistrib(mom.mean, mom.var, N=N)
dptmpLo = distrParFromDistrib(momLo.mean, momLo.var, N=N)
dptmpHi = distrParFromDistrib(momHi.mean, momHi.var, N=N)
distrPar.append(dptmp)
moments.append(mom)
if plotAxes is not None:
plotPeakKwargs['ax'] = plotAxes[plotAxisStart+i]
self.plotPeak(peakRange, (mom,momLo,momHi), (dptmp,dptmpLo,dptmpHi), **plotPeakKwargs)
return distrPar, moments
def distrParToText(distrPar):
numPars = 3
if len(distrPar) > numPars:
fmt = "R_{i}={:3.0f} s_{i}={:0.2f} N_{i}={:.3g}"
else:
fmt = "R={:3.0f} s={:0.2f} N={:.3g}"
return [fmt.format(p[2]*1e9, p[1], p[0], i = i)
for i, p in enumerate(grouper(distrPar, numPars))]
def distrParToFilename(distrPar, prefix=''):
return '_'.join([prefix] + distrParToText(distrPar)).replace(' ', '_')
def distrParLatex(distrPar, *kwargs):
return "\n".join(['$'+txt.replace(' ',r'\;')+'$' for txt in distrParToText(distrPar)])
def distrParFromFilename(fn):
fn = fn.split('=')
fn = [elem.lstrip('_') for elem in fn]
fn = [(elem.split('_', maxsplit=1) if elem[0].isnumeric() else [elem]) for elem in fn]
fn = list(itertools.chain(*fn))
return list(itertools.chain(*[(float(grp[5]), float(grp[3]), float(grp[1])*1e-9)
for grp in grouper(fn, 6)]))
def test():
"""Some testing."""
distrPar = (1, 0.2, 40e-9)
print("distrPar: ", list(grouper(distrPar, 3)))
print("distrParToText:", distrParToText(distrPar))
print("distrParLatex: ", distrParLatex(distrPar))
print("distrParToFilename: ", distrParToFilename(distrPar))
print("distrParFromFilename:", distrParFromFilename(distrParToFilename(distrPar)))
print("distrParFromFilename:", distrParFromFilename(distrParToFilename(distrPar, "lognorm")))
print()
distrPar = (1, 0.2, 40e-9)+(1, 0.1, 10e-9)
print("distrPar: ", list(grouper(distrPar, 3)))
print("distrParToText:", distrParToText(distrPar))
print("distrParLatex: ", distrParLatex(distrPar))
print("distrParToFilename: ", distrParToFilename(distrPar))
print("distrParFromFilename:", distrParFromFilename(distrParToFilename(distrPar)))
print("distrParFromFilename:", distrParFromFilename(distrParToFilename(distrPar, "lognorm")))
|
[
"numpy.abs",
"matplotlib.pyplot.grid",
"numpy.sqrt",
"numpy.median",
"matplotlib.pyplot.ylabel",
"numpy.average",
"matplotlib.pyplot.gca",
"matplotlib.font_manager.FontProperties",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.maximum",
"matplotlib.pyplot.legend"
] |
[((2684, 2711), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (2694, 2711), True, 'import matplotlib.pyplot as plt\n'), ((6911, 6935), 'numpy.sqrt', 'np.sqrt', (['(var + mean ** 2)'], {}), '(var + mean ** 2)\n', (6918, 6935), True, 'import numpy as np\n'), ((6954, 6985), 'numpy.log', 'np.log', (['(mean ** 2 / median ** 2)'], {}), '(mean ** 2 / median ** 2)\n', (6960, 6985), True, 'import numpy as np\n'), ((1678, 1703), 'numpy.diff', 'np.diff', (['y[start:end + 1]'], {}), '(y[start:end + 1])\n', (1685, 1703), True, 'import numpy as np\n'), ((4225, 4254), 'numpy.linspace', 'np.linspace', (['x[0]', 'x[-1]', '(200)'], {}), '(x[0], x[-1], 200)\n', (4236, 4254), True, 'import numpy as np\n'), ((4267, 4295), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""data"""'}), "(x, y, label='data')\n", (4275, 4295), True, 'import matplotlib.pyplot as plt\n'), ((4372, 4407), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""data & spline approx."""'], {}), "('data & spline approx.')\n", (4382, 4407), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4734), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""1st derivative"""'], {}), "('1st derivative')\n", (4716, 4734), True, 'import matplotlib.pyplot as plt\n'), ((4817, 4827), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4825, 4827), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4879), 'matplotlib.pyplot.legend', 'plt.legend', (['(handles1 + handles2)', '(labels1 + labels2)'], {}), '(handles1 + handles2, labels1 + labels2)\n', (4839, 4879), True, 'import matplotlib.pyplot as plt\n'), ((8706, 8722), 'numpy.median', 'np.median', (['(y / u)'], {}), '(y / u)\n', (8715, 8722), True, 'import numpy as np\n'), ((9102, 9111), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9109, 9111), True, 'import matplotlib.pyplot as plt\n'), ((9775, 9795), 'numpy.maximum', 'np.maximum', (['(0)', '(y - u)'], {}), '(0, y - u)\n', (9785, 9795), True, 'import numpy as np\n'), ((10658, 10688), 'numpy.maximum', 'np.maximum', (['(0)', '(self.y - self.u)'], {}), '(0, self.y - self.u)\n', (10668, 10688), True, 'import numpy as np\n'), ((1439, 1455), 'numpy.diff', 'np.diff', (['indices'], {}), '(indices)\n', (1446, 1455), True, 'import numpy as np\n'), ((3819, 3869), 'numpy.abs', 'np.abs', (['(xarr[:, np.newaxis] - roots[np.newaxis, :])'], {}), '(xarr[:, np.newaxis] - roots[np.newaxis, :])\n', (3825, 3869), True, 'import numpy as np\n'), ((5589, 5619), 'numpy.average', 'np.average', (['x'], {'weights': 'weights'}), '(x, weights=weights)\n', (5599, 5619), True, 'import numpy as np\n'), ((5818, 5853), 'numpy.sum', 'np.sum', (['(weights * (x - center) ** 2)'], {}), '(weights * (x - center) ** 2)\n', (5824, 5853), True, 'import numpy as np\n'), ((5850, 5865), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (5856, 5865), True, 'import numpy as np\n'), ((5938, 5973), 'numpy.sum', 'np.sum', (['(weights * (x - center) ** n)'], {}), '(weights * (x - center) ** n)\n', (5944, 5973), True, 'import numpy as np\n'), ((5970, 5985), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (5976, 5985), True, 'import numpy as np\n'), ((8817, 8830), 'numpy.diff', 'np.diff', (['xvec'], {}), '(xvec)\n', (8824, 8830), True, 'import numpy as np\n'), ((10089, 10136), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'family': '"""monospace"""'}), "(family='monospace')\n", (10116, 10136), True, 'import matplotlib.font_manager as font_manager\n'), ((11219, 11239), 'numpy.maximum', 
'np.maximum', (['(0)', '(y - u)'], {}), '(0, y - u)\n', (11229, 11239), True, 'import numpy as np\n'), ((4440, 4449), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4447, 4449), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4601), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4599, 4601), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4776), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4774, 4776), True, 'import matplotlib.pyplot as plt\n'), ((8798, 8811), 'numpy.diff', 'np.diff', (['xvec'], {}), '(xvec)\n', (8805, 8811), True, 'import numpy as np\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
class PaddleOpMapper(object):
def __init__(self):
self.paddle_onnx_dtype_map = {
core.VarDesc.VarType.FP32: onnx_pb.TensorProto.FLOAT,
core.VarDesc.VarType.FP64: onnx_pb.TensorProto.DOUBLE,
core.VarDesc.VarType.INT32: onnx_pb.TensorProto.INT32,
            core.VarDesc.VarType.INT16: onnx_pb.TensorProto.INT16,
core.VarDesc.VarType.INT64: onnx_pb.TensorProto.INT64,
core.VarDesc.VarType.BOOL: onnx_pb.TensorProto.BOOL
}
self.name_counter = dict()
def get_name(self, op_name, var_name):
name = 'p2o.{}.{}'.format(op_name, var_name)
if name not in self.name_counter:
self.name_counter[name] = 0
else:
self.name_counter[name] += 1
return name + '.{}'.format(self.name_counter[name])
def make_constant_node(self, name, dtype, value=None):
if isinstance(value, list):
dims = (len(value), )
elif value is None:
dims = ()
value = []
else:
dims = ()
value = [value]
tensor = helper.make_tensor(
name=name, data_type=dtype, dims=dims, vals=value)
node = helper.make_node(
'Constant', inputs=[], outputs=[name], value=tensor)
return node
def conv2d(self, op, block):
kernel_shape = block.var(op.input('Filter')[0]).shape
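        # Paddle stores one symmetric padding per spatial dim; ONNX wants explicit
        # begin and end pads, hence the padding list is repeated.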
node = helper.make_node(
'Conv',
inputs=op.input('Input') + op.input('Filter'),
outputs=op.output('Output'),
dilations=op.attr('dilations'),
kernel_shape=kernel_shape[-2:],
strides=op.attr('strides'),
group=op.attr('groups'),
pads=op.attr('paddings') + op.attr('paddings'))
return node
def relu(self, op, block):
node = helper.make_node(
'Relu', inputs=op.input('X'), outputs=op.output('Out'))
return node
def elementwise_add(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
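        # Paddle broadcasts a 1-D Y along `axis`; ONNX Add uses numpy-style
        # broadcasting, so reshape Y to the rank of X first.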
if len(y_shape) == 1 and axis == 1:
shape_name = self.get_name(op.type, 'shape')
shape_value = [1] * len(x_shape)
shape_value[axis] = y_shape[0]
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, shape_value)
temp_value = self.get_name(op.type, 'temp')
y_node = helper.make_node(
'Reshape',
inputs=[op.input('Y')[0], shape_name],
outputs=[temp_value])
node = helper.make_node(
'Add',
inputs=[op.input('X')[0], temp_value],
outputs=op.output('Out'))
return [shape_node, y_node, node]
elif len(x_shape) == len(y_shape):
node = helper.make_node(
'Add',
inputs=[op.input('X')[0], op.input('Y')[0]],
outputs=op.output('Out'))
return node
else:
            raise Exception("Unexpected situation happened in elementwise_add")
def pool2d(self, op, block):
pool_type = {
'max': ('MaxPool', 'GlobalMaxPool'),
'avg': ('AveragePool', 'GlobalAveragePool')
}
if op.attr('global_pooling'):
node = helper.make_node(
pool_type[op.attr('pooling_type')][1],
inputs=op.input('X'),
outputs=op.output('Out'),
)
else:
node = helper.make_node(
pool_type[op.attr('pooling_type')][0],
inputs=op.input('X'),
outputs=op.output('Out'),
kernel_shape=op.attr('ksize'),
strides=op.attr('strides'),
pads=op.attr('paddings') + op.attr('paddings'))
return node
def softmax(self, op, block):
node = helper.make_node(
'Softmax',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
def scale(self, op, block):
scale = op.attr('scale')
bias = op.attr('bias')
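        # scale op computes scale*x + bias (order depends on bias_after_scale);
        # emit Identity when it is a no-op, otherwise Constant + Mul/Add nodes.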
if math.fabs(scale - 1.0) < 1e-06 and math.fabs(bias - 0.0) < 1e-06:
node = helper.make_node(
'Identity', inputs=op.input('X'), outputs=op.output('Out'))
return node
else:
scale_name = self.get_name(op.type, 'scale')
bias_name = self.get_name(op.type, 'bias')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, scale)
bias_node = self.make_constant_node(bias_name,
onnx_pb.TensorProto.FLOAT, bias)
temp_tensor_name = self.get_name(op.type, 'temporary')
if op.attr('bias_after_scale'):
node1 = helper.make_node(
'Mul',
inputs=[scale_name, op.input('X')[0]],
outputs=[temp_tensor_name])
node2 = helper.make_node(
'Add',
inputs=[bias_name, temp_tensor_name],
outputs=op.output('Out'))
else:
node1 = helper.make_node(
'Add',
inputs=[bias_name, op.input('X')[0]],
                    outputs=[temp_tensor_name])
node2 = helper.make_node(
'Mul',
inputs=[scale_name, temp_tensor_name],
                    outputs=op.output('Out'))
return [scale_node, bias_node, node1, node2]
def mul(self, op, block):
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
out_shape = list(block.var(op.output('Out')[0]).shape)
x_num_col_dims = op.attr('x_num_col_dims')
y_num_col_dims = op.attr('y_num_col_dims')
flatten_x_name = 'flatten_{}'.format(op.input('X')[0])
flatten_y_name = 'flatten_{}'.format(op.input('Y')[0])
shape_name = 'temp_shape_{}'.format(op.output('Out')[0])
temp_out_name = 'temp_{}'.format(op.output('Out')[0])
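        # Paddle's mul flattens X and Y to 2-D at x/y_num_col_dims before multiplying;
        # mirror that with Flatten -> MatMul and reshape the result back afterwards.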
flatten_x = helper.make_node(
'Flatten',
inputs=op.input('X'),
outputs=[flatten_x_name],
axis=x_num_col_dims)
flatten_y = helper.make_node(
'Flatten',
inputs=op.input('Y'),
outputs=[flatten_y_name],
axis=y_num_col_dims)
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, out_shape)
node = helper.make_node(
'MatMul',
inputs=[flatten_x_name, flatten_y_name],
outputs=[temp_out_name])
reshape_out = helper.make_node(
'Reshape',
inputs=[temp_out_name, shape_name],
outputs=op.output('Out'))
return [flatten_x, flatten_y, shape_node, node, reshape_out]
def batch_norm(self, op, block):
kwargs = {
'epsilon': op.attr('epsilon'),
'momentum': op.attr('momentum')
}
inputs = op.input('X') + op.input('Scale') + op.input(
'Bias') + op.input('Mean') + op.input('Variance')
node = helper.make_node(
'BatchNormalization',
inputs=inputs,
outputs=op.output('Y'),
**kwargs)
return node
def concat(self, op, block):
node = helper.make_node(
'Concat',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
def depthwise_conv2d(self, op, block):
return self.conv2d(op, block)
def relu6(self, op, block):
min_name = self.get_name(op.type, 'min')
max_name = self.get_name(op.type, 'max')
min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT,
0)
max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
op.attr('threshold'))
node = helper.make_node(
'Clip',
inputs=[op.input('X')[0], min_name, max_name],
outputs=op.output('Out'),
)
return [min_node, max_node, node]
def shape(self, op, block):
node = helper.make_node(
'Shape', inputs=op.input('Input'), outputs=op.output('Out'))
return node
def split(self, op, block):
sections = op.attr('sections')
if len(sections) > 0:
node = helper.make_node(
'Split',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'),
split=sections)
else:
node = helper.make_node(
'Split',
inputs=op.input('X'),
outputs=op.output('Out'),
                axis=op.attr('axis'))
        return node
def slice(self, op, block):
axes = op.attr('axes')
starts = op.attr('starts')
ends = op.attr('ends')
        axes_name = self.get_name(op.type, 'axes')
        starts_name = self.get_name(op.type, 'starts')
        ends_name = self.get_name(op.type, 'ends')
        axes_node = self.make_constant_node(axes_name, onnx_pb.TensorProto.INT64,
                                            axes)
        starts_node = self.make_constant_node(starts_name, onnx_pb.TensorProto.INT64,
                                              starts)
        ends_node = self.make_constant_node(ends_name, onnx_pb.TensorProto.INT64,
                                            ends)
node = helper.make_node(
"Slice",
inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
outputs=op.output('Out'),
)
return [starts_node, ends_node, axes_node, node]
def fill_constant(self, op, block):
value = op.attr('value')
dtype = op.attr('dtype')
shape = op.attr('shape')
value = np.ones(shape) * value
node = helper.make_node(
'Constant',
inputs=[],
            outputs=op.output('Out'),
            value=helper.make_tensor(
                name=op.output('Out')[0],
data_type=self.paddle_onnx_dtype_map[dtype],
dims=shape,
vals=value.tolist()))
return node
def transpose2(self, op, block):
node = helper.make_node(
'Transpose',
inputs=op.input('X'),
outputs=op.output('Out'),
perm=op.attr('perm'))
return node
def reshape2(self, op, block):
input_names = op.input_names
if 'Shape' in input_names and len(op.input('Shape')) > 0:
node = helper.make_node(
'Reshape',
inputs=[op.input('X')[0],
op.input('Shape')[0]],
outputs=op.output('Out'))
else:
shape = op.attr('shape')
            shape_name = self.get_name(op.type, 'shape')
            shape_node = self.make_constant_node(shape_name,
                                                 onnx_pb.TensorProto.INT64, shape)
node = helper.make_node(
'Reshape',
inputs=[op.input('X')[0], shape_name],
outputs=op.output('Out'))
return [shape_node, node]
return node
def dropout(self, op, block):
dropout_mode = op.attr('dropout_implementation')
dropout_prob = op.attr('dropout_prob')
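        # at inference 'upscale_in_train' dropout is a no-op (Identity);
        # 'downgrade_in_infer' scales activations by (1 - dropout_prob)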
if dropout_mode == 'upscale_in_train':
node = helper.make_node(
'Identity', inputs=op.input('X'), outputs=op.output('Out'))
return node
elif dropout_mode == 'downgrade_in_infer':
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, 1 - dropout_prob)
node = helper.make_node(
"Mul",
inputs=[op.input('X')[0], scale_name],
outputs=op.output('Out'))
return [scale_node, node]
else:
            raise Exception("Unexpected situation happened")
def reduce_mean(self, op, block):
node = helper.make_node(
'ReduceMean',
inputs=op.input('X'),
outputs=op.output('Out'),
axes=op.attr('axes'),
keepdims=op.attr('keep_dim'))
return node
def nearest_interp(self, op, block):
input_names = op.input_names
if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], '',
op.input('OutSize')[0]],
outputs=op.output('Out'))
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0],
op.input('Scale')[0]],
outputs=op.output('Out'))
else:
out_shape = [op.attr('out_h'), op.attr('out_w')]
scale = op.attr('scale')
if out_shape.count(-1) > 0:
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(roi_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, 1, 1, 1, 1, 1, 1])
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], roi_name, scale_name],
outputs=op.output('Out'),
mode='nearest')
return [scale_node, roi_node, node]
else:
                raise Exception("Unexpected situation happened")
return node
def hard_sigmoid(self, op, block):
slope = op.attr('slope')
offset = op.attr('offset')
node = helper.make_node(
'HardSigmoid',
inputs=op.input('X'),
outputs=op.output('Out'),
alpha=slope,
beta=offset)
return node
def elementwise_mul(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
if len(y_shape) == 1 and axis == 1:
shape_name = self.get_name(op.type, 'shape')
shape_value = [1] * len(x_shape)
shape_value[axis] = y_shape[0]
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, shape_value)
temp_value = self.get_name(op.type, 'temp')
y_node = helper.make_node(
'Reshape',
inputs=[op.input('Y')[0], shape_name],
outputs=[temp_value])
node = helper.make_node(
'Mul',
inputs=[op.input('X')[0], temp_value],
outputs=op.output('Out'))
return [shape_node, y_node, node]
elif len(x_shape) == len(y_shape):
node = helper.make_node(
'Mul',
inputs=[op.input('X')[0], op.input('Y')[0]],
outputs=op.output('Out'))
return node
else:
            raise Exception("Unexpected situation happened in elementwise_mul")
return node
def feed(self, op, block):
name = op.output('Out')[0]
var = block.var(name)
tensor_info = helper.make_tensor_value_info(
name=name,
shape=var.shape,
elem_type=self.paddle_onnx_dtype_map[var.dtype])
return tensor_info
def fetch(self, op, block):
name = op.input('X')[0]
var = block.var(name)
tensor_info = helper.make_tensor_value_info(
name=name,
shape=var.shape,
elem_type=self.paddle_onnx_dtype_map[var.dtype])
return tensor_info
def convert_weights(self, program):
var_names = program.global_block().vars
nodes = list()
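        # bake every persistable variable (the trained weights) into the graph
        # as an ONNX Constant node so the exported model is self-contained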
for name in var_names:
var = program.global_block().var(name)
if name.endswith('feed') or name.endswith('fetch'):
continue
if not var.persistable:
continue
weight = np.array(fluid.global_scope().find_var(name).get_tensor())
tensor = helper.make_tensor(
name=name,
dims=var.shape,
data_type=self.paddle_onnx_dtype_map[var.dtype],
vals=weight.flatten().tolist())
node = helper.make_node(
'Constant', inputs=[], outputs=[name], value=tensor)
nodes.append(node)
return nodes
def convert(self, program, save_dir):
weight_nodes = self.convert_weights(program)
op_nodes = list()
input_nodes = list()
output_nodes = list()
unsupported_ops = set()
for block in program.blocks:
for op in block.ops:
print('Translating op: {}'.format(op.type))
if not hasattr(self, op.type):
unsupported_ops.add(op.type)
continue
if len(unsupported_ops) > 0:
continue
node = getattr(self, op.type)(op, block)
if op.type == 'feed':
input_nodes.append(node)
elif op.type == 'fetch':
output_nodes.append(node)
else:
if isinstance(node, list):
op_nodes = op_nodes + node
else:
op_nodes.append(node)
if len(unsupported_ops) > 0:
print("There's {} ops are not supported yet".format(
len(unsupported_ops)))
for op in unsupported_ops:
print("=========== {} ===========".format(op))
return
graph = helper.make_graph(
nodes=weight_nodes + op_nodes,
name='onnx_model_from_paddle',
initializer=[],
inputs=input_nodes,
outputs=output_nodes)
model = helper.make_model(graph, producer_name='X2Paddle')
onnx.checker.check_model(model)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
f.write(model.SerializeToString())
print("Translated model saved in {}".format(
os.path.join(save_dir, 'x2paddle_model.onnx')))
|
[
"onnx.helper.make_graph",
"onnx.helper.make_node",
"numpy.ones",
"os.makedirs",
"paddle.fluid.global_scope",
"onnx.helper.make_tensor_value_info",
"os.path.join",
"onnx.helper.make_model",
"os.path.isdir",
"math.fabs",
"onnx.helper.make_tensor",
"onnx.checker.check_model"
] |
[((1961, 2030), 'onnx.helper.make_tensor', 'helper.make_tensor', ([], {'name': 'name', 'data_type': 'dtype', 'dims': 'dims', 'vals': 'value'}), '(name=name, data_type=dtype, dims=dims, vals=value)\n', (1979, 2030), False, 'from onnx import helper, onnx_pb\n'), ((2059, 2128), 'onnx.helper.make_node', 'helper.make_node', (['"""Constant"""'], {'inputs': '[]', 'outputs': '[name]', 'value': 'tensor'}), "('Constant', inputs=[], outputs=[name], value=tensor)\n", (2075, 2128), False, 'from onnx import helper, onnx_pb\n'), ((7589, 7686), 'onnx.helper.make_node', 'helper.make_node', (['"""MatMul"""'], {'inputs': '[flatten_x_name, flatten_y_name]', 'outputs': '[temp_out_name]'}), "('MatMul', inputs=[flatten_x_name, flatten_y_name], outputs\n =[temp_out_name])\n", (7605, 7686), False, 'from onnx import helper, onnx_pb\n'), ((16740, 16851), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', ([], {'name': 'name', 'shape': 'var.shape', 'elem_type': 'self.paddle_onnx_dtype_map[var.dtype]'}), '(name=name, shape=var.shape, elem_type=self.\n paddle_onnx_dtype_map[var.dtype])\n', (16769, 16851), False, 'from onnx import helper, onnx_pb\n'), ((17028, 17139), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', ([], {'name': 'name', 'shape': 'var.shape', 'elem_type': 'self.paddle_onnx_dtype_map[var.dtype]'}), '(name=name, shape=var.shape, elem_type=self.\n paddle_onnx_dtype_map[var.dtype])\n', (17057, 17139), False, 'from onnx import helper, onnx_pb\n'), ((19237, 19384), 'onnx.helper.make_graph', 'helper.make_graph', ([], {'nodes': '(weight_nodes + op_nodes)', 'name': '"""onnx_model_from_paddle"""', 'initializer': '[]', 'inputs': 'input_nodes', 'outputs': 'output_nodes'}), "(nodes=weight_nodes + op_nodes, name=\n 'onnx_model_from_paddle', initializer=[], inputs=input_nodes, outputs=\n output_nodes)\n", (19254, 19384), False, 'from onnx import helper, onnx_pb\n'), ((19452, 19502), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {'producer_name': '"""X2Paddle"""'}), "(graph, producer_name='X2Paddle')\n", (19469, 19502), False, 'from onnx import helper, onnx_pb\n'), ((19511, 19542), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model'], {}), '(model)\n', (19535, 19542), False, 'import onnx\n'), ((10996, 11010), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (11003, 11010), True, 'import numpy as np\n'), ((17855, 17924), 'onnx.helper.make_node', 'helper.make_node', (['"""Constant"""'], {'inputs': '[]', 'outputs': '[name]', 'value': 'tensor'}), "('Constant', inputs=[], outputs=[name], value=tensor)\n", (17871, 17924), False, 'from onnx import helper, onnx_pb\n'), ((19559, 19582), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (19572, 19582), False, 'import os\n'), ((19596, 19617), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (19607, 19617), False, 'import os\n'), ((5116, 5138), 'math.fabs', 'math.fabs', (['(scale - 1.0)'], {}), '(scale - 1.0)\n', (5125, 5138), False, 'import math\n'), ((5151, 5172), 'math.fabs', 'math.fabs', (['(bias - 0.0)'], {}), '(bias - 0.0)\n', (5160, 5172), False, 'import math\n'), ((19636, 19681), 'os.path.join', 'os.path.join', (['save_dir', '"""x2paddle_model.onnx"""'], {}), "(save_dir, 'x2paddle_model.onnx')\n", (19648, 19681), False, 'import os\n'), ((19807, 19852), 'os.path.join', 'os.path.join', (['save_dir', '"""x2paddle_model.onnx"""'], {}), "(save_dir, 'x2paddle_model.onnx')\n", (19819, 19852), False, 'import os\n'), ((17573, 17593), 'paddle.fluid.global_scope', 
'fluid.global_scope', ([], {}), '()\n', (17591, 17593), True, 'import paddle.fluid as fluid\n')]
|
"""
glucoseDataFrame.py
Creates a dataframe of glucose related statistics
in diabetics for predictive analysis.
"""
import sys
import os
import math
from datetime import *
from dateutil.parser import parse
import pandas as pd
import numpy as np
sys.path.append("..") # proper file path for importing local modules
from pythonScripts.jsonToCsv import convertToCsv
#-------CONSTANTS-------------
CONVERSION_FACTOR = 18.01559
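# 1 mmol/L of glucose = 18.01559 mg/dL (glucose molar mass ~180.16 g/mol, divided by 10 for dL)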
#-------Dicts----------
#basal rates (unit/hour)
BASAL = {
"0" : .625,
"2" : .650, #if hour equals 2, then also minute = 30 cause (2:30)
"4" : .800,
"8" : .725,
"12" : .700,
"14" : .250,
"19" : .650
}
#insulin sensitivity (mg/dL/unit)
SENSITIVITY = {
"0" : 60,
"6" : 70,
"9" : 60,
"12" : 60,
"15" : 60
}
#carb ratio (grams/unit)
CARB_RATIO = {
"0" : 10,
"6" : 5,
"11" : 5.5, #if hour equals 11, then also minute = 30 cause (11:30)
"14" : 6,
"18" : 7,
"21" : 9
}
#----------------------
#-----------------------------
def convert_glucose(glucose_levels):
"""Do conversion across entire dataset
conversion mmol/L to mg/dL"""
value_row = glucose_levels.loc[:, 'value']
convert_row = value_row.mul(CONVERSION_FACTOR)
round_conversion = convert_row.round(2)
return round_conversion
def divide_timestamp(time_row):
"""Seperates timestamp into individual
months, days, weekdays, hours, and minutes"""
month_list = []
day_list = []
weekday_list = []
hour_list = []
minutes_list = []
time_str = time_row.astype(str).values.tolist()
for i in time_str:
#for months
month = parse(i).month
month_list.append(month)
#for days
day = parse(i).day
day_list.append(day)
#for weekdays
weekday = parse(i).weekday()
weekday_list.append(weekday)
#for hours
hour = parse(i).hour
hour_list.append(hour)
#for minutes
minute = parse(i).minute
minutes_list.append(minute)
return month_list, day_list, weekday_list, hour_list, minutes_list
def create_dataframe():
"""Creates dataframe for glucose analysis"""
#---get correct path to csv input file-----------
path_to_input_csv = convertToCsv()
current_file = os.path.basename(path_to_input_csv)
print(f"Currently Reading File: {current_file}")
care_link_file = input("\nEnter Medtronic File: ")
#------------------------------------------------
#----------Create data frame-------------------
#get all data from csv
gluc_level_data = pd.read_csv(path_to_input_csv)
# remove rows that are NaN for value
gluc_level_data = gluc_level_data[pd.notnull(gluc_level_data["value"])]
#----------------------------------------------
#---------------conversion mmol/L to mg/dL-----------------
glu = convert_glucose(gluc_level_data)
#----------------------------------------------------------
#--------Save month, day, weekday, hour, minutes---------------
timestamp = gluc_level_data.loc[:, 'time']
saved_index = timestamp.index # save the index from this dataframe as variable index
month_list, day_list, weekday_list, hour_list, minutes_list = divide_timestamp(timestamp)
#convert the lists to dataframes while ensuring the index corresponds to the other dataframes
monthdf = pd.DataFrame(np.array(month_list), index=saved_index)
daydf = pd.DataFrame(np.array(day_list), index=saved_index)
weekdaydf = pd.DataFrame(np.array(weekday_list), index=saved_index)
hourdf = pd.DataFrame(np.array(hour_list), index=saved_index)
minutesdf = pd.DataFrame(np.array(minutes_list), index=saved_index)
#--------------------------------------------------------------
#---------BOLUS OUTPUT---------------------------
path_to_care_link = os.path.join(os.getcwd(), "csvData", "csvInData")
bolus_carb_csv = pd.read_csv(os.path.join(path_to_care_link, care_link_file), skiprows=6)
bolus = bolus_carb_csv.loc[:, 'Bolus Volume Delivered (U)']
date = bolus_carb_csv.loc[:, 'Date']
time = bolus_carb_csv.loc[:, 'Time']
carb = bolus_carb_csv.loc[:, 'BWZ Carb Input (grams)']
bolus_carb_data = pd.concat([date, time, bolus, carb], axis=1, ignore_index=True)
#remove column if NaN value in both columns 2&3
bolus_carb_data = bolus_carb_data.dropna(subset=[2, 3], how='all')
#get rid of last header row
bolus_carb_data = bolus_carb_data.drop(bolus_carb_data.index[len(bolus_carb_data)-1])
bolus_carb_data.columns = ["Date", "Time", "Bolus (U)", "Carb Input (grams)"]
#-------------------------------------------------------------------------
#--------Save month, day, weekday, hour, minutes---------------
month_list_b = []
day_list_b = []
hour_list_b = []
minutes_list_b = []
date = bolus_carb_data.loc[:, 'Date']
time = bolus_carb_data.loc[:, 'Time']
index_bolus = date.index # save the index from this dataframe as variable index
day_str = date.astype(str).values.tolist()
time_str_b = time.astype(str).values.tolist()
for j in time_str_b:
time_whole = datetime.strptime(j, '%H:%M:%S')
        #for hours
        hour_list_b.append(time_whole.hour)
        #for minutes
        minutes_list_b.append(time_whole.minute)
    for k in day_str:
        date_whole = datetime.strptime(k, '%Y/%m/%d')
        #for months
        month_list_b.append(date_whole.month)
        #for days
        day_list_b.append(date_whole.day)
#convert the lists to dataframes while ensuring the index corresponds to the other dataframes
monthdf_bolus = pd.DataFrame(np.array(month_list_b), index=index_bolus)
daydf_bolus = pd.DataFrame(np.array(day_list_b), index=index_bolus)
hourdf_bolus = pd.DataFrame(np.array(hour_list_b), index=index_bolus)
minutesdf_bolus = pd.DataFrame(np.array(minutes_list_b), index=index_bolus)
#concatenate all of these
bolus_carb_final = pd.concat([bolus_carb_data, monthdf_bolus, daydf_bolus, hourdf_bolus, minutesdf_bolus], axis=1, ignore_index=True)
bolus_carb_final.columns = ["Date", "Time", "Bolus (U)", "Carb Input (grams)", "Month", "Day", "Hour", "Minutes"]
#--------------------------------------------------------------
#--------Concatenate all of the dataframes into one dataframe----------------------------
final = pd.concat([timestamp, glu, monthdf, daydf, weekdaydf, hourdf, minutesdf], axis=1, ignore_index=True) #concatenate the dataframe together
#give columns names
final.columns = ["TimeStamp", "Glucose (mg/dL)", "Month", "Day", "Weekday", "Hour", "Minutes"]
#----------------------------------------------------------------------------------------
#MERGE MEDTRONIC DATA WITH DEXCOM
#----------------------------------------------------------------------------------------
#make dataframe of NaN filled bolus and carb columns with indexes matching tidepool
bolus_carbdf = pd.DataFrame(np.nan, index=saved_index, columns=["Bolus (U)", "Carb Input (grams)"])
#match up the bolus insulin & carb intake from one csv
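    # for every Medtronic bolus/carb row, find the closest Tidepool reading in the
    # same month/day/hour (within the 5-minute window below) and copy the values onto it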
for index_med, row_med in bolus_carb_final.iterrows(): #go through Medtronic Data
mins_med = getattr(row_med, "Minutes")
hrs_med = getattr(row_med, "Hour")
day_med = getattr(row_med, "Day")
month_med = getattr(row_med, "Month")
bolus_med = getattr(row_med, "Bolus (U)")
carb_med = getattr(row_med, "Carb Input (grams)")
cur_smalls = -1
got_one = False
for index_tide, row_tide in final.iterrows(): #go through Tidepool Data
mins_tide = getattr(row_tide, "Minutes")
hrs_tide = getattr(row_tide, "Hour")
day_tide = getattr(row_tide, "Day")
month_tide = getattr(row_tide, "Month")
#find closest time in Tidepool data to Medtronic data
if month_tide == month_med and day_tide == day_med and hrs_tide == hrs_med:
#time difference of medtronic time minux tidepool time
dif_time = mins_med - mins_tide
if (dif_time) <= 5:
cur_smalls = index_tide
if got_one:
break #get out of this inner loop as we found the time we wanted for this data
if (dif_time) <= 5:
got_one = True
#add bolus & carb info to bolusCarbdf
if cur_smalls != -1:
if not math.isnan(float(carb_med)):
bolus_carbdf.loc[cur_smalls, 'Carb Input (grams)'] = carb_med
if not math.isnan(float(bolus_med)):
bolus_carbdf.loc[cur_smalls, 'Bolus (U)'] = bolus_med
#--------Concatenate all of the bolusCarbdf dataframe with final dataframe---------------
#concatenate the dataframes together
almost_final = pd.concat([timestamp, glu, monthdf, daydf, weekdaydf, hourdf,
minutesdf, bolus_carbdf], axis=1, ignore_index=True)
#give columns names
almost_final.columns = ["TimeStamp", "Glucose (mg/dL)", "Month",
"Day", "Weekday", "Hour", "Minutes", "Bolus (U)",
"Carb Input (grams)"]
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
#create initial csv OUTPUT
path_base_name = os.path.basename(path_to_input_csv)
output_file_name = "OUTPUT_" + path_base_name
path_to_out_csv = os.path.join(os.getcwd(), "csvData", "csvOutData")
output_file_path = os.path.join(path_to_out_csv, output_file_name)
almost_final.to_csv(output_file_path) # return dataframes as a csv
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
basal_sens_ratio_data = pd.read_csv(output_file_path)
basal_list = []
insulin_sens_list = []
carb_ratio_list = []
for index, row in basal_sens_ratio_data.iterrows():
#for basal list
if row['Hour'] >= 0 and row['Hour'] < 3:
if row['Hour'] == 2 and row['Minutes'] < 30:
basal_list.append(BASAL["0"])
elif row['Hour'] == 2 and row['Minutes'] >= 30:
basal_list.append(BASAL["2"])
else:
basal_list.append(BASAL["0"])
elif row['Hour'] >= 3 and row['Hour'] < 4:
basal_list.append(BASAL["2"])
elif row['Hour'] >= 4 and row['Hour'] < 8:
basal_list.append(BASAL["4"])
elif row['Hour'] >= 8 and row['Hour'] < 12:
basal_list.append(BASAL["8"])
elif row['Hour'] >= 12 and row['Hour'] < 14:
basal_list.append(BASAL["12"])
elif row['Hour'] >= 14 and row['Hour'] < 19:
basal_list.append(BASAL["14"])
elif row['Hour'] >= 19 and row['Hour'] < 24:
basal_list.append(BASAL["19"])
#for insulin sensitivity list
if row['Hour'] >= 0 and row['Hour'] < 6:
insulin_sens_list.append(SENSITIVITY["0"])
elif row['Hour'] >= 6 and row['Hour'] < 9:
insulin_sens_list.append(SENSITIVITY["6"])
elif row['Hour'] >= 9 and row['Hour'] < 12:
insulin_sens_list.append(SENSITIVITY["9"])
elif row['Hour'] >= 12 and row['Hour'] < 15:
insulin_sens_list.append(SENSITIVITY["12"])
elif row['Hour'] >= 15 and row['Hour'] < 24:
insulin_sens_list.append(SENSITIVITY["15"])
#for carb ratio list
if row['Hour'] >= 0 and row['Hour'] < 6:
carb_ratio_list.append(CARB_RATIO["0"])
elif row['Hour'] >= 6 and row['Hour'] < 12:
if row['Hour'] == 11 and row['Minutes'] < 30:
carb_ratio_list.append(CARB_RATIO["6"])
elif row['Hour'] == 11 and row['Minutes'] >= 30:
carb_ratio_list.append(CARB_RATIO["11"])
else:
carb_ratio_list.append(CARB_RATIO["6"])
elif row['Hour'] >= 12 and row['Hour'] < 14:
carb_ratio_list.append(CARB_RATIO["11"])
elif row['Hour'] >= 14 and row['Hour'] < 18:
carb_ratio_list.append(CARB_RATIO["14"])
elif row['Hour'] >= 18 and row['Hour'] < 21:
carb_ratio_list.append(CARB_RATIO["18"])
elif row['Hour'] >= 21 and row['Hour'] < 24:
carb_ratio_list.append(CARB_RATIO["21"])
#create dataframes from lists
basaldf = pd.DataFrame(np.array(basal_list), index=saved_index) #like above set index to index
insulindf = pd.DataFrame(np.array(insulin_sens_list), index=saved_index) #like above set index to index
carbdf = pd.DataFrame(np.array(carb_ratio_list), index=saved_index) #like above set index to index
#----------------------------------------------------------------------------------------
#--------Concatenate the new dataframes into final dataframe----------------------------
real_final = pd.concat([timestamp, glu, basaldf, insulindf, carbdf, monthdf, daydf, weekdaydf, hourdf, minutesdf, bolus_carbdf], axis=1, ignore_index=True) #concatenate the dataframe together
#----------------------------------------------------------------------------------------
#give columns names
real_final.columns = ["TimeStamp", "Glucose (mg/dL)", "Basal Insulin (U/hr)",
"Insulin Sensitivity (mg/dL/U)","Carb Ratio (g/U)", "Month", "Day",
"Weekday", "Hour", "Minutes", "Bolus (U)", "Carb Input (grams)"]
last_time = ""
for index, row in real_final.iterrows():
if row['TimeStamp'] == last_time:
real_final = real_final.drop(index, axis=0)
last_time = row['TimeStamp']
'''
#create final csv OUTPUT (rewrites the earlier csv file)
header = ["TimeStamp", "Glucose (mg/dL)", "Basal Insulin (U/hr)","Insulin Sensitivity (mg/dL/U)","Carb Ratio (g/U)", "Month", "Day","Weekday", "Hour","Minutes","Bolus (U)", "Carb Input (grams)"]
'''
real_final = real_final.reindex(index=real_final.index[::-1])
real_final.to_csv(output_file_path) # return dataframes as a csv
def main():
"""main"""
create_dataframe()
if __name__ == '__main__':
main()
|
[
"dateutil.parser.parse",
"pandas.read_csv",
"pythonScripts.jsonToCsv.convertToCsv",
"os.path.join",
"os.getcwd",
"numpy.array",
"pandas.concat",
"os.path.basename",
"pandas.DataFrame",
"pandas.notnull",
"sys.path.append"
] |
[((247, 268), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (262, 268), False, 'import sys\n'), ((2268, 2282), 'pythonScripts.jsonToCsv.convertToCsv', 'convertToCsv', ([], {}), '()\n', (2280, 2282), False, 'from pythonScripts.jsonToCsv import convertToCsv\n'), ((2302, 2337), 'os.path.basename', 'os.path.basename', (['path_to_input_csv'], {}), '(path_to_input_csv)\n', (2318, 2337), False, 'import os\n'), ((2602, 2632), 'pandas.read_csv', 'pd.read_csv', (['path_to_input_csv'], {}), '(path_to_input_csv)\n', (2613, 2632), True, 'import pandas as pd\n'), ((4234, 4297), 'pandas.concat', 'pd.concat', (['[date, time, bolus, carb]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([date, time, bolus, carb], axis=1, ignore_index=True)\n', (4243, 4297), True, 'import pandas as pd\n'), ((5999, 6117), 'pandas.concat', 'pd.concat', (['[bolus_carb_data, monthdf_bolus, daydf_bolus, hourdf_bolus, minutesdf_bolus]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([bolus_carb_data, monthdf_bolus, daydf_bolus, hourdf_bolus,\n minutesdf_bolus], axis=1, ignore_index=True)\n', (6008, 6117), True, 'import pandas as pd\n'), ((6408, 6512), 'pandas.concat', 'pd.concat', (['[timestamp, glu, monthdf, daydf, weekdaydf, hourdf, minutesdf]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([timestamp, glu, monthdf, daydf, weekdaydf, hourdf, minutesdf],\n axis=1, ignore_index=True)\n', (6417, 6512), True, 'import pandas as pd\n'), ((7002, 7090), 'pandas.DataFrame', 'pd.DataFrame', (['np.nan'], {'index': 'saved_index', 'columns': "['Bolus (U)', 'Carb Input (grams)']"}), "(np.nan, index=saved_index, columns=['Bolus (U)',\n 'Carb Input (grams)'])\n", (7014, 7090), True, 'import pandas as pd\n'), ((8883, 9001), 'pandas.concat', 'pd.concat', (['[timestamp, glu, monthdf, daydf, weekdaydf, hourdf, minutesdf, bolus_carbdf]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([timestamp, glu, monthdf, daydf, weekdaydf, hourdf, minutesdf,\n bolus_carbdf], axis=1, ignore_index=True)\n', (8892, 9001), True, 'import pandas as pd\n'), ((9493, 9528), 'os.path.basename', 'os.path.basename', (['path_to_input_csv'], {}), '(path_to_input_csv)\n', (9509, 9528), False, 'import os\n'), ((9675, 9722), 'os.path.join', 'os.path.join', (['path_to_out_csv', 'output_file_name'], {}), '(path_to_out_csv, output_file_name)\n', (9687, 9722), False, 'import os\n'), ((10016, 10045), 'pandas.read_csv', 'pd.read_csv', (['output_file_path'], {}), '(output_file_path)\n', (10027, 10045), True, 'import pandas as pd\n'), ((13132, 13278), 'pandas.concat', 'pd.concat', (['[timestamp, glu, basaldf, insulindf, carbdf, monthdf, daydf, weekdaydf,\n hourdf, minutesdf, bolus_carbdf]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([timestamp, glu, basaldf, insulindf, carbdf, monthdf, daydf,\n weekdaydf, hourdf, minutesdf, bolus_carbdf], axis=1, ignore_index=True)\n', (13141, 13278), True, 'import pandas as pd\n'), ((2712, 2748), 'pandas.notnull', 'pd.notnull', (["gluc_level_data['value']"], {}), "(gluc_level_data['value'])\n", (2722, 2748), True, 'import pandas as pd\n'), ((3400, 3420), 'numpy.array', 'np.array', (['month_list'], {}), '(month_list)\n', (3408, 3420), True, 'import numpy as np\n'), ((3466, 3484), 'numpy.array', 'np.array', (['day_list'], {}), '(day_list)\n', (3474, 3484), True, 'import numpy as np\n'), ((3534, 3556), 'numpy.array', 'np.array', (['weekday_list'], {}), '(weekday_list)\n', (3542, 3556), True, 'import numpy as np\n'), ((3603, 3622), 'numpy.array', 'np.array', (['hour_list'], {}), '(hour_list)\n', (3611, 3622), True, 'import 
numpy as np\n'), ((3672, 3694), 'numpy.array', 'np.array', (['minutes_list'], {}), '(minutes_list)\n', (3680, 3694), True, 'import numpy as np\n'), ((3875, 3886), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3884, 3886), False, 'import os\n'), ((3945, 3992), 'os.path.join', 'os.path.join', (['path_to_care_link', 'care_link_file'], {}), '(path_to_care_link, care_link_file)\n', (3957, 3992), False, 'import os\n'), ((5676, 5698), 'numpy.array', 'np.array', (['month_list_b'], {}), '(month_list_b)\n', (5684, 5698), True, 'import numpy as np\n'), ((5750, 5770), 'numpy.array', 'np.array', (['day_list_b'], {}), '(day_list_b)\n', (5758, 5770), True, 'import numpy as np\n'), ((5823, 5844), 'numpy.array', 'np.array', (['hour_list_b'], {}), '(hour_list_b)\n', (5831, 5844), True, 'import numpy as np\n'), ((5900, 5924), 'numpy.array', 'np.array', (['minutes_list_b'], {}), '(minutes_list_b)\n', (5908, 5924), True, 'import numpy as np\n'), ((9614, 9625), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9623, 9625), False, 'import os\n'), ((12639, 12659), 'numpy.array', 'np.array', (['basal_list'], {}), '(basal_list)\n', (12647, 12659), True, 'import numpy as np\n'), ((12740, 12767), 'numpy.array', 'np.array', (['insulin_sens_list'], {}), '(insulin_sens_list)\n', (12748, 12767), True, 'import numpy as np\n'), ((12845, 12870), 'numpy.array', 'np.array', (['carb_ratio_list'], {}), '(carb_ratio_list)\n', (12853, 12870), True, 'import numpy as np\n'), ((1656, 1664), 'dateutil.parser.parse', 'parse', (['i'], {}), '(i)\n', (1661, 1664), False, 'from dateutil.parser import parse\n'), ((1736, 1744), 'dateutil.parser.parse', 'parse', (['i'], {}), '(i)\n', (1741, 1744), False, 'from dateutil.parser import parse\n'), ((1908, 1916), 'dateutil.parser.parse', 'parse', (['i'], {}), '(i)\n', (1913, 1916), False, 'from dateutil.parser import parse\n'), ((1991, 1999), 'dateutil.parser.parse', 'parse', (['i'], {}), '(i)\n', (1996, 1999), False, 'from dateutil.parser import parse\n'), ((1818, 1826), 'dateutil.parser.parse', 'parse', (['i'], {}), '(i)\n', (1823, 1826), False, 'from dateutil.parser import parse\n')]
|
# Copyright 2013-2021 The Salish Sea MEOPAR contributors
# and The University of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produce a figure that shows a map of the Salish Sea with markers indicating
the risks of high water levels at the Point Atkinson, Victoria, Campbell River,
Nanaimo, and Cherry Point tide gauge locations.
The figure also shows wind vectors that indicate the average wind speed and
direction averaged over the 4 hours preceding the maximum sea surface height
at each location.
The figure is a thumbnail version of the figure produced by
:py:mod:`nowcast.figures.publish.storm_surge_alerts`.
It is intended primarily for use on the Salish Sea Storm Surge Information
Portal page https://salishsea.eos.ubc.ca/storm-surge/.
Testing notebook for this module is
https://nbviewer.jupyter.org/github/SalishSeaCast/SalishSeaNowcast/blob/main/notebooks/figures/publish/TestStormSurgeAlertsThumbnailModule.ipynb
"""
from collections import namedtuple
import arrow
import matplotlib.pyplot as plt
import numpy
from matplotlib import gridspec
from salishsea_tools import places, nc_tools, stormtools, unit_conversions, wind_tools
import nowcast.figures.website_theme
from nowcast.figures import shared
def make_figure(
grids_15m,
weather_path,
coastline,
tidal_predictions,
figsize=(18, 20),
theme=nowcast.figures.website_theme,
):
"""Plot high water level risk indication markers and 4h average wind
vectors on a Salish Sea map.
:arg dict grids_15m: Collection of 15m sea surface height datasets at tide
gauge locations,
keyed by tide gauge station name.
:arg str weather_path: The directory where the weather forcing files
are stored.
:arg coastline: Coastline dataset.
:type coastline: :class:`mat.Dataset`
:arg str tidal_predictions: Path to directory of tidal prediction
file.
:arg 2-tuple figsize: Figure size (width, height) in inches.
:arg theme: Module-like object that defines the style elements for the
figure. See :py:mod:`nowcast.figures.website_theme` for an
example.
:returns: :py:class:`matplotlib.figure.Figure`
"""
plot_data = _prep_plot_data(grids_15m, tidal_predictions, weather_path)
fig, (ax_map, ax_no_risk, ax_high_risk, ax_extreme_risk) = _prep_fig_axes(
figsize, theme
)
_plot_alerts_map(ax_map, coastline, plot_data, theme)
legend_boxes = (ax_no_risk, ax_high_risk, ax_extreme_risk)
risk_levels = (None, "moderate risk", "extreme risk")
legend_texts = (
"No flooding\nrisk",
"Risk of\nhigh water",
"Extreme risk\nof flooding",
)
for ax, risk_level, text in zip(legend_boxes, risk_levels, legend_texts):
_plot_legend(ax, risk_level, text, theme)
return fig
def _prep_plot_data(grids_15m, tidal_predictions, weather_path):
max_ssh, max_ssh_time, risk_levels = {}, {}, {}
u_wind_4h_avg, v_wind_4h_avg, max_wind_avg = {}, {}, {}
for name in places.TIDE_GAUGE_SITES:
ssh_ts = nc_tools.ssh_timeseries_at_point(grids_15m[name], 0, 0, datetimes=True)
ttide = shared.get_tides(name, tidal_predictions)
max_ssh[name], max_ssh_time[name] = shared.find_ssh_max(name, ssh_ts, ttide)
risk_levels[name] = stormtools.storm_surge_risk_level(
name, max_ssh[name], ttide
)
wind_avg = wind_tools.calc_wind_avg_at_point(
arrow.get(max_ssh_time[name]),
weather_path,
places.PLACES[name]["wind grid ji"],
avg_hrs=-4,
)
u_wind_4h_avg[name], v_wind_4h_avg[name] = wind_avg
max_wind_avg[name], _ = wind_tools.wind_speed_dir(
u_wind_4h_avg[name], v_wind_4h_avg[name]
)
plot_data = namedtuple(
"PlotData",
"ssh_ts, max_ssh, max_ssh_time, risk_levels, "
"u_wind_4h_avg, v_wind_4h_avg, max_wind_avg",
)
return plot_data(
ssh_ts,
max_ssh,
max_ssh_time,
risk_levels,
u_wind_4h_avg,
v_wind_4h_avg,
max_wind_avg,
)
def _prep_fig_axes(figsize, theme):
fig = plt.figure(figsize=figsize, facecolor=theme.COLOURS["figure"]["facecolor"])
gs = gridspec.GridSpec(2, 3, width_ratios=[1, 1, 1], height_ratios=[6, 1])
gs.update(hspace=0.15, wspace=0.05)
ax_map = fig.add_subplot(gs[0, :])
ax_no_risk = fig.add_subplot(gs[1, 0])
ax_no_risk.set_facecolor(theme.COLOURS["figure"]["facecolor"])
ax_high_risk = fig.add_subplot(gs[1, 1])
ax_high_risk.set_facecolor(theme.COLOURS["figure"]["facecolor"])
ax_extreme_risk = fig.add_subplot(gs[1, 2])
ax_extreme_risk.set_facecolor(theme.COLOURS["figure"]["facecolor"])
return fig, (ax_map, ax_no_risk, ax_high_risk, ax_extreme_risk)
def _plot_alerts_map(ax, coastline, plot_data, theme):
shared.plot_map(ax, coastline)
for name in places.TIDE_GAUGE_SITES:
alpha = 0 if numpy.isnan(plot_data.max_ssh[name]) else 0.3
shared.plot_risk_level_marker(
ax, name, plot_data.risk_levels[name], "o", 55, alpha, theme
)
shared.plot_wind_arrow(
ax,
*places.PLACES[name]["lon lat"],
plot_data.u_wind_4h_avg[name],
plot_data.v_wind_4h_avg[name],
theme,
)
# Format the axes and make it pretty
_alerts_map_axis_labels(ax, plot_data.ssh_ts.time[0], theme)
_alerts_map_wind_legend(ax, theme)
_alerts_map_geo_labels(ax, theme)
def _alerts_map_axis_labels(ax, date_time, theme):
ax.set_title(
f"Marine and Atmospheric Conditions\n {date_time:%A, %B %d, %Y}",
fontproperties=theme.FONTS["axes title large"],
color=theme.COLOURS["text"]["axes title"],
)
ax.set_xlabel(
"Longitude [°E]",
fontproperties=theme.FONTS["axis"],
color=theme.COLOURS["text"]["axis"],
)
ax.set_ylabel(
"Latitude [°N]",
fontproperties=theme.FONTS["axis"],
color=theme.COLOURS["text"]["axis"],
)
ax.grid(axis="both")
theme.set_axis_colors(ax)
def _alerts_map_wind_legend(ax, theme):
shared.plot_wind_arrow(ax, -122.5, 50.65, 0, -5, theme)
ax.text(
-122.58,
50.5,
"Reference: 5 m/s",
rotation=90,
fontproperties=theme.FONTS["axes annotation"],
color=theme.COLOURS["text"]["axes annotation"],
)
shared.plot_wind_arrow(ax, -122.75, 50.65, 0, unit_conversions.knots_mps(-5), theme)
ax.text(
-122.83,
50.5,
"Reference: 5 knots",
rotation=90,
fontproperties=theme.FONTS["axes annotation"],
color=theme.COLOURS["text"]["axes annotation"],
)
ax.text(
-122.85,
49.9,
"Winds are 4 hour\n" "average before\n" "maximum water level",
verticalalignment="top",
bbox=theme.COLOURS["axes textbox"],
fontproperties=theme.FONTS["axes annotation"],
color=theme.COLOURS["text"]["axes annotation"],
)
def _alerts_map_geo_labels(ax, theme):
geo_labels = (
# PLACES key, offset x, y, rotation, text size
("Pacific Ocean", 0, 0, 0, "left", "small"),
("Neah Bay", -0.04, -0.08, 0, "right", "large"),
("<NAME>", 0, 0, -18, "left", "small"),
("Puget Sound", 0, 0, -30, "left", "small"),
("Strait of Georgia", 0, 0, -20, "left", "small"),
("Victoria", -0.04, 0.04, 0, "right", "large"),
("<NAME>", 0.04, 0, 0, "left", "large"),
("<NAME>", 0.06, 0.16, 0, "left", "large"),
("Nanaimo", -0.04, 0, 0, "right", "large"),
("<NAME>", -0.04, -0.04, 0, "right", "large"),
("<NAME>", 0, 0, 0, "left", "small"),
("Washington State", 0, 0, 0, "left", "small"),
)
for place, dx, dy, rotation, justify, label_size in geo_labels:
lon, lat = places.PLACES[place]["lon lat"]
ax.text(
lon + dx,
lat + dy,
place,
rotation=rotation,
horizontalalignment=justify,
fontproperties=theme.FONTS[f"location label {label_size}"],
)
def _plot_legend(ax, risk_level, text, theme):
colour = theme.COLOURS["storm surge risk levels"][risk_level]
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.plot(
0.2, 0.45, marker="o", markersize=70, markeredgewidth=2, color=colour, alpha=0.6
)
colour_name = "yellow" if colour.lower() == "gold" else colour
ax.text(
0.4,
0.2,
f"{colour_name.title()}:\n{text}",
transform=ax.transAxes,
fontproperties=theme.FONTS["legend label large"],
color=theme.COLOURS["text"]["risk level label"],
)
_legend_box_hide_frame(ax, theme)
def _legend_box_hide_frame(ax, theme):
ax.set_facecolor(theme.COLOURS["figure"]["facecolor"])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
for spine in ax.spines:
ax.spines[spine].set_visible(False)
|
[
"collections.namedtuple",
"salishsea_tools.wind_tools.wind_speed_dir",
"salishsea_tools.stormtools.storm_surge_risk_level",
"nowcast.figures.shared.plot_risk_level_marker",
"nowcast.figures.shared.get_tides",
"nowcast.figures.shared.find_ssh_max",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"nowcast.figures.shared.plot_map",
"arrow.get",
"numpy.isnan",
"salishsea_tools.nc_tools.ssh_timeseries_at_point",
"nowcast.figures.shared.plot_wind_arrow",
"salishsea_tools.unit_conversions.knots_mps"
] |
[((4396, 4517), 'collections.namedtuple', 'namedtuple', (['"""PlotData"""', '"""ssh_ts, max_ssh, max_ssh_time, risk_levels, u_wind_4h_avg, v_wind_4h_avg, max_wind_avg"""'], {}), "('PlotData',\n 'ssh_ts, max_ssh, max_ssh_time, risk_levels, u_wind_4h_avg, v_wind_4h_avg, max_wind_avg'\n )\n", (4406, 4517), False, 'from collections import namedtuple\n'), ((4763, 4838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'facecolor': "theme.COLOURS['figure']['facecolor']"}), "(figsize=figsize, facecolor=theme.COLOURS['figure']['facecolor'])\n", (4773, 4838), True, 'import matplotlib.pyplot as plt\n'), ((4848, 4917), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {'width_ratios': '[1, 1, 1]', 'height_ratios': '[6, 1]'}), '(2, 3, width_ratios=[1, 1, 1], height_ratios=[6, 1])\n', (4865, 4917), False, 'from matplotlib import gridspec\n'), ((5470, 5500), 'nowcast.figures.shared.plot_map', 'shared.plot_map', (['ax', 'coastline'], {}), '(ax, coastline)\n', (5485, 5500), False, 'from nowcast.figures import shared\n'), ((6760, 6815), 'nowcast.figures.shared.plot_wind_arrow', 'shared.plot_wind_arrow', (['ax', '(-122.5)', '(50.65)', '(0)', '(-5)', 'theme'], {}), '(ax, -122.5, 50.65, 0, -5, theme)\n', (6782, 6815), False, 'from nowcast.figures import shared\n'), ((3665, 3736), 'salishsea_tools.nc_tools.ssh_timeseries_at_point', 'nc_tools.ssh_timeseries_at_point', (['grids_15m[name]', '(0)', '(0)'], {'datetimes': '(True)'}), '(grids_15m[name], 0, 0, datetimes=True)\n', (3697, 3736), False, 'from salishsea_tools import places, nc_tools, stormtools, unit_conversions, wind_tools\n'), ((3753, 3794), 'nowcast.figures.shared.get_tides', 'shared.get_tides', (['name', 'tidal_predictions'], {}), '(name, tidal_predictions)\n', (3769, 3794), False, 'from nowcast.figures import shared\n'), ((3839, 3879), 'nowcast.figures.shared.find_ssh_max', 'shared.find_ssh_max', (['name', 'ssh_ts', 'ttide'], {}), '(name, ssh_ts, ttide)\n', (3858, 3879), False, 'from nowcast.figures import shared\n'), ((3908, 3969), 'salishsea_tools.stormtools.storm_surge_risk_level', 'stormtools.storm_surge_risk_level', (['name', 'max_ssh[name]', 'ttide'], {}), '(name, max_ssh[name], ttide)\n', (3941, 3969), False, 'from salishsea_tools import places, nc_tools, stormtools, unit_conversions, wind_tools\n'), ((4290, 4357), 'salishsea_tools.wind_tools.wind_speed_dir', 'wind_tools.wind_speed_dir', (['u_wind_4h_avg[name]', 'v_wind_4h_avg[name]'], {}), '(u_wind_4h_avg[name], v_wind_4h_avg[name])\n', (4315, 4357), False, 'from salishsea_tools import places, nc_tools, stormtools, unit_conversions, wind_tools\n'), ((5617, 5713), 'nowcast.figures.shared.plot_risk_level_marker', 'shared.plot_risk_level_marker', (['ax', 'name', 'plot_data.risk_levels[name]', '"""o"""', '(55)', 'alpha', 'theme'], {}), "(ax, name, plot_data.risk_levels[name], 'o', \n 55, alpha, theme)\n", (5646, 5713), False, 'from nowcast.figures import shared\n'), ((5739, 5872), 'nowcast.figures.shared.plot_wind_arrow', 'shared.plot_wind_arrow', (['ax', "*places.PLACES[name]['lon lat']", 'plot_data.u_wind_4h_avg[name]', 'plot_data.v_wind_4h_avg[name]', 'theme'], {}), "(ax, *places.PLACES[name]['lon lat'], plot_data.\n u_wind_4h_avg[name], plot_data.v_wind_4h_avg[name], theme)\n", (5761, 5872), False, 'from nowcast.figures import shared\n'), ((7076, 7106), 'salishsea_tools.unit_conversions.knots_mps', 'unit_conversions.knots_mps', (['(-5)'], {}), '(-5)\n', (7102, 7106), False, 'from salishsea_tools import places, nc_tools, stormtools, unit_conversions, 
wind_tools\n'), ((4058, 4087), 'arrow.get', 'arrow.get', (['max_ssh_time[name]'], {}), '(max_ssh_time[name])\n', (4067, 4087), False, 'import arrow\n'), ((5563, 5599), 'numpy.isnan', 'numpy.isnan', (['plot_data.max_ssh[name]'], {}), '(plot_data.max_ssh[name])\n', (5574, 5599), False, 'import numpy\n')]
|
#!/usr/bin/env python
import numpy as np
dim = 3
A = np.ones(shape=(dim, dim))
B = A.copy()
b = np.empty(dim)
for i in range(dim):
b[i] = i + 2
print('A')
print(A)
print('b')
print(b)
for j in range(dim):
A[:, j] *= b[j]
print('% (1)')
print(A)
print('% (2)')
print(B * b)
|
[
"numpy.empty",
"numpy.ones"
] |
[((56, 81), 'numpy.ones', 'np.ones', ([], {'shape': '(dim, dim)'}), '(shape=(dim, dim))\n', (63, 81), True, 'import numpy as np\n'), ((99, 112), 'numpy.empty', 'np.empty', (['dim'], {}), '(dim)\n', (107, 112), True, 'import numpy as np\n')]
|
#
# python_grabber
#
import cv2
import numpy as np
def save_image(filename, img):
cv2.imwrite(filename, img)
def sepia(img):
kernel = np.float32([
[0.272, 0.534, 0.131],
[0.349, 0.686, 0.168],
[0.393, 0.769, 0.189]])
return cv2.transform(img, kernel)
def edge_preserving(img):
return cv2.edgePreservingFilter(img)
def stylization(img):
return cv2.stylization(img)
def pencil_sketch(img):
_, res = cv2.pencilSketch(img)
return res
|
[
"cv2.imwrite",
"cv2.transform",
"cv2.pencilSketch",
"cv2.edgePreservingFilter",
"cv2.stylization",
"numpy.float32"
] |
[((89, 115), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'img'], {}), '(filename, img)\n', (100, 115), False, 'import cv2\n'), ((147, 233), 'numpy.float32', 'np.float32', (['[[0.272, 0.534, 0.131], [0.349, 0.686, 0.168], [0.393, 0.769, 0.189]]'], {}), '([[0.272, 0.534, 0.131], [0.349, 0.686, 0.168], [0.393, 0.769, \n 0.189]])\n', (157, 233), True, 'import numpy as np\n'), ((265, 291), 'cv2.transform', 'cv2.transform', (['img', 'kernel'], {}), '(img, kernel)\n', (278, 291), False, 'import cv2\n'), ((331, 360), 'cv2.edgePreservingFilter', 'cv2.edgePreservingFilter', (['img'], {}), '(img)\n', (355, 360), False, 'import cv2\n'), ((396, 416), 'cv2.stylization', 'cv2.stylization', (['img'], {}), '(img)\n', (411, 416), False, 'import cv2\n'), ((456, 477), 'cv2.pencilSketch', 'cv2.pencilSketch', (['img'], {}), '(img)\n', (472, 477), False, 'import cv2\n')]
|
from collections import defaultdict
import itertools
from skorch.net import NeuralNet
from skorch.dataset import Dataset
import pandas as pd
from sklearn import preprocessing
from tqdm import tqdm
import more_itertools as mit
import numpy as np
import skorch
import torch
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
NORMAL_TRAFFIC = 0.
ATTACK_TRAFFIC = 1.
class ContextCriterion():
def score(self, model_output, target):
raise NotImplementedError
def __call__(self, p1, p2, p3=None):
if isinstance(p1, NeuralNet): # p1=Model, p2=dataset.X, p3=dataset.y
with torch.no_grad():
mout = p1.forward(p2)
return self.score(mout, p3)
else: # p1=model_output, p2=dataset.y
return self.score(p1, p2)
class WindowedDataGenerator():
def __init__(self, overlapping, context_len):
self.overlapping = overlapping
self.context_len = context_len
self.window_stepsize = max(int(context_len * (1 - overlapping)), 1)
def dataframe_windows(self, df):
df_len = len(df)
wnds = mit.windowed(range(df_len), self.context_len, step=self.window_stepsize)
wnds = filter(lambda x: None not in x, wnds)
wnds_values = map(lambda x: df.iloc[list(x)].reset_index(), wnds)
return wnds_values
def anomaly_metadata(self, context):
anomaly_perc = len(context[context["_isanomaly"] != "none"]) / self.context_len
anomaly_type = "none"
if anomaly_perc > 0:
anomaly_type = context.loc[context["_isanomaly"] != "none", "_isanomaly"].iloc[0]
return anomaly_type, anomaly_perc
def generate_context(self, df: pd.DataFrame):
samples = defaultdict(list)
channels = [c for c in df.columns if c[0] != "_"]
logging.debug("Windowing time series for each host")
host_ts = df.groupby(level=["_host"])
for host, ts in tqdm(host_ts):
windows = self.dataframe_windows(ts)
for context in windows:
anomaly_type, anomaly_perc = self.anomaly_metadata(context)
samples["anomaly_type"].append(anomaly_type)
samples["anomaly_perc"].append(anomaly_perc)
ctxvalues = context[channels].values
samples["context"].append(ctxvalues)
samples["host"].append(host)
samples = { k: np.stack(v) for k, v in samples.items() }
return samples
@staticmethod
def alternate_merge(ll):
return list(itertools.chain(*zip(*ll)))
def sk_dataset(self, context_dictionary):
skdset = {}
skdset["context"] = torch.Tensor(context_dictionary["context"])
skdset["host"] = preprocessing.LabelEncoder().fit_transform(context_dictionary["host"])
an_perc = context_dictionary["anomaly_perc"]
Y = np.where(an_perc==0, NORMAL_TRAFFIC, an_perc)
Y = np.where(Y > 0, ATTACK_TRAFFIC, Y)
Y = torch.Tensor(Y)
return self.Dataset2GPU(Dataset(skdset, Y))
def Dataset2GPU(self, dataset):
if torch.cuda.is_available():
dataset.X["context"] = dataset.X["context"].cuda()
dataset.y = dataset.y.cuda()
return dataset
def __call__(self, df_collection, to_sk_dataset=True, filter_anomaly=True):
model_input = defaultdict(list)
for df in df_collection:
ctxs = self.generate_context(df)
for k, v in ctxs.items():
model_input[k].append(v)
model_input = { k: np.array(self.alternate_merge(v)) for k, v in model_input.items() }
if filter_anomaly:
normal_mask = np.where(model_input["anomaly_perc"] == 0)[0]
model_input = { k: x[normal_mask] for k, x in model_input.items() }
if to_sk_dataset:
return self.sk_dataset(model_input)
return model_input
class WindowedAnomalyDetector(skorch.net.NeuralNet):
def __init__(self, *args, wlen=None, **kwargs):
self.pointwise_ctxs = None
if wlen is not None:
self.initialize_context(wlen)
super(WindowedAnomalyDetector, self).__init__(*args, **kwargs)
def initialize_context(self, wlen):
self.wlen = wlen
self.pointwise_ctxs = WindowedDataGenerator(1., wlen)
def fit(self, *args, **kwargs):
wlen = args[0].X["context"].size(1)
self.initialize_context(wlen)
super().fit(*args, **kwargs)
def pointwise_embedding(self, samples):
return self.pointwise(samples, self.module_.toembedding, "_embedding", pad_with=np.nan)
def pointwise_anomaly(self, samples):
return self.pointwise(samples, self.module_.context_anomaly, "_y_hat")
def pointwise(self, samples, fun, label, pad_with=0.):
if self.pointwise_ctxs is None:
raise AttributeError("Not fitted, missing context len")
if not isinstance(samples, list):
samples = [samples]
halfwlen = int(self.wlen / 2)
aus = 1 if (halfwlen % 2 ==0) else 0
ebs_sl = slice(halfwlen, -halfwlen + aus)
res = [ [] for i in range(len(samples)) ]
channels = [c for c in samples[0].columns if c[0] != "_"]
for i, df in enumerate(samples):
host_ts = df.groupby(level=["_host"])
for _, host_df in host_ts:
windows = self.pointwise_ctxs.dataframe_windows(host_df)
ctx_batch = np.stack([ ctx[channels].values for ctx in windows ])
def aperc(ctx):
return self.pointwise_ctxs.anomaly_metadata(ctx)[1]
windows = self.pointwise_ctxs.dataframe_windows(host_df)
aperc = np.array([ aperc(ctx) for ctx in windows ])
vaperc = np.full((len(host_df), 1), pad_with).squeeze()
vaperc[ebs_sl] = aperc
host_df["_aperc"] = vaperc
with torch.no_grad():
pred = fun(torch.tensor(ctx_batch))
# Fix windowing padding with zeros (hope no anomaly)
if len(pred.shape) == 1:
y_hat = np.full((len(host_df), 1), pad_with).squeeze()
y_hat[ebs_sl] = pred.numpy()
else:
y_hat = np.full((len(host_df), pred.size(1)), pad_with)
y_hat[ebs_sl] = pred.numpy()
y_hat = [ np.nan if np.isnan(x).any() else x for x in list(y_hat) ]
host_df[label] = y_hat
res[i].append(host_df)
res[i] = pd.concat(res[i])
if not isinstance(samples, list):
return res[0]
return res
|
[
"logging.getLogger",
"sklearn.preprocessing.LabelEncoder",
"logging.debug",
"numpy.where",
"tqdm.tqdm",
"torch.Tensor",
"numpy.stack",
"torch.tensor",
"torch.cuda.is_available",
"collections.defaultdict",
"numpy.isnan",
"skorch.dataset.Dataset",
"torch.no_grad",
"pandas.concat"
] |
[((288, 319), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (305, 319), False, 'import logging\n'), ((1754, 1771), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1765, 1771), False, 'from collections import defaultdict\n'), ((1843, 1895), 'logging.debug', 'logging.debug', (['"""Windowing time series for each host"""'], {}), "('Windowing time series for each host')\n", (1856, 1895), False, 'import logging\n'), ((1966, 1979), 'tqdm.tqdm', 'tqdm', (['host_ts'], {}), '(host_ts)\n', (1970, 1979), False, 'from tqdm import tqdm\n'), ((2699, 2742), 'torch.Tensor', 'torch.Tensor', (["context_dictionary['context']"], {}), "(context_dictionary['context'])\n", (2711, 2742), False, 'import torch\n'), ((2904, 2951), 'numpy.where', 'np.where', (['(an_perc == 0)', 'NORMAL_TRAFFIC', 'an_perc'], {}), '(an_perc == 0, NORMAL_TRAFFIC, an_perc)\n', (2912, 2951), True, 'import numpy as np\n'), ((2962, 2996), 'numpy.where', 'np.where', (['(Y > 0)', 'ATTACK_TRAFFIC', 'Y'], {}), '(Y > 0, ATTACK_TRAFFIC, Y)\n', (2970, 2996), True, 'import numpy as np\n'), ((3009, 3024), 'torch.Tensor', 'torch.Tensor', (['Y'], {}), '(Y)\n', (3021, 3024), False, 'import torch\n'), ((3134, 3159), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3157, 3159), False, 'import torch\n'), ((3391, 3408), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3402, 3408), False, 'from collections import defaultdict\n'), ((2439, 2450), 'numpy.stack', 'np.stack', (['v'], {}), '(v)\n', (2447, 2450), True, 'import numpy as np\n'), ((3066, 3084), 'skorch.dataset.Dataset', 'Dataset', (['skdset', 'Y'], {}), '(skdset, Y)\n', (3073, 3084), False, 'from skorch.dataset import Dataset\n'), ((6653, 6670), 'pandas.concat', 'pd.concat', (['res[i]'], {}), '(res[i])\n', (6662, 6670), True, 'import pandas as pd\n'), ((629, 644), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (642, 644), False, 'import torch\n'), ((2768, 2796), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (2794, 2796), False, 'from sklearn import preprocessing\n'), ((3724, 3766), 'numpy.where', 'np.where', (["(model_input['anomaly_perc'] == 0)"], {}), "(model_input['anomaly_perc'] == 0)\n", (3732, 3766), True, 'import numpy as np\n'), ((5517, 5568), 'numpy.stack', 'np.stack', (['[ctx[channels].values for ctx in windows]'], {}), '([ctx[channels].values for ctx in windows])\n', (5525, 5568), True, 'import numpy as np\n'), ((5994, 6009), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6007, 6009), False, 'import torch\n'), ((6042, 6065), 'torch.tensor', 'torch.tensor', (['ctx_batch'], {}), '(ctx_batch)\n', (6054, 6065), False, 'import torch\n'), ((6489, 6500), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6497, 6500), True, 'import numpy as np\n')]
|
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
import rlkit.torch.sac.diayn
from .mode_actions_sampler import ModeActionSampler
from network import ModeDisentanglingNetwork
from env import DmControlEnvForPytorchBothObstype
class DisentanglingTester:
def __init__(self,
latent_model_path,
env,
seed,
video_dir,
):
# Latent model
self.latent_model = torch.load(latent_model_path).eval()
print("Model loaded")
# Environment
self.env = env
assert isinstance(self.env, DmControlEnvForPytorchBothObstype), \
            'Both observation types (pixel and state representation) are needed' \
            ' to create the test video. ' \
            'Take the DmControlEnvForPytorchBothObstype env-class'
assert self.env.obs_type == 'pixels'
self.action_repeat = self.env.action_repeat
# Seed
torch.manual_seed(seed)
np.random.seed(seed)
self.env.seed(seed)
# Device
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
# Directories
self.video_dir = video_dir
os.makedirs(video_dir, exist_ok=True)
# Video
self.video_cnt = 0
self.video_name = 'skill'
self.video = None
self._reset_video_writer(video_name=self.video_name + str(self.video_cnt))
# Sampling mode conditioned actions from the latent model
self.mode_action_sampler = ModeActionSampler(self.latent_model, device=self.device)
# Other
self.steps = 0
self.episodes = 0
def _create_video_name(self):
return self.video_name + str(self.video_cnt)
def _reset_video_writer(self, video_name):
video_path = os.path.join(self.video_dir, video_name)
video_path += '.avi'
rows = self.env.observation_space.shape[1]
cols = self.env.observation_space.shape[2]
self.video = cv2.VideoWriter(video_path,
cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
25,
(rows, cols),
True)
self.video_cnt += 1
def _write_img_to_video(self, img):
# (H, W, num_channels) seems to be needed by cvtColor
if img.shape[0] == 3:
img = img.transpose(1, 2, 0)
bgr_img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)
self.video.write(bgr_img)
def _save_video(self):
self.video.release()
def _get_state_obs_enc(self):
state_obs = self.env.get_state_obs()
with torch.no_grad():
state_obs_enc = self.latent_model.encoder(
torch.from_numpy(state_obs).unsqueeze(dim=0).float().to(self.device)
)
return state_obs_enc
def generate_skill_autoregressive(self):
# Env reset
pixel_obs = self.env.reset()
state_obs = self._get_state_obs_enc()
self._write_img_to_video(pixel_obs)
# Counters
self.episodes += 1
episode_steps = 0
# Done Flag
done = False
while not done:
action = self.mode_action_sampler(state_obs)
pixel_obs, _, done, _ = self.env.step(action.cpu().numpy())
state_obs = self._get_state_obs_enc()
self.steps += self.action_repeat
episode_steps += self.action_repeat
self._write_img_to_video(pixel_obs)
def run(self, num_skills=10):
for skill in range(num_skills):
# Resets
self.mode_action_sampler.reset()
self._reset_video_writer(video_name=self._create_video_name())
# Make video
self.generate_skill_autoregressive()
self._save_video()
|
[
"torch.manual_seed",
"os.makedirs",
"torch.load",
"os.path.join",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.random.seed",
"cv2.VideoWriter_fourcc",
"torch.no_grad"
] |
[((990, 1013), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1007, 1013), False, 'import torch\n'), ((1022, 1042), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1036, 1042), True, 'import numpy as np\n'), ((1260, 1297), 'os.makedirs', 'os.makedirs', (['video_dir'], {'exist_ok': '(True)'}), '(video_dir, exist_ok=True)\n', (1271, 1297), False, 'import os\n'), ((1867, 1907), 'os.path.join', 'os.path.join', (['self.video_dir', 'video_name'], {}), '(self.video_dir, video_name)\n', (1879, 1907), False, 'import os\n'), ((2125, 2167), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (2147, 2167), False, 'import cv2\n'), ((2765, 2780), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2778, 2780), False, 'import torch\n'), ((495, 524), 'torch.load', 'torch.load', (['latent_model_path'], {}), '(latent_model_path)\n', (505, 524), False, 'import torch\n'), ((1147, 1172), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1170, 1172), False, 'import torch\n'), ((2853, 2880), 'torch.from_numpy', 'torch.from_numpy', (['state_obs'], {}), '(state_obs)\n', (2869, 2880), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import matplotlib.pyplot as plt
from splane import pzmap, grpDelay, bodePlot, convert2SOS
from scipy import signal
# Esta es una liberia tomada de la comunidad [https://stackoverflow.com/questions/35304245/multiply-scipy-lti-transfer-functions?newreg=b12c460c179042b09ad75c2fb4297bc9]
from ltisys import *
e = .096
a = np.roots([-256*e,0,-640*e,0,-560*e,0,-200*e,0,-25*e,0,1])
# Coeficientes Transferencias
q_1 = 2.59
k = 1
q_2 = 4.3
w_2 = 1.025
q_3 = 1.12
w_3 = 0.703
# Genero la función transferencia T1 en S
num_t1 = [1, 0]
den_t1 = [1, q_1]
T1 = ltimul(num_t1, den_t1);
# Genero la función transferencia T2 en S
num_t2 = [1, 0, 0]
den_t2 = [1, 1 / (q_2 * w_2), 1/ (w_2**2)]
T2 = ltimul(num_t2, den_t2);
# Genero la función transferencia T3 en S
num_t3 = [1, 0, 0]
den_t3 = [1, 1 / (q_3 * w_3), 1/ (w_3**2)]
T3 = ltimul(num_t3, den_t3);
T = T1 * T2 * T3
#pzmap(T, 1);
#fig, ax = bodePlot(T1.to_ss(), 2);
#fig, ax = bodePlot(T2.to_ss(), 2);
#fig, ax = bodePlot(T3.to_ss(), 2);
fig, ax = bodePlot(T.to_ss(), 2);
#ax[0].set_xlim(1e-1,1e1)
#ax[0].set_ylim(-100, 10)
#ax[1].set_xlim(1e-1,1e1)
|
[
"numpy.roots"
] |
[((420, 497), 'numpy.roots', 'np.roots', (['[-256 * e, 0, -640 * e, 0, -560 * e, 0, -200 * e, 0, -25 * e, 0, 1]'], {}), '([-256 * e, 0, -640 * e, 0, -560 * e, 0, -200 * e, 0, -25 * e, 0, 1])\n', (428, 497), True, 'import numpy as np\n')]
|
"""Функции проверки статуса дивидендов"""
from urllib.error import URLError
import numpy as np
import pandas as pd
from local.dividends import comony_ru
from local.dividends import dohod_ru
from local.dividends import smart_lab_ru
from local.dividends.sqlite import DividendsDataManager
from local.dividends.sqlite import STATISTICS_START
from web.labels import DIVIDENDS
from web.labels import TICKER
DIVIDENDS_SOURCES = [dohod_ru.dividends_dohod,
comony_ru.dividends_conomy,
smart_lab_ru.dividends_smart_lab]
def smart_lab_status(tickers: tuple):
"""Информация об актуальности данных в основной локальной базе дивидендов
Parameters
----------
tickers
Основные тикеры, для которых нужно проверить актуальность данных
Returns
-------
tuple of list
Нулевой элемент кортежа - список тикеров из переданных без актуальной информации в локальной базе
Первый элемент кортежа - список тикеров со СмартЛаба, по которым нет актуальной информации в локальной базе
"""
df = smart_lab_ru.dividends_smart_lab()
result = ([], [])
for i in range(len(df)):
date = df.index[i]
ticker = df.iloc[i][TICKER]
value = df.iloc[i][DIVIDENDS]
local_data = DividendsDataManager(ticker).value
if (date not in local_data.index) or (local_data[date] != value):
if ticker in tickers:
result[0].append(ticker)
else:
result[1].append(ticker)
return result
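def _example_smart_lab_status_usage():
    """Hedged usage sketch added for illustration; not part of the original module.

    The portfolio tuple below is a hypothetical placeholder for the tickers
    whose data freshness needs to be checked.
    """
    portfolio = ('AKRN', 'ALRS', 'GMKN')
    need_update, not_tracked = smart_lab_status(portfolio)
    print('Portfolio tickers with stale local data:', need_update)
    print('Other SmartLab tickers with stale local data:', not_tracked)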
def dividends_status(ticker: str):
"""Проверяет необходимость обновления данных
Сравнивает основные данные по дивидендам с альтернативными источниками и выводит результаты сравнения
Parameters
----------
ticker
Тикер
Returns
-------
list
Список из DataFrame с результатами сравнения для каждого источника данных
"""
manager = DividendsDataManager(ticker)
manager.update()
df = manager.value
result = []
for source in DIVIDENDS_SOURCES:
        print(f'\nCOMPARISON OF MAIN DATA WITH {source.__name__}\n')
try:
source_df = source(ticker)
except IndexError as err:
print(err.args[0])
except URLError as err:
print(err.args[0])
else:
source_df = source_df[source_df.index >= pd.Timestamp(STATISTICS_START)]
source_df.name = source.__name__
compare_df = pd.concat([df, source_df], axis='columns')
compare_df['STATUS'] = 'ERROR'
compare_df.loc[np.isclose(compare_df[ticker].values, compare_df[source.__name__].values), 'STATUS'] = ''
print(compare_df)
result.append(compare_df)
return result
if __name__ == '__main__':
dividends_status('ALRS')
|
[
"numpy.isclose",
"local.dividends.sqlite.DividendsDataManager",
"local.dividends.smart_lab_ru.dividends_smart_lab",
"pandas.Timestamp",
"pandas.concat"
] |
[((1072, 1106), 'local.dividends.smart_lab_ru.dividends_smart_lab', 'smart_lab_ru.dividends_smart_lab', ([], {}), '()\n', (1104, 1106), False, 'from local.dividends import smart_lab_ru\n'), ((1928, 1956), 'local.dividends.sqlite.DividendsDataManager', 'DividendsDataManager', (['ticker'], {}), '(ticker)\n', (1948, 1956), False, 'from local.dividends.sqlite import DividendsDataManager\n'), ((1280, 1308), 'local.dividends.sqlite.DividendsDataManager', 'DividendsDataManager', (['ticker'], {}), '(ticker)\n', (1300, 1308), False, 'from local.dividends.sqlite import DividendsDataManager\n'), ((2471, 2513), 'pandas.concat', 'pd.concat', (['[df, source_df]'], {'axis': '"""columns"""'}), "([df, source_df], axis='columns')\n", (2480, 2513), True, 'import pandas as pd\n'), ((2369, 2399), 'pandas.Timestamp', 'pd.Timestamp', (['STATISTICS_START'], {}), '(STATISTICS_START)\n', (2381, 2399), True, 'import pandas as pd\n'), ((2584, 2657), 'numpy.isclose', 'np.isclose', (['compare_df[ticker].values', 'compare_df[source.__name__].values'], {}), '(compare_df[ticker].values, compare_df[source.__name__].values)\n', (2594, 2657), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy import stats
from transposonmapper.statistics import dataframe_from_pergenefile
def make_datafile(path_a,filelist_a,path_b,filelist_b):
"""Assembly the datafile name to analyze
Parameters
----------
path_a : str
Path of the files corresponding to the reference library
filelist_a : list of str
List of the filenames of the different replicates from the reference library.
        There have to be at least two replicates per library, so the list has to contain
a minimum of two files.
path_b : str
Path of the files corresponding to the experimental library
filelist_b : list of str
List of the filenames of the different replicates from the experimental library.
        There have to be at least two replicates per library, so the list has to contain
a minimum of two files.
Returns
-------
    (list of str, list of str)
        Complete paths of the files in the reference and the experimental libraries
"""
datafiles_list_a = []
datafiles_list_b = []
for files in filelist_a:
datafile = os.path.join(path_a, files)
assert os.path.isfile(datafile), 'File not found at: %s' % datafile
datafiles_list_a.append(datafile)
for files in filelist_b:
datafile = os.path.join(path_b, files)
assert os.path.isfile(datafile), 'File not found at: %s' % datafile
datafiles_list_b.append(datafile)
return datafiles_list_a,datafiles_list_b
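def _example_make_datafile_usage():
    """Hedged usage sketch added for illustration; not part of the original module.

    The directories and file names below are hypothetical placeholders for two
    replicates per library, the minimum that make_datafile expects.
    """
    datafiles_a, datafiles_b = make_datafile(
        path_a="data/reference_library",
        filelist_a=["WT_1_pergene.txt", "WT_2_pergene.txt"],
        path_b="data/experimental_library",
        filelist_b=["mutant_1_pergene.txt", "mutant_2_pergene.txt"],
    )
    return datafiles_a, datafiles_b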
def info_from_datasets(datafiles_list_a,datafiles_list_b,variable,normalize):
"""Read the information contain in the datafiles for the volcano plot
Parameters
----------
datafiles_list_a : list of str
List of the absolute paths of all the replicates from the
reference library.
datafiles_list_b : list of str
List of the absolute paths of all the replicates from the
experimental library.
variable : str
        The quantity on which the volcano plot is based.
For example: tn_per_gene, read_per_gene or Nreadsperinsrt
normalize : bool
        If set to True, each gene is normalized based on
the total count in each dataset (i.e. each file in filelist_)
Returns
-------
variable_a_array : numpy.array
variable_b_array: numpy.array
volcano_df: pandas.core.frame.DataFrame
tnread_gene_a: pandas.core.frame.DataFrame
tnread_gene_b: pandas.core.frame.DataFrame
"""
tn_per_gene_zeroreplace = 5 #Add 5 insertions to every gene
read_per_gene_zeroreplace = 25 #Add 25 reads to every gene
# norm_a = 0
# norm_b = 0
for count, datafile_a in enumerate(datafiles_list_a):
tnread_gene_a = dataframe_from_pergenefile(datafile_a, verbose=False)
if normalize == True:
if variable == 'tn_per_gene':
norm_a = sum(tnread_gene_a.tn_per_gene)#*10**-4
elif variable == 'read_per_gene':
norm_a = sum(tnread_gene_a.read_per_gene)#*10**-7
elif variable == 'Nreadsperinsrt':
norm_a = sum(tnread_gene_a.Nreadsperinsrt)
#ADD A CONSTANT TO ALL VALUES TO PREVENT A ZERO DIVISION WHEN DETERMINING THE FOLD CHANGE.
tnread_gene_a.tn_per_gene = tnread_gene_a.tn_per_gene + tn_per_gene_zeroreplace
tnread_gene_a.read_per_gene = tnread_gene_a.read_per_gene + read_per_gene_zeroreplace
tnread_gene_a.Nreadsperinsrt = tnread_gene_a.Nreadsperinsrt + (read_per_gene_zeroreplace/tn_per_gene_zeroreplace)
if count == 0:
volcano_df = tnread_gene_a[['gene_names']] #initialize new dataframe with gene_names
if normalize == True:
variable_a_array = np.divide(tnread_gene_a[[variable]].to_numpy(), norm_a) #create numpy array to store normalized data
else:
variable_a_array = tnread_gene_a[[variable]].to_numpy() #create numpy array to store raw data
else:
if normalize == True:
variable_a_array = np.append(variable_a_array, np.divide(tnread_gene_a[[variable]].to_numpy(), norm_a), axis=1) #append normalized data
else:
variable_a_array = np.append(variable_a_array, tnread_gene_a[[variable]].to_numpy(), axis=1) #append raw data
for count, datafile_b in enumerate(datafiles_list_b):
tnread_gene_b = dataframe_from_pergenefile(datafile_b, verbose=False)
if normalize == True:
if variable == 'tn_per_gene':
norm_b = sum(tnread_gene_b.tn_per_gene)#*10**-4
elif variable == 'read_per_gene':
norm_b = sum(tnread_gene_b.read_per_gene)#*10**-7
elif variable == 'Nreadsperinsrt':
norm_b = sum(tnread_gene_b.Nreadsperinsrt)
#ADD A CONSTANT TO ALL VALUES TO PREVENT A ZERO DIVISION WHEN DETERMINING THE FOLD CHANGE.
tnread_gene_b.tn_per_gene = tnread_gene_b.tn_per_gene + tn_per_gene_zeroreplace
tnread_gene_b.read_per_gene = tnread_gene_b.read_per_gene + read_per_gene_zeroreplace
tnread_gene_b.Nreadsperinsrt = tnread_gene_b.Nreadsperinsrt + (read_per_gene_zeroreplace/tn_per_gene_zeroreplace)
if count == 0:
if normalize == True:
variable_b_array = np.divide(tnread_gene_b[[variable]].to_numpy(), norm_b)
else:
variable_b_array = tnread_gene_b[[variable]].to_numpy()
else:
if normalize == True:
variable_b_array = np.append(variable_b_array, np.divide(tnread_gene_b[[variable]].to_numpy(), norm_b), axis=1)
else:
variable_b_array = np.append(variable_b_array, tnread_gene_b[[variable]].to_numpy(), axis=1)
return variable_a_array,variable_b_array,volcano_df,tnread_gene_a,tnread_gene_b
def apply_stats(variable_a_array,variable_b_array,significance_threshold,volcano_df):
"""This function computes the statistics measure for the volcano plot
Parameters
----------
variable_a_array : array
The values (# of insertions or reads) of the replicates of one library
variable_b_array : array
The values (# of insertions or reads) of the replicates of the other library
significance_threshold : float
It will use the default value in the volcano function which is 0.01
Returns
-------
dataframe
A dataframe containing all the info for the volcano plot.
"""
ttest_tval_list = [np.nan]*len(variable_a_array) #initialize list for storing t statistics
ttest_pval_list = [np.nan]*len(variable_a_array) #initialize list for storing p-values
signif_thres_list = [False]*len(variable_a_array) #initialize boolean list for indicating datapoints with p-value above threshold
fc_list = [np.nan]*len(variable_a_array) #initialize list for storing fold changes
for count,val in enumerate(variable_a_array):
ttest_val = stats.ttest_ind(variable_a_array[count], variable_b_array[count]) #T-test
ttest_tval_list[count] = ttest_val[0]
if not ttest_val[1] == 0: #prevent p=0 to be inputted in log
ttest_pval_list[count] = -1*np.log10(ttest_val[1])
else:
ttest_pval_list[count] = 0
if ttest_pval_list[count] > -1*np.log10(significance_threshold):
signif_thres_list[count] = True
#DETERMINE FOLD CHANGE PER GENE
if np.mean(variable_b_array[count]) == 0 and np.mean(variable_a_array[count]) == 0:
fc_list[count] = 0
else:
fc_list[count] = np.log2(np.mean(variable_a_array[count]) / np.mean(variable_b_array[count]))
volcano_df['fold_change'] = fc_list
volcano_df['t_statistic'] = ttest_tval_list
volcano_df['p_value'] = ttest_pval_list
volcano_df['significance'] = signif_thres_list
return volcano_df
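def _example_per_gene_statistic():
    """Hedged worked example added for illustration; not part of the original module.

    Mirrors the per-gene computation in apply_stats on toy read counts for a
    single gene: a two-sample t-test, the p-value reported as -log10(p), and a
    log2 fold change of the reference library over the experimental one.
    """
    reads_a = np.array([120.0, 130.0, 125.0])  # toy reference replicates
    reads_b = np.array([40.0, 55.0, 45.0])  # toy experimental replicates
    t_stat, p_val = stats.ttest_ind(reads_a, reads_b)
    fold_change = np.log2(np.mean(reads_a) / np.mean(reads_b))
    return t_stat, -1 * np.log10(p_val), fold_change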
|
[
"numpy.mean",
"numpy.log10",
"os.path.join",
"os.path.isfile",
"transposonmapper.statistics.dataframe_from_pergenefile",
"scipy.stats.ttest_ind"
] |
[((1143, 1170), 'os.path.join', 'os.path.join', (['path_a', 'files'], {}), '(path_a, files)\n', (1155, 1170), False, 'import os\n'), ((1186, 1210), 'os.path.isfile', 'os.path.isfile', (['datafile'], {}), '(datafile)\n', (1200, 1210), False, 'import os\n'), ((1337, 1364), 'os.path.join', 'os.path.join', (['path_b', 'files'], {}), '(path_b, files)\n', (1349, 1364), False, 'import os\n'), ((1380, 1404), 'os.path.isfile', 'os.path.isfile', (['datafile'], {}), '(datafile)\n', (1394, 1404), False, 'import os\n'), ((2790, 2843), 'transposonmapper.statistics.dataframe_from_pergenefile', 'dataframe_from_pergenefile', (['datafile_a'], {'verbose': '(False)'}), '(datafile_a, verbose=False)\n', (2816, 2843), False, 'from transposonmapper.statistics import dataframe_from_pergenefile\n'), ((4450, 4503), 'transposonmapper.statistics.dataframe_from_pergenefile', 'dataframe_from_pergenefile', (['datafile_b'], {'verbose': '(False)'}), '(datafile_b, verbose=False)\n', (4476, 4503), False, 'from transposonmapper.statistics import dataframe_from_pergenefile\n'), ((7019, 7084), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['variable_a_array[count]', 'variable_b_array[count]'], {}), '(variable_a_array[count], variable_b_array[count])\n', (7034, 7084), False, 'from scipy import stats\n'), ((7248, 7270), 'numpy.log10', 'np.log10', (['ttest_val[1]'], {}), '(ttest_val[1])\n', (7256, 7270), True, 'import numpy as np\n'), ((7363, 7395), 'numpy.log10', 'np.log10', (['significance_threshold'], {}), '(significance_threshold)\n', (7371, 7395), True, 'import numpy as np\n'), ((7489, 7521), 'numpy.mean', 'np.mean', (['variable_b_array[count]'], {}), '(variable_b_array[count])\n', (7496, 7521), True, 'import numpy as np\n'), ((7531, 7563), 'numpy.mean', 'np.mean', (['variable_a_array[count]'], {}), '(variable_a_array[count])\n', (7538, 7563), True, 'import numpy as np\n'), ((7652, 7684), 'numpy.mean', 'np.mean', (['variable_a_array[count]'], {}), '(variable_a_array[count])\n', (7659, 7684), True, 'import numpy as np\n'), ((7687, 7719), 'numpy.mean', 'np.mean', (['variable_b_array[count]'], {}), '(variable_b_array[count])\n', (7694, 7719), True, 'import numpy as np\n')]
|
__version__ = '0.1.13'
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
from cartesian_explorer import lazy_imports
from cartesian_explorer.lib.lru_cache import lru_cache
from cartesian_explorer.lib.lru_cache_mproc import lru_cache as lru_cache_mproc
from cartesian_explorer.lib.dict_product import dict_product
from cartesian_explorer.Explorer import Explorer
def get_example_explorer():
""" Create a demonstrative explorer.
    The explorer describes radioactive decay
    of Pb isotopes.
    Provides:
        Mass: mass remaining of the isotope of type `isotope`
        Speed: current speed of decay at `time_sec`
Requires:
time_sec: time to calculate outputs at.
isotope: type of isotope: one of "Pb186", "Pb187", "Pb188"
"""
import numpy as np
ex = Explorer()
@ex.provider
@ex.add_function(provides='Mass', requires=('time_sec', 'T'))
def mass(time_sec, T):
return np.exp(-T*time_sec)
@ex.add_function(provides='Speed', requires=('time_sec', 'T'))
def speed(time_sec, T):
return -T*np.exp(-T*time_sec)
@ex.provider
def T(isotope):
if isotope == 'Pb186':
return np.log(2)/4.82
if isotope == 'Pb187':
return np.log(2)/15.2
if isotope == 'Pb188':
return np.log(2)/35.2
return ex
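def _example_decay_math():
    """Hedged worked example added for illustration; not part of the package API.

    Reproduces the arithmetic used by the providers above without going through
    the Explorer: decay constant T = ln(2) / half-life, remaining mass
    exp(-T * t) and decay speed -T * exp(-T * t), using the Pb186 value.
    """
    import numpy as np
    T = np.log(2) / 4.82  # same constant as the 'Pb186' branch above
    time_sec = 10.0
    mass = np.exp(-T * time_sec)
    speed = -T * np.exp(-T * time_sec)
    return mass, speed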
|
[
"logging.getLogger",
"numpy.exp",
"numpy.log",
"cartesian_explorer.Explorer.Explorer"
] |
[((51, 82), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (68, 82), False, 'import logging\n'), ((837, 847), 'cartesian_explorer.Explorer.Explorer', 'Explorer', ([], {}), '()\n', (845, 847), False, 'from cartesian_explorer.Explorer import Explorer\n'), ((973, 994), 'numpy.exp', 'np.exp', (['(-T * time_sec)'], {}), '(-T * time_sec)\n', (979, 994), True, 'import numpy as np\n'), ((1107, 1128), 'numpy.exp', 'np.exp', (['(-T * time_sec)'], {}), '(-T * time_sec)\n', (1113, 1128), True, 'import numpy as np\n'), ((1215, 1224), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1221, 1224), True, 'import numpy as np\n'), ((1280, 1289), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1286, 1289), True, 'import numpy as np\n'), ((1345, 1354), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1351, 1354), True, 'import numpy as np\n')]
|
import torch
import argparse
import scipy
import numpy as np
import pickle
from deeprobust.graph.targeted_attack import Nettack
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset
from deeprobust.graph.defense import *
from sklearn.preprocessing import normalize
from tqdm import tqdm
from scipy.sparse.linalg import eigs
from scipy.sparse import csgraph,lil_matrix
from scipy import spatial
# arguments
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type = str, default = "cora",choices = ["cora","citeseer"],help="dataset")
parser.add_argument("--defense", type = bool, default = False, help="defense or not") # with --defense flag, the value of flag is true
parser.add_argument("--model", type = str, default = "GCN", choices= ["GCN","GAT","GIN"])
parser.add_argument("--debug", type = bool, default = True, choices= [True,False])
parser.add_argument("--seed", type = int, default = 29, help="Random Seed" )
parser.add_argument("--direct", action = "store_false", help = "direct attack / influence attack") # with --direct flag, the val of flag is false
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.direct:
influencers = 0
else:
influencers = 5
if args.cuda:
    torch.cuda.manual_seed(args.seed)  # seed the GPU RNG with the same seed
if args.debug:
print('cuda :: {}\ndataset :: {}\nDefense Algo :: {}\nmodel :: {}\nDirect attack :: {}'.format(args.cuda, args.dataset, args.defense, args.model, args.direct))
#get data from deeprobust/Dataset
data = Dataset(root='/tmp/',name=args.dataset)
#adj matrix, features, labels
adj, features, labels = data.adj, data.features, data.labels
#train,test sets
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
adj=adj+adj.T
adj[adj>1] = 1
#setup surrogate model
surrogate=GCN(nfeat = features.shape[1], nclass = labels.max().item()+1, nhid = 16, dropout = 0, with_relu = False,
with_bias = False, device = device).to(device)
surrogate.fit(features, adj, labels, idx_train, idx_val, patience = 30, train_iters = 100)
"""
features:
features of nodes
adj:
adjacency matrix
labels:
labels
patience:
patience for early stopping (valid when val is given)
train_iters:
epochs
"""
# setup attack model
target_node = 384 #1554
model = Nettack(surrogate, nnodes = adj.shape[0], attack_structure = True, attack_features = False, device = device).to(device)
#set defense
defense = args.defense
def main():
degrees = adj.sum(0).A1
print('index ::', np.where(degrees == max(degrees)))
per_num = int(degrees[target_node])
if args.debug:
print('degrees (# of perturbations) :: {}'.format(per_num))
model.attack(features, adj, labels, target_node, per_num, direct = args.direct, n_influencers = influencers)
m_adj = model.modified_adj
m_features = model.modified_features
#S_D_Clean = SpectralDistance(adj,adj)
#print(S_D_Clean)
#S_D_Same = SpectralDistance(m_adj,m_adj)
#print(S_D_Same)
#print(adj.shape)
    S_Distance, eigv_dif, _ = SpectralDistance(adj, m_adj)  # unpack all three return values
#print(S_Distance)
#dif = m_adj-adj
#for r,c in zip(*dif.nonzero()):
# print(r,c,dif[r,c])
print(S_Distance)
def SpectralDistance(adj,m_adj):
#I = lil_matrix(np.eye(adj.shape[0]))
L_norm = csgraph.laplacian(adj)
L_norm_m = csgraph.laplacian(m_adj)
evals,evecs = np.linalg.eig(L_norm.todense())
evals = evals.real
#print(evals)
print(evecs.shape)
m_evals, m_evecs = np.linalg.eig(L_norm_m.todense())
m_evals = m_evals.real
evec_dif = evecs - m_evecs
print("Evec difference:")
print(evec_dif)
print("================")
#dif = (evals-m_evals)
dif2 = sum(m_evals)-sum(evals)
dif3 = np.linalg.norm(m_evals)-np.linalg.norm(evals)
#print(dif2)
#np.set_printoptions(threshold=np.inf)
#with open('Eigenvalus.log','a+') as f:
# print(dif2,file=f)
#print(dif,file=f)
#L_norm = csgraph.laplacian(np.diag(evals))
#L_norm_m = csgraph.laplacian(np.diag(m_evals))
#dif = (L_norm - L_norm_m)
#dif = (np.diag(evals)-np.diag(evals))
#print(np.linalg.norm(dif,axis=1))
dif1 = (np.diag(evals)-np.diag(m_evals))
"""
Dis here is the difference of eigenvalues:
"""
#d = evals - m_evals
#Dis = {dis:idx for idx,dis in enumerate(d) if dis>1}
#print(Dis)
S_Dis = np.linalg.norm(dif1)
#print(S_Dis)
#Dis = {d:idx for idx,d in enumerate(S_Dis) if d>=1}
#Dis = sorted(Dis,reverse=True)
#print(Dis)
#print(len(Dis))
#print(np.where(S_Dis == max(S_Dis)))
#dif = evals-m_evals
return S_Dis, dif2, evec_dif
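def _example_spectral_distance_toy():
    """Hedged worked example added for illustration; not part of the original script.

    Applies the same idea as SpectralDistance to two tiny dense adjacency
    matrices: compare Laplacian eigenvalues of a triangle graph before and
    after one edge is removed.
    """
    A_clean = np.array([[0.0, 1.0, 1.0],
                        [1.0, 0.0, 1.0],
                        [1.0, 1.0, 0.0]])
    A_pert = A_clean.copy()
    A_pert[0, 2] = A_pert[2, 0] = 0.0  # drop the (0, 2) edge
    evals_clean = np.linalg.eigvalsh(csgraph.laplacian(A_clean))
    evals_pert = np.linalg.eigvalsh(csgraph.laplacian(A_pert))
    return np.linalg.norm(evals_clean - evals_pert)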
"""
print("=======test on clean adj===================")
print("without defense :: ")
test(adj, features, target_node,defense_al=False)
print("with defense (with default setting):: ")
test(adj, features, target_node, defense_al = defense)
print("================ test on perturbed adj =================")
print("without defense ::")
test(m_adj, m_features, target_node,defense_al=False)
print("with defense (with default setting)::")
test(m_adj, m_features, target_node, defense_al = defense)
def test(adj, features, target, defense_al=False):
target_model = globals()[args.model](nfeat = features.shape[1], nhid = 16, nclass = labels.max().item()+1, dropout = 0.5, device = device)
target_model = target_model.to(device)
target_model.fit(features, adj, labels, idx_train, idx_val=idx_val, attention = defense_al)
target_model.eval()
_, output = target_model.test(idx_test=idx_test)
probs = torch.exp(output[[target_node]])[0]
print('probs: {}'.format(probs.detach().cpu().numpy()))
acc_test = accuracy(output[idx_test], labels[idx_test])
print('Test set accuracy:',
"accuracy = {:.4f}".format(acc_test.item()))
return acc_test.item()
"""
def multi_evecs():
cnt = 0
degrees = adj.sum(0).A1
node_list = select_nodes(num_target=10)
print(node_list)
a = []
angle = []
num = len(node_list)
def get_angle(x,y):
u1 = x/np.linalg.norm(x)
u2 = y/np.linalg.norm(y)
return np.arccos(np.clip(np.real(np.dot(u1, u2.T)), -1.0, 1.0))
print('=== Attacking %s nodes respectively ===' % num)
for target_node in tqdm(node_list):
n_perturbations = int(degrees[target_node])
if n_perturbations <1: # at least one perturbation
continue
model = Nettack(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=False, device=device)
model = model.to(device)
model.attack(features, adj, labels, target_node, n_perturbations, direct=args.direct, n_influencers = influencers, verbose=False)
modified_adj = model.modified_adj
modified_features = model.modified_features
S_Dis, sum_eigv_dif, evec_dif = SpectralDistance(adj,modified_adj)
a.append(evec_dif.flatten())
if(len(a)==2):
print('angle test:{}'.format(get_angle(a[0], a[1])))
a_list = [get_angle(x,y) for x in a for y in a]
mean = np.mean(a, axis=0)
var = np.var(a, axis=0)
np.set_printoptions(threshold=np.inf)
with open(args.dataset+'_'+args.model+'_Directions_rf.log','a+') as f:
print('Angle:',file=f)
print(a_list,file=f)
print('Mean:{}, Var:{}'.format(mean, var),file=f)
def multi_test():
cnt = 0
degrees = adj.sum(0).A1
node_list = select_nodes(num_target=10)
print(node_list)
num = len(node_list)
print('=== Attacking %s nodes respectively ===' % num)
num_tar = 0
for target_node in tqdm(node_list):
n_perturbations = int(degrees[target_node])
if n_perturbations <1: # at least one perturbation
continue
model = Nettack(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=False, device=device)
model = model.to(device)
model.attack(features, adj, labels, target_node, n_perturbations, direct=args.direct, n_influencers = influencers, verbose=False)
modified_adj = model.modified_adj
modified_features = model.modified_features
        S_Dis, sum_eigv_dif, _ = SpectralDistance(adj, modified_adj)  # unpack all three return values
print(target_node,'::',S_Dis)
with open(args.dataset+'_'+args.model+'_SpectralDistance_sum.log','a+') as f:
print('Target Node: {}, S_Dis: {}, Eigv_dif: {}'.format(target_node,S_Dis,sum_eigv_dif),file=f)
"""
acc = single_test(modified_adj, modified_features, target_node)
if acc == 0:
cnt += 1
num_tar += 1
with open(args.dataset+"_"+args.model+"_gsl.log","a+") as f:
print('classification rate : %s' % (1-cnt/num_tar), '# of targets:',num_tar,file=f)
print('classification rate : %s' % (1-cnt/num_tar), '# of targets:', num_tar)
"""
def single_test(adj, features, target_node):
'ALL the baselines'
# """defense models"""
# classifier = globals()[args.defensemodel](nnodes=adj.shape[0], nfeat=features.shape[1], nhid=16,
# nclass=labels.max().item() + 1, dropout=0.5, device=device)
# ''' test on GCN (poisoning attack), model could be GCN, GAT, GIN'''
classifier = globals()[args.model](nfeat=features.shape[1], nhid=16, nclass=labels.max().item() + 1, dropout=0.5, device=device)
classifier = classifier.to(device)
classifier.fit(features, adj, labels, idx_train,
idx_val=idx_val,
idx_test=idx_test,
verbose=False, attention=defense) #model_name=model_name
classifier.eval()
acc_overall, output = classifier.test(idx_test, ) #model_name=model_name
probs = torch.exp(output[[target_node]])
acc_test, pred_y, true_y = accuracy_1(output[[target_node]], labels[target_node])
with open(args.dataset+"_"+args.model+"_gsl.log","a+") as f:
print('Defense: {}, target:{}, pred:{}, label: {}'.format(defense, target_node, pred_y.item(),true_y.item()),file=f)
print('target:{}, pred:{}, label: {}'.format(target_node, pred_y.item(), true_y.item()))
print('Pred probs', probs.data)
return acc_test.item()
"""=======Basic Functions============="""
def select_nodes(num_target = 10):
'''
selecting nodes as reported in nettack paper:
(i) the 10 nodes with highest margin of classification, i.e. they are clearly correctly classified,
(ii) the 10 nodes with lowest margin (but still correctly classified) and
(iii) 20 more nodes randomly
'''
gcn = globals()[args.model](nfeat=features.shape[1],
nhid=16,
nclass=labels.max().item() + 1,
dropout=0.5, device=device)
gcn = gcn.to(device)
gcn.fit(features, adj, labels, idx_train, idx_test, verbose=True)
gcn.eval()
output = gcn.predict()
degrees = adj.sum(0).A1
margin_dict = {}
for idx in tqdm(idx_test):
margin = classification_margin(output[idx], labels[idx])
acc, _, _ = accuracy_1(output[[idx]], labels[idx])
if acc==0 or int(degrees[idx])<1: # only keep the correctly classified nodes
continue
"""check the outliers:"""
neighbours = list(adj.todense()[idx].nonzero()[1])
y = [labels[i] for i in neighbours]
node_y = labels[idx]
aa = node_y==y
outlier_score = 1- aa.sum()/len(aa)
if outlier_score >=0.5:
continue
margin_dict[idx] = margin
sorted_margins = sorted(margin_dict.items(), key=lambda x:x[1], reverse=True)
high = [x for x, y in sorted_margins[: num_target]]
low = [x for x, y in sorted_margins[-num_target: ]]
other = [x for x, y in sorted_margins[num_target: -num_target]]
other = np.random.choice(other, 2*num_target, replace=False).tolist()
return other + high + low
def accuracy_1(output,labels):
try:
num = len(labels)
except:
num = 1
if type(labels) is not torch.Tensor:
labels = torch.LongTensor([labels])
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct/num, preds, labels
if __name__ == "__main__":
#main()
#multi_test()
multi_evecs()
|
[
"torch.LongTensor",
"torch.exp",
"torch.cuda.is_available",
"deeprobust.graph.targeted_attack.Nettack",
"numpy.linalg.norm",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.dot",
"numpy.random.seed",
"numpy.random.choice",
"numpy.set_printoptions",
"torch.device",
"torch.manual_seed",
"deeprobust.graph.data.Dataset",
"scipy.sparse.csgraph.laplacian",
"tqdm.tqdm",
"numpy.diag",
"torch.cuda.manual_seed",
"numpy.var"
] |
[((442, 467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (465, 467), False, 'import argparse\n'), ((1148, 1173), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1171, 1173), False, 'import torch\n'), ((1183, 1227), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (1195, 1227), False, 'import torch\n'), ((1228, 1253), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1242, 1253), True, 'import numpy as np\n'), ((1254, 1282), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1271, 1282), False, 'import torch\n'), ((1621, 1661), 'deeprobust.graph.data.Dataset', 'Dataset', ([], {'root': '"""/tmp/"""', 'name': 'args.dataset'}), "(root='/tmp/', name=args.dataset)\n", (1628, 1661), False, 'from deeprobust.graph.data import Dataset\n'), ((1365, 1398), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.cuda'], {}), '(args.cuda)\n', (1387, 1398), False, 'import torch\n'), ((3400, 3422), 'scipy.sparse.csgraph.laplacian', 'csgraph.laplacian', (['adj'], {}), '(adj)\n', (3417, 3422), False, 'from scipy.sparse import csgraph, lil_matrix\n'), ((3438, 3462), 'scipy.sparse.csgraph.laplacian', 'csgraph.laplacian', (['m_adj'], {}), '(m_adj)\n', (3455, 3462), False, 'from scipy.sparse import csgraph, lil_matrix\n'), ((4534, 4554), 'numpy.linalg.norm', 'np.linalg.norm', (['dif1'], {}), '(dif1)\n', (4548, 4554), True, 'import numpy as np\n'), ((6470, 6485), 'tqdm.tqdm', 'tqdm', (['node_list'], {}), '(node_list)\n', (6474, 6485), False, 'from tqdm import tqdm\n'), ((7269, 7287), 'numpy.mean', 'np.mean', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (7276, 7287), True, 'import numpy as np\n'), ((7298, 7315), 'numpy.var', 'np.var', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (7304, 7315), True, 'import numpy as np\n'), ((7321, 7358), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (7340, 7358), True, 'import numpy as np\n'), ((7801, 7816), 'tqdm.tqdm', 'tqdm', (['node_list'], {}), '(node_list)\n', (7805, 7816), False, 'from tqdm import tqdm\n'), ((9915, 9947), 'torch.exp', 'torch.exp', (['output[[target_node]]'], {}), '(output[[target_node]])\n', (9924, 9947), False, 'import torch\n'), ((11109, 11123), 'tqdm.tqdm', 'tqdm', (['idx_test'], {}), '(idx_test)\n', (11113, 11123), False, 'from tqdm import tqdm\n'), ((2396, 2500), 'deeprobust.graph.targeted_attack.Nettack', 'Nettack', (['surrogate'], {'nnodes': 'adj.shape[0]', 'attack_structure': '(True)', 'attack_features': '(False)', 'device': 'device'}), '(surrogate, nnodes=adj.shape[0], attack_structure=True,\n attack_features=False, device=device)\n', (2403, 2500), False, 'from deeprobust.graph.targeted_attack import Nettack\n'), ((3863, 3886), 'numpy.linalg.norm', 'np.linalg.norm', (['m_evals'], {}), '(m_evals)\n', (3877, 3886), True, 'import numpy as np\n'), ((3887, 3908), 'numpy.linalg.norm', 'np.linalg.norm', (['evals'], {}), '(evals)\n', (3901, 3908), True, 'import numpy as np\n'), ((4311, 4325), 'numpy.diag', 'np.diag', (['evals'], {}), '(evals)\n', (4318, 4325), True, 'import numpy as np\n'), ((4326, 4342), 'numpy.diag', 'np.diag', (['m_evals'], {}), '(m_evals)\n', (4333, 4342), True, 'import numpy as np\n'), ((6637, 6741), 'deeprobust.graph.targeted_attack.Nettack', 'Nettack', (['surrogate'], {'nnodes': 'adj.shape[0]', 'attack_structure': '(True)', 'attack_features': '(False)', 'device': 'device'}), '(surrogate, 
nnodes=adj.shape[0], attack_structure=True,\n attack_features=False, device=device)\n', (6644, 6741), False, 'from deeprobust.graph.targeted_attack import Nettack\n'), ((7968, 8072), 'deeprobust.graph.targeted_attack.Nettack', 'Nettack', (['surrogate'], {'nnodes': 'adj.shape[0]', 'attack_structure': '(True)', 'attack_features': '(False)', 'device': 'device'}), '(surrogate, nnodes=adj.shape[0], attack_structure=True,\n attack_features=False, device=device)\n', (7975, 8072), False, 'from deeprobust.graph.targeted_attack import Nettack\n'), ((12215, 12241), 'torch.LongTensor', 'torch.LongTensor', (['[labels]'], {}), '([labels])\n', (12231, 12241), False, 'import torch\n'), ((6264, 6281), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (6278, 6281), True, 'import numpy as np\n'), ((6297, 6314), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (6311, 6314), True, 'import numpy as np\n'), ((11950, 12004), 'numpy.random.choice', 'np.random.choice', (['other', '(2 * num_target)'], {'replace': '(False)'}), '(other, 2 * num_target, replace=False)\n', (11966, 12004), True, 'import numpy as np\n'), ((6356, 6372), 'numpy.dot', 'np.dot', (['u1', 'u2.T'], {}), '(u1, u2.T)\n', (6362, 6372), True, 'import numpy as np\n')]
|
import numpy as np
import json
from os.path import join
from tqdm import tqdm
from scipy.optimize import least_squares
from pose_optimize.multiview_geo import reproject_error
DEBUG=False
def reproject_error_loss(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23):
'''
Return:
kp4_e, kp6_e: error array, both (23,) shape
'''
assert p3d.shape == (num_kpt, 3)
assert p4.shape == (num_kpt, 2)
assert p6.shape == (num_kpt, 2)
kp4_recon = np.dot(cam_proj_4[0:3,0:3],p3d.T) + cam_proj_4[0:3,3].reshape([-1,1])
kp6_recon = np.dot(cam_proj_6[0:3,0:3],p3d.T) + cam_proj_6[0:3,3].reshape([-1,1])
kp4_recon = kp4_recon[0:2,:]/kp4_recon[2,:]
kp6_recon = kp6_recon[0:2,:]/kp6_recon[2,:]
# kp4_e = np.linalg.norm(kp4_recon.T - p4, axis=1)
# kp6_e = np.linalg.norm(kp6_recon.T - p6, axis=1)
kp4_e = np.sqrt(np.sum(np.square(kp4_recon.T - p4), axis=1))
kp6_e = np.sqrt(np.sum(np.square(kp6_recon.T - p6), axis=1))
return kp4_e, kp6_e
def reproject_error_loss_score(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23):
'''
Return:
kp4_e, kp6_e: error array, both (23,) shape
'''
assert p3d.shape == (num_kpt, 3)
assert p4.shape == (num_kpt, 3)
assert p6.shape == (num_kpt, 3)
kp4_recon = np.dot(cam_proj_4[0:3,0:3],p3d.T) + cam_proj_4[0:3,3].reshape([-1,1])
kp6_recon = np.dot(cam_proj_6[0:3,0:3],p3d.T) + cam_proj_6[0:3,3].reshape([-1,1])
kp4_recon = kp4_recon[0:2,:]/kp4_recon[2,:]
kp6_recon = kp6_recon[0:2,:]/kp6_recon[2,:]
# kp4_e = np.linalg.norm(kp4_recon.T - p4, axis=1)
# kp6_e = np.linalg.norm(kp6_recon.T - p6, axis=1)
kp4_e = p4[:,2]*np.sqrt(np.sum(np.square(kp4_recon.T - p4[:,:2]), axis=1))
kp6_e = p6[:,2]*np.sqrt(np.sum(np.square(kp6_recon.T - p6[:,:2]), axis=1))
return kp4_e, kp6_e
def optimze_loss_2d(p3d_faltten, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23, lambda_reproj = 1):
'''
Only consider reprojection loss
'''
l1 = lambda_reproj
p3d = p3d_faltten.reshape([-1,3])
kp4_e, kp6_e = reproject_error_loss(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23)
return np.concatenate((l1*kp4_e, l1*kp6_e))
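# Editor's note: minimal synthetic sketch, not part of the original pipeline. It shows
# how the flattened residual returned by optimze_loss_2d plugs into
# scipy.optimize.least_squares; the projection matrices, detections, and noise levels
# below are toy placeholders, not real calibration data.
def _demo_refine_2d(num_kpt=23):
    rng = np.random.default_rng(0)
    gt = rng.normal(size=(num_kpt, 3)) + np.array([0.0, 0.0, 5.0])  # keypoints in front of both cameras
    proj4 = np.hstack([np.eye(3), np.zeros((3, 1))])
    proj6 = np.hstack([np.eye(3), np.array([[0.1], [0.0], [0.0]])])
    def project(P, X):
        x = np.dot(P[:, :3], X.T) + P[:, 3:4]
        return (x[:2] / x[2]).T
    p4 = project(proj4, gt) + rng.normal(scale=1e-3, size=(num_kpt, 2))
    p6 = project(proj6, gt) + rng.normal(scale=1e-3, size=(num_kpt, 2))
    x0 = (gt + rng.normal(scale=0.05, size=gt.shape)).ravel()  # perturbed initial guess
    res = least_squares(optimze_loss_2d, x0, method='trf', args=(p4, p6, proj4, proj6))
    return res.x.reshape(-1, 3)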
def shape_dis_loss(kpt_3d_array, median_bone, left_list, right_list, num_kpt=23):
'''
Shape loss given prior shape information
'''
assert kpt_3d_array.shape == (num_kpt, 3)
assert len(left_list) == len(right_list)
assert len(left_list) == len(median_bone.keys())
num_bone = len(left_list)
left_error = []
right_error = []
left_error = np.zeros(num_bone)
right_error = np.zeros(num_bone)
for i in range(num_bone):
bon_vec_left = kpt_3d_array[left_list[i][1],:] - kpt_3d_array[left_list[i][0],:]
left_error_i = np.sqrt(np.dot(bon_vec_left, bon_vec_left)) - median_bone[str(i)]
left_error[i] = abs(left_error_i)
bon_vec_right = kpt_3d_array[right_list[i][1],:] - kpt_3d_array[right_list[i][0],:]
right_error_i = np.sqrt(np.dot(bon_vec_right, bon_vec_right)) - median_bone[str(i)]
right_error[i] = abs(right_error_i)
return left_error, right_error
def optimze_loss(p3d_faltten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone, num_kpt=23, lambda_reproj = 0.1, lambda_shape=5.0):
'''
Full Loss with shape prior
'''
l1 = lambda_reproj
l2 = lambda_shape
p3d = p3d_faltten.reshape([-1,3])
kp4_e, kp6_e = reproject_error_loss_score(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23)
left_error, right_error = shape_dis_loss(p3d, median_bone, left_list, right_list, num_kpt=23)
return np.concatenate((l1*kp4_e, l1*kp6_e, l2*left_error, l2*right_error))
def optimze_loss_no_score(p3d_faltten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone, num_kpt=23, lambda_reproj = 0.1, lambda_shape=1.0):
'''
Full Loss with shape prior
'''
l1 = lambda_reproj
l2 = lambda_shape
p3d = p3d_faltten.reshape([-1,3])
kp4_e, kp6_e = reproject_error_loss(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23)
left_error, right_error = shape_dis_loss(p3d, median_bone, left_list, right_list, num_kpt=23)
return np.concatenate((l1*kp4_e, l1*kp6_e, l2*left_error, l2*right_error))
def centerize_keypoint(p1, p2, norm_dst):
'''
    Re-center two points symmetrically about their midpoint (the shifted points end up one unit apart)
'''
assert p1.shape == (3,)
assert p2.shape == (3,)
p_center = (p1+p2)/2
p_vec = (p1-p2)
p_dis = np.sqrt(np.dot(p_vec, p_vec))
p1_shift = p_center + 0.5*p_vec/p_dis
p2_shift = p_center - 0.5*p_vec/p_dis
return p1_shift, p2_shift
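# Editor's note: tiny illustration, not part of the original code. Two toy joints
# 4 units apart are pulled symmetrically toward their midpoint; as written above,
# centerize_keypoint does not use its norm_dst argument, so the shifted points
# always end up exactly 1 unit apart.
def _demo_centerize():
    a = np.array([0.0, 0.0, 0.0])
    b = np.array([4.0, 0.0, 0.0])
    return centerize_keypoint(a, b, norm_dst=1.0)  # -> ([1.5, 0, 0], [2.5, 0, 0])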
def shape_initialize(left_list, right_list, median_bone, kpt_3d_array, num_kpt=23):
'''
    Initialize the 3D positions of the human joints from the shape prior
'''
assert kpt_3d_array.shape == (num_kpt,3)
assert len(left_list) == len(right_list)
assert len(left_list) == len(median_bone.keys())
num_bone = len(left_list)
left_ratio_list, right_ratio_list = [],[]
vec_left_list, vec_right_list = [], []
ratio_outlier = 1.5
ratio_draw_back = 1.1
for i in range(num_bone):
bon_vec_left = kpt_3d_array[left_list[i][1],:] - kpt_3d_array[left_list[i][0],:]
ratio_left = np.sqrt(np.dot(bon_vec_left, bon_vec_left))/ median_bone[str(i)]
left_ratio_list += [ratio_left]
vec_left_list += [bon_vec_left]
for i in range(num_bone):
bon_vec_right = kpt_3d_array[right_list[i][1],:] - kpt_3d_array[right_list[i][0],:]
ratio_right = np.sqrt(np.dot(bon_vec_right, bon_vec_right))/median_bone[str(i)]
right_ratio_list += [ratio_right]
vec_right_list += [bon_vec_right]
kp_3d_new = np.zeros(kpt_3d_array.shape)
# Adjust Shoulder to hip
kp_3d_new[left_list[2][0], :], kp_3d_new[left_list[2][1], :] = centerize_keypoint(kpt_3d_array[left_list[2][0], :], kpt_3d_array[left_list[2][1], :] , median_bone["2"])
kp_3d_new[right_list[2][0], :], kp_3d_new[right_list[2][1], :] = centerize_keypoint(kpt_3d_array[right_list[2][0], :], kpt_3d_array[right_list[2][1], :] , median_bone["2"])
# Adjust shoulder and Hip pair
sh_p = left_list[0]
hi_p = left_list[1]
kp_3d_new[sh_p[0]], kp_3d_new[sh_p[1]] = centerize_keypoint(kp_3d_new[sh_p[0]], kp_3d_new[sh_p[1]], median_bone["0"]) # shoulder
kp_3d_new[hi_p[0]], kp_3d_new[hi_p[1]] = centerize_keypoint(kp_3d_new[hi_p[0]], kp_3d_new[hi_p[1]], median_bone["1"]) # hip
# left part
for i in range(2, num_bone):
start_indx, end_indx = tuple(left_list[i])
if left_ratio_list[i] < ratio_outlier:
kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_left_list[i]
else:
kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_left_list[i]/left_ratio_list[i]*ratio_draw_back
for i in range(2, num_bone):
start_indx, end_indx = tuple(right_list[i])
if right_ratio_list[i] < ratio_outlier:
kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_right_list[i]
else:
kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_right_list[i]/right_ratio_list[i]*ratio_draw_back
# left_error, right_error = loss_kpt_3d(kp_3d_new, median_bone, left_list, right_list)
# print(left_error)
# print(right_error)
# print("OK")
return kp_3d_new
def fintune_human_keypoint_2d(P4, P6, path4, path6, path3D, path_finetune=None):
with open(path3D,"r") as f:
data_3d = json.load(f)
with open(path4, "r") as f:
data_dict4 = json.load(f)
with open(path6, "r") as f:
data_dict6 = json.load(f)
# frame_id = next(iter(data_3d["3D"].keys()))
# person_id = next(iter(data_3d["3D"][frame_id].keys()))
# # frame_id = "000005"
# # person_id = "000"
cam_proj_4 = np.array(data_3d["P4"])
cam_proj_6 = np.array(data_3d["P6"])
data_3d_dict = {}
data_3d_dict["P4"] = data_3d["P4"]
data_3d_dict["P6"] = data_3d["P6"]
data_3d_dict["3D"] = {}
data_3d_dict["kp4_e"] = {}
data_3d_dict["kp6_e"] = {}
frame_list = [k for k in data_dict4.keys()]
frame_list.sort()
for i, frame_id in enumerate(tqdm(frame_list)):
frame_3d_dict = {}
kp4_dict = {}
kp6_dict = {}
person_list = [k for k in data_dict4[frame_id].keys()]
person_list.sort()
for person_id in person_list:
p3d_flatten = np.array(data_3d["3D"][frame_id][person_id]).ravel()
p4_homo = np.array(data_dict4[frame_id][person_id]).reshape([-1,3])
p6_homo = np.array(data_dict6[frame_id][person_id]).reshape([-1,3])
p4 = p4_homo[:,:2]
p6 = p6_homo[:,:2]
if DEBUG:
loss_init = optimze_loss_2d(p3d_flatten, p4, p6, cam_proj_4, cam_proj_6)
print("Initial error", str(np.sqrt(np.sum(np.square(loss_init)))) )
res = least_squares(optimze_loss_2d, p3d_flatten, verbose=0, x_scale='jac', ftol=1e-4, method='trf',args=(p4, p6, cam_proj_4, cam_proj_6))
if DEBUG:
loss_final = res.fun
print("Final error", str(np.sqrt(np.sum(np.square(loss_final)))) )
loss_final = optimze_loss_2d(res.x, p4, p6, cam_proj_4, cam_proj_6)
print("Final error", str(np.sqrt(np.sum(np.square(loss_final)))) )
p3d_tune = res.x.reshape([-1,3])
kp4_recon, kp6_recon, kp4_e, kp6_e = reproject_error(p3d_tune, p4, p6, cam_proj_4, cam_proj_6)
frame_3d_dict[person_id] = p3d_tune.tolist()
kp4_dict[person_id] = kp4_e.tolist()
kp6_dict[person_id] = kp6_e.tolist()
data_3d_dict["3D"][frame_id] = frame_3d_dict
data_3d_dict["kp4_e"][frame_id] = kp4_dict
data_3d_dict["kp6_e"][frame_id] = kp6_dict
if path_finetune is not None:
with open(path_finetune, "w") as f:
json.dump(data_3d_dict, f)
return data_3d_dict
def finetune_human_3d(path_finetune_input, path4, path6, shape_prior_path, shape_prior_finetune_output, frame_list=None):
'''
    path_finetune_input: JSON file with the triangulated 3D keypoints to refine
    path4: data_C4.json (2D keypoint detections with scores for camera 4)
    path6: data_C6.json (2D keypoint detections with scores for camera 6)
    shape_prior_path: JSON file with the bone-length shape prior (left_list, right_list, median_bone)
    shape_prior_finetune_output: output JSON path for the refined 3D keypoints and reprojection errors
'''
with open(path_finetune_input,"r") as f:
data_3d = json.load(f)
with open(path4, "r") as f:
data_dict4 = json.load(f)
with open(path6, "r") as f:
data_dict6 = json.load(f)
with open(shape_prior_path, 'r') as f:
data_prior = json.load(f)
left_list = data_prior["left_list"]
right_list = data_prior["right_list"]
median_bone = data_prior["median_bone"]
cam_proj_4 = np.array(data_3d["P4"])
cam_proj_6 = np.array(data_3d["P6"])
data_3d_dict = {}
data_3d_dict["P4"] = data_3d["P4"]
data_3d_dict["P6"] = data_3d["P6"]
data_3d_dict["3D"] = {}
data_3d_dict["kp4_e"] = {}
data_3d_dict["kp6_e"] = {}
if frame_list:
for f in frame_list:
if f not in data_dict4.keys():
print("KEY ERROR!")
assert 0
else:
frame_list = [k for k in data_dict4.keys()]
frame_list.sort()
for i, frame_id in enumerate(tqdm(frame_list)):
frame_3d_dict = {}
kp4_dict = {}
kp6_dict = {}
person_list = [k for k in data_dict4[frame_id].keys()]
person_list.sort()
for person_id in person_list:
p3d = np.array(data_3d["3D"][frame_id][person_id]).reshape([-1,3])
p3d_init = shape_initialize(left_list, right_list, median_bone, p3d)
p4_homo = np.array(data_dict4[frame_id][person_id]).reshape([-1,3])
p6_homo = np.array(data_dict6[frame_id][person_id]).reshape([-1,3])
p4 = p4_homo
p6 = p6_homo
p3d_flatten = p3d_init.flatten()
# loss_init = optimze_loss(p3d_flatten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
#print(np.linalg.norm(loss_init))
res = least_squares(optimze_loss, p3d_flatten, verbose=0, x_scale='jac', ftol=1e-2, method='trf',args=(p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone))
p3d_tune = res.x.reshape([-1,3])
# loss_res = optimze_loss(res.x, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
# print(np.linalg.norm(loss_res))
kp4_recon, kp6_recon, kp4_e, kp6_e = reproject_error(p3d_tune, p4[:,:2], p6[:,:2], cam_proj_4, cam_proj_6)
frame_3d_dict[person_id] = p3d_tune.tolist()
kp4_dict[person_id] = kp4_e.tolist()
kp6_dict[person_id] = kp6_e.tolist()
data_3d_dict["3D"][frame_id] = frame_3d_dict
data_3d_dict["kp4_e"][frame_id] = kp4_dict
data_3d_dict["kp6_e"][frame_id] = kp6_dict
with open(shape_prior_finetune_output, "w") as f:
json.dump(data_3d_dict, f)
def finetune_human_3d_no_score(path_finetune_input, path4, path6, shape_prior_path, shape_prior_finetune_output, frame_list=None):
'''
    path_finetune_input: JSON file with the triangulated 3D keypoints to refine
    path4: data_C4.json (2D keypoint detections for camera 4)
    path6: data_C6.json (2D keypoint detections for camera 6)
    shape_prior_path: JSON file with the bone-length shape prior (left_list, right_list, median_bone)
    shape_prior_finetune_output: output JSON path for the refined 3D keypoints and reprojection errors
'''
with open(path_finetune_input,"r") as f:
data_3d = json.load(f)
with open(path4, "r") as f:
data_dict4 = json.load(f)
with open(path6, "r") as f:
data_dict6 = json.load(f)
with open(shape_prior_path, 'r') as f:
data_prior = json.load(f)
left_list = data_prior["left_list"]
right_list = data_prior["right_list"]
median_bone = data_prior["median_bone"]
cam_proj_4 = np.array(data_3d["P4"])
cam_proj_6 = np.array(data_3d["P6"])
data_3d_dict = {}
data_3d_dict["P4"] = data_3d["P4"]
data_3d_dict["P6"] = data_3d["P6"]
data_3d_dict["3D"] = {}
data_3d_dict["kp4_e"] = {}
data_3d_dict["kp6_e"] = {}
if frame_list:
for f in frame_list:
if f not in data_dict4.keys():
print("KEY ERROR!")
assert 0
else:
frame_list = [k for k in data_dict4.keys()]
frame_list.sort()
for i, frame_id in enumerate(tqdm(frame_list)):
if i > 300:
import sys
sys.exit()
frame_3d_dict = {}
kp4_dict = {}
kp6_dict = {}
person_list = [k for k in data_dict4[frame_id].keys()]
person_list.sort()
for person_id in person_list:
try:
p3d = np.array(data_3d["3D"][frame_id][person_id]).reshape([-1,3])
p3d_init = shape_initialize(left_list, right_list, median_bone, p3d)
p4_homo = np.array(data_dict4[frame_id][person_id]).reshape([-1,3])
p6_homo = np.array(data_dict6[frame_id][person_id]).reshape([-1,3])
p4 = p4_homo[:,:2]
p6 = p6_homo[:,:2]
p3d_flatten = p3d_init.flatten()
# loss_init = optimze_loss(p3d_flatten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
#print(np.linalg.norm(loss_init))
res = least_squares(optimze_loss_no_score, p3d_flatten, verbose=2, x_scale='jac', ftol=1e-2, method='trf',args=(p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone))
p3d_tune = res.x.reshape([-1,3])
# loss_res = optimze_loss(res.x, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
# print(np.linalg.norm(loss_res))
kp4_recon, kp6_recon, kp4_e, kp6_e = reproject_error(p3d_tune, p4[:,:2], p6[:,:2], cam_proj_4, cam_proj_6)
frame_3d_dict[person_id] = p3d_tune.tolist()
kp4_dict[person_id] = kp4_e.tolist()
kp6_dict[person_id] = kp6_e.tolist()
except:
print("Error")
data_3d_dict["3D"][frame_id] = frame_3d_dict
data_3d_dict["kp4_e"][frame_id] = kp4_dict
data_3d_dict["kp6_e"][frame_id] = kp6_dict
with open(shape_prior_finetune_output, "w") as f:
json.dump(data_3d_dict, f)
|
[
"scipy.optimize.least_squares",
"tqdm.tqdm",
"numpy.square",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"pose_optimize.multiview_geo.reproject_error",
"numpy.concatenate",
"sys.exit",
"json.load",
"json.dump"
] |
[((2154, 2194), 'numpy.concatenate', 'np.concatenate', (['(l1 * kp4_e, l1 * kp6_e)'], {}), '((l1 * kp4_e, l1 * kp6_e))\n', (2168, 2194), True, 'import numpy as np\n'), ((2567, 2585), 'numpy.zeros', 'np.zeros', (['num_bone'], {}), '(num_bone)\n', (2575, 2585), True, 'import numpy as np\n'), ((2604, 2622), 'numpy.zeros', 'np.zeros', (['num_bone'], {}), '(num_bone)\n', (2612, 2622), True, 'import numpy as np\n'), ((3642, 3717), 'numpy.concatenate', 'np.concatenate', (['(l1 * kp4_e, l1 * kp6_e, l2 * left_error, l2 * right_error)'], {}), '((l1 * kp4_e, l1 * kp6_e, l2 * left_error, l2 * right_error))\n', (3656, 3717), True, 'import numpy as np\n'), ((4201, 4276), 'numpy.concatenate', 'np.concatenate', (['(l1 * kp4_e, l1 * kp6_e, l2 * left_error, l2 * right_error)'], {}), '((l1 * kp4_e, l1 * kp6_e, l2 * left_error, l2 * right_error))\n', (4215, 4276), True, 'import numpy as np\n'), ((5689, 5717), 'numpy.zeros', 'np.zeros', (['kpt_3d_array.shape'], {}), '(kpt_3d_array.shape)\n', (5697, 5717), True, 'import numpy as np\n'), ((7810, 7833), 'numpy.array', 'np.array', (["data_3d['P4']"], {}), "(data_3d['P4'])\n", (7818, 7833), True, 'import numpy as np\n'), ((7851, 7874), 'numpy.array', 'np.array', (["data_3d['P6']"], {}), "(data_3d['P6'])\n", (7859, 7874), True, 'import numpy as np\n'), ((10745, 10768), 'numpy.array', 'np.array', (["data_3d['P4']"], {}), "(data_3d['P4'])\n", (10753, 10768), True, 'import numpy as np\n'), ((10786, 10809), 'numpy.array', 'np.array', (["data_3d['P6']"], {}), "(data_3d['P6'])\n", (10794, 10809), True, 'import numpy as np\n'), ((13757, 13780), 'numpy.array', 'np.array', (["data_3d['P4']"], {}), "(data_3d['P4'])\n", (13765, 13780), True, 'import numpy as np\n'), ((13798, 13821), 'numpy.array', 'np.array', (["data_3d['P6']"], {}), "(data_3d['P6'])\n", (13806, 13821), True, 'import numpy as np\n'), ((471, 506), 'numpy.dot', 'np.dot', (['cam_proj_4[0:3, 0:3]', 'p3d.T'], {}), '(cam_proj_4[0:3, 0:3], p3d.T)\n', (477, 506), True, 'import numpy as np\n'), ((557, 592), 'numpy.dot', 'np.dot', (['cam_proj_6[0:3, 0:3]', 'p3d.T'], {}), '(cam_proj_6[0:3, 0:3], p3d.T)\n', (563, 592), True, 'import numpy as np\n'), ((1286, 1321), 'numpy.dot', 'np.dot', (['cam_proj_4[0:3, 0:3]', 'p3d.T'], {}), '(cam_proj_4[0:3, 0:3], p3d.T)\n', (1292, 1321), True, 'import numpy as np\n'), ((1372, 1407), 'numpy.dot', 'np.dot', (['cam_proj_6[0:3, 0:3]', 'p3d.T'], {}), '(cam_proj_6[0:3, 0:3], p3d.T)\n', (1378, 1407), True, 'import numpy as np\n'), ((4476, 4496), 'numpy.dot', 'np.dot', (['p_vec', 'p_vec'], {}), '(p_vec, p_vec)\n', (4482, 4496), True, 'import numpy as np\n'), ((7475, 7487), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7484, 7487), False, 'import json\n'), ((7541, 7553), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7550, 7553), False, 'import json\n'), ((7607, 7619), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7616, 7619), False, 'import json\n'), ((8170, 8186), 'tqdm.tqdm', 'tqdm', (['frame_list'], {}), '(frame_list)\n', (8174, 8186), False, 'from tqdm import tqdm\n'), ((10359, 10371), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10368, 10371), False, 'import json\n'), ((10425, 10437), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10434, 10437), False, 'import json\n'), ((10491, 10503), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10500, 10503), False, 'import json\n'), ((10572, 10584), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10581, 10584), False, 'import json\n'), ((11280, 11296), 'tqdm.tqdm', 'tqdm', (['frame_list'], {}), '(frame_list)\n', (11284, 11296), 
False, 'from tqdm import tqdm\n'), ((13005, 13031), 'json.dump', 'json.dump', (['data_3d_dict', 'f'], {}), '(data_3d_dict, f)\n', (13014, 13031), False, 'import json\n'), ((13371, 13383), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13380, 13383), False, 'import json\n'), ((13437, 13449), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13446, 13449), False, 'import json\n'), ((13503, 13515), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13512, 13515), False, 'import json\n'), ((13584, 13596), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13593, 13596), False, 'import json\n'), ((14292, 14308), 'tqdm.tqdm', 'tqdm', (['frame_list'], {}), '(frame_list)\n', (14296, 14308), False, 'from tqdm import tqdm\n'), ((16235, 16261), 'json.dump', 'json.dump', (['data_3d_dict', 'f'], {}), '(data_3d_dict, f)\n', (16244, 16261), False, 'import json\n'), ((862, 889), 'numpy.square', 'np.square', (['(kp4_recon.T - p4)'], {}), '(kp4_recon.T - p4)\n', (871, 889), True, 'import numpy as np\n'), ((927, 954), 'numpy.square', 'np.square', (['(kp6_recon.T - p6)'], {}), '(kp6_recon.T - p6)\n', (936, 954), True, 'import numpy as np\n'), ((8940, 9080), 'scipy.optimize.least_squares', 'least_squares', (['optimze_loss_2d', 'p3d_flatten'], {'verbose': '(0)', 'x_scale': '"""jac"""', 'ftol': '(0.0001)', 'method': '"""trf"""', 'args': '(p4, p6, cam_proj_4, cam_proj_6)'}), "(optimze_loss_2d, p3d_flatten, verbose=0, x_scale='jac', ftol=\n 0.0001, method='trf', args=(p4, p6, cam_proj_4, cam_proj_6))\n", (8953, 9080), False, 'from scipy.optimize import least_squares\n'), ((9504, 9561), 'pose_optimize.multiview_geo.reproject_error', 'reproject_error', (['p3d_tune', 'p4', 'p6', 'cam_proj_4', 'cam_proj_6'], {}), '(p3d_tune, p4, p6, cam_proj_4, cam_proj_6)\n', (9519, 9561), False, 'from pose_optimize.multiview_geo import reproject_error\n'), ((9977, 10003), 'json.dump', 'json.dump', (['data_3d_dict', 'f'], {}), '(data_3d_dict, f)\n', (9986, 10003), False, 'import json\n'), ((12111, 12286), 'scipy.optimize.least_squares', 'least_squares', (['optimze_loss', 'p3d_flatten'], {'verbose': '(0)', 'x_scale': '"""jac"""', 'ftol': '(0.01)', 'method': '"""trf"""', 'args': '(p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)'}), "(optimze_loss, p3d_flatten, verbose=0, x_scale='jac', ftol=\n 0.01, method='trf', args=(p4, p6, cam_proj_4, cam_proj_6, left_list,\n right_list, median_bone))\n", (12124, 12286), False, 'from scipy.optimize import least_squares\n'), ((12544, 12615), 'pose_optimize.multiview_geo.reproject_error', 'reproject_error', (['p3d_tune', 'p4[:, :2]', 'p6[:, :2]', 'cam_proj_4', 'cam_proj_6'], {}), '(p3d_tune, p4[:, :2], p6[:, :2], cam_proj_4, cam_proj_6)\n', (12559, 12615), False, 'from pose_optimize.multiview_geo import reproject_error\n'), ((14367, 14377), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14375, 14377), False, 'import sys\n'), ((1685, 1719), 'numpy.square', 'np.square', (['(kp4_recon.T - p4[:, :2])'], {}), '(kp4_recon.T - p4[:, :2])\n', (1694, 1719), True, 'import numpy as np\n'), ((1764, 1798), 'numpy.square', 'np.square', (['(kp6_recon.T - p6[:, :2])'], {}), '(kp6_recon.T - p6[:, :2])\n', (1773, 1798), True, 'import numpy as np\n'), ((2773, 2807), 'numpy.dot', 'np.dot', (['bon_vec_left', 'bon_vec_left'], {}), '(bon_vec_left, bon_vec_left)\n', (2779, 2807), True, 'import numpy as np\n'), ((3006, 3042), 'numpy.dot', 'np.dot', (['bon_vec_right', 'bon_vec_right'], {}), '(bon_vec_right, bon_vec_right)\n', (3012, 3042), True, 'import numpy as np\n'), ((5237, 5271), 'numpy.dot', 'np.dot', 
(['bon_vec_left', 'bon_vec_left'], {}), '(bon_vec_left, bon_vec_left)\n', (5243, 5271), True, 'import numpy as np\n'), ((5526, 5562), 'numpy.dot', 'np.dot', (['bon_vec_right', 'bon_vec_right'], {}), '(bon_vec_right, bon_vec_right)\n', (5532, 5562), True, 'import numpy as np\n'), ((15258, 15441), 'scipy.optimize.least_squares', 'least_squares', (['optimze_loss_no_score', 'p3d_flatten'], {'verbose': '(2)', 'x_scale': '"""jac"""', 'ftol': '(0.01)', 'method': '"""trf"""', 'args': '(p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)'}), "(optimze_loss_no_score, p3d_flatten, verbose=2, x_scale='jac',\n ftol=0.01, method='trf', args=(p4, p6, cam_proj_4, cam_proj_6,\n left_list, right_list, median_bone))\n", (15271, 15441), False, 'from scipy.optimize import least_squares\n'), ((15720, 15791), 'pose_optimize.multiview_geo.reproject_error', 'reproject_error', (['p3d_tune', 'p4[:, :2]', 'p6[:, :2]', 'cam_proj_4', 'cam_proj_6'], {}), '(p3d_tune, p4[:, :2], p6[:, :2], cam_proj_4, cam_proj_6)\n', (15735, 15791), False, 'from pose_optimize.multiview_geo import reproject_error\n'), ((8424, 8468), 'numpy.array', 'np.array', (["data_3d['3D'][frame_id][person_id]"], {}), "(data_3d['3D'][frame_id][person_id])\n", (8432, 8468), True, 'import numpy as np\n'), ((8499, 8540), 'numpy.array', 'np.array', (['data_dict4[frame_id][person_id]'], {}), '(data_dict4[frame_id][person_id])\n', (8507, 8540), True, 'import numpy as np\n'), ((8579, 8620), 'numpy.array', 'np.array', (['data_dict6[frame_id][person_id]'], {}), '(data_dict6[frame_id][person_id])\n', (8587, 8620), True, 'import numpy as np\n'), ((11527, 11571), 'numpy.array', 'np.array', (["data_3d['3D'][frame_id][person_id]"], {}), "(data_3d['3D'][frame_id][person_id])\n", (11535, 11571), True, 'import numpy as np\n'), ((11692, 11733), 'numpy.array', 'np.array', (['data_dict4[frame_id][person_id]'], {}), '(data_dict4[frame_id][person_id])\n', (11700, 11733), True, 'import numpy as np\n'), ((11772, 11813), 'numpy.array', 'np.array', (['data_dict6[frame_id][person_id]'], {}), '(data_dict6[frame_id][person_id])\n', (11780, 11813), True, 'import numpy as np\n'), ((14626, 14670), 'numpy.array', 'np.array', (["data_3d['3D'][frame_id][person_id]"], {}), "(data_3d['3D'][frame_id][person_id])\n", (14634, 14670), True, 'import numpy as np\n'), ((14799, 14840), 'numpy.array', 'np.array', (['data_dict4[frame_id][person_id]'], {}), '(data_dict4[frame_id][person_id])\n', (14807, 14840), True, 'import numpy as np\n'), ((14883, 14924), 'numpy.array', 'np.array', (['data_dict6[frame_id][person_id]'], {}), '(data_dict6[frame_id][person_id])\n', (14891, 14924), True, 'import numpy as np\n'), ((8883, 8903), 'numpy.square', 'np.square', (['loss_init'], {}), '(loss_init)\n', (8892, 8903), True, 'import numpy as np\n'), ((9202, 9223), 'numpy.square', 'np.square', (['loss_final'], {}), '(loss_final)\n', (9211, 9223), True, 'import numpy as np\n'), ((9369, 9390), 'numpy.square', 'np.square', (['loss_final'], {}), '(loss_final)\n', (9378, 9390), True, 'import numpy as np\n')]
|
import pseudopol.ppseudopol as p_pp
import numpy as np
import sys
max_val=int(sys.argv[1])
vals=list(np.random.randint(1,500000,5000, dtype=np.uint32))
print(p_pp.find_max_subsum(max_val, vals))
|
[
"numpy.random.randint",
"pseudopol.ppseudopol.find_max_subsum"
] |
[((103, 154), 'numpy.random.randint', 'np.random.randint', (['(1)', '(500000)', '(5000)'], {'dtype': 'np.uint32'}), '(1, 500000, 5000, dtype=np.uint32)\n', (120, 154), True, 'import numpy as np\n'), ((160, 195), 'pseudopol.ppseudopol.find_max_subsum', 'p_pp.find_max_subsum', (['max_val', 'vals'], {}), '(max_val, vals)\n', (180, 195), True, 'import pseudopol.ppseudopol as p_pp\n')]
|
# INFO
__author__ = "<NAME>"
__date__ = "26 Mar 2022"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Definitive version"
__copyright__ = "© 2022"
# SCRIPT
import numpy as np
from logistic_regression import *
from other_functions import *
print('\033[92m\033[1m\n\n------------------ 1. TRAINING RESULTS --------------------\033[0m')
# Load the training data
train_X, train_Y = load_file("../titanic-data/titanic-train.txt")
''' Understand the best learning rate value '''
# Which is a good value for the learning rate?
learning_rate_accuracy = {}
learning_rate_values = 1e-4, 0.5e-4, 1e-3, 0.5e-3, 1e-2, 0.05e-1
for lr in learning_rate_values:
w, b, iteration_loss = logreg_train(train_X, train_Y, lr)
P = logreg_inference(train_X, w, b)
Y_hat_train = (P >= 0.5)
accuracy = (train_Y == Y_hat_train).mean() * 100
learning_rate_accuracy[lr] = accuracy
# Show how the accuracy goes for specific learning rate values
display_lr_vs_accuracy(learning_rate_accuracy)
# Find the best learning rate value, such that it maximizes the accuracy
best_lr_value, max_accuracy = find_best_lr_value(learning_rate_accuracy)
# What is the training accuracy of the trained model?
print('\nBest learning rate value:', best_lr_value, ' Accuracy:', round(max_accuracy, 2))
''' Having found the best learning rate, now determine the best number of iterations '''
w, b, iteration_loss = logreg_train(train_X, train_Y, best_lr_value)
# Show how the loss goes for specific iterations value
display_iteration_vs_loss(iteration_loss)
# Find the best iterations value, such that it minimizes the loss function
best_iterations_value, min_loss = find_best_iterations_value(iteration_loss)
# How many iterations are required to converge?
print('\nBest iterations value:', best_iterations_value, ' Loss:', round(min_loss, 2))
''' Extra: Load the obtained parameters into an external .csv file '''
# Obtain the parameters considering the "best model", using the previously obtained values
# Why 60000 as the iteration value? It is a time/performance trade-off: increasing the number of iterations
# beyond 60000 barely decreases the loss any further, so the extra training time is not worth it
w, b, iteration_loss = logreg_train(train_X, train_Y, best_lr_value, 60000)
# Load the parameters into the 'parameter.csv' file
np.savetxt( '../parameters.csv', np.append(w, b))
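# Editor's note (illustrative addition, not part of the original assignment): reload the
# saved parameters to verify the round trip; this assumes the np.append(w, b) layout
# written on the line above, i.e. weights first, bias last.
params = np.loadtxt('../parameters.csv')
w_loaded, b_loaded = params[:-1], params[-1]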
# END
print('\n')
|
[
"numpy.append"
] |
[((2421, 2436), 'numpy.append', 'np.append', (['w', 'b'], {}), '(w, b)\n', (2430, 2436), True, 'import numpy as np\n')]
|
#!/usr/bin/python3.8
"""
Genetic Algorithm to maximize surveillance over a population for AI Assignment.
Author: Sam (eremus-dev)
Repo: https://github.com/eremus-dev
"""
import math
from collections import Counter
from typing import List, Dict
import numpy as np
import matplotlib.pyplot as plt
from test_pop import test_pop
"""
GENETIC ALGORITHM CONFIGURATION VARIABLES
"""
# Genetic Algorithm and Camera Config
genetic_pop = 100 # number different genetic strains
generation = 100 # number of generations to maximize coverage
view_radius = 15 # how far the cameras see
citizens = 200 # how many people we need to surveil
cam_count = 4 # how many cams we have to surveil them with
mutation_chance = 10 # percentage chance mutation occurs
threshold = 100 # stop at this result or generation
test_number = 10 # number of tests to run, set to zero if no tests
Coord = List[int] # Type of co-ordinates
def gen_randpop(size: int) -> List[Coord]:
"""
Function to generate randomly distributed population
to surveil
"""
obs = [] # [x,y] of size number of people
for _ in range(1, size + 1):
xy = [] # x, y co-ords of each person
x = np.random.randint(1, 100)
xy.append(x)
y = np.random.randint(1, 100)
xy.append(y)
obs.append(xy)
return np.array(obs, copy=True)
def rate_gen(cams: Dict[int, List[Coord]], pop: List[Coord]) -> Dict[int, int]:
"""
    Score every camera layout in the genetic population against the citizen population
"""
scores = {}
for n in cams:
scores[n] = fitness_function(cams[n], pop)
return scores
def fitness_function(cams: List[Coord], pop: List[Coord]) -> int:
"""
Function to calculate number of surveilled citizens.
    For each citizen, check whether any camera can see them; if at least one can, the score increases
"""
score = []
for cit in pop:
test = False
for cam in cams:
if (
math.sqrt(((cam[0] - cit[0]) ** 2) + ((cam[1] - cit[1]) ** 2))
<= view_radius
):
test = True
score.append(test)
return score.count(True)
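# Editor's note: minimal usage sketch, not part of the original script. Scores a
# hand-picked camera layout against a toy population of three citizens; with the
# module-level view_radius of 15 the first two citizens are covered, so the
# expected score is 2. All coordinates are arbitrary.
def _demo_fitness():
    toy_pop = np.array([[10, 10], [50, 50], [90, 90]])
    toy_cams = np.array([[12, 12], [48, 52], [5, 95], [70, 20]])
    return fitness_function(toy_cams, toy_pop)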
def select_from_pop(
cams: Dict[int, List[Coord]], total_scores
) -> Dict[int, List[Coord]]:
"""
    Function that takes a dict of camera positions and a dict of scores, keeps the strongest half, and breeds them.
    Returns the new population of cameras.
"""
top_scores = {}
new_pop = {}
selection = int(len(total_scores) / 2)
scores = sorted(total_scores, key=total_scores.get, reverse=True)[:selection]
assert len(scores) == selection
for i in scores:
top_scores[i] = total_scores[i]
new_pop[i] = cams[i]
assert len(new_pop) == selection
return breed_strongest(top_scores, new_pop)
def breed_strongest(
top_scores: Dict[int, int], new_pop: Dict[int, List[Coord]]
) -> Dict[int, List[Coord]]:
"""
    Function to breed the selected strongest camera layouts: each parent is kept
    unchanged and paired with one crossover child built from a random mate.
"""
count = 0
full_pop = {}
keys = list(new_pop.keys())
for i in keys:
dad = []
child = []
mum = []
mum = np.copy(new_pop[i])
child = dad = np.copy(
new_pop[np.random.choice(keys)]
) # randomly select breeding mate
child[0] = mum[np.random.randint(0, 3)]
child[1] = mum[np.random.randint(0, 3)]
full_pop[count] = mum # save mum
count += 1
full_pop[count] = child # add random child
count += 1
full_pop = mutate(full_pop, top_scores)
assert len(full_pop) == genetic_pop
return full_pop
def mutate(
full_pop: Dict[int, List[Coord]], top_scores: Dict[int, int]
) -> Dict[int, List[Coord]]:
"""
    Function to mutate the population: each strain has a mutation_chance percent chance of having one camera perturbed
"""
for i in full_pop:
if np.random.randint(0, 100) > (100 - mutation_chance):
temp = full_pop[i]
xmod, ymod = [
np.random.randint(-20, 20),
np.random.randint(-20, 20),
] # pick random mutation
camera_num = np.random.randint(0, 3)
camera = temp[camera_num] # cameras to mod
camera[0] = (camera[0] + xmod) % 100
camera[1] = (camera[1] + ymod) % 100
temp[camera_num] = camera
full_pop[i] = temp
return full_pop
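# Editor's note: small sanity-check sketch, not part of the original script.
# mutate() keeps the dictionary keys and camera array shapes intact and only
# perturbs one camera per selected strain; note that the top_scores argument is
# not referenced inside the function as written, so an empty dict suffices here.
def _demo_mutate():
    toy_strains = {i: gen_randpop(cam_count) for i in range(genetic_pop)}
    return mutate(toy_strains, top_scores={})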
def plot_pop(pop: List[Coord], cams: List[Coord], top_score: int, gen: int, run: int) -> None:
"""
Function to plot placement of cams and population on graph
"""
plt.cla() # clears graph
plt.gcf().canvas.mpl_connect( # allows exit key to quit qraph
"key_release_event", lambda event: [exit(0) if event.key == "escape" else None]
)
plt.axis("equal")
plt.grid(True)
plt.plot(pop[:, 0], pop[:, 1], "ok")
plt.plot(cams[:, 0], cams[:, 1], "*")
for i in range(len(cams)): # plots camera view range
circle = plt.Circle(
(cams[i][0], cams[i][1]), view_radius, color="r", fill=False
)
ax = plt.gca()
ax.add_artist(circle)
ax = plt.gca()
ax.set_xlabel("City Terrain X") # sets up all labels
ax.set_ylabel("City Terrain Y")
ax.set_title(f"Visualisation of Cameras and Population\nSurveilled Population {max_seen} in Generation {gen}")
plt.pause(0.01)
plt.draw() # draws graph
    if gen == generation - 1:  # save the camera layout from the final generation
plt.savefig(f'./results/last_gen_test{run}.png')
def plot_final_results(generational_record: Dict[int, int], run: int, max_seen: int) -> None:
'''
Produces final plot of the progression of the GA across a single generational run
'''
plt.cla()
plt.grid(True)
lists = sorted(generational_record.items())
x, y = zip(*lists)
plt.xlim(-2, generation+2)
plt.ylim(50, 120)
plt.plot(x, y, label="Pop Surveilled", linestyle="--", marker='o')
ax = plt.gca()
ax.set_xlabel("Generations")
ax.set_ylabel("Number of Population Surveilled")
ax.set_title(f"Population Surveilled Over Generations\nMax Population Surveilled {max_seen}")
plt.savefig(f'./results/final_results_test{run}.png')
if test_number > 0:
plt.pause(0.5)
plt.draw()
else:
plt.show()
def plot_aggregate_results(aggregate_results: Dict[int, int], ) -> None:
'''
Produces plot of aggregate results for test runs of GA
'''
# Graph aggregate results and average of test runs
plt.cla()
plt.grid(True)
lists = sorted(aggregate_results.items())
x,y = zip(*lists)
avg = [sum(y) / len(y)] * len(x)
mean = np.mean(y)
std_dev = format(np.std(y), '.3f')
maximum = max(y)
plt.scatter(x, y, label="Pop Surveilled", color="r")
ax = plt.gca()
ax.plot(x, avg, label='Mean', linestyle='--')
ax.set_title(f"Population Surveilled Over Tests using Genetic Algorithm\nPopulation Surveilled Mean: {mean}, Max {maximum}, Stdev {std_dev}")
ax.legend(loc='upper left')
ax.set_xlabel("Test Number")
ax.set_ylabel("Number of Population Surveilled")
plt.savefig(f'./results/aggregate_result_GA_test_run.png')
plt.show()
if __name__ == "__main__":
aggregate_results = {} # collect each tests results
# run the GA for test_number times and graph results
for run in range(0, test_number):
generational_record = {} # record to graph at end
cams = {} # dictionary of genetic population
#citpop = gen_randpop(citizens) # a numpy array of citizens randomly distributed
citpop = np.array(test_pop)
for i in range(genetic_pop): # generate genetic population
cams[i] = gen_randpop(cam_count) # a numpy array of cams randomly distributed
# Main Genetic Algorithm Loop
gen = 0
max_seen = 0
while (gen < generation) & (
max_seen < threshold
): # evolve for number of generations
if gen != 0: # do nothing first time through loop
cams = select_from_pop(cams, total_scores)
total_scores = rate_gen(cams, citpop)
best_cam = max(total_scores, key=total_scores.get)
max_seen = total_scores[best_cam]
print(f"We surveilled {max_seen} in generation {gen}, best is {best_cam}")
plot_pop(citpop, cams[best_cam], max_seen, gen, run) # print best fit for each generation
generational_record[gen] = max_seen # to graph at end of process
gen += 1
# Graph Results of Genetic Algorithm over generations
plot_final_results(generational_record, run, max_seen)
aggregate_results[run] = max_seen
# Graph aggregate results and average of test runs
plot_aggregate_results(aggregate_results)
|
[
"matplotlib.pyplot.grid",
"math.sqrt",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.Circle",
"numpy.random.choice",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"numpy.std",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.show",
"numpy.copy",
"numpy.random.randint"
] |
[((1327, 1351), 'numpy.array', 'np.array', (['obs'], {'copy': '(True)'}), '(obs, copy=True)\n', (1335, 1351), True, 'import numpy as np\n'), ((4549, 4558), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4556, 4558), True, 'import matplotlib.pyplot as plt\n'), ((4776, 4793), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4784, 4793), True, 'import matplotlib.pyplot as plt\n'), ((4821, 4835), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4829, 4835), True, 'import matplotlib.pyplot as plt\n'), ((4840, 4876), 'matplotlib.pyplot.plot', 'plt.plot', (['pop[:, 0]', 'pop[:, 1]', '"""ok"""'], {}), "(pop[:, 0], pop[:, 1], 'ok')\n", (4848, 4876), True, 'import matplotlib.pyplot as plt\n'), ((4881, 4918), 'matplotlib.pyplot.plot', 'plt.plot', (['cams[:, 0]', 'cams[:, 1]', '"""*"""'], {}), "(cams[:, 0], cams[:, 1], '*')\n", (4889, 4918), True, 'import matplotlib.pyplot as plt\n'), ((5156, 5165), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5163, 5165), True, 'import matplotlib.pyplot as plt\n'), ((5382, 5397), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (5391, 5397), True, 'import matplotlib.pyplot as plt\n'), ((5402, 5412), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5410, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5731, 5740), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5738, 5740), True, 'import matplotlib.pyplot as plt\n'), ((5745, 5759), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5753, 5759), True, 'import matplotlib.pyplot as plt\n'), ((5835, 5863), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2)', '(generation + 2)'], {}), '(-2, generation + 2)\n', (5843, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5866, 5883), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(50)', '(120)'], {}), '(50, 120)\n', (5874, 5883), True, 'import matplotlib.pyplot as plt\n'), ((5888, 5954), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""Pop Surveilled"""', 'linestyle': '"""--"""', 'marker': '"""o"""'}), "(x, y, label='Pop Surveilled', linestyle='--', marker='o')\n", (5896, 5954), True, 'import matplotlib.pyplot as plt\n'), ((5964, 5973), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5971, 5973), True, 'import matplotlib.pyplot as plt\n'), ((6162, 6215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./results/final_results_test{run}.png"""'], {}), "(f'./results/final_results_test{run}.png')\n", (6173, 6215), True, 'import matplotlib.pyplot as plt\n'), ((6529, 6538), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6536, 6538), True, 'import matplotlib.pyplot as plt\n'), ((6543, 6557), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6551, 6557), True, 'import matplotlib.pyplot as plt\n'), ((6674, 6684), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (6681, 6684), True, 'import numpy as np\n'), ((6749, 6801), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'label': '"""Pop Surveilled"""', 'color': '"""r"""'}), "(x, y, label='Pop Surveilled', color='r')\n", (6760, 6801), True, 'import matplotlib.pyplot as plt\n'), ((6811, 6820), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6818, 6820), True, 'import matplotlib.pyplot as plt\n'), ((7139, 7197), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./results/aggregate_result_GA_test_run.png"""'], {}), "(f'./results/aggregate_result_GA_test_run.png')\n", (7150, 7197), True, 'import matplotlib.pyplot as plt\n'), ((7202, 7212), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7210, 7212), True, 'import matplotlib.pyplot as plt\n'), ((1185, 1210), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (1202, 1210), True, 'import numpy as np\n'), ((1245, 1270), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (1262, 1270), True, 'import numpy as np\n'), ((3145, 3164), 'numpy.copy', 'np.copy', (['new_pop[i]'], {}), '(new_pop[i])\n', (3152, 3164), True, 'import numpy as np\n'), ((4999, 5071), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(cams[i][0], cams[i][1])', 'view_radius'], {'color': '"""r"""', 'fill': '(False)'}), "((cams[i][0], cams[i][1]), view_radius, color='r', fill=False)\n", (5009, 5071), True, 'import matplotlib.pyplot as plt\n'), ((5107, 5116), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5114, 5116), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5528), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./results/last_gen_test{run}.png"""'], {}), "(f'./results/last_gen_test{run}.png')\n", (5491, 5528), True, 'import matplotlib.pyplot as plt\n'), ((6253, 6267), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.5)'], {}), '(0.5)\n', (6262, 6267), True, 'import matplotlib.pyplot as plt\n'), ((6276, 6286), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6284, 6286), True, 'import matplotlib.pyplot as plt\n'), ((6305, 6315), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6313, 6315), True, 'import matplotlib.pyplot as plt\n'), ((6706, 6715), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (6712, 6715), True, 'import numpy as np\n'), ((7620, 7638), 'numpy.array', 'np.array', (['test_pop'], {}), '(test_pop)\n', (7628, 7638), True, 'import numpy as np\n'), ((3307, 3330), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3324, 3330), True, 'import numpy as np\n'), ((3355, 3378), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3372, 3378), True, 'import numpy as np\n'), ((3839, 3864), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (3856, 3864), True, 'import numpy as np\n'), ((4101, 4124), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (4118, 4124), True, 'import numpy as np\n'), ((1960, 2018), 'math.sqrt', 'math.sqrt', (['((cam[0] - cit[0]) ** 2 + (cam[1] - cit[1]) ** 2)'], {}), '((cam[0] - cit[0]) ** 2 + (cam[1] - cit[1]) ** 2)\n', (1969, 2018), False, 'import math\n'), ((3216, 3238), 'numpy.random.choice', 'np.random.choice', (['keys'], {}), '(keys)\n', (3232, 3238), True, 'import numpy as np\n'), ((3966, 3992), 'numpy.random.randint', 'np.random.randint', (['(-20)', '(20)'], {}), '(-20, 20)\n', (3983, 3992), True, 'import numpy as np\n'), ((4010, 4036), 'numpy.random.randint', 'np.random.randint', (['(-20)', '(20)'], {}), '(-20, 20)\n', (4027, 4036), True, 'import numpy as np\n'), ((4607, 4616), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4614, 4616), True, 'import matplotlib.pyplot as plt\n')]
|
r"""
Echelle Spectrum
----------------
An abstract base class for a high resolution spectrum, for some echelle order :math:`m \in \{1, \ldots, M\}` out of :math:`M` total orders, each with vectors for wavelength, flux, and uncertainty, e.g. :math:`F_m(\lambda)`. This class is a subclass of specutils' Spectrum1D and is intended to have its methods inherited by specific instrument classes.
EchelleSpectrum
###############
"""
import warnings
import logging
import numpy as np
import astropy
import pandas as pd
from astropy.io import fits
from astropy import units as u
from astropy.units import Quantity
from astropy.wcs import WCS, FITSFixedWarning
from astropy.nddata import StdDevUncertainty
from scipy.stats import median_abs_deviation
from scipy.interpolate import InterpolatedUnivariateSpline
from specutils.analysis import equivalent_width
from scipy.interpolate import UnivariateSpline, interp1d
from scipy.signal import savgol_filter
from astropy.constants import R_jup, R_sun, G, M_jup, R_earth, c
from astropy.modeling.physical_models import BlackBody
import specutils
from muler.utilities import apply_numpy_mask, resample_list
# from barycorrpy import get_BC_vel
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time import Time
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
import copy
from specutils.spectra.spectral_region import SpectralRegion
from specutils.analysis import equivalent_width
log = logging.getLogger(__name__)
from astropy.io.fits.verify import VerifyWarning
warnings.simplefilter("ignore", category=VerifyWarning)
# See Issue: https://github.com/astropy/specutils/issues/779
warnings.filterwarnings(
"ignore", category=astropy.utils.exceptions.AstropyDeprecationWarning
)
warnings.filterwarnings("ignore", category=FITSFixedWarning)
# See Issue: https://github.com/astropy/specutils/issues/800
warnings.filterwarnings("ignore", category=RuntimeWarning)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
from specutils import Spectrum1D
from specutils import SpectrumList
class EchelleSpectrum(Spectrum1D):
r"""
An abstract base class to provide common methods that will be inherited by instrument-specific classes
"""
def __init__(self, *args, **kwargs):
self.ancillary_spectra = None
super().__init__(*args, **kwargs)
@property
def snr(self):
"""The Signal-to-Noise Ratio :math:`\frac{S}{N}`, the flux divided by the uncertainty
The spectrum should have an input uncertainty, otherwise returns NaNs
"""
if self.uncertainty is not None:
if self.uncertainty.uncertainty_type == "std":
snr_estimate = self.flux / self.uncertainty.quantity
elif self.uncertainty.uncertainty_type == "ivar":
snr_estimate = self.flux * np.sqrt(self.uncertainty.quantity)
else:
message = "SNR only supports standard deviation and inverse variance uncertainty"
raise NotImplementedError(message)
else:
snr_estimate = np.repeat(np.NaN, len(self.flux)) * u.dimensionless_unscaled
return snr_estimate
@property
def available_ancillary_spectra(self):
"""The list of available ancillary spectra"""
output = []
if hasattr(self, "ancillary_spectra"):
if self.ancillary_spectra is not None:
output = [
ancillary_spectrum
for ancillary_spectrum in self.ancillary_spectra
if ancillary_spectrum in self.meta.keys()
]
return output
def estimate_barycorr(self):
"""Estimate the Barycentric Correction from the Date and Target Coordinates
Returns
-------
barycentric_corrections : float
Barycentric correction for targets in units of m/s
"""
obstime = self.astropy_time
loc = EarthLocation.of_site(self.site_name)
sc = SkyCoord(ra=self.RA, dec=self.DEC)
barycorr = sc.radial_velocity_correction(obstime=obstime, location=loc)
return barycorr
def measure_ew(self, lower=None, upper=None):
"""Measure the equivalent width of a given spectrum
Parameters
----------
lower : AstroPy Quantity or float
The short wavelength limit at which to define the EW lower bound.
            If the value is a float, it is assumed to be in Angstrom units.
        upper : AstroPy Quantity or float
            The long wavelength limit at which to define the EW upper bound.
            If the value is a float, it is assumed to be in Angstrom units.
Returns
-------
equivalent width : (scalar)
"""
        # Convert each bound to a Quantity independently (assume Angstroms for bare floats)
        if not isinstance(lower, u.Quantity):
            lower = lower * u.Angstrom
        if not isinstance(upper, u.Quantity):
            upper = upper * u.Angstrom
ew = equivalent_width(self, regions=SpectralRegion(lower, upper))
return ew
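    # Usage sketch (editor's addition, not part of the muler API): on a spectrum
    # instance that has already been continuum-normalized, e.g.
    #
    #     ew = spec.normalize().measure_ew(lower=10830.0, upper=10840.0)
    #
    # returns an astropy Quantity in wavelength units; the bounds above are
    # arbitrary placeholder values in Angstroms.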
def normalize(self):
"""Normalize spectrum by its median value
Returns
-------
normalized_spec : (KeckNIRSPECSpectrum)
Normalized Spectrum
"""
spec = self._copy(
spectral_axis=self.wavelength.value * self.wavelength.unit, wcs=None
)
median_flux = np.nanmedian(spec.flux.value)
# Each ancillary spectrum (e.g. sky) should also be normalized
meta_out = copy.deepcopy(spec.meta)
for ancillary_spectrum in self.available_ancillary_spectra:
meta_out[ancillary_spectrum] = meta_out[ancillary_spectrum].divide(
median_flux * spec.flux.unit, handle_meta="ff"
)
# spec.meta = meta_out
return spec.divide(
median_flux * spec.flux.unit, handle_meta="first_found"
)._copy(meta=meta_out)
def flatten_by_black_body(self, Teff):
"""Flatten the spectrum by a scaled black body, usually after deblazing
Note: This method applies mostly to high-bandwidth stellar spectra.
Parameters
----------
Teff : float
The effective temperature of the black body in Kelvin units
"""
blackbody = BlackBody(temperature=Teff * u.K)(self.wavelength)
blackbody = blackbody / np.mean(blackbody)
wl_scaled = self.wavelength
wl_scaled = wl_scaled / np.median(wl_scaled)
try:
return self.divide(blackbody / wl_scaled ** 2, handle_meta="first_found")
except u.UnitConversionError:
return self.divide(
blackbody / wl_scaled ** 2 * self.unit, handle_meta="first_found"
)
def flatten(
self,
window_length=101,
polyorder=2,
return_trend=False,
break_tolerance=5,
niters=3,
sigma=3,
mask=None,
**kwargs,
):
"""Removes the low frequency trend using scipy's Savitzky-Golay filter.
This method wraps `scipy.signal.savgol_filter`. Abridged from the
`lightkurve` method with the same name for flux time series.
Parameters
----------
window_length : int
The length of the filter window (i.e. the number of coefficients).
``window_length`` must be a positive odd integer.
polyorder : int
The order of the polynomial used to fit the samples. ``polyorder``
must be less than window_length.
return_trend : bool
If `True`, the method will return a tuple of two elements
(flattened_spec, trend_spec) where trend_spec is the removed trend.
break_tolerance : int
If there are large gaps in wavelength, flatten will split the flux into
several sub-spectra and apply `savgol_filter` to each
individually. A gap is defined as a region in wavelength larger than
`break_tolerance` times the median gap. To disable this feature,
set `break_tolerance` to None.
niters : int
Number of iterations to iteratively sigma clip and flatten. If more than one, will
perform the flatten several times, removing outliers each time.
sigma : int
Number of sigma above which to remove outliers from the flatten
mask : boolean array with length of self.wavelength
Boolean array to mask data with before flattening. Flux values where
mask is True will not be used to flatten the data. An interpolated
result will be provided for these points. Use this mask to remove
data you want to preserve, e.g. spectral regions of interest.
**kwargs : dict
Dictionary of arguments to be passed to `scipy.signal.savgol_filter`.
Returns
-------
flatten_spec : `EchelleSpectrum`
New light curve object with long-term trends removed.
If ``return_trend`` is set to ``True``, this method will also return:
trend_spec : `EchelleSpectrum`
New light curve object containing the trend that was removed.
"""
if mask is None:
mask = np.ones(len(self.wavelength), dtype=bool)
else:
# Deep copy ensures we don't change the original.
mask = copy.deepcopy(~mask)
# No NaNs
mask &= np.isfinite(self.flux)
# No outliers
mask &= np.nan_to_num(np.abs(self.flux - np.nanmedian(self.flux))) <= (
np.nanstd(self.flux) * sigma
)
for iter in np.arange(0, niters):
if break_tolerance is None:
break_tolerance = np.nan
if polyorder >= window_length:
polyorder = window_length - 1
log.warning(
"polyorder must be smaller than window_length, "
"using polyorder={}.".format(polyorder)
)
# Split the lightcurve into segments by finding large gaps in time
dlam = self.wavelength.value[mask][1:] - self.wavelength.value[mask][0:-1]
with warnings.catch_warnings(): # Ignore warnings due to NaNs
warnings.simplefilter("ignore", RuntimeWarning)
cut = np.where(dlam > break_tolerance * np.nanmedian(dlam))[0] + 1
low = np.append([0], cut)
high = np.append(cut, len(self.wavelength[mask]))
# Then, apply the savgol_filter to each segment separately
trend_signal = Quantity(
np.zeros(len(self.wavelength[mask])), unit=self.flux.unit
)
for l, h in zip(low, high):
# Reduce `window_length` and `polyorder` for short segments;
# this prevents `savgol_filter` from raising an exception
# If the segment is too short, just take the median
if np.any([window_length > (h - l), (h - l) < break_tolerance]):
trend_signal[l:h] = np.nanmedian(self.flux[mask][l:h])
else:
# Scipy outputs a warning here that is not useful, will be fixed in version 1.2
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
trsig = savgol_filter(
x=self.flux.value[mask][l:h],
window_length=window_length,
polyorder=polyorder,
**kwargs,
)
trend_signal[l:h] = Quantity(trsig, trend_signal.unit)
# Ignore outliers;
# Note that it's possible numerical noise can cause outliers...
# If this happens you can add `1e-14` below to avoid detecting
# outliers which are merely caused by numerical noise.
mask1 = np.nan_to_num(np.abs(self.flux[mask] - trend_signal)) < (
np.nanstd(self.flux[mask] - trend_signal)
* sigma
# + Quantity(1e-14, self.flux.unit)
)
f = interp1d(
self.wavelength.value[mask][mask1],
trend_signal[mask1],
fill_value="extrapolate",
)
trend_signal = Quantity(f(self.wavelength.value), self.flux.unit)
mask[mask] &= mask1
flatten_spec = copy.deepcopy(self)
trend_spec = self._copy(flux=trend_signal)
with warnings.catch_warnings():
# ignore invalid division warnings
warnings.simplefilter("ignore", RuntimeWarning)
flatten_spec = flatten_spec.divide(trend_spec, handle_meta="ff")
if return_trend:
return flatten_spec, trend_spec
else:
return flatten_spec
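    # Usage sketch (editor's addition, not part of the muler API): typical call on a
    # normalized order, keeping the removed trend for inspection; the window length
    # below is an arbitrary odd number of pixels, not a recommended default.
    #
    #     flat, trend = spec.flatten(window_length=201, return_trend=True)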
def deblaze(self, method="spline"):
"""Remove blaze function from spectrum by interpolating a spline function
Note: It is recommended to remove NaNs before running this operation,
        otherwise edge effects can appear from zero-padded edges.
        Returns
        -------
        blaze corrected spectrum
"""
if method == "spline":
if np.any(np.isnan(self.flux)):
log.warning(
"your spectrum contains NaNs, "
"it is highly recommended to run `.remove_nans()` before deblazing"
)
spline = UnivariateSpline(self.wavelength, np.nan_to_num(self.flux), k=5)
interp_spline = spline(self.wavelength) * self.flux.unit
no_blaze = self.divide(interp_spline, handle_meta="first_found")
if "sky" in self.meta.keys():
new_sky = self.sky.divide(interp_spline, handle_meta="first_found")
no_blaze.meta["sky"] = new_sky
return no_blaze
else:
raise NotImplementedError
def barycentric_correct(self):
"""shift spectrum by barycenter velocity
Returns
-------
        barycenter corrected Spectrum : (EchelleSpectrum)
"""
bcRV = +1.0 * self.estimate_barycorr()
return self.rv_shift(bcRV)
def rv_shift(self, velocity):
"""
Shift velocity of spectrum in astropy units (or km/s if input velocity is just a float)
"""
if (
type(velocity) == float
): # If supplied velocity is not using astropy units, default to km/s
velocity = velocity * (u.km / u.s)
try:
self.radial_velocity = velocity
return self._copy(
spectral_axis=self.wavelength.value * self.wavelength.unit,
wcs=None,
)
except:
log.error(
"rv shift requires specutils version >= 1.2, you have: {}".format(
specutils.__version__
)
)
raise
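    # Usage sketch (editor's addition): shift a spectrum by +30 km/s. A bare float is
    # interpreted as km/s by rv_shift, so the two calls below are equivalent:
    #
    #     shifted = spec.rv_shift(30.0)
    #     shifted = spec.rv_shift(30.0 * u.km / u.s)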
def remove_nans(self):
"""Remove data points that have NaN fluxes
        By default the method removes NaNs from target, sky, and lfc fibers.
        Returns
        -------
        finite_spec : (EchelleSpectrum)
Spectrum with NaNs removed
"""
keep_indices = (self.mask == False) & (self.flux == self.flux)
return self.apply_boolean_mask(keep_indices)
def smooth_spectrum(
self, return_model=False, optimize_kernel=False, bandwidth=150.0
):
"""Smooth the spectrum using Gaussian Process regression
Parameters
-------
return_model : (bool)
Whether or not to return the gp model, which takes a wavelength axis
as input and outputs the smooth trend
optimize_kernel : (bool)
Whether to optimize the GP hyperparameters: correlation scale and amplitude
bandwidth : (float)
The smoothing bandwidth in Angstroms. Defaults to 150 Angstrom lengthscale.
Returns
-------
smoothed_spec : (EchelleSpectrum)
Smooth version of input Spectrum
"""
try:
from celerite2 import terms
import celerite2
except ImportError:
raise ImportError(
"You need to install celerite2 to use the smoothing='celerite' method."
)
if self.uncertainty is not None:
unc = self.uncertainty.array
else:
unc = np.repeat(np.nanmedian(self.flux.value) / 100.0, len(self.flux))
# TODO: change rho to depend on the bandwidth
kernel = terms.SHOTerm(sigma=0.01, rho=bandwidth, Q=0.25)
gp = celerite2.GaussianProcess(kernel, mean=0.0)
gp.compute(self.wavelength, yerr=unc)
if optimize_kernel:
# Construct the GP model with celerite
def set_params(params, gp):
gp.mean = params[0]
theta = np.exp(params[1:])
gp.kernel = terms.SHOTerm(sigma=theta[0], rho=theta[1], Q=0.5)
gp.compute(self.wavelength.value, yerr=unc + theta[2], quiet=True)
return gp
def neg_log_like(params, gp):
gp = set_params(params, gp)
return -gp.log_likelihood(self.flux.value)
initial_params = [np.log(1), np.log(0.001), np.log(5.0), np.log(0.01)]
soln = minimize(neg_log_like, initial_params, method="L-BFGS-B", args=(gp,))
opt_gp = set_params(soln.x, gp)
else:
opt_gp = gp
mean_model = opt_gp.predict(self.flux.value, t=self.wavelength.value)
smoothed_spectrum = self.__class__(
spectral_axis=self.wavelength.value * self.wavelength.unit,
flux=mean_model * self.flux.unit,
uncertainty=None,
            mask=np.zeros_like(mean_model, dtype=bool),
meta=copy.deepcopy(self.meta),
wcs=None,
)
if return_model:
gp_model = lambda wl: opt_gp.predict(self.flux.value, t=wl)
return (smoothed_spectrum, gp_model)
else:
return smoothed_spectrum
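    # Usage sketch (editor's addition, not part of the muler API): smooth an order
    # and also grab the underlying GP model so the trend can be evaluated on an
    # arbitrary wavelength grid; the 200 Angstrom bandwidth is a placeholder choice.
    #
    #     smooth, gp_model = spec.smooth_spectrum(return_model=True, bandwidth=200.0)
    #     trend = gp_model(spec.wavelength.value)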
def plot(self, ax=None, ylo=0.6, yhi=1.2, figsize=(10, 4), **kwargs):
"""Plot a quick look of the spectrum"
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
ylo : scalar
Lower limit of the y axis
yhi : scalar
Upper limit of the y axis
figsize : tuple
The figure size for the plot
label : str
            The legend label for plt.legend()
Returns
-------
ax : (`~matplotlib.axes.Axes`)
The axis to display and/or modify
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
ax.set_ylim(ylo, yhi)
ax.set_xlabel("$\lambda \;(\AA)$")
ax.set_ylabel("Flux")
if hasattr(self, "spectrographname"):
ax.set_title(self.spectrographname + " Spectrum")
ax.step(self.wavelength, self.flux, **kwargs, where="mid")
else:
ax.step(self.wavelength, self.flux, **kwargs, where="mid")
return ax
def remove_outliers(self, threshold=5):
"""Remove outliers above threshold
Parameters
----------
threshold : float
The sigma-clipping threshold (in units of sigma)
Returns
-------
        clean_spec : (EchelleSpectrum)
Cleaned version of input Spectrum
"""
residual = self.flux - self.smooth_spectrum().flux
mad = median_abs_deviation(residual.value, nan_policy="omit")
keep_indices = (np.abs(residual.value) < (threshold * mad)) == True
return self.apply_boolean_mask(keep_indices)
def trim_edges(self, limits=None):
"""Trim the order edges, which falloff in SNR
This method applies limits on absolute x pixel values, regardless
of the order of previous destructive operations, which may not
be the intended behavior in some applications.
Parameters
----------
limits : tuple
The index bounds (lo, hi) for trimming the order
Returns
-------
trimmed_spec : (EchelleSpectrum)
Trimmed version of input Spectrum
"""
if limits is None:
limits = self.noisy_edges
lo, hi = limits
if self.meta is not None:
if "x_values" in self.meta.keys():
x_values = self.meta["x_values"]
else:
                log.warning(
"The spectrum metadata is missing its native pixel location labels. "
"Proceeding by assuming contiguous pixel labels, which may not be what you want."
)
x_values = np.arange(len(self.wavelength))
keep_indices = (x_values > lo) & (x_values < hi)
return self.apply_boolean_mask(keep_indices)
def estimate_uncertainty(self):
"""Estimate the uncertainty based on residual after smoothing
Returns
-------
uncertainty : (np.float)
Typical uncertainty
"""
residual = self.flux - self.smooth_spectrum().flux
return median_abs_deviation(residual.value)
def to_HDF5(self, path, file_basename):
"""Export to spectral order to HDF5 file format
This format is required for per-order Starfish input
Parameters
----------
path : str
The directory destination for the HDF5 file
file_basename : str
The basename of the file to which the order number and extension
are appended. Typically source name that matches a database entry.
"""
try:
import h5py
except ImportError:
raise ImportError("You need to install h5py to export to the HDF5 format.")
grating_order = self.meta["m"]
out_path = path + "/" + file_basename + "_m{:03d}.hdf5".format(grating_order)
# The mask should be ones everywhere
mask_out = np.ones(len(self.wavelength), dtype=int)
f_new = h5py.File(out_path, "w")
f_new.create_dataset("fls", data=self.flux.value)
f_new.create_dataset("wls", data=self.wavelength.to(u.Angstrom).value)
f_new.create_dataset("sigmas", data=self.uncertainty.array)
f_new.create_dataset("masks", data=mask_out)
f_new.close()
def resample_list(self, specList, **kwargs):
"""
Resample a single EchelleSpectrum object into a EchelleSpectrumList object.
Useful for converting models into echelle spectra with multiple orders.
"""
return resample_list(self, specList, **kwargs)
def apply_boolean_mask(self, mask):
"""Apply a boolean mask to the spectrum and any available ancillary spectra
Parameters
----------
mask: boolean mask, typically a numpy array
The boolean mask with numpy-style masking: True means "keep" that index and
False means discard that index
"""
spec = apply_numpy_mask(self, mask)
for ancillary_spectrum in self.available_ancillary_spectra:
spec.meta[ancillary_spectrum] = apply_numpy_mask(
spec.meta[ancillary_spectrum], mask
)
return spec
class EchelleSpectrumList(SpectrumList):
r"""
An enhanced container for a list of Echelle spectral orders
"""
def __init__(self, *args, **kwargs):
self.normalization_order_index = 0
super().__init__(*args, **kwargs)
def normalize(self, order_index=0):
"""Normalize all orders to one of the other orders"""
index = self.normalization_order_index
median_flux = copy.deepcopy(np.nanmedian(self[index].flux))
for i in range(len(self)):
self[i] = self[i].divide(median_flux, handle_meta="first_found")
return self
def remove_nans(self):
"""Remove all the NaNs"""
# TODO: is this in-place overriding of self allowed?
# May have unintended consequences?
# Consider making a copy instead...
for i in range(len(self)):
self[i] = self[i].remove_nans()
return self
def remove_outliers(self, threshold=5):
"""Remove all the outliers
Parameters
----------
threshold : float
The sigma-clipping threshold (in units of sigma)
"""
for i in range(len(self)):
self[i] = self[i].remove_outliers(threshold=threshold)
return self
def trim_edges(self, limits=None):
"""Trim all the edges"""
for i in range(len(self)):
self[i] = self[i].trim_edges(limits)
return self
def deblaze(self, method="spline"):
"""Remove blaze function from all orders by interpolating a spline function
Note: It is recommended to remove NaNs before running this operation,
        otherwise artifacts can appear from the zero-padded edges.
"""
spec_out = copy.deepcopy(self)
for i in range(len(self)):
spec_out[i] = self[i].deblaze(method=method)
return spec_out
def flatten_by_black_body(self, Teff):
"""Flatten by black body"""
spec_out = copy.deepcopy(self)
index = self.normalization_order_index
median_wl = copy.deepcopy(np.nanmedian(self[index].wavelength))
blackbody_func = BlackBody(temperature=Teff * u.K)
blackbody_ref = blackbody_func(median_wl)
for i in range(len(spec_out)):
blackbody = (
blackbody_func(spec_out[i].wavelength)
/ blackbody_ref
/ (spec_out[i].wavelength / median_wl) ** 2
)
try:
spec_out[i] = spec_out[i].divide(blackbody, handle_meta="first_found")
except u.UnitConversionError:
spec_out[i] = spec_out[i].divide(
blackbody * self.unit, handle_meta="first_found"
)
return spec_out
def to_HDF5(self, path, file_basename):
"""Save all spectral orders to the HDF5 file format"""
for i in range(len(self)):
self[i].to_HDF5(path, file_basename)
def stitch(self):
"""Stitch all the spectra together, assuming zero overlap in wavelength."""
spec = copy.deepcopy(self)
wls = (
np.hstack([spec[i].wavelength.value for i in range(len(spec))])
* spec[0].wavelength.unit
)
fluxes = (
np.hstack([spec[i].flux.value for i in range(len(spec))])
* spec[0].flux.unit
)
if spec[0].uncertainty is not None:
# HACK We assume if one order has it, they all do, and that it's StdDev
unc = np.hstack([spec[i].uncertainty.array for i in range(len(self))])
unc_out = StdDevUncertainty(unc)
else:
unc_out = None
# Stack the x_values:
x_values = np.hstack([spec[i].meta["x_values"] for i in range(len(spec))])
meta_out = copy.deepcopy(spec[0].meta)
meta_out["x_values"] = x_values
for ancillary_spectrum in spec[0].available_ancillary_spectra:
if spec[0].meta[ancillary_spectrum].meta is not None:
meta_of_meta = spec[0].meta[ancillary_spectrum].meta
x_values = np.hstack(
[
spec[i].meta[ancillary_spectrum].meta["x_values"]
for i in range(len(spec))
]
)
meta_of_meta["x_values"] = x_values
else:
meta_of_meta = None
wls_anc = np.hstack(
[spec[i].meta[ancillary_spectrum].wavelength for i in range(len(spec))]
)
fluxes_anc = np.hstack(
[spec[i].meta[ancillary_spectrum].flux for i in range(len(spec))]
)
meta_out[ancillary_spectrum] = spec[0].__class__(
spectral_axis=wls_anc, flux=fluxes_anc, meta=meta_of_meta
)
return spec[0].__class__(
spectral_axis=wls, flux=fluxes, uncertainty=unc_out, meta=meta_out, wcs=None
)
def plot(self, **kwargs):
"""Plot the entire spectrum list"""
if not "ax" in kwargs:
ax = self[0].plot(figsize=(25, 4), **kwargs)
for i in range(1, len(self)):
self[i].plot(ax=ax, **kwargs)
return ax
else:
for i in range(1, len(self)):
self[i].plot(**kwargs)
def __add__(self, other):
"""Bandmath addition"""
spec_out = copy.deepcopy(self)
for i in range(len(self)):
spec_out[i] = self[i] + other[i]
return spec_out
def __sub__(self, other):
"""Bandmath subtraction"""
spec_out = copy.deepcopy(self)
for i in range(len(self)):
spec_out[i] = self[i] - other[i]
return spec_out
def __mul__(self, other):
"""Bandmath multiplication"""
spec_out = copy.deepcopy(self)
for i in range(len(self)):
spec_out[i] = self[i] * other[i]
return spec_out
def __truediv__(self, other):
"""Bandmath division"""
spec_out = copy.deepcopy(self)
for i in range(len(self)):
spec_out[i] = self[i] / other[i]
return spec_out
def rv_shift(self, velocity):
"""
Shift velocity of spectrum in km s^-1
"""
spec_out = copy.deepcopy(self)
for i in range(len(self)):
spec_out[i] = self[i].rv_shift(velocity)
return spec_out
|
[
"logging.getLogger",
"numpy.sqrt",
"celerite2.GaussianProcess",
"numpy.log",
"scipy.signal.savgol_filter",
"scipy.interpolate.interp1d",
"numpy.isfinite",
"specutils.spectra.spectral_region.SpectralRegion",
"copy.deepcopy",
"numpy.arange",
"numpy.mean",
"numpy.exp",
"astropy.nddata.StdDevUncertainty",
"muler.utilities.resample_list",
"warnings.simplefilter",
"numpy.abs",
"numpy.nanstd",
"celerite2.terms.SHOTerm",
"scipy.optimize.minimize",
"numpy.any",
"h5py.File",
"muler.utilities.apply_numpy_mask",
"numpy.isnan",
"astropy.coordinates.EarthLocation.of_site",
"warnings.filterwarnings",
"astropy.modeling.physical_models.BlackBody",
"astropy.units.Quantity",
"scipy.stats.median_abs_deviation",
"numpy.median",
"numpy.nanmedian",
"warnings.catch_warnings",
"astropy.coordinates.SkyCoord",
"numpy.append",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"numpy.nan_to_num"
] |
[((1464, 1491), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1481, 1491), False, 'import logging\n'), ((1543, 1598), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'VerifyWarning'}), "('ignore', category=VerifyWarning)\n", (1564, 1598), False, 'import warnings\n'), ((1662, 1761), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'astropy.utils.exceptions.AstropyDeprecationWarning'}), "('ignore', category=astropy.utils.exceptions.\n AstropyDeprecationWarning)\n", (1685, 1761), False, 'import warnings\n'), ((1763, 1823), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FITSFixedWarning'}), "('ignore', category=FITSFixedWarning)\n", (1786, 1823), False, 'import warnings\n'), ((1885, 1943), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (1908, 1943), False, 'import warnings\n'), ((1950, 1975), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1973, 1975), False, 'import warnings\n'), ((1981, 2014), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2004, 2014), False, 'import warnings\n'), ((3986, 4023), 'astropy.coordinates.EarthLocation.of_site', 'EarthLocation.of_site', (['self.site_name'], {}), '(self.site_name)\n', (4007, 4023), False, 'from astropy.coordinates import SkyCoord, EarthLocation\n'), ((4037, 4071), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'self.RA', 'dec': 'self.DEC'}), '(ra=self.RA, dec=self.DEC)\n', (4045, 4071), False, 'from astropy.coordinates import SkyCoord, EarthLocation\n'), ((5361, 5390), 'numpy.nanmedian', 'np.nanmedian', (['spec.flux.value'], {}), '(spec.flux.value)\n', (5373, 5390), True, 'import numpy as np\n'), ((5482, 5506), 'copy.deepcopy', 'copy.deepcopy', (['spec.meta'], {}), '(spec.meta)\n', (5495, 5506), False, 'import copy\n'), ((9409, 9431), 'numpy.isfinite', 'np.isfinite', (['self.flux'], {}), '(self.flux)\n', (9420, 9431), True, 'import numpy as np\n'), ((9605, 9625), 'numpy.arange', 'np.arange', (['(0)', 'niters'], {}), '(0, niters)\n', (9614, 9625), True, 'import numpy as np\n'), ((12451, 12470), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (12464, 12470), False, 'import copy\n'), ((16645, 16693), 'celerite2.terms.SHOTerm', 'terms.SHOTerm', ([], {'sigma': '(0.01)', 'rho': 'bandwidth', 'Q': '(0.25)'}), '(sigma=0.01, rho=bandwidth, Q=0.25)\n', (16658, 16693), False, 'from celerite2 import terms\n'), ((16707, 16750), 'celerite2.GaussianProcess', 'celerite2.GaussianProcess', (['kernel'], {'mean': '(0.0)'}), '(kernel, mean=0.0)\n', (16732, 16750), False, 'import celerite2\n'), ((19790, 19845), 'scipy.stats.median_abs_deviation', 'median_abs_deviation', (['residual.value'], {'nan_policy': '"""omit"""'}), "(residual.value, nan_policy='omit')\n", (19810, 19845), False, 'from scipy.stats import median_abs_deviation\n'), ((21461, 21497), 'scipy.stats.median_abs_deviation', 'median_abs_deviation', (['residual.value'], {}), '(residual.value)\n', (21481, 21497), False, 'from scipy.stats import median_abs_deviation\n'), ((22372, 22396), 'h5py.File', 'h5py.File', (['out_path', '"""w"""'], {}), "(out_path, 'w')\n", (22381, 22396), False, 'import h5py\n'), ((22930, 22969), 'muler.utilities.resample_list', 'resample_list', (['self', 'specList'], {}), '(self, specList, **kwargs)\n', (22943, 22969), False, 'from muler.utilities import 
apply_numpy_mask, resample_list\n'), ((23345, 23373), 'muler.utilities.apply_numpy_mask', 'apply_numpy_mask', (['self', 'mask'], {}), '(self, mask)\n', (23361, 23373), False, 'from muler.utilities import apply_numpy_mask, resample_list\n'), ((25332, 25351), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (25345, 25351), False, 'import copy\n'), ((25567, 25586), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (25580, 25586), False, 'import copy\n'), ((25732, 25765), 'astropy.modeling.physical_models.BlackBody', 'BlackBody', ([], {'temperature': '(Teff * u.K)'}), '(temperature=Teff * u.K)\n', (25741, 25765), False, 'from astropy.modeling.physical_models import BlackBody\n'), ((26665, 26684), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (26678, 26684), False, 'import copy\n'), ((27387, 27414), 'copy.deepcopy', 'copy.deepcopy', (['spec[0].meta'], {}), '(spec[0].meta)\n', (27400, 27414), False, 'import copy\n'), ((28993, 29012), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29006, 29012), False, 'import copy\n'), ((29202, 29221), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29215, 29221), False, 'import copy\n'), ((29414, 29433), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29427, 29433), False, 'import copy\n'), ((29624, 29643), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29637, 29643), False, 'import copy\n'), ((29872, 29891), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29885, 29891), False, 'import copy\n'), ((6255, 6288), 'astropy.modeling.physical_models.BlackBody', 'BlackBody', ([], {'temperature': '(Teff * u.K)'}), '(temperature=Teff * u.K)\n', (6264, 6288), False, 'from astropy.modeling.physical_models import BlackBody\n'), ((6338, 6356), 'numpy.mean', 'np.mean', (['blackbody'], {}), '(blackbody)\n', (6345, 6356), True, 'import numpy as np\n'), ((6425, 6445), 'numpy.median', 'np.median', (['wl_scaled'], {}), '(wl_scaled)\n', (6434, 6445), True, 'import numpy as np\n'), ((9354, 9374), 'copy.deepcopy', 'copy.deepcopy', (['(~mask)'], {}), '(~mask)\n', (9367, 9374), False, 'import copy\n'), ((10379, 10398), 'numpy.append', 'np.append', (['[0]', 'cut'], {}), '([0], cut)\n', (10388, 10398), True, 'import numpy as np\n'), ((12162, 12257), 'scipy.interpolate.interp1d', 'interp1d', (['self.wavelength.value[mask][mask1]', 'trend_signal[mask1]'], {'fill_value': '"""extrapolate"""'}), "(self.wavelength.value[mask][mask1], trend_signal[mask1],\n fill_value='extrapolate')\n", (12170, 12257), False, 'from scipy.interpolate import UnivariateSpline, interp1d\n'), ((12535, 12560), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (12558, 12560), False, 'import warnings\n'), ((12621, 12668), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (12642, 12668), False, 'import warnings\n'), ((17433, 17502), 'scipy.optimize.minimize', 'minimize', (['neg_log_like', 'initial_params'], {'method': '"""L-BFGS-B"""', 'args': '(gp,)'}), "(neg_log_like, initial_params, method='L-BFGS-B', args=(gp,))\n", (17441, 17502), False, 'from scipy.optimize import minimize\n'), ((18929, 18961), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (18941, 18961), True, 'import matplotlib.pyplot as plt\n'), ((23487, 23540), 'muler.utilities.apply_numpy_mask', 'apply_numpy_mask', (['spec.meta[ancillary_spectrum]', 'mask'], {}), 
'(spec.meta[ancillary_spectrum], mask)\n', (23503, 23540), False, 'from muler.utilities import apply_numpy_mask, resample_list\n'), ((24029, 24059), 'numpy.nanmedian', 'np.nanmedian', (['self[index].flux'], {}), '(self[index].flux)\n', (24041, 24059), True, 'import numpy as np\n'), ((25668, 25704), 'numpy.nanmedian', 'np.nanmedian', (['self[index].wavelength'], {}), '(self[index].wavelength)\n', (25680, 25704), True, 'import numpy as np\n'), ((27189, 27211), 'astropy.nddata.StdDevUncertainty', 'StdDevUncertainty', (['unc'], {}), '(unc)\n', (27206, 27211), False, 'from astropy.nddata import StdDevUncertainty\n'), ((4972, 5000), 'specutils.spectra.spectral_region.SpectralRegion', 'SpectralRegion', (['lower', 'upper'], {}), '(lower, upper)\n', (4986, 5000), False, 'from specutils.spectra.spectral_region import SpectralRegion\n'), ((9546, 9566), 'numpy.nanstd', 'np.nanstd', (['self.flux'], {}), '(self.flux)\n', (9555, 9566), True, 'import numpy as np\n'), ((10156, 10181), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10179, 10181), False, 'import warnings\n'), ((10230, 10277), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (10251, 10277), False, 'import warnings\n'), ((10935, 10991), 'numpy.any', 'np.any', (['[window_length > h - l, h - l < break_tolerance]'], {}), '([window_length > h - l, h - l < break_tolerance])\n', (10941, 10991), True, 'import numpy as np\n'), ((13281, 13300), 'numpy.isnan', 'np.isnan', (['self.flux'], {}), '(self.flux)\n', (13289, 13300), True, 'import numpy as np\n'), ((13546, 13570), 'numpy.nan_to_num', 'np.nan_to_num', (['self.flux'], {}), '(self.flux)\n', (13559, 13570), True, 'import numpy as np\n'), ((16977, 16995), 'numpy.exp', 'np.exp', (['params[1:]'], {}), '(params[1:])\n', (16983, 16995), True, 'import numpy as np\n'), ((17024, 17074), 'celerite2.terms.SHOTerm', 'terms.SHOTerm', ([], {'sigma': 'theta[0]', 'rho': 'theta[1]', 'Q': '(0.5)'}), '(sigma=theta[0], rho=theta[1], Q=0.5)\n', (17037, 17074), False, 'from celerite2 import terms\n'), ((17361, 17370), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (17367, 17370), True, 'import numpy as np\n'), ((17372, 17385), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (17378, 17385), True, 'import numpy as np\n'), ((17387, 17398), 'numpy.log', 'np.log', (['(5.0)'], {}), '(5.0)\n', (17393, 17398), True, 'import numpy as np\n'), ((17400, 17412), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (17406, 17412), True, 'import numpy as np\n'), ((17874, 17914), 'numpy.zeros_like', 'np.zeros_like', (['mean_model'], {'dtype': 'np.bool'}), '(mean_model, dtype=np.bool)\n', (17887, 17914), True, 'import numpy as np\n'), ((17933, 17957), 'copy.deepcopy', 'copy.deepcopy', (['self.meta'], {}), '(self.meta)\n', (17946, 17957), False, 'import copy\n'), ((19870, 19892), 'numpy.abs', 'np.abs', (['residual.value'], {}), '(residual.value)\n', (19876, 19892), True, 'import numpy as np\n'), ((11037, 11071), 'numpy.nanmedian', 'np.nanmedian', (['self.flux[mask][l:h]'], {}), '(self.flux[mask][l:h])\n', (11049, 11071), True, 'import numpy as np\n'), ((11954, 11992), 'numpy.abs', 'np.abs', (['(self.flux[mask] - trend_signal)'], {}), '(self.flux[mask] - trend_signal)\n', (11960, 11992), True, 'import numpy as np\n'), ((12014, 12055), 'numpy.nanstd', 'np.nanstd', (['(self.flux[mask] - trend_signal)'], {}), '(self.flux[mask] - trend_signal)\n', (12023, 12055), True, 'import numpy as np\n'), ((16518, 16547), 'numpy.nanmedian', 
'np.nanmedian', (['self.flux.value'], {}), '(self.flux.value)\n', (16530, 16547), True, 'import numpy as np\n'), ((2869, 2903), 'numpy.sqrt', 'np.sqrt', (['self.uncertainty.quantity'], {}), '(self.uncertainty.quantity)\n', (2876, 2903), True, 'import numpy as np\n'), ((9503, 9526), 'numpy.nanmedian', 'np.nanmedian', (['self.flux'], {}), '(self.flux)\n', (9515, 9526), True, 'import numpy as np\n'), ((11219, 11244), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (11242, 11244), False, 'import warnings\n'), ((11270, 11316), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (11291, 11316), False, 'import warnings\n'), ((11349, 11456), 'scipy.signal.savgol_filter', 'savgol_filter', ([], {'x': 'self.flux.value[mask][l:h]', 'window_length': 'window_length', 'polyorder': 'polyorder'}), '(x=self.flux.value[mask][l:h], window_length=window_length,\n polyorder=polyorder, **kwargs)\n', (11362, 11456), False, 'from scipy.signal import savgol_filter\n'), ((11636, 11670), 'astropy.units.Quantity', 'Quantity', (['trsig', 'trend_signal.unit'], {}), '(trsig, trend_signal.unit)\n', (11644, 11670), False, 'from astropy.units import Quantity\n'), ((10334, 10352), 'numpy.nanmedian', 'np.nanmedian', (['dlam'], {}), '(dlam)\n', (10346, 10352), True, 'import numpy as np\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Hillas shower parametrization.
TODO:
-----
- Should have a separate function or option to compute 3rd order
moments + asymmetry (which are not always needed)
- remove alpha calculation (which is only about (0,0)), and make a get
alpha function that does it from an arbitrary point given a
pre-computed list of parameters
"""
import numpy as np
from astropy.units import Quantity
from collections import namedtuple
__all__ = [
'MomentParameters',
'HighOrderMomentParameters',
'hillas_parameters',
]
MomentParameters = namedtuple(
"MomentParameters",
"size,cen_x,cen_y,length,width,r,phi,psi,miss"
)
"""Shower moment parameters up to second order.
See also
--------
HighOrderMomentParameters, hillas_parameters, hillas_parameters_2
"""
HighOrderMomentParameters = namedtuple(
"HighOrderMomentParameters",
"skewness,kurtosis,asymmetry"
)
"""Shower moment parameters of third order.
See also
--------
MomentParameters, hillas_parameters, hillas_parameters_2
"""
def hillas_parameters_1(pix_x, pix_y, image):
"""Compute Hillas parameters for a given shower image.
    Reference: Appendix of the Whipple Crab paper Weekes et al. (1989)
http://adsabs.harvard.edu/abs/1989ApJ...342..379W
(corrected for some obvious typos)
Parameters
----------
pix_x : array_like
Pixel x-coordinate
pix_y : array_like
Pixel y-coordinate
image : array_like
Pixel values corresponding
Returns
-------
hillas_parameters : `MomentParameters`
"""
pix_x = Quantity(np.asanyarray(pix_x, dtype=np.float64)).value
pix_y = Quantity(np.asanyarray(pix_y, dtype=np.float64)).value
image = np.asanyarray(image, dtype=np.float64)
assert pix_x.shape == image.shape
assert pix_y.shape == image.shape
# Compute image moments
_s = np.sum(image)
m_x = np.sum(image * pix_x) / _s
m_y = np.sum(image * pix_y) / _s
m_xx = np.sum(image * pix_x * pix_x) / _s # note: typo in paper
m_yy = np.sum(image * pix_y * pix_y) / _s
m_xy = np.sum(image * pix_x * pix_y) / _s # note: typo in paper
# Compute major axis line representation y = a * x + b
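    # S_xx, S_yy, S_xy are the second central moments (covariance elements) of the intensity-weighted image.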
S_xx = m_xx - m_x * m_x
S_yy = m_yy - m_y * m_y
S_xy = m_xy - m_x * m_y
d = S_yy - S_xx
temp = d * d + 4 * S_xy * S_xy
a = (d + np.sqrt(temp)) / (2 * S_xy)
b = m_y - a * m_x
# Compute Hillas parameters
width_2 = (S_yy + a * a * S_xx - 2 * a * S_xy) / (1 + a * a)
width = np.sqrt(width_2)
length_2 = (S_xx + a * a * S_yy + 2 * a * S_xy) / (1 + a * a)
length = np.sqrt(length_2)
miss = np.abs(b / (1 + a * a))
r = np.sqrt(m_x * m_x + m_y * m_y)
phi = np.arctan2(m_y, m_x)
# Compute azwidth by transforming to (p, q) coordinates
sin_theta = m_y / r
cos_theta = m_x / r
q = (m_x - pix_x) * sin_theta + (pix_y - m_y) * cos_theta
m_q = np.sum(image * q) / _s
m_qq = np.sum(image * q * q) / _s
azwidth_2 = m_qq - m_q * m_q
azwidth = np.sqrt(azwidth_2)
return MomentParameters(size=_s, cen_x=m_x, cen_y=m_y, length=length,
width=width, r=r, phi=phi, psi=None, miss=miss)
def hillas_parameters_2(pix_x, pix_y, image):
"""Compute Hillas parameters for a given shower image.
Alternate implementation of `hillas_parameters` ...
    in the end we'll just keep one, but we're using Hillas parameter
computation as an example for performance checks.
Parameters
----------
pix_x : array_like
Pixel x-coordinate
pix_y : array_like
Pixel y-coordinate
image : array_like
Pixel values corresponding
Returns
-------
hillas_parameters : `MomentParameters`
"""
pix_x = Quantity(np.asanyarray(pix_x, dtype=np.float64)).value
pix_y = Quantity(np.asanyarray(pix_y, dtype=np.float64)).value
image = np.asanyarray(image, dtype=np.float64)
assert pix_x.shape == image.shape
assert pix_y.shape == image.shape
# Compute image moments (done in a bit faster way, but putting all
# into one 2D array, where each row will be summed to calculate a
# moment) However, this doesn't avoid a temporary created for the
# 2D array
size = image.sum()
momdata = np.row_stack([pix_x,
pix_y,
pix_x * pix_x,
pix_y * pix_y,
pix_x * pix_y]) * image
moms = momdata.sum(axis=1) / size
# calculate variances
vx2 = moms[2] - moms[0] ** 2
vy2 = moms[3] - moms[1] ** 2
vxy = moms[4] - moms[0] * moms[1]
# common factors:
dd = vy2 - vx2
zz = np.sqrt(dd ** 2 + 4.0 * vxy ** 2)
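    # zz is the difference of the two eigenvalues of the covariance matrix [[vx2, vxy], [vxy, vy2]].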
# miss
uu = 1.0 + dd / zz
vv = 2.0 - uu
miss = np.sqrt((uu * moms[0] ** 2 + vv * moms[1] ** 2) / 2.0
- moms[0] * moms[1] * 2.0 * vxy / zz)
# shower shape parameters
width = np.sqrt(vx2 + vy2 - zz)
length = np.sqrt(vx2 + vy2 + zz)
azwidth = np.sqrt(moms[2] + moms[3] - zz)
# rotation angle of ellipse relative to centroid
tanpsi_numer = (dd + zz) * moms[1] + 2.0 * vxy * moms[0]
tanpsi_denom = (2 * vxy * moms[1]) - (dd - zz) * moms[0]
psi = np.pi / 2.0 + np.arctan2(tanpsi_numer, tanpsi_denom)
# polar coordinates of centroid
rr = np.hypot(moms[0], moms[1])
phi = np.arctan2(moms[1], moms[0])
return MomentParameters(size=size, cen_x=moms[0], cen_y=moms[1],
length=length, width=width, r=rr, phi=phi,
psi=psi, miss=miss)
# use the 2 version by default
hillas_parameters = hillas_parameters_2
|
[
"numpy.abs",
"collections.namedtuple",
"numpy.sqrt",
"numpy.asanyarray",
"numpy.sum",
"numpy.arctan2",
"numpy.row_stack",
"numpy.hypot"
] |
[((608, 686), 'collections.namedtuple', 'namedtuple', (['"""MomentParameters"""', '"""size,cen_x,cen_y,length,width,r,phi,psi,miss"""'], {}), "('MomentParameters', 'size,cen_x,cen_y,length,width,r,phi,psi,miss')\n", (618, 686), False, 'from collections import namedtuple\n'), ((863, 933), 'collections.namedtuple', 'namedtuple', (['"""HighOrderMomentParameters"""', '"""skewness,kurtosis,asymmetry"""'], {}), "('HighOrderMomentParameters', 'skewness,kurtosis,asymmetry')\n", (873, 933), False, 'from collections import namedtuple\n'), ((1751, 1789), 'numpy.asanyarray', 'np.asanyarray', (['image'], {'dtype': 'np.float64'}), '(image, dtype=np.float64)\n', (1764, 1789), True, 'import numpy as np\n'), ((1904, 1917), 'numpy.sum', 'np.sum', (['image'], {}), '(image)\n', (1910, 1917), True, 'import numpy as np\n'), ((2548, 2564), 'numpy.sqrt', 'np.sqrt', (['width_2'], {}), '(width_2)\n', (2555, 2564), True, 'import numpy as np\n'), ((2644, 2661), 'numpy.sqrt', 'np.sqrt', (['length_2'], {}), '(length_2)\n', (2651, 2661), True, 'import numpy as np\n'), ((2673, 2696), 'numpy.abs', 'np.abs', (['(b / (1 + a * a))'], {}), '(b / (1 + a * a))\n', (2679, 2696), True, 'import numpy as np\n'), ((2705, 2735), 'numpy.sqrt', 'np.sqrt', (['(m_x * m_x + m_y * m_y)'], {}), '(m_x * m_x + m_y * m_y)\n', (2712, 2735), True, 'import numpy as np\n'), ((2746, 2766), 'numpy.arctan2', 'np.arctan2', (['m_y', 'm_x'], {}), '(m_y, m_x)\n', (2756, 2766), True, 'import numpy as np\n'), ((3056, 3074), 'numpy.sqrt', 'np.sqrt', (['azwidth_2'], {}), '(azwidth_2)\n', (3063, 3074), True, 'import numpy as np\n'), ((3925, 3963), 'numpy.asanyarray', 'np.asanyarray', (['image'], {'dtype': 'np.float64'}), '(image, dtype=np.float64)\n', (3938, 3963), True, 'import numpy as np\n'), ((4722, 4755), 'numpy.sqrt', 'np.sqrt', (['(dd ** 2 + 4.0 * vxy ** 2)'], {}), '(dd ** 2 + 4.0 * vxy ** 2)\n', (4729, 4755), True, 'import numpy as np\n'), ((4821, 4916), 'numpy.sqrt', 'np.sqrt', (['((uu * moms[0] ** 2 + vv * moms[1] ** 2) / 2.0 - moms[0] * moms[1] * 2.0 *\n vxy / zz)'], {}), '((uu * moms[0] ** 2 + vv * moms[1] ** 2) / 2.0 - moms[0] * moms[1] *\n 2.0 * vxy / zz)\n', (4828, 4916), True, 'import numpy as np\n'), ((4976, 4999), 'numpy.sqrt', 'np.sqrt', (['(vx2 + vy2 - zz)'], {}), '(vx2 + vy2 - zz)\n', (4983, 4999), True, 'import numpy as np\n'), ((5013, 5036), 'numpy.sqrt', 'np.sqrt', (['(vx2 + vy2 + zz)'], {}), '(vx2 + vy2 + zz)\n', (5020, 5036), True, 'import numpy as np\n'), ((5051, 5082), 'numpy.sqrt', 'np.sqrt', (['(moms[2] + moms[3] - zz)'], {}), '(moms[2] + moms[3] - zz)\n', (5058, 5082), True, 'import numpy as np\n'), ((5370, 5396), 'numpy.hypot', 'np.hypot', (['moms[0]', 'moms[1]'], {}), '(moms[0], moms[1])\n', (5378, 5396), True, 'import numpy as np\n'), ((5407, 5435), 'numpy.arctan2', 'np.arctan2', (['moms[1]', 'moms[0]'], {}), '(moms[1], moms[0])\n', (5417, 5435), True, 'import numpy as np\n'), ((1928, 1949), 'numpy.sum', 'np.sum', (['(image * pix_x)'], {}), '(image * pix_x)\n', (1934, 1949), True, 'import numpy as np\n'), ((1965, 1986), 'numpy.sum', 'np.sum', (['(image * pix_y)'], {}), '(image * pix_y)\n', (1971, 1986), True, 'import numpy as np\n'), ((2003, 2032), 'numpy.sum', 'np.sum', (['(image * pix_x * pix_x)'], {}), '(image * pix_x * pix_x)\n', (2009, 2032), True, 'import numpy as np\n'), ((2072, 2101), 'numpy.sum', 'np.sum', (['(image * pix_y * pix_y)'], {}), '(image * pix_y * pix_y)\n', (2078, 2101), True, 'import numpy as np\n'), ((2118, 2147), 'numpy.sum', 'np.sum', (['(image * pix_x * pix_y)'], {}), '(image * pix_x * pix_y)\n', 
(2124, 2147), True, 'import numpy as np\n'), ((2948, 2965), 'numpy.sum', 'np.sum', (['(image * q)'], {}), '(image * q)\n', (2954, 2965), True, 'import numpy as np\n'), ((2982, 3003), 'numpy.sum', 'np.sum', (['(image * q * q)'], {}), '(image * q * q)\n', (2988, 3003), True, 'import numpy as np\n'), ((4305, 4378), 'numpy.row_stack', 'np.row_stack', (['[pix_x, pix_y, pix_x * pix_x, pix_y * pix_y, pix_x * pix_y]'], {}), '([pix_x, pix_y, pix_x * pix_x, pix_y * pix_y, pix_x * pix_y])\n', (4317, 4378), True, 'import numpy as np\n'), ((5284, 5322), 'numpy.arctan2', 'np.arctan2', (['tanpsi_numer', 'tanpsi_denom'], {}), '(tanpsi_numer, tanpsi_denom)\n', (5294, 5322), True, 'import numpy as np\n'), ((1626, 1664), 'numpy.asanyarray', 'np.asanyarray', (['pix_x'], {'dtype': 'np.float64'}), '(pix_x, dtype=np.float64)\n', (1639, 1664), True, 'import numpy as np\n'), ((1693, 1731), 'numpy.asanyarray', 'np.asanyarray', (['pix_y'], {'dtype': 'np.float64'}), '(pix_y, dtype=np.float64)\n', (1706, 1731), True, 'import numpy as np\n'), ((2388, 2401), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (2395, 2401), True, 'import numpy as np\n'), ((3800, 3838), 'numpy.asanyarray', 'np.asanyarray', (['pix_x'], {'dtype': 'np.float64'}), '(pix_x, dtype=np.float64)\n', (3813, 3838), True, 'import numpy as np\n'), ((3867, 3905), 'numpy.asanyarray', 'np.asanyarray', (['pix_y'], {'dtype': 'np.float64'}), '(pix_y, dtype=np.float64)\n', (3880, 3905), True, 'import numpy as np\n')]
|
import pandas as pd
from matplotlib import pyplot as plt
from scipy.interpolate import make_interp_spline  # scipy.interpolate.spline was removed from modern SciPy
from scipy.ndimage.filters import gaussian_filter1d
import numpy as np
#%%
# TZ numbers via https://www.fangraphs.com/leaders.aspx?pos=3b&stats=fld&lg=all&qual=y&type=0&season=2017&month=0&season1=1961&ind=1&team=0&rost=0&age=0&filter=&players=0
df = pd.read_csv(r'TZ.csv')
df = df[['Season','Name','TZ','playerid']]
# For Total Zone delete anything past 2002
df = df.loc[df.Season < 2002]
# UZR numbers via https://www.fangraphs.com/leaders.aspx?pos=3b&stats=fld&lg=all&qual=y&type=1&season=2017&month=0&season1=2002&ind=1&team=0&rost=0&age=0&filter=&players=0
df2 = pd.read_csv(r'UZR.csv')
df2 = df2[['Season','Name','UZR','playerid']]
df2.columns = ['Season','Name','TZ','playerid']
df = pd.concat([df,df2])
df = df.sort_values(by='Season')
#%%
# Figure out which names to use
# by looking at max TZ for each year
gb = df.groupby(['Season'])['TZ'].max()
gb = gb.reset_index()
gb = gb.sort_values(by='Season')
df3 = df.merge(gb)
df3 = df3.sort_values(by='Season')
df3 = df3.loc[df3['TZ'] > 15]
names = ['<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', 'Evan Longoria', '<NAME>']
colors = ['blue','lightblue','lightblue','blue','lightblue','lightblue','blue','lightblue','gold','lightblue','lightblue']
fig = plt.figure()
SPLINE_FACTOR = 1.6
# Go through and plot each player
for i,name in enumerate(names):
#print(name)
col = colors[i]
linewidth = 1
if col != 'lightblue':
linewidth=2
df_name = df.loc[df['Name'] == name]
df_name.index = range(len(df_name))
    xnew = np.linspace(df_name.Season.min(),df_name.Season.max(),300) #300 points between Season.min and Season.max
#tz_smooth = spline(df_name.Season,df_name.TZ,xnew)
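    # Smooth each career curve: Gaussian-filter the raw TZ/UZR values (plus a 3-year moving average for the
    # plotted curve), then fit an interpolating spline evaluated on the 300-point grid.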
    tz_smooth = make_interp_spline(df_name.Season,gaussian_filter1d(df_name.TZ,SPLINE_FACTOR))(xnew)
tz_convolved = np.convolve(df_name.TZ,np.ones(3,)/3,mode='same')
    tz_smooth2 = make_interp_spline(df_name.Season,gaussian_filter1d(tz_convolved,SPLINE_FACTOR))(xnew)
#plt.plot(xnew,tz_smooth,label=name)#,color=col)
#plt.plot(df_name['Season'],df_name['TZ'],label=name)#,color=col)
#plt.plot(df_name['Season'],tz_convolved,label=name)#,color=col)
plt.plot(xnew,tz_smooth2,label=name,color=col,linewidth=linewidth)
#print df_name['TZ']
# Axis labeling and such
#plt.legend(fontsize=8,loc='lower left')
plt.ylabel('TZ/UZR')
plt.xlabel('Year')
plt.xlim((1960,2017))
plt.title('Third Base Defense')
plt.tight_layout()
# Save the plot as PNG
filename = '3b_defense.png'
fig.savefig(filename,dpi=400)
|
[
"numpy.ones",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"scipy.ndimage.filters.gaussian_filter1d",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"pandas.concat"
] |
[((347, 368), 'pandas.read_csv', 'pd.read_csv', (['"""TZ.csv"""'], {}), "('TZ.csv')\n", (358, 368), True, 'import pandas as pd\n'), ((666, 688), 'pandas.read_csv', 'pd.read_csv', (['"""UZR.csv"""'], {}), "('UZR.csv')\n", (677, 688), True, 'import pandas as pd\n'), ((791, 811), 'pandas.concat', 'pd.concat', (['[df, df2]'], {}), '([df, df2])\n', (800, 811), True, 'import pandas as pd\n'), ((1373, 1385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1383, 1385), True, 'from matplotlib import pyplot as plt\n'), ((2468, 2488), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TZ/UZR"""'], {}), "('TZ/UZR')\n", (2478, 2488), True, 'from matplotlib import pyplot as plt\n'), ((2489, 2507), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (2499, 2507), True, 'from matplotlib import pyplot as plt\n'), ((2508, 2530), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1960, 2017)'], {}), '((1960, 2017))\n', (2516, 2530), True, 'from matplotlib import pyplot as plt\n'), ((2530, 2561), 'matplotlib.pyplot.title', 'plt.title', (['"""Third Base Defense"""'], {}), "('Third Base Defense')\n", (2539, 2561), True, 'from matplotlib import pyplot as plt\n'), ((2562, 2580), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2578, 2580), True, 'from matplotlib import pyplot as plt\n'), ((2305, 2375), 'matplotlib.pyplot.plot', 'plt.plot', (['xnew', 'tz_smooth2'], {'label': 'name', 'color': 'col', 'linewidth': 'linewidth'}), '(xnew, tz_smooth2, label=name, color=col, linewidth=linewidth)\n', (2313, 2375), True, 'from matplotlib import pyplot as plt\n'), ((1895, 1939), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['df_name.TZ', 'SPLINE_FACTOR'], {}), '(df_name.TZ, SPLINE_FACTOR)\n', (1912, 1939), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((2053, 2099), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['tz_convolved', 'SPLINE_FACTOR'], {}), '(tz_convolved, SPLINE_FACTOR)\n', (2070, 2099), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((1987, 1997), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1994, 1997), True, 'import numpy as np\n')]
|
# -*- python -*-
import math
import numpy
import Shadow
from Shadow.ShadowPreprocessorsXraylib import prerefl, pre_mlayer, bragg
from srxraylib.sources import srfunc
from sirepo.template import transfer_mat_bl
from pykern.pkcollections import PKDict
from pykern import pkjson
sigmax = 0.0045000000000000005
sigdix = 2.913e-05
sigmaz = 0.0045000000000000005
sigdiz = 2.913e-05
beam_stats = []
epsilon = 1e-06
beam = transfer_mat_bl.create_mat_rays(epsilon)
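# 4x4 second-moment (sigma) matrix of the source in (x, x', z, z'); calculate_stats() propagates it
# through each beamline element as T * sigma * T^T.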
sigma_mat = numpy.matrix([
[sigmax ** 2, 0, 0, 0],
[0, sigdix ** 2, 0, 0],
[0, 0, sigmaz ** 2, 0],
[0, 0, 0, sigdiz ** 2],
])
alpha = 0
def calculate_stats(pos, oe):
global alpha
Tmat, x_prop_cen, xp_prop_cen, z_prop_cen, zp_prop_cen = transfer_mat_bl.tmat_calc(beam.duplicate(), epsilon)
res = Tmat * sigma_mat * numpy.transpose(Tmat)
pos += (oe.T_SOURCE if oe else 0)
if oe:
# oe.ALPHA is in radians after traceOE()
alpha = int(alpha + 180 / math.pi * oe.ALPHA) % 360
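    # Record the beam statistics at this longitudinal position; s and the spatial sigmas are converted from cm to m.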
beam_stats.append(PKDict(
isRotated=True if alpha == 90 or alpha == 270 else False,
s=pos * 1e-2,
x=x_prop_cen,
xp=xp_prop_cen,
z=z_prop_cen,
zp=zp_prop_cen,
matrix=Tmat.tolist(),
sigmax=math.sqrt(res[0, 0]) * 1e-2,
sigdix=math.sqrt(res[1, 1]),
sigmaz=math.sqrt(res[2, 2]) * 1e-2,
sigdiz=math.sqrt(res[3, 3]),
))
return pos
pos = calculate_stats(0, None)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 1)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 2)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 3)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 4)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 5)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 6)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 7)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 8)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 9)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 10)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 11)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 12)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 13)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 14)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 15)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 16)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 17)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 18)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 19)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 20)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 21)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 22)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 23)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 24)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 25)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 26)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 27)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 28)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 29)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 30)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 31)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 32)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 33)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 34)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 35)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 36)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 37)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 38)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 39)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 40)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 41)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 42)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 43)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 44)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 45)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 46)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 47)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 48)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 49)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 50)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 51)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 52)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 53)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 54)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 55)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 56)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 57)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 58)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 59)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 60)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 61)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 62)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 63)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 64)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 65)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 66)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 67)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 68)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 69)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 70)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 71)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 72)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 73)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 74)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 75)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 76)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 77)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 78)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 79)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 80)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 81)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 82)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 83)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 84)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 85)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 86)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 87)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 88)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 89)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 90)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 91)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 92)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 93)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 94)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 95)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 96)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 97)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 98)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 99)
pos = calculate_stats(pos, oe)
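# Element 100 is the only non-empty optic in the line: a focusing mirror (FMIRR=2) used as a cylinder
# (FCYL=1, CIL_ANG=90.0) with object/image distances SSOUR=2900 cm and SIMAG=1000 cm.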
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.FMIRR = 2
oe.ALPHA = 0
oe.FHIT_C = 0
oe.F_EXT = 0
oe.F_DEFAULT = 0
oe.SSOUR = 2900.0
oe.SIMAG = 1000.0
oe.THETA = 2.0002
oe.F_CONVEX = 0
oe.FCYL = 1
oe.CIL_ANG = 90.0
oe.T_INCIDENCE = 2.0
oe.T_REFLECTION = 2.0
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 100)
pos = calculate_stats(pos, oe)
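# After the mirror, the drift between consecutive empty elements drops from 28.5 cm to 1.0 cm so the
# beam statistics are sampled more finely downstream.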
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 101)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 102)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 103)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 104)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 105)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 106)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 107)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 108)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 109)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 110)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 111)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 112)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 113)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 114)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 115)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 116)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 117)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 118)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 119)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 120)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 121)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 122)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 123)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 124)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 125)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 126)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 127)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 128)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 129)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 130)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 131)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 132)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 133)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 134)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 135)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 136)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 137)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 138)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 139)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 140)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 141)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 142)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 143)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 144)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 145)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 146)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 147)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 148)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 149)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 150)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 151)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 152)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 153)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 154)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 155)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 156)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 157)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 158)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 159)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 160)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 161)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 162)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 163)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 164)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 165)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 166)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 167)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 168)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 169)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 170)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 171)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 172)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 173)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 174)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 175)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 176)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 177)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 178)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 179)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 180)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 181)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 182)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 183)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 184)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 185)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 186)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 187)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 188)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 189)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 190)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 191)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 192)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 193)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 194)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 195)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 196)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 197)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 198)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 199)
pos = calculate_stats(pos, oe)
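# Note (added): element 200 below is the only non-empty element in this stretch.
# Assuming standard SHADOW conventions, FMIRR=2 with FCYL=1 describes an
# ellipsoidal mirror bent cylindrically, auto-focused from SSOUR/SIMAG/THETA
# (F_EXT=0, F_DEFAULT=0), with a finite mirror aperture (FHIT_C=1) and a small
# transverse offset applied through F_MOVE/OFFX.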
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.FMIRR = 2
oe.ALPHA = 0
oe.FHIT_C = 1
oe.F_EXT = 0
oe.F_DEFAULT = 0
oe.SSOUR = 3000.0
oe.SIMAG = 900.0
oe.THETA = 2.0002
oe.F_CONVEX = 0
oe.FCYL = 1
oe.CIL_ANG = 0.0
oe.FSHAPE = 2
oe.RWIDX2 = 15.0
oe.RLEN2 = 25.0
oe.F_MOVE = 1
oe.OFFX = 1.0
oe.T_INCIDENCE = 2.0
oe.T_REFLECTION = 2.0
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 200)
pos = calculate_stats(pos, oe)
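# Note (added): the empty elements that follow (201 through 300) use a
# source-side distance of ~9.05 instead of 1.0, so the statistics are sampled
# at wider spacing along this part of the beamline.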
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 201)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 202)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 203)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 204)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 205)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 206)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 207)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 208)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 209)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 210)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 211)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 212)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 213)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 214)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 215)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 216)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 217)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 218)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 219)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 220)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 221)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 222)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 223)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 224)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 225)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 226)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 227)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 228)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 229)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 230)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 231)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 232)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 233)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 234)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 235)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 236)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 237)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 238)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 239)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 240)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 241)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 242)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 243)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 244)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 245)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 246)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 247)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 248)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 249)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 250)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 251)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 252)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 253)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 254)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 255)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 256)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 257)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 258)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 259)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 260)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 261)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 262)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 263)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 264)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 265)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 266)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 267)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 268)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 269)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 270)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 271)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 272)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 273)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 274)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 275)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 276)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 277)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 278)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 279)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 280)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 281)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 282)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 283)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 284)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 285)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 286)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 287)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 288)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 289)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 290)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 291)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 292)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 293)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 294)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 295)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 296)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 297)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 298)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.050000000000182
beam.traceOE(oe, 299)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty()
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 9.049999999981537
beam.traceOE(oe, 300)
pos = calculate_stats(pos, oe)
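# Note (added): from element 301 onward the spacing between sampling points
# shrinks to ~0.95 (same unspecified user units as above).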
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 301)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 302)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 303)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 304)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 305)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 306)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 307)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 308)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 309)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 310)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 311)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 312)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 313)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 314)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 315)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 316)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 317)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 318)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 319)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 320)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 321)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 322)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 323)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 324)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 325)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 326)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 327)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 328)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 329)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 330)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 331)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 332)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 333)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 334)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 335)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 336)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 337)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 338)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 339)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 340)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 341)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 342)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 343)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 344)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 345)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 346)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 347)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 348)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 349)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 350)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 351)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 352)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 353)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 354)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 355)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 356)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 357)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 358)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 359)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 360)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 361)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 362)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 363)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 364)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 365)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 366)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 367)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 368)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 369)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 370)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 371)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 372)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 373)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 374)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 375)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 376)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 377)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 378)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 379)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 380)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 381)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 382)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 383)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 384)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 385)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 386)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 387)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 388)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 389)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 390)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 391)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 392)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 393)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 394)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 395)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 396)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 397)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 398)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 399)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 0.9499999999998181
beam.traceOE(oe, 400)
pos = calculate_stats(pos, oe)
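# Note (added): element 401 below closes the beamline with an effectively
# zero-length drift (~1.9e-11) to the final observation plane.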
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty()
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.864464138634503e-11
beam.traceOE(oe, 401)
pos = calculate_stats(pos, oe)
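# Note (added): beam_stats is assumed to have been accumulated by
# calculate_stats() earlier in the generated script; it is written out as
# pretty-printed JSON, and the surviving rays of the final beam are plotted as
# an X-Z histogram (SHADOW columns 1 and 3, nolost=1).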
pkjson.dump_pretty(beam_stats, filename='beam_stats.json')
import Shadow.ShadowTools
Shadow.ShadowTools.plotxy(beam, 1, 3, nbins=100, nolost=1)
|
[
"pykern.pkjson.dump_pretty",
"Shadow.OE",
"Shadow.ShadowTools.plotxy",
"sirepo.template.transfer_mat_bl.create_mat_rays",
"math.sqrt",
"numpy.matrix",
"numpy.transpose"
] |
[((418, 458), 'sirepo.template.transfer_mat_bl.create_mat_rays', 'transfer_mat_bl.create_mat_rays', (['epsilon'], {}), '(epsilon)\n', (449, 458), False, 'from sirepo.template import transfer_mat_bl\n'), ((472, 586), 'numpy.matrix', 'numpy.matrix', (['[[sigmax ** 2, 0, 0, 0], [0, sigdix ** 2, 0, 0], [0, 0, sigmaz ** 2, 0], [0,\n 0, 0, sigdiz ** 2]]'], {}), '([[sigmax ** 2, 0, 0, 0], [0, sigdix ** 2, 0, 0], [0, 0, sigmaz **\n 2, 0], [0, 0, 0, sigdiz ** 2]])\n', (484, 586), False, 'import numpy\n'), ((1446, 1457), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (1455, 1457), False, 'import Shadow\n'), ((1602, 1613), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (1611, 1613), False, 'import Shadow\n'), ((1758, 1769), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (1767, 1769), False, 'import Shadow\n'), ((1914, 1925), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (1923, 1925), False, 'import Shadow\n'), ((2070, 2081), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (2079, 2081), False, 'import Shadow\n'), ((2226, 2237), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (2235, 2237), False, 'import Shadow\n'), ((2382, 2393), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (2391, 2393), False, 'import Shadow\n'), ((2538, 2549), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (2547, 2549), False, 'import Shadow\n'), ((2694, 2705), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (2703, 2705), False, 'import Shadow\n'), ((2850, 2861), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (2859, 2861), False, 'import Shadow\n'), ((3007, 3018), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3016, 3018), False, 'import Shadow\n'), ((3164, 3175), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3173, 3175), False, 'import Shadow\n'), ((3321, 3332), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3330, 3332), False, 'import Shadow\n'), ((3478, 3489), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3487, 3489), False, 'import Shadow\n'), ((3635, 3646), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3644, 3646), False, 'import Shadow\n'), ((3792, 3803), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3801, 3803), False, 'import Shadow\n'), ((3949, 3960), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (3958, 3960), False, 'import Shadow\n'), ((4106, 4117), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (4115, 4117), False, 'import Shadow\n'), ((4263, 4274), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (4272, 4274), False, 'import Shadow\n'), ((4420, 4431), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (4429, 4431), False, 'import Shadow\n'), ((4577, 4588), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (4586, 4588), False, 'import Shadow\n'), ((4734, 4745), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (4743, 4745), False, 'import Shadow\n'), ((4891, 4902), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (4900, 4902), False, 'import Shadow\n'), ((5048, 5059), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5057, 5059), False, 'import Shadow\n'), ((5205, 5216), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5214, 5216), False, 'import Shadow\n'), ((5362, 5373), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5371, 5373), False, 'import Shadow\n'), ((5519, 5530), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5528, 5530), False, 'import Shadow\n'), ((5676, 5687), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5685, 5687), False, 'import Shadow\n'), ((5833, 5844), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5842, 5844), False, 'import Shadow\n'), ((5990, 6001), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (5999, 6001), False, 'import Shadow\n'), ((6147, 6158), 'Shadow.OE', 'Shadow.OE', ([], {}), 
'()\n', (6156, 6158), False, 'import Shadow\n'), ((6304, 6315), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (6313, 6315), False, 'import Shadow\n'), ((6461, 6472), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (6470, 6472), False, 'import Shadow\n'), ((6618, 6629), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (6627, 6629), False, 'import Shadow\n'), ((6775, 6786), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (6784, 6786), False, 'import Shadow\n'), ((6932, 6943), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (6941, 6943), False, 'import Shadow\n'), ((7089, 7100), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (7098, 7100), False, 'import Shadow\n'), ((7246, 7257), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (7255, 7257), False, 'import Shadow\n'), ((7403, 7414), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (7412, 7414), False, 'import Shadow\n'), ((7560, 7571), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (7569, 7571), False, 'import Shadow\n'), ((7717, 7728), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (7726, 7728), False, 'import Shadow\n'), ((7874, 7885), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (7883, 7885), False, 'import Shadow\n'), ((8031, 8042), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8040, 8042), False, 'import Shadow\n'), ((8188, 8199), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8197, 8199), False, 'import Shadow\n'), ((8345, 8356), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8354, 8356), False, 'import Shadow\n'), ((8502, 8513), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8511, 8513), False, 'import Shadow\n'), ((8659, 8670), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8668, 8670), False, 'import Shadow\n'), ((8816, 8827), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8825, 8827), False, 'import Shadow\n'), ((8973, 8984), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (8982, 8984), False, 'import Shadow\n'), ((9130, 9141), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (9139, 9141), False, 'import Shadow\n'), ((9287, 9298), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (9296, 9298), False, 'import Shadow\n'), ((9444, 9455), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (9453, 9455), False, 'import Shadow\n'), ((9601, 9612), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (9610, 9612), False, 'import Shadow\n'), ((9758, 9769), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (9767, 9769), False, 'import Shadow\n'), ((9915, 9926), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (9924, 9926), False, 'import Shadow\n'), ((10072, 10083), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (10081, 10083), False, 'import Shadow\n'), ((10229, 10240), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (10238, 10240), False, 'import Shadow\n'), ((10386, 10397), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (10395, 10397), False, 'import Shadow\n'), ((10543, 10554), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (10552, 10554), False, 'import Shadow\n'), ((10700, 10711), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (10709, 10711), False, 'import Shadow\n'), ((10857, 10868), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (10866, 10868), False, 'import Shadow\n'), ((11014, 11025), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (11023, 11025), False, 'import Shadow\n'), ((11171, 11182), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (11180, 11182), False, 'import Shadow\n'), ((11328, 11339), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (11337, 11339), False, 'import Shadow\n'), ((11485, 11496), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (11494, 11496), False, 'import Shadow\n'), ((11642, 11653), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', 
(11651, 11653), False, 'import Shadow\n'), ((11799, 11810), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (11808, 11810), False, 'import Shadow\n'), ((11956, 11967), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (11965, 11967), False, 'import Shadow\n'), ((12113, 12124), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (12122, 12124), False, 'import Shadow\n'), ((12270, 12281), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (12279, 12281), False, 'import Shadow\n'), ((12427, 12438), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (12436, 12438), False, 'import Shadow\n'), ((12584, 12595), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (12593, 12595), False, 'import Shadow\n'), ((12741, 12752), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (12750, 12752), False, 'import Shadow\n'), ((12898, 12909), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (12907, 12909), False, 'import Shadow\n'), ((13055, 13066), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (13064, 13066), False, 'import Shadow\n'), ((13212, 13223), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (13221, 13223), False, 'import Shadow\n'), ((13369, 13380), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (13378, 13380), False, 'import Shadow\n'), ((13526, 13537), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (13535, 13537), False, 'import Shadow\n'), ((13683, 13694), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (13692, 13694), False, 'import Shadow\n'), ((13840, 13851), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (13849, 13851), False, 'import Shadow\n'), ((13997, 14008), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14006, 14008), False, 'import Shadow\n'), ((14154, 14165), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14163, 14165), False, 'import Shadow\n'), ((14311, 14322), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14320, 14322), False, 'import Shadow\n'), ((14468, 14479), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14477, 14479), False, 'import Shadow\n'), ((14625, 14636), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14634, 14636), False, 'import Shadow\n'), ((14782, 14793), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14791, 14793), False, 'import Shadow\n'), ((14939, 14950), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (14948, 14950), False, 'import Shadow\n'), ((15096, 15107), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (15105, 15107), False, 'import Shadow\n'), ((15253, 15264), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (15262, 15264), False, 'import Shadow\n'), ((15410, 15421), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (15419, 15421), False, 'import Shadow\n'), ((15567, 15578), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (15576, 15578), False, 'import Shadow\n'), ((15724, 15735), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (15733, 15735), False, 'import Shadow\n'), ((15881, 15892), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (15890, 15892), False, 'import Shadow\n'), ((16038, 16049), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16047, 16049), False, 'import Shadow\n'), ((16195, 16206), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16204, 16206), False, 'import Shadow\n'), ((16352, 16363), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16361, 16363), False, 'import Shadow\n'), ((16509, 16520), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16518, 16520), False, 'import Shadow\n'), ((16666, 16677), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16675, 16677), False, 'import Shadow\n'), ((16823, 16834), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16832, 16834), False, 'import Shadow\n'), ((16980, 16991), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (16989, 16991), 
False, 'import Shadow\n'), ((17329, 17340), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (17338, 17340), False, 'import Shadow\n'), ((17486, 17497), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (17495, 17497), False, 'import Shadow\n'), ((17643, 17654), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (17652, 17654), False, 'import Shadow\n'), ((17800, 17811), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (17809, 17811), False, 'import Shadow\n'), ((17957, 17968), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (17966, 17968), False, 'import Shadow\n'), ((18114, 18125), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (18123, 18125), False, 'import Shadow\n'), ((18271, 18282), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (18280, 18282), False, 'import Shadow\n'), ((18428, 18439), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (18437, 18439), False, 'import Shadow\n'), ((18585, 18596), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (18594, 18596), False, 'import Shadow\n'), ((18742, 18753), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (18751, 18753), False, 'import Shadow\n'), ((18899, 18910), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (18908, 18910), False, 'import Shadow\n'), ((19056, 19067), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (19065, 19067), False, 'import Shadow\n'), ((19213, 19224), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (19222, 19224), False, 'import Shadow\n'), ((19370, 19381), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (19379, 19381), False, 'import Shadow\n'), ((19527, 19538), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (19536, 19538), False, 'import Shadow\n'), ((19684, 19695), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (19693, 19695), False, 'import Shadow\n'), ((19841, 19852), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (19850, 19852), False, 'import Shadow\n'), ((19998, 20009), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20007, 20009), False, 'import Shadow\n'), ((20155, 20166), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20164, 20166), False, 'import Shadow\n'), ((20312, 20323), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20321, 20323), False, 'import Shadow\n'), ((20469, 20480), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20478, 20480), False, 'import Shadow\n'), ((20626, 20637), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20635, 20637), False, 'import Shadow\n'), ((20783, 20794), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20792, 20794), False, 'import Shadow\n'), ((20940, 20951), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (20949, 20951), False, 'import Shadow\n'), ((21097, 21108), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (21106, 21108), False, 'import Shadow\n'), ((21254, 21265), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (21263, 21265), False, 'import Shadow\n'), ((21411, 21422), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (21420, 21422), False, 'import Shadow\n'), ((21568, 21579), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (21577, 21579), False, 'import Shadow\n'), ((21725, 21736), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (21734, 21736), False, 'import Shadow\n'), ((21882, 21893), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (21891, 21893), False, 'import Shadow\n'), ((22039, 22050), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22048, 22050), False, 'import Shadow\n'), ((22196, 22207), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22205, 22207), False, 'import Shadow\n'), ((22353, 22364), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22362, 22364), False, 'import Shadow\n'), ((22510, 22521), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22519, 22521), False, 'import 
Shadow\n'), ((22667, 22678), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22676, 22678), False, 'import Shadow\n'), ((22824, 22835), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22833, 22835), False, 'import Shadow\n'), ((22981, 22992), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (22990, 22992), False, 'import Shadow\n'), ((23138, 23149), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (23147, 23149), False, 'import Shadow\n'), ((23295, 23306), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (23304, 23306), False, 'import Shadow\n'), ((23452, 23463), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (23461, 23463), False, 'import Shadow\n'), ((23609, 23620), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (23618, 23620), False, 'import Shadow\n'), ((23766, 23777), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (23775, 23777), False, 'import Shadow\n'), ((23923, 23934), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (23932, 23934), False, 'import Shadow\n'), ((24080, 24091), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (24089, 24091), False, 'import Shadow\n'), ((24237, 24248), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (24246, 24248), False, 'import Shadow\n'), ((24394, 24405), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (24403, 24405), False, 'import Shadow\n'), ((24551, 24562), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (24560, 24562), False, 'import Shadow\n'), ((24708, 24719), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (24717, 24719), False, 'import Shadow\n'), ((24865, 24876), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (24874, 24876), False, 'import Shadow\n'), ((25022, 25033), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25031, 25033), False, 'import Shadow\n'), ((25179, 25190), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25188, 25190), False, 'import Shadow\n'), ((25336, 25347), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25345, 25347), False, 'import Shadow\n'), ((25493, 25504), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25502, 25504), False, 'import Shadow\n'), ((25650, 25661), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25659, 25661), False, 'import Shadow\n'), ((25807, 25818), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25816, 25818), False, 'import Shadow\n'), ((25964, 25975), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (25973, 25975), False, 'import Shadow\n'), ((26121, 26132), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (26130, 26132), False, 'import Shadow\n'), ((26278, 26289), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (26287, 26289), False, 'import Shadow\n'), ((26435, 26446), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (26444, 26446), False, 'import Shadow\n'), ((26592, 26603), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (26601, 26603), False, 'import Shadow\n'), ((26749, 26760), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (26758, 26760), False, 'import Shadow\n'), ((26906, 26917), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (26915, 26917), False, 'import Shadow\n'), ((27063, 27074), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (27072, 27074), False, 'import Shadow\n'), ((27220, 27231), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (27229, 27231), False, 'import Shadow\n'), ((27377, 27388), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (27386, 27388), False, 'import Shadow\n'), ((27534, 27545), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (27543, 27545), False, 'import Shadow\n'), ((27691, 27702), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (27700, 27702), False, 'import Shadow\n'), ((27848, 27859), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (27857, 27859), False, 'import Shadow\n'), 
((28005, 28016), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28014, 28016), False, 'import Shadow\n'), ((28162, 28173), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28171, 28173), False, 'import Shadow\n'), ((28319, 28330), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28328, 28330), False, 'import Shadow\n'), ((28476, 28487), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28485, 28487), False, 'import Shadow\n'), ((28633, 28644), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28642, 28644), False, 'import Shadow\n'), ((28790, 28801), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28799, 28801), False, 'import Shadow\n'), ((28947, 28958), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (28956, 28958), False, 'import Shadow\n'), ((29104, 29115), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (29113, 29115), False, 'import Shadow\n'), ((29261, 29272), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (29270, 29272), False, 'import Shadow\n'), ((29418, 29429), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (29427, 29429), False, 'import Shadow\n'), ((29575, 29586), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (29584, 29586), False, 'import Shadow\n'), ((29732, 29743), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (29741, 29743), False, 'import Shadow\n'), ((29889, 29900), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (29898, 29900), False, 'import Shadow\n'), ((30046, 30057), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30055, 30057), False, 'import Shadow\n'), ((30203, 30214), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30212, 30214), False, 'import Shadow\n'), ((30360, 30371), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30369, 30371), False, 'import Shadow\n'), ((30517, 30528), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30526, 30528), False, 'import Shadow\n'), ((30674, 30685), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30683, 30685), False, 'import Shadow\n'), ((30831, 30842), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30840, 30842), False, 'import Shadow\n'), ((30988, 30999), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (30997, 30999), False, 'import Shadow\n'), ((31145, 31156), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (31154, 31156), False, 'import Shadow\n'), ((31302, 31313), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (31311, 31313), False, 'import Shadow\n'), ((31459, 31470), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (31468, 31470), False, 'import Shadow\n'), ((31616, 31627), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (31625, 31627), False, 'import Shadow\n'), ((31773, 31784), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (31782, 31784), False, 'import Shadow\n'), ((31930, 31941), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (31939, 31941), False, 'import Shadow\n'), ((32087, 32098), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (32096, 32098), False, 'import Shadow\n'), ((32244, 32255), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (32253, 32255), False, 'import Shadow\n'), ((32401, 32412), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (32410, 32412), False, 'import Shadow\n'), ((32558, 32569), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (32567, 32569), False, 'import Shadow\n'), ((32715, 32726), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (32724, 32726), False, 'import Shadow\n'), ((32872, 32883), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (32881, 32883), False, 'import Shadow\n'), ((33293, 33304), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (33302, 33304), False, 'import Shadow\n'), ((33464, 33475), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (33473, 33475), False, 'import Shadow\n'), ((33635, 33646), 
'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (33644, 33646), False, 'import Shadow\n'), ((33806, 33817), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (33815, 33817), False, 'import Shadow\n'), ((33977, 33988), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (33986, 33988), False, 'import Shadow\n'), ((34148, 34159), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (34157, 34159), False, 'import Shadow\n'), ((34319, 34330), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (34328, 34330), False, 'import Shadow\n'), ((34490, 34501), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (34499, 34501), False, 'import Shadow\n'), ((34661, 34672), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (34670, 34672), False, 'import Shadow\n'), ((34832, 34843), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (34841, 34843), False, 'import Shadow\n'), ((35003, 35014), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (35012, 35014), False, 'import Shadow\n'), ((35174, 35185), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (35183, 35185), False, 'import Shadow\n'), ((35345, 35356), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (35354, 35356), False, 'import Shadow\n'), ((35516, 35527), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (35525, 35527), False, 'import Shadow\n'), ((35687, 35698), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (35696, 35698), False, 'import Shadow\n'), ((35858, 35869), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (35867, 35869), False, 'import Shadow\n'), ((36029, 36040), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (36038, 36040), False, 'import Shadow\n'), ((36200, 36211), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (36209, 36211), False, 'import Shadow\n'), ((36371, 36382), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (36380, 36382), False, 'import Shadow\n'), ((36542, 36553), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (36551, 36553), False, 'import Shadow\n'), ((36713, 36724), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (36722, 36724), False, 'import Shadow\n'), ((36884, 36895), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (36893, 36895), False, 'import Shadow\n'), ((37055, 37066), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (37064, 37066), False, 'import Shadow\n'), ((37226, 37237), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (37235, 37237), False, 'import Shadow\n'), ((37397, 37408), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (37406, 37408), False, 'import Shadow\n'), ((37568, 37579), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (37577, 37579), False, 'import Shadow\n'), ((37739, 37750), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (37748, 37750), False, 'import Shadow\n'), ((37910, 37921), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (37919, 37921), False, 'import Shadow\n'), ((38081, 38092), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (38090, 38092), False, 'import Shadow\n'), ((38252, 38263), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (38261, 38263), False, 'import Shadow\n'), ((38423, 38434), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (38432, 38434), False, 'import Shadow\n'), ((38594, 38605), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (38603, 38605), False, 'import Shadow\n'), ((38765, 38776), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (38774, 38776), False, 'import Shadow\n'), ((38936, 38947), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (38945, 38947), False, 'import Shadow\n'), ((39107, 39118), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (39116, 39118), False, 'import Shadow\n'), ((39278, 39289), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (39287, 39289), False, 'import Shadow\n'), ((39449, 39460), 'Shadow.OE', 
'Shadow.OE', ([], {}), '()\n', (39458, 39460), False, 'import Shadow\n'), ((39620, 39631), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (39629, 39631), False, 'import Shadow\n'), ((39791, 39802), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (39800, 39802), False, 'import Shadow\n'), ((39962, 39973), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (39971, 39973), False, 'import Shadow\n'), ((40133, 40144), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (40142, 40144), False, 'import Shadow\n'), ((40304, 40315), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (40313, 40315), False, 'import Shadow\n'), ((40475, 40486), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (40484, 40486), False, 'import Shadow\n'), ((40646, 40657), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (40655, 40657), False, 'import Shadow\n'), ((40817, 40828), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (40826, 40828), False, 'import Shadow\n'), ((40988, 40999), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (40997, 40999), False, 'import Shadow\n'), ((41159, 41170), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (41168, 41170), False, 'import Shadow\n'), ((41330, 41341), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (41339, 41341), False, 'import Shadow\n'), ((41501, 41512), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (41510, 41512), False, 'import Shadow\n'), ((41672, 41683), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (41681, 41683), False, 'import Shadow\n'), ((41843, 41854), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (41852, 41854), False, 'import Shadow\n'), ((42014, 42025), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (42023, 42025), False, 'import Shadow\n'), ((42185, 42196), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (42194, 42196), False, 'import Shadow\n'), ((42356, 42367), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (42365, 42367), False, 'import Shadow\n'), ((42527, 42538), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (42536, 42538), False, 'import Shadow\n'), ((42698, 42709), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (42707, 42709), False, 'import Shadow\n'), ((42869, 42880), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (42878, 42880), False, 'import Shadow\n'), ((43040, 43051), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (43049, 43051), False, 'import Shadow\n'), ((43211, 43222), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (43220, 43222), False, 'import Shadow\n'), ((43382, 43393), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (43391, 43393), False, 'import Shadow\n'), ((43553, 43564), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (43562, 43564), False, 'import Shadow\n'), ((43724, 43735), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (43733, 43735), False, 'import Shadow\n'), ((43895, 43906), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (43904, 43906), False, 'import Shadow\n'), ((44066, 44077), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (44075, 44077), False, 'import Shadow\n'), ((44237, 44248), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (44246, 44248), False, 'import Shadow\n'), ((44408, 44419), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (44417, 44419), False, 'import Shadow\n'), ((44579, 44590), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (44588, 44590), False, 'import Shadow\n'), ((44750, 44761), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (44759, 44761), False, 'import Shadow\n'), ((44921, 44932), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (44930, 44932), False, 'import Shadow\n'), ((45092, 45103), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (45101, 45103), False, 'import Shadow\n'), ((45263, 45274), 'Shadow.OE', 'Shadow.OE', ([], 
{}), '()\n', (45272, 45274), False, 'import Shadow\n'), ((45434, 45445), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (45443, 45445), False, 'import Shadow\n'), ((45605, 45616), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (45614, 45616), False, 'import Shadow\n'), ((45776, 45787), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (45785, 45787), False, 'import Shadow\n'), ((45947, 45958), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (45956, 45958), False, 'import Shadow\n'), ((46118, 46129), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (46127, 46129), False, 'import Shadow\n'), ((46289, 46300), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (46298, 46300), False, 'import Shadow\n'), ((46460, 46471), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (46469, 46471), False, 'import Shadow\n'), ((46631, 46642), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (46640, 46642), False, 'import Shadow\n'), ((46802, 46813), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (46811, 46813), False, 'import Shadow\n'), ((46973, 46984), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (46982, 46984), False, 'import Shadow\n'), ((47144, 47155), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (47153, 47155), False, 'import Shadow\n'), ((47315, 47326), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (47324, 47326), False, 'import Shadow\n'), ((47486, 47497), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (47495, 47497), False, 'import Shadow\n'), ((47657, 47668), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (47666, 47668), False, 'import Shadow\n'), ((47828, 47839), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (47837, 47839), False, 'import Shadow\n'), ((47999, 48010), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (48008, 48010), False, 'import Shadow\n'), ((48170, 48181), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (48179, 48181), False, 'import Shadow\n'), ((48341, 48352), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (48350, 48352), False, 'import Shadow\n'), ((48512, 48523), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (48521, 48523), False, 'import Shadow\n'), ((48683, 48694), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (48692, 48694), False, 'import Shadow\n'), ((48854, 48865), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (48863, 48865), False, 'import Shadow\n'), ((49025, 49036), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (49034, 49036), False, 'import Shadow\n'), ((49196, 49207), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (49205, 49207), False, 'import Shadow\n'), ((49367, 49378), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (49376, 49378), False, 'import Shadow\n'), ((49538, 49549), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (49547, 49549), False, 'import Shadow\n'), ((49709, 49720), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (49718, 49720), False, 'import Shadow\n'), ((49880, 49891), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (49889, 49891), False, 'import Shadow\n'), ((50051, 50062), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (50060, 50062), False, 'import Shadow\n'), ((50222, 50233), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (50231, 50233), False, 'import Shadow\n'), ((50386, 50397), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (50395, 50397), False, 'import Shadow\n'), ((50558, 50569), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (50567, 50569), False, 'import Shadow\n'), ((50730, 50741), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (50739, 50741), False, 'import Shadow\n'), ((50902, 50913), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (50911, 50913), False, 'import Shadow\n'), ((51074, 51085), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', 
(51083, 51085), False, 'import Shadow\n'), ((51246, 51257), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (51255, 51257), False, 'import Shadow\n'), ((51418, 51429), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (51427, 51429), False, 'import Shadow\n'), ((51590, 51601), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (51599, 51601), False, 'import Shadow\n'), ((51762, 51773), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (51771, 51773), False, 'import Shadow\n'), ((51934, 51945), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (51943, 51945), False, 'import Shadow\n'), ((52106, 52117), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (52115, 52117), False, 'import Shadow\n'), ((52278, 52289), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (52287, 52289), False, 'import Shadow\n'), ((52450, 52461), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (52459, 52461), False, 'import Shadow\n'), ((52622, 52633), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (52631, 52633), False, 'import Shadow\n'), ((52794, 52805), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (52803, 52805), False, 'import Shadow\n'), ((52966, 52977), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (52975, 52977), False, 'import Shadow\n'), ((53138, 53149), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (53147, 53149), False, 'import Shadow\n'), ((53310, 53321), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (53319, 53321), False, 'import Shadow\n'), ((53482, 53493), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (53491, 53493), False, 'import Shadow\n'), ((53654, 53665), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (53663, 53665), False, 'import Shadow\n'), ((53826, 53837), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (53835, 53837), False, 'import Shadow\n'), ((53998, 54009), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (54007, 54009), False, 'import Shadow\n'), ((54170, 54181), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (54179, 54181), False, 'import Shadow\n'), ((54342, 54353), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (54351, 54353), False, 'import Shadow\n'), ((54514, 54525), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (54523, 54525), False, 'import Shadow\n'), ((54686, 54697), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (54695, 54697), False, 'import Shadow\n'), ((54858, 54869), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (54867, 54869), False, 'import Shadow\n'), ((55030, 55041), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (55039, 55041), False, 'import Shadow\n'), ((55202, 55213), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (55211, 55213), False, 'import Shadow\n'), ((55374, 55385), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (55383, 55385), False, 'import Shadow\n'), ((55546, 55557), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (55555, 55557), False, 'import Shadow\n'), ((55718, 55729), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (55727, 55729), False, 'import Shadow\n'), ((55890, 55901), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (55899, 55901), False, 'import Shadow\n'), ((56062, 56073), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (56071, 56073), False, 'import Shadow\n'), ((56234, 56245), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (56243, 56245), False, 'import Shadow\n'), ((56406, 56417), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (56415, 56417), False, 'import Shadow\n'), ((56578, 56589), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (56587, 56589), False, 'import Shadow\n'), ((56750, 56761), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (56759, 56761), False, 'import Shadow\n'), ((56922, 56933), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (56931, 56933), 
False, 'import Shadow\n'), ((57094, 57105), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (57103, 57105), False, 'import Shadow\n'), ((57266, 57277), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (57275, 57277), False, 'import Shadow\n'), ((57438, 57449), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (57447, 57449), False, 'import Shadow\n'), ((57610, 57621), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (57619, 57621), False, 'import Shadow\n'), ((57782, 57793), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (57791, 57793), False, 'import Shadow\n'), ((57954, 57965), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (57963, 57965), False, 'import Shadow\n'), ((58126, 58137), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (58135, 58137), False, 'import Shadow\n'), ((58298, 58309), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (58307, 58309), False, 'import Shadow\n'), ((58470, 58481), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (58479, 58481), False, 'import Shadow\n'), ((58642, 58653), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (58651, 58653), False, 'import Shadow\n'), ((58814, 58825), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (58823, 58825), False, 'import Shadow\n'), ((58986, 58997), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (58995, 58997), False, 'import Shadow\n'), ((59158, 59169), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (59167, 59169), False, 'import Shadow\n'), ((59330, 59341), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (59339, 59341), False, 'import Shadow\n'), ((59502, 59513), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (59511, 59513), False, 'import Shadow\n'), ((59674, 59685), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (59683, 59685), False, 'import Shadow\n'), ((59846, 59857), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (59855, 59857), False, 'import Shadow\n'), ((60018, 60029), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (60027, 60029), False, 'import Shadow\n'), ((60190, 60201), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (60199, 60201), False, 'import Shadow\n'), ((60362, 60373), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (60371, 60373), False, 'import Shadow\n'), ((60534, 60545), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (60543, 60545), False, 'import Shadow\n'), ((60706, 60717), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (60715, 60717), False, 'import Shadow\n'), ((60878, 60889), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (60887, 60889), False, 'import Shadow\n'), ((61050, 61061), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (61059, 61061), False, 'import Shadow\n'), ((61222, 61233), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (61231, 61233), False, 'import Shadow\n'), ((61394, 61405), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (61403, 61405), False, 'import Shadow\n'), ((61566, 61577), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (61575, 61577), False, 'import Shadow\n'), ((61738, 61749), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (61747, 61749), False, 'import Shadow\n'), ((61910, 61921), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (61919, 61921), False, 'import Shadow\n'), ((62082, 62093), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (62091, 62093), False, 'import Shadow\n'), ((62254, 62265), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (62263, 62265), False, 'import Shadow\n'), ((62426, 62437), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (62435, 62437), False, 'import Shadow\n'), ((62598, 62609), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (62607, 62609), False, 'import Shadow\n'), ((62770, 62781), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (62779, 62781), False, 'import 
Shadow\n'), ((62942, 62953), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (62951, 62953), False, 'import Shadow\n'), ((63114, 63125), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (63123, 63125), False, 'import Shadow\n'), ((63286, 63297), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (63295, 63297), False, 'import Shadow\n'), ((63458, 63469), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (63467, 63469), False, 'import Shadow\n'), ((63630, 63641), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (63639, 63641), False, 'import Shadow\n'), ((63802, 63813), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (63811, 63813), False, 'import Shadow\n'), ((63974, 63985), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (63983, 63985), False, 'import Shadow\n'), ((64146, 64157), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (64155, 64157), False, 'import Shadow\n'), ((64318, 64329), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (64327, 64329), False, 'import Shadow\n'), ((64490, 64501), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (64499, 64501), False, 'import Shadow\n'), ((64662, 64673), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (64671, 64673), False, 'import Shadow\n'), ((64834, 64845), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (64843, 64845), False, 'import Shadow\n'), ((65006, 65017), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (65015, 65017), False, 'import Shadow\n'), ((65178, 65189), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (65187, 65189), False, 'import Shadow\n'), ((65350, 65361), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (65359, 65361), False, 'import Shadow\n'), ((65522, 65533), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (65531, 65533), False, 'import Shadow\n'), ((65694, 65705), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (65703, 65705), False, 'import Shadow\n'), ((65866, 65877), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (65875, 65877), False, 'import Shadow\n'), ((66038, 66049), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (66047, 66049), False, 'import Shadow\n'), ((66210, 66221), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (66219, 66221), False, 'import Shadow\n'), ((66382, 66393), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (66391, 66393), False, 'import Shadow\n'), ((66554, 66565), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (66563, 66565), False, 'import Shadow\n'), ((66726, 66737), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (66735, 66737), False, 'import Shadow\n'), ((66898, 66909), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (66907, 66909), False, 'import Shadow\n'), ((67070, 67081), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (67079, 67081), False, 'import Shadow\n'), ((67242, 67253), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (67251, 67253), False, 'import Shadow\n'), ((67414, 67425), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (67423, 67425), False, 'import Shadow\n'), ((67586, 67597), 'Shadow.OE', 'Shadow.OE', ([], {}), '()\n', (67595, 67597), False, 'import Shadow\n'), ((67749, 67807), 'pykern.pkjson.dump_pretty', 'pkjson.dump_pretty', (['beam_stats'], {'filename': '"""beam_stats.json"""'}), "(beam_stats, filename='beam_stats.json')\n", (67767, 67807), False, 'from pykern import pkjson\n'), ((67836, 67894), 'Shadow.ShadowTools.plotxy', 'Shadow.ShadowTools.plotxy', (['beam', '(1)', '(3)'], {'nbins': '(100)', 'nolost': '(1)'}), '(beam, 1, 3, nbins=100, nolost=1)\n', (67861, 67894), False, 'import Shadow\n'), ((803, 824), 'numpy.transpose', 'numpy.transpose', (['Tmat'], {}), '(Tmat)\n', (818, 824), False, 'import numpy\n'), ((1282, 1302), 'math.sqrt', 'math.sqrt', (['res[1, 1]'], {}), 
'(res[1, 1])\n', (1291, 1302), False, 'import math\n'), ((1363, 1383), 'math.sqrt', 'math.sqrt', (['res[3, 3]'], {}), '(res[3, 3])\n', (1372, 1383), False, 'import math\n'), ((1238, 1258), 'math.sqrt', 'math.sqrt', (['res[0, 0]'], {}), '(res[0, 0])\n', (1247, 1258), False, 'import math\n'), ((1319, 1339), 'math.sqrt', 'math.sqrt', (['res[2, 2]'], {}), '(res[2, 2])\n', (1328, 1339), False, 'import math\n')]
|
import json
import logging
import os
import re
from collections import namedtuple
from copy import deepcopy
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import spacy
from scirex_utilities.analyse_pwc_entity_results import *
from scirex_utilities.entity_utils import *
from spacy.tokens import Doc
from tqdm import tqdm
tqdm.pandas()
LabelSpan = namedtuple("Span", ["start", "end", "token_start", "token_end", "entity", "links", "modified"])
logging.basicConfig(level=logging.INFO)
class WhitespaceTokenizer(object):
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split()
# All tokens 'own' a subsequent space character in this tokenizer
spaces = [True] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
nlp = spacy.load("en")
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
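# Illustrative check (not part of the original pipeline): with the whitespace tokenizer
# installed, spaCy token boundaries match str.split() exactly, e.g.
#   doc = nlp("ResNet - 50 achieves 76.1 top-1")
#   assert [t.text for t in doc] == "ResNet - 50 achieves 76.1 top-1".split()
# so token indices stay aligned with the pre-tokenized brat text.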
def process_folder(folder: str) -> Tuple[dict, str]:
span_labels = {}
map_T_to_span = {}
if not os.path.isdir(folder) or "document.txt" not in os.listdir(folder):
        print(folder, "has no document.txt")
return None
doc_text = open(os.path.join(folder, "document.txt")).read()
ann_file = open(os.path.join(folder, "document.ann")).read().strip()
annotations = [x.split("\t", 1) for x in ann_file.split("\n")]
annotations = sorted(annotations, key=lambda x: 0 if x[0] == "T" else 1)
for ann_type, ann in annotations:
if ann_type[0] == "T":
ann, ann_text = ann.split("\t")
if ";" in ann:
continue
else:
enttype, span_start, span_end = ann.split()
span_start, span_end = int(span_start), int(span_end)
if (span_start, span_end) in span_labels:
assert "Span already present"
else:
span_labels[(span_start, span_end)] = {"E": enttype, "A": set(), "T": ann_text}
map_T_to_span[ann_type] = (span_start, span_end)
if ann_type[0] == "A":
ann, ann_T = ann.split()
if ann_T in map_T_to_span:
span_labels[map_T_to_span[ann_T]]["A"].add(ann)
else:
print("Attribute before Trigger")
return span_labels, doc_text
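# Sketch of the brat standoff lines process_folder expects (IDs and text are made up):
#   T1<TAB>Method 10 25<TAB>convolutional net   -> text-bound span: type, char start/end, surface text
#   A1<TAB>Canonical_Name T1                    -> binary attribute attached to trigger T1
# Trigger lines are parsed before attribute lines regardless of file order; triggers whose span
# field contains ';' (discontinuous spans) are skipped, and attributes pointing at a skipped
# trigger are dropped with a warning.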
def get_all_document_annotations(brat_folder: str) -> Dict[str, Tuple[dict, str]]:
map_id_to_ann = {}
for f in tqdm(os.listdir(brat_folder)):
try:
map_id_to_ann[f] = process_folder(os.path.join(brat_folder, f))
except Exception as e:
print(f)
return map_id_to_ann
def process_back_to_dataframe(span_labels: Dict[Tuple[int, int], dict], doc_text: str):
sentences = doc_text.split("\n ")
assert sentences[-1] == ""
sentences = [x + "\n " for x in sentences[:-1]]
sentence_limits = np.cumsum([len(x) for x in sentences])
sentence_limits = list(zip([0] + list(sentence_limits)[:-1], sentence_limits))
for s, e in sentence_limits:
assert doc_text[e - 2 : e] == "\n "
assert doc_text[s] != " "
span_labels = list(map(lambda x: [list(x[0]), x[1]], sorted(span_labels.items(), key=lambda x: x[0][0])))
sl_ix = 0
map_sentence_limits_to_spans = {}
for ss, se in sentence_limits:
map_sentence_limits_to_spans[(ss, se)] = []
while sl_ix < len(span_labels) and span_labels[sl_ix][0][0] >= ss and span_labels[sl_ix][0][1] <= se:
map_sentence_limits_to_spans[(ss, se)].append(span_labels[sl_ix])
sl_ix += 1
spans_in_l = 0
for k, v in map_sentence_limits_to_spans.items():
for span, _ in v:
assert k[0] <= span[0] and k[1] >= span[1]
spans_in_l += 1
assert span[1] < k[1] - 1
assert spans_in_l == len(span_labels)
for k, v in map_sentence_limits_to_spans.items():
for span, _ in v:
span[0] -= k[0]
span[1] -= k[0]
df = []
for sent_id, ((ss, se), st) in enumerate(zip(sentence_limits, sentences)):
for span, d in map_sentence_limits_to_spans[(ss, se)]:
assert st[-2:] == "\n ", st[-2:]
assert span[1] < len(st) - 2
assert st[span[0] : span[1]] == d["T"] and len(d["T"]) > 0, (st[span[0] : span[1]], d["T"])
df.append({"sentence": st, "spans": map_sentence_limits_to_spans[(ss, se)], "sentence_id": sent_id})
assert df[4]["sentence"].strip() == "", breakpoint()
df = df[5:]
df = pd.DataFrame(df)
return df
def get_dataframe_from_folder(brat_folder):
logging.info("Generating DataFrame ...")
map_changes = get_all_document_annotations(brat_folder)
logging.info("Done generating DataFrame")
doc_df = []
for k in tqdm(map_changes):
if map_changes[k] is None:
continue
df = process_back_to_dataframe(*map_changes[k])
df["doc_id"] = k
doc_df.append(df)
doc_df = pd.concat(doc_df)
return doc_df
def overlap(span_1, span_2):
if span_1[0] >= span_2[1] or span_2[0] >= span_1[1]:
return False
return True
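# Spans are treated as half-open [start, end): overlap((0, 5), (4, 9)) is True, while touching
# spans such as overlap((0, 5), (5, 9)) are not considered overlapping.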
def process_cluster(cluster):
stats = {
"new_spans": len([x for x in cluster if "pre" not in x[1]]),
"old_spans": len([x for x in cluster if "pre" in x[1]]),
"type_change": 0,
"change_attributes": 0,
}
old_spans = [x for x in cluster if "pre" in x[1]]
new_spans = [x for x in cluster if "pre" not in x[1]]
old_spans_modified, old_spans_unmodified = [], []
for span, info in old_spans:
if [info[k] for k in ["E", "T", "A"]] == [info["pre"][k] for k in ["E", "T", "A"]]:
del info["pre"]
if any(overlap(span, n_span) for n_span, _ in new_spans):
continue
old_spans_unmodified.append((span, info))
else:
del info["pre"]
if any(overlap(span, n_span) for n_span, _ in new_spans):
continue
old_spans_modified.append((span, info))
assert all((si == sj or not overlap(si[0], sj[0])) for si in new_spans for sj in new_spans), breakpoint()
assert len(old_spans_unmodified) == 0 or len(old_spans_modified) == 0, breakpoint()
assert all(
(not overlap(ospan, nspan)) for ospan, _ in old_spans_modified for nspan, _ in new_spans
), breakpoint()
assert all(
(not overlap(ospan, nspan)) for ospan, _ in old_spans_unmodified for nspan, _ in new_spans
), breakpoint()
if len(old_spans_modified + old_spans_unmodified) > 0 and len(new_spans) > 0:
breakpoint()
new_spans = [
LabelSpan(
start=x[0][0],
end=x[0][1],
entity=x[1]["E"],
links=x[1]["A"],
token_start=None,
token_end=None,
modified=True,
)._asdict()
for x in new_spans + old_spans_modified
]
new_spans += [
LabelSpan(
start=x[0][0],
end=x[0][1],
entity=x[1]["E"],
links=x[1]["A"],
token_start=None,
token_end=None,
modified=False,
)._asdict()
for x in old_spans_unmodified
]
stats["spans_kept"] = len(new_spans)
return new_spans, stats
# Case 1: pre entity has labels / post doesn't -> copy labels / delete pre entity
# Case 2: pre entity has labels / post also has labels -> don't copy labels / delete pre entity
# Case 3: if the post entity has a different type than the pre entity, remove the pre entity
def normalize_spans(row):
span_list_1, span_list_2 = row["spans_old"], row["spans_new"]
map_1_span_to_ix = {tuple(k): v for k, v in span_list_1}
if len(span_list_2) == 0:
return [], None
spans = [tuple(x[0]) for x in span_list_2]
if len(spans) != len(set(spans)):
assert "Duplicate spans", span_list_2
span_list_2 = sorted(span_list_2, key=lambda x: x[0])
stats = []
clusters = []
curr_cluster = []
cstart, cend = -1, -1
for (start, end), span_info in span_list_2:
cspan = ((start, end), span_info)
if (start, end) in map_1_span_to_ix:
span_info["pre"] = map_1_span_to_ix[(start, end)]
if cstart == -1: # (Start First Cluster)
curr_cluster.append(cspan)
cstart, cend = start, end
elif start < cend: # Append to current cluster
curr_cluster.append(cspan)
cend = max(cend, end)
else: # Start new cluster
curr_cluster, cluster_stats = process_cluster(curr_cluster)
stats.append(cluster_stats)
clusters.append(curr_cluster)
curr_cluster = [cspan]
cstart, cend = start, end
curr_cluster, cluster_stats = process_cluster(curr_cluster)
stats.append(cluster_stats)
clusters.append(curr_cluster)
clusters = sorted([z for x in clusters for z in x], key=lambda x: (x["start"], x["end"]))
for i in range(len(clusters) - 1):
if clusters[i]["end"] > clusters[i + 1]["start"]:
breakpoint()
stats_reduced = {}
for s in stats:
for k, v in s.items():
if k not in stats_reduced:
stats_reduced[k] = v
else:
stats_reduced[k] += v
return clusters, stats_reduced
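# Rough sketch of the sweep above: with the new annotations sorted by start offset, e.g. spans
# at (0, 5), (3, 8) and (10, 14), the first two overlap and are grouped into one cluster while
# (10, 14) starts a new one; each cluster is then reconciled against the pre-annotation spans
# by process_cluster.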
def add_token_index(row):
if len(row["cluster"]) == 0:
return []
sentence = row["sentence_old"]
words = row["words"]
word_indices = row["word_indices"]
sentence_start = row["sentence_start"]
starts, ends = list(zip(*word_indices))
for i, (start, end) in enumerate(zip(starts, ends)):
assert sentence[start:end] == words[i], breakpoint()
new_cluster = []
cluster = row["cluster"]
for i, span in enumerate(cluster):
assert "start" in span, breakpoint()
assert "end" in span, breakpoint()
if not (span["start"] in starts):
if sentence[span["start"]].strip() == "":
span["start"] += 1
else:
span["start"] = min(
starts, key=lambda x: abs(x - span["start"]) if x < span["start"] else float("inf")
)
if not (span["end"] in ends):
if sentence[span["end"] - 1].strip() == "":
span["end"] -= 1
else:
span["end"] = min(
ends, key=lambda x: abs(x - span["end"]) if x > span["end"] else float("inf")
)
span["token_start"] = starts.index(span["start"]) + sentence_start - len(words)
span["token_end"] = ends.index(span["end"]) + 1 + sentence_start - len(words)
for cleaned_span in new_cluster:
if overlap(
(span["token_start"], span["token_end"]),
(cleaned_span["token_start"], cleaned_span["token_end"]),
):
print(row["doc_id"])
print(" ".join(row["words"]))
print("=" * 20)
new_cluster.append(span)
return new_cluster
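# Note on the snapping above: character offsets coming from brat do not always land on token
# boundaries, so a span is first trimmed of a stray leading/trailing whitespace character and
# otherwise widened outward to the nearest token start (on the left) / token end (on the right);
# the indices are then shifted by `sentence_start - len(words)` to become document-level token
# positions.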
def generate_token_and_indices(sentence):
words = sorted(
[(m.group(0), (m.start(), m.end())) for m in re.finditer(r"[^\s\+\-/\(\)&\[\],]+", sentence)]
+ [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"[\+\-/\(\)&\[\],]+", sentence)]
+ [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"\s+", sentence)],
key=lambda x: x[1],
)
if len(words) == 0 or sentence.strip() == "":
return [], []
try:
words, indices = list(zip(*[(t, i) for t, i in words if t.strip() != ""]))
except:
breakpoint()
return words, indices
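# Illustrative behaviour (values worked out by hand, not taken from the repo): bracket and dash
# characters are split into their own tokens, so
#   generate_token_and_indices("ResNet-50 (ours)")[0]
# yields roughly ("ResNet", "-", "50", "(", "ours", ")") together with the matching (start, end)
# character offsets; pure-whitespace matches are dropped.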
def compare_brat_annotations(ann_old_df, ann_new_df):
df_merged = ann_old_df.merge(ann_new_df, on=["doc_id", "sentence_id"], suffixes=("_old", "_new"))
logging.info("Applying Normalize Spans ...")
output = df_merged.progress_apply(normalize_spans, axis=1)
df_merged["cluster"], df_merged["stats"] = list(zip(*output))
df_merged = df_merged.sort_values(["doc_id", "sentence_id"]).reset_index(drop=True)
logging.info("Applying Add Token Index ...")
df_merged["words"], df_merged["word_indices"] = list(
zip(*df_merged["sentence_old"].progress_apply(generate_token_and_indices))
)
df_merged["num_words"] = df_merged["words"].progress_apply(len)
df_merged["sentence_start"] = df_merged.groupby("doc_id")["num_words"].cumsum()
df_merged["entities"] = df_merged.apply(add_token_index, axis=1)
df_merged = (
df_merged.sort_values(["doc_id", "sentence_id"])
.reset_index(drop=True)
.drop(columns=["spans_old", "spans_new", "sentence_new", "cluster"])
.rename(columns={"sentence_old": "sentence"})
)
return df_merged
def generate_relations_in_pwc_df(pwc_df):
pwc_df_keep = pwc_df[["s2_paper_id"] + true_entities + ["score"]].rename(
columns=map_true_entity_to_available
)
pwc_df_keep = (
pwc_df_keep[(~pwc_df_keep.duplicated()) & (pwc_df_keep.s2_paper_id != "not_found")]
.sort_values(["s2_paper_id"] + used_entities + ["score"])
.reset_index(drop=True)
)
# pwc_df_keep[used_entities] = pwc_df_keep[used_entities].applymap(lambda x: re.sub(r"[^\w-]", "_", x))
pwc_df_keep = (
pwc_df_keep.groupby("s2_paper_id")
.apply(lambda x: list(x[used_entities + ["score"]].itertuples(index=False, name="Relation")))
.reset_index()
.rename(columns={0: "Relations"})
)
return pwc_df_keep
def combine_brat_to_original_data(
pwc_doc_file,
pwc_sentence_file,
pwc_prediction_file,
original_brat_anno_folder,
annotated_brat_anno_folder,
):
logging.info("Loading pwc docs ... ")
pwc_df = load_pwc_full_text(pwc_doc_file)
pwc_grouped = (
pwc_df.groupby("s2_paper_id")[["dataset", "task", "model_name", "metric"]]
.aggregate(lambda x: list(set(tuple(x))))
.reset_index()
)
pwc_df_relations = generate_relations_in_pwc_df(pwc_df)
pwc_df_relations = pwc_df_relations.rename(columns={"s2_paper_id": "doc_id"})[["doc_id", "Relations"]]
pwc_df_relations.index = pwc_df_relations.doc_id
pwc_df_relations = pwc_df_relations.drop(columns=["doc_id"])
pwc_df_relations: Dict[str, Relation] = pwc_df_relations.to_dict()["Relations"]
method_breaks = {
d: {
clean_name(rel.Method): [(i, clean_name(x)) for i, x in chunk_string(rel.Method)]
for rel in relations
}
for d, relations in pwc_df_relations.items()
}
pwc_df_relations = {
d: [{k: clean_name(x) if k != "score" else x for k, x in rel._asdict().items()} for rel in relations]
for d, relations in pwc_df_relations.items()
}
logging.info("Loading PwC Sentence Predictions ... ")
pwc_sentences = load_pwc_sentence_predictions(pwc_sentence_file, pwc_prediction_file)
pwc_sentences = pwc_sentences.merge(pwc_grouped, left_on="doc_id", right_on="s2_paper_id")
pwc_sentences = pwc_sentences.sort_values(
by=["doc_id", "section_id", "para_id", "sentence_id"]
).reset_index(drop=True)
pwc_sentences["words"] = pwc_sentences["words"].progress_apply(
lambda x: generate_token_and_indices(" ".join(x))[0]
)
df_changed = get_dataframe_from_folder(annotated_brat_anno_folder)
df_original = get_dataframe_from_folder(original_brat_anno_folder)
df_merged = compare_brat_annotations(df_original, df_changed)
assert (
pwc_sentences.groupby("doc_id")["words"].agg(lambda words: [x for y in words for x in y])
!= df_merged.groupby("doc_id")["words"].agg(lambda words: [x for y in words for x in y])
).sum() == 0, breakpoint()
def add_nums(rows, columns, name):
rows[name] = list(rows.groupby(columns).grouper.group_info[0])
return rows
pwc_sentences["para_num"] = None
pwc_sentences["sentence_num"] = None
pwc_sentences = pwc_sentences.groupby("doc_id").progress_apply(
lambda x: add_nums(x, ["section_id", "para_id"], "para_num")
)
pwc_sentences = pwc_sentences.groupby("doc_id").progress_apply(
lambda x: add_nums(x, ["section_id", "para_id", "sentence_id"], "sentence_num")
)
words: Dict[str, List[str]] = pwc_sentences.groupby("doc_id")["words"].agg(
lambda words: [x for y in words for x in y]
).to_dict()
pwc_sentences["num_words"] = pwc_sentences["words"].apply(len)
sentences = pwc_sentences.groupby(["doc_id", "sentence_num"])["num_words"].agg(sum)
sections = pwc_sentences.groupby(["doc_id", "section_id"])["num_words"].agg(sum)
sections: Dict[str, Dict[int, int]] = {
level: sections.xs(level).to_dict() for level in sections.index.levels[0]
}
sentences: Dict[str, Dict[int, int]] = {
level: sentences.xs(level).to_dict() for level in sentences.index.levels[0]
}
words_merged = (
df_merged.groupby("doc_id")["words"].agg(lambda words: [x for y in words for x in y]).to_dict()
)
entities = (
df_merged.groupby("doc_id")["entities"].agg(lambda ents: [x for y in ents for x in y]).to_dict()
)
def compute_start_end(cards):
ends = list(np.cumsum(cards))
starts = [0] + ends
return list(zip([int(x) for x in starts], [int(x) for x in ends]))
combined_information = {}
for d in words:
assert words[d] == words_merged[d], breakpoint()
assert list(sentences[d].keys()) == list(range(max(sentences[d].keys()) + 1)), breakpoint()
assert list(sections[d].keys()) == list(range(max(sections[d].keys()) + 1)), breakpoint()
sent = compute_start_end([sentences[d][i] for i in range(len(sentences[d]))])
sec = compute_start_end([sections[d][i] for i in range(len(sections[d]))])
for e in entities[d]:
del e["start"]
del e["end"]
combined_information[d] = {
"words": words[d],
"sentences": sent,
"sections": sec,
"relations": pwc_df_relations[d],
"entities": entities[d],
"doc_id": d,
"method_subrelations": method_breaks[d],
}
return combined_information
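# Shape of the per-document record assembled above: for every doc_id the dict holds the
# whitespace tokens ("words"), (start, end) token ranges for "sentences" and "sections", the
# PwC n-ary "relations", the reconciled span "entities", the "doc_id" itself, and the chunked
# method-name pieces under "method_subrelations".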
def _annotation_to_dict(dc):
# convenience method
if isinstance(dc, dict):
ret = dict()
for k, v in dc.items():
k = _annotation_to_dict(k)
v = _annotation_to_dict(v)
ret[k] = v
return ret
elif isinstance(dc, str):
return dc
elif isinstance(dc, (set, frozenset, list, tuple)):
ret = []
for x in dc:
ret.append(_annotation_to_dict(x))
return tuple(ret)
else:
return dc
def annotations_to_jsonl(annotations, output_file, key="doc_id"):
with open(output_file, "w") as of:
for ann in sorted(annotations, key=lambda x: x[key]):
as_json = _annotation_to_dict(ann)
as_str = json.dumps(as_json, sort_keys=True)
of.write(as_str)
of.write("\n")
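# Output sketch: one JSON object per line, rows sorted by `key` (doc_id by default), with
# sets/lists/tuples coerced to tuples by _annotation_to_dict and hence serialised as JSON arrays.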
def propagate_annotations(data_dict: Dict[str, Any]):
words = data_dict["words"]
entities = data_dict["entities"]
entities = {(e["token_start"], e["token_end"]): e for e in entities}
assert not any(e != f and overlap(e, f) for e in entities for f in entities), breakpoint()
new_entities = {}
for (s, e) in entities:
if entities[(s, e)]["modified"] == True:
span_text = words[s:e]
possible_matches = [
(i, i + len(span_text))
for i in range(len(words))
if words[i : i + len(span_text)] == span_text
]
for match in possible_matches:
add_match = False
if match in entities:
if entities[match].get("proped", False):
continue
if entities[match]["modified"] == False: # Propagate the changes
for k in ["entity", "links", "modified"]:
entities[match][k] = deepcopy(entities[(s, e)][k])
elif entities[match]["entity"] != entities[(s, e)]["entity"]:
if match > (s, e):
for k in ["entity", "links", "modified"]:
entities[match][k] = deepcopy(entities[(s, e)][k])
elif set(entities[match]["links"]) != set(
entities[(s, e)]["links"]
): # Two entities with same text have different annotations. BAD !!!
merged_links = set(entities[match]["links"]) | set(entities[(s, e)]["links"])
entities[match]["links"] = deepcopy(list(merged_links))
entities[(s, e)]["links"] = deepcopy(list(merged_links))
entities[match]["proped"] = True
add_match = False
else:
for span in entities:
if overlap(span, match):
if entities[span]["modified"] == True:
add_match = False
if entities[span]["entity"] != entities[(s, e)]["entity"]:
break
elif set(entities[span]["links"]) != set(entities[(s, e)]["links"]):
diff_links = set(entities[(s, e)]["links"]) ^ set(entities[span]["links"])
canon_name = set(["Canonical_Name"])
if (
diff_links != canon_name
and set(entities[(s, e)]["links"]) != canon_name
and set(entities[span]["links"]) != canon_name
):
break
else:
merged_links = set(entities[(s, e)]["links"]) | set(
entities[span]["links"]
)
entities[(s, e)]["links"] = deepcopy(list(merged_links))
entities[span]["links"] = deepcopy(list(merged_links))
break
break
else:
add_match = True
if match in new_entities:
if new_entities[match]["entity"] != entities[(s, e)]["entity"]:
breakpoint()
elif set(new_entities[match]["links"]) != set(
entities[(s, e)]["links"]
): # Two entities with same text have different annotations. BAD !!!
diff_links = set(new_entities[match]["links"]) & set(entities[(s, e)]["links"])
if (
len(diff_links) == 0
and len(set(new_entities[match]["links"])) > 0
and len(set(entities[(s, e)]["links"])) > 0
):
breakpoint()
else:
merged_links = set(new_entities[match]["links"] + entities[(s, e)]["links"])
entities[(s, e)]["links"] = deepcopy(list(merged_links))
new_entities[match]["links"] = deepcopy(list(merged_links))
else:
add_match = False
if add_match:
new_entities[match] = {
k: deepcopy(entities[(s, e)][k]) for k in ["entity", "links", "modified"]
}
new_entities[match]["token_start"] = match[0]
new_entities[match]["token_end"] = match[1]
for match in list(new_entities.keys()):
for span in list(entities.keys()):
if overlap(match, span):
assert entities[span]["modified"] == False or entities[span]["proped"], breakpoint()
if entities[span].get("proped", False):
if match in new_entities:
del new_entities[match]
elif not entities[span]["modified"]:
del entities[span]
new_entities = sorted(list(new_entities.items()), key=lambda x: x[0][1])
disjoint_new_entities = []
for e in new_entities:
if len(disjoint_new_entities) == 0:
disjoint_new_entities.append(e)
else:
if e[0][0] >= disjoint_new_entities[-1][0][1]:
disjoint_new_entities.append(e)
assert not any(
e[0] != f[0] and overlap(e[0], f[0]) for e in disjoint_new_entities for f in disjoint_new_entities
)
disjoint_new_entities = dict(disjoint_new_entities)
assert not any(overlap(e, f) for e in disjoint_new_entities for f in entities), breakpoint()
entities.update(disjoint_new_entities)
assert not any(e != f and overlap(e, f) for e in entities for f in entities), breakpoint()
assert all(v["token_start"] == s and v["token_end"] == e for (s, e), v in entities.items()), breakpoint()
data_dict["entities"] = [x for x in entities.values()]
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--annotator")
if __name__ == "__main__":
args = parser.parse_args()
annotations_dict = combine_brat_to_original_data(
"data/pwc_s2_cleaned_text_v2.jsonl",
"data/pwc_s2_cleaned_text_v2_sentences.jsonl",
"outputs/pwc_s2_cleaned_text_v2_sentences_predictions.jsonl.clean",
"/home/sarthakj/brat/brat/data/result_extraction/outputs/second_phase_annotations_"
+ args.annotator
+ "/",
"/home/sarthakj/brat/brat/data/result_extraction/outputs/second_phase_annotations_original/",
)
annotations_to_jsonl(list(annotations_dict.values()), "model_data/all_data_" + args.annotator + ".jsonl")
data = [json.loads(line) for line in open("model_data/all_data_" + args.annotator + ".jsonl")]
for d in tqdm(data):
names = [v for rel in d["relations"] for k, v in rel.items() if k != "score"]
names += [n for m, subm in d["method_subrelations"].items() for idx, n in subm]
names = set(names)
propagate_annotations(d)
coreference = {n: [] for n in names}
ner = []
for e in d["entities"]:
e["links"] = set(e["links"])
e["canon"] = "Canonical_Name" in e["links"]
if e["canon"]:
e["links"].remove("Canonical_Name")
if "proped" in e:
del e["proped"]
del e["modified"]
e["links"] = e["links"] & names
for l in e["links"]:
coreference[l].append([e["token_start"], e["token_end"]])
ner.append((e["token_start"], e["token_end"], e["entity"]))
del d["entities"]
d["n_ary_relations"] = d["relations"]
del d["relations"]
d["coref"] = coreference
d["ner"] = ner
assert d["sentences"][-1][-1] == len(d["words"]), breakpoint()
assert d["sections"][-1][-1] == len(d["words"]), breakpoint()
annotations_to_jsonl(data, "model_data/all_data_" + args.annotator + "_propagated.jsonl")
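# Hypothetical invocation (script name is illustrative; the brat paths above are hard-coded to
# the author's machine):
#   python combine_brat_annotations.py --annotator <name>
# This writes model_data/all_data_<name>.jsonl and the propagated
# model_data/all_data_<name>_propagated.jsonl consumed downstream.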
|
[
"logging.basicConfig",
"json.loads",
"collections.namedtuple",
"os.listdir",
"argparse.ArgumentParser",
"spacy.load",
"tqdm.tqdm",
"json.dumps",
"spacy.tokens.Doc",
"os.path.join",
"os.path.isdir",
"pandas.concat",
"re.finditer",
"copy.deepcopy",
"pandas.DataFrame",
"numpy.cumsum",
"tqdm.tqdm.pandas",
"logging.info"
] |
[((357, 370), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (368, 370), False, 'from tqdm import tqdm\n'), ((383, 482), 'collections.namedtuple', 'namedtuple', (['"""Span"""', "['start', 'end', 'token_start', 'token_end', 'entity', 'links', 'modified']"], {}), "('Span', ['start', 'end', 'token_start', 'token_end', 'entity',\n 'links', 'modified'])\n", (393, 482), False, 'from collections import namedtuple\n'), ((479, 518), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (498, 518), False, 'import logging\n'), ((852, 868), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (862, 868), False, 'import spacy\n'), ((25462, 25487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (25485, 25487), False, 'import argparse\n'), ((4482, 4498), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (4494, 4498), True, 'import pandas as pd\n'), ((4564, 4604), 'logging.info', 'logging.info', (['"""Generating DataFrame ..."""'], {}), "('Generating DataFrame ...')\n", (4576, 4604), False, 'import logging\n'), ((4670, 4711), 'logging.info', 'logging.info', (['"""Done generating DataFrame"""'], {}), "('Done generating DataFrame')\n", (4682, 4711), False, 'import logging\n'), ((4741, 4758), 'tqdm.tqdm', 'tqdm', (['map_changes'], {}), '(map_changes)\n', (4745, 4758), False, 'from tqdm import tqdm\n'), ((4937, 4954), 'pandas.concat', 'pd.concat', (['doc_df'], {}), '(doc_df)\n', (4946, 4954), True, 'import pandas as pd\n'), ((11806, 11850), 'logging.info', 'logging.info', (['"""Applying Normalize Spans ..."""'], {}), "('Applying Normalize Spans ...')\n", (11818, 11850), False, 'import logging\n'), ((12074, 12118), 'logging.info', 'logging.info', (['"""Applying Add Token Index ..."""'], {}), "('Applying Add Token Index ...')\n", (12086, 12118), False, 'import logging\n'), ((13686, 13723), 'logging.info', 'logging.info', (['"""Loading pwc docs ... """'], {}), "('Loading pwc docs ... ')\n", (13698, 13723), False, 'import logging\n'), ((14754, 14807), 'logging.info', 'logging.info', (['"""Loading PwC Sentence Predictions ... """'], {}), "('Loading PwC Sentence Predictions ... 
')\n", (14766, 14807), False, 'import logging\n'), ((26277, 26287), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (26281, 26287), False, 'from tqdm import tqdm\n'), ((800, 843), 'spacy.tokens.Doc', 'Doc', (['self.vocab'], {'words': 'words', 'spaces': 'spaces'}), '(self.vocab, words=words, spaces=spaces)\n', (803, 843), False, 'from spacy.tokens import Doc\n'), ((2426, 2449), 'os.listdir', 'os.listdir', (['brat_folder'], {}), '(brat_folder)\n', (2436, 2449), False, 'import os\n'), ((26177, 26193), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (26187, 26193), False, 'import json\n'), ((1026, 1047), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (1039, 1047), False, 'import os\n'), ((1073, 1091), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1083, 1091), False, 'import os\n'), ((17206, 17222), 'numpy.cumsum', 'np.cumsum', (['cards'], {}), '(cards)\n', (17215, 17222), True, 'import numpy as np\n'), ((18956, 18991), 'json.dumps', 'json.dumps', (['as_json'], {'sort_keys': '(True)'}), '(as_json, sort_keys=True)\n', (18966, 18991), False, 'import json\n'), ((1177, 1213), 'os.path.join', 'os.path.join', (['folder', '"""document.txt"""'], {}), "(folder, 'document.txt')\n", (1189, 1213), False, 'import os\n'), ((2511, 2539), 'os.path.join', 'os.path.join', (['brat_folder', 'f'], {}), '(brat_folder, f)\n', (2523, 2539), False, 'import os\n'), ((11352, 11381), 're.finditer', 're.finditer', (['"""\\\\s+"""', 'sentence'], {}), "('\\\\s+', sentence)\n", (11363, 11381), False, 'import re\n'), ((1242, 1278), 'os.path.join', 'os.path.join', (['folder', '"""document.ann"""'], {}), "(folder, 'document.ann')\n", (1254, 1278), False, 'import os\n'), ((11147, 11200), 're.finditer', 're.finditer', (['"""[^\\\\s\\\\+\\\\-/\\\\(\\\\)&\\\\[\\\\],]+"""', 'sentence'], {}), "('[^\\\\s\\\\+\\\\-/\\\\(\\\\)&\\\\[\\\\],]+', sentence)\n", (11158, 11200), False, 'import re\n'), ((11251, 11300), 're.finditer', 're.finditer', (['"""[\\\\+\\\\-/\\\\(\\\\)&\\\\[\\\\],]+"""', 'sentence'], {}), "('[\\\\+\\\\-/\\\\(\\\\)&\\\\[\\\\],]+', sentence)\n", (11262, 11300), False, 'import re\n'), ((23801, 23828), 'copy.deepcopy', 'deepcopy', (['entities[s, e][k]'], {}), '(entities[s, e][k])\n', (23809, 23828), False, 'from copy import deepcopy\n'), ((20079, 20106), 'copy.deepcopy', 'deepcopy', (['entities[s, e][k]'], {}), '(entities[s, e][k])\n', (20087, 20106), False, 'from copy import deepcopy\n'), ((20357, 20384), 'copy.deepcopy', 'deepcopy', (['entities[s, e][k]'], {}), '(entities[s, e][k])\n', (20365, 20384), False, 'from copy import deepcopy\n')]
|
# Copyright (C) 2018 Innoviz Technologies
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD 3-Clause license. See the LICENSE file for details.
import pandas as pd
import os
import numpy as np
from utilities.math_utils import RotationTranslationData
from visualizations.vis import pcshow
from utilities import data_utils
if __name__ == '__main__':
base_dir = os.path.dirname(os.getcwd())
video_dir = os.path.join(base_dir, 'data_examples', 'test_video')
agg_point_cloud_list = []
max_frames_to_keep = 10
min_idx = 0
decimate = 1
max_dist = 100
for idx in data_utils.enumerate_frames(video_dir):
if idx < min_idx or idx % decimate != 0:
continue
pc_file = data_utils.frame_to_filename(video_dir, idx, 'pointcloud')
pc, ego, label = data_utils.read_all_data(video_dir, idx)
ego_rt = RotationTranslationData(vecs=(ego[:3], ego[3:]))
ego_pc = ego_rt.apply_transform(pc[:, :3])
ego_pc = np.concatenate((ego_pc, pc[:, 3:4]), -1)
labeled_pc = np.concatenate((ego_pc, label), -1)
agg_point_cloud_list.append(labeled_pc)
if len(agg_point_cloud_list) > max_frames_to_keep:
agg_point_cloud_list = agg_point_cloud_list[1:]
agg_point_cloud = np.concatenate(agg_point_cloud_list, 0)
pc2disp = ego_rt.inverse().apply_transform(agg_point_cloud[:, :3])
pc2disp = np.concatenate((pc2disp, agg_point_cloud[:, 3:]), -1)
pc2disp = pc2disp[np.linalg.norm(pc2disp[:, :3], axis=1) < max_dist]
pcshow(pc2disp, on_screen_text=pc_file, max_points=32000 * max_frames_to_keep)
|
[
"utilities.data_utils.enumerate_frames",
"os.path.join",
"os.getcwd",
"utilities.math_utils.RotationTranslationData",
"numpy.concatenate",
"numpy.linalg.norm",
"utilities.data_utils.read_all_data",
"utilities.data_utils.frame_to_filename",
"visualizations.vis.pcshow"
] |
[((463, 516), 'os.path.join', 'os.path.join', (['base_dir', '"""data_examples"""', '"""test_video"""'], {}), "(base_dir, 'data_examples', 'test_video')\n", (475, 516), False, 'import os\n'), ((642, 680), 'utilities.data_utils.enumerate_frames', 'data_utils.enumerate_frames', (['video_dir'], {}), '(video_dir)\n', (669, 680), False, 'from utilities import data_utils\n'), ((434, 445), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (443, 445), False, 'import os\n'), ((770, 828), 'utilities.data_utils.frame_to_filename', 'data_utils.frame_to_filename', (['video_dir', 'idx', '"""pointcloud"""'], {}), "(video_dir, idx, 'pointcloud')\n", (798, 828), False, 'from utilities import data_utils\n'), ((854, 894), 'utilities.data_utils.read_all_data', 'data_utils.read_all_data', (['video_dir', 'idx'], {}), '(video_dir, idx)\n', (878, 894), False, 'from utilities import data_utils\n'), ((912, 960), 'utilities.math_utils.RotationTranslationData', 'RotationTranslationData', ([], {'vecs': '(ego[:3], ego[3:])'}), '(vecs=(ego[:3], ego[3:]))\n', (935, 960), False, 'from utilities.math_utils import RotationTranslationData\n'), ((1029, 1069), 'numpy.concatenate', 'np.concatenate', (['(ego_pc, pc[:, 3:4])', '(-1)'], {}), '((ego_pc, pc[:, 3:4]), -1)\n', (1043, 1069), True, 'import numpy as np\n'), ((1092, 1127), 'numpy.concatenate', 'np.concatenate', (['(ego_pc, label)', '(-1)'], {}), '((ego_pc, label), -1)\n', (1106, 1127), True, 'import numpy as np\n'), ((1321, 1360), 'numpy.concatenate', 'np.concatenate', (['agg_point_cloud_list', '(0)'], {}), '(agg_point_cloud_list, 0)\n', (1335, 1360), True, 'import numpy as np\n'), ((1454, 1507), 'numpy.concatenate', 'np.concatenate', (['(pc2disp, agg_point_cloud[:, 3:])', '(-1)'], {}), '((pc2disp, agg_point_cloud[:, 3:]), -1)\n', (1468, 1507), True, 'import numpy as np\n'), ((1593, 1671), 'visualizations.vis.pcshow', 'pcshow', (['pc2disp'], {'on_screen_text': 'pc_file', 'max_points': '(32000 * max_frames_to_keep)'}), '(pc2disp, on_screen_text=pc_file, max_points=32000 * max_frames_to_keep)\n', (1599, 1671), False, 'from visualizations.vis import pcshow\n'), ((1534, 1572), 'numpy.linalg.norm', 'np.linalg.norm', (['pc2disp[:, :3]'], {'axis': '(1)'}), '(pc2disp[:, :3], axis=1)\n', (1548, 1572), True, 'import numpy as np\n')]
|
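For illustration, a minimal sketch of the rolling aggregation pattern used in the point-cloud sample above; the Innoviz-specific loaders and RotationTranslationData are not reproduced here, so plain NumPy arrays already expressed in a common ego frame are assumed.

import numpy as np

def aggregate_labeled_clouds(frames, max_frames_to_keep=10):
    # frames: iterable of (points_xyzi, labels) pairs, all in the same ego frame.
    # Keeps a sliding window of the most recent frames and stacks them into one cloud.
    window = []
    for points, labels in frames:
        window.append(np.concatenate((points, labels), axis=-1))
        if len(window) > max_frames_to_keep:
            window = window[1:]
    return np.concatenate(window, axis=0)

# Random stand-ins for real frames: 100 points with x, y, z, intensity and one label column.
fake_frames = [(np.random.rand(100, 4), np.random.randint(0, 2, (100, 1))) for _ in range(3)]
print(aggregate_labeled_clouds(fake_frames).shape)  # (300, 5)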
from typing import Sequence, Optional
import pandas as pd
import numpy as np
def formatted_corr_df(df: pd.DataFrame, cols: Optional[Sequence[str]] = None) -> pd.DataFrame:
"""
Calculates correlations on a DataFrame and displays only the lower triangular of the
resulting correlation DataFrame.
:param df:
:param cols: subset of column names on which to calculate correlations
:return:
"""
if not cols:
use_cols = list(df.columns)
else:
use_cols = list(cols)
corr_df = df[use_cols].corr()
corr_df = _lower_triangular_of_df(corr_df)
return corr_df.applymap(lambda x: f'{x:.2f}' if not isinstance(x, str) else x)
def _lower_triangular_of_df(df):
return pd.DataFrame(np.tril(df), index=df.index, columns=df.columns).replace(0, '')
|
[
"numpy.tril"
] |
[((738, 749), 'numpy.tril', 'np.tril', (['df'], {}), '(df)\n', (745, 749), True, 'import numpy as np\n')]
|
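A small usage sketch for formatted_corr_df as defined above, on synthetic data; the column names are purely illustrative.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({'a': rng.normal(size=50), 'b': rng.normal(size=50)})
df['c'] = 2 * df['a'] + rng.normal(size=50)

# Full lower-triangular correlation table, then one restricted to a column subset.
print(formatted_corr_df(df))
print(formatted_corr_df(df, cols=['a', 'c']))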
import torch
import get_data
import numpy as np
import torchaudio
def number_of_correct(pred, target):
return pred.squeeze().eq(target).sum().item()
def get_likely_index(tensor):
return tensor.argmax(dim=-1)
def compute_accuracy(model, data_loader, device):
model.eval()
correct = 0
for data, target in data_loader:
data = data.to(device)
target = target.to(device)
pred = model(data)
pred = get_likely_index(pred)
correct += number_of_correct(pred, target)
score = correct / len(data_loader.dataset)
return score
def apply_to_wav(model, waveform: torch.Tensor, sample_rate: float, device: str):
model.eval()
mel_spec = get_data.prepare_wav(waveform, sample_rate)
mel_spec = torch.unsqueeze(mel_spec, dim=0).to(device)
res = model(mel_spec)
    # Softmax over the class logits; drop the singleton batch dimension before ranking.
    probs = torch.nn.Softmax(dim=-1)(res).cpu().detach().numpy().squeeze()
    predictions = []
    for idx in np.argsort(-probs):
label = get_data.idx_to_label(idx)
predictions.append((label, probs[idx]))
return predictions
def apply_to_file(model, wav_file: str, device: str):
waveform, sample_rate = torchaudio.load(wav_file)
return apply_to_wav(model, waveform, sample_rate, device)
|
[
"torch.nn.Softmax",
"get_data.prepare_wav",
"torchaudio.load",
"torch.unsqueeze",
"get_data.idx_to_label",
"numpy.argsort"
] |
[((707, 750), 'get_data.prepare_wav', 'get_data.prepare_wav', (['waveform', 'sample_rate'], {}), '(waveform, sample_rate)\n', (727, 750), False, 'import get_data\n'), ((938, 956), 'numpy.argsort', 'np.argsort', (['(-probs)'], {}), '(-probs)\n', (948, 956), True, 'import numpy as np\n'), ((1156, 1181), 'torchaudio.load', 'torchaudio.load', (['wav_file'], {}), '(wav_file)\n', (1171, 1181), False, 'import torchaudio\n'), ((974, 1000), 'get_data.idx_to_label', 'get_data.idx_to_label', (['idx'], {}), '(idx)\n', (995, 1000), False, 'import get_data\n'), ((766, 798), 'torch.unsqueeze', 'torch.unsqueeze', (['mel_spec'], {'dim': '(0)'}), '(mel_spec, dim=0)\n', (781, 798), False, 'import torch\n'), ((849, 873), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (865, 873), False, 'import torch\n')]
|
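A hedged usage sketch for the inference helpers above; it assumes a trained model object and the project's get_data module are importable, and the file paths are placeholders.

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
# Placeholder: however the trained speech-command model is obtained in this project.
model = torch.load("model.pt", map_location=device).to(device)

# Rank the labels for one recording (apply_to_file is defined above).
for label, prob in apply_to_file(model, "example.wav", device)[:3]:
    print(f"{label}: {prob:.3f}")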
import cv2
import numpy as np
from plantcv.plantcv.transform import nonuniform_illumination
def test_nonuniform_illumination_rgb(transform_test_data):
"""Test for PlantCV."""
# Load rgb image
rgb_img = cv2.imread(transform_test_data.small_rgb_img)
corrected = nonuniform_illumination(img=rgb_img, ksize=11)
assert np.mean(corrected) < np.mean(rgb_img)
def test_nonuniform_illumination_gray(transform_test_data):
"""Test for PlantCV."""
    # Load gray image
gray_img = cv2.imread(transform_test_data.small_gray_img, -1)
corrected = nonuniform_illumination(img=gray_img, ksize=11)
assert corrected.shape == gray_img.shape
|
[
"numpy.mean",
"plantcv.plantcv.transform.nonuniform_illumination",
"cv2.imread"
] |
[((216, 261), 'cv2.imread', 'cv2.imread', (['transform_test_data.small_rgb_img'], {}), '(transform_test_data.small_rgb_img)\n', (226, 261), False, 'import cv2\n'), ((278, 324), 'plantcv.plantcv.transform.nonuniform_illumination', 'nonuniform_illumination', ([], {'img': 'rgb_img', 'ksize': '(11)'}), '(img=rgb_img, ksize=11)\n', (301, 324), False, 'from plantcv.plantcv.transform import nonuniform_illumination\n'), ((500, 550), 'cv2.imread', 'cv2.imread', (['transform_test_data.small_gray_img', '(-1)'], {}), '(transform_test_data.small_gray_img, -1)\n', (510, 550), False, 'import cv2\n'), ((567, 614), 'plantcv.plantcv.transform.nonuniform_illumination', 'nonuniform_illumination', ([], {'img': 'gray_img', 'ksize': '(11)'}), '(img=gray_img, ksize=11)\n', (590, 614), False, 'from plantcv.plantcv.transform import nonuniform_illumination\n'), ((336, 354), 'numpy.mean', 'np.mean', (['corrected'], {}), '(corrected)\n', (343, 354), True, 'import numpy as np\n'), ((357, 373), 'numpy.mean', 'np.mean', (['rgb_img'], {}), '(rgb_img)\n', (364, 373), True, 'import numpy as np\n')]
|
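For illustration only, a standalone sketch that applies nonuniform_illumination to a synthetic gradient-lit grayscale image; the ksize value mirrors the tests above.

import numpy as np
from plantcv.plantcv.transform import nonuniform_illumination

# Synthetic 200x200 grayscale image with a left-to-right illumination gradient.
gradient = np.tile(np.linspace(50, 200, 200, dtype=np.uint8), (200, 1))
corrected = nonuniform_illumination(img=gradient, ksize=11)
print(gradient.mean(), corrected.mean())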
def main():
import numpy as np
import matplotlib.pyplot as plt
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import pickle
from random import randint, randrange
import sys
from tqdm import tqdm
import cv2
print("CUDA available: {}".format(torch.cuda.is_available()))
location = "ncc"
# Import model architectures
from models.DSCLRCN_OldContext import DSCLRCN
from models.CoSADUV import CoSADUV
from models.CoSADUV_NoTemporal import CoSADUV_NoTemporal
# Prepare settings and get the datasets
from util.data_utils import get_SALICON_datasets, get_video_datasets
### Data options ###
dataset_root_dir = "Dataset/UAV123" # Dataset/[SALICON, UAV123, UAV123_LIKE_MISC]
mean_image_name = (
"mean_image.npy"
) # Must be located at dataset_root_dir/mean_image_name
img_size = (
480,
640,
) # height, width - original: 480, 640, reimplementation: 96, 128
duration = (
300
) # Length of sequences loaded from each video, if a video dataset is used
from util import loss_functions
from util.solver import Solver
### Testing options ###
# Minibatchsize: Determines how many images are processed at a time on the GPU
minibatchsize = 2 # Recommended: 4 for 480x640 for >12GB mem, 2 for <12GB mem.
########## PREPARE DATASETS ##########
### Prepare datasets and loaders ###
if "SALICON" in dataset_root_dir:
train_data, val_data, test_data, mean_image = get_SALICON_datasets(
dataset_root_dir, mean_image_name, img_size
)
train_loader = [
torch.utils.data.DataLoader(
train_data,
batch_size=minibatchsize,
shuffle=True,
num_workers=8,
pin_memory=True,
)
]
val_loader = [
torch.utils.data.DataLoader(
val_data,
batch_size=minibatchsize,
shuffle=True,
num_workers=8,
pin_memory=True,
)
]
# Load test loader using val_data as SALICON does not give GT for its test set
test_loader = [
torch.utils.data.DataLoader(
val_data,
batch_size=minibatchsize,
shuffle=True,
num_workers=8,
pin_memory=True,
)
]
elif "UAV123" in dataset_root_dir:
train_loader, val_loader, test_loader, mean_image = get_video_datasets(
dataset_root_dir,
mean_image_name,
duration=duration,
img_size=img_size,
shuffle=False,
loader_settings={
"batch_size": minibatchsize,
"num_workers": 8,
"pin_memory": False,
},
)
########## LOADING MODELS ##########
# Loading a model from the saved state that produced
# the lowest validation loss during training:
# Requires the model classes be loaded
# Assumes the model uses models.CoSADUV_NoTemporal architecture.
# If not, this method will fail
def load_model_from_checkpoint(model_name):
filename = "trained_models/" + model_name + ".pth"
if torch.cuda.is_available():
checkpoint = torch.load(filename)
else:
# Load GPU model on CPU
checkpoint = torch.load(filename, map_location="cpu")
start_epoch = checkpoint["epoch"]
best_accuracy = checkpoint["best_accuracy"]
if "DSCLRCN" in model_name:
model = DSCLRCN(input_dim=img_size, local_feats_net="Seg")
elif "CoSADUV_NoTemporal" in model_name:
model = CoSADUV_NoTemporal(input_dim=img_size, local_feats_net="Seg")
elif "CoSADUV" in model_name:
model = CoSADUV(input_dim=img_size, local_feats_net="Seg")
else:
tqdm.write("Error: no model name found in filename: {}".format(model_name))
return
# Ignore extra parameters ('.num_batches_tracked'
# that are added on NCC due to different pytorch version)
model.load_state_dict(checkpoint["state_dict"], strict=False)
tqdm.write(
"=> loaded model checkpoint '{}' (trained for {} epochs)\n with architecture {}".format(
model_name, checkpoint["epoch"], type(model).__name__
)
)
if torch.cuda.is_available():
model = model.cuda()
tqdm.write(" loaded to cuda")
model.eval()
return model
def load_model(model_name):
model = torch.load("trained_models/" + model_name, map_location="cpu")
print("=> loaded model '{}'".format(model_name))
if torch.cuda.is_available():
model = model.cuda()
print(" loaded to cuda")
model.eval()
return model
########## LOAD THE MODELS ##########
models = []
model_names = []
# Loading some pretrained models to test them on the images:
# DSCLRCN models
## Trained on SALICON
### NSS_loss
# model_names.append("DSCLRCN/SALICON NSS -1.62NSS val best and last/best_model_DSCLRCN_NSS_loss_batch20_epoch5")
## Trained on UAV123
### NSS_alt loss func
# model_names.append("DSCLRCN/UAV123 NSS_alt 1.38last 3.15best testing/best_model_DSCLRCN_NSS_alt_batch20_epoch5")
# CoSADUV_NoTemporal models
## Trained on UAV123
### DoM loss func
# model_names.append(
# "CoSADUV_NoTemporal/DoM SGD 0.01lr - 3.16 NSS_alt/best_model_CoSADUV_NoTemporal_DoM_batch20_epoch6"
# )
### NSS_alt loss func
# model_names.append("CoSADUV_NoTemporal/NSS_alt Adam lr 1e-4 - 1.36/best_model_CoSADUV_NoTemporal_NSS_alt_batch20_epoch5")
### CE_MAE loss func
# model_names.append("CoSADUV_NoTemporal/best_model_CoSADUV_NoTemporal_CE_MAE_loss_batch20_epoch10")
# CoSADUV models (CoSADUV2)
## Trained on UAV123
### NSS_alt loss func
#### 1 Frame backpropagation
#### Kernel size 1
# model_names.append("CoSADUV/NSS_alt Adam 0.001lr 1frame backprop size1 kernel -2train -0.7val 1epoch/best_model_CoSADUV_NSS_alt_batch20_epoch5")
#### Kernel size 3
model_names.append(
"CoSADUV/NSS_alt Adam 0.01lr 1frame backprop size3 kernel/best_model_CoSADUV_NSS_alt_batch20_epoch5"
)
#### 2 Frame backpropagation
#### Kernel size 3
#model_names.append(
# "CoSADUV/NSS_alt Adam 0.01lr 2frame backprop size3 kernel - 6.56 NSS_alt val/best_model_CoSADUV_NSS_alt_batch20_epoch5"
#)
### DoM loss func
# Only very poor results achieved
### CE_MAE loss func
# Only very poor results achieved
max_name_len = max([len(name) for name in model_names])
# Load the models specified above
iterable = model_names
# for i, name in enumerate(iterable):
# if "best_model" in name:
# models.append(load_model_from_checkpoint(name))
# else:
# models.append(load_model(name))
print()
print("Loaded all specified models")
########## TEST THE MODEL ##########
# Define a function for testing a model
# Output is resized to the size of the data_source
def test_model(model, data_loader, loss_fns=[loss_functions.MAE_loss]):
loss_sums = []
loss_counts = []
for i, loss_fn in enumerate(loss_fns):
if loss_fn != loss_functions.NSS_alt:
loss_sums.append(0)
loss_counts.append(0)
else:
loss_sums.append([0, 0])
loss_counts.append([0, 0])
loop1 = data_loader
if location != "ncc":
loop1 = tqdm(loop1)
for video_loader in loop1:
# Reset temporal state if model is temporal
if model.temporal:
model.clear_temporal_state()
loop2 = video_loader
if location != "ncc":
loop2 = tqdm(loop2)
for data in loop2:
inputs, labels = data
if torch.cuda.is_available():
inputs = inputs.cuda()
labels = labels.cuda()
# Produce the output
outputs = model(inputs).squeeze(1)
# if model is temporal detach its state
if model.temporal:
model.detach_temporal_state()
# Move the output to the CPU so we can process it using numpy
outputs = outputs.cpu().data.numpy()
# Threshold output if model is temporal
if model.temporal:
outputs[outputs >= 0.50001] = 1
outputs[outputs < 0.50001] = 0
# If outputs contains a single image, insert
# a singleton batchsize dimension at index 0
if len(outputs.shape) == 2:
outputs = np.expand_dims(outputs, 0)
# Resize the images to input size
outputs = np.array(
[
cv2.resize(output, (labels.shape[2], labels.shape[1]))
for output in outputs
]
)
outputs = torch.from_numpy(outputs)
if torch.cuda.is_available():
outputs = outputs.cuda()
labels = labels.cuda()
# Apply each loss function, add results to corresponding entry in loss_sums and loss_counts
for i, loss_fn in enumerate(loss_fns):
# If loss fn is NSS_alt, manually add std_dev() if the target is all-0
if loss_fn == loss_functions.NSS_alt:
                        # j indexes images in the batch; i still indexes the loss function.
                        for j in range(len(labels)):
                            if labels[j].sum() == 0:
                                loss_sums[i][1] += outputs[j].std().item()
                                loss_counts[i][1] += 1
                            else:
                                loss_sums[i][0] += loss_fn(outputs[j], labels[j]).item()
                                loss_counts[i][0] += 1
else:
loss_sums[i] += loss_fn(outputs, labels).item()
loss_counts[i] += 1
return loss_sums, loss_counts
# Obtaining loss values on the test set for different models:
loop3 = model_names
if location != "ncc":
loop3 = tqdm(loop3)
for i, model_name in enumerate(loop3):
if location != "ncc":
tqdm.write("model name: {}".format(model_name))
else:
print("model name: {}".format(model_name))
if "best_model" in model_name:
model = load_model_from_checkpoint(model_name)
else:
model = load_model(model_name)
loss_fns = [
loss_functions.NSS_alt,
loss_functions.CE_loss,
loss_functions.MAE_loss,
loss_functions.DoM,
]
test_losses, test_counts = test_model(model, test_loader, loss_fns=loss_fns)
# Print out the result
tqdm.write("[{}] Model: ".format(i, model_names[i]))
for i, func in enumerate(loss_fns):
if func == loss_functions.NSS_alt:
tqdm.write(
("{:25} : {:6f}").format(
"NSS_alt (+ve imgs)", test_losses[i][0] / max(test_counts[i][0], 1)
)
)
tqdm.write(
("{:25} : {:6f}").format(
"NSS_alt (-ve imgs)", test_losses[i][1] / max(test_counts[i][1], 1)
)
)
else:
tqdm.write(
("{:25} : {:6f}").format(
func.__name__, test_losses[i] / test_counts[i]
)
)
del model
if __name__ == "__main__":
import torch
torch.multiprocessing.set_start_method("forkserver") # spawn, forkserver, or fork
# Use CuDNN with benchmarking for performance improvement:
# from 1.05 batch20/s to 1.55 batch20/s on Quadro P4000
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
main()
|
[
"models.DSCLRCN_OldContext.DSCLRCN",
"cv2.resize",
"tqdm.tqdm.write",
"torch.load",
"util.data_utils.get_SALICON_datasets",
"tqdm.tqdm",
"models.CoSADUV_NoTemporal.CoSADUV_NoTemporal",
"util.data_utils.get_video_datasets",
"torch.from_numpy",
"models.CoSADUV.CoSADUV",
"torch.cuda.is_available",
"numpy.expand_dims",
"torch.utils.data.DataLoader",
"torch.multiprocessing.set_start_method"
] |
[((12055, 12107), 'torch.multiprocessing.set_start_method', 'torch.multiprocessing.set_start_method', (['"""forkserver"""'], {}), "('forkserver')\n", (12093, 12107), False, 'import torch\n'), ((1562, 1627), 'util.data_utils.get_SALICON_datasets', 'get_SALICON_datasets', (['dataset_root_dir', 'mean_image_name', 'img_size'], {}), '(dataset_root_dir, mean_image_name, img_size)\n', (1582, 1627), False, 'from util.data_utils import get_SALICON_datasets, get_video_datasets\n'), ((3348, 3373), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3371, 3373), False, 'import torch\n'), ((4524, 4549), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4547, 4549), False, 'import torch\n'), ((4719, 4781), 'torch.load', 'torch.load', (["('trained_models/' + model_name)"], {'map_location': '"""cpu"""'}), "('trained_models/' + model_name, map_location='cpu')\n", (4729, 4781), False, 'import torch\n'), ((4850, 4875), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4873, 4875), False, 'import torch\n'), ((10555, 10566), 'tqdm.tqdm', 'tqdm', (['loop3'], {}), '(loop3)\n', (10559, 10566), False, 'from tqdm import tqdm\n'), ((315, 340), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (338, 340), False, 'import torch\n'), ((1687, 1803), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'minibatchsize', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(train_data, batch_size=minibatchsize, shuffle=\n True, num_workers=8, pin_memory=True)\n', (1714, 1803), False, 'import torch\n'), ((1939, 2053), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_data'], {'batch_size': 'minibatchsize', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(val_data, batch_size=minibatchsize, shuffle=\n True, num_workers=8, pin_memory=True)\n', (1966, 2053), False, 'import torch\n'), ((2277, 2391), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_data'], {'batch_size': 'minibatchsize', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(val_data, batch_size=minibatchsize, shuffle=\n True, num_workers=8, pin_memory=True)\n', (2304, 2391), False, 'import torch\n'), ((2591, 2791), 'util.data_utils.get_video_datasets', 'get_video_datasets', (['dataset_root_dir', 'mean_image_name'], {'duration': 'duration', 'img_size': 'img_size', 'shuffle': '(False)', 'loader_settings': "{'batch_size': minibatchsize, 'num_workers': 8, 'pin_memory': False}"}), "(dataset_root_dir, mean_image_name, duration=duration,\n img_size=img_size, shuffle=False, loader_settings={'batch_size':\n minibatchsize, 'num_workers': 8, 'pin_memory': False})\n", (2609, 2791), False, 'from util.data_utils import get_SALICON_datasets, get_video_datasets\n'), ((3400, 3420), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (3410, 3420), False, 'import torch\n'), ((3496, 3536), 'torch.load', 'torch.load', (['filename'], {'map_location': '"""cpu"""'}), "(filename, map_location='cpu')\n", (3506, 3536), False, 'import torch\n'), ((3688, 3738), 'models.DSCLRCN_OldContext.DSCLRCN', 'DSCLRCN', ([], {'input_dim': 'img_size', 'local_feats_net': '"""Seg"""'}), "(input_dim=img_size, local_feats_net='Seg')\n", (3695, 3738), False, 'from models.DSCLRCN_OldContext import DSCLRCN\n'), ((4596, 4627), 'tqdm.tqdm.write', 'tqdm.write', (['""" loaded to cuda"""'], {}), "(' loaded to cuda')\n", (4606, 4627), False, 'from tqdm import tqdm\n'), ((7787, 7798), 
'tqdm.tqdm', 'tqdm', (['loop1'], {}), '(loop1)\n', (7791, 7798), False, 'from tqdm import tqdm\n'), ((3808, 3869), 'models.CoSADUV_NoTemporal.CoSADUV_NoTemporal', 'CoSADUV_NoTemporal', ([], {'input_dim': 'img_size', 'local_feats_net': '"""Seg"""'}), "(input_dim=img_size, local_feats_net='Seg')\n", (3826, 3869), False, 'from models.CoSADUV_NoTemporal import CoSADUV_NoTemporal\n'), ((8059, 8070), 'tqdm.tqdm', 'tqdm', (['loop2'], {}), '(loop2)\n', (8063, 8070), False, 'from tqdm import tqdm\n'), ((8160, 8185), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8183, 8185), False, 'import torch\n'), ((9354, 9379), 'torch.from_numpy', 'torch.from_numpy', (['outputs'], {}), '(outputs)\n', (9370, 9379), False, 'import torch\n'), ((9400, 9425), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9423, 9425), False, 'import torch\n'), ((3928, 3978), 'models.CoSADUV.CoSADUV', 'CoSADUV', ([], {'input_dim': 'img_size', 'local_feats_net': '"""Seg"""'}), "(input_dim=img_size, local_feats_net='Seg')\n", (3935, 3978), False, 'from models.CoSADUV import CoSADUV\n'), ((9026, 9052), 'numpy.expand_dims', 'np.expand_dims', (['outputs', '(0)'], {}), '(outputs, 0)\n', (9040, 9052), True, 'import numpy as np\n'), ((9186, 9240), 'cv2.resize', 'cv2.resize', (['output', '(labels.shape[2], labels.shape[1])'], {}), '(output, (labels.shape[2], labels.shape[1]))\n', (9196, 9240), False, 'import cv2\n')]
|
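The checkpoint-loading pattern from load_model_from_checkpoint above, distilled into a small hedged sketch; MyModel and the checkpoint path are placeholders for the project's architectures.

import torch
import torch.nn as nn

class MyModel(nn.Module):  # placeholder architecture
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

def load_checkpoint(path, model):
    # Map GPU-saved tensors onto the CPU when CUDA is unavailable.
    map_location = None if torch.cuda.is_available() else "cpu"
    checkpoint = torch.load(path, map_location=map_location)
    # strict=False tolerates keys such as .num_batches_tracked that appear
    # when the checkpoint was written by a different PyTorch version.
    model.load_state_dict(checkpoint["state_dict"], strict=False)
    return model, checkpoint.get("epoch"), checkpoint.get("best_accuracy")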
import copy as _copy
import math as _math
import os as _os
import cv2 as _cv2
import numpy as _np
from PIL import Image as _IMG
from easytorch.utils.logger import *
"""
##################################################################################################
Very useful image related utilities
##################################################################################################
"""
def _same_file(x):
return x
class Image:
def __init__(self, dtype=_np.uint8):
self.dir = None
self.file = None
self.array = None
self.mask = None
self.ground_truth = None
self.extras = {}
self.dtype = dtype
def load(self, dir, file):
try:
self.dir = dir
self.file = file
self.array = _np.array(_IMG.open(self.path), dtype=self.dtype)
except Exception as e:
error('Fail to load file: ' + self.file + ': ' + str(e))
def load_mask(self, mask_dir=None, fget_mask=_same_file):
if fget_mask is None:
fget_mask = _same_file
try:
mask_file = fget_mask(self.file)
self.mask = _np.array(_IMG.open(_os.path.join(mask_dir, mask_file)), dtype=self.dtype)
except Exception as e:
error('Fail to load mask: ' + str(e))
def load_ground_truth(self, gt_dir=None, fget_ground_truth=_same_file):
if fget_ground_truth is None:
fget_ground_truth = _same_file
try:
gt_file = fget_ground_truth(self.file)
self.ground_truth = _np.array(_IMG.open(_os.path.join(gt_dir, gt_file)), dtype=self.dtype)
except Exception as e:
error('Fail to load ground truth: ' + str(e))
def get_array(self, dir='', getter=_same_file, file=None):
if getter is None:
getter = _same_file
if not file:
file = self.file
arr = _np.array(_IMG.open(_os.path.join(dir, getter(file))), dtype=self.dtype)
return arr
def apply_mask(self):
if self.mask is not None:
self.array[self.mask == 0] = 0
def apply_clahe(self, clip_limit=2.0, tile_shape=(8, 8)):
enhancer = _cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_shape)
if len(self.array.shape) == 2:
self.array = enhancer.apply(self.array)
elif len(self.array.shape) == 3:
self.array[:, :, 0] = enhancer.apply(self.array[:, :, 0])
self.array[:, :, 1] = enhancer.apply(self.array[:, :, 1])
self.array[:, :, 2] = enhancer.apply(self.array[:, :, 2])
else:
error('More than three channels')
def __copy__(self):
copy_obj = Image()
copy_obj.file = _copy.copy(self.file)
copy_obj.array = _copy.copy(self.array)
copy_obj.mask = _copy.copy(self.mask)
copy_obj.ground_truth = _copy.copy(self.ground_truth)
copy_obj.extras = _copy.deepcopy(self.extras)
copy_obj.dtype = _copy.deepcopy(self.dtype)
return copy_obj
@property
def path(self):
return _os.path.join(self.dir, self.file)
def get_rgb_scores(arr_2d=None, truth=None):
"""
    Returns an RGB image of the pixelwise agreement between the ground truth and arr_2d
    (the predicted image), using different color codes.
    Useful when one needs to inspect a segmentation result against the ground truth.
:param arr_2d:
:param truth:
:return:
"""
arr_rgb = _np.zeros([arr_2d.shape[0], arr_2d.shape[1], 3], dtype=_np.uint8)
x = arr_2d.copy()
y = truth.copy()
x[x == 255] = 1
y[y == 255] = 1
xy = x + (y * 2)
arr_rgb[xy == 3] = [255, 255, 255]
arr_rgb[xy == 1] = [0, 255, 0]
arr_rgb[xy == 2] = [255, 0, 0]
arr_rgb[xy == 0] = [0, 0, 0]
return arr_rgb
def get_praf1(arr_2d=None, truth=None):
"""
    Returns precision, recall, F1 and accuracy scores between two binary arrays, rounded to five decimal places.
:param arr_2d:
:param truth:
:return:
"""
x = arr_2d.copy()
y = truth.copy()
x[x == 255] = 1
y[y == 255] = 1
xy = x + (y * 2)
tp = xy[xy == 3].shape[0]
fp = xy[xy == 1].shape[0]
tn = xy[xy == 0].shape[0]
fn = xy[xy == 2].shape[0]
try:
p = tp / (tp + fp)
except ZeroDivisionError:
p = 0
try:
r = tp / (tp + fn)
except ZeroDivisionError:
r = 0
try:
a = (tp + tn) / (tp + fp + fn + tn)
except ZeroDivisionError:
a = 0
try:
f1 = 2 * p * r / (p + r)
except ZeroDivisionError:
f1 = 0
return {
'Precision': round(p, 5),
'Recall': round(r, 5),
'Accuracy': round(a, 5),
'F1': round(f1, 5)
}
def rescale2d(arr):
m = _np.max(arr)
n = _np.min(arr)
return (arr - n) / (m - n)
def rescale3d(arrays):
return list(rescale2d(arr) for arr in arrays)
def get_signed_diff_int8(image_arr1=None, image_arr2=None):
signed_diff = _np.array(image_arr1 - image_arr2, dtype=_np.int8)
fx = _np.array(signed_diff - _np.min(signed_diff), _np.uint8)
fx = rescale2d(fx)
return _np.array(fx * 255, _np.uint8)
def whiten_image2d(img_arr2d=None):
img_arr2d = img_arr2d.copy()
img_arr2d = (img_arr2d - img_arr2d.mean()) / img_arr2d.std()
return _np.array(rescale2d(img_arr2d) * 255, dtype=_np.uint8)
def get_chunk_indexes(img_shape=(0, 0), chunk_shape=(0, 0), offset_row_col=None):
"""
Returns a generator for four corners of each patch within image as specified.
:param img_shape: Shape of the original image
:param chunk_shape: Shape of desired patch
:param offset_row_col: Offset for each patch on both x, y directions
:return:
"""
img_rows, img_cols = img_shape
chunk_row, chunk_col = chunk_shape
offset_row, offset_col = offset_row_col
row_end = False
for i in range(0, img_rows, offset_row):
if row_end:
continue
row_from, row_to = i, i + chunk_row
if row_to > img_rows:
row_to = img_rows
row_from = img_rows - chunk_row
row_end = True
col_end = False
for j in range(0, img_cols, offset_col):
if col_end:
continue
col_from, col_to = j, j + chunk_col
if col_to > img_cols:
col_to = img_cols
col_from = img_cols - chunk_col
col_end = True
yield [int(row_from), int(row_to), int(col_from), int(col_to)]
def get_chunk_indices_by_index(img_shape=(0, 0), chunk_shape=(0, 0), indices=None):
x, y = chunk_shape
ix = []
for (c1, c2) in indices:
w, h = img_shape
p, q, r, s = c1 - x // 2, c1 + x // 2, c2 - y // 2, c2 + y // 2
if p < 0:
p, q = 0, x
if q > w:
p, q = w - x, w
if r < 0:
r, s = 0, y
if s > h:
r, s = h - y, h
ix.append([int(p), int(q), int(r), int(s)])
return ix
def merge_patches(patches=None, image_size=(0, 0), patch_size=(0, 0), offset_row_col=None):
"""
Merge different pieces of image to form a full image. Overlapped regions are averaged.
:param patches: List of all patches to merge in order (left to right).
:param image_size: Full image size
:param patch_size: A patch size(Patches must be uniform in size to be able to merge)
:param offset_row_col: Offset used to chunk the patches.
:return:
"""
padded_sum = _np.zeros([image_size[0], image_size[1]])
non_zero_count = _np.zeros_like(padded_sum)
for i, chunk_ix in enumerate(get_chunk_indexes(image_size, patch_size, offset_row_col)):
row_from, row_to, col_from, col_to = chunk_ix
patch = _np.array(patches[i, :, :]).squeeze()
padded = _np.pad(patch, [(row_from, image_size[0] - row_to), (col_from, image_size[1] - col_to)],
'constant')
padded_sum = padded + padded_sum
non_zero_count = non_zero_count + _np.array(padded > 0).astype(int)
non_zero_count[non_zero_count == 0] = 1
return _np.array(padded_sum / non_zero_count, dtype=_np.uint8)
def expand_and_mirror_patch(full_img_shape=None, orig_patch_indices=None, expand_by=None):
"""
    Given a patch within an image, this function selects a specified region around it if present, else mirrors it.
    It is useful for neural networks like U-Net, which expect a wider context region than the actual input patch.
:param full_img_shape: Full image shape
:param orig_patch_indices: Four cornets of the actual patch
:param expand_by: Expand by (x, y ) in each dimension
:return:
"""
i, j = int(expand_by[0] / 2), int(expand_by[1] / 2)
p, q, r, s = orig_patch_indices
a, b, c, d = p - i, q + i, r - j, s + j
pad_a, pad_b, pad_c, pad_d = [0] * 4
if a < 0:
pad_a = i - p
a = 0
if b > full_img_shape[0]:
pad_b = b - full_img_shape[0]
b = full_img_shape[0]
if c < 0:
pad_c = j - r
c = 0
if d > full_img_shape[1]:
pad_d = d - full_img_shape[1]
d = full_img_shape[1]
return a, b, c, d, [(pad_a, pad_b), (pad_c, pad_d)]
def largest_cc(binary_arr=None):
from skimage.measure import label
labels = label(binary_arr)
if labels.max() != 0: # assume at least 1 CC
largest = labels == _np.argmax(_np.bincount(labels.flat)[1:]) + 1
return largest
def map_img_to_img2d(map_to, img):
arr = map_to.copy()
rgb = arr.copy()
if len(arr.shape) == 2:
rgb = _np.zeros((arr.shape[0], arr.shape[1], 3), dtype=_np.uint8)
rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] = arr, arr, arr
rgb[:, :, 0][img == 255] = 255
rgb[:, :, 1][img == 255] = 0
rgb[:, :, 2][img == 255] = 0
return rgb
def remove_connected_comp(segmented_img, connected_comp_diam_limit=20):
"""
    Remove connected components of a binary image that are smaller than the specified diameter.
:param segmented_img: Binary image.
:param connected_comp_diam_limit: Diameter limit
:return:
"""
from scipy.ndimage.measurements import label
img = segmented_img.copy()
structure = _np.ones((3, 3), dtype=_np.int)
labeled, n_components = label(img, structure)
    # Component labels run from 1 to n_components (0 is the background).
    for i in range(1, n_components + 1):
ixy = _np.array(list(zip(*_np.where(labeled == i))))
x1, y1 = ixy[0]
x2, y2 = ixy[-1]
dst = _math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
if dst < connected_comp_diam_limit:
for u, v in ixy:
img[u, v] = 0
return img
def get_pix_neigh(i, j, eight=False):
"""
    Get the four/eight neighbors of a pixel.
:param i: x position of pixel
:param j: y position of pixel
:param eight: Eight neighbors? Else four
:return:
"""
n1 = (i - 1, j - 1)
n2 = (i - 1, j)
n3 = (i - 1, j + 1)
n4 = (i, j - 1)
n5 = (i, j + 1)
n6 = (i + 1, j - 1)
n7 = (i + 1, j)
n8 = (i + 1, j + 1)
if eight:
return [n1, n2, n3, n4, n5, n6, n7, n8]
else:
return [n2, n5, n7, n4]
|
[
"PIL.Image.open",
"copy.deepcopy",
"numpy.ones",
"numpy.where",
"scipy.ndimage.measurements.label",
"os.path.join",
"math.sqrt",
"numpy.max",
"cv2.createCLAHE",
"numpy.array",
"numpy.zeros",
"numpy.pad",
"numpy.bincount",
"numpy.min",
"copy.copy",
"numpy.zeros_like"
] |
[((3470, 3535), 'numpy.zeros', '_np.zeros', (['[arr_2d.shape[0], arr_2d.shape[1], 3]'], {'dtype': '_np.uint8'}), '([arr_2d.shape[0], arr_2d.shape[1], 3], dtype=_np.uint8)\n', (3479, 3535), True, 'import numpy as _np\n'), ((4755, 4767), 'numpy.max', '_np.max', (['arr'], {}), '(arr)\n', (4762, 4767), True, 'import numpy as _np\n'), ((4776, 4788), 'numpy.min', '_np.min', (['arr'], {}), '(arr)\n', (4783, 4788), True, 'import numpy as _np\n'), ((4975, 5025), 'numpy.array', '_np.array', (['(image_arr1 - image_arr2)'], {'dtype': '_np.int8'}), '(image_arr1 - image_arr2, dtype=_np.int8)\n', (4984, 5025), True, 'import numpy as _np\n'), ((5126, 5156), 'numpy.array', '_np.array', (['(fx * 255)', '_np.uint8'], {}), '(fx * 255, _np.uint8)\n', (5135, 5156), True, 'import numpy as _np\n'), ((7501, 7542), 'numpy.zeros', '_np.zeros', (['[image_size[0], image_size[1]]'], {}), '([image_size[0], image_size[1]])\n', (7510, 7542), True, 'import numpy as _np\n'), ((7564, 7590), 'numpy.zeros_like', '_np.zeros_like', (['padded_sum'], {}), '(padded_sum)\n', (7578, 7590), True, 'import numpy as _np\n'), ((8109, 8164), 'numpy.array', '_np.array', (['(padded_sum / non_zero_count)'], {'dtype': '_np.uint8'}), '(padded_sum / non_zero_count, dtype=_np.uint8)\n', (8118, 8164), True, 'import numpy as _np\n'), ((9292, 9309), 'scipy.ndimage.measurements.label', 'label', (['binary_arr'], {}), '(binary_arr)\n', (9297, 9309), False, 'from scipy.ndimage.measurements import label\n'), ((10220, 10251), 'numpy.ones', '_np.ones', (['(3, 3)'], {'dtype': '_np.int'}), '((3, 3), dtype=_np.int)\n', (10228, 10251), True, 'import numpy as _np\n'), ((10280, 10301), 'scipy.ndimage.measurements.label', 'label', (['img', 'structure'], {}), '(img, structure)\n', (10285, 10301), False, 'from scipy.ndimage.measurements import label\n'), ((2205, 2268), 'cv2.createCLAHE', '_cv2.createCLAHE', ([], {'clipLimit': 'clip_limit', 'tileGridSize': 'tile_shape'}), '(clipLimit=clip_limit, tileGridSize=tile_shape)\n', (2221, 2268), True, 'import cv2 as _cv2\n'), ((2747, 2768), 'copy.copy', '_copy.copy', (['self.file'], {}), '(self.file)\n', (2757, 2768), True, 'import copy as _copy\n'), ((2794, 2816), 'copy.copy', '_copy.copy', (['self.array'], {}), '(self.array)\n', (2804, 2816), True, 'import copy as _copy\n'), ((2841, 2862), 'copy.copy', '_copy.copy', (['self.mask'], {}), '(self.mask)\n', (2851, 2862), True, 'import copy as _copy\n'), ((2895, 2924), 'copy.copy', '_copy.copy', (['self.ground_truth'], {}), '(self.ground_truth)\n', (2905, 2924), True, 'import copy as _copy\n'), ((2951, 2978), 'copy.deepcopy', '_copy.deepcopy', (['self.extras'], {}), '(self.extras)\n', (2965, 2978), True, 'import copy as _copy\n'), ((3004, 3030), 'copy.deepcopy', '_copy.deepcopy', (['self.dtype'], {}), '(self.dtype)\n', (3018, 3030), True, 'import copy as _copy\n'), ((3105, 3139), 'os.path.join', '_os.path.join', (['self.dir', 'self.file'], {}), '(self.dir, self.file)\n', (3118, 3139), True, 'import os as _os\n'), ((7811, 7916), 'numpy.pad', '_np.pad', (['patch', '[(row_from, image_size[0] - row_to), (col_from, image_size[1] - col_to)]', '"""constant"""'], {}), "(patch, [(row_from, image_size[0] - row_to), (col_from, image_size[1\n ] - col_to)], 'constant')\n", (7818, 7916), True, 'import numpy as _np\n'), ((9582, 9641), 'numpy.zeros', '_np.zeros', (['(arr.shape[0], arr.shape[1], 3)'], {'dtype': '_np.uint8'}), '((arr.shape[0], arr.shape[1], 3), dtype=_np.uint8)\n', (9591, 9641), True, 'import numpy as _np\n'), ((10460, 10503), 'math.sqrt', '_math.sqrt', (['((x2 - x1) ** 2 + (y2 
- y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (10470, 10503), True, 'import math as _math\n'), ((5059, 5079), 'numpy.min', '_np.min', (['signed_diff'], {}), '(signed_diff)\n', (5066, 5079), True, 'import numpy as _np\n'), ((820, 840), 'PIL.Image.open', '_IMG.open', (['self.path'], {}), '(self.path)\n', (829, 840), True, 'from PIL import Image as _IMG\n'), ((7755, 7782), 'numpy.array', '_np.array', (['patches[i, :, :]'], {}), '(patches[i, :, :])\n', (7764, 7782), True, 'import numpy as _np\n'), ((1190, 1224), 'os.path.join', '_os.path.join', (['mask_dir', 'mask_file'], {}), '(mask_dir, mask_file)\n', (1203, 1224), True, 'import os as _os\n'), ((1600, 1630), 'os.path.join', '_os.path.join', (['gt_dir', 'gt_file'], {}), '(gt_dir, gt_file)\n', (1613, 1630), True, 'import os as _os\n'), ((8020, 8041), 'numpy.array', '_np.array', (['(padded > 0)'], {}), '(padded > 0)\n', (8029, 8041), True, 'import numpy as _np\n'), ((9399, 9424), 'numpy.bincount', '_np.bincount', (['labels.flat'], {}), '(labels.flat)\n', (9411, 9424), True, 'import numpy as _np\n'), ((10370, 10393), 'numpy.where', '_np.where', (['(labeled == i)'], {}), '(labeled == i)\n', (10379, 10393), True, 'import numpy as _np\n')]
|
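A round-trip usage sketch for get_chunk_indexes and merge_patches defined above, on a synthetic image; the patch and offset sizes are arbitrary.

import numpy as np

image = (np.random.rand(100, 120) * 255).astype(np.uint8)
patch_shape = (32, 32)
offset = (16, 16)

indices = list(get_chunk_indexes(image.shape, patch_shape, offset))
patches = np.array([image[a:b, c:d] for a, b, c, d in indices])

# Overlapping regions are averaged, so the reconstruction should closely match the original.
reconstructed = merge_patches(patches, image.shape, patch_shape, offset)
print(np.abs(reconstructed.astype(int) - image.astype(int)).max())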
'''
Created on July 7, 2019
@author: Terry
@email:<EMAIL>
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, plot_tree
# Parameter
n_classes = 3
plot_colors = "ryb"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0,1],[0,2],[0,3],
[1,2],[1,3],[2,3]]):
# We only take two corresponding features
X = iris.data[:,pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X,y)
# Plot the descision boundary
plt.subplot(2,3,pairidx + 1)
    x_min, x_max = X[:,0].min() - 1, X[:,0].max() + 1
    y_min, y_max = X[:,1].min() - 1, X[:,1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min,x_max,plot_step),
np.arange(y_min,y_max,plot_step))
plt.tight_layout(h_pad=0.5,w_pad=0.5,pad=2.5)
Z = clf.predict(np.c_[xx.ravel(),yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
# Plot the training points
for i, color in zip(range(n_classes),plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx,0],X[idx,1],c=color,label=iris.target_names[i],
cmap=plt.cm.RdYlBu,edgecolors='black',s=15)
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend(loc='lower right', borderpad=0, handletextpad=0)
plt.axis("tight")
plt.figure()
clf = DecisionTreeClassifier().fit(iris.data, iris.target)
plot_tree(clf, filled=True)
plt.show()
|
[
"sklearn.datasets.load_iris",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.xlabel",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.figure",
"sklearn.tree.plot_tree",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((315, 326), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (324, 326), False, 'from sklearn.datasets import load_iris\n'), ((1404, 1477), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Decision surface of a decision tree using paired features"""'], {}), "('Decision surface of a decision tree using paired features')\n", (1416, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1537), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'borderpad': '(0)', 'handletextpad': '(0)'}), "(loc='lower right', borderpad=0, handletextpad=0)\n", (1488, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1555), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (1546, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1569), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1567, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1629, 1656), 'sklearn.tree.plot_tree', 'plot_tree', (['clf'], {'filled': '(True)'}), '(clf, filled=True)\n', (1638, 1656), False, 'from sklearn.tree import DecisionTreeClassifier, plot_tree\n'), ((1657, 1667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1665, 1667), True, 'import matplotlib.pyplot as plt\n'), ((620, 650), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(pairidx + 1)'], {}), '(2, 3, pairidx + 1)\n', (631, 650), True, 'import matplotlib.pyplot as plt\n'), ((878, 925), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0.5)', 'w_pad': '(0.5)', 'pad': '(2.5)'}), '(h_pad=0.5, w_pad=0.5, pad=2.5)\n', (894, 925), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1055), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.RdYlBu'}), '(xx, yy, Z, cmap=plt.cm.RdYlBu)\n', (1024, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1060, 1099), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['iris.feature_names[pair[0]]'], {}), '(iris.feature_names[pair[0]])\n', (1070, 1099), True, 'import matplotlib.pyplot as plt\n'), ((1104, 1143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['iris.feature_names[pair[1]]'], {}), '(iris.feature_names[pair[1]])\n', (1114, 1143), True, 'import matplotlib.pyplot as plt\n'), ((782, 816), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'plot_step'], {}), '(x_min, x_max, plot_step)\n', (791, 816), True, 'import numpy as np\n'), ((840, 874), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'plot_step'], {}), '(y_min, y_max, plot_step)\n', (849, 874), True, 'import numpy as np\n'), ((1249, 1265), 'numpy.where', 'np.where', (['(y == i)'], {}), '(y == i)\n', (1257, 1265), True, 'import numpy as np\n'), ((1274, 1395), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[idx, 0]', 'X[idx, 1]'], {'c': 'color', 'label': 'iris.target_names[i]', 'cmap': 'plt.cm.RdYlBu', 'edgecolors': '"""black"""', 's': '(15)'}), "(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i], cmap\n =plt.cm.RdYlBu, edgecolors='black', s=15)\n", (1285, 1395), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1600), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1598, 1600), False, 'from sklearn.tree import DecisionTreeClassifier, plot_tree\n'), ((547, 571), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (569, 571), False, 'from sklearn.tree import DecisionTreeClassifier, plot_tree\n')]
|
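As a complement to the plotting script above, a brief sketch that evaluates a fitted tree on a held-out split; the split size and random seed are arbitrary.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.3, random_state=0)

clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
print("test accuracy:", clf.score(X_test, y_test))
print("feature importances:", clf.feature_importances_)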
""" Intermediate Factors
@author: <NAME>
This module computes the interpolated features between the principal vectors -- the ones
linking source to target following the geodesic on the Grassmannian. We use the
equivalent formulation derived in [1] and represent this geodesic for each pair
of principal components.
Example
-------
Examples are given in the vignettes.
Notes
-------
Examples are given in the vignette
References
-------
[1] <NAME>., <NAME>., <NAME>., "TO CHANGE"
"""
import numpy as np
import pandas as pd
from pathlib import Path
from joblib import Parallel, delayed
from precise.principal_vectors import PVComputation
class IntermediateFactors:
"""
    Handle the intermediate representations between source and target factors.
Attributes
-------
source_components_ : numpy.ndarray, shape (n_components, n_features)
Loadings of the source factors, be them already aligned to target or not.
target_components : numpy.ndarray, shape (n_components, n_features)
Loadings of the target factors, be them already aligned to source or not.
intermediate_factors_ : numpy.ndarray, shape (n_representations, n_components, n_features)
Loadings of intermediate factors along the geodesic path. Components are ordered
by similarity, i.e. first components correspond to path between first PVs, etc.
n_representations: int
Number of representations along the geodesic path. If -1, means that the Geodesic Flow Kernel
has been used instead.
geodesic_matrix_: numpy.ndarray, shape (n_features, n_features)
Geodesic Matrix for geodesic flow kernel.
geodesic_flow_: method float:numpy.array
Method that computes geodesic flow at a certain position.
"""
def __init__(self, n_representations, n_jobs=1):
"""
Parameters
-------
n_representations : int
Number of representations to pick between source and target.
n_jobs: int (optional, default to 1)
Number of jobs for computation.
"""
self.n_representations = n_representations
self.intermediate_factors_ = None
self.source_components_ = None
self.target_components_ = None
        self.n_jobs = n_jobs
def _compute_principal_vectors(self):
n_pv = np.min([self.source_components_.shape[0],
self.target_components_.shape[0]])
n_factors = {
'source': self.source_components_.shape[0],
'target': self.target_components_.shape[0]
}
self.principal_vectors_ = PVComputation(n_factors, n_pv)
self.principal_vectors_.compute_principal_vectors(self.source_components_,
self.target_components_)
def _compute_flow_time(t, principal_vectors):
Pi = np.sin( (1-t) * principal_vectors.angles_)\
/np.sin(principal_vectors.angles_)
Pi[np.isnan(Pi)] = 1-t # Asymptotic value of sin/sin in 0
Xi = np.sin( t * principal_vectors.angles_)\
/ np.sin(principal_vectors.angles_)
Xi[np.isnan(Xi)] = t # Asymptotic value of sin/sin in 0
return (principal_vectors.source_components_.T*Pi \
+ principal_vectors.target_components_.T*Xi).T
def sample_flow(self, source_components, target_components, already_aligned=False):
"""
        Sample intermediate subspaces (i.e. sets of factors) uniformly along the geodesic flow.
IMPORTANT: Same genes have to be given for source and target, and in same order
Parameters
-------
source_components : np.ndarray, shape (n_components, n_features)
Source factors
target_components : np.ndarray, shape (n_components, n_features)
Target factors
already_aligned : boolean (optional, default to False)
Whether the components are already aligned (i.e. are they PV or not).
Return values
-------
Intermediate subspace, numpy.ndarray of shape (n_representations + 1, n_components, n_features).
"""
self.source_components_ = source_components
self.target_components_ = target_components
# Compute the principal vectors
if not already_aligned:
self._compute_principal_vectors()
else:
self.principal_vectors_.source_components_ = self.source_components_
self.principal_vectors_.target_components = self.target_components_
# Sample at different uniformly distributed time points
if self.n_representations == -1:
t_sample = np.array([1])
else:
t_sample = np.linspace(0, 1, self.n_representations + 1)
if self.n_jobs >= 2:
return np.array(
Parallel(n_jobs=self.n_jobs)\
(delayed(IntermediateFactors._compute_flow_time)(t, self.principal_vectors_)\
for t in t_sample)
)
else:
return np.array([IntermediateFactors._compute_flow_time(t, self.principal_vectors_) for t in t_sample])
def compute_geodesic_matrix(self, source_components, target_components):
"""
        Compute the domain-invariant kernel matrix of the Geodesic Flow Kernel.
Parameters
-------
source_components : np.ndarray, shape (n_components, n_features)
Source factors
target_components : np.ndarray, shape (n_components, n_features)
Target factors
Return values
-------
Method that takes two p-dimensional vector and returns their domain-invariant
scalar product.
"""
self.source_components_ = source_components
self.target_components_ = target_components
self._compute_principal_vectors()
diag_term = (self.principal_vectors_.angles_ - np.cos(self.principal_vectors_.angles_)*np.sin(self.principal_vectors_.angles_)) \
/ 2 / self.principal_vectors_.angles_ / np.power(np.sin(self.principal_vectors_.angles_), 2)
off_diag_term = (np.sin(self.principal_vectors_.angles_) - np.cos(self.principal_vectors_.angles_)*self.principal_vectors_.angles_) \
/ 2 / np.power(np.sin(self.principal_vectors_.angles_),2) / self.principal_vectors_.angles_
# Correct for extreme case when theta = 0
diag_term[np.isnan(diag_term)] = 1./3.
diag_term[np.isinf(diag_term)] = 1./3.
off_diag_term[np.isnan(off_diag_term)] = 1./6.
off_diag_term[np.isinf(off_diag_term)] = 1./6.
diag_term = np.diag(diag_term)
off_diag_term = np.diag(off_diag_term)
self.G_matrix = np.block([
[diag_term, off_diag_term],
[off_diag_term, diag_term]
])
self.projection = np.block([self.principal_vectors_.source_components_.transpose(), self.principal_vectors_.target_components_.transpose()])
return self.G_matrix
#return lambda x,y: IntermediateFactors._compute_domain_invariant_scalar_product(x, y, self.projection, self.G_matrix)
def _compute_domain_invariant_scalar_product(x, y, projection, G_matrix):
x_p = x.dot(projection)
y_p = y.dot(projection)
return x_p.dot(G_matrix).dot(y_p.transpose())
|
[
"numpy.block",
"numpy.diag",
"joblib.Parallel",
"numpy.array",
"numpy.linspace",
"numpy.isnan",
"numpy.cos",
"precise.principal_vectors.PVComputation",
"numpy.min",
"numpy.sin",
"joblib.delayed",
"numpy.isinf"
] |
[((2327, 2403), 'numpy.min', 'np.min', (['[self.source_components_.shape[0], self.target_components_.shape[0]]'], {}), '([self.source_components_.shape[0], self.target_components_.shape[0]])\n', (2333, 2403), True, 'import numpy as np\n'), ((2602, 2632), 'precise.principal_vectors.PVComputation', 'PVComputation', (['n_factors', 'n_pv'], {}), '(n_factors, n_pv)\n', (2615, 2632), False, 'from precise.principal_vectors import PVComputation\n'), ((6663, 6681), 'numpy.diag', 'np.diag', (['diag_term'], {}), '(diag_term)\n', (6670, 6681), True, 'import numpy as np\n'), ((6706, 6728), 'numpy.diag', 'np.diag', (['off_diag_term'], {}), '(off_diag_term)\n', (6713, 6728), True, 'import numpy as np\n'), ((6754, 6820), 'numpy.block', 'np.block', (['[[diag_term, off_diag_term], [off_diag_term, diag_term]]'], {}), '([[diag_term, off_diag_term], [off_diag_term, diag_term]])\n', (6762, 6820), True, 'import numpy as np\n'), ((2861, 2904), 'numpy.sin', 'np.sin', (['((1 - t) * principal_vectors.angles_)'], {}), '((1 - t) * principal_vectors.angles_)\n', (2867, 2904), True, 'import numpy as np\n'), ((2918, 2951), 'numpy.sin', 'np.sin', (['principal_vectors.angles_'], {}), '(principal_vectors.angles_)\n', (2924, 2951), True, 'import numpy as np\n'), ((2963, 2975), 'numpy.isnan', 'np.isnan', (['Pi'], {}), '(Pi)\n', (2971, 2975), True, 'import numpy as np\n'), ((3032, 3069), 'numpy.sin', 'np.sin', (['(t * principal_vectors.angles_)'], {}), '(t * principal_vectors.angles_)\n', (3038, 3069), True, 'import numpy as np\n'), ((3086, 3119), 'numpy.sin', 'np.sin', (['principal_vectors.angles_'], {}), '(principal_vectors.angles_)\n', (3092, 3119), True, 'import numpy as np\n'), ((3131, 3143), 'numpy.isnan', 'np.isnan', (['Xi'], {}), '(Xi)\n', (3139, 3143), True, 'import numpy as np\n'), ((4680, 4693), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4688, 4693), True, 'import numpy as np\n'), ((4731, 4776), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(self.n_representations + 1)'], {}), '(0, 1, self.n_representations + 1)\n', (4742, 4776), True, 'import numpy as np\n'), ((6457, 6476), 'numpy.isnan', 'np.isnan', (['diag_term'], {}), '(diag_term)\n', (6465, 6476), True, 'import numpy as np\n'), ((6504, 6523), 'numpy.isinf', 'np.isinf', (['diag_term'], {}), '(diag_term)\n', (6512, 6523), True, 'import numpy as np\n'), ((6555, 6578), 'numpy.isnan', 'np.isnan', (['off_diag_term'], {}), '(off_diag_term)\n', (6563, 6578), True, 'import numpy as np\n'), ((6610, 6633), 'numpy.isinf', 'np.isinf', (['off_diag_term'], {}), '(off_diag_term)\n', (6618, 6633), True, 'import numpy as np\n'), ((6099, 6138), 'numpy.sin', 'np.sin', (['self.principal_vectors_.angles_'], {}), '(self.principal_vectors_.angles_)\n', (6105, 6138), True, 'import numpy as np\n'), ((4852, 4880), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (4860, 4880), False, 'from joblib import Parallel, delayed\n'), ((6312, 6351), 'numpy.sin', 'np.sin', (['self.principal_vectors_.angles_'], {}), '(self.principal_vectors_.angles_)\n', (6318, 6351), True, 'import numpy as np\n'), ((6168, 6207), 'numpy.sin', 'np.sin', (['self.principal_vectors_.angles_'], {}), '(self.principal_vectors_.angles_)\n', (6174, 6207), True, 'import numpy as np\n'), ((4907, 4954), 'joblib.delayed', 'delayed', (['IntermediateFactors._compute_flow_time'], {}), '(IntermediateFactors._compute_flow_time)\n', (4914, 4954), False, 'from joblib import Parallel, delayed\n'), ((5955, 5994), 'numpy.cos', 'np.cos', (['self.principal_vectors_.angles_'], {}), 
'(self.principal_vectors_.angles_)\n', (5961, 5994), True, 'import numpy as np\n'), ((5995, 6034), 'numpy.sin', 'np.sin', (['self.principal_vectors_.angles_'], {}), '(self.principal_vectors_.angles_)\n', (6001, 6034), True, 'import numpy as np\n'), ((6210, 6249), 'numpy.cos', 'np.cos', (['self.principal_vectors_.angles_'], {}), '(self.principal_vectors_.angles_)\n', (6216, 6249), True, 'import numpy as np\n')]
|
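A minimal usage sketch for IntermediateFactors.sample_flow above, assuming the precise package is installed; random orthonormal loadings stand in for real source and target factor matrices of shape (n_components, n_features).

import numpy as np

rng = np.random.RandomState(0)
n_features, n_factors = 50, 5

# Orthonormal rows as stand-in factor loadings.
source = np.linalg.qr(rng.randn(n_features, n_factors))[0].T
target = np.linalg.qr(rng.randn(n_features, n_factors))[0].T

flow = IntermediateFactors(n_representations=10)
subspaces = flow.sample_flow(source, target)
print(subspaces.shape)  # expected (11, n_pv, n_features)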
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from matplotlib.pyplot import scatter
import theanets
import climate
climate.enable_default_logging()
X_orig = np.load('/Users/bzamecnik/Documents/music-processing/music-processing-experiments/c-scale-piano_spectrogram_2048_hamming.npy')
sample_count, feature_count = X_orig.shape
X = MinMaxScaler().fit_transform(X_orig)
X = X.astype(np.float32)
X_train, X_test = train_test_split(X, test_size=0.4, random_state=42)
X_val, X_test = train_test_split(X_test, test_size=0.5, random_state=42)
# (np.maximum(0, 44100/512*np.arange(13)-2)).astype('int')
#blocks = [0, 84, 170, 256, 342, 428, 514, 600, 687, 773, 859, 945, 1031, 1205]
blocks = [0, 48, 98, 148, 198, 248, 298, 348, 398, 448, 498, 548, 598, 700]
def make_labels(blocks):
label_count = len(blocks) - 1
labels = np.zeros(blocks[-1])
for i in range(label_count):
labels[blocks[i]:blocks[i+1]] = i
return labels
y = make_labels(blocks)
def score(exp, Xs):
X_train, X_val, X_test = Xs
def sc(exp, X):
return r2_score(X, exp.network.predict(X))
print("training: ", sc(exp, X_train))
# NOTE: only optimize to validation dataset's score!
print("validation:", sc(exp, X_val))
print("test: ", sc(exp, X_test))
exp1 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1)
exp1.train(X_train, X_val, optimize='nag', learning_rate=1e-3, momentum=0.9)
exp2 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1)
exp2.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# gives quite nice prediction, trains slow
exp3 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, hidden_activation='relu')
exp3.train(X_train, X_val, optimize='nag', learning_rate=1e-3, momentum=0.9)
exp4 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, input_dropout=0.3)
exp4.train(X_train, X_val, optimize='nag', learning_rate=1e-3, momentum=0.9)
# rmsprop - converges faster in this case than nag
exp5 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1)
exp5.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# tied weights - work well, much lower loss function values
# r2: 0.75037549551862703
exp6 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, tied_weights=True)
exp6.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# higher hidden L1 penalty - worse
exp7 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.7, tied_weights=True)
exp7.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# hidden L2 penalty - a bit worse
exp8 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, hidden_l2=0.1, tied_weights=True)
exp8.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# no regularization - in this case better
# r2: 0.82211329411744094
exp10 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
tied_weights=True)
exp10.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# layerwise autoencoder training
exp11 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 500, feature_count), tied_weights=True)
exp11.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# wow - this actually is able to do a 2D visualization
exp12 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 100, 10, 2, 10, 100, feature_count),
tied_weights=True)
exp12.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
def compute_middle_layer(X, model):
X_pred_ff = model.feed_forward(X)
middle = int(len(X_pred_ff)/2)
X_middle = X_pred_ff[middle]
return X_middle
def visualize_2d(X, y=None):
colors = y/max(y) if y is not None else np.linspace(0,1,len(X))
scatter(X[:,0], X[:,1],
c=colors, alpha=0.2, edgecolors='none', cmap='rainbow')
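# Example (illustrative): project the scaled spectrogram through the bottleneck of
# the deep autoencoder exp12 trained above and colour the 2-D points by block labels.
X_middle_exp12 = compute_middle_layer(X, exp12.network)
visualize_2d(X_middle_exp12, y)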
# same visualization, a little bit better r2
exp13 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 256, 64, 16, 2, 16, 64, 256, feature_count),
tied_weights=True)
exp13.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# contractive - better than without
# r2: 0.82820148664941162
exp14 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
tied_weights=True, contractive=0.8)
exp14.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# tanh - bad
exp15 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
tied_weights=True, hidden_activation='tanh')
exp15.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# relu, contractive
exp16 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 128, 16, 2, 16, 128, feature_count),
tied_weights=True, hidden_activation='relu', contractive=0.5)
exp16.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp17 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 128, 16, 2, 16, 128, feature_count),
tied_weights=True, contractive=0.8)
exp17.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp18 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.8)
exp18.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# r2: 0.83371355062803953
exp19 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.8, hidden_dropout=0.8)
exp19.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp20 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.9, hidden_dropout=0.9)
exp20.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# -----------------
# animate the 2D point movement
import matplotlib.animation as animation
def export_animation(X_2d, y, filename):
fig = plt.figure()
# 854x480 px (480p) in inches, note that 8.54 gives 853px width :/
fig.set_size_inches(8.545, 4.80)
plt.axis('equal')
# plt.tight_layout()
# plt.xlim(-0.1, 1.1)
# plt.ylim(-0.1, 1.1)
images = []
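    # each frame pairs the full scatter (static background, im1) with the current
    # point drawn on top (im2), so ArtistAnimation highlights one point per frame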
im1 = scatter(X_2d[:, 0], X_2d[:, 1], c=y/max(y), cmap='rainbow', alpha=0.2)
for i in range(len(X_2d)):
im2 = scatter(X_2d[i, 0], X_2d[i, 1], c=y[i]/max(y), cmap='rainbow')
images.append([im1, im2])
ani = animation.ArtistAnimation(fig, images,
interval=20, blit=False, repeat=False)
writer = animation.writers['ffmpeg'](fps=50, bitrate=5000)
ani.save(filename, writer=writer, dpi=100)
export_animation(X_tsne, y, 'piano-tsne.mp4')
#----------------------
exp21 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5,
batch_size=len(X_train))
exp21.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
exp22 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5)
exp22.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
exp23 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2,
4, 8, 16, 32, 64, 128, 256, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5)
exp23.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp24 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5,
hidden_activation='linear')
exp24.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# r2: 0.833454635805
exp25 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp25.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# r2: 0.731835366439
exp26 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp26.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.1)
# r2: 0.854741515141 (*)
exp27 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp27.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# r2: 0.84260338122
exp28 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp28.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.7)
exp29 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp29.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp30 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.9)
exp30.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp31 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 100, feature_count),
tied_weights=True)
exp31.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp32 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, input_dropout=0.5, hidden_dropout=0.5)
exp32.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# bad - makes a single curve
exp33 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, hidden_l1=0.1)
exp33.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# bad - makes a non-discriminative curve
exp34 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, input_dropout=0.5)
exp34.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp35 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, hidden_dropout=0.5)
exp35.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp36 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True)
exp36.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp33 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2,
4, 8, 16, 32, 64, 128, 256, 512, feature_count),
tied_weights=True)
exp33.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
X_zca_train, X_zca_test = train_test_split(X_zca, test_size=0.4, random_state=42)
X_zca_val, X_zca_test = train_test_split(X_zca_test, test_size=0.5, random_state=42)
exp34 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp34.train(X_zca_train, X_zca_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp35 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2,
4, 8, 16, 32, 64, 128, 256, 512, feature_count),
tied_weights=True, hidden_activation='relu')
exp35.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# - try tanh and relu for deeper networks
# - try other normalization (mean-std instead of min-max)
X_ms = StandardScaler().fit_transform(X_orig).astype(np.float32)
X_ms_train, X_ms_test = train_test_split(X_ms, test_size=0.4, random_state=42)
X_ms_val, X_ms_test = train_test_split(X_ms_test, test_size=0.5, random_state=42)
exp36 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp36.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp37 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='tanh')
exp37.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp38 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp38.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
X_orig_train, X_orig_test = train_test_split(X_orig.astype('float32'), test_size=0.4, random_state=42)
X_orig_val, X_orig_test = train_test_split(X_orig_test, test_size=0.5, random_state=42)
exp39 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp39.train(X_orig_train, X_orig_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp40 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='linear', hidden_l1=0.5)
exp40.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp41 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', hidden_l1=0.5)
exp41.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp42 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', weight_l1=0.5)
exp42.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# bad
exp43 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', contractive=0.9)
exp43.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# not bad
exp44 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu')
exp44.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp45 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', contractive=0.5)
exp45.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# r2: 0.849283267068
exp46 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='linear', contractive=0.5)
exp46.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp47 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='linear', contractive=0.5)
exp47.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
|
[
"theanets.Experiment",
"climate.enable_default_logging",
"sklearn.preprocessing.StandardScaler",
"matplotlib.animation.ArtistAnimation",
"numpy.zeros",
"sklearn.cross_validation.train_test_split",
"numpy.load",
"sklearn.preprocessing.MinMaxScaler"
] |
[((168, 200), 'climate.enable_default_logging', 'climate.enable_default_logging', ([], {}), '()\n', (198, 200), False, 'import climate\n'), ((211, 347), 'numpy.load', 'np.load', (['"""/Users/bzamecnik/Documents/music-processing/music-processing-experiments/c-scale-piano_spectrogram_2048_hamming.npy"""'], {}), "(\n '/Users/bzamecnik/Documents/music-processing/music-processing-experiments/c-scale-piano_spectrogram_2048_hamming.npy'\n )\n", (218, 347), True, 'import numpy as np\n'), ((466, 517), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X'], {'test_size': '(0.4)', 'random_state': '(42)'}), '(X, test_size=0.4, random_state=42)\n', (482, 517), False, 'from sklearn.cross_validation import train_test_split\n'), ((534, 590), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_test'], {'test_size': '(0.5)', 'random_state': '(42)'}), '(X_test, test_size=0.5, random_state=42)\n', (550, 590), False, 'from sklearn.cross_validation import train_test_split\n'), ((1334, 1438), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1)\n', (1353, 1438), False, 'import theanets\n'), ((1530, 1634), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1)\n', (1549, 1634), False, 'import theanets\n'), ((1774, 1904), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)', 'hidden_activation': '"""relu"""'}), "(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1, hidden_activation='relu')\n", (1793, 1904), False, 'import theanets\n'), ((1996, 2119), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)', 'input_dropout': '(0.3)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1, input_dropout=0.3)\n', (2015, 2119), False, 'import theanets\n'), ((2262, 2366), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1)\n', (2281, 2366), False, 'import theanets\n'), ((2547, 2670), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1, tied_weights=True)\n', (2566, 2670), False, 'import theanets\n'), ((2801, 2924), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.7)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.7, tied_weights=True)\n', (2820, 2924), False, 'import theanets\n'), ((3054, 3192), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'hidden_l1': '(0.1)', 'hidden_l2': '(0.1)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), hidden_l1=0.1, 
hidden_l2=0.1, tied_weights=True)\n', (3073, 3192), False, 'import theanets\n'), ((3357, 3465), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), tied_weights=True)\n', (3376, 3465), False, 'import theanets\n'), ((3597, 3705), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), tied_weights=True)\n', (3616, 3705), False, 'import theanets\n'), ((3854, 3978), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 100, 10, 2, 10, 100, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 100, 10, 2,\n 10, 100, feature_count), tied_weights=True)\n', (3873, 3978), False, 'import theanets\n'), ((4474, 4607), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 256, 64, 16, 2, 16, 64, 256, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 256, 64, \n 16, 2, 16, 64, 256, feature_count), tied_weights=True)\n', (4493, 4607), False, 'import theanets\n'), ((4766, 4891), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'tied_weights': '(True)', 'contractive': '(0.8)'}), '(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), tied_weights=True, contractive=0.8)\n', (4785, 4891), False, 'import theanets\n'), ((5001, 5135), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 500, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""tanh"""'}), "(theanets.Autoencoder, layers=(feature_count, 500,\n feature_count), tied_weights=True, hidden_activation='tanh')\n", (5020, 5135), False, 'import theanets\n'), ((5252, 5423), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 128, 16, 2, 16, 128, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""', 'contractive': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 128, 16, 2,\n 16, 128, feature_count), tied_weights=True, hidden_activation='relu',\n contractive=0.5)\n", (5271, 5423), False, 'import theanets\n'), ((5517, 5658), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 128, 16, 2, 16, 128, feature_count)', 'tied_weights': '(True)', 'contractive': '(0.8)'}), '(theanets.Autoencoder, layers=(feature_count, 128, 16, 2,\n 16, 128, feature_count), tied_weights=True, contractive=0.8)\n', (5536, 5658), False, 'import theanets\n'), ((5756, 5883), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.8)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, input_dropout=0.8)\n', (5775, 5883), False, 'import theanets\n'), ((6007, 6154), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.8)', 'hidden_dropout': '(0.8)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, 
input_dropout=0.8, hidden_dropout=0.8)\n', (6026, 6154), False, 'import theanets\n'), ((6252, 6399), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.9)', 'hidden_dropout': '(0.9)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, input_dropout=0.9, hidden_dropout=0.9)\n', (6271, 6399), False, 'import theanets\n'), ((7656, 7803), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.3)', 'hidden_dropout': '(0.5)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, input_dropout=0.3, hidden_dropout=0.5)\n', (7675, 7803), False, 'import theanets\n'), ((7899, 8115), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2, 4, 8, 16, 32, 64, 128, \n 256, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.3)', 'hidden_dropout': '(0.5)'}), '(theanets.Autoencoder, layers=(feature_count, 512, 256, \n 128, 64, 32, 16, 8, 4, 2, 4, 8, 16, 32, 64, 128, 256, 512,\n feature_count), tied_weights=True, input_dropout=0.3, hidden_dropout=0.5)\n', (7918, 8115), False, 'import theanets\n'), ((8216, 8396), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.3)', 'hidden_dropout': '(0.5)', 'hidden_activation': '"""linear"""'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, input_dropout=0.3, hidden_dropout=\n 0.5, hidden_activation='linear')\n", (8235, 8396), False, 'import theanets\n'), ((8512, 8620), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (8531, 8620), False, 'import theanets\n'), ((8739, 8847), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (8758, 8847), False, 'import theanets\n'), ((8970, 9078), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (8989, 9078), False, 'import theanets\n'), ((9196, 9304), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (9215, 9304), False, 'import theanets\n'), ((9402, 9510), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (9421, 9510), False, 'import theanets\n'), ((9608, 9735), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'input_dropout': 
'(0.9)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, input_dropout=0.9)\n', (9627, 9735), False, 'import theanets\n'), ((9833, 9941), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 100, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 100,\n feature_count), tied_weights=True)\n', (9852, 9941), False, 'import theanets\n'), ((10039, 10206), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 200, 20, 2, 20, 200, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.5)', 'hidden_dropout': '(0.5)'}), '(theanets.Autoencoder, layers=(feature_count, 200, 20, 2,\n 20, 200, feature_count), tied_weights=True, input_dropout=0.5,\n hidden_dropout=0.5)\n', (10058, 10206), False, 'import theanets\n'), ((10329, 10468), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 200, 20, 2, 20, 200, feature_count)', 'tied_weights': '(True)', 'hidden_l1': '(0.1)'}), '(theanets.Autoencoder, layers=(feature_count, 200, 20, 2,\n 20, 200, feature_count), tied_weights=True, hidden_l1=0.1)\n', (10348, 10468), False, 'import theanets\n'), ((10607, 10750), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 200, 20, 2, 20, 200, feature_count)', 'tied_weights': '(True)', 'input_dropout': '(0.5)'}), '(theanets.Autoencoder, layers=(feature_count, 200, 20, 2,\n 20, 200, feature_count), tied_weights=True, input_dropout=0.5)\n', (10626, 10750), False, 'import theanets\n'), ((10848, 10992), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 200, 20, 2, 20, 200, feature_count)', 'tied_weights': '(True)', 'hidden_dropout': '(0.5)'}), '(theanets.Autoencoder, layers=(feature_count, 200, 20, 2,\n 20, 200, feature_count), tied_weights=True, hidden_dropout=0.5)\n', (10867, 10992), False, 'import theanets\n'), ((11090, 11214), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 200, 20, 2, 20, 200, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 200, 20, 2,\n 20, 200, feature_count), tied_weights=True)\n', (11109, 11214), False, 'import theanets\n'), ((11313, 11490), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2, 4, 8, 16, 32, 64, 128, \n 256, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512, 256, \n 128, 64, 32, 16, 8, 4, 2, 4, 8, 16, 32, 64, 128, 256, 512,\n feature_count), tied_weights=True)\n', (11332, 11490), False, 'import theanets\n'), ((11609, 11664), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_zca'], {'test_size': '(0.4)', 'random_state': '(42)'}), '(X_zca, test_size=0.4, random_state=42)\n', (11625, 11664), False, 'from sklearn.cross_validation import train_test_split\n'), ((11689, 11749), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_zca_test'], {'test_size': '(0.5)', 'random_state': '(42)'}), '(X_zca_test, test_size=0.5, random_state=42)\n', (11705, 11749), False, 'from sklearn.cross_validation import train_test_split\n'), ((11760, 11868), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), 
'(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (11779, 11868), False, 'import theanets\n'), ((11974, 12177), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2, 4, 8, 16, 32, 64, 128, \n 256, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""'}), "(theanets.Autoencoder, layers=(feature_count, 512, 256, \n 128, 64, 32, 16, 8, 4, 2, 4, 8, 16, 32, 64, 128, 256, 512,\n feature_count), tied_weights=True, hidden_activation='relu')\n", (11993, 12177), False, 'import theanets\n'), ((12460, 12514), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_ms'], {'test_size': '(0.4)', 'random_state': '(42)'}), '(X_ms, test_size=0.4, random_state=42)\n', (12476, 12514), False, 'from sklearn.cross_validation import train_test_split\n'), ((12537, 12596), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_ms_test'], {'test_size': '(0.5)', 'random_state': '(42)'}), '(X_ms_test, test_size=0.5, random_state=42)\n', (12553, 12596), False, 'from sklearn.cross_validation import train_test_split\n'), ((12606, 12714), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (12625, 12714), False, 'import theanets\n'), ((12818, 12952), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""tanh"""'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='tanh')\n", (12837, 12952), False, 'import theanets\n'), ((13056, 13164), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (13075, 13164), False, 'import theanets\n'), ((13383, 13444), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X_orig_test'], {'test_size': '(0.5)', 'random_state': '(42)'}), '(X_orig_test, test_size=0.5, random_state=42)\n', (13399, 13444), False, 'from sklearn.cross_validation import train_test_split\n'), ((13454, 13562), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)'}), '(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True)\n', (13473, 13562), False, 'import theanets\n'), ((13670, 13825), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""linear"""', 'hidden_l1': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='linear',\n hidden_l1=0.5)\n", (13689, 13825), False, 'import theanets\n'), ((13919, 14068), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""', 'hidden_l1': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='relu', hidden_l1=0.5)\n", (13938, 14068), False, 'import 
theanets\n'), ((14166, 14315), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""', 'weight_l1': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='relu', weight_l1=0.5)\n", (14185, 14315), False, 'import theanets\n'), ((14419, 14574), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""', 'contractive': '(0.9)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='relu',\n contractive=0.9)\n", (14438, 14574), False, 'import theanets\n'), ((14678, 14812), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='relu')\n", (14697, 14812), False, 'import theanets\n'), ((14916, 15071), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""relu"""', 'contractive': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='relu',\n contractive=0.5)\n", (14935, 15071), False, 'import theanets\n'), ((15193, 15350), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""linear"""', 'contractive': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='linear',\n contractive=0.5)\n", (15212, 15350), False, 'import theanets\n'), ((15450, 15607), 'theanets.Experiment', 'theanets.Experiment', (['theanets.Autoencoder'], {'layers': '(feature_count, 512, feature_count)', 'tied_weights': '(True)', 'hidden_activation': '"""linear"""', 'contractive': '(0.5)'}), "(theanets.Autoencoder, layers=(feature_count, 512,\n feature_count), tied_weights=True, hidden_activation='linear',\n contractive=0.5)\n", (15469, 15607), False, 'import theanets\n'), ((880, 900), 'numpy.zeros', 'np.zeros', (['blocks[-1]'], {}), '(blocks[-1])\n', (888, 900), True, 'import numpy as np\n'), ((7106, 7183), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'images'], {'interval': '(20)', 'blit': '(False)', 'repeat': '(False)'}), '(fig, images, interval=20, blit=False, repeat=False)\n', (7131, 7183), True, 'import matplotlib.animation as animation\n'), ((385, 399), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (397, 399), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((12378, 12394), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12392, 12394), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n')]
|
import os
import glob
import argparse
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description='Display datas')
parser.add_argument('--data-dir', default='C:/Users/junya/Documents/plant_segmentation_data',
help='dataset directory')
parser.add_argument('--val-rate', default=0.1, type=float,
                        help='fraction of samples held out for validation')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
phase = 'train'
train_dir = os.path.join(args.data_dir, phase)
all_set = np.array(glob.glob(os.path.join(train_dir, '*.jpg')))
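    # shuffle all sample indices once, then keep the first (1 - val_rate) fraction
    # for training and the remainder for validation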
all_idx = np.random.choice(all_set.shape[0], all_set.shape[0], replace=False)
train_num = int(all_set.shape[0] * (1 - args.val_rate))
train_idx = all_idx[0:train_num]
val_idx = all_idx[train_num:all_set.shape[0]]
train_set = all_set[train_idx]
val_set = all_set[val_idx]
np.savetxt("segmentation/train.txt", train_set, fmt='%s', delimiter=',')
np.savetxt("segmentation/val.txt", val_set, fmt='%s', delimiter=',')
|
[
"numpy.random.choice",
"numpy.savetxt",
"os.path.join",
"argparse.ArgumentParser"
] |
[((90, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Display datas"""'}), "(description='Display datas')\n", (113, 142), False, 'import argparse\n'), ((548, 582), 'os.path.join', 'os.path.join', (['args.data_dir', 'phase'], {}), '(args.data_dir, phase)\n', (560, 582), False, 'import os\n'), ((670, 737), 'numpy.random.choice', 'np.random.choice', (['all_set.shape[0]', 'all_set.shape[0]'], {'replace': '(False)'}), '(all_set.shape[0], all_set.shape[0], replace=False)\n', (686, 737), True, 'import numpy as np\n'), ((958, 1030), 'numpy.savetxt', 'np.savetxt', (['"""segmentation/train.txt"""', 'train_set'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "('segmentation/train.txt', train_set, fmt='%s', delimiter=',')\n", (968, 1030), True, 'import numpy as np\n'), ((1035, 1103), 'numpy.savetxt', 'np.savetxt', (['"""segmentation/val.txt"""', 'val_set'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "('segmentation/val.txt', val_set, fmt='%s', delimiter=',')\n", (1045, 1103), True, 'import numpy as np\n'), ((621, 653), 'os.path.join', 'os.path.join', (['train_dir', '"""*.jpg"""'], {}), "(train_dir, '*.jpg')\n", (633, 653), False, 'import os\n')]
|
# python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps a PettingZoo MARL environment to be used as a dm_env environment."""
import copy
from typing import Any, Dict, Iterator, List, Optional, Union
import dm_env
import gym
import numpy as np
from acme import specs
from acme.wrappers.gym_wrapper import _convert_to_spec
from pettingzoo.utils.env import AECEnv, ParallelEnv
from supersuit import black_death_v1
from mava import types
from mava.utils.wrapper_utils import (
apply_env_wrapper_preprocessers,
convert_np_type,
parameterized_restart,
)
from mava.wrappers.env_wrappers import ParallelEnvWrapper, SequentialEnvWrapper
class PettingZooAECEnvWrapper(SequentialEnvWrapper):
"""Environment wrapper for PettingZoo MARL environments."""
def __init__(
self,
environment: AECEnv,
env_preprocess_wrappers: Optional[List] = [
# (env_preprocessor, dict_with_preprocessor_params)
(black_death_v1, None),
],
):
self._environment = environment
self._reset_next_step = True
if env_preprocess_wrappers:
self._environment = apply_env_wrapper_preprocessers(
self._environment, env_preprocess_wrappers
)
self.correct_agent_name()
self.last_turn_agent = None
def reset(self) -> dm_env.TimeStep:
"""Resets the episode."""
self._reset_next_step = False
self._environment.reset()
self._step_types = {
agent: dm_env.StepType.FIRST for agent in self.possible_agents
}
self._first_step_performed = {agent: False for agent in self.possible_agents}
observe, _, done, _ = self._environment.last()
agent = self.current_agent
observation = self._convert_observation(agent, observe, done)
self._discount = convert_np_type(self.discount_spec()[agent].dtype, 1)
reward = convert_np_type(self.reward_spec()[agent].dtype, 0)
return parameterized_restart(reward, self._discount, observation)
def step( # type: ignore[override]
self, action: Union[int, float]
) -> dm_env.TimeStep:
"""Steps the environment."""
if self._reset_next_step:
return self.reset()
_, _, done, _ = self._environment.last()
# If current agent is done
if done:
self._environment.step(None)
else:
self._environment.step(action)
agent = self.current_agent
# Reset if all agents are done
if self.env_done():
self._reset_next_step = True
reward = convert_np_type(self.reward_spec()[agent].dtype, 0)
observation = self._convert_observation(
agent, self._environment.observe(agent), done
)
else:
# observation for next agent
observe, reward, done, info = self._environment.last()
# Convert rewards to match spec
reward = convert_np_type(self.reward_spec()[agent].dtype, reward)
observation = self._convert_observation(agent, observe, done)
step_type = dm_env.StepType.LAST if done else dm_env.StepType.MID
return dm_env.TimeStep(
observation=observation,
reward=reward,
discount=self._discount,
step_type=step_type,
)
def env_done(self) -> bool:
return not self.agents
def agent_iter(self, max_iter: int = 2 ** 63) -> Iterator:
return self._environment.agent_iter(max_iter)
# Convert PettingZoo observation so it's dm_env compatible. Also, the list
# of legal actions must be converted to a legal actions mask.
def _convert_observation( # type: ignore[override]
self, agent: str, observe: Union[dict, np.ndarray], done: bool
) -> types.OLT:
legals: np.ndarray = None
observation: np.ndarray = None
if isinstance(observe, dict) and "action_mask" in observe:
legals = observe["action_mask"]
observation = observe["observation"]
else:
legals = np.ones(
_convert_to_spec(self._environment.action_spaces[agent]).shape,
dtype=self._environment.action_spaces[agent].dtype,
)
observation = observe
if observation.dtype == np.int8:
observation = np.dtype(np.float32).type(
observation
) # observation is not expected to be int8
if legals.dtype == np.int8:
legals = np.dtype(np.int64).type(legals)
observation = types.OLT(
observation=observation,
legal_actions=legals,
terminal=np.asarray([done], dtype=np.float32),
)
return observation
def correct_agent_name(self) -> None:
self._environment.reset()
if "tictactoe" in self._environment.metadata["name"]:
corrected_names = ["player_0", "player_1"]
self._environment.unwrapped.possible_agents = corrected_names
self._environment.unwrapped.agents = corrected_names
self._environment.possible_agents = corrected_names
self._environment.agents = corrected_names
previous_names = list(self.observation_spaces.keys())
for corrected_name, prev_name in zip(corrected_names, previous_names):
self.observation_spaces[corrected_name] = self.observation_spaces[
prev_name
]
self.action_spaces[corrected_name] = self.action_spaces[prev_name]
self.rewards[corrected_name] = self.rewards[prev_name]
self.dones[corrected_name] = self.dones[prev_name]
self.infos[corrected_name] = self.infos[prev_name]
del self.observation_spaces[prev_name]
del self.action_spaces[prev_name]
del self.rewards[prev_name]
del self.dones[prev_name]
del self.infos[prev_name]
def observation_spec(self) -> types.Observation:
observation_specs = {}
for agent in self._environment.possible_agents:
if isinstance(self._environment.observation_spaces[agent], gym.spaces.Dict):
obs_space = copy.deepcopy(
self._environment.observation_spaces[agent]["observation"]
)
legal_actions_space = copy.deepcopy(
self._environment.observation_spaces[agent]["action_mask"]
)
else:
obs_space = copy.deepcopy(self._environment.observation_spaces[agent])
legal_actions_space = copy.deepcopy(
self._environment.action_spaces[agent]
)
if obs_space.dtype == np.int8:
obs_space.dtype = np.dtype(np.float32)
if legal_actions_space.dtype == np.int8:
legal_actions_space.dtype = np.dtype(np.int64)
observation_specs[agent] = types.OLT(
observation=_convert_to_spec(obs_space),
legal_actions=_convert_to_spec(legal_actions_space),
terminal=specs.Array((1,), np.float32),
)
return observation_specs
def action_spec(self) -> Dict[str, specs.DiscreteArray]:
action_specs = {}
for agent in self.possible_agents:
action_specs[agent] = _convert_to_spec(
self._environment.action_spaces[agent]
)
return action_specs
def reward_spec(self) -> Dict[str, specs.Array]:
reward_specs = {}
for agent in self.possible_agents:
reward_specs[agent] = specs.Array((), np.float32)
return reward_specs
def discount_spec(self) -> Dict[str, specs.BoundedArray]:
discount_specs = {}
for agent in self.possible_agents:
discount_specs[agent] = specs.BoundedArray(
(), np.float32, minimum=0, maximum=1.0
)
return discount_specs
def extra_spec(self) -> Dict[str, specs.BoundedArray]:
return {}
@property
def agents(self) -> List:
return self._environment.agents
@property
def possible_agents(self) -> List:
return self._environment.possible_agents
@property
def environment(self) -> AECEnv:
"""Returns the wrapped environment."""
return self._environment
@property
def current_agent(self) -> Any:
return self._environment.agent_selection
@property
def num_agents(self) -> int:
return self._environment.num_agents
def __getattr__(self, name: str) -> Any:
"""Expose any other attributes of the underlying environment."""
if hasattr(self.__class__, name):
return self.__getattribute__(name)
else:
return getattr(self._environment, name)
class PettingZooParallelEnvWrapper(ParallelEnvWrapper):
"""Environment wrapper for PettingZoo MARL environments."""
def __init__(
self,
environment: ParallelEnv,
env_preprocess_wrappers: Optional[List] = [
# (env_preprocessor, dict_with_preprocessor_params)
(black_death_v1, None),
],
):
self._environment = environment
self._reset_next_step = True
if env_preprocess_wrappers:
self._environment = apply_env_wrapper_preprocessers(
self._environment, env_preprocess_wrappers
)
def reset(self) -> dm_env.TimeStep:
"""Resets the episode."""
self._reset_next_step = False
self._step_type = dm_env.StepType.FIRST
discount_spec = self.discount_spec()
observe = self._environment.reset()
self._discounts = {
agent: convert_np_type(discount_spec[agent].dtype, 1)
for agent in self.possible_agents
}
if type(observe) == tuple:
observe, env_extras = observe
else:
env_extras = {}
observations = self._convert_observations(
observe, {agent: False for agent in self.possible_agents}
)
rewards_spec = self.reward_spec()
rewards = {
agent: convert_np_type(rewards_spec[agent].dtype, 0)
for agent in self.possible_agents
}
return parameterized_restart(rewards, self._discounts, observations), env_extras
def step(self, actions: Dict[str, np.ndarray]) -> dm_env.TimeStep:
"""Steps the environment."""
if self._reset_next_step:
return self.reset()
observations, rewards, dones, infos = self._environment.step(actions)
rewards_spec = self.reward_spec()
# Handle empty rewards
if not rewards:
rewards = {
agent: convert_np_type(rewards_spec[agent].dtype, 0)
for agent in self.possible_agents
}
else:
rewards = {
agent: convert_np_type(rewards_spec[agent].dtype, reward)
for agent, reward in rewards.items()
}
if observations:
observations = self._convert_observations(observations, dones)
if self.env_done():
self._step_type = dm_env.StepType.LAST
self._reset_next_step = True
else:
self._step_type = dm_env.StepType.MID
return dm_env.TimeStep(
observation=observations,
reward=rewards,
discount=self._discounts,
step_type=self._step_type,
)
def env_done(self) -> bool:
return not self.agents
# Convert PettingZoo observation so it's dm_env compatible. Also, the list
# of legal actions must be converted to a legal actions mask.
def _convert_observations(
self, observes: Dict[str, np.ndarray], dones: Dict[str, bool]
) -> types.Observation:
observations: Dict[str, types.OLT] = {}
for agent, observation in observes.items():
if isinstance(observation, dict) and "action_mask" in observation:
legals = observation["action_mask"]
observation = observation["observation"]
else:
                # TODO Handle legal actions better for continuous envs,
                # maybe have min and max for each action and clip the agent's actions
                # accordingly
legals = np.ones(
_convert_to_spec(self._environment.action_spaces[agent]).shape,
dtype=self._environment.action_spaces[agent].dtype,
)
observations[agent] = types.OLT(
observation=observation,
legal_actions=legals,
terminal=np.asarray([dones[agent]], dtype=np.float32),
)
return observations
def observation_spec(self) -> types.Observation:
observation_specs = {}
for agent in self.possible_agents:
observation_specs[agent] = types.OLT(
observation=_convert_to_spec(
self._environment.observation_spaces[agent]
),
legal_actions=_convert_to_spec(self._environment.action_spaces[agent]),
terminal=specs.Array((1,), np.float32),
)
return observation_specs
def action_spec(self) -> Dict[str, Union[specs.DiscreteArray, specs.BoundedArray]]:
action_specs = {}
action_spaces = self._environment.action_spaces
for agent in self.possible_agents:
action_specs[agent] = _convert_to_spec(action_spaces[agent])
return action_specs
def reward_spec(self) -> Dict[str, specs.Array]:
reward_specs = {}
for agent in self.possible_agents:
reward_specs[agent] = specs.Array((), np.float32)
return reward_specs
def discount_spec(self) -> Dict[str, specs.BoundedArray]:
discount_specs = {}
for agent in self.possible_agents:
discount_specs[agent] = specs.BoundedArray(
(), np.float32, minimum=0, maximum=1.0
)
return discount_specs
def extra_spec(self) -> Dict[str, specs.BoundedArray]:
return {}
@property
def agents(self) -> List:
return self._environment.agents
@property
def possible_agents(self) -> List:
return self._environment.possible_agents
@property
def environment(self) -> ParallelEnv:
"""Returns the wrapped environment."""
return self._environment
@property
def current_agent(self) -> Any:
return self._environment.agent_selection
def __getattr__(self, name: str) -> Any:
"""Expose any other attributes of the underlying environment."""
if hasattr(self.__class__, name):
return self.__getattribute__(name)
else:
return getattr(self._environment, name)
|
[
"copy.deepcopy",
"acme.specs.BoundedArray",
"dm_env.TimeStep",
"numpy.asarray",
"mava.utils.wrapper_utils.apply_env_wrapper_preprocessers",
"acme.specs.Array",
"mava.utils.wrapper_utils.convert_np_type",
"acme.wrappers.gym_wrapper._convert_to_spec",
"mava.utils.wrapper_utils.parameterized_restart",
"numpy.dtype"
] |
[((2552, 2610), 'mava.utils.wrapper_utils.parameterized_restart', 'parameterized_restart', (['reward', 'self._discount', 'observation'], {}), '(reward, self._discount, observation)\n', (2573, 2610), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((3779, 3885), 'dm_env.TimeStep', 'dm_env.TimeStep', ([], {'observation': 'observation', 'reward': 'reward', 'discount': 'self._discount', 'step_type': 'step_type'}), '(observation=observation, reward=reward, discount=self.\n _discount, step_type=step_type)\n', (3794, 3885), False, 'import dm_env\n'), ((12037, 12152), 'dm_env.TimeStep', 'dm_env.TimeStep', ([], {'observation': 'observations', 'reward': 'rewards', 'discount': 'self._discounts', 'step_type': 'self._step_type'}), '(observation=observations, reward=rewards, discount=self.\n _discounts, step_type=self._step_type)\n', (12052, 12152), False, 'import dm_env\n'), ((1702, 1777), 'mava.utils.wrapper_utils.apply_env_wrapper_preprocessers', 'apply_env_wrapper_preprocessers', (['self._environment', 'env_preprocess_wrappers'], {}), '(self._environment, env_preprocess_wrappers)\n', (1733, 1777), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((8026, 8082), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['self._environment.action_spaces[agent]'], {}), '(self._environment.action_spaces[agent])\n', (8042, 8082), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((8298, 8325), 'acme.specs.Array', 'specs.Array', (['()', 'np.float32'], {}), '((), np.float32)\n', (8309, 8325), False, 'from acme import specs\n'), ((8525, 8583), 'acme.specs.BoundedArray', 'specs.BoundedArray', (['()', 'np.float32'], {'minimum': '(0)', 'maximum': '(1.0)'}), '((), np.float32, minimum=0, maximum=1.0)\n', (8543, 8583), False, 'from acme import specs\n'), ((10013, 10088), 'mava.utils.wrapper_utils.apply_env_wrapper_preprocessers', 'apply_env_wrapper_preprocessers', (['self._environment', 'env_preprocess_wrappers'], {}), '(self._environment, env_preprocess_wrappers)\n', (10044, 10088), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((10417, 10463), 'mava.utils.wrapper_utils.convert_np_type', 'convert_np_type', (['discount_spec[agent].dtype', '(1)'], {}), '(discount_spec[agent].dtype, 1)\n', (10432, 10463), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((10853, 10898), 'mava.utils.wrapper_utils.convert_np_type', 'convert_np_type', (['rewards_spec[agent].dtype', '(0)'], {}), '(rewards_spec[agent].dtype, 0)\n', (10868, 10898), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((10971, 11032), 'mava.utils.wrapper_utils.parameterized_restart', 'parameterized_restart', (['rewards', 'self._discounts', 'observations'], {}), '(rewards, self._discounts, observations)\n', (10992, 11032), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((14233, 14271), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['action_spaces[agent]'], {}), '(action_spaces[agent])\n', (14249, 14271), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((14457, 14484), 'acme.specs.Array', 'specs.Array', (['()', 'np.float32'], {}), '((), np.float32)\n', (14468, 
14484), False, 'from acme import specs\n'), ((14684, 14742), 'acme.specs.BoundedArray', 'specs.BoundedArray', (['()', 'np.float32'], {'minimum': '(0)', 'maximum': '(1.0)'}), '((), np.float32, minimum=0, maximum=1.0)\n', (14702, 14742), False, 'from acme import specs\n'), ((5283, 5319), 'numpy.asarray', 'np.asarray', (['[done]'], {'dtype': 'np.float32'}), '([done], dtype=np.float32)\n', (5293, 5319), True, 'import numpy as np\n'), ((6871, 6944), 'copy.deepcopy', 'copy.deepcopy', (["self._environment.observation_spaces[agent]['observation']"], {}), "(self._environment.observation_spaces[agent]['observation'])\n", (6884, 6944), False, 'import copy\n'), ((7021, 7094), 'copy.deepcopy', 'copy.deepcopy', (["self._environment.observation_spaces[agent]['action_mask']"], {}), "(self._environment.observation_spaces[agent]['action_mask'])\n", (7034, 7094), False, 'import copy\n'), ((7179, 7237), 'copy.deepcopy', 'copy.deepcopy', (['self._environment.observation_spaces[agent]'], {}), '(self._environment.observation_spaces[agent])\n', (7192, 7237), False, 'import copy\n'), ((7276, 7329), 'copy.deepcopy', 'copy.deepcopy', (['self._environment.action_spaces[agent]'], {}), '(self._environment.action_spaces[agent])\n', (7289, 7329), False, 'import copy\n'), ((7445, 7465), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (7453, 7465), True, 'import numpy as np\n'), ((7563, 7581), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (7571, 7581), True, 'import numpy as np\n'), ((11446, 11491), 'mava.utils.wrapper_utils.convert_np_type', 'convert_np_type', (['rewards_spec[agent].dtype', '(0)'], {}), '(rewards_spec[agent].dtype, 0)\n', (11461, 11491), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((11617, 11667), 'mava.utils.wrapper_utils.convert_np_type', 'convert_np_type', (['rewards_spec[agent].dtype', 'reward'], {}), '(rewards_spec[agent].dtype, reward)\n', (11632, 11667), False, 'from mava.utils.wrapper_utils import apply_env_wrapper_preprocessers, convert_np_type, parameterized_restart\n'), ((4710, 4766), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['self._environment.action_spaces[agent]'], {}), '(self._environment.action_spaces[agent])\n', (4726, 4766), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((4957, 4977), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (4965, 4977), True, 'import numpy as np\n'), ((5125, 5143), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (5133, 5143), True, 'import numpy as np\n'), ((7660, 7687), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['obs_space'], {}), '(obs_space)\n', (7676, 7687), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((7719, 7756), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['legal_actions_space'], {}), '(legal_actions_space)\n', (7735, 7756), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((7783, 7812), 'acme.specs.Array', 'specs.Array', (['(1,)', 'np.float32'], {}), '((1,), np.float32)\n', (7794, 7812), False, 'from acme import specs\n'), ((13398, 13442), 'numpy.asarray', 'np.asarray', (['[dones[agent]]'], {'dtype': 'np.float32'}), '([dones[agent]], dtype=np.float32)\n', (13408, 13442), True, 'import numpy as np\n'), ((13693, 13754), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['self._environment.observation_spaces[agent]'], {}), 
'(self._environment.observation_spaces[agent])\n', (13709, 13754), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((13824, 13880), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['self._environment.action_spaces[agent]'], {}), '(self._environment.action_spaces[agent])\n', (13840, 13880), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n'), ((13907, 13936), 'acme.specs.Array', 'specs.Array', (['(1,)', 'np.float32'], {}), '((1,), np.float32)\n', (13918, 13936), False, 'from acme import specs\n'), ((13094, 13150), 'acme.wrappers.gym_wrapper._convert_to_spec', '_convert_to_spec', (['self._environment.action_spaces[agent]'], {}), '(self._environment.action_spaces[agent])\n', (13110, 13150), False, 'from acme.wrappers.gym_wrapper import _convert_to_spec\n')]
|
import os
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import IncrementalPCA
## set paths
inputpath = './input/'
## Toggle PCA for better visualization of clusters
pca_flag = int(input("\nPerform PCA for cluster visualization?:" \
+ " press 1 if YES, and 0 if NO \n"))
# number of PCA components
N = 3
#import data set
print("\n\n")
print("\nLoading data from " + os.path.dirname(os.path.realpath(__file__)) +
inputpath[1:])
print("\nPlease be patient...this can take a while for large files...")
print("\n\n")
with open(inputpath + 'X.txt') as file:
X = np.array([[float(digit) for digit in line.split()] for line in file])
# import clustering labels but check to see if files exist first
if not os.path.exists(inputpath + 'kx.txt'):
kx = np.ones((np.shape(X)[0],))
else:
kx = np.genfromtxt(inputpath + 'kx.txt')
if not os.path.exists(inputpath + 'cx.txt'):
cx = np.ones((len(kx),))
else:
cx = np.genfromtxt(inputpath + 'cx.txt')
# create index for plotting
indx = np.vstack((kx.astype(int), cx.astype(int)))
# get number of clustering instances
K = len(np.unique(indx[0,:]))
if pca_flag:
# batch size for incremental PCA
batchsz = 10
# perform PCA for visualization of clusters
pca = IncrementalPCA(n_components = N, batch_size = batchsz)
X = pca.fit_transform(X)
# main loop
notdone = True
while notdone:
instance = input("\nWhat is the clustering instance you wish to plot? ")
instance = int(instance)
print('\nProcessing...\n')
# project onto 3D axes
plt.figure(figsize=(10,8))
ax = plt.axes(projection='3d')
title = "Cluster plot: instance %d" % instance
kindx = np.asarray(np.where(indx[0,:] == instance))
cindx = np.unique(indx[1,kindx])
#for i, target_name in zip(range(Nc), iris.target_names):
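    # plot each cluster label found in this clustering instance as its own 3D scatter series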
for i in cindx:
ax.scatter3D(X[kindx[indx[1,kindx] == i], 0],
X[kindx[indx[1,kindx] == i], 1],
X[kindx[indx[1,kindx] == i], 2],
label = str(i), s = 4)
#print(np.std(X[kindx[indx[1,kindx] == i],:], axis = 0))
plt.title(title + " of %d" % K)
#if display_clusternum:
# ax.text2D(1, 1, r'y ='+str(y[instance-1]), fontsize=10, transform=ax.transAxes)
plt.legend(loc="upper left", shadow=False, scatterpoints=1)
plt.show()
getuserinput = input("Want to continue?: press 1 if YES," \
+ " and 0 to EXIT \n\n")
if(eval(getuserinput) == 0):
notdone = False
print('\nExiting...\n\n')
|
[
"os.path.exists",
"numpy.shape",
"numpy.unique",
"numpy.where",
"os.path.realpath",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.title",
"sklearn.decomposition.IncrementalPCA",
"numpy.genfromtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((935, 971), 'os.path.exists', 'os.path.exists', (["(inputpath + 'kx.txt')"], {}), "(inputpath + 'kx.txt')\n", (949, 971), False, 'import os\n'), ((1027, 1062), 'numpy.genfromtxt', 'np.genfromtxt', (["(inputpath + 'kx.txt')"], {}), "(inputpath + 'kx.txt')\n", (1040, 1062), True, 'import numpy as np\n'), ((1071, 1107), 'os.path.exists', 'os.path.exists', (["(inputpath + 'cx.txt')"], {}), "(inputpath + 'cx.txt')\n", (1085, 1107), False, 'import os\n'), ((1156, 1191), 'numpy.genfromtxt', 'np.genfromtxt', (["(inputpath + 'cx.txt')"], {}), "(inputpath + 'cx.txt')\n", (1169, 1191), True, 'import numpy as np\n'), ((1324, 1345), 'numpy.unique', 'np.unique', (['indx[0, :]'], {}), '(indx[0, :])\n', (1333, 1345), True, 'import numpy as np\n'), ((1478, 1528), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': 'N', 'batch_size': 'batchsz'}), '(n_components=N, batch_size=batchsz)\n', (1492, 1528), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((1783, 1810), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1793, 1810), True, 'from matplotlib import pyplot as plt\n'), ((1820, 1845), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1828, 1845), True, 'from matplotlib import pyplot as plt\n'), ((1968, 1993), 'numpy.unique', 'np.unique', (['indx[1, kindx]'], {}), '(indx[1, kindx])\n', (1977, 1993), True, 'import numpy as np\n'), ((2379, 2410), 'matplotlib.pyplot.title', 'plt.title', (["(title + ' of %d' % K)"], {}), "(title + ' of %d' % K)\n", (2388, 2410), True, 'from matplotlib import pyplot as plt\n'), ((2535, 2594), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'shadow': '(False)', 'scatterpoints': '(1)'}), "(loc='upper left', shadow=False, scatterpoints=1)\n", (2545, 2594), True, 'from matplotlib import pyplot as plt\n'), ((2600, 2610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2608, 2610), True, 'from matplotlib import pyplot as plt\n'), ((1922, 1954), 'numpy.where', 'np.where', (['(indx[0, :] == instance)'], {}), '(indx[0, :] == instance)\n', (1930, 1954), True, 'import numpy as np\n'), ((541, 567), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (557, 567), False, 'import os\n'), ((992, 1003), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1000, 1003), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pandas as pd
import cv2 as cv
import os
def read_pkl(file_path):
obj = pd.read_pickle(file_path)
return obj
def show_results(obj , folder):
for im_num in obj.keys():
data = obj[im_num]
file = data['fileName'][12:]
file_path = os.path.join(folder, file)
im = cv.imread(file_path)#[:,:,::-1]
detections = data['detections']
for detection in detections:
type = detection[0]
roi = detection[2]
roi_tl = [roi[0] - roi[2]/2, roi[1] - roi[3]/2, roi[2], roi[3]]
roi_ = np.asarray(roi_tl).astype(int)
im = cv.rectangle(img=im, rec=roi_, color=(255, 0, 0), thickness=3)
plt.imshow(im[:, :, ::-1])
plt.pause(0.0001)
a=1
def track_objects(obj , folder):
for im_num in obj.keys():
data = obj[im_num]
file = data['fileName'][12:]
file_path = os.path.join(folder, file)
im = cv.imread(file_path)#[:,:,::-1]
detections = data['detections']
for detection in detections:
# type = detection[0]
roi = detection[2]
cen = [roi[0], roi[1]]
if cen[1] > 500 and cen[0] > 500 and cen[1] < 1300:
roi_tl = [roi[0] - roi[2] / 2, roi[1] - roi[3] / 2, roi[2], roi[3]]
roi_ = np.asarray(roi_tl).astype(int)
im = cv.rectangle(img=im, rec=roi_, color=(0, 0, 255), thickness=3)
continue
out_path = os.path.join(folder, 'warning', file)
cv.imwrite(out_path, im)
a=1
if __name__ == '__main__':
folder = r'E:\rafi\got_your_back\data\results_files\res\temp_dir - Copy (9)'
file_path = os.path.join(folder, r"YoloV3_res\res_pkl.pkl")
obj = read_pkl(file_path)
track_objects(obj, folder)
# show_results(obj, folder)
|
[
"pandas.read_pickle",
"cv2.imwrite",
"cv2.rectangle",
"matplotlib.pyplot.imshow",
"os.path.join",
"numpy.asarray",
"matplotlib.pyplot.pause",
"cv2.imread"
] |
[((149, 174), 'pandas.read_pickle', 'pd.read_pickle', (['file_path'], {}), '(file_path)\n', (163, 174), True, 'import pandas as pd\n'), ((1781, 1828), 'os.path.join', 'os.path.join', (['folder', '"""YoloV3_res\\\\res_pkl.pkl"""'], {}), "(folder, 'YoloV3_res\\\\res_pkl.pkl')\n", (1793, 1828), False, 'import os\n'), ((337, 363), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (349, 363), False, 'import os\n'), ((377, 397), 'cv2.imread', 'cv.imread', (['file_path'], {}), '(file_path)\n', (386, 397), True, 'import cv2 as cv\n'), ((991, 1017), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (1003, 1017), False, 'import os\n'), ((1031, 1051), 'cv2.imread', 'cv.imread', (['file_path'], {}), '(file_path)\n', (1040, 1051), True, 'import cv2 as cv\n'), ((1573, 1610), 'os.path.join', 'os.path.join', (['folder', '"""warning"""', 'file'], {}), "(folder, 'warning', file)\n", (1585, 1610), False, 'import os\n'), ((1619, 1643), 'cv2.imwrite', 'cv.imwrite', (['out_path', 'im'], {}), '(out_path, im)\n', (1629, 1643), True, 'import cv2 as cv\n'), ((693, 755), 'cv2.rectangle', 'cv.rectangle', ([], {'img': 'im', 'rec': 'roi_', 'color': '(255, 0, 0)', 'thickness': '(3)'}), '(img=im, rec=roi_, color=(255, 0, 0), thickness=3)\n', (705, 755), True, 'import cv2 as cv\n'), ((770, 796), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im[:, :, ::-1]'], {}), '(im[:, :, ::-1])\n', (780, 796), True, 'import matplotlib.pyplot as plt\n'), ((809, 826), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (818, 826), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1526), 'cv2.rectangle', 'cv.rectangle', ([], {'img': 'im', 'rec': 'roi_', 'color': '(0, 0, 255)', 'thickness': '(3)'}), '(img=im, rec=roi_, color=(0, 0, 255), thickness=3)\n', (1476, 1526), True, 'import cv2 as cv\n'), ((644, 662), 'numpy.asarray', 'np.asarray', (['roi_tl'], {}), '(roi_tl)\n', (654, 662), True, 'import numpy as np\n'), ((1412, 1430), 'numpy.asarray', 'np.asarray', (['roi_tl'], {}), '(roi_tl)\n', (1422, 1430), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
def class_name(classid):
id_dict = {1:'Scratch', 2:'Dent', 3:'Shatter', 4:'Dislocation'}
return id_dict[classid]
def damage_cost(classid):
# cost_dict = {1: [800, 1400], 2:[1200, 3000],3:19000, 4:17000}
cost_dict = {1: 900, 2:1600, 3:19000, 4:17000}
return cost_dict[classid]
def area_ratio(image, roi, mask):
y1, x1, y2, x2 = tuple(roi)
crop_mask = mask[y1:y1+(y2-y1),x1:x1+(x2-x1)].copy()
pixels = cv2.countNonZero(np.float32(crop_mask))
image_area = image.shape[0] * image.shape[1]
area_ratio = 1 + (pixels / image_area)
return area_ratio
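# Note added for illustration (not in the original file): area_ratio returns
# 1 + (non-zero mask pixels inside the ROI / whole-image pixels), so it scales
# a base repair cost by a factor between 1 and 2 depending on how much of the
# image the damage covers.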
def costEstimate(image, rois, masks, classids):
cost_id_dict = {
"Shatter": {"Count": 0, "Cost": 0},
"Scratch": {"Count": 0, "Cost": 0},
"Dent": {"Count": 0, "Cost": 0},
"Dislocation": {"Count": 0, "Cost": 0}
}
total = 0
count = int()
cost_init = int()
for index in range(rois.shape[0]):
name = class_name(classids[index])
cost = damage_cost(classids[index])
ratio = area_ratio(image, rois[index], masks[: ,: ,index])
total = total + round(cost * ratio,2)
# unique_id = str()
# for roi in rois[index]:
# unique_id = unique_id + str(roi)
        if name == 'Scratch':
count = cost_id_dict[name]['Count'] + 1
cost_init = cost_id_dict[name]['Cost'] + round(cost * ratio,2)
cost_id_dict[name]['Count'] = count
cost_id_dict[name]['Cost'] = cost_init
# cost_id_dict[name] = "Range: Rs." + str(round(cost[0] * ratio,3)) + ' - Rs.' + str(round(cost[1] * ratio, 3))
        elif name == 'Dent':
count = cost_id_dict[name]['Count'] + 1
cost_init = cost_id_dict[name]['Cost'] + round(cost * ratio,2)
cost_id_dict[name]['Count'] = count
cost_id_dict[name]['Cost'] = cost_init
# cost_id_dict[name] = "Range: Rs." + str(cost[0] * ratio) + ' - Rs.' + str(cost[1] * ratio)
        elif name == 'Shatter':
count = cost_id_dict[name]['Count'] + 1
cost_init = cost_id_dict[name]['Cost'] + round(cost * ratio,2)
cost_id_dict[name]['Count'] = count
cost_id_dict[name]['Cost'] = cost_init
# cost_id_dict[name] = "Cost: Rs." + str(cost)
else:
count = cost_id_dict[name]['Count'] + 1
cost_init = cost_id_dict[name]['Cost'] + round(cost * ratio,2)
cost_id_dict[name]['Count'] = count
cost_id_dict[name]['Cost'] = cost_init
# cost_id_dict[name] = "Cost: Rs." + str(cost)
for name, values in cost_id_dict.copy().items():
if values['Count'] == 0:
cost_id_dict.pop(name)
return total, cost_id_dict
|
[
"numpy.float32"
] |
[((500, 521), 'numpy.float32', 'np.float32', (['crop_mask'], {}), '(crop_mask)\n', (510, 521), True, 'import numpy as np\n')]
|
import re
import numpy as np
def compounddic2atomsfraction(compounds):
def createNewDic(dic, multiplyby):
values = list(dic.values())
keys = dic.keys()
newValues = np.array(values)*multiplyby
newDic = dict(zip(keys, newValues))
return newDic
def composition2atoms(cstr):
lst = re.findall(r'([A-Z][a-z]?)(\d*\.?\d*)', cstr)
dic = {}
for i in lst:
if len(i[1]) > 0:
try:
dic[i[0]] = int(i[1])
except ValueError:
dic[i[0]] = float(i[1])
else:
dic[i[0]] = 1
return dic
dic = {}
for key in compounds.keys():
baseValue = compounds[key]
atoms = composition2atoms(key)
for a in atoms.keys():
dic[a] = dic.get(a, 0) + atoms[a]*baseValue
multiplyby = 1/np.sum(list(dic.values()))
atomsF = createNewDic(dic, multiplyby)
return atomsF
###############################################################################
#                                   Example                                   #
###############################################################################
AvailableCompounds = ['Ag2O', 'Al2O3', 'As2O3', 'As2O5', 'B2O3', 'BaO',
'Bi2O3', 'CaO', 'CdO', 'Ce2O3', 'CeO2', 'Cl', 'Cs2O',
'Cu2O', 'CuO', 'Er2O3', 'F', 'Fe2O3', 'Fe3O4', 'FeO',
'Ga2O3', 'Gd2O3', 'GeO', 'GeO2', 'I', 'K2O', 'La2O3',
'Li2O', 'MgO', 'Mn2O3', 'Mn2O7', 'Mn3O4', 'MnO', 'MnO2',
'Mo2O3', 'Mo2O5', 'MoO', 'MoO2', 'MoO3', 'N', 'N2O5',
'NO2', 'Na2O', 'Nb2O3', 'Nb2O5', 'P2O3', 'P2O5', 'Pb3O4',
'PbO', 'PbO2', 'SO2', 'SO3', 'Sb2O3', 'Sb2O5', 'SbO2',
'SiO', 'SiO2', 'Sn2O3', 'SnO', 'SnO2', 'SrO', 'Ta2O3',
'Ta2O5', 'TeO2', 'TeO3', 'Ti2O3', 'TiO', 'TiO2', 'V2O3',
'V2O5', 'VO2', 'VO6', 'WO3', 'Y2O3', 'Yb2O3', 'ZnO',
'ZrO2']
# this is what the user can provide as input: any quantity (positive real
# numbers) of one or more of the compounds from the AvailableCompounds list:
compostoDic = {
'Al2O3': 1,
'SiO': 0
}
atomDic = compounddic2atomsfraction(compostoDic)
print(atomDic)
# From the atomDic dictionary you get the atomic-fraction values of each atom to put into the network
|
[
"numpy.array",
"re.findall"
] |
[((336, 383), 're.findall', 're.findall', (['"""([A-Z][a-z]?)(\\\\d*\\\\.?\\\\d*)"""', 'cstr'], {}), "('([A-Z][a-z]?)(\\\\d*\\\\.?\\\\d*)', cstr)\n", (346, 383), False, 'import re\n'), ((194, 210), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (202, 210), True, 'import numpy as np\n')]
|
"""utilities to speedup calculations with jit
Author: <NAME>
Affiliation: TokyoTech & OSX
"""
import numpy as np
import torch
from numba import f8, jit
@jit(f8[:, :](f8[:, :]), nopython=True)
def get_normed_vec_mag(arr_vec: np.ndarray) -> np.ndarray:
"""compute
from [[x1, y1], [x2, y2], ...]
to [[normed_x1, normed_y1, mag], [normed_x2, normed_y2, mag], ...]
Args:
arr_vec (np.ndarray): un-normalized vector (2D)
Returns:
np.ndarray: normalized vector (3D)
"""
# np.linalg.norm with axis cannot be used with numba.jit
vec_mag = np.sqrt(np.sum(arr_vec ** 2, axis=1)).reshape(-1, 1)
vec_mag_avoid_zero = np.where(vec_mag == 0, 1, vec_mag)
arr_vec = arr_vec / vec_mag_avoid_zero
return np.hstack((arr_vec, vec_mag))
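# Worked example added for illustration (input values assumed, not from the
# original module): get_normed_vec_mag(np.array([[3., 4.]])) returns
# np.array([[0.6, 0.8, 5.0]]), i.e. the unit vector followed by its magnitude.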
@jit(
f8[:, :, :](f8[:, :, :], f8[:, :], f8[:, :, :], f8[:], f8[:]),
nopython=True,
)
def get_arr_others_info(
arr_current_locs: np.ndarray,
goals: np.ndarray,
arr_prev_locs: np.ndarray,
rads: np.ndarray,
max_speeds: np.ndarray,
) -> np.ndarray:
"""get other agents relative info
Args:
arr_current_locs (np.ndarray): current locations
goals (np.ndarray): goal positions for all agents
arr_prev_locs (np.ndarray): previous locations
rads (np.ndarray): radius for all agents
max_speeds (np.ndarray): max speed for all agents
Returns:
np.ndarray:
time *
(self-agent -> other-agent) *
(current_vec, goal_vec, prev_vec, rad, speed)
Todo:
improving readability
"""
num_agents = goals.shape[0]
T = arr_current_locs.shape[0]
# get relative pos
arr_relative_pos = np.zeros((T * num_agents * num_agents * 3, 2))
for t in range(T):
for i in range(num_agents):
idx = t * (num_agents * num_agents * 3) + i * (num_agents * 3)
current_pos = arr_current_locs[t][i]
arr_relative_pos[idx + 0 * num_agents : idx + 1 * num_agents] = (
arr_current_locs[t] - current_pos
)
arr_relative_pos[idx + 1 * num_agents : idx + 2 * num_agents] = (
goals - current_pos
)
arr_relative_pos[idx + 2 * num_agents : idx + 3 * num_agents] = (
arr_prev_locs[t] - current_pos
)
arr_relative_pos = get_normed_vec_mag(arr_relative_pos)
arr_others_info = np.empty((T, num_agents * num_agents, 11))
for t in range(T):
for i in range(num_agents):
idx = t * (num_agents * num_agents * 3) + i * (num_agents * 3)
for j in range(num_agents):
k = i * num_agents + j
arr_others_info[t, k, 0:3] = arr_relative_pos[
j + 0 * num_agents + idx
]
arr_others_info[t, k, 3:6] = arr_relative_pos[
j + 1 * num_agents + idx
]
arr_others_info[t, k, 6:9] = arr_relative_pos[
j + 2 * num_agents + idx
]
arr_others_info[t, k, 9] = rads[j]
arr_others_info[t, k, 10] = max_speeds[j]
return arr_others_info
@torch.jit.script
def get_self_info(
num_agents: int, arr_others_info: torch.Tensor
) -> torch.Tensor:
"""get self-info from array of other_agents_info
Args:
num_agents (int): number of agents
arr_others_info (torch.Tensor): other agents' info
Returns:
torch.Tensor: time * agent * (goal_vec, prev_vec, rad, max_speed)
"""
T = arr_others_info.shape[0]
self_idx = torch.tensor(
[
[
t * num_agents * num_agents + num_agents * i + i
for i in range(num_agents)
]
for t in range(T)
]
).reshape(-1)
return arr_others_info.reshape(-1, arr_others_info.shape[-1])[:, 3:][
self_idx
].reshape(T, num_agents, -1)
|
[
"numpy.hstack",
"numpy.where",
"numpy.sum",
"numpy.zeros",
"numpy.empty"
] |
[((669, 703), 'numpy.where', 'np.where', (['(vec_mag == 0)', '(1)', 'vec_mag'], {}), '(vec_mag == 0, 1, vec_mag)\n', (677, 703), True, 'import numpy as np\n'), ((758, 787), 'numpy.hstack', 'np.hstack', (['(arr_vec, vec_mag)'], {}), '((arr_vec, vec_mag))\n', (767, 787), True, 'import numpy as np\n'), ((1706, 1752), 'numpy.zeros', 'np.zeros', (['(T * num_agents * num_agents * 3, 2)'], {}), '((T * num_agents * num_agents * 3, 2))\n', (1714, 1752), True, 'import numpy as np\n'), ((2428, 2470), 'numpy.empty', 'np.empty', (['(T, num_agents * num_agents, 11)'], {}), '((T, num_agents * num_agents, 11))\n', (2436, 2470), True, 'import numpy as np\n'), ((599, 627), 'numpy.sum', 'np.sum', (['(arr_vec ** 2)'], {'axis': '(1)'}), '(arr_vec ** 2, axis=1)\n', (605, 627), True, 'import numpy as np\n')]
|
import os
import os.path as op
import numpy as np
from numpy.testing import assert_almost_equal
from ..core import ShootingPoint, find_and_replace
def test_read_cv_values():
test_file_loc = op.join(op.dirname(op.abspath(__file__)),
'test_data', 'COLVAR2')
sp = ShootingPoint(name='test', input_file='.')
sp._read_cv_values(test_file_loc)
test_values = sp.cv_values
true_values = np.array([1.000000, 2.000000, 3.000000])
for test, true in zip(test_values, true_values):
assert_almost_equal(test, true)
return
def test_find_and_replace():
test_line = "abcABC123456"
test_sub = {"ABC": "EDF", "456": "789"}
test_result = find_and_replace(test_line, test_sub)
assert test_result == "abcEDF123789"
return
def test_check_if_commited():
sp = ShootingPoint(name='test', input_file='.')
test_file_loc = op.join(op.dirname(op.abspath(__file__)),
'test_data', 'commit.log')
test_result = sp.check_if_committed(test_file_loc)
assert test_result == 2, "Not getting correct basin."
test_file_loc = op.join(op.dirname(op.abspath(__file__)),
'test_data', 'no_commit.log')
test_result = sp.check_if_committed(test_file_loc)
assert not test_result, "Not reporting 'None' when it does not commit."
return
def test_log():
sp = ShootingPoint(name='test', input_file='.')
sp.name = 'test'
sp.cv_values = [1, 2, 3]
sp.result = 'accepted'
sp.log(filename="test.log")
with open("test.log", 'r') as f:
line = f.readline()
os.remove("test.log")
line = line.split(' ')
line[-1] = line[-1].rstrip()
assert line[0] == "test"
for test, true in zip(line[1:4], sp.cv_values):
assert float(test) == float(true)
# assert line[1:6] == test_CVs
assert line[-1] == "accepted"
|
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"os.path.abspath",
"os.remove"
] |
[((432, 457), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (440, 457), True, 'import numpy as np\n'), ((1616, 1637), 'os.remove', 'os.remove', (['"""test.log"""'], {}), "('test.log')\n", (1625, 1637), False, 'import os\n'), ((534, 565), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['test', 'true'], {}), '(test, true)\n', (553, 565), False, 'from numpy.testing import assert_almost_equal\n'), ((217, 237), 'os.path.abspath', 'op.abspath', (['__file__'], {}), '(__file__)\n', (227, 237), True, 'import os.path as op\n'), ((914, 934), 'os.path.abspath', 'op.abspath', (['__file__'], {}), '(__file__)\n', (924, 934), True, 'import os.path as op\n'), ((1145, 1165), 'os.path.abspath', 'op.abspath', (['__file__'], {}), '(__file__)\n', (1155, 1165), True, 'import os.path as op\n')]
|
from tqdm import tqdm
import pandas as pd
import numpy as np
from pathlib import Path
from hashlib import md5
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy import sparse as sp
import argparse
def break_text(raw):
return np.array([ i for i, t in enumerate(raw) if t == '¶' ][::2])
def main(args):
if args.output.exists():
        if not args.overwrite:
raise FileExistsError(f"Output directory {args.output} exists.")
print(f"Output directory {args.output} exists. It will be overwritten.")
args.output.mkdir(exist_ok=True, parents=True)
ds_path = Path(args.dataset)
raw_text = {}
break_idx = {}
for fn in tqdm(list((ds_path / "en").glob("*.txt")), desc='Parsing text'):
fid = fn.name.split("_")[2]
raw = fn.read_text()
idx = break_text(raw)
break_idx[ fid ] = np.array(idx)
for i in range(len(idx)):
t = raw[idx[i]:] if i == len(idx)-1 else raw[idx[i]:idx[i+1]]
raw_text[f"{fid}_{i}"] = t.replace('¶', '').strip()
raw_text = pd.Series(raw_text).sort_index()
rel = {}
for fn in tqdm(list((ds_path / "en").glob("*.ann")), desc='Parsing annotations'):
fid = fn.name.split("_")[2]
for annl in fn.open():
tp, bidx, eidx = annl.strip().split("\t")[1].split(" ")
if len(break_idx[fid]) == 1:
pass_id = 0
else:
pass_id = (break_idx[fid] <= int(bidx)).cumsum()[-1]-1
assert pass_id >= 0
rel[ f"{fid}_{pass_id}", tp ] = True
rel_info = pd.Series(rel).sort_index().unstack(1)\
.join(raw_text.rename('text'), how='right')\
.drop(['text', 'Other', 'Other_language'], axis=1).fillna(False)
rel_info = rel_info.rename_axis('meta_pid')\
.assign(meta_md5=rel_info.index.astype('str').map(lambda x: md5(x.encode()).hexdigest()))\
.reset_index()
assert (raw_text.index == rel_info.meta_pid).all()
X = TfidfVectorizer(sublinear_tf=True, use_idf=False).fit_transform(raw_text)
print("Saving files...")
rel_info.to_pickle( args.output / "rel_info.pkl" )
sp.save_npz( str(args.output / "X_file.npz"), X )
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=Path) # '/media/eugene/research/datasets/askfm-cyberbullying-data'
parser.add_argument('--output', type=Path)
parser.add_argument('--overwrite', action='store_true', default=False)
main(parser.parse_args())
|
[
"pandas.Series",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer"
] |
[((616, 634), 'pathlib.Path', 'Path', (['args.dataset'], {}), '(args.dataset)\n', (620, 634), False, 'from pathlib import Path\n'), ((2296, 2321), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2319, 2321), False, 'import argparse\n'), ((874, 887), 'numpy.array', 'np.array', (['idx'], {}), '(idx)\n', (882, 887), True, 'import numpy as np\n'), ((1084, 1103), 'pandas.Series', 'pd.Series', (['raw_text'], {}), '(raw_text)\n', (1093, 1103), True, 'import pandas as pd\n'), ((2042, 2091), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'use_idf': '(False)'}), '(sublinear_tf=True, use_idf=False)\n', (2057, 2091), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1607, 1621), 'pandas.Series', 'pd.Series', (['rel'], {}), '(rel)\n', (1616, 1621), True, 'import pandas as pd\n')]
|
import torch
import os
from glob import glob
import numpy as np
from torch.nn import functional as F
import time
class Generator(object):
def __init__(self, model, exp_name, threshold = 0.1, checkpoint = None, device = torch.device("cuda")):
self.model = model.to(device)
self.model.eval()
self.device = device
self.checkpoint_path = os.path.dirname(__file__) + '/../experiments/{}/checkpoints/'.format( exp_name)
self.load_checkpoint(checkpoint)
self.threshold = threshold
def generate_point_cloud(self, data, num_steps = 10, num_points = 900000, filter_val = 0.009):
start = time.time()
inputs = data['inputs'].to(self.device)
for param in self.model.parameters():
param.requires_grad = False
sample_num = 200000
samples_cpu = np.zeros((0, 3))
samples = torch.rand(1, sample_num, 3).float().to(self.device) * 3 - 1.5
samples.requires_grad = True
encoding = self.model.encoder(inputs)
i = 0
while len(samples_cpu) < num_points:
print('iteration', i)
for j in range(num_steps):
print('refinement', j)
df_pred = torch.clamp(self.model.decoder(samples, *encoding), max=self.threshold)
df_pred.sum().backward()
gradient = samples.grad.detach()
samples = samples.detach()
df_pred = df_pred.detach()
inputs = inputs.detach()
samples = samples - F.normalize(gradient, dim=2) * df_pred.reshape(-1, 1) # better use Tensor.copy method?
samples = samples.detach()
samples.requires_grad = True
print('finished refinement')
if not i == 0:
samples_cpu = np.vstack((samples_cpu, samples[df_pred < filter_val].detach().cpu().numpy()))
samples = samples[df_pred < 0.03].unsqueeze(0)
indices = torch.randint(samples.shape[1], (1, sample_num))
samples = samples[[[0, ] * sample_num], indices]
samples += (self.threshold / 3) * torch.randn(samples.shape).to(self.device) # 3 sigma rule
samples = samples.detach()
samples.requires_grad = True
i += 1
print(samples_cpu.shape)
duration = time.time() - start
return samples_cpu, duration
def load_checkpoint(self, checkpoint):
checkpoints = glob(self.checkpoint_path + '/*')
if checkpoint is None:
if len(checkpoints) == 0:
print('No checkpoints found at {}'.format(self.checkpoint_path))
return 0, 0
checkpoints = [os.path.splitext(os.path.basename(path))[0].split('_')[-1] for path in checkpoints]
checkpoints = np.array(checkpoints, dtype=float)
checkpoints = np.sort(checkpoints)
path = self.checkpoint_path + 'checkpoint_{}h:{}m:{}s_{}.tar'.format(
*[*convertSecs(checkpoints[-1]), checkpoints[-1]])
else:
path = self.checkpoint_path + '{}.tar'.format(checkpoint)
print('Loaded checkpoint from: {}'.format(path))
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint['model_state_dict'])
epoch = checkpoint['epoch']
training_time = checkpoint['training_time']
return epoch, training_time
def convertMillis(millis):
seconds = int((millis / 1000) % 60)
minutes = int((millis / (1000 * 60)) % 60)
hours = int((millis / (1000 * 60 * 60)))
return hours, minutes, seconds
def convertSecs(sec):
seconds = int(sec % 60)
minutes = int((sec / 60) % 60)
hours = int((sec / (60 * 60)))
return hours, minutes, seconds
|
[
"torch.rand",
"torch.load",
"numpy.sort",
"torch.nn.functional.normalize",
"os.path.dirname",
"numpy.zeros",
"torch.randint",
"numpy.array",
"os.path.basename",
"time.time",
"torch.randn",
"glob.glob",
"torch.device"
] |
[((224, 244), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (236, 244), False, 'import torch\n'), ((645, 656), 'time.time', 'time.time', ([], {}), '()\n', (654, 656), False, 'import time\n'), ((844, 860), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (852, 860), True, 'import numpy as np\n'), ((2488, 2521), 'glob.glob', 'glob', (["(self.checkpoint_path + '/*')"], {}), "(self.checkpoint_path + '/*')\n", (2492, 2521), False, 'from glob import glob\n'), ((3231, 3247), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (3241, 3247), False, 'import torch\n'), ((371, 396), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (386, 396), False, 'import os\n'), ((1990, 2038), 'torch.randint', 'torch.randint', (['samples.shape[1]', '(1, sample_num)'], {}), '(samples.shape[1], (1, sample_num))\n', (2003, 2038), False, 'import torch\n'), ((2362, 2373), 'time.time', 'time.time', ([], {}), '()\n', (2371, 2373), False, 'import time\n'), ((2838, 2872), 'numpy.array', 'np.array', (['checkpoints'], {'dtype': 'float'}), '(checkpoints, dtype=float)\n', (2846, 2872), True, 'import numpy as np\n'), ((2899, 2919), 'numpy.sort', 'np.sort', (['checkpoints'], {}), '(checkpoints)\n', (2906, 2919), True, 'import numpy as np\n'), ((1552, 1580), 'torch.nn.functional.normalize', 'F.normalize', (['gradient'], {'dim': '(2)'}), '(gradient, dim=2)\n', (1563, 1580), True, 'from torch.nn import functional as F\n'), ((2146, 2172), 'torch.randn', 'torch.randn', (['samples.shape'], {}), '(samples.shape)\n', (2157, 2172), False, 'import torch\n'), ((879, 907), 'torch.rand', 'torch.rand', (['(1)', 'sample_num', '(3)'], {}), '(1, sample_num, 3)\n', (889, 907), False, 'import torch\n'), ((2745, 2767), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2761, 2767), False, 'import os\n')]
|
import numpy as np
def process_actions(actions, l_action):
n_steps = len(actions)
actions_1hot = np.zeros([n_steps, l_action], dtype=int)
actions_1hot[np.arange(n_steps), actions] = 1
return actions_1hot
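# Illustrative sketch (inputs assumed): process_actions([2, 0], l_action=3)
# returns [[0, 0, 1], [1, 0, 0]], one one-hot row per time step.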
def get_action_others_1hot(action_all, agent_id, l_action):
action_all = list(action_all)
del action_all[agent_id]
num_others = len(action_all)
actions_1hot = np.zeros([num_others, l_action], dtype=int)
actions_1hot[np.arange(num_others), action_all] = 1
return actions_1hot.flatten()
def get_action_others_1hot_batch(list_action_all, agent_id, l_action):
n_steps = len(list_action_all)
n_agents = len(list_action_all[0])
matrix = np.stack(list_action_all) # [n_steps, n_agents]
self_removed = np.delete(matrix, agent_id, axis=1)
actions_1hot = np.zeros([n_steps, n_agents-1, l_action], dtype=np.float32)
grid = np.indices((n_steps, n_agents-1))
actions_1hot[grid[0], grid[1], self_removed] = 1
actions_1hot = np.reshape(actions_1hot, [n_steps, l_action*(n_agents-1)])
return actions_1hot
def process_rewards(rewards, gamma):
n_steps = len(rewards)
gamma_prod = np.cumprod(np.ones(n_steps) * gamma)
returns = np.cumsum((rewards * gamma_prod)[::-1])[::-1]
returns = returns / gamma_prod
return returns
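# Worked example added for illustration (values assumed): with
# rewards = np.array([1., 0., 2.]) and gamma = 0.9, gamma_prod is
# [0.9, 0.81, 0.729] and process_rewards returns the discounted returns
# [1 + 0*0.9 + 2*0.81, 0 + 2*0.9, 2] = [2.62, 1.8, 2.0].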
|
[
"numpy.reshape",
"numpy.ones",
"numpy.delete",
"numpy.indices",
"numpy.stack",
"numpy.zeros",
"numpy.cumsum",
"numpy.arange"
] |
[((112, 152), 'numpy.zeros', 'np.zeros', (['[n_steps, l_action]'], {'dtype': 'int'}), '([n_steps, l_action], dtype=int)\n', (120, 152), True, 'import numpy as np\n'), ((415, 458), 'numpy.zeros', 'np.zeros', (['[num_others, l_action]'], {'dtype': 'int'}), '([num_others, l_action], dtype=int)\n', (423, 458), True, 'import numpy as np\n'), ((719, 744), 'numpy.stack', 'np.stack', (['list_action_all'], {}), '(list_action_all)\n', (727, 744), True, 'import numpy as np\n'), ((788, 823), 'numpy.delete', 'np.delete', (['matrix', 'agent_id'], {'axis': '(1)'}), '(matrix, agent_id, axis=1)\n', (797, 823), True, 'import numpy as np\n'), ((844, 905), 'numpy.zeros', 'np.zeros', (['[n_steps, n_agents - 1, l_action]'], {'dtype': 'np.float32'}), '([n_steps, n_agents - 1, l_action], dtype=np.float32)\n', (852, 905), True, 'import numpy as np\n'), ((916, 951), 'numpy.indices', 'np.indices', (['(n_steps, n_agents - 1)'], {}), '((n_steps, n_agents - 1))\n', (926, 951), True, 'import numpy as np\n'), ((1024, 1086), 'numpy.reshape', 'np.reshape', (['actions_1hot', '[n_steps, l_action * (n_agents - 1)]'], {}), '(actions_1hot, [n_steps, l_action * (n_agents - 1)])\n', (1034, 1086), True, 'import numpy as np\n'), ((1250, 1289), 'numpy.cumsum', 'np.cumsum', (['(rewards * gamma_prod)[::-1]'], {}), '((rewards * gamma_prod)[::-1])\n', (1259, 1289), True, 'import numpy as np\n'), ((171, 189), 'numpy.arange', 'np.arange', (['n_steps'], {}), '(n_steps)\n', (180, 189), True, 'import numpy as np\n'), ((477, 498), 'numpy.arange', 'np.arange', (['num_others'], {}), '(num_others)\n', (486, 498), True, 'import numpy as np\n'), ((1209, 1225), 'numpy.ones', 'np.ones', (['n_steps'], {}), '(n_steps)\n', (1216, 1225), True, 'import numpy as np\n')]
|
# import the necessary packages
from imutils.video import VideoStream
from imutils import face_utils
import argparse
import imutils
import time
import dlib
import cv2
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
from matplotlib import pyplot as plt
import os
# construct the argument parser and parse the arguments
#path to facial landmark predictor
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
#path to video or use camera
ap.add_argument("-i", "--input_method", required=True,
help="path to video or use camera")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then load our trained shape predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
camera_video=int(args["input_method"])
# 0 for video camera
if camera_video == 0:
    # initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = cv2.VideoCapture(0)
time.sleep(2.0)
#1 for path to video on system
elif camera_video == 1:
vs = cv2.VideoCapture("NTHU_yAWNING/16-FemaleGlasses-Yawning.avi")
#vs=cv2.VideoCapture("D:/sayus/Pictures/Camera Roll/WIN_20200716_18_36_16_Pro.mp4")
else:
print("Invalid Argument")
d=0
e=0
#load our pre-trained feature extractotr and yawn detector
feature_extractor=load_model('feature_extractor_1.h5')
yawn_detector=load_model('GRU_best_1.h5')
#set threshold values
yawn_detection_sigmoid=0.70
yawn_detection_frames=0
yawn_detection=0
input_feature_extractor=[]
count=0
start_time = time.perf_counter()
is_yawn=False
# loop over the frames from the video stream
while True:
# grab the frame from the video stream, resize it to have a
# maximum width of 400 pixels, and convert it to grayscale
grabbed,frame = vs.read()
if grabbed==False:
break
count=count+1
frame = imutils.resize(frame, width=400)
# detect faces in image
rects = detector(frame, 0)
# loop over the face detections
for rect in rects:
# convert the dlib rectangle into an OpenCV bounding box and draw a bounding box surrounding the face
#use our custom dlib shape predictor to predict the location
# of our landmark coordinates, then convert the prediction to
# an easily parsable NumPy array
shape = predictor(frame, rect)
shape = face_utils.shape_to_np(shape)
(x, y, w, h) = cv2.boundingRect(shape)
#extract mouth region
roi = frame[y-int(h/3):y + int(h), x:x + int(w)]
#resize to 50x50
roi=cv2.resize(roi,(50,50))
cv2.rectangle(frame, (x, y-int(h/3)), (x + int(w), y + int(5*h/4)), (0, 255, 0), 2)
input_feature_extractor.append(roi)
#append 32 frames together and make prediction
if len(input_feature_extractor)<32:
continue
input_feature_extractor=np.array(input_feature_extractor)
out_feature_extractor=feature_extractor.predict(input_feature_extractor)
out_feature_extractor=out_feature_extractor.reshape(1,32,256)
out_yawn_detector=yawn_detector.predict(out_feature_extractor)
#check for threshold
if out_yawn_detector > yawn_detection_sigmoid:
yawn_detection=yawn_detection+1
if yawn_detection>yawn_detection_frames:
frame = cv2.putText(frame, 'Yawning', (275,25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 1, cv2.LINE_AA)
end_time = time.perf_counter()
u1=float("{:.2f}".format(count/(end_time-start_time)))
u="fps: "+str(u1)
#put fps on frame
cv2.putText(frame, u, (15,25), cv2.FONT_HERSHEY_SIMPLEX ,
1, (255,0,0), 1, cv2.LINE_AA)
is_yawn=True
yawn_detection=0
else:
yawn_detection=0
input_feature_extractor=[]
# show the frame
end_time = time.perf_counter()
u1=float("{:.2f}".format(count/(end_time-start_time)))
u="fps: "+str(u1)
# if is_yawn==False:
# frame = cv2.putText(frame, 'Not Yawning', (205,25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 1, cv2.LINE_AA)
# else:
# is_yawn=False
cv2.putText(frame, u, (15,25), cv2.FONT_HERSHEY_SIMPLEX ,
1, (255,0,0), 1, cv2.LINE_AA)
cv2.imshow("Frame", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
vs.release()
|
[
"argparse.ArgumentParser",
"dlib.shape_predictor",
"time.perf_counter",
"time.sleep",
"cv2.putText",
"dlib.get_frontal_face_detector",
"imutils.resize",
"cv2.imshow",
"tensorflow.keras.models.load_model",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"imutils.face_utils.shape_to_np",
"numpy.array",
"cv2.resize",
"cv2.waitKey",
"cv2.boundingRect"
] |
[((421, 446), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (444, 446), False, 'import argparse\n'), ((863, 895), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (893, 895), False, 'import dlib\n'), ((909, 954), 'dlib.shape_predictor', 'dlib.shape_predictor', (["args['shape_predictor']"], {}), "(args['shape_predictor'])\n", (929, 954), False, 'import dlib\n'), ((1543, 1579), 'tensorflow.keras.models.load_model', 'load_model', (['"""feature_extractor_1.h5"""'], {}), "('feature_extractor_1.h5')\n", (1553, 1579), False, 'from tensorflow.keras.models import load_model\n'), ((1595, 1622), 'tensorflow.keras.models.load_model', 'load_model', (['"""GRU_best_1.h5"""'], {}), "('GRU_best_1.h5')\n", (1605, 1622), False, 'from tensorflow.keras.models import load_model\n'), ((1771, 1790), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1788, 1790), False, 'import time\n'), ((4346, 4369), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4367, 4369), False, 'import cv2\n'), ((1165, 1184), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1181, 1184), False, 'import cv2\n'), ((1188, 1203), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1198, 1203), False, 'import time\n'), ((2073, 2105), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (2087, 2105), False, 'import imutils\n'), ((3883, 3902), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3900, 3902), False, 'import time\n'), ((4147, 4240), 'cv2.putText', 'cv2.putText', (['frame', 'u', '(15, 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), '(frame, u, (15, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1,\n cv2.LINE_AA)\n', (4158, 4240), False, 'import cv2\n'), ((4260, 4286), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (4270, 4286), False, 'import cv2\n'), ((1268, 1329), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""NTHU_yAWNING/16-FemaleGlasses-Yawning.avi"""'], {}), "('NTHU_yAWNING/16-FemaleGlasses-Yawning.avi')\n", (1284, 1329), False, 'import cv2\n'), ((2533, 2562), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (2555, 2562), False, 'from imutils import face_utils\n'), ((2581, 2604), 'cv2.boundingRect', 'cv2.boundingRect', (['shape'], {}), '(shape)\n', (2597, 2604), False, 'import cv2\n'), ((2709, 2734), 'cv2.resize', 'cv2.resize', (['roi', '(50, 50)'], {}), '(roi, (50, 50))\n', (2719, 2734), False, 'import cv2\n'), ((2988, 3021), 'numpy.array', 'np.array', (['input_feature_extractor'], {}), '(input_feature_extractor)\n', (2996, 3021), True, 'import numpy as np\n'), ((4292, 4306), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4303, 4306), False, 'import cv2\n'), ((3397, 3499), 'cv2.putText', 'cv2.putText', (['frame', '"""Yawning"""', '(275, 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'Yawning', (275, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0,\n 255), 1, cv2.LINE_AA)\n", (3408, 3499), False, 'import cv2\n'), ((3509, 3528), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3526, 3528), False, 'import time\n'), ((3640, 3733), 'cv2.putText', 'cv2.putText', (['frame', 'u', '(15, 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), '(frame, u, (15, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1,\n cv2.LINE_AA)\n', (3651, 3733), False, 'import cv2\n')]
|
import pandas as pd
import os, sys
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from sklearn.utils import check_array
import numpy as np
from datetime import timedelta
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))+'/'
def mean_absolute_percentage_error(y_true, y_pred):
mask = y_true != 0
return (np.fabs(y_true - y_pred)/y_true)[mask].mean()
# function that returns a list of days not including weekends, holidays, or event day
# if pge == True will return weekdays for PG&E otherwise it will return weekdays for SCE
def get_workdays(start,end):
start = pd.to_datetime(start).date()
end = pd.to_datetime(end).date()
us_bd = CustomBusinessDay(calendar=USFederalHolidayCalendar())
workdays = pd.DatetimeIndex(start=start, end=end, freq=us_bd)
return workdays
# Returns the start and end timestamp of a single day
def get_window_of_day(date):
date = pd.to_datetime(date).date()
start, end = pd.date_range(start=date, periods=2, freq='1d', tz='US/Pacific')
start_ts = start.isoformat()
end_ts = end.isoformat()
return start_ts, end_ts
def get_closest_station(site):
stations = pd.read_csv(os.path.join(PROJECT_ROOT, 'weather_stations.csv'), index_col='site')
try:
uuid = stations.loc[site].values[0]
return uuid
except:
print("couldn't find closest weather station for %s" % site)
return None
def get_date_str(date):
date = pd.to_datetime(date).date()
return format(date)
def get_month_window(date):
end_date = pd.to_datetime(date).date() + timedelta(days=2)
start_date = end_date - timedelta(days=30)
start_ts = pd.to_datetime(start_date).tz_localize('US/Pacific').isoformat()
end_ts = pd.to_datetime(end_date).tz_localize('US/Pacific').isoformat()
return start_ts, end_ts
|
[
"numpy.fabs",
"pandas.tseries.holiday.USFederalHolidayCalendar",
"pandas.DatetimeIndex",
"pandas.to_datetime",
"os.path.join",
"os.path.dirname",
"datetime.timedelta",
"pandas.date_range"
] |
[((800, 850), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', ([], {'start': 'start', 'end': 'end', 'freq': 'us_bd'}), '(start=start, end=end, freq=us_bd)\n', (816, 850), True, 'import pandas as pd\n'), ((1011, 1075), 'pandas.date_range', 'pd.date_range', ([], {'start': 'date', 'periods': '(2)', 'freq': '"""1d"""', 'tz': '"""US/Pacific"""'}), "(start=date, periods=2, freq='1d', tz='US/Pacific')\n", (1024, 1075), True, 'import pandas as pd\n'), ((268, 293), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (283, 293), False, 'import os, sys\n'), ((1225, 1275), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""weather_stations.csv"""'], {}), "(PROJECT_ROOT, 'weather_stations.csv')\n", (1237, 1275), False, 'import os, sys\n'), ((1631, 1648), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1640, 1648), False, 'from datetime import timedelta\n'), ((1677, 1695), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (1686, 1695), False, 'from datetime import timedelta\n'), ((652, 673), 'pandas.to_datetime', 'pd.to_datetime', (['start'], {}), '(start)\n', (666, 673), True, 'import pandas as pd\n'), ((691, 710), 'pandas.to_datetime', 'pd.to_datetime', (['end'], {}), '(end)\n', (705, 710), True, 'import pandas as pd\n'), ((757, 783), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (781, 783), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((966, 986), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (980, 986), True, 'import pandas as pd\n'), ((1505, 1525), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (1519, 1525), True, 'import pandas as pd\n'), ((1601, 1621), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (1615, 1621), True, 'import pandas as pd\n'), ((388, 412), 'numpy.fabs', 'np.fabs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (395, 412), True, 'import numpy as np\n'), ((1711, 1737), 'pandas.to_datetime', 'pd.to_datetime', (['start_date'], {}), '(start_date)\n', (1725, 1737), True, 'import pandas as pd\n'), ((1789, 1813), 'pandas.to_datetime', 'pd.to_datetime', (['end_date'], {}), '(end_date)\n', (1803, 1813), True, 'import pandas as pd\n')]
|
"""
UKPDS
See:
"""
import numpy as np
from cvdm.score import BaseRisk
from cvdm.score import clean_age, clean_hba1c, clean_bp, clean_tchdl
# coefficients for survival
BETA = np.array([ 1.059, # age at diagnosis of diabetes
0.525, # risk for females
0.390, # Afro-Carribean ethnicity
1.350, # smoking
1.183, # HBA1c
1.088, # 10mmHg increase in systolic blood pressure
3.845 # unit increase in log of lipid ratio
])
Q_0 = 0.0112 # intercept
D = 1.078 # risk ratio for each year increase in duration of diagnosed diabetes
def ukpds(ageDiab, age, female, ac, smoking, hba1c, sbp, tchdl, tYear=10):
"""
Calculate the number of years to forecast the risk.
"""
xFeat = np.array([clean_age(age)-55,
female,
ac,
bool(smoking),
clean_hba1c(hba1c)-6.72,
(clean_bp(sbp) - 135.7)/10,
np.log(clean_tchdl(tchdl))-1.59])
q = Q_0 * np.prod(np.power(BETA, xFeat))
uscore = 1 - np.exp(-q * D**(age-ageDiab)* (1-D**tYear)/ (1 - D))
return max(uscore, 0.0)
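# Hypothetical call, for illustration only (all input values assumed):
# 10-year risk for a 60-year-old male diagnosed at 50, non-Afro-Caribbean,
# non-smoker:
#     ukpds(ageDiab=50, age=60, female=0, ac=0, smoking=0,
#           hba1c=7.0, sbp=140, tchdl=5.0, tYear=10)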
class Ukpds(BaseRisk):
tYear = None
features = ["diab_age",
"index_age",
"female",
"AC",
"cur_smoke",
"hba1c",
"sbp"]
feat_key = features + ["tchdl"]
def __init__(self, tYear=10):
self.tYear = tYear
def score(self, row):
return ukpds(row["diab_age"],
row["index_age"],
row["female"],
row["AC"],
row["cur_smoke"],
row["hba1c"],
row["sbp"],
row["tchdl"],
tYear=self.tYear)
def get_features(self, row):
feat_dict = super().get_features(row)
feat_dict["tchdl_log"] = np.log(row["tchdl"])
return feat_dict
|
[
"numpy.power",
"cvdm.score.clean_age",
"numpy.log",
"numpy.exp",
"numpy.array",
"cvdm.score.clean_hba1c",
"cvdm.score.clean_tchdl",
"cvdm.score.clean_bp"
] |
[((179, 236), 'numpy.array', 'np.array', (['[1.059, 0.525, 0.39, 1.35, 1.183, 1.088, 3.845]'], {}), '([1.059, 0.525, 0.39, 1.35, 1.183, 1.088, 3.845])\n', (187, 236), True, 'import numpy as np\n'), ((1134, 1196), 'numpy.exp', 'np.exp', (['(-q * D ** (age - ageDiab) * (1 - D ** tYear) / (1 - D))'], {}), '(-q * D ** (age - ageDiab) * (1 - D ** tYear) / (1 - D))\n', (1140, 1196), True, 'import numpy as np\n'), ((2003, 2023), 'numpy.log', 'np.log', (["row['tchdl']"], {}), "(row['tchdl'])\n", (2009, 2023), True, 'import numpy as np\n'), ((1094, 1115), 'numpy.power', 'np.power', (['BETA', 'xFeat'], {}), '(BETA, xFeat)\n', (1102, 1115), True, 'import numpy as np\n'), ((807, 821), 'cvdm.score.clean_age', 'clean_age', (['age'], {}), '(age)\n', (816, 821), False, 'from cvdm.score import clean_age, clean_hba1c, clean_bp, clean_tchdl\n'), ((941, 959), 'cvdm.score.clean_hba1c', 'clean_hba1c', (['hba1c'], {}), '(hba1c)\n', (952, 959), False, 'from cvdm.score import clean_age, clean_hba1c, clean_bp, clean_tchdl\n'), ((989, 1002), 'cvdm.score.clean_bp', 'clean_bp', (['sbp'], {}), '(sbp)\n', (997, 1002), False, 'from cvdm.score import clean_age, clean_hba1c, clean_bp, clean_tchdl\n'), ((1045, 1063), 'cvdm.score.clean_tchdl', 'clean_tchdl', (['tchdl'], {}), '(tchdl)\n', (1056, 1063), False, 'from cvdm.score import clean_age, clean_hba1c, clean_bp, clean_tchdl\n')]
|
"""
Module containing the classes used for graph-based modelling:
Sommet, Arc, Graphe
author: cmarichal
"""
from typing import Tuple, List
from math import floor
import numpy as np
from classes_traitement_plan import CouleurSpeciale, Plan
class Sommet:
"""Sommet ayant une position et un numéro"""
def __init__(self, numero: int, pos: Tuple[int, int], majeur: bool = False):
self.pos = pos
self.numero = numero
self.majeur = majeur
class Arc:
"""Arc comportant 2 sommets, une longueur et une route"""
def __init__(self, sommet_depart: Sommet, sommet_arrive: Sommet, longueur: int):
self.sommets = (sommet_depart, sommet_arrive)
self.longueur = longueur
class Graphe:
"""Graphe mathématique comportant la liste des sommets et la matrice d'adjacence"""
def __init__(self):
self.matrice_adjacence = np.array([])
self.liste_sommets = []
@staticmethod
def graphe_from_plan(plan: Plan):
"""retourne le graphe associé à une image prétraitée"""
nouveau_graphe = Graphe()
sommets, coefprop = Graphe.cherche_sommets(plan)
nouveau_graphe.liste_sommets = sommets
Gr = []
        for i in range(len(sommets)):  # create a zero matrix of the right size
Gr.append([0] * len(sommets))
for i in range(len(sommets) - 1):
for k in range(i + 1, len(sommets)):
                if plan.verifLignePaint(sommets[i].pos, sommets[k].pos):  # check that the 2 vertices are linked by an edge
x = sommets[i].pos[0] - sommets[k].pos[0]
y = sommets[i].pos[1] - sommets[k].pos[1]
                    Gr[i][k] = floor(coefprop * np.sqrt(x ** 2 + y ** 2))  # distance between the vertices
                    Gr[k][i] = Gr[i][k]  # symmetric matrix
                else:
                    Gr[i][k] = -1  # unreachable vertex
Gr[k][i] = -1
nouveau_graphe.matrice_adjacence = np.array(Gr)
return nouveau_graphe
@staticmethod
def cherche_sommets(plan: Plan) -> Tuple[List[Sommet], float]:
"""repère les sommets/pixels rouges"""
sommets = []
echelle = []
for i in range(len(plan.image_255)):
for j in range(len(plan.image_255[0])):
code_pixel = list(plan.image_255[i][j])
if code_pixel == CouleurSpeciale.ROUGE.value:
sommets.append(Sommet(numero=len(sommets), pos=(i, j)))
elif code_pixel == CouleurSpeciale.ROSE.value:
sommets.append(Sommet(numero=len(sommets), pos=(i, j), majeur=True))
elif code_pixel == CouleurSpeciale.VIOLET.value:
echelle.append((i, j))
        coefprop = plan.echelle / (echelle[1][1] - echelle[0][1])  # pixels-to-metres proportionality coefficient
return sommets, coefprop
def get_liste_arcs_graphe(self) -> List[Tuple[int, int]]:
"""renvoie la liste de tous les arcs"""
L = []
for i in range(len(self.matrice_adjacence)):
for j in range(len(self.matrice_adjacence[0])):
if self.matrice_adjacence[i][j] != 0 and self.matrice_adjacence[i][j] != -1:
L.append((i, j))
return L
def get_liste_sommets_majeurs(self) -> List[Sommet]:
"""Renvoie la liste des sommets majeurs"""
sommets_majeurs = []
for sommet in self.liste_sommets:
if sommet.majeur:
sommets_majeurs.append(sommet)
return sommets_majeurs
|
[
"numpy.array",
"numpy.sqrt"
] |
[((895, 907), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (903, 907), True, 'import numpy as np\n'), ((2004, 2016), 'numpy.array', 'np.array', (['Gr'], {}), '(Gr)\n', (2012, 2016), True, 'import numpy as np\n'), ((1730, 1754), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (1737, 1754), True, 'import numpy as np\n')]
|
"""Support routines for the qdyn_prop_gate utility"""
import re
import numpy as np
from .units import UnitFloat
def _isqrt(n):
"""Integer square root of n > 0
>>> _isqrt(1024**2)
1024
>>> _isqrt(10)
3
"""
assert n >= 0
x = n
y = (x + 1) // 2
while y < x:
x = y
y = (x + n // x) // 2
return x
def get_prop_gate_of_t(gates_file, with_t=False):
r"""Yield gates in `gates_file`, where `gates_file` is in the format
written by the ``qdyn_prop_gate`` utility's ``--write-gate`` option. That
is, each row in `gates_files` has $2 n^2 + 1$ columns. The first column is
a time stamp, the remaining columns are the real and imaginary part for
each entry in the $n \times n$ gate (vectorized in column-major format). If
`with_t` is False (default), yield only the gates, otherwise yield both the
gates and the time stamp for each gate
Returns:
* If ``with_t=False``, iterator over gates, where each gate is a
complex $n \times n$ numpy matrix, or a Gate2Q instance for a $4
\times 4$ gate
* If ``with_t=True``, iterator of tuples ``(gate, t)``, where ``t`` is
a float or an instance of UnitFloat if the time unit can be derived
from the header of `gates_file`
"""
with open(gates_file) as in_fh:
time_unit = None
for line in in_fh:
if line.startswith('#'):
try:
time_unit = re.search(r't\s*\[(\w+)\]', line).group(1)
except AttributeError:
pass
else:
vals = np.array([float(v) for v in line.split()])
n = _isqrt((len(vals) - 1) // 2)
assert 2 * n * n + 1 == len(vals)
shape = (n, n)
gate = np.reshape(
vals[1::2], shape, order='F'
) + 1j * np.reshape(vals[2::2], shape, order='F')
if with_t:
if time_unit is not None:
yield gate, UnitFloat(vals[0], time_unit)
else:
yield gate, vals[0]
else:
yield gate
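# Hypothetical usage sketch (file name assumed, not part of the original module):
#     for gate, t in get_prop_gate_of_t('gates.dat', with_t=True):
#         print(t, gate.shape)  # time stamp and an (n, n) complex matrix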
|
[
"numpy.reshape",
"re.search"
] |
[((1834, 1874), 'numpy.reshape', 'np.reshape', (['vals[1::2]', 'shape'], {'order': '"""F"""'}), "(vals[1::2], shape, order='F')\n", (1844, 1874), True, 'import numpy as np\n'), ((1920, 1960), 'numpy.reshape', 'np.reshape', (['vals[2::2]', 'shape'], {'order': '"""F"""'}), "(vals[2::2], shape, order='F')\n", (1930, 1960), True, 'import numpy as np\n'), ((1490, 1526), 're.search', 're.search', (['"""t\\\\s*\\\\[(\\\\w+)\\\\]"""', 'line'], {}), "('t\\\\s*\\\\[(\\\\w+)\\\\]', line)\n", (1499, 1526), False, 'import re\n')]
|
import logging
import numpy as np
from collections import Counter
from imblearn.base import SamplerMixin
from imblearn.utils import check_target_type, hash_X_y
from sklearn.utils import check_X_y, check_random_state, safe_indexing
__all__ = ['RandomUnderSampler']
def check_ratio(ratio, y):
"""check and returns actual and valid ratio"""
target_stats = Counter(y)
diff_target = set(ratio.keys()) - set(target_stats.keys())
# check to ensure all keys in ratio are also in y
# and the ratio are all positive
if diff_target:
raise ValueError(
'The {} target class is/are not present in the data.'.format(diff_target))
if any(n_samples < 0 for n_samples in ratio.values()):
raise ValueError(
'The proportion of samples in a class cannot be negative. '
'Input ratio contains some negative value: {}'.format(ratio))
checked_ratio = {}
for target, n_samples in ratio.items():
target_samples = target_stats[target]
# if it's a float then assume it's asking for a
# proportion of the targeted sample
if isinstance(n_samples, float):
n_samples = int(n_samples * target_samples)
if n_samples > target_samples:
raise ValueError(
'With under-sampling methods, the number of '
'samples in a class should be less or equal '
'to the original number of samples. '
'Originally, there is {} samples and {} '
'samples are asked.'.format(target_samples, n_samples))
checked_ratio[target] = n_samples
return checked_ratio
class BaseSampler(SamplerMixin):
"""
Base class for sampling algorithms.
Warning: This class should not be used directly.
Use the derive classes instead.
"""
def __init__(self, ratio):
self.ratio = ratio
self.logger = logging.getLogger(__name__)
def fit(self, X, y):
"""
Find the classes statistics to perform sampling.
Parameters
----------
X : 2d ndarray or scipy sparse matrix, shape [n_samples, n_features]
Matrix containing the data which have to be sampled.
y : 1d ndarray, shape [n_samples]
Corresponding label for each sample in X.
Returns
-------
self
"""
X, y = check_X_y(X, y, accept_sparse = ['csr', 'csc'])
y = check_target_type(y)
self.X_hash_, self.y_hash_ = hash_X_y(X, y)
self.ratio_ = check_ratio(self.ratio, y)
return self
class RandomUnderSampler(BaseSampler):
"""
Class to perform random under-sampling.
Under-sample the majority class(es) by randomly picking samples
with or without replacement.
This is an "improvement" of imbalance learn's RandomUnderSampler [1]_
by only accepting a dictionary for the ratio argument and supports
float value indicating the proportional sampling.
Parameters
----------
ratio : dict[(int, int/float)]
Ratio to use for resampling the data set.
Keys correspond to the targeted classes and the values
correspond to the desired number/proportion of samples.
        e.g. {0: 1.0, 1: 0.5} because the values are float, this
is read as we'll keep all samples from class label 0 and
keep only 50 percent of class label 1, note that in this
case {1: 0.5} will also work. We could also specify integer
value for the values in the dictionary to indicate the
actual number of samples to retain.
replacement : bool, default False
Whether the sample is with or without replacement.
random_state : int, RandomState instance or None, default None
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
Attributes
----------
ratio_ : dict[(int, int)]
The actual ratio that was used for resampling the data set,
where the class label is the key and the number of samples is the value
X_hash_/y_hash_ : str
Hash identifier of the input X and y. This is used for ensuring
the X and y that was used for fitting is identical to sampling
(resampling is only meant for the same "training" set)
References
----------
.. [1] `imbalanced-learn RandomUnderSampler
<http://contrib.scikit-learn.org/imbalanced-learn/stable/generated/imblearn.under_sampling.RandomUnderSampler.html>`_
"""
def __init__(self, ratio, replacement = False, random_state = None):
super().__init__(ratio = ratio)
self.replacement = replacement
self.random_state = random_state
def _sample(self, X, y):
"""resample the dataset"""
random_state = check_random_state(self.random_state)
sample_indices = []
targets = np.unique(y)
for target in targets:
target_indices = np.flatnonzero(y == target)
if target in self.ratio_:
n_samples = self.ratio_[target]
target_indices = random_state.choice(
target_indices, size = n_samples, replace = self.replacement)
sample_indices.append(target_indices)
sample_indices = np.hstack(sample_indices)
return safe_indexing(X, sample_indices), safe_indexing(y, sample_indices)
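# Minimal usage sketch (data and the inherited resampling call are assumed,
# added for illustration): keep all of class 0 and half of class 1 via the
# fit/sample API provided by imblearn's SamplerMixin.
#     sampler = RandomUnderSampler(ratio={1: 0.5}, random_state=0)
#     X_res, y_res = sampler.fit(X, y).sample(X, y)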
|
[
"logging.getLogger",
"sklearn.utils.check_random_state",
"numpy.unique",
"sklearn.utils.check_X_y",
"numpy.hstack",
"numpy.flatnonzero",
"sklearn.utils.safe_indexing",
"collections.Counter",
"imblearn.utils.check_target_type",
"imblearn.utils.hash_X_y"
] |
[((365, 375), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (372, 375), False, 'from collections import Counter\n'), ((1916, 1943), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1933, 1943), False, 'import logging\n'), ((2390, 2435), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {'accept_sparse': "['csr', 'csc']"}), "(X, y, accept_sparse=['csr', 'csc'])\n", (2399, 2435), False, 'from sklearn.utils import check_X_y, check_random_state, safe_indexing\n'), ((2450, 2470), 'imblearn.utils.check_target_type', 'check_target_type', (['y'], {}), '(y)\n', (2467, 2470), False, 'from imblearn.utils import check_target_type, hash_X_y\n'), ((2508, 2522), 'imblearn.utils.hash_X_y', 'hash_X_y', (['X', 'y'], {}), '(X, y)\n', (2516, 2522), False, 'from imblearn.utils import check_target_type, hash_X_y\n'), ((4983, 5020), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (5001, 5020), False, 'from sklearn.utils import check_X_y, check_random_state, safe_indexing\n'), ((5068, 5080), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5077, 5080), True, 'import numpy as np\n'), ((5468, 5493), 'numpy.hstack', 'np.hstack', (['sample_indices'], {}), '(sample_indices)\n', (5477, 5493), True, 'import numpy as np\n'), ((5141, 5168), 'numpy.flatnonzero', 'np.flatnonzero', (['(y == target)'], {}), '(y == target)\n', (5155, 5168), True, 'import numpy as np\n'), ((5509, 5541), 'sklearn.utils.safe_indexing', 'safe_indexing', (['X', 'sample_indices'], {}), '(X, sample_indices)\n', (5522, 5541), False, 'from sklearn.utils import check_X_y, check_random_state, safe_indexing\n'), ((5543, 5575), 'sklearn.utils.safe_indexing', 'safe_indexing', (['y', 'sample_indices'], {}), '(y, sample_indices)\n', (5556, 5575), False, 'from sklearn.utils import check_X_y, check_random_state, safe_indexing\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
import create_datasets
import data_utils
import predict
import trainer
def test_validated_missing_field() -> None:
tensor_dict = {}
values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
with pytest.raises(KeyError):
trainer.validated(tensor_dict, values_spec)
def test_validated_incompatible_type() -> None:
tensor_dict = {"x": tf.constant(["a", "b", "c"])}
values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
with pytest.raises(TypeError):
trainer.validated(tensor_dict, values_spec)
def test_validated_incompatible_shape() -> None:
tensor_dict = {"x": tf.constant([1.0])}
values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
with pytest.raises(ValueError):
trainer.validated(tensor_dict, values_spec)
def test_validated_ok() -> None:
tensor_dict = {"x": tf.constant([1.0, 2.0, 3.0])}
values_spec = {"x": tf.TensorSpec(shape=(3,), dtype=tf.float32)}
trainer.validated(tensor_dict, values_spec)
tensor_dict = {"x": tf.constant([[1.0], [2.0], [3.0]])}
values_spec = {"x": tf.TensorSpec(shape=(None, 1), dtype=tf.float32)}
trainer.validated(tensor_dict, values_spec)
def test_serialize_deserialize() -> None:
unlabeled_data = data_utils.read_data("test_data/56980685061237.npz")
labels = data_utils.read_labels("test_data/labels.csv")
data = data_utils.label_data(unlabeled_data, labels)
for training_point in data_utils.generate_training_points(data):
serialized = trainer.serialize(training_point)
inputs, outputs = trainer.deserialize(serialized)
assert set(inputs.keys()) == set(trainer.INPUTS_SPEC.keys())
assert set(outputs.keys()) == set(trainer.OUTPUTS_SPEC.keys())
@mock.patch.object(trainer, "PADDING", 2)
def test_e2e_local() -> None:
with tempfile.TemporaryDirectory() as temp_dir:
train_data_dir = os.path.join(temp_dir, "datasets", "train")
eval_data_dir = os.path.join(temp_dir, "datasets", "eval")
model_dir = os.path.join(temp_dir, "model")
tensorboard_dir = os.path.join(temp_dir, "tensorboard")
checkpoint_dir = os.path.join(temp_dir, "checkpoints")
# Create the dataset TFRecord files.
create_datasets.run(
raw_data_dir="test_data",
raw_labels_dir="test_data",
train_data_dir=train_data_dir,
eval_data_dir=eval_data_dir,
train_eval_split=[80, 20],
)
assert os.listdir(train_data_dir), "no training files found"
assert os.listdir(eval_data_dir), "no evaluation files found"
# Train the model and save it.
trainer.run(
train_data_dir=train_data_dir,
eval_data_dir=eval_data_dir,
model_dir=model_dir,
tensorboard_dir=tensorboard_dir,
checkpoint_dir=checkpoint_dir,
train_epochs=10,
batch_size=32,
)
assert os.listdir(model_dir), "no model files found"
assert os.listdir(tensorboard_dir), "no tensorboard files found"
assert os.listdir(checkpoint_dir), "no checkpoint files found"
# Load the trained model and make a prediction.
with open("test_data/56980685061237.npz", "rb") as f:
input_data = pd.DataFrame(np.load(f)["x"])
predictions = predict.run(model_dir, input_data.to_dict("list"))
# Check that we get non-empty predictions.
assert "is_fishing" in predictions
assert len(predictions["is_fishing"]) > 0
|
[
"data_utils.generate_training_points",
"data_utils.read_data",
"trainer.validated",
"trainer.OUTPUTS_SPEC.keys",
"os.listdir",
"data_utils.read_labels",
"trainer.deserialize",
"tensorflow.TensorSpec",
"pytest.raises",
"trainer.run",
"tempfile.TemporaryDirectory",
"trainer.INPUTS_SPEC.keys",
"os.path.join",
"create_datasets.run",
"tensorflow.constant",
"data_utils.label_data",
"unittest.mock.patch.object",
"trainer.serialize",
"numpy.load"
] |
[((2461, 2501), 'unittest.mock.patch.object', 'mock.patch.object', (['trainer', '"""PADDING"""', '(2)'], {}), "(trainer, 'PADDING', 2)\n", (2478, 2501), False, 'from unittest import mock\n'), ((1674, 1717), 'trainer.validated', 'trainer.validated', (['tensor_dict', 'values_spec'], {}), '(tensor_dict, values_spec)\n', (1691, 1717), False, 'import trainer\n'), ((1857, 1900), 'trainer.validated', 'trainer.validated', (['tensor_dict', 'values_spec'], {}), '(tensor_dict, values_spec)\n', (1874, 1900), False, 'import trainer\n'), ((1966, 2018), 'data_utils.read_data', 'data_utils.read_data', (['"""test_data/56980685061237.npz"""'], {}), "('test_data/56980685061237.npz')\n", (1986, 2018), False, 'import data_utils\n'), ((2032, 2078), 'data_utils.read_labels', 'data_utils.read_labels', (['"""test_data/labels.csv"""'], {}), "('test_data/labels.csv')\n", (2054, 2078), False, 'import data_utils\n'), ((2090, 2135), 'data_utils.label_data', 'data_utils.label_data', (['unlabeled_data', 'labels'], {}), '(unlabeled_data, labels)\n', (2111, 2135), False, 'import data_utils\n'), ((2162, 2203), 'data_utils.generate_training_points', 'data_utils.generate_training_points', (['data'], {}), '(data)\n', (2197, 2203), False, 'import data_utils\n'), ((869, 912), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(3,)', 'dtype': 'tf.float32'}), '(shape=(3,), dtype=tf.float32)\n', (882, 912), True, 'import tensorflow as tf\n'), ((923, 946), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (936, 946), False, 'import pytest\n'), ((956, 999), 'trainer.validated', 'trainer.validated', (['tensor_dict', 'values_spec'], {}), '(tensor_dict, values_spec)\n', (973, 999), False, 'import trainer\n'), ((1074, 1102), 'tensorflow.constant', 'tf.constant', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1085, 1102), True, 'import tensorflow as tf\n'), ((1128, 1171), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(3,)', 'dtype': 'tf.float32'}), '(shape=(3,), dtype=tf.float32)\n', (1141, 1171), True, 'import tensorflow as tf\n'), ((1182, 1206), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1195, 1206), False, 'import pytest\n'), ((1216, 1259), 'trainer.validated', 'trainer.validated', (['tensor_dict', 'values_spec'], {}), '(tensor_dict, values_spec)\n', (1233, 1259), False, 'import trainer\n'), ((1335, 1353), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {}), '([1.0])\n', (1346, 1353), True, 'import tensorflow as tf\n'), ((1379, 1422), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(3,)', 'dtype': 'tf.float32'}), '(shape=(3,), dtype=tf.float32)\n', (1392, 1422), True, 'import tensorflow as tf\n'), ((1433, 1458), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1446, 1458), False, 'import pytest\n'), ((1468, 1511), 'trainer.validated', 'trainer.validated', (['tensor_dict', 'values_spec'], {}), '(tensor_dict, values_spec)\n', (1485, 1511), False, 'import trainer\n'), ((1571, 1599), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (1582, 1599), True, 'import tensorflow as tf\n'), ((1625, 1668), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(3,)', 'dtype': 'tf.float32'}), '(shape=(3,), dtype=tf.float32)\n', (1638, 1668), True, 'import tensorflow as tf\n'), ((1743, 1777), 'tensorflow.constant', 'tf.constant', (['[[1.0], [2.0], [3.0]]'], {}), '([[1.0], [2.0], [3.0]])\n', (1754, 1777), True, 'import tensorflow as tf\n'), ((1803, 1851), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], 
{'shape': '(None, 1)', 'dtype': 'tf.float32'}), '(shape=(None, 1), dtype=tf.float32)\n', (1816, 1851), True, 'import tensorflow as tf\n'), ((2226, 2259), 'trainer.serialize', 'trainer.serialize', (['training_point'], {}), '(training_point)\n', (2243, 2259), False, 'import trainer\n'), ((2286, 2317), 'trainer.deserialize', 'trainer.deserialize', (['serialized'], {}), '(serialized)\n', (2305, 2317), False, 'import trainer\n'), ((2541, 2570), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2568, 2570), False, 'import tempfile\n'), ((2609, 2652), 'os.path.join', 'os.path.join', (['temp_dir', '"""datasets"""', '"""train"""'], {}), "(temp_dir, 'datasets', 'train')\n", (2621, 2652), False, 'import os\n'), ((2677, 2719), 'os.path.join', 'os.path.join', (['temp_dir', '"""datasets"""', '"""eval"""'], {}), "(temp_dir, 'datasets', 'eval')\n", (2689, 2719), False, 'import os\n'), ((2740, 2771), 'os.path.join', 'os.path.join', (['temp_dir', '"""model"""'], {}), "(temp_dir, 'model')\n", (2752, 2771), False, 'import os\n'), ((2798, 2835), 'os.path.join', 'os.path.join', (['temp_dir', '"""tensorboard"""'], {}), "(temp_dir, 'tensorboard')\n", (2810, 2835), False, 'import os\n'), ((2861, 2898), 'os.path.join', 'os.path.join', (['temp_dir', '"""checkpoints"""'], {}), "(temp_dir, 'checkpoints')\n", (2873, 2898), False, 'import os\n'), ((2953, 3121), 'create_datasets.run', 'create_datasets.run', ([], {'raw_data_dir': '"""test_data"""', 'raw_labels_dir': '"""test_data"""', 'train_data_dir': 'train_data_dir', 'eval_data_dir': 'eval_data_dir', 'train_eval_split': '[80, 20]'}), "(raw_data_dir='test_data', raw_labels_dir='test_data',\n train_data_dir=train_data_dir, eval_data_dir=eval_data_dir,\n train_eval_split=[80, 20])\n", (2972, 3121), False, 'import create_datasets\n'), ((3200, 3226), 'os.listdir', 'os.listdir', (['train_data_dir'], {}), '(train_data_dir)\n', (3210, 3226), False, 'import os\n'), ((3269, 3294), 'os.listdir', 'os.listdir', (['eval_data_dir'], {}), '(eval_data_dir)\n', (3279, 3294), False, 'import os\n'), ((3372, 3569), 'trainer.run', 'trainer.run', ([], {'train_data_dir': 'train_data_dir', 'eval_data_dir': 'eval_data_dir', 'model_dir': 'model_dir', 'tensorboard_dir': 'tensorboard_dir', 'checkpoint_dir': 'checkpoint_dir', 'train_epochs': '(10)', 'batch_size': '(32)'}), '(train_data_dir=train_data_dir, eval_data_dir=eval_data_dir,\n model_dir=model_dir, tensorboard_dir=tensorboard_dir, checkpoint_dir=\n checkpoint_dir, train_epochs=10, batch_size=32)\n', (3383, 3569), False, 'import trainer\n'), ((3671, 3692), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (3681, 3692), False, 'import os\n'), ((3732, 3759), 'os.listdir', 'os.listdir', (['tensorboard_dir'], {}), '(tensorboard_dir)\n', (3742, 3759), False, 'import os\n'), ((3805, 3831), 'os.listdir', 'os.listdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3815, 3831), False, 'import os\n'), ((2359, 2385), 'trainer.INPUTS_SPEC.keys', 'trainer.INPUTS_SPEC.keys', ([], {}), '()\n', (2383, 2385), False, 'import trainer\n'), ((2429, 2456), 'trainer.OUTPUTS_SPEC.keys', 'trainer.OUTPUTS_SPEC.keys', ([], {}), '()\n', (2454, 2456), False, 'import trainer\n'), ((4018, 4028), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (4025, 4028), True, 'import numpy as np\n')]
|
'''
<NAME>
simple ray trace - tools and classes to specify and instantiate rays
'''
import numpy as np
from srt_modules.useful_math import euler1232C
class Ray:
def __init__(self, pos=None, dirs=None):
self.X = pos # 3 x N position vectors of rays
self.d = dirs # direction vectors of rays in same frame
return
def set_pos(self, ray_starts):
self.X = ray_starts
return
def set_dir(self, ray_dirs):
self.d = ray_dirs
return
class AngledCircleRayDef:
# definition/inputs to make a light source which is a set of rays in concentric circles
# for a less naive generation of concentric circles of rays, vary the number of rays with sqrt(radius) of each ring.
def __init__(self):
self.rad = 0.5 # [m] radius of largest circle of rays
self.angles = [0.] # [arc sec] angle of rays measure wrt the instrument primary axis. providing a list will generate
# multiple sets of rays to be used in multiple runs of the experiment.
self.num_circ = 15 # number of concentric circles
self.per_circ = 150 # number of rays per circle
def make_angled_circle_rays(inputs):
rad_inc = inputs.rad / inputs.num_circ # radius increment
theta_inc = np.pi * 2 / inputs.per_circ # angle increment
ray_set_list = [] # set of sets of start points
for angle in inputs.angles:
rays = []
angle = angle / 3600. * np.pi / 180. # convert from arc sec to radians
for i in range(inputs.num_circ):
r = rad_inc * i
for j in range(inputs.per_circ):
# note x = 0 always. We assume the rays start at the y-z plane in the lab frame.
x, y, z = 0., r * np.cos(theta_inc * j), r * np.sin(theta_inc * j)
rays.append(np.array([x, y, z]))
rays = np.array(rays).transpose()
ray_dirs = np.array([np.array([1, 0, 0])] * np.shape(rays)[1]).transpose() # rays initialize down x-axis
DCM = euler1232C([0., 0., angle]).transpose()
ray_dirs = np.dot(DCM, ray_dirs) # rays rotated by given angle
ray_set_list.append(Ray(rays, ray_dirs))
return ray_set_list # here we have a list of ray sets. one set per angle given. many rays per set
def make_one_edge_ray(rad, angle):
# rad is radius of primary
# angle is the desired angle of the ray relative to primary centerline
# make one ray, starts at the edge of the generating circle at a specified angle. For checking secondary diameter
x, y, z = 0., rad, 0.,
L_X = np.array([x,y,z]).reshape([3, 1])
angle = angle/3600. * np.pi/180.
dir = np.array([np.cos(angle), -np.sin(angle), 0]).reshape([3, 1])
return Ray(L_X, dir)
|
[
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.sin",
"srt_modules.useful_math.euler1232C",
"numpy.shape"
] |
[((2083, 2104), 'numpy.dot', 'np.dot', (['DCM', 'ray_dirs'], {}), '(DCM, ray_dirs)\n', (2089, 2104), True, 'import numpy as np\n'), ((2585, 2604), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (2593, 2604), True, 'import numpy as np\n'), ((1869, 1883), 'numpy.array', 'np.array', (['rays'], {}), '(rays)\n', (1877, 1883), True, 'import numpy as np\n'), ((2024, 2053), 'srt_modules.useful_math.euler1232C', 'euler1232C', (['[0.0, 0.0, angle]'], {}), '([0.0, 0.0, angle])\n', (2034, 2053), False, 'from srt_modules.useful_math import euler1232C\n'), ((1833, 1852), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1841, 1852), True, 'import numpy as np\n'), ((2676, 2689), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2682, 2689), True, 'import numpy as np\n'), ((1756, 1777), 'numpy.cos', 'np.cos', (['(theta_inc * j)'], {}), '(theta_inc * j)\n', (1762, 1777), True, 'import numpy as np\n'), ((1783, 1804), 'numpy.sin', 'np.sin', (['(theta_inc * j)'], {}), '(theta_inc * j)\n', (1789, 1804), True, 'import numpy as np\n'), ((2692, 2705), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2698, 2705), True, 'import numpy as np\n'), ((1925, 1944), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1933, 1944), True, 'import numpy as np\n'), ((1948, 1962), 'numpy.shape', 'np.shape', (['rays'], {}), '(rays)\n', (1956, 1962), True, 'import numpy as np\n')]
|
import os
import torch
import time
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from generator import generator
from utils import get_model
sns.set_style("whitegrid")
font = {'family': 'serif',
'style': 'normal',
'size': 10}
matplotlib.rc('font', **font)
sfmt = matplotlib.ticker.ScalarFormatter(useMathText=True)
sfmt.set_powerlimits((0, 0))
matplotlib.use("Agg")
class DeepPrior(object):
def __init__(self, args):
if torch.cuda.is_available() and args.cuda:
self.device = torch.device('cuda')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
self.device = torch.device('cpu')
torch.set_default_tensor_type('torch.FloatTensor')
def train(self, args):
x, spacing, shape = get_model()
self.x = x.to(self.device)
self.extent = np.array([0., self.x.shape[2]*spacing[0],
self.x.shape[3]*spacing[1], 0.])/1.0e3
G = generator(self.x.shape).to(self.device)
z = torch.randn([1, 3] + G.crop.d_dim).to(self.device)
self.y = self.x + args.sigma*torch.randn(self.x.shape).to(self.device)
optim = torch.optim.Adam(G.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
lmbda = lambda epoch: 0.55
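        # multiplicative LR schedule: every scheduler.step() call scales the learning rate by 0.55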
scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optim,
lr_lambda=lmbda)
self.obj_log = []
self.err_log = []
with tqdm(range(args.max_itr), unit=" itr's", colour='#B5F2A9') as pb:
for itr in pb:
self.xhat = G(z)
obj = torch.norm(self.xhat - self.y)**2
obj.backward()
optim.step()
optim.zero_grad()
if itr%100 == 0 or itr == args.max_itr - 1:
self.test(args, itr)
scheduler.step()
self.obj_log.append(obj.item())
self.err_log.append((torch.norm(self.x - self.xhat)**2).item())
pb.set_postfix(obj="{:.2e}".format(self.obj_log[-1]),
error="{:.2e}".format(self.err_log[-1]))
def test(self, args, itr):
fig = plt.figure("Objective", figsize=(7, 2.5))
plt.semilogy(self.obj_log, label="objective")
plt.semilogy(self.err_log, label="error")
plt.legend()
plt.title("Training objecjtive and prediction error")
plt.xlabel("Iterations")
plt.ylabel("Loss vs model error")
plt.grid(True)
plt.savefig(os.path.join(args.save_path, "training_obj.png"),
format="png", bbox_inches="tight", dpi=200,
pad_inches=.05)
plt.close(fig)
fig = plt.figure("predeiction", figsize=(7, 2.5))
plt.imshow(self.xhat.cpu().detach().squeeze().numpy().T,
vmin=-.04, vmax=.04, aspect=1,
extent=self.extent, cmap='seismic', alpha=1.0,
resample=True, interpolation="lanczos", filterrad=1)
plt.colorbar(fraction=0.085, pad=0.01, format=sfmt)
plt.xlabel("Horizontal distance (km)")
plt.ylabel("Depth (km)")
plt.grid(False)
plt.title("Prediction after " + str(itr) + " updates");
plt.savefig(os.path.join(args.save_path, "xhat_" +
str(itr) + ".png"), format="png",
bbox_inches="tight", dpi=200,
pad_inches=.05)
plt.close(fig)
if itr == 0:
fig = plt.figure("true model", figsize=(7, 2.5))
plt.imshow(self.x.cpu().detach().squeeze().numpy().T,
vmin=-.04, vmax=.04, aspect=1,
extent=self.extent, cmap='seismic', alpha=1.0,
resample=True, interpolation="lanczos", filterrad=1)
plt.colorbar(fraction=0.085, pad=0.01, format=sfmt)
plt.xlabel("Horizontal distance (km)")
plt.ylabel("Depth (km)")
plt.title("True model")
plt.grid(False)
plt.savefig(os.path.join(args.save_path, "x.png"),format="png",
bbox_inches="tight", dpi=200,
pad_inches=.05)
plt.close(fig)
fig = plt.figure("observed data", figsize=(7, 2.5))
plt.imshow(self.y.cpu().detach().squeeze().numpy().T,
vmin=-.04, vmax=.04, aspect=1,
extent=self.extent, cmap='seismic', alpha=1.0,
resample=True, interpolation="lanczos", filterrad=1)
plt.colorbar(fraction=0.085, pad=0.01, format=sfmt)
plt.xlabel("Horizontal distance (km)")
plt.ylabel("Depth (km)")
plt.title("Observed data")
plt.grid(False)
plt.savefig(os.path.join(args.save_path, "y.png"),format="png",
bbox_inches="tight", dpi=200,
pad_inches=.05)
plt.close(fig)
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.array",
"matplotlib.ticker.ScalarFormatter",
"torch.cuda.is_available",
"matplotlib.rc",
"matplotlib.pyplot.semilogy",
"generator.generator",
"matplotlib.pyplot.xlabel",
"torch.set_default_tensor_type",
"matplotlib.pyplot.close",
"torch.randn",
"utils.get_model",
"matplotlib.use",
"torch.norm",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"torch.device",
"matplotlib.pyplot.colorbar",
"os.path.join",
"matplotlib.pyplot.figure",
"torch.optim.lr_scheduler.MultiplicativeLR"
] |
[((208, 234), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (221, 234), True, 'import seaborn as sns\n'), ((309, 338), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (322, 338), False, 'import matplotlib\n'), ((346, 397), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (379, 397), False, 'import matplotlib\n'), ((427, 448), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (441, 448), False, 'import matplotlib\n'), ((854, 865), 'utils.get_model', 'get_model', ([], {}), '()\n', (863, 865), False, 'from utils import get_model\n'), ((1414, 1479), 'torch.optim.lr_scheduler.MultiplicativeLR', 'torch.optim.lr_scheduler.MultiplicativeLR', (['optim'], {'lr_lambda': 'lmbda'}), '(optim, lr_lambda=lmbda)\n', (1455, 1479), False, 'import torch\n'), ((2346, 2387), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Objective"""'], {'figsize': '(7, 2.5)'}), "('Objective', figsize=(7, 2.5))\n", (2356, 2387), True, 'import matplotlib.pyplot as plt\n'), ((2396, 2441), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.obj_log'], {'label': '"""objective"""'}), "(self.obj_log, label='objective')\n", (2408, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2491), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.err_log'], {'label': '"""error"""'}), "(self.err_log, label='error')\n", (2462, 2491), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2510, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2574), 'matplotlib.pyplot.title', 'plt.title', (['"""Training objecjtive and prediction error"""'], {}), "('Training objecjtive and prediction error')\n", (2530, 2574), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2607), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (2593, 2607), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2649), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss vs model error"""'], {}), "('Loss vs model error')\n", (2626, 2649), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2672), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2666, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2865), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2860, 2865), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2924), 'matplotlib.pyplot.figure', 'plt.figure', (['"""predeiction"""'], {'figsize': '(7, 2.5)'}), "('predeiction', figsize=(7, 2.5))\n", (2891, 2924), True, 'import matplotlib.pyplot as plt\n'), ((3186, 3237), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.085)', 'pad': '(0.01)', 'format': 'sfmt'}), '(fraction=0.085, pad=0.01, format=sfmt)\n', (3198, 3237), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal distance (km)"""'], {}), "('Horizontal distance (km)')\n", (3256, 3284), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3317), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (km)"""'], {}), "('Depth (km)')\n", (3303, 3317), True, 'import matplotlib.pyplot as plt\n'), ((3326, 3341), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (3334, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3627), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3622, 3627), True, 'import 
matplotlib.pyplot as plt\n'), ((518, 543), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (541, 543), False, 'import torch\n'), ((585, 605), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (597, 605), False, 'import torch\n'), ((618, 673), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (647, 673), False, 'import torch\n'), ((714, 733), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (726, 733), False, 'import torch\n'), ((746, 796), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (775, 796), False, 'import torch\n'), ((923, 1008), 'numpy.array', 'np.array', (['[0.0, self.x.shape[2] * spacing[0], self.x.shape[3] * spacing[1], 0.0]'], {}), '([0.0, self.x.shape[2] * spacing[0], self.x.shape[3] * spacing[1], 0.0]\n )\n', (931, 1008), True, 'import numpy as np\n'), ((2693, 2741), 'os.path.join', 'os.path.join', (['args.save_path', '"""training_obj.png"""'], {}), "(args.save_path, 'training_obj.png')\n", (2705, 2741), False, 'import os\n'), ((3668, 3710), 'matplotlib.pyplot.figure', 'plt.figure', (['"""true model"""'], {'figsize': '(7, 2.5)'}), "('true model', figsize=(7, 2.5))\n", (3678, 3710), True, 'import matplotlib.pyplot as plt\n'), ((3989, 4040), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.085)', 'pad': '(0.01)', 'format': 'sfmt'}), '(fraction=0.085, pad=0.01, format=sfmt)\n', (4001, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4053, 4091), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal distance (km)"""'], {}), "('Horizontal distance (km)')\n", (4063, 4091), True, 'import matplotlib.pyplot as plt\n'), ((4104, 4128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (km)"""'], {}), "('Depth (km)')\n", (4114, 4128), True, 'import matplotlib.pyplot as plt\n'), ((4141, 4164), 'matplotlib.pyplot.title', 'plt.title', (['"""True model"""'], {}), "('True model')\n", (4150, 4164), True, 'import matplotlib.pyplot as plt\n'), ((4177, 4192), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4185, 4192), True, 'import matplotlib.pyplot as plt\n'), ((4375, 4389), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4384, 4389), True, 'import matplotlib.pyplot as plt\n'), ((4409, 4454), 'matplotlib.pyplot.figure', 'plt.figure', (['"""observed data"""'], {'figsize': '(7, 2.5)'}), "('observed data', figsize=(7, 2.5))\n", (4419, 4454), True, 'import matplotlib.pyplot as plt\n'), ((4733, 4784), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.085)', 'pad': '(0.01)', 'format': 'sfmt'}), '(fraction=0.085, pad=0.01, format=sfmt)\n', (4745, 4784), True, 'import matplotlib.pyplot as plt\n'), ((4797, 4835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal distance (km)"""'], {}), "('Horizontal distance (km)')\n", (4807, 4835), True, 'import matplotlib.pyplot as plt\n'), ((4848, 4872), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (km)"""'], {}), "('Depth (km)')\n", (4858, 4872), True, 'import matplotlib.pyplot as plt\n'), ((4885, 4911), 'matplotlib.pyplot.title', 'plt.title', (['"""Observed data"""'], {}), "('Observed data')\n", (4894, 4911), True, 'import matplotlib.pyplot as plt\n'), ((4924, 4939), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4932, 4939), True, 'import matplotlib.pyplot as plt\n'), ((5122, 5136), 
'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5131, 5136), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1072), 'generator.generator', 'generator', (['self.x.shape'], {}), '(self.x.shape)\n', (1058, 1072), False, 'from generator import generator\n'), ((1102, 1136), 'torch.randn', 'torch.randn', (['([1, 3] + G.crop.d_dim)'], {}), '([1, 3] + G.crop.d_dim)\n', (1113, 1136), False, 'import torch\n'), ((4217, 4254), 'os.path.join', 'os.path.join', (['args.save_path', '"""x.png"""'], {}), "(args.save_path, 'x.png')\n", (4229, 4254), False, 'import os\n'), ((4964, 5001), 'os.path.join', 'os.path.join', (['args.save_path', '"""y.png"""'], {}), "(args.save_path, 'y.png')\n", (4976, 5001), False, 'import os\n'), ((1759, 1789), 'torch.norm', 'torch.norm', (['(self.xhat - self.y)'], {}), '(self.xhat - self.y)\n', (1769, 1789), False, 'import torch\n'), ((1190, 1215), 'torch.randn', 'torch.randn', (['self.x.shape'], {}), '(self.x.shape)\n', (1201, 1215), False, 'import torch\n'), ((2113, 2143), 'torch.norm', 'torch.norm', (['(self.x - self.xhat)'], {}), '(self.x - self.xhat)\n', (2123, 2143), False, 'import torch\n')]
|
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
import os
from scipy.spatial.distance import euclidean
from meta_mb.meta_envs.base import RandomEnv
#from mujoco-py.mujoco_py.pxd.mujoco import local
import mujoco_py
class PegFullBlueEnv(RandomEnv, utils.EzPickle):
def __init__(self, goal_dist=3e-2):
utils.EzPickle.__init__(**locals())
xml_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'assets', 'blue_full_peg_v1.xml')
x = 0.005
y = -0.5
z = -0.35
self.top_goal = np.array([x, y, z+0.15])
self.center_goal = np.array([x, y, z])
self.bottom_goal= np.array([x, y, z-0.15])
self.peg_loc = self.center_goal
self.goal_dist = goal_dist # permissible distance from goal
RandomEnv.__init__(self, 2, xml_file, 2)
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat[:-3],
#self.sim.data.body_xpos.flat[:3],
self.peg_location() - self.center_goal
])
def step(self, action):
self.do_simulation(action, self.frame_skip)
self.peg_loc = self.peg_location()
reward_dist = -self.peg_dist()
reward_ctrl = -np.square(action).sum()
reward = reward_dist + 1.25e-4 * reward_ctrl
self.peg_orient()
observation = self._get_obs()
done = False
info = dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
print(reward)
return observation, reward, done, info
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(low=-0.01, high=0.01, size=self.model.nq)
qvel = self.init_qvel + self.np_random.uniform(low=-0.01, high=0.01, size=self.model.nv)
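        # randomize the peg table body position, then read back the goal marker positions (g1, g2, g3)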
peg_table_position = np.random.uniform(low=[-0.2, -1, 0.3], high=[0.75, -0.6, 0.3])
self.sim.model.body_pos[-8] = peg_table_position
self.top_goal = self.get_body_com("g1")
self.center_goal = self.get_body_com("g2")
self.bottom_goal = self.get_body_com("g3")
qpos[-6:-3] = np.zeros((3, ))
qpos[-3:] = self.center_goal
qvel[-6:] = 0
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def reward(self, obs, act, obs_next):
assert obs.ndim == act.ndim == obs_next.ndim
if obs.ndim == 2:
assert obs.shape == obs_next.shape and act.shape[0] == obs.shape[0]
reward_ctrl = -np.sum(np.square(act), axis=1)
reward_dist = -self.peg_dist()
reward = reward_dist + 1.25e-4 * reward_ctrl
return np.clip(reward, -1e2, 1e2)
elif obs.ndim == 1:
assert obs.shape == obs_next.shape
reward_ctrl = -np.sum(np.square(act))
reward_dist = -self.peg_dist()
            reward = reward_dist + 1.25e-4 * reward_ctrl
return np.clip(reward, -1e2, 1e2)
else:
raise NotImplementedError
def peg_orient(self):
return self.data.get_body_xquat("peg-center")
def peg_dist(self):
top = self.get_body_com("peg-top")
center = self.get_body_com("peg-center")
bottom = self.get_body_com("peg-bottom")
distance = (euclidean(top, self.top_goal)
+ euclidean(center, self.center_goal)
+ euclidean(bottom, self.bottom_goal))
return distance
def peg_location(self):
return self.get_body_com("peg-center")
def top(self, center):
x = center[0]
y = center[1] + 0.3
z = center[2] - 0.4
return np.array([x, y, z])
def center(self, center):
x = center[0]
y = center[1] + 0.3
z = center[2] - 0.55
return np.array([x, y, z])
def bottom(self, center):
x = center[0]
y = center[1] + 0.3
z = center[2] - 0.7
return np.array([x, y, z])
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 2
self.viewer.cam.elevation = -20
self.viewer.cam.type = 0
self.viewer.cam.azimuth = 180
if __name__ == "__main__":
env = PegFullBlueEnv()
while True:
env.reset()
for _ in range(500):
action = env.action_space.sample()
env.step(action)
env.render()
|
[
"numpy.clip",
"numpy.square",
"numpy.array",
"numpy.zeros",
"scipy.spatial.distance.euclidean",
"os.path.dirname",
"numpy.random.uniform",
"meta_mb.meta_envs.base.RandomEnv.__init__"
] |
[((571, 597), 'numpy.array', 'np.array', (['[x, y, z + 0.15]'], {}), '([x, y, z + 0.15])\n', (579, 597), True, 'import numpy as np\n'), ((623, 642), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (631, 642), True, 'import numpy as np\n'), ((669, 695), 'numpy.array', 'np.array', (['[x, y, z - 0.15]'], {}), '([x, y, z - 0.15])\n', (677, 695), True, 'import numpy as np\n'), ((813, 853), 'meta_mb.meta_envs.base.RandomEnv.__init__', 'RandomEnv.__init__', (['self', '(2)', 'xml_file', '(2)'], {}), '(self, 2, xml_file, 2)\n', (831, 853), False, 'from meta_mb.meta_envs.base import RandomEnv\n'), ((1848, 1910), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '[-0.2, -1, 0.3]', 'high': '[0.75, -0.6, 0.3]'}), '(low=[-0.2, -1, 0.3], high=[0.75, -0.6, 0.3])\n', (1865, 1910), True, 'import numpy as np\n'), ((2142, 2156), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2150, 2156), True, 'import numpy as np\n'), ((3682, 3701), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3690, 3701), True, 'import numpy as np\n'), ((3827, 3846), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3835, 3846), True, 'import numpy as np\n'), ((3971, 3990), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3979, 3990), True, 'import numpy as np\n'), ((2697, 2727), 'numpy.clip', 'np.clip', (['reward', '(-100.0)', '(100.0)'], {}), '(reward, -100.0, 100.0)\n', (2704, 2727), True, 'import numpy as np\n'), ((3424, 3459), 'scipy.spatial.distance.euclidean', 'euclidean', (['bottom', 'self.bottom_goal'], {}), '(bottom, self.bottom_goal)\n', (3433, 3459), False, 'from scipy.spatial.distance import euclidean\n'), ((430, 455), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (445, 455), False, 'import os\n'), ((2967, 2997), 'numpy.clip', 'np.clip', (['reward', '(-100.0)', '(100.0)'], {}), '(reward, -100.0, 100.0)\n', (2974, 2997), True, 'import numpy as np\n'), ((3314, 3343), 'scipy.spatial.distance.euclidean', 'euclidean', (['top', 'self.top_goal'], {}), '(top, self.top_goal)\n', (3323, 3343), False, 'from scipy.spatial.distance import euclidean\n'), ((3366, 3401), 'scipy.spatial.distance.euclidean', 'euclidean', (['center', 'self.center_goal'], {}), '(center, self.center_goal)\n', (3375, 3401), False, 'from scipy.spatial.distance import euclidean\n'), ((1294, 1311), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1303, 1311), True, 'import numpy as np\n'), ((2554, 2568), 'numpy.square', 'np.square', (['act'], {}), '(act)\n', (2563, 2568), True, 'import numpy as np\n'), ((2833, 2847), 'numpy.square', 'np.square', (['act'], {}), '(act)\n', (2842, 2847), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
import math
from PIL import Image
#import pdb
F = tf.app.flags.FLAGS
"""
Save tensorflow model
Parameters:
* checkpoint_dir - name of the directory where model is to be saved
* sess - current tensorflow session
* saver - tensorflow saver
"""
def save_model(checkpoint_dir, sess, saver):
model_name = "model.ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver.save(sess, os.path.join(checkpoint_dir, model_name))
"""
Load tensorflow model
Parameters:
* checkpoint_dir - name of the directory where model is to be loaded from
* sess - current tensorflow session
* saver - tensorflow saver
Returns: True if the model loaded successfully, else False
"""
def load_model(checkpoint_dir, sess, saver):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
"""
To recompose an array of 3D images from patches
"""
def recompose3D_overlap(preds, img_h, img_w, img_d, stride_h, stride_w, stride_d):
patch_h = preds.shape[1]
patch_w = preds.shape[2]
patch_d = preds.shape[3]
N_patches_h = (img_h-patch_h)//stride_h+1
N_patches_w = (img_w-patch_w)//stride_w+1
N_patches_d = (img_d-patch_d)//stride_d+1
N_patches_img = N_patches_h * N_patches_w * N_patches_d
print("N_patches_h: " ,N_patches_h)
print("N_patches_w: " ,N_patches_w)
print("N_patches_d: " ,N_patches_d)
print("N_patches_img: ",N_patches_img)
assert(preds.shape[0]%N_patches_img==0)
N_full_imgs = preds.shape[0]//N_patches_img
print("According to the dimension inserted, there are " \
+str(N_full_imgs) +" full images (of " +str(img_h)+"x" +str(img_w)+"x" +str(img_d) +" each)")
    # initialize to zero mega array with sum of probabilities
raw_pred_martrix = np.zeros((N_full_imgs,img_h,img_w,img_d))
raw_sum = np.zeros((N_full_imgs,img_h,img_w,img_d))
final_matrix = np.zeros((N_full_imgs,img_h,img_w,img_d),dtype='uint16')
k = 0
# iterator over all the patches
for i in range(N_full_imgs):
for h in range((img_h-patch_h)//stride_h+1):
for w in range((img_w-patch_w)//stride_w+1):
for d in range((img_d-patch_d)//stride_d+1):
raw_pred_martrix[i,h*stride_h:(h*stride_h)+patch_h,\
w*stride_w:(w*stride_w)+patch_w,\
d*stride_d:(d*stride_d)+patch_d]+=preds[k]
raw_sum[i,h*stride_h:(h*stride_h)+patch_h,\
w*stride_w:(w*stride_w)+patch_w,\
d*stride_d:(d*stride_d)+patch_d]+=1.0
k+=1
assert(k==preds.shape[0])
#To check for non zero sum matrix
assert(np.min(raw_sum)>=1.0)
final_matrix = np.around(raw_pred_martrix/raw_sum)
return final_matrix
#functions below are added by liuhuaqing 2019-07-15
def make_grid(tensor, nrow=8, padding=2,
normalize=False, scale_each=False):
"""Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
nmaps = tensor.shape[0]
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2], dtype=np.uint8)
k = 0
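    # paste each image into its cell of the grid, left to right and top to bottom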
for y in range(ymaps):
for x in range(xmaps):
if k >= nmaps:
break
h, h_width = y * height + 1 + padding // 2, height - padding
w, w_width = x * width + 1 + padding // 2, width - padding
grid[h:h+h_width, w:w+w_width] = tensor[k]
k = k + 1
return grid
def save_image(tensor, filename, nrow=8, padding=2,
normalize=False, scale_each=False):
ndarr = make_grid(tensor, nrow=nrow, padding=padding,
normalize=normalize, scale_each=scale_each)
im = Image.fromarray(ndarr)
im.save(filename)
# Definition and computation of semantic segmentation accuracy metrics; reference: https://blog.csdn.net/majinlei121/article/details/78965435
def fast_hist(a, b, n):
    k = (a >= 0) & (a < n) # under normal conditions this is all True
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n) # np.bincount counts the occurrences of each value (in ascending order) in the array
def Hist(a,b,n):
hist = fast_hist(a,b,n)
return hist
def pixelAccuracy(trueMask,predMask,n_cls):
hist = Hist(trueMask,predMask,n_cls)
PA = np.diag(hist).sum() / hist.sum()
return PA
def MeanPixelAccuracy(trueMask,predMask,n_cls):
#epsilon = 1
hist = Hist(trueMask,predMask,n_cls)
PAs = np.diag(hist) / hist.sum(1)
return PAs
def IntersectionoverUnion(trueMask,predMask,n_cls):
#epsilon = 1
hist = Hist(trueMask,predMask,n_cls)
IoUs = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
return IoUs
def DiceScore(trueMask,predMask,n_cls):
# epsilon = 1
hist = Hist(trueMask,predMask,n_cls)
    correct_pred = np.diag(hist) # number of pixels correctly predicted for each class
    pred_classes = np.sum(hist,0) # number of predicted pixels for each class
    true_classes = np.sum(hist,1) # number of ground-truth pixels for each class
DSs = 2*correct_pred/(pred_classes+true_classes)
return DSs
|
[
"os.path.exists",
"PIL.Image.fromarray",
"os.makedirs",
"os.path.join",
"numpy.diag",
"tensorflow.train.get_checkpoint_state",
"numpy.sum",
"numpy.zeros",
"numpy.around",
"os.path.basename",
"numpy.min"
] |
[((838, 883), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (867, 883), True, 'import tensorflow as tf\n'), ((1990, 2034), 'numpy.zeros', 'np.zeros', (['(N_full_imgs, img_h, img_w, img_d)'], {}), '((N_full_imgs, img_h, img_w, img_d))\n', (1998, 2034), True, 'import numpy as np\n'), ((2045, 2089), 'numpy.zeros', 'np.zeros', (['(N_full_imgs, img_h, img_w, img_d)'], {}), '((N_full_imgs, img_h, img_w, img_d))\n', (2053, 2089), True, 'import numpy as np\n'), ((2104, 2164), 'numpy.zeros', 'np.zeros', (['(N_full_imgs, img_h, img_w, img_d)'], {'dtype': '"""uint16"""'}), "((N_full_imgs, img_h, img_w, img_d), dtype='uint16')\n", (2112, 2164), True, 'import numpy as np\n'), ((2902, 2939), 'numpy.around', 'np.around', (['(raw_pred_martrix / raw_sum)'], {}), '(raw_pred_martrix / raw_sum)\n', (2911, 2939), True, 'import numpy as np\n'), ((3396, 3495), 'numpy.zeros', 'np.zeros', (['[height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2]'], {'dtype': 'np.uint8'}), '([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding //\n 2], dtype=np.uint8)\n', (3404, 3495), True, 'import numpy as np\n'), ((4090, 4112), 'PIL.Image.fromarray', 'Image.fromarray', (['ndarr'], {}), '(ndarr)\n', (4105, 4112), False, 'from PIL import Image\n'), ((5079, 5092), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (5086, 5092), True, 'import numpy as np\n'), ((5127, 5142), 'numpy.sum', 'np.sum', (['hist', '(0)'], {}), '(hist, 0)\n', (5133, 5142), True, 'import numpy as np\n'), ((5176, 5191), 'numpy.sum', 'np.sum', (['hist', '(1)'], {}), '(hist, 1)\n', (5182, 5191), True, 'import numpy as np\n'), ((380, 410), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (394, 410), False, 'import os\n'), ((416, 443), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (427, 443), False, 'import os\n'), ((463, 503), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'model_name'], {}), '(checkpoint_dir, model_name)\n', (475, 503), False, 'import os\n'), ((942, 986), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (958, 986), False, 'import os\n'), ((2863, 2878), 'numpy.min', 'np.min', (['raw_sum'], {}), '(raw_sum)\n', (2869, 2878), True, 'import numpy as np\n'), ((4720, 4733), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (4727, 4733), True, 'import numpy as np\n'), ((4885, 4898), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (4892, 4898), True, 'import numpy as np\n'), ((1011, 1050), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (1023, 1050), False, 'import os\n'), ((4930, 4943), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (4937, 4943), True, 'import numpy as np\n'), ((4556, 4569), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (4563, 4569), True, 'import numpy as np\n')]
|