code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---|
# Question 06, Lab 05
# AB Satyaprakash - 180123062
# imports ----------------------------------------------------------------------------
from math import cos, log, sin, pi
from sympy.abc import t
import numpy as np
import pandas as pd
import sympy as sp
from scipy.integrate import quad
# global dictionaries ----------------------------------------------------------------
phi = {-1: 0, 0: 1}
alpha = {}
beta = {0: 0}
# functions --------------------------------------------------------------------------
def getFuncVal(x):
# cosx log(sinx)/(1 + sin^2(x))
return (cos(x)*log(sin(x))/(1+sin(x)**2))
def getInnerProduct(a, b):
poly = np.poly1d(np.polyint(np.polymul(a, b)))
return poly(1) - poly(-1)
def getPhi(n):
# The value of ϕ(n) = (x - α(n-1))ϕ(n-1) - β(n-1)ϕ(n-2)
if n in phi:
return phi[n]
a = getAlpha(n-1)
b = getBeta(n-1)
phi_n_1 = getPhi(n-1)
phi_n_2 = getPhi(n-2)
secondTerm = np.polymul([b], phi_n_2)
firstTerm = np.polymul(np.polysub([1, 0], a), phi_n_1)
phi[n] = np.polysub(firstTerm, secondTerm)
return phi[n]
def getAlpha(n):
# The value of α(n) = <xϕ(n), ϕ(n)>/<ϕ(n) , ϕ(n)>
if n in alpha:
return alpha[n]
phi_n = getPhi(n)
xphi_n = np.polymul([1, 0], phi_n)
num = getInnerProduct(xphi_n, phi_n)
denom = getInnerProduct(phi_n, phi_n)
alpha[n] = num/denom
return alpha[n]
def getBeta(n):
# The value of β(n) = <ϕ(n), ϕ(n)>/<ϕ(n-1) , ϕ(n-1)>
if n in beta:
return beta[n]
phi_n = getPhi(n)
phi_n_1 = getPhi(n-1)
num = getInnerProduct(phi_n, phi_n)
denom = getInnerProduct(phi_n_1, phi_n_1)
beta[n] = num/denom
return beta[n]
def getGaussian(f, roots, W, limits):
a, b = limits
I = 0
for x, w in zip(roots, W):
I += w*f(x, a, b)
return I
def f(x, a, b):
c1, c2 = (b-a)/2, (b+a)/2
x = c1*x + c2
return c1*getFuncVal(x)
# program body
table = []
for k in range(1, 6):
n = k+1
poly = np.poly1d(getPhi(n))
roots = poly.r
roots.sort()
temp = [[1] + [0]*(i) for i in range(n)]
output = []
for P in temp:
P = (np.polyint(P))
P = np.poly1d(P)
I = P(1) - P(-1)
output.append(I)
X = []
for i in range(n):
temp = [x**i for x in roots]
X.append(temp)
X_ = np.linalg.inv(X)
W = np.dot(X_, output)
limits = (0, pi/2)
result = getGaussian(f, roots, W, limits)
table.append([k, round(result, 2)])
df = pd.DataFrame(
table, columns=['N', 'Evaluated value using N+1 point Gaussian Quadrature'])
print(df)
ans, _ = quad(getFuncVal, 0, pi/2)
print("The actual value of the integral is", ans)
|
[
"pandas.DataFrame",
"numpy.polysub",
"numpy.poly1d",
"scipy.integrate.quad",
"math.sin",
"numpy.polyint",
"numpy.linalg.inv",
"math.cos",
"numpy.polymul",
"numpy.dot"
] |
[((2515, 2608), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {'columns': "['N', 'Evaluated value using N+1 point Gaussian Quadrature']"}), "(table, columns=['N',\n 'Evaluated value using N+1 point Gaussian Quadrature'])\n", (2527, 2608), True, 'import pandas as pd\n'), ((2631, 2658), 'scipy.integrate.quad', 'quad', (['getFuncVal', '(0)', '(pi / 2)'], {}), '(getFuncVal, 0, pi / 2)\n', (2635, 2658), False, 'from scipy.integrate import quad\n'), ((950, 974), 'numpy.polymul', 'np.polymul', (['[b]', 'phi_n_2'], {}), '([b], phi_n_2)\n', (960, 974), True, 'import numpy as np\n'), ((1047, 1080), 'numpy.polysub', 'np.polysub', (['firstTerm', 'secondTerm'], {}), '(firstTerm, secondTerm)\n', (1057, 1080), True, 'import numpy as np\n'), ((1251, 1276), 'numpy.polymul', 'np.polymul', (['[1, 0]', 'phi_n'], {}), '([1, 0], phi_n)\n', (1261, 1276), True, 'import numpy as np\n'), ((2354, 2370), 'numpy.linalg.inv', 'np.linalg.inv', (['X'], {}), '(X)\n', (2367, 2370), True, 'import numpy as np\n'), ((2379, 2397), 'numpy.dot', 'np.dot', (['X_', 'output'], {}), '(X_, output)\n', (2385, 2397), True, 'import numpy as np\n'), ((1002, 1023), 'numpy.polysub', 'np.polysub', (['[1, 0]', 'a'], {}), '([1, 0], a)\n', (1012, 1023), True, 'import numpy as np\n'), ((2159, 2172), 'numpy.polyint', 'np.polyint', (['P'], {}), '(P)\n', (2169, 2172), True, 'import numpy as np\n'), ((2186, 2198), 'numpy.poly1d', 'np.poly1d', (['P'], {}), '(P)\n', (2195, 2198), True, 'import numpy as np\n'), ((580, 586), 'math.cos', 'cos', (['x'], {}), '(x)\n', (583, 586), False, 'from math import cos, log, sin, pi\n'), ((675, 691), 'numpy.polymul', 'np.polymul', (['a', 'b'], {}), '(a, b)\n', (685, 691), True, 'import numpy as np\n'), ((591, 597), 'math.sin', 'sin', (['x'], {}), '(x)\n', (594, 597), False, 'from math import cos, log, sin, pi\n'), ((602, 608), 'math.sin', 'sin', (['x'], {}), '(x)\n', (605, 608), False, 'from math import cos, log, sin, pi\n')]
|
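As a quick sanity check on the orthogonal-polynomial node/weight construction in the script above, the following sketch evaluates the same integral with NumPy's built-in Gauss-Legendre rule (numpy.polynomial.legendre.leggauss); the range of point counts is illustrative.

from math import cos, log, sin, pi
import numpy as np

def integrand(x):
    # cos(x)*log(sin(x)) / (1 + sin^2(x)), same integrand as getFuncVal above
    return cos(x) * log(sin(x)) / (1 + sin(x)**2)

a, b = 0.0, pi / 2
c1, c2 = (b - a) / 2, (b + a) / 2
for n in range(2, 7):
    # leggauss returns nodes and weights for the interval [-1, 1]
    nodes, weights = np.polynomial.legendre.leggauss(n)
    approx = c1 * sum(w * integrand(c1 * x + c2) for x, w in zip(nodes, weights))
    print(n, round(approx, 6))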
#!/usr/bin/env python
"""Test Cython interpolation"""
from __future__ import division, print_function
import argparse
import os
import sys
from viscid_test_common import next_plot_fname
from matplotlib import pyplot as plt
import numpy as np
import viscid
from viscid.plot import vpyplot as vlt
def run_test(fld, seeds, kind, show=False):
plt.clf()
vlt.plot(viscid.interp(fld, seeds, kind=kind))
plt.title(kind)
plt.savefig(next_plot_fname(__file__))
if show:
vlt.show()
def _main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--show", "--plot", action="store_true")
args = viscid.vutil.common_argparse(parser, default_verb=0)
img = np.load(os.path.join(viscid.sample_dir, "logo.npy"))
x = np.linspace(-1, 1, img.shape[0])
y = np.linspace(-1, 1, img.shape[1])
z = np.linspace(-1, 1, img.shape[2])
logo = viscid.arrays2field([x, y, z], img)
seeds = viscid.Volume([-0.8, -0.8, 0.0], [0.8, 0.8, 0.0],
n=[64, 64, 1])
run_test(logo, seeds, "Nearest", show=args.show)
run_test(logo, seeds, "Trilinear", show=args.show)
return 0
if __name__ == "__main__":
sys.exit(_main())
##
## EOF
##
|
[
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"viscid.interp",
"viscid_test_common.next_plot_fname",
"viscid.arrays2field",
"viscid.vutil.common_argparse",
"numpy.linspace",
"viscid.Volume",
"viscid.plot.vpyplot.show",
"os.path.join"
] |
[((348, 357), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (355, 357), True, 'from matplotlib import pyplot as plt\n'), ((413, 428), 'matplotlib.pyplot.title', 'plt.title', (['kind'], {}), '(kind)\n', (422, 428), True, 'from matplotlib import pyplot as plt\n'), ((532, 576), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (555, 576), False, 'import argparse\n'), ((653, 705), 'viscid.vutil.common_argparse', 'viscid.vutil.common_argparse', (['parser'], {'default_verb': '(0)'}), '(parser, default_verb=0)\n', (681, 705), False, 'import viscid\n'), ((778, 810), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'img.shape[0]'], {}), '(-1, 1, img.shape[0])\n', (789, 810), True, 'import numpy as np\n'), ((819, 851), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'img.shape[1]'], {}), '(-1, 1, img.shape[1])\n', (830, 851), True, 'import numpy as np\n'), ((860, 892), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'img.shape[2]'], {}), '(-1, 1, img.shape[2])\n', (871, 892), True, 'import numpy as np\n'), ((904, 939), 'viscid.arrays2field', 'viscid.arrays2field', (['[x, y, z]', 'img'], {}), '([x, y, z], img)\n', (923, 939), False, 'import viscid\n'), ((953, 1017), 'viscid.Volume', 'viscid.Volume', (['[-0.8, -0.8, 0.0]', '[0.8, 0.8, 0.0]'], {'n': '[64, 64, 1]'}), '([-0.8, -0.8, 0.0], [0.8, 0.8, 0.0], n=[64, 64, 1])\n', (966, 1017), False, 'import viscid\n'), ((371, 407), 'viscid.interp', 'viscid.interp', (['fld', 'seeds'], {'kind': 'kind'}), '(fld, seeds, kind=kind)\n', (384, 407), False, 'import viscid\n'), ((446, 471), 'viscid_test_common.next_plot_fname', 'next_plot_fname', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'from viscid_test_common import next_plot_fname\n'), ((494, 504), 'viscid.plot.vpyplot.show', 'vlt.show', ([], {}), '()\n', (502, 504), True, 'from viscid.plot import vpyplot as vlt\n'), ((725, 768), 'os.path.join', 'os.path.join', (['viscid.sample_dir', '"""logo.npy"""'], {}), "(viscid.sample_dir, 'logo.npy')\n", (737, 768), False, 'import os\n')]
|
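The test above exercises viscid's "Nearest" and "Trilinear" interpolation kinds; here is a generic illustration of the same two schemes using SciPy's RegularGridInterpolator instead of viscid (all data below is synthetic).

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = y = z = np.linspace(-1, 1, 16)
values = np.random.rand(16, 16, 16)
points = np.random.uniform(-0.8, 0.8, size=(5, 3))

# 'nearest' picks the closest grid sample, 'linear' blends the 8 surrounding samples
nearest = RegularGridInterpolator((x, y, z), values, method="nearest")
trilinear = RegularGridInterpolator((x, y, z), values, method="linear")
print(nearest(points))
print(trilinear(points))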
import random
from typing import Union, Tuple, Any, Dict
import cv2
import numpy as np
from skimage.measure import label
from ...core.transforms_interface import DualTransform
from ...core.transforms_interface import to_tuple
__all__ = ["MaskDropout"]
class MaskDropout(DualTransform):
"""
Image & mask augmentation that zeroes out the mask and image regions corresponding
to a randomly chosen object instance from the mask.
The mask must be a single-channel image; zero values are treated as background.
The image can have any number of channels.
Inspired by https://www.kaggle.com/c/severstal-steel-defect-detection/discussion/114254
Args:
max_objects: Maximum number of labels that can be zeroed out. Can be a tuple; in that case it is [min, max].
image_fill_value: Fill value to use when filling the image.
Can be 'inpaint' to apply inpainting (works only for 3-channel images).
mask_fill_value: Fill value to use when filling the mask.
Targets:
image, mask
Image types:
uint8, float32
"""
def __init__(
self,
max_objects: int = 1,
image_fill_value: Union[int, float, str] = 0,
mask_fill_value: Union[int, float] = 0,
always_apply: bool = False,
p: float = 0.5,
):
super(MaskDropout, self).__init__(always_apply, p)
self.max_objects = to_tuple(max_objects, 1)
self.image_fill_value = image_fill_value
self.mask_fill_value = mask_fill_value
@property
def targets_as_params(self):
return ["mask"]
def get_params_dependent_on_targets(self, params) -> Dict[str, Any]:
mask = params["mask"]
label_image, num_labels = label(mask, return_num=True)
if num_labels == 0:
dropout_mask = None
else:
objects_to_drop = random.randint(self.max_objects[0], self.max_objects[1])
objects_to_drop = min(num_labels, objects_to_drop)
if objects_to_drop == num_labels:
dropout_mask = mask > 0
else:
labels_index = random.sample(range(1, num_labels + 1), objects_to_drop)
dropout_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=bool)
for label_index in labels_index:
dropout_mask |= label_image == label_index
params.update({"dropout_mask": dropout_mask})
return params
def apply(self, img: np.ndarray, dropout_mask: np.ndarray = None, **params) -> np.ndarray:
if dropout_mask is None:
return img
if self.image_fill_value == "inpaint":
dropout_mask = dropout_mask.astype(np.uint8)
_, _, w, h = cv2.boundingRect(dropout_mask)
radius = min(3, max(w, h) // 2)
img = cv2.inpaint(img, dropout_mask, radius, cv2.INPAINT_NS)
else:
img = img.copy()
img[dropout_mask] = self.image_fill_value
return img
def apply_to_mask(self, img: np.ndarray, dropout_mask: np.ndarray = None, **params) -> np.ndarray:
if dropout_mask is None:
return img
img = img.copy()
img[dropout_mask] = self.mask_fill_value
return img
def get_transform_init_args_names(self) -> Tuple[str, ...]:
return "max_objects", "image_fill_value", "mask_fill_value"
|
[
"random.randint",
"numpy.zeros",
"skimage.measure.label",
"cv2.inpaint",
"cv2.boundingRect"
] |
[((1705, 1733), 'skimage.measure.label', 'label', (['mask'], {'return_num': '(True)'}), '(mask, return_num=True)\n', (1710, 1733), False, 'from skimage.measure import label\n'), ((1839, 1895), 'random.randint', 'random.randint', (['self.max_objects[0]', 'self.max_objects[1]'], {}), '(self.max_objects[0], self.max_objects[1])\n', (1853, 1895), False, 'import random\n'), ((2707, 2737), 'cv2.boundingRect', 'cv2.boundingRect', (['dropout_mask'], {}), '(dropout_mask)\n', (2723, 2737), False, 'import cv2\n'), ((2800, 2854), 'cv2.inpaint', 'cv2.inpaint', (['img', 'dropout_mask', 'radius', 'cv2.INPAINT_NS'], {}), '(img, dropout_mask, radius, cv2.INPAINT_NS)\n', (2811, 2854), False, 'import cv2\n'), ((2183, 2235), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1])'], {'dtype': 'bool'}), '((mask.shape[0], mask.shape[1]), dtype=bool)\n', (2191, 2235), True, 'import numpy as np\n')]
|
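A hypothetical usage sketch for the transform defined above, assuming it is exposed as albumentations.MaskDropout; the image and mask arrays are synthetic.

import numpy as np
import albumentations as A

image = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[10:20, 10:20] = 1   # first object instance
mask[40:50, 40:50] = 2   # second object instance

# Drop (at most) one randomly chosen instance from both image and mask.
aug = A.Compose([A.MaskDropout(max_objects=1, image_fill_value=0, mask_fill_value=0, p=1.0)])
out = aug(image=image, mask=mask)
dropped_image, dropped_mask = out["image"], out["mask"]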
import sys
import numpy as np
import os
import h5py
import pickle
import re
if len(sys.argv) < 2:
sys.stderr.write('Usage: %s <annotation_gtf>\n' % sys.argv[0])
sys.exit(1)
infile = sys.argv[1]
CONF = 2
def get_tags_gtf(tagline):
"""Extract tags from given tagline"""
tags = dict()
for t in tagline.strip(';').split(';'):
tt = t.strip(' ').split(' ')
tags[tt[0]] = tt[1].strip('"')
return tags
### collect junction information from GTF
trans2gene = dict()
transcripts = []
chrms = []
exons = []
strands = []
for l, line in enumerate(open(infile, 'r')):
if l > 0 and l % 100000 == 0:
sys.stdout.write('.')
if l % 1000000 == 0:
sys.stdout.write('%i\n' % l)
sys.stdout.flush()
if line[0] == '#':
continue
sl = line.strip().split('\t')
if sl[2].lower() != 'exon':
continue
tags = get_tags_gtf(sl[8])
trans2gene[tags['transcript_id']] = tags['gene_id']
chrms.append(re.sub(r'chr', '', sl[0]))
transcripts.append(tags['transcript_id'])
exons.append([int(sl[3]) - 1, int(sl[4])])
strands.append(sl[6])
transcripts = np.array(transcripts)
chrms = np.array(chrms)
exons = np.array(exons)
strands = np.array(strands)
sidx = np.argsort(transcripts)
transcripts = transcripts[sidx]
exons = exons[sidx, :]
strands = strands[sidx]
chrms = chrms[sidx]
junctions = []
jstrands = []
jchrs = []
jgenes = []
_,fidx = np.unique(transcripts, return_index=True)
lidx = np.r_[fidx[1:], transcripts.shape[0]]
for i in range(fidx.shape[0]):
tidx = np.arange(fidx[i], lidx[i])
eidx = np.argsort(exons[tidx, 0])
if eidx.shape[0] > 1:
junctions.append(np.reshape(exons[tidx[eidx], :].ravel()[1:-1], (eidx.shape[0] - 1, 2)))
jchrs.extend(chrms[tidx[:-1]])
jstrands.extend(strands[tidx[:-1]])
jgenes.extend([trans2gene[x] for x in transcripts[tidx[:-1]]])
junctions = np.vstack(junctions)
jgenes = np.array(jgenes)
jchrs = np.array(jchrs)
jstrands = np.array(jstrands)
del exons, strands, transcripts
jid = np.array(['.'.join(x) for x in np.c_[jchrs.astype('str'), junctions.astype('str')]])
_, uidx = np.unique(jid, return_index=True)
junctions = junctions[uidx, :]
jstrands = jstrands[uidx]
jchrs = jchrs[uidx]
jgenes = jgenes[uidx]
### sort everything by coordinates
sidx = np.argsort(junctions[:, 0])
junctions = junctions[sidx, :]
jstrands = jstrands[sidx]
jchrs = jchrs[sidx]
jgenes = jgenes[sidx]
sidx = np.argsort(jchrs, kind='mergesort')
junctions = junctions[sidx, :]
jstrands = jstrands[sidx]
jchrs = jchrs[sidx]
jgenes = jgenes[sidx]
### prepare output file
OUT = h5py.File(re.sub(r'.gtf$', '', infile) + '.junctions.hdf5', 'w')
OUT.create_dataset(name='gene_names', data=jgenes.view(np.chararray).encode('utf-8'), compression='gzip')
OUT.create_dataset(name='strand', data=jstrands.view(np.chararray).encode('utf-8'), compression='gzip')
OUT.create_dataset(name='pos', data=junctions, compression='gzip')
OUT.create_dataset(name='chrms', data=jchrs.view(np.chararray).encode('utf-8'), compression='gzip')
OUT.close()
|
[
"sys.stdout.write",
"numpy.unique",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"sys.stdout.flush",
"sys.exit",
"sys.stderr.write",
"re.sub",
"numpy.vstack"
] |
[((1147, 1168), 'numpy.array', 'np.array', (['transcripts'], {}), '(transcripts)\n', (1155, 1168), True, 'import numpy as np\n'), ((1177, 1192), 'numpy.array', 'np.array', (['chrms'], {}), '(chrms)\n', (1185, 1192), True, 'import numpy as np\n'), ((1201, 1216), 'numpy.array', 'np.array', (['exons'], {}), '(exons)\n', (1209, 1216), True, 'import numpy as np\n'), ((1227, 1244), 'numpy.array', 'np.array', (['strands'], {}), '(strands)\n', (1235, 1244), True, 'import numpy as np\n'), ((1253, 1276), 'numpy.argsort', 'np.argsort', (['transcripts'], {}), '(transcripts)\n', (1263, 1276), True, 'import numpy as np\n'), ((1438, 1479), 'numpy.unique', 'np.unique', (['transcripts'], {'return_index': '(True)'}), '(transcripts, return_index=True)\n', (1447, 1479), True, 'import numpy as np\n'), ((1922, 1942), 'numpy.vstack', 'np.vstack', (['junctions'], {}), '(junctions)\n', (1931, 1942), True, 'import numpy as np\n'), ((1952, 1968), 'numpy.array', 'np.array', (['jgenes'], {}), '(jgenes)\n', (1960, 1968), True, 'import numpy as np\n'), ((1977, 1992), 'numpy.array', 'np.array', (['jchrs'], {}), '(jchrs)\n', (1985, 1992), True, 'import numpy as np\n'), ((2004, 2022), 'numpy.array', 'np.array', (['jstrands'], {}), '(jstrands)\n', (2012, 2022), True, 'import numpy as np\n'), ((2158, 2191), 'numpy.unique', 'np.unique', (['jid'], {'return_index': '(True)'}), '(jid, return_index=True)\n', (2167, 2191), True, 'import numpy as np\n'), ((2334, 2361), 'numpy.argsort', 'np.argsort', (['junctions[:, 0]'], {}), '(junctions[:, 0])\n', (2344, 2361), True, 'import numpy as np\n'), ((2468, 2503), 'numpy.argsort', 'np.argsort', (['jchrs'], {'kind': '"""mergesort"""'}), "(jchrs, kind='mergesort')\n", (2478, 2503), True, 'import numpy as np\n'), ((103, 165), 'sys.stderr.write', 'sys.stderr.write', (["('Usage: %s <annotation_gtf>\\n' % sys.argv[0])"], {}), "('Usage: %s <annotation_gtf>\\n' % sys.argv[0])\n", (119, 165), False, 'import sys\n'), ((170, 181), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (178, 181), False, 'import sys\n'), ((1567, 1594), 'numpy.arange', 'np.arange', (['fidx[i]', 'lidx[i]'], {}), '(fidx[i], lidx[i])\n', (1576, 1594), True, 'import numpy as np\n'), ((1606, 1632), 'numpy.argsort', 'np.argsort', (['exons[tidx, 0]'], {}), '(exons[tidx, 0])\n', (1616, 1632), True, 'import numpy as np\n'), ((640, 661), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (656, 661), False, 'import sys\n'), ((740, 758), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (756, 758), False, 'import sys\n'), ((986, 1010), 're.sub', 're.sub', (['"""chr"""', '""""""', 'sl[0]'], {}), "('chr', '', sl[0])\n", (992, 1010), False, 'import re\n'), ((2644, 2671), 're.sub', 're.sub', (['""".gtf$"""', '""""""', 'infile'], {}), "('.gtf$', '', infile)\n", (2650, 2671), False, 'import re\n'), ((703, 731), 'sys.stdout.write', 'sys.stdout.write', (["('%i\\n' % l)"], {}), "('%i\\n' % l)\n", (719, 731), False, 'import sys\n')]
|
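A short sketch of reading the resulting junction file back with h5py; the file name here is illustrative, while the dataset names match the create_dataset calls above.

import h5py

with h5py.File("annotation.junctions.hdf5", "r") as f:
    pos = f["pos"][:]                       # junction start/end coordinates
    chrms = f["chrms"][:].astype(str)       # chromosome per junction
    strands = f["strand"][:].astype(str)    # strand per junction
    genes = f["gene_names"][:].astype(str)  # gene ID per junction
print(pos.shape, chrms[:5], strands[:5], genes[:5])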
import cv2
import subprocess as sp
import numpy
VIDEO_URL = 'http://iphone-streaming.ustream.tv/watch/playlist.m3u8?cid=16258431&stream=live_3&appType=103&appVersion=3&conn=wifi&group=iphone'
cv2.namedWindow("GoPro",cv2.CV_WINDOW_AUTOSIZE)
pipe = sp.Popen([ 'ffmpeg.exe', "-i", VIDEO_URL,
"-loglevel", "quiet", # no text output
"-an", # disable audio
"-f", "image2pipe",
"-pix_fmt", "bgr24",
"-vcodec", "rawvideo", "-"],
stdin = sp.PIPE, stdout = sp.PIPE)
while True:
raw_image = pipe.stdout.read(432*240*3) # read 432*240*3 bytes (= 1 frame)
image = numpy.fromstring(raw_image, dtype='uint8').reshape((240,432,3))
cv2.imshow("GoPro",image)
if cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
|
[
"subprocess.Popen",
"cv2.waitKey",
"cv2.imshow",
"cv2.destroyAllWindows",
"numpy.fromstring",
"cv2.namedWindow"
] |
[((194, 242), 'cv2.namedWindow', 'cv2.namedWindow', (['"""GoPro"""', 'cv2.CV_WINDOW_AUTOSIZE'], {}), "('GoPro', cv2.CV_WINDOW_AUTOSIZE)\n", (209, 242), False, 'import cv2\n'), ((250, 429), 'subprocess.Popen', 'sp.Popen', (["['ffmpeg.exe', '-i', VIDEO_URL, '-loglevel', 'quiet', '-an', '-f',\n 'image2pipe', '-pix_fmt', 'bgr24', '-vcodec', 'rawvideo', '-']"], {'stdin': 'sp.PIPE', 'stdout': 'sp.PIPE'}), "(['ffmpeg.exe', '-i', VIDEO_URL, '-loglevel', 'quiet', '-an', '-f',\n 'image2pipe', '-pix_fmt', 'bgr24', '-vcodec', 'rawvideo', '-'], stdin=\n sp.PIPE, stdout=sp.PIPE)\n", (258, 429), True, 'import subprocess as sp\n'), ((767, 790), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (788, 790), False, 'import cv2\n'), ((698, 724), 'cv2.imshow', 'cv2.imshow', (['"""GoPro"""', 'image'], {}), "('GoPro', image)\n", (708, 724), False, 'import cv2\n'), ((731, 745), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (742, 745), False, 'import cv2\n'), ((630, 672), 'numpy.fromstring', 'numpy.fromstring', (['raw_image'], {'dtype': '"""uint8"""'}), "(raw_image, dtype='uint8')\n", (646, 672), False, 'import numpy\n')]
|
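Two calls in the snippet above have since been deprecated or removed from their libraries; a minimal sketch of the modern equivalents, with a synthetic frame standing in for the ffmpeg pipe:

import cv2
import numpy as np

# cv2.WINDOW_AUTOSIZE replaces the old cv2.CV_WINDOW_AUTOSIZE constant.
cv2.namedWindow("GoPro", cv2.WINDOW_AUTOSIZE)
# numpy.frombuffer replaces the deprecated numpy.fromstring for decoding raw bytes.
raw_image = b"\x00" * (432 * 240 * 3)
image = np.frombuffer(raw_image, dtype=np.uint8).reshape((240, 432, 3))
cv2.imshow("GoPro", image)
cv2.waitKey(1)
cv2.destroyAllWindows()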
from random import randrange
from numpy import log, array, ceil
from copy import deepcopy
from itertools import permutations
import spidev
import Color_Match as cm
valid_arrangements = ['linear']
valid_update_strategies = ['on-command']
class DotstarDevice:
def __init__(self, num_LEDs, arrangement, color_order, thermal_protection_limit = False, max_Hz = 3000000, bus = 0, device = 0, update = 'on-command', dummy = False):
self.num_LEDs = num_LEDs
# Store LEDs state in (brightness, r, g, b) order
self.LEDs_state = [(0, 0, 0, 0)] * self.num_LEDs
if arrangement not in valid_arrangements:
raise ValueError("invalid arrangement type")
else:
self.arrangement = arrangement
if color_order.lower() not in [''.join(p) for p in permutations('bgr')]:
raise ValueError("invalid color_order value")
else:
self.color_order = color_order.lower()
# note, thermal protection limit is in units of (brightness ∈ [0, 31]) * (color_channel ∈ [0, 255]),
# and is enforced on each color channel separately
if thermal_protection_limit != False:
self.thermal_limit = thermal_protection_limit
else:
self.thermal_limit = 31 * 255
self.max_Hz = float(max_Hz)
self.device = int(device)
self.bus = int(bus)
if update not in valid_update_strategies:
raise ValueError("invalid update strategy")
else:
self.update_strategy = update
if not dummy:
self.spi = spidev.SpiDev()
self.spi.open(self.bus, self.device)
self.spi.max_speed_hz = int(self.max_Hz)
def unsafe_change_LED_state(self, idx, brightness, r, g, b):
self.LEDs_state[idx] = (brightness, r, g, b)
def safe_change_LED_state(self, idx, brightness, r, g, b):
if brightness * r > self.thermal_limit:
r = int(self.thermal_limit / brightness)
if brightness * g > self.thermal_limit:
g = int(self.thermal_limit / brightness)
if brightness * b > self.thermal_limit:
b = int(self.thermal_limit / brightness)
self.LEDs_state[idx] = (brightness, r, g, b)
def set_LEDs(self, start_idx, end_idx, brightness, r, g, b, safe = True):
if start_idx < 0 or start_idx > self.num_LEDs - 1:
raise ValueError("invalid start index: " + str(start_idx))
if end_idx < start_idx or end_idx > self.num_LEDs:
raise ValueError("invalid end index: " + str(end_idx))
if brightness not in range(32):
raise ValueError("invalid brightness value: " + brightness)
if r not in range(256) or g not in range(256) or b not in range(256):
raise ValueError("invalid rgb tuple: (" + str(r) + ", " + str(g) + ", " + str(b) + ")")
for i in range(start_idx, end_idx):
self.set_LED(i, brightness, r, g, b, safe)
def set_LED(self, idx, brightness, r, g, b, safe = True):
if safe:
self.safe_change_LED_state(idx, brightness, r, g, b)
else:
self.unsafe_change_LED_state(idx, brightness, r, g, b)
def set_nth_LEDs(self, start_idx, end_idx, n, brightness, r, g, b, safe = True):
for i in range(start_idx, end_idx, n):
self.set_LED(i, brightness, r, g, b, safe)
def commit_state(self):
to_send = self.start_frame() + self.state_to_bytes() + self.end_frame()
self.spi.xfer(to_send)
def start_frame(self):
return [0x00] * 4
def end_frame(self):
return [0x00] * (int(self.num_LEDs / 128) + 1) * 4
def state_to_bytes(self, state = []):
if state == []:
state = self.LEDs_state
to_send = [self.tuple_to_bytes(LED) for LED in state]
return sum(to_send, [])
def tuple_to_bytes(self, t):
if t[0] not in range(32) or t[1] not in range(256) or t[2] not in range(256) or t[3] not in range(256):
raise ValueError("invalid LED state tuple: " + str(t))
color_indices = [int(c) for c in self.color_order.replace('r', '1').replace('g', '2').replace('b', '3')]
to_send = [0xe0 + t[0]]
to_send += [t[color_indices[0]], t[color_indices[1]], t[color_indices[2]]]
return to_send
def get_full_state(self):
return self.LEDs_state
def get_LED_state(self, idx):
return self.LEDs_state[idx]
def commit_off(self):
to_send = self.start_frame() + self.state_to_bytes([(0, 0, 0, 0)] * self.num_LEDs) + self.end_frame()
self.spi.xfer(to_send)
def reset_LEDs_state(self):
self.set_LEDs(0, self.num_LEDs, 0, 0, 0, 0)
def set_LEDs_best_match_float_rgb(self, start_idx, end_idx, r, g, b, max_pattern_width = 6, debug = False, precompute = False):
def n_batch_config(n, r, g, b, max_level, max_pattern_width = max_pattern_width):
n = int(n)
if n < 1 or n > max_pattern_width:
raise ValueError("requested pattern is too large: n = " + str(n))
if any([i < 0. or i > 1. for i in [r, g, b]]):
raise ValueError("invalid rgb float tuple: " + str(r) + ", " + str(g) + ", " + str(b))
irgbs = [r * max_level, g * max_level, b * max_level]
# def gen_sorted_idx_lookups(irgbs):
# irgbs_sorted = deepcopy(irgbs)
# irgbs_sorted.sort()
# irgbs_sorted = [[i, False] for i in irgbs_sorted]
# irgbs_flags = [[i, False] for i in irgbs]
# idx_lookups = []
# for i in irgbs_sorted:
# idx = irgbs_flags.index(i)
# idx_lookups += [idx]
# irgbs_flags[idx][1] = True
# return idx_lookups
def LED_to_irgbs(led, n):
return [1. * led[0] * led[1] / n, 1. * led[0] * led[2] / n, 1. * led[0] * led[3] / n]
def recursive_n_config(n, irgbs, n_original):
def find_best_incremental_LED(irgbs):
# use a "growing" strategy. start the test_irgb at (1, 0, 0, 0) and test out all four options of +1 to see which
# (including the current point) produces the least log_error. Then make that step and repeat the function
# until you end up with the least error produced by the current point. Then return the current point.
### ↓↓↓ check if this shouldn't be current_point = (1, 0, 0, 0) instead!!!
current_point = (0, 0, 0, 0)
next_point = (1, 0, 0, 0)
while next_point != current_point:
current_point = deepcopy(next_point)
candidate_points = [(current_point[0], current_point[1], current_point[2], current_point[3])]
if current_point[0] < 31:
candidate_points += [(current_point[0] + 1, current_point[1], current_point[2], current_point[3])]
if current_point[1] < 255:
candidate_points += [(current_point[0], current_point[1] + 1, current_point[2], current_point[3])]
if current_point[2] < 255:
candidate_points += [(current_point[0], current_point[1], current_point[2] + 1, current_point[3])]
if current_point[3] < 255:
candidate_points += [(current_point[0], current_point[1], current_point[2], current_point[3] + 1)]
# candidate_points += [(current_point[0], current_point[1], current_point[2], current_point[3])]
candidate_errors = [compute_log_error(irgbs, LED_to_irgbs(led, n_original)) for led in candidate_points]
next_point = candidate_points[candidate_errors.index(min(candidate_errors))]
if debug:
print("point iteration!!!!!")
print(candidate_points)
print(candidate_errors)
print(next_point)
return current_point
if n == 1:
return [find_best_incremental_LED(irgbs)]
elif n > 1:
best_LED = find_best_incremental_LED(irgbs)
best_LED_irgbs = [best_LED[0] * best_LED[1] / n_original, best_LED[0] * best_LED[2] / n_original, best_LED[0] * best_LED[3] / n_original]
irgb_residuals = [irgbs[0] - best_LED_irgbs[0], irgbs[1] - best_LED_irgbs[1], irgbs[2] - best_LED_irgbs[2]]
return [best_LED] + recursive_n_config(n - 1, irgb_residuals, n_original)
res = recursive_n_config(n, irgbs, n)
def adjust_ordering(cfg):
# change the ordering of the LEDs, so they are maximally mixed with bright and dim LEDs next to each other
cfg.sort(key=(lambda x: x[0] * x[1] + x[0] * x[2] + x[0] * x[3]))
def alternating_index(i):
# produces indices like 0, -1, 1, -2, 2, -3, ...
return int((-1)**i * ceil(i / 2))
return [cfg[alternating_index(i)] for i, _ in enumerate(cfg)]
return adjust_ordering(res)
def config_to_floats(cfg, max_level):
r, g, b = 0., 0., 0.
r = sum([1.0 * led[0] * led[1] / max_level for led in cfg]) / len(cfg)
g = sum([1.0 * led[0] * led[2] / max_level for led in cfg]) / len(cfg)
b = sum([1.0 * led[0] * led[3] / max_level for led in cfg]) / len(cfg)
return (r, g, b)
def config_to_irgbs(cfg):
r, g, b = 0, 0, 0
for led in cfg:
r += led[0] * led[1]
g += led[0] * led[2]
b += led[0] * led[3]
r /= len(cfg)
g /= len(cfg)
b /= len(cfg)
if debug:
print(cfg)
print((r, g, b))
return (r, g, b)
def mean_squared_error(d0, d1):
residuals = [((d0[i] - d1[i]) / d0[i])**2.0 for i in range(len(d0))]
return sum(residuals) / len(residuals)
def compute_log_error(desired_irgbs, test_irgbs):
res = 1.0
for pair in zip(desired_irgbs, test_irgbs):
if pair[1] <= pair[0]:
res *= (log(pair[0]+1) - log(pair[1]+1) + 1)
else:
res *= 2.0 * (log(pair[0] + 1) + 1)
return res
config_options = [n_batch_config(n, r, g, b, self.thermal_limit) for n in range(1, max_pattern_width + 1)]
# errors = [mean_squared_error((r, g, b), config_to_floats(cfg, self.thermal_limit)) for cfg in config_options]
errors = [compute_log_error([r * self.thermal_limit, g * self.thermal_limit, b * self.thermal_limit], config_to_irgbs(cfg)) for cfg in config_options]
if debug:
print(config_options)
print(errors)
best_config = config_options[errors.index(min(errors))]
# now set the config
pattern_length = len(best_config)
# cycle the best_config a random number of times to evenly distribute % chance of any LED being the MAX LED.
# the point of this is to make sure that we're not always blasting THE SAME LEDs with the high brightnesses,
# and thereby, high heats, in order to prolong the LED lifetimes :)
cycle_offset = randrange(0, pattern_length)
best_config = best_config[cycle_offset:] + best_config[:cycle_offset]
if precompute:
tmp = deepcopy(self.LEDs_state)
self.set_pattern(start_idx, end_idx, best_config, safe = False)
precompute_res = deepcopy(self.LEDs_state)
self.unsafe_raw_set_LEDs_state(tmp)
return precompute_res
else:
self.set_pattern(start_idx, end_idx, best_config, safe = False)
def set_pattern(self, start_idx, end_idx, pattern, safe = True):
for idx, led in enumerate(pattern):
self.set_nth_LEDs(start_idx + idx, end_idx, len(pattern), led[0], led[1], led[2], led[3], safe = safe)
def unsafe_raw_set_LEDs_state(self, state):
self.LEDs_state = deepcopy(state)
class MultiDotstarController:
def __init__(self, subdevice_configs, color_order, thermal_protection_limit = False, max_Hz = 3000000, bus = 0, device = 0, update = 'on-command', dummy = False):
def check_integrity(cfgs):
for cfg in cfgs:
tests = {'has start_idx': 'start_idx' in cfg.keys(),
'has end_idx': 'end_idx' in cfg.keys(),
'start_idx < end_idx': cfg['start_idx'] < cfg['end_idx'],
'start_idx is int': type(cfg['start_idx']) == int,
'end_idx is int': type(cfg['end_idx']) == int,
'has l1': 'l1' in cfg.keys(),
'has l2': 'l2' in cfg.keys(),
'has l3': 'l3' in cfg.keys(),
'0 ≤ l1 ≤ 1e-6': 0. <= cfg['l1'] <= 1.e-6,
'0 ≤ l2 ≤ 1e-6': 0. <= cfg['l2'] <= 1.e-6,
'0 ≤ l3 ≤ 1e-6': 0. <= cfg['l3'] <= 1.e-6}
if not all(tests.values()):
raise RuntimeError('Error - invalid subdevice configuration. Test results: ' + str(tests))
for idx in range(len(cfgs))[:-1]:
if cfgs[idx]['end_idx'] != cfgs[idx + 1]['start_idx']:
raise ValueError('Bad index continuity between ' + str(cfgs[idx]) + ' and ' + str(cfgs[idx + 1]))
check_integrity(subdevice_configs)
self.total_num_LEDs = subdevice_configs[-1]['end_idx']
self.subdevice_configs = subdevice_configs
self.control_interface = DotstarDevice(self.total_num_LEDs, 'linear', color_order, thermal_protection_limit = thermal_protection_limit, max_Hz = max_Hz, bus = bus, device = device, update = update, dummy = dummy)
def match_sense_vector_on_subdevice(self, cfg, sv, brightness):
rgbs = brightness * cm.rgb_composition(cfg['l1'], cfg['l2'], cfg['l3'], sv)
return self.control_interface.set_LEDs_best_match_float_rgb(cfg['start_idx'], cfg['end_idx'], rgbs[0,0], rgbs[1, 0], rgbs[2, 0])
def match_sense_vector(self, sv, brightness):
for cfg in self.subdevice_configs:
self.match_sense_vector_on_subdevice(cfg, sv, brightness)
def match_planck_spectrum(self, color_temp, brightness, precompute = False):
if brightness < 0. or brightness > 1.:
raise ValueError('Invalid brightness value: ' + str(brightness))
brightness = self.visual_to_absolute_brightness(brightness)
if precompute:
tmp = deepcopy(self.control_interface.LEDs_state)
sv = cm.sense_vector(cm.planck_spectrum(color_temp))
self.match_sense_vector(sv, brightness)
if precompute:
res = deepcopy(self.control_interface.LEDs_state)
self.control_interface.unsafe_raw_set_LEDs_state(tmp)
return res
def commit_all_off(self):
self.control_interface.commit_off()
def commit_all_on(self):
self.control_interface.commit_state()
def visual_to_absolute_brightness(self, b):
x = (b + 0.16) / 1.16
if x > 6./29.:
return x**3.0
else:
return (x - 4./29.) / (1./3. * (29./6.)**2.)
|
[
"copy.deepcopy",
"spidev.SpiDev",
"numpy.ceil",
"numpy.log",
"Color_Match.rgb_composition",
"itertools.permutations",
"random.randrange",
"Color_Match.planck_spectrum"
] |
[((11584, 11612), 'random.randrange', 'randrange', (['(0)', 'pattern_length'], {}), '(0, pattern_length)\n', (11593, 11612), False, 'from random import randrange\n'), ((12379, 12394), 'copy.deepcopy', 'deepcopy', (['state'], {}), '(state)\n', (12387, 12394), False, 'from copy import deepcopy\n'), ((1582, 1597), 'spidev.SpiDev', 'spidev.SpiDev', ([], {}), '()\n', (1595, 1597), False, 'import spidev\n'), ((11733, 11758), 'copy.deepcopy', 'deepcopy', (['self.LEDs_state'], {}), '(self.LEDs_state)\n', (11741, 11758), False, 'from copy import deepcopy\n'), ((11864, 11889), 'copy.deepcopy', 'deepcopy', (['self.LEDs_state'], {}), '(self.LEDs_state)\n', (11872, 11889), False, 'from copy import deepcopy\n'), ((14258, 14313), 'Color_Match.rgb_composition', 'cm.rgb_composition', (["cfg['l1']", "cfg['l2']", "cfg['l3']", 'sv'], {}), "(cfg['l1'], cfg['l2'], cfg['l3'], sv)\n", (14276, 14313), True, 'import Color_Match as cm\n'), ((14932, 14975), 'copy.deepcopy', 'deepcopy', (['self.control_interface.LEDs_state'], {}), '(self.control_interface.LEDs_state)\n', (14940, 14975), False, 'from copy import deepcopy\n'), ((15005, 15035), 'Color_Match.planck_spectrum', 'cm.planck_spectrum', (['color_temp'], {}), '(color_temp)\n', (15023, 15035), True, 'import Color_Match as cm\n'), ((15126, 15169), 'copy.deepcopy', 'deepcopy', (['self.control_interface.LEDs_state'], {}), '(self.control_interface.LEDs_state)\n', (15134, 15169), False, 'from copy import deepcopy\n'), ((802, 821), 'itertools.permutations', 'permutations', (['"""bgr"""'], {}), "('bgr')\n", (814, 821), False, 'from itertools import permutations\n'), ((6738, 6758), 'copy.deepcopy', 'deepcopy', (['next_point'], {}), '(next_point)\n', (6746, 6758), False, 'from copy import deepcopy\n'), ((9242, 9253), 'numpy.ceil', 'ceil', (['(i / 2)'], {}), '(i / 2)\n', (9246, 9253), False, 'from numpy import log, array, ceil\n'), ((10502, 10518), 'numpy.log', 'log', (['(pair[0] + 1)'], {}), '(pair[0] + 1)\n', (10505, 10518), False, 'from numpy import log, array, ceil\n'), ((10519, 10535), 'numpy.log', 'log', (['(pair[1] + 1)'], {}), '(pair[1] + 1)\n', (10522, 10535), False, 'from numpy import log, array, ceil\n'), ((10595, 10611), 'numpy.log', 'log', (['(pair[0] + 1)'], {}), '(pair[0] + 1)\n', (10598, 10611), False, 'from numpy import log, array, ceil\n')]
|
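A condensed, dependency-free sketch of the byte layout that DotstarDevice.commit_state sends over SPI, mirroring start_frame, tuple_to_bytes, and end_frame above; frame_for and its example values are hypothetical.

def frame_for(leds, color_order="bgr"):
    # leds is a list of (brightness, r, g, b) tuples, brightness in [0, 31]
    order = {"r": 1, "g": 2, "b": 3}
    start = [0x00] * 4                                # 4-byte zero start frame
    body = []
    for brightness, r, g, b in leds:
        led = (brightness, r, g, b)
        body += [0xE0 + brightness] + [led[order[c]] for c in color_order]
    end = [0x00] * (int(len(leds) / 128) + 1) * 4     # zero end frame, sized as above
    return start + body + end

print([hex(x) for x in frame_for([(31, 255, 0, 0), (10, 0, 128, 0)])])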
import numpy as np
class Gene(object):
"""
creates a list of genes out of files and can display them in the log
"""
def __init__(self, items_file, logger):
self.items = np.loadtxt(items_file)
self.logger = logger
self.logger.debug('list of items: %s' % self.items)
return
|
[
"numpy.loadtxt"
] |
[((189, 211), 'numpy.loadtxt', 'np.loadtxt', (['items_file'], {}), '(items_file)\n', (199, 211), True, 'import numpy as np\n')]
|
import csv
import os
import random
import numpy as np
import torch
import tqdm
from torch.backends import cudnn
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
from nets import nn
from utils import util
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
cudnn.benchmark = False
cudnn.deterministic = True
def batch_fn(images, target, model, device, loss_fn, training=True):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
if training:
with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):
output = model(images)
loss = loss_fn(output, target)
else:
output = model(images)
if isinstance(output, (tuple, list)):
output = output[0]
loss = loss_fn(output, target).data
acc1, acc5 = util.accuracy(output, target, top_k=(1, 5))
return loss, acc1, acc5, output
def main():
epochs = 450
device = torch.device('cuda')
data_dir = '../Dataset/IMAGENET'
num_gpu = torch.cuda.device_count()
v_batch_size = 16 * num_gpu
t_batch_size = 256 * num_gpu
model = nn.MobileNetV3().to(device)
optimizer = nn.RMSprop(util.add_weight_decay(model), 0.016 * num_gpu, 0.9, 1e-3, momentum=0.9)
model = torch.nn.DataParallel(model)
_ = model(torch.zeros(1, 3, 224, 224).to(device))
ema = nn.EMA(model)
t_criterion = nn.CrossEntropyLoss(0.1).to(device)
v_criterion = torch.nn.CrossEntropyLoss().to(device)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
t_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'),
transforms.Compose([util.RandomResize(),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), normalize]))
v_dataset = datasets.ImageFolder(os.path.join(data_dir, 'val'),
transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(), normalize]))
t_loader = data.DataLoader(t_dataset, batch_size=t_batch_size, shuffle=True,
num_workers=os.cpu_count(), pin_memory=True)
v_loader = data.DataLoader(v_dataset, batch_size=v_batch_size, shuffle=False,
num_workers=os.cpu_count(), pin_memory=True)
scheduler = nn.StepLR(optimizer)
amp_scale = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
with open(f'weights/{scheduler.__str__()}.csv', 'w') as f:
writer = csv.DictWriter(f, fieldnames=['epoch', 't_loss', 'v_loss', 'acc@1', 'acc@5'])
writer.writeheader()
best_acc1 = 0
for epoch in range(0, epochs):
print(('\n' + '%10s' * 2) % ('epoch', 'loss'))
t_bar = tqdm.tqdm(t_loader, total=len(t_loader))
model.train()
t_loss = util.AverageMeter()
v_loss = util.AverageMeter()
for images, target in t_bar:
loss, _, _, _ = batch_fn(images, target, model, device, t_criterion)
optimizer.zero_grad()
amp_scale.scale(loss).backward()
amp_scale.step(optimizer)
amp_scale.update()
ema.update(model)
torch.cuda.synchronize()
t_loss.update(loss.item(), images.size(0))
t_bar.set_description(('%10s' + '%10.4g') % ('%g/%g' % (epoch + 1, epochs), loss))
top1 = util.AverageMeter()
top5 = util.AverageMeter()
ema_model = ema.model.eval()
with torch.no_grad():
for images, target in tqdm.tqdm(v_loader, ('%10s' * 2) % ('acc@1', 'acc@5')):
loss, acc1, acc5, output = batch_fn(images, target, ema_model, device, v_criterion, False)
torch.cuda.synchronize()
v_loss.update(loss.item(), output.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
acc1, acc5 = top1.avg, top5.avg
print('%10.3g' * 2 % (acc1, acc5))
scheduler.step(epoch + 1)
writer.writerow({'epoch': epoch + 1,
't_loss': str(f'{t_loss.avg:.4f}'),
'v_loss': str(f'{v_loss.avg:.4f}'),
'acc@1': str(f'{acc1:.3f}'),
'acc@5': str(f'{acc5:.3f}')})
util.save_checkpoint({'state_dict': ema.model.state_dict()}, acc1 > best_acc1)
best_acc1 = max(acc1, best_acc1)
torch.cuda.empty_cache()
def print_parameters():
model = nn.MobileNetV3().fuse().eval()
_ = model(torch.zeros(1, 3, 224, 224))
params = sum(p.numel() for p in model.parameters())
print('{:<20} {:<8}'.format('Number of parameters ', int(params)))
if __name__ == '__main__':
if not os.path.exists('weights'):
os.makedirs('weights')
print_parameters()
|
[
"torch.cuda.synchronize",
"numpy.random.seed",
"utils.util.add_weight_decay",
"nets.nn.StepLR",
"torch.cuda.device_count",
"torch.device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"utils.util.AverageMeter",
"csv.DictWriter",
"nets.nn.CrossEntropyLoss",
"utils.util.accuracy",
"os.path.exists",
"random.seed",
"torch.zeros",
"torchvision.transforms.CenterCrop",
"tqdm.tqdm",
"nets.nn.MobileNetV3",
"torchvision.transforms.RandomHorizontalFlip",
"torch.manual_seed",
"torch.cuda.is_available",
"torchvision.transforms.Resize",
"torchvision.transforms.ColorJitter",
"os.makedirs",
"torch.nn.CrossEntropyLoss",
"nets.nn.EMA",
"os.cpu_count",
"torch.cuda.empty_cache",
"torch.nn.DataParallel",
"utils.util.RandomResize",
"torchvision.transforms.ToTensor"
] |
[((255, 270), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (266, 270), False, 'import random\n'), ((271, 289), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (285, 289), True, 'import numpy as np\n'), ((290, 311), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (307, 311), False, 'import torch\n'), ((881, 924), 'utils.util.accuracy', 'util.accuracy', (['output', 'target'], {'top_k': '(1, 5)'}), '(output, target, top_k=(1, 5))\n', (894, 924), False, 'from utils import util\n'), ((1005, 1025), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1017, 1025), False, 'import torch\n'), ((1077, 1102), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1100, 1102), False, 'import torch\n'), ((1321, 1349), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1342, 1349), False, 'import torch\n'), ((1415, 1428), 'nets.nn.EMA', 'nn.EMA', (['model'], {}), '(model)\n', (1421, 1428), False, 'from nets import nn\n'), ((1557, 1632), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1577, 1632), False, 'from torchvision import transforms\n'), ((2722, 2742), 'nets.nn.StepLR', 'nn.StepLR', (['optimizer'], {}), '(optimizer)\n', (2731, 2742), False, 'from nets import nn\n'), ((4984, 5008), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5006, 5008), False, 'import torch\n'), ((1236, 1264), 'utils.util.add_weight_decay', 'util.add_weight_decay', (['model'], {}), '(model)\n', (1257, 1264), False, 'from utils import util\n'), ((1670, 1701), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (1682, 1701), False, 'import os\n'), ((2099, 2128), 'os.path.join', 'os.path.join', (['data_dir', '"""val"""'], {}), "(data_dir, 'val')\n", (2111, 2128), False, 'import os\n'), ((2900, 2977), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': "['epoch', 't_loss', 'v_loss', 'acc@1', 'acc@5']"}), "(f, fieldnames=['epoch', 't_loss', 'v_loss', 'acc@1', 'acc@5'])\n", (2914, 2977), False, 'import csv\n'), ((5092, 5119), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (5103, 5119), False, 'import torch\n'), ((5289, 5314), 'os.path.exists', 'os.path.exists', (['"""weights"""'], {}), "('weights')\n", (5303, 5314), False, 'import os\n'), ((5324, 5346), 'os.makedirs', 'os.makedirs', (['"""weights"""'], {}), "('weights')\n", (5335, 5346), False, 'import os\n'), ((1181, 1197), 'nets.nn.MobileNetV3', 'nn.MobileNetV3', ([], {}), '()\n', (1195, 1197), False, 'from nets import nn\n'), ((1447, 1471), 'nets.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', (['(0.1)'], {}), '(0.1)\n', (1466, 1471), False, 'from nets import nn\n'), ((1501, 1528), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1526, 1528), False, 'import torch\n'), ((2514, 2528), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (2526, 2528), False, 'import os\n'), ((2672, 2686), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (2684, 2686), False, 'import os\n'), ((2793, 2818), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2816, 2818), False, 'import torch\n'), ((3235, 3254), 'utils.util.AverageMeter', 'util.AverageMeter', ([], {}), '()\n', (3252, 3254), False, 'from utils import util\n'), ((3276, 3295), 'utils.util.AverageMeter', 'util.AverageMeter', ([], 
{}), '()\n', (3293, 3295), False, 'from utils import util\n'), ((3840, 3859), 'utils.util.AverageMeter', 'util.AverageMeter', ([], {}), '()\n', (3857, 3859), False, 'from utils import util\n'), ((3879, 3898), 'utils.util.AverageMeter', 'util.AverageMeter', ([], {}), '()\n', (3896, 3898), False, 'from utils import util\n'), ((1364, 1391), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (1375, 1391), False, 'import torch\n'), ((1760, 1779), 'utils.util.RandomResize', 'util.RandomResize', ([], {}), '()\n', (1777, 1779), False, 'from utils import util\n'), ((1838, 1875), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.4)'], {}), '(0.4, 0.4, 0.4)\n', (1860, 1875), False, 'from torchvision import transforms\n'), ((1934, 1967), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1965, 1967), False, 'from torchvision import transforms\n'), ((2026, 2047), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2045, 2047), False, 'from torchvision import transforms\n'), ((2187, 2209), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (2204, 2209), False, 'from torchvision import transforms\n'), ((2268, 2294), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2289, 2294), False, 'from torchvision import transforms\n'), ((2353, 2374), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2372, 2374), False, 'from torchvision import transforms\n'), ((3637, 3661), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (3659, 3661), False, 'import torch\n'), ((3958, 3973), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3971, 3973), False, 'import torch\n'), ((4013, 4065), 'tqdm.tqdm', 'tqdm.tqdm', (['v_loader', "('%10s' * 2 % ('acc@1', 'acc@5'))"], {}), "(v_loader, '%10s' * 2 % ('acc@1', 'acc@5'))\n", (4022, 4065), False, 'import tqdm\n'), ((596, 621), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (619, 621), False, 'import torch\n'), ((4200, 4224), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4222, 4224), False, 'import torch\n'), ((5047, 5063), 'nets.nn.MobileNetV3', 'nn.MobileNetV3', ([], {}), '()\n', (5061, 5063), False, 'from nets import nn\n')]
|
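util.accuracy above is project-specific; a common top-k accuracy sketch that it presumably resembles (the tensor shapes and random inputs below are illustrative):

import torch

def topk_accuracy(output, target, top_k=(1, 5)):
    max_k = max(top_k)
    _, pred = output.topk(max_k, dim=1, largest=True, sorted=True)  # (N, max_k)
    correct = pred.t().eq(target.view(1, -1))                      # (max_k, N)
    return [correct[:k].reshape(-1).float().sum() * 100.0 / target.size(0) for k in top_k]

logits = torch.randn(8, 1000)
labels = torch.randint(0, 1000, (8,))
acc1, acc5 = topk_accuracy(logits, labels)
print(acc1.item(), acc5.item())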
"""
MAMBA coil
==========
Compact example of a biplanar coil producing a homogeneous field in a number of target
regions arranged in a grid. Meant to demonstrate the flexibility in target choice, inspired by the
"multiple-acquisition micro B(0) array" (MAMBA) technique; see https://doi.org/10.1002/mrm.10464
"""
import numpy as np
from mayavi import mlab
import trimesh
from bfieldtools.mesh_conductor import MeshConductor
from bfieldtools.coil_optimize import optimize_streamfunctions
from bfieldtools.contour import scalar_contour
from bfieldtools.viz import plot_3d_current_loops
from bfieldtools.utils import combine_meshes, load_example_mesh
# Load simple plane mesh that is centered on the origin
planemesh = load_example_mesh("10x10_plane_hires")
# Specify coil plane geometry
center_offset = np.array([0, 0, 0])
standoff = np.array([0, 1.5, 0])
# Create coil plane pairs
coil_plus = trimesh.Trimesh(
planemesh.vertices + center_offset + standoff, planemesh.faces, process=False
)
coil_minus = trimesh.Trimesh(
planemesh.vertices + center_offset - standoff, planemesh.faces, process=False
)
joined_planes = combine_meshes((coil_plus, coil_minus))
# Create mesh class object
coil = MeshConductor(mesh_obj=joined_planes, fix_normals=True, basis_name="inner")
#%%
# Set up target and stray field points. Here, the target points are on a planar
# 4x4 grid slightly smaller than the coil dimensions.
center = np.array([0, 0, 0])
sidelength = 0.5
n = 4
height = 0.1
n_height = 2
xx = np.linspace(-sidelength / 2, sidelength / 2, n)
yy = np.linspace(-height / 2, height / 2, n_height)
zz = np.linspace(-sidelength / 2, sidelength / 2, n)
X, Y, Z = np.meshgrid(xx, yy, zz, indexing="ij")
x = X.ravel()
y = Y.ravel()
z = Z.ravel()
target_points = np.array([x, y, z]).T
grid_target_points = list()
target_field = list()
hori_offsets = [-3, -1, 1, 3]
vert_offsets = [-3, -1, 1, 3]
for i, offset_x in enumerate(hori_offsets):
for j, offset_y in enumerate(vert_offsets):
grid_target_points.append(target_points + np.array([offset_x, 0, offset_y]))
target_field.append((i + j - 3) * np.ones((len(target_points),)))
target_points = np.asarray(grid_target_points).reshape((-1, 3))
target_field = np.asarray(target_field).reshape((-1,))
target_field = np.array(
[np.zeros((len(target_field),)), target_field, np.zeros((len(target_field),))]
).T
target_abs_error = np.zeros_like(target_field)
target_abs_error[:, 1] += 0.1
target_abs_error[:, 0::2] += 0.1
#%%
# Plot target points and mesh
coil.plot_mesh(opacity=0.1)
mlab.quiver3d(*target_points.T, *target_field.T)
#%%
# Compute coupling matrix that is used to compute the generated magnetic field, create field specification
target_spec = {
"coupling": coil.B_coupling(target_points),
"abs_error": target_abs_error,
"target": target_field,
}
#%%
# Run QP solver, plot result
import mosek
coil.s, prob = optimize_streamfunctions(
coil,
[target_spec],
objective="minimum_inductive_energy",
solver="MOSEK",
solver_opts={"mosek_params": {mosek.iparam.num_threads: 8}},
)
coil.s.plot()
coil.s.discretize(N_contours=10).plot_loops()
|
[
"trimesh.Trimesh",
"numpy.meshgrid",
"numpy.zeros_like",
"mayavi.mlab.quiver3d",
"bfieldtools.coil_optimize.optimize_streamfunctions",
"numpy.asarray",
"bfieldtools.utils.combine_meshes",
"numpy.array",
"bfieldtools.utils.load_example_mesh",
"numpy.linspace",
"bfieldtools.mesh_conductor.MeshConductor"
] |
[((734, 772), 'bfieldtools.utils.load_example_mesh', 'load_example_mesh', (['"""10x10_plane_hires"""'], {}), "('10x10_plane_hires')\n", (751, 772), False, 'from bfieldtools.utils import combine_meshes, load_example_mesh\n'), ((820, 839), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (828, 839), True, 'import numpy as np\n'), ((851, 872), 'numpy.array', 'np.array', (['[0, 1.5, 0]'], {}), '([0, 1.5, 0])\n', (859, 872), True, 'import numpy as np\n'), ((912, 1011), 'trimesh.Trimesh', 'trimesh.Trimesh', (['(planemesh.vertices + center_offset + standoff)', 'planemesh.faces'], {'process': '(False)'}), '(planemesh.vertices + center_offset + standoff, planemesh.\n faces, process=False)\n', (927, 1011), False, 'import trimesh\n'), ((1027, 1126), 'trimesh.Trimesh', 'trimesh.Trimesh', (['(planemesh.vertices + center_offset - standoff)', 'planemesh.faces'], {'process': '(False)'}), '(planemesh.vertices + center_offset - standoff, planemesh.\n faces, process=False)\n', (1042, 1126), False, 'import trimesh\n'), ((1145, 1184), 'bfieldtools.utils.combine_meshes', 'combine_meshes', (['(coil_plus, coil_minus)'], {}), '((coil_plus, coil_minus))\n', (1159, 1184), False, 'from bfieldtools.utils import combine_meshes, load_example_mesh\n'), ((1220, 1295), 'bfieldtools.mesh_conductor.MeshConductor', 'MeshConductor', ([], {'mesh_obj': 'joined_planes', 'fix_normals': '(True)', 'basis_name': '"""inner"""'}), "(mesh_obj=joined_planes, fix_normals=True, basis_name='inner')\n", (1233, 1295), False, 'from bfieldtools.mesh_conductor import MeshConductor\n'), ((1445, 1464), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1453, 1464), True, 'import numpy as np\n'), ((1521, 1568), 'numpy.linspace', 'np.linspace', (['(-sidelength / 2)', '(sidelength / 2)', 'n'], {}), '(-sidelength / 2, sidelength / 2, n)\n', (1532, 1568), True, 'import numpy as np\n'), ((1574, 1620), 'numpy.linspace', 'np.linspace', (['(-height / 2)', '(height / 2)', 'n_height'], {}), '(-height / 2, height / 2, n_height)\n', (1585, 1620), True, 'import numpy as np\n'), ((1626, 1673), 'numpy.linspace', 'np.linspace', (['(-sidelength / 2)', '(sidelength / 2)', 'n'], {}), '(-sidelength / 2, sidelength / 2, n)\n', (1637, 1673), True, 'import numpy as np\n'), ((1684, 1722), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy', 'zz'], {'indexing': '"""ij"""'}), "(xx, yy, zz, indexing='ij')\n", (1695, 1722), True, 'import numpy as np\n'), ((2424, 2451), 'numpy.zeros_like', 'np.zeros_like', (['target_field'], {}), '(target_field)\n', (2437, 2451), True, 'import numpy as np\n'), ((2578, 2626), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['*target_points.T', '*target_field.T'], {}), '(*target_points.T, *target_field.T)\n', (2591, 2626), False, 'from mayavi import mlab\n'), ((2935, 3104), 'bfieldtools.coil_optimize.optimize_streamfunctions', 'optimize_streamfunctions', (['coil', '[target_spec]'], {'objective': '"""minimum_inductive_energy"""', 'solver': '"""MOSEK"""', 'solver_opts': "{'mosek_params': {mosek.iparam.num_threads: 8}}"}), "(coil, [target_spec], objective=\n 'minimum_inductive_energy', solver='MOSEK', solver_opts={'mosek_params':\n {mosek.iparam.num_threads: 8}})\n", (2959, 3104), False, 'from bfieldtools.coil_optimize import optimize_streamfunctions\n'), ((1783, 1802), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1791, 1802), True, 'import numpy as np\n'), ((2187, 2217), 'numpy.asarray', 'np.asarray', (['grid_target_points'], {}), '(grid_target_points)\n', (2197, 2217), True, 'import numpy as np\n'), ((2250, 
2274), 'numpy.asarray', 'np.asarray', (['target_field'], {}), '(target_field)\n', (2260, 2274), True, 'import numpy as np\n'), ((2061, 2094), 'numpy.array', 'np.array', (['[offset_x, 0, offset_y]'], {}), '([offset_x, 0, offset_y])\n', (2069, 2094), True, 'import numpy as np\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
class Queue(object):
''' a simple queue realized with numpy arrays'''
def __init__(self, max_values, **kwargs):
''' initialize the queue
@param max_values : maximum size of the queue
@param smoothing_alpha: average = old_average * (1 - smoothing_alpha) + new_value * smoothing_alpha
@param reset : define reset strategy:
True = all values are deleted
False = no value is deleted
(int) = the number of values to delete
'''
self.max_values = max_values # size of queue (self.values)
self.current_values = 0 # number of currently stored values
self.values = np.zeros(max_values) # stores values for average calculation (queue)
self.alpha = kwargs.get('smoothing_alpha', 1.0) # smoothing alpha (default=1.0 (off))
self._set_reset(**kwargs)
def add(self, value):
''' add a value to the queue, e.g., max_values=3: add(0) turns [1 2 3] into [0 1 2] '''
value = self._smooth(value)
self._add(value)
return self
def _set_reset(self, **kwargs):
''' limit reach behavior (complete reset, no reset, partly reset) '''
reset = kwargs.get('reset', True)
if isinstance(reset, bool):
if reset: self.reset_num_elements = 0 # complete reset
else : self.reset_num_elements = -1 # do not reset
else:
self.reset_num_elements = reset # define the number of elements to delete
def reset(self):
''' reset values '''
if self.reset_num_elements == -1: return # no reset
if self.reset_num_elements == 0 : # complete reset
self.values = np.zeros(self.max_values)
self.current_values = 0
else:
self.current_values = self.current_values - self.reset_num_elements
def _data(self, start=-1, end=-1):
''' get (slice) data '''
first = 0
last = self.current_values
if start > -1 and start < self.current_values: first = start
if end > -1 and end <= self.current_values: last = end
data = self.values[first: last]
return data
def _add(self, value):
''' add a value to queue '''
self.values[1:] = self.values[:-1]
self.values[0] = value
self.current_values = min(self.current_values + 1, self.max_values)
def _smooth(self, value):
''' sliding/incremental average '''
try : self.last = (1.0 - self.alpha) * self.last + self.alpha * value
except: self.last = value
return self.last
|
[
"numpy.zeros"
] |
[((1291, 1311), 'numpy.zeros', 'np.zeros', (['max_values'], {}), '(max_values)\n', (1299, 1311), True, 'import numpy as np\n'), ((2309, 2334), 'numpy.zeros', 'np.zeros', (['self.max_values'], {}), '(self.max_values)\n', (2317, 2334), True, 'import numpy as np\n')]
|
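A short worked example of the incremental smoothing implemented in Queue._smooth above: with smoothing_alpha = 0.5, feeding the values 10, 20, 30 produces 10.0, 15.0, 22.5.

alpha = 0.5
last = None
for value in (10.0, 20.0, 30.0):
    # first value seeds the average; afterwards: last = (1 - alpha) * last + alpha * value
    last = value if last is None else (1.0 - alpha) * last + alpha * value
    print(last)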
# Author: <NAME>
# Project: Image/video auto-captioning using Deep Learning
# This script is executed when the user has uploaded a video to the library to be
# processed. The processing involves the following steps: converting the video to mp4
# format if it is not in that format already, extracting key frames and getting their
# times, generating a caption for each key frame, and saving each caption and its time
# to a text file.
import glob
import subprocess
import os
import shutil
import sys
import numpy as np
from pickle import load
def convert_to_mp4(video_name):
"""
Uses an FFmpeg command to convert the input video to mp4 format
which is then placed in the current working directory.
Args:
video_name: the name and format of the input video
"""
new_video_name = str(video_name.split(".")[0])
new_video_name = new_video_name + ".mp4"
# Run the FFmpeg command to convert the video to mp4 format.
command = ['ffmpeg', '-i', video_name, new_video_name, '-hide_banner']
subprocess.run(command)
def get_keyframes(video_path):
"""
Uses an FFmpeg command to extract key frames from the specified video file.
Saves key frame times to a file.
Args:
video_path: the path to the video file to extract key frames from
Returns:
key_frame_dir: the directory storing the extracted key frames
key_frame_times: a list of the times associated with the key frames
"""
scene_change_threshold = '0.4'
# The metadata of the key frames including their times in seconds will be stored in this text file.
metadata_file = "key_frame_metadata.txt"
print("Video path before splitting:",video_path)
# Store the actual video name without the path and the file extension.
video_name = video_path.split("\\")[-1]
video_name = video_name.split(".")[0]
print("Video name:",video_name)
# If the directory for storing the key frames does not exist, create it outside the video_library directory.
key_frame_dir = video_name + '_key_frames'
if not os.path.exists(key_frame_dir):
os.makedirs(key_frame_dir)
output_file = key_frame_dir + '/img%03d.jpg'
# Run the FFmpeg command to extract video key frames and store their metadata including their times in the text file.
command = ['ffmpeg', '-i', video_path, '-filter:v', "select='gt(scene," + str(scene_change_threshold) + ")',metadata=print:file=" + metadata_file + "",'-vsync', '0', output_file]
subprocess.run(command)
# This array will store the time in seconds of each key frame.
key_frame_times = []
# Open the text file and read every line.
with open(metadata_file, encoding='utf-8') as f:
lines = f.readlines()
# Get every second line as only those contain the times of key frames.
for i in range(0, len(lines), 2):
# Each line contains three elements: frame, pts, and pts_time.
# Split around spaces and get pts_time which is the last element.
split_line = lines[i].split(' ')
pts_time = split_line[len(split_line) - 1]
# Remove the string "pts_time:" and strip new line characters from pts_time,
# so only the actual time in seconds will remain.
pts_time = pts_time[len('pts_time:') :].rstrip('\n')
# Add 0.4s to pts_time so the markers are not placed on the progress bar early.
pts_time = float(pts_time) + 0.4
pts_time = str(pts_time)
print(pts_time)
# Make a copy of pts_time
original_pts_time = pts_time
# Convert pts_time to this format: minutes : seconds. For example: 75 becomes 1:15
# The video player uses this time format.
minutes = int(float(pts_time) / 60)
seconds = float(original_pts_time) - (minutes * 60)
converted_time = str(minutes) + ":" + str(seconds)
key_frame_times.append(converted_time)
print(key_frame_times)
return key_frame_dir, key_frame_times
def get_img_to_cap_dict(filename):
"""
Opens the file storing image to caption mappings.
Creates a dictionary and adds the mappings as
key-value pairs to it.
Args:
filename: the name of the file storing image to caption mappings
Returns:
img_to_cap_dict: the dictionary storing image to caption mappings
"""
file = open(filename, 'r')
text = file.read()
file.close()
img_to_cap_dict = dict()
for line in text.split('\n'):
# Split each line by whitespace to get the image name and the caption.
line_tokens = line.split()
image_name, caption = line_tokens[0], line_tokens[1:]
# Produce a string of the caption tokens.
caption = ' '.join(caption)
# If the image name is not in the dictionary yet,
# create a list to add captions of this image.
if image_name not in img_to_cap_dict:
img_to_cap_dict[image_name] = []
img_to_cap_dict[image_name].append(caption)
return img_to_cap_dict
def get_captions_from_dict(img_to_cap_dict):
"""
Extracts captions from the img_to_cap_dict dictionary
and adds them to a list.
Args:
img_to_cap_dict: the dictionary storing image to caption mappings
Returns:
captions: a list storing all captions extracted from the dictionary
"""
captions = []
for image_name, caption_list in img_to_cap_dict.items():
for cap in caption_list:
captions.append(cap)
return captions
def load_image(image_path):
"""
Loads an image, resizes it, and adapts it to the format
which the CNN requires.
Args:
image_path: the path to the image
Returns:
img_array: the image in the preprocessed format
"""
# Load the image and resize it to what the model expects.
# If using VGG19: target_size=(224, 224))
# If using Xception: target_size=(299, 299))
image = tf.keras.preprocessing.image.load_img(image_path, target_size=(299, 299))
# Convert image pixels to a numpy array.
img_array = tf.keras.preprocessing.image.img_to_array(image)
# Reshape the image data for the model.
# Reshape it to what the model expects as input.
img_array = img_array.reshape((1, img_array.shape[0], img_array.shape[1], img_array.shape[2]))
# Adapt the image format to what the model requires.
img_array = tf.keras.applications.xception.preprocess_input(img_array)
return img_array
def extract_features(image):
"""
Extracts the features of the input image.
Returns the extracted features.
Args:
image: the image to extract features from
Returns:
features: the extracted image features
"""
# Load the CNN pre-trained on ImageNet images.
model = tf.keras.applications.xception.Xception(weights='imagenet')
# Remove the last layer (softmax output layer).
# The last Dense layer will be the new output layer.
model.layers.pop()
model = tf.keras.models.Model(inputs=model.inputs, outputs=model.layers[-1].output)
# Load the image and adapt it to the format that the model expects.
img = load_image(image)
# Get the image features.
features = model.predict(img, verbose=0)
return features
def index_to_word(searched_index, tokenizer):
"""
Takes an input integer index and returns the word it is mapped to.
Args:
searched_index: the integer index of the searched word
tokenizer: the tokenizer which contains the word-index mappings
Returns:
word: the actual string word that the index is mapped to
"""
for word, integer in tokenizer.word_index.items():
if integer == searched_index:
return word
return None
def generate_caption(model, tokenizer, image, max_length):
"""
Generates a caption for the input image.
Args:
model: the trained image captioning model that generates the caption words
tokenizer: the tokenizer trained on the captions of the training set
image: the features of the input image
max_length: the maximum length of the caption to be generated
Returns:
the generated caption without the special start and end tokens
"""
# Begin with the start token, and append words to the input text.
input_text = ["<start>"]
# Repeatedly add words to the caption sentence.
for i in range(max_length):
# Encode the input text into integers.
# Create a word to integer index mapping.
encoded_text_sequence = tokenizer.texts_to_sequences([input_text])[0]
# Pad each input text sequence to the same length.
encoded_text_sequence = tf.keras.preprocessing.sequence.pad_sequences([encoded_text_sequence], maxlen=max_length)
# Predict the upcoming word in the caption sentence.
# This returns an array of probabilities for each vocabulary word.
predictions = model.predict([image, encoded_text_sequence], verbose=0)
# Get the index of the largest probability - the index of the most likely word.
index = np.argmax(predictions)
# Get the word associated with the index.
word = index_to_word(index, tokenizer)
# If the index cannot be mapped to a word, stop.
if word is None:
break
# Add the textual word as input for generating the next word.
input_text.append(word)
# If the end of the caption is predicted, stop.
if word == '<end>':
break
# Exclude the start and end caption markers from the generated caption.
final_caption = []
for w in input_text:
if w != '<start>' and w != '<end>':
final_caption.append(w)
# Create a string of the caption words.
caption_string = ' '.join(final_caption[:])
return caption_string
def get_generated_captions(key_frame_directory):
"""
Loads the specified trained image captioning model,
then loads each key frame and generates a caption for it.
Args:
key_frame_directory: the directory where all key frames are stored
Returns:
generated_captions: the list of generated captions for the key frames
"""
tokenizer = load(open('train_tokenizer.pkl', 'rb'))
# Get the maximum caption length. This consists of the following steps:
# Load the image to caption mappings of the training set.
train_img_to_cap_dict = get_img_to_cap_dict('train_captions.txt')
# Extract all training captions from the train_img_to_cap_dict dictionary and store them in a list.
train_captions = get_captions_from_dict(train_img_to_cap_dict)
# Get the number of words in the longest caption of the training set.
# The generated caption for the key frames will be of this length maximum.
train_caps_max_length = max(len(cap.split()) for cap in train_captions)
print("Max caption length:", train_caps_max_length)
# Load the trained image captioning model.
model = tf.keras.models.load_model("Optimised_MSCOCO_FOR_DEMO.h5")
print("Model loaded")
generated_captions = []
# Load the key frame images and generate a caption for each.
directory = key_frame_directory
images = [os.path.join(directory, img) for img in os.listdir(directory)]
print("Generating captions for key frames...")
for image in images:
print("Image:", image)
image_features = extract_features(image)
caption = generate_caption(model, tokenizer, image_features, train_caps_max_length)
print("Generated caption:")
print(caption)
generated_captions.append(caption)
return generated_captions
if __name__ == '__main__':
# Find the most recently added file to the video library.
list_of_videos = glob.glob("video_library/*")
latest_video = str(max(list_of_videos, key=os.path.getctime))
print("Latest video:",latest_video)
path_to_latest_video = latest_video
# Store the actual video name without the path.
video_name = path_to_latest_video.split("\\")[-1]
# If the video file is not in mp4 format, convert it to that format
# and store it in the video_library directory.
file_extension = video_name.split(".")[1]
if file_extension != 'mp4':
convert_to_mp4(path_to_latest_video)
new_video_path = path_to_latest_video.split(".")[0] + ".mp4"
# Continuously check if the converted video file exists.
# If so, the loop is finished.
while not os.path.isfile(new_video_path):
print("Waiting for",new_video_path)
continue
# Delete the video with the non-mp4 extension.
os.remove(path_to_latest_video)
# Update the path to the video with the mp4 extension.
path_to_latest_video = new_video_path
# Update the name of the video with the mp4 extension.
video_name = path_to_latest_video.split("\\")[-1]
# Store the actual video name without the file extension.
video_name = video_name.split(".")[0]
captions_file = "captions/" + video_name + ".txt"
# If the caption file, associated with the video, does not exist, start processing the video.
if(not(os.path.isfile(captions_file))):
print("Processing video...")
key_frame_dir, key_frame_times = get_keyframes(path_to_latest_video)
# Check if there are extracted key frames.
# The first key frame will be the thumbnail of the video.
# It is copied to the "thumbnails" directory,
# and is given the same name as the video.
if len(os.listdir(key_frame_dir)) > 0:
thumbnail_image = os.listdir(key_frame_dir)[0]
source_location = key_frame_dir + "/" + thumbnail_image
shutil.copy(source_location, "thumbnails")
os.rename("thumbnails/" + thumbnail_image, "thumbnails/" + video_name + ".jpg")
print("Generating captions...")
# Make sure the Python script can access all installed packages
# from the correct location. This sys.path.insert must be here
# to ensure that the script works.
sys.path.insert(0, 'c:/users/albert jk/desktop/newenvs/newenvs/lib/site-packages')
print(sys.path)
# Due to running this Python script in a PHP script, this import is here
# just before tf functions are used.
import tensorflow as tf
# Get the generated caption for each key frame.
generated_captions = get_generated_captions(key_frame_dir)
# After the captions are generated, the directory storing the key frames
# and the metadata file is deleted.
shutil.rmtree(key_frame_dir)
os.remove("key_frame_metadata.txt")
# Write the time of each key frame to a text file.
# There are as many key frames as generated captions.
f = open(captions_file, "w+")
print("Writing captions and start times to file...")
for i in range (len(key_frame_times)):
print(generated_captions[i] + "#" + key_frame_times[i] + "\n")
f.write(generated_captions[i] + "#" + key_frame_times[i] + "\n")
f.close()
print("Finished")
|
[
"os.remove",
"numpy.argmax",
"tensorflow.keras.applications.xception.preprocess_input",
"os.path.isfile",
"glob.glob",
"shutil.rmtree",
"os.path.join",
"shutil.copy",
"os.path.exists",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.models.load_model",
"tensorflow.keras.preprocessing.image.img_to_array",
"os.rename",
"tensorflow.keras.models.Model",
"os.listdir",
"tensorflow.keras.applications.xception.Xception",
"subprocess.run",
"os.makedirs",
"sys.path.insert"
] |
[((1003, 1026), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (1017, 1026), False, 'import subprocess\n'), ((2390, 2413), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (2404, 2413), False, 'import subprocess\n'), ((5547, 5620), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', (['image_path'], {'target_size': '(299, 299)'}), '(image_path, target_size=(299, 299))\n', (5584, 5620), True, 'import tensorflow as tf\n'), ((5677, 5725), 'tensorflow.keras.preprocessing.image.img_to_array', 'tf.keras.preprocessing.image.img_to_array', (['image'], {}), '(image)\n', (5718, 5725), True, 'import tensorflow as tf\n'), ((5982, 6040), 'tensorflow.keras.applications.xception.preprocess_input', 'tf.keras.applications.xception.preprocess_input', (['img_array'], {}), '(img_array)\n', (6029, 6040), True, 'import tensorflow as tf\n'), ((6338, 6397), 'tensorflow.keras.applications.xception.Xception', 'tf.keras.applications.xception.Xception', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (6377, 6397), True, 'import tensorflow as tf\n'), ((6532, 6607), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'model.inputs', 'outputs': 'model.layers[-1].output'}), '(inputs=model.inputs, outputs=model.layers[-1].output)\n', (6553, 6607), True, 'import tensorflow as tf\n'), ((10228, 10286), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""Optimised_MSCOCO_FOR_DEMO.h5"""'], {}), "('Optimised_MSCOCO_FOR_DEMO.h5')\n", (10254, 10286), True, 'import tensorflow as tf\n'), ((10957, 10985), 'glob.glob', 'glob.glob', (['"""video_library/*"""'], {}), "('video_library/*')\n", (10966, 10985), False, 'import glob\n'), ((1982, 2011), 'os.path.exists', 'os.path.exists', (['key_frame_dir'], {}), '(key_frame_dir)\n', (1996, 2011), False, 'import os\n'), ((2015, 2041), 'os.makedirs', 'os.makedirs', (['key_frame_dir'], {}), '(key_frame_dir)\n', (2026, 2041), False, 'import os\n'), ((8095, 8188), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['[encoded_text_sequence]'], {'maxlen': 'max_length'}), '([encoded_text_sequence],\n maxlen=max_length)\n', (8140, 8188), True, 'import tensorflow as tf\n'), ((8476, 8498), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (8485, 8498), True, 'import numpy as np\n'), ((10443, 10471), 'os.path.join', 'os.path.join', (['directory', 'img'], {}), '(directory, img)\n', (10455, 10471), False, 'import os\n'), ((11767, 11798), 'os.remove', 'os.remove', (['path_to_latest_video'], {}), '(path_to_latest_video)\n', (11776, 11798), False, 'import os\n'), ((12269, 12298), 'os.path.isfile', 'os.path.isfile', (['captions_file'], {}), '(captions_file)\n', (12283, 12298), False, 'import os\n'), ((10483, 10504), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (10493, 10504), False, 'import os\n'), ((11629, 11659), 'os.path.isfile', 'os.path.isfile', (['new_video_path'], {}), '(new_video_path)\n', (11643, 11659), False, 'import os\n'), ((12763, 12805), 'shutil.copy', 'shutil.copy', (['source_location', '"""thumbnails"""'], {}), "(source_location, 'thumbnails')\n", (12774, 12805), False, 'import shutil\n'), ((12809, 12888), 'os.rename', 'os.rename', (["('thumbnails/' + thumbnail_image)", "('thumbnails/' + video_name + '.jpg')"], {}), "('thumbnails/' + thumbnail_image, 'thumbnails/' + video_name + '.jpg')\n", (12818, 12888), False, 'import os\n'), ((13110, 13196), 
'sys.path.insert', 'sys.path.insert', (['(0)', '"""c:/users/albert jk/desktop/newenvs/newenvs/lib/site-packages"""'], {}), "(0,\n 'c:/users/albert jk/desktop/newenvs/newenvs/lib/site-packages')\n", (13125, 13196), False, 'import sys\n'), ((13592, 13620), 'shutil.rmtree', 'shutil.rmtree', (['key_frame_dir'], {}), '(key_frame_dir)\n', (13605, 13620), False, 'import shutil\n'), ((13624, 13659), 'os.remove', 'os.remove', (['"""key_frame_metadata.txt"""'], {}), "('key_frame_metadata.txt')\n", (13633, 13659), False, 'import os\n'), ((12618, 12643), 'os.listdir', 'os.listdir', (['key_frame_dir'], {}), '(key_frame_dir)\n', (12628, 12643), False, 'import os\n'), ((12672, 12697), 'os.listdir', 'os.listdir', (['key_frame_dir'], {}), '(key_frame_dir)\n', (12682, 12697), False, 'import os\n')]
|
from functools import partial
import numpy as np
import pandas as pd
import chainer
from chainer import functions
from chainer import functions as F
from chainer.links import Linear
from chainer.dataset import to_device
from lib.graph import Graph
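# zero_plus is a shifted softplus: softplus(0) = ln(2) ≈ 0.6931472, so zero_plus(0) ≈ 0 (a smooth, ReLU-like activation).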
def zero_plus(x):
return F.softplus(x) - 0.6931472
class ElementLayerNormalization(chainer.links.LayerNormalization):
def __call__(self, x):
shape = x.shape
h = F.reshape(x, (-1, shape[-1]))
h = super(ElementLayerNormalization, self).__call__(h)
h = F.reshape(h, shape)
return h
class ElementLinear(chainer.links.Linear):
def __call__(self, x):
shape = x.shape
h = F.reshape(x, (-1, shape[-1]))
h = super(ElementLinear, self).__call__(h)
shape_after = shape[:-1] + (self.out_size,)
h = F.reshape(h, shape_after)
return h
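# EdgeUpdate: refreshes each edge feature from the states of its two endpoint atoms and the current edge, with a residual connection.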
class EdgeUpdate(chainer.Chain):
def __init__(self, C):
super(EdgeUpdate, self).__init__()
with self.init_scope():
self.W1 = ElementLinear(2 * C, nobias=True)
self.W2 = ElementLinear(C, nobias=True)
self.bn = ElementLayerNormalization(C)
def __call__(self, edge, h):
num_atom = edge.shape[1]
h1 = F.tile(F.expand_dims(h, 1), (1, num_atom, 1, 1))
h2 = F.tile(F.expand_dims(h, 2), (1, 1, num_atom, 1))
concat = F.concat([h1, h2, edge], axis=3)
add = zero_plus(self.W2(zero_plus(self.W1(concat))))
return edge + self.bn(add)
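# InteractionNetwork: a message-passing step that sums transformed edge messages over neighbours and updates the node states residually.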
class InteractionNetwork(chainer.Chain):
def __init__(self, C):
super(InteractionNetwork, self).__init__()
with self.init_scope():
self.W1 = ElementLinear(C, nobias=True)
self.W2 = ElementLinear(C, nobias=True)
self.W3 = ElementLinear(C, nobias=True)
self.W4 = ElementLinear(C, nobias=True)
self.W5 = ElementLinear(C, nobias=True)
self.bn = ElementLayerNormalization(C)
def __call__(self, h, edge):
mt = zero_plus(self.W3(zero_plus(self.W2(edge))))
mt = self.W1(h) * F.sum(mt, axis=1)
h_add = self.W5(zero_plus(self.W4(mt)))
return h + self.bn(h_add)
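# EdgeUpdateNet: embeds atoms, builds distance/bond edge features, alternates EdgeUpdate and InteractionNetwork layers,
# and regresses the four coupling components (fc, sd, pso, dso) for each atom pair.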
class EdgeUpdateNet(chainer.Chain):
def __init__(self, num_layer, node_dim, edge_dim, gpu=0):
super(EdgeUpdateNet, self).__init__()
self.num_layer = num_layer
self.edge_dim = edge_dim
self.to_xpu = partial(to_device, gpu)
with self.init_scope():
self.gn = ElementLinear(node_dim)
for layer in range(self.num_layer):
self.add_link('eup{}'.format(layer), EdgeUpdate(edge_dim))
self.add_link('int{}'.format(layer), InteractionNetwork(node_dim))
self.interaction1 = Linear(512)
self.interaction2 = Linear(512)
self.interaction3 = Linear(4)
def __call__(self, list_g, list_y):
out = self.predict(list_g, list_y)
yv = np.concatenate([y[['fc', 'sd', 'pso', 'dso']].values.astype(np.float32) for y in list_y], axis=0)
yv_gpu = self.to_xpu(yv)
return F.mean_absolute_error(out, yv_gpu) * 4 * len(list_y)
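    # forward: builds edge features by expanding interatomic distances in a Gaussian RBF basis (centers on [0, 10])
    # and concatenating the per-pair bond features.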
def forward(self, list_g):
input_array = F.stack([self.to_xpu(g.get_atoms_array().astype(np.float32)) for g in list_g], 0)
dists = self.to_xpu(np.stack([g.get_dists() for g in list_g], 0).astype(np.float32))
num_atom = dists.shape[1]
num_rbf = self.edge_dim
gamma = 20.0
list_dists_rbf = list()
embedlist = self.to_xpu(np.linspace(0, 10, num_rbf - 8, dtype=self.xp.float32))
for g in list_g:
dist = F.expand_dims(self.to_xpu(g.get_dists()), 0)
dists_rbf = functions.reshape(dist, (1, num_atom, num_atom, 1))
dists_rbf = functions.broadcast_to(dists_rbf, (1, num_atom, num_atom, num_rbf - 8))
dists_rbf = functions.exp(- gamma * (dists_rbf - embedlist) ** 2)
bond_feature = F.expand_dims(self.to_xpu(g.get_bond_features()), 0)
list_dists_rbf.append(F.concat([dists_rbf, bond_feature], axis=3))
e = F.concat(list_dists_rbf, axis=0)
h = self.gn(input_array)
for layer in range(self.num_layer):
e = self['eup{}'.format(layer)](e, h)
h = self['int{}'.format(layer)](h, e)
h_out = F.concat((h, input_array), axis=2)
return h_out, e
def predict(self, list_g, list_y):
out, ko = self.forward(list_g)
list_concat1 = list()
list_concat2 = list()
for i, (g, y) in enumerate(zip(list_g, list_y)):
d = self.to_xpu(g.get_dists())
dists = F.expand_dims(d[y['atom_index_0'].values, y['atom_index_1'].values], 1)
s = F.concat((out[i, y['atom_index_0'].values, :],
out[i, y['atom_index_1'].values, :],
ko[i, y['atom_index_0'].values, y['atom_index_1'].values, :],
ko[i, y['atom_index_1'].values, y['atom_index_0'].values, :],
dists), axis=1)
list_concat1.append(s)
s = F.concat((out[i, y['atom_index_1'].values, :],
out[i, y['atom_index_0'].values, :],
ko[i, y['atom_index_1'].values, y['atom_index_0'].values, :],
ko[i, y['atom_index_0'].values, y['atom_index_1'].values, :],
dists), axis=1)
list_concat2.append(s)
concat1 = F.concat(list_concat1, axis=0)
concat2 = F.concat(list_concat2, axis=0)
h11 = F.leaky_relu(self.interaction1(concat1))
h12 = F.leaky_relu(self.interaction2(h11))
out1 = self.interaction3(h12)
h21 = F.leaky_relu(self.interaction1(concat2))
h22 = F.leaky_relu(self.interaction2(h21))
out2 = self.interaction3(h22)
return (out1 + out2) / 2.0
def main():
structures = pd.read_csv('../../../input/structures.csv')
strs_gp = structures.groupby('molecule_name')
bonds = pd.read_csv('../../../input/bonds.csv')
bonds_gp = bonds.groupby('molecule_name')
train = pd.read_csv('../../../input/train2.csv')
train_gp = train.groupby('molecule_name')
train_charges = pd.read_csv('../../../input/train_ob_charges.csv')
train_charges_gp = train_charges.groupby('molecule_name')
list_atoms = list(set(structures['atom']))
print(list_atoms)
model = EdgeUpdateNet(num_layer=10, node_dim=512, edge_dim=512)
model.to_gpu()
target1 = 'dsgdb9nsd_000008'
g1 = Graph(strs_gp.get_group(target1),
bonds_gp.get_group(target1),
list_atoms,
train_charges_gp.get_group(target1))
y1 = train_gp.get_group(target1)
out = model([g1], [y1])
print(out)
target2 = 'dsgdb9nsd_000010'
g2 = Graph(strs_gp.get_group(target2),
bonds_gp.get_group(target2),
list_atoms,
train_charges_gp.get_group(target2))
y2 = train_gp.get_group(target2)
out = model([g2], [y2])
print(out)
out = model([g1, g2], [y1, y2])
print(out)
if __name__ == '__main__':
main()
|
[
"functools.partial",
"chainer.functions.softplus",
"pandas.read_csv",
"chainer.functions.sum",
"chainer.functions.exp",
"chainer.functions.mean_absolute_error",
"chainer.functions.concat",
"chainer.functions.reshape",
"chainer.functions.expand_dims",
"numpy.linspace",
"chainer.functions.broadcast_to",
"chainer.links.Linear"
] |
[((5976, 6020), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/structures.csv"""'], {}), "('../../../input/structures.csv')\n", (5987, 6020), True, 'import pandas as pd\n'), ((6084, 6123), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/bonds.csv"""'], {}), "('../../../input/bonds.csv')\n", (6095, 6123), True, 'import pandas as pd\n'), ((6183, 6223), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/train2.csv"""'], {}), "('../../../input/train2.csv')\n", (6194, 6223), True, 'import pandas as pd\n'), ((6291, 6341), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/train_ob_charges.csv"""'], {}), "('../../../input/train_ob_charges.csv')\n", (6302, 6341), True, 'import pandas as pd\n'), ((281, 294), 'chainer.functions.softplus', 'F.softplus', (['x'], {}), '(x)\n', (291, 294), True, 'from chainer import functions as F\n'), ((441, 470), 'chainer.functions.reshape', 'F.reshape', (['x', '(-1, shape[-1])'], {}), '(x, (-1, shape[-1]))\n', (450, 470), True, 'from chainer import functions as F\n'), ((546, 565), 'chainer.functions.reshape', 'F.reshape', (['h', 'shape'], {}), '(h, shape)\n', (555, 565), True, 'from chainer import functions as F\n'), ((694, 723), 'chainer.functions.reshape', 'F.reshape', (['x', '(-1, shape[-1])'], {}), '(x, (-1, shape[-1]))\n', (703, 723), True, 'from chainer import functions as F\n'), ((839, 864), 'chainer.functions.reshape', 'F.reshape', (['h', 'shape_after'], {}), '(h, shape_after)\n', (848, 864), True, 'from chainer import functions as F\n'), ((1388, 1420), 'chainer.functions.concat', 'F.concat', (['[h1, h2, edge]'], {'axis': '(3)'}), '([h1, h2, edge], axis=3)\n', (1396, 1420), True, 'from chainer import functions as F\n'), ((2440, 2463), 'functools.partial', 'partial', (['to_device', 'gpu'], {}), '(to_device, gpu)\n', (2447, 2463), False, 'from functools import partial\n'), ((4136, 4168), 'chainer.functions.concat', 'F.concat', (['list_dists_rbf'], {'axis': '(0)'}), '(list_dists_rbf, axis=0)\n', (4144, 4168), True, 'from chainer import functions as F\n'), ((4365, 4399), 'chainer.functions.concat', 'F.concat', (['(h, input_array)'], {'axis': '(2)'}), '((h, input_array), axis=2)\n', (4373, 4399), True, 'from chainer import functions as F\n'), ((5538, 5568), 'chainer.functions.concat', 'F.concat', (['list_concat1'], {'axis': '(0)'}), '(list_concat1, axis=0)\n', (5546, 5568), True, 'from chainer import functions as F\n'), ((5587, 5617), 'chainer.functions.concat', 'F.concat', (['list_concat2'], {'axis': '(0)'}), '(list_concat2, axis=0)\n', (5595, 5617), True, 'from chainer import functions as F\n'), ((1267, 1286), 'chainer.functions.expand_dims', 'F.expand_dims', (['h', '(1)'], {}), '(h, 1)\n', (1280, 1286), True, 'from chainer import functions as F\n'), ((1329, 1348), 'chainer.functions.expand_dims', 'F.expand_dims', (['h', '(2)'], {}), '(h, 2)\n', (1342, 1348), True, 'from chainer import functions as F\n'), ((2102, 2119), 'chainer.functions.sum', 'F.sum', (['mt'], {'axis': '(1)'}), '(mt, axis=1)\n', (2107, 2119), True, 'from chainer import functions as F\n'), ((2783, 2794), 'chainer.links.Linear', 'Linear', (['(512)'], {}), '(512)\n', (2789, 2794), False, 'from chainer.links import Linear\n'), ((2827, 2838), 'chainer.links.Linear', 'Linear', (['(512)'], {}), '(512)\n', (2833, 2838), False, 'from chainer.links import Linear\n'), ((2871, 2880), 'chainer.links.Linear', 'Linear', (['(4)'], {}), '(4)\n', (2877, 2880), False, 'from chainer.links import Linear\n'), ((3565, 3619), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(num_rbf - 8)'], 
{'dtype': 'self.xp.float32'}), '(0, 10, num_rbf - 8, dtype=self.xp.float32)\n', (3576, 3619), True, 'import numpy as np\n'), ((3736, 3787), 'chainer.functions.reshape', 'functions.reshape', (['dist', '(1, num_atom, num_atom, 1)'], {}), '(dist, (1, num_atom, num_atom, 1))\n', (3753, 3787), False, 'from chainer import functions\n'), ((3812, 3883), 'chainer.functions.broadcast_to', 'functions.broadcast_to', (['dists_rbf', '(1, num_atom, num_atom, num_rbf - 8)'], {}), '(dists_rbf, (1, num_atom, num_atom, num_rbf - 8))\n', (3834, 3883), False, 'from chainer import functions\n'), ((3908, 3960), 'chainer.functions.exp', 'functions.exp', (['(-gamma * (dists_rbf - embedlist) ** 2)'], {}), '(-gamma * (dists_rbf - embedlist) ** 2)\n', (3921, 3960), False, 'from chainer import functions\n'), ((4687, 4758), 'chainer.functions.expand_dims', 'F.expand_dims', (["d[y['atom_index_0'].values, y['atom_index_1'].values]", '(1)'], {}), "(d[y['atom_index_0'].values, y['atom_index_1'].values], 1)\n", (4700, 4758), True, 'from chainer import functions as F\n'), ((4776, 5013), 'chainer.functions.concat', 'F.concat', (["(out[i, y['atom_index_0'].values, :], out[i, y['atom_index_1'].values, :],\n ko[i, y['atom_index_0'].values, y['atom_index_1'].values, :], ko[i, y[\n 'atom_index_1'].values, y['atom_index_0'].values, :], dists)"], {'axis': '(1)'}), "((out[i, y['atom_index_0'].values, :], out[i, y['atom_index_1'].\n values, :], ko[i, y['atom_index_0'].values, y['atom_index_1'].values, :\n ], ko[i, y['atom_index_1'].values, y['atom_index_0'].values, :], dists),\n axis=1)\n", (4784, 5013), True, 'from chainer import functions as F\n'), ((5156, 5393), 'chainer.functions.concat', 'F.concat', (["(out[i, y['atom_index_1'].values, :], out[i, y['atom_index_0'].values, :],\n ko[i, y['atom_index_1'].values, y['atom_index_0'].values, :], ko[i, y[\n 'atom_index_0'].values, y['atom_index_1'].values, :], dists)"], {'axis': '(1)'}), "((out[i, y['atom_index_1'].values, :], out[i, y['atom_index_0'].\n values, :], ko[i, y['atom_index_1'].values, y['atom_index_0'].values, :\n ], ko[i, y['atom_index_0'].values, y['atom_index_1'].values, :], dists),\n axis=1)\n", (5164, 5393), True, 'from chainer import functions as F\n'), ((3127, 3161), 'chainer.functions.mean_absolute_error', 'F.mean_absolute_error', (['out', 'yv_gpu'], {}), '(out, yv_gpu)\n', (3148, 3161), True, 'from chainer import functions as F\n'), ((4078, 4121), 'chainer.functions.concat', 'F.concat', (['[dists_rbf, bond_feature]'], {'axis': '(3)'}), '([dists_rbf, bond_feature], axis=3)\n', (4086, 4121), True, 'from chainer import functions as F\n')]
|
import os, sys
from os.path import join
ROOT_DIR = os.path.abspath(os.curdir)
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
sys.path.append(join(ROOT_DIR, 'opmatch'))
import match
import create_test_data
import numpy as np
from util import vis
X, y, ps = create_test_data.get_test_data(False, 30, 1, .15)
print(np.sum(y), 'exposed')
print(len(y)-np.sum(y), 'unexposed')
exp_unexp_dic = match.match(ps, y, 'variable')
vis.plot_matching(ps, exp_unexp_dic, save=True)
|
[
"sys.path.append",
"os.path.abspath",
"numpy.sum",
"util.vis.plot_matching",
"match.match",
"os.path.join",
"create_test_data.get_test_data"
] |
[((51, 77), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (66, 77), False, 'import os, sys\n'), ((273, 323), 'create_test_data.get_test_data', 'create_test_data.get_test_data', (['(False)', '(30)', '(1)', '(0.15)'], {}), '(False, 30, 1, 0.15)\n', (303, 323), False, 'import create_test_data\n'), ((404, 434), 'match.match', 'match.match', (['ps', 'y', '"""variable"""'], {}), "(ps, y, 'variable')\n", (415, 434), False, 'import match\n'), ((435, 482), 'util.vis.plot_matching', 'vis.plot_matching', (['ps', 'exp_unexp_dic'], {'save': '(True)'}), '(ps, exp_unexp_dic, save=True)\n', (452, 482), False, 'from util import vis\n'), ((111, 136), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (126, 136), False, 'import os, sys\n'), ((329, 338), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (335, 338), True, 'import numpy as np\n'), ((157, 182), 'os.path.join', 'join', (['ROOT_DIR', '"""opmatch"""'], {}), "(ROOT_DIR, 'opmatch')\n", (161, 182), False, 'from os.path import join\n'), ((364, 373), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (370, 373), True, 'import numpy as np\n')]
|
#todo chapter5. Getting started with pandas
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
#todo 5.1 Introduction to pandas data structures
'''
To get comfortable with pandas you need to become familiar with its two workhorse data structures: Series and DataFrame.
They do not solve every problem, but they provide an easy-to-use, solid foundation for most applications.
'''
#todo 5.1.1 Series
'''
A Series is a one-dimensional, array-like structure that can hold a sequence of objects (of any NumPy dtype).
It also has an associated array of labels for the data, called its index.
The simplest Series can be created from an array of data alone.
'''
obj = Series([4, 7, -5, 3])
obj
'''
The string representation of a Series shows the index on the left and the corresponding values on the right.
Since we did not specify an index, the default one is used: integers 0 through N-1 (where N is the length of the data).
The array and the index of a Series can be retrieved through its values and index attributes, respectively.
'''
obj.values
obj.index
'''
To create a Series with an index labeling each data point, do the following.
'''
obj2 = Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
obj2
obj2.index
'''
Use the index labels to select a value or a set of values, or to assign values.
'''
obj2['a']
obj2['d'] = 6
obj2[['c', 'a', 'd']]
'''
NumPy array operations, such as filtering with a boolean array, scalar multiplication, or applying math functions,
preserve the index-value link.
'''
obj2[obj2 > 0]
obj2 * 2
np.exp(obj2)
'''
Another way to think about a Series is as a fixed-length, ordered dict.
It maps index values to data values, much like a Python dict, and it can be substituted
for a dict in many functions that expect one.
'''
'b' in obj2
'e' in obj2
'''
If you have data stored in a Python dict, you can create a Series from it.
'''
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj3 = Series(sdata)
obj3
'''
When only a dict is passed, the resulting Series has the dict's keys, in order, as its index.
'''
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj4 = Series(sdata, index=states)
obj4
'''
Here only three of the values in sdata are present, because no value was found for 'California'.
It appears as NaN (not a number), which pandas treats as a missing or NA value.
From now on I will use 'missing' and 'NA' to refer to missing data.
The isnull and notnull functions in pandas are used to detect missing data.
'''
pd.isnull(obj4)
pd.notnull(obj4)
'''
They also exist as instance methods on Series.
'''
obj4.isnull()
'''
Handling missing data is covered in more detail toward the end of this chapter.
A critical Series feature is arithmetic on differently indexed data.
'''
obj3
obj4
obj3 + obj4  # keys present in only one of the two operands come out as NaN
'''
Both the Series object itself and its index have a name attribute, which plays an important role in other areas of pandas.
'''
obj4.name = 'population'
obj4.index.name = 'state'
obj4
'''
A Series' index can be altered in place by assignment.
'''
obj.index = ['Bob', 'Steve', 'Jeff', 'Ryan']
obj
#todo 5.1.2 DataFrame
'''
A DataFrame is a spreadsheet-like, tabular data structure containing an ordered collection of columns,
each of which can hold a different type of value (numeric, string, boolean, and so on).
A DataFrame has both a row index and a column index; it can be thought of as a dict of Series
objects all sharing the same index.
Compared with other DataFrame-like structures such as R's data.frame, row-oriented and
column-oriented operations on a DataFrame are treated roughly symmetrically.
Under the hood the data is stored as one or more two-dimensional blocks rather than as a list,
dict, or some other collection of one-dimensional arrays.
The exact details of DataFrame's internals are outside the scope of this book.
'''
'''
There are numerous ways to construct a DataFrame; one of the most common is from a dict of
equal-length lists or NumPy arrays.
'''
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
        'year': [2000, 2001, 2002, 2001, 2002],
        'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
frame = DataFrame(data)
'''
The resulting DataFrame has its index assigned automatically, as with Series, and the columns are stored in sorted order.
'''
frame
'''
If you specify a sequence of columns, the DataFrame's columns are arranged in exactly that order.
'''
DataFrame(data, columns=['year', 'state', 'pop'])
'''
As with Series, if you pass a column that is not contained in data, it is stored as NA values.
'''
frame2 = DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index=['one', 'two', 'three', 'four', 'five'])
frame2
frame2.columns
'''
A column in a DataFrame can be retrieved as a Series either by dict-like notation or by attribute access.
'''
frame2['state']
frame2.year
'''
The returned Series has the same index as the DataFrame, and its name attribute is set appropriately.
Rows can be retrieved by position or by name with methods such as ix.
'''
frame2.ix['three'] # .ix is deprecated.
frame2.loc['three'] # .loc for label based indexing or
frame2.iloc[2] # .iloc for positional indexing
'''
Columns can be modified by assignment.
For example, the currently empty 'debt' column can be assigned a scalar value or an array of values.
'''
frame2['debt'] = 16.5  # the same value is assigned to every row
frame2
frame2['debt'] = np.arange(5.)
frame2
'''
When assigning a list or array to a column, its length must match the length of the DataFrame.
If you assign a Series, its values are aligned on the DataFrame's index, and any missing index
positions are left unassigned (they become NA).
'''
val = Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])
frame2['debt'] = val
frame2
'''
Assigning to a column that does not exist creates a new column.
As with a Python dict, the del keyword deletes a column.
'''
frame2['eastern'] = frame2.state == 'Ohio'
frame2
del frame2['eastern']
frame2.columns
'''
A column returned by indexing a DataFrame is a view on the underlying data, not a copy.
Any in-place modification of that Series is therefore reflected in the DataFrame.
Use the Series copy method when you need an explicit copy.
'''
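# Added illustration of the note above: .copy() returns an independent Series,
# so modifying it does not write back into frame2.
debt_copy = frame2['debt'].copy()
debt_copy['one'] = 99.9  # frame2['debt'] is unchanged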
'''
You can also pass a nested dict of dicts; given a nested dict like the following,
'''
pop = {'Nevada': {2001: 2.4, 2002: 2.9}, 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}
'''
the outer dict keys become the columns and the inner keys become the row index.
'''
frame3 = DataFrame(pop)
frame3
'''
As with NumPy arrays, the result can be transposed.
'''
frame3.T
'''
When a DataFrame is built from a nested dict, the inner keys are unioned to form the index,
unless an explicit index is passed, in which case that index is used instead.
'''
DataFrame(pop, index=[2001, 2002, 2003])
'''
Dicts of Series are treated in much the same way.
'''
pdata = {'Ohio': frame3['Ohio'][:-1], 'Nevada': frame3['Nevada'][:2]}
DataFrame(pdata)
'''
See Table 5-1 for a list of the input types the DataFrame constructor accepts.
'''
frame3.index.name = 'year'
frame3.columns.name = 'state'
frame3
'''
Like Series, the values attribute returns the data contained in the DataFrame as a 2D array.
'''
frame3.values
'''
If the DataFrame's columns have different dtypes, the dtype of the values array is chosen to accommodate all of them.
'''
frame2.values
#todo 5.1.3 Index objects
'''
pandas Index objects hold the axis labels and other metadata (such as the axis names) for tabular data.
Any array or other sequence of labels used when constructing a Series or DataFrame is converted to an Index internally.
'''
obj = Series(range(3), index=['a', 'b', 'c'])
index = obj.index
index
index[1:]
'''
Index objects are immutable, so the following assignment raises an error.
'''
index[1] = 'd'
'''
Because they are immutable, Index objects can be safely shared among data structures.
'''
index = pd.Index(np.arange(3))
obj2 = Series([1.5, -2.5, 0], index=index)
obj2.index is index
'''
Table 5-2 lists the built-in Index classes in pandas.
You can subclass Index to implement specialized axis-indexing behavior.
'''
'''
In addition to being array-like, an Index object also behaves like a fixed-size set.
'''
frame3
'Ohio' in frame3.columns
2003 in frame3.index
'''
Each Index has a number of methods and properties for answering questions about the data it contains.
'''
#todo 5.2 Essential functionality
'''
This section walks through the fundamental mechanics of interacting with the data stored in a Series or DataFrame.
Later we will look in more detail at data analysis and manipulation with pandas.
This book is not an exhaustive reference for the pandas library; it focuses on the most important features,
leaving the less commonly used things for you to explore on your own.
'''
#todo 5.2.1 Reindexing
'''
A remarkable feature of pandas objects is reindex, which creates a new object with the data conformed to a new index.
'''
obj = Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])
obj
'''
Calling reindex on this Series rearranges the data according to the new index,
introducing missing values for any index values that were not already present.
'''
obj2 = obj.reindex(['a', 'b', 'c', 'd', 'e'])
obj2
obj.reindex(['a', 'b', 'c', 'd', 'e'], fill_value=0)
'''
For ordered data such as time series, you may want to interpolate or fill in values when reindexing.
The method option lets you do this; ffill, for example, forward-fills missing values from the previous one.
'''
obj3 = Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])
obj3.reindex(range(6), method='ffill')     # fill gaps forward, from the previous value
obj3.reindex(range(6), method='pad')       # same as 'ffill'
obj3.reindex(range(6), method='bfill')     # fill gaps backward, from the next value
obj3.reindex(range(6), method='backfill')  # same as 'bfill'
'''
With DataFrame, reindex can alter the (row) index, the columns, or both.
Passing just a sequence reindexes the rows.
'''
frame = DataFrame(np.arange(9).reshape((3, 3)), index=['a', 'c', 'd'], columns=['Ohio', 'Texas', 'California'])
frame
frame2 = frame.reindex(['a', 'b', 'c', 'd'])
frame2
'''
Columns can be reindexed with the columns keyword.
'''
states = ['Texas', 'Utah', 'California']
frame.reindex(columns=states)
'''
Rows and columns can be reindexed in one go, but interpolation is applied only row-wise (axis 0).
'''
frame.reindex(index=['a', 'b', 'c', 'd'], method='ffill').reindex(columns=states)
'''
Reindexing can be expressed more succinctly by label-indexing with ix.
'''
frame.ix[['a', 'b', 'c', 'd'], states] # iloc(integer position), loc(label-based indexing), ix(iloc or loc)
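# Added note: .ix was removed in later pandas versions; the equivalent reindexing call today is
# frame.reindex(index=['a', 'b', 'c', 'd'], columns=states)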
#todo 5.2.2 Dropping entries from an axis
'''
Dropping one or more entries from an axis is easy if you already have an index array or a list
without those entries, though that may require a bit of reshaping and set logic.
The drop method returns a new object with the indicated values deleted from an axis.
'''
obj = Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e'])
new_obj = obj.drop('c')
new_obj
obj.drop(['d', 'c'])
'''
With DataFrame, index values can be deleted from either axis.
'''
data = DataFrame(np.arange(16).reshape((4, 4)), index=['Ohio', 'Colorado', 'Utah', 'New York'], columns=['one', 'two', 'three', 'four'])
data.drop(['Colorado', 'Ohio'])  # drop those rows
data.drop('two', axis=1)  # relative to a 2D array, axis=0 means rows and axis=1 means columns
data.drop(['two', 'four'], axis=1)
#todo 5.2.3 Indexing, selection, and filtering
'''
Series indexing (obj[...]) works analogously to NumPy array indexing, except that you can use the
Series' index values instead of only integers.
Here are some examples.
'''
obj = Series(np.arange(4.), index=['a', 'b', 'c', 'd'])
obj['b']
obj[1]
obj[2:4]
obj[['b', 'a', 'd']]
obj[[1, 3]]
obj[obj<2]
'''
Slicing with labels behaves differently from normal Python slicing in that both the start and the endpoint are inclusive.
'''
obj['b':'c']
'''
Setting values on a slice selected this way works just as you would expect.
'''
obj['b':'c'] = 5
obj
'''
As we saw earlier, indexing into a DataFrame retrieves one or more columns.
'''
data = DataFrame(np.arange(16).reshape((4, 4)), index=['Ohio', 'Colorado', 'Utah', 'New York'], columns=['one', 'two', 'three', 'four'])
data
data['two']
data[['three', 'one']]
'''
Rows can also be selected by slicing or with a boolean array.
'''
data[:2]
data[data['three'] > 5]
'''
This syntax may strike some readers as inconsistent, but it exists purely for practicality.
Another use case is selecting values with a boolean DataFrame produced by a scalar comparison.
'''
data < 5
data[data < 5] = 0
data
'''
This example is intended to make a DataFrame look syntactically more like an ndarray.
For label-based indexing on DataFrame rows and columns there is the special indexing field ix.
It lets you select rows and columns with NumPy-like notation plus axis labels and, as mentioned earlier,
is a less verbose way to do reindexing.
'''
data.ix['Colorado', ['two', 'three']] # ix is deprecated. label based indexing -> .loc, positional indexing -> .iloc
data.ix[['Colorado', 'Utah'], [3, 0, 1]] # ix was used when mixing label-based and positional indexing
data.loc[['Colorado', 'Utah'], ['four', 'one', 'two']]
data.iloc[2]
data.loc[:'Utah', 'two']
data.ix[data.three > 5, :3]
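# Added note: without .ix, one way to write the same selection in current pandas is
# data.loc[data.three > 5, data.columns[:3]]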
#todo 5.2.4 Arithmetic and data alignment
'''
One of the most important pandas features is arithmetic between objects with different indexes.
When adding objects, if any index pairs are not the same, the respective index in the result is the union of the two.
'''
s1 = Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])
s2 = Series([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g'])
s1 + s2
'''
Labels that do not overlap come out as NA.
Missing values propagate through arithmetic, and with DataFrame alignment is performed on both the rows and the columns.
'''
df1 = DataFrame(np.arange(9.).reshape((3, 3)), columns=list('bcd'), index=['Ohio', 'Texas', 'Colorado'])
df2 = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon'])
df1
df2
df1 + df2
#todo Arithmetic methods with fill values
'''
In arithmetic between differently indexed objects, you may want to fill a missing axis label
with a special value, such as 0. The arithmetic methods let you do that:
add : method for addition
sub : method for subtraction
div : method for division
mul : method for multiplication
'''
df1 = DataFrame(np.arange(12.).reshape((3, 4)), columns=list('abcd'))
df2 = DataFrame(np.arange(20.).reshape((4, 5)), columns=list('abcde'))
df1 + df2
'''
Call df1's add method, passing df2 and a fill_value argument.
'''
df1.add(df2, fill_value=0)  # fill_value treats a label missing from one of the DataFrames as 0 in that DataFrame
'''
fill_value can likewise be specified when reindexing a Series or DataFrame.
'''
df1.reindex(columns=df2.columns, fill_value=0)
#todo Operations between DataFrame and Series
'''
As with NumPy arrays, arithmetic between DataFrame and Series is well defined.
As a motivating example, consider the difference between a 2D array and one of its rows.
'''
arr = np.arange(12.).reshape((3, 4))
arr
arr[0]
arr - arr[0]
'''
This is an example of broadcasting; it is covered in more detail in chapter 12.
Operations between a DataFrame and a Series work similarly.
'''
frame = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon'])
series = frame.ix[0]
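# Added note: .ix no longer exists in current pandas; frame.iloc[0] selects the same first row by position.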
frame
series
'''
By default, arithmetic between a DataFrame and a Series matches the Series' index on the
DataFrame's columns and broadcasts down the rows.
'''
frame - series
'''
If an index value is not found in either the DataFrame's columns or the Series' index,
the objects are reindexed to form the union.
'''
series2 = Series(range(3), index=['b', 'e', 'f'])
frame + series2
'''
If you instead want to match on the rows and broadcast over the columns, use one of the arithmetic methods.
'''
series3 = frame['d']
frame
series3
frame.sub(series3, axis=0)
'''
The axis argument is the axis number to match on; here axis=0 means match on the DataFrame's row index
and broadcast across the columns.
'''
#todo 5.2.5 Function application and mapping
'''
NumPy ufuncs (element-wise array methods) also work with pandas objects.
'''
frame = DataFrame(np.random.randn(4, 3), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon'])
frame
np.abs(frame)
'''
Another frequent operation is applying a function to the 1D array formed by each column or row.
DataFrame's apply method does exactly this.
'''
f = lambda x: x.max() - x.min()
frame.apply(f)
frame.apply(f, axis=1)
'''
Many common array statistics such as sum and mean are DataFrame methods, so using apply is not necessary for them.
The function passed to apply need not return a scalar; it can also return a Series with multiple values.
'''
def f(x):
    return Series([x.min(), x.max()], index=['min', 'max'])
frame.apply(f)
'''
Element-wise Python functions can be used too.
To turn each floating-point value in frame into a formatted string, use applymap.
'''
format = lambda x: '%.2f' % x
frame.applymap(format)
'''
The reason for the name applymap is that Series has a map method for applying an element-wise function.
'''
frame['e'].map(format)
#todo 5.2.6 Sorting and ranking
'''
Sorting a data set by some criterion is another important built-in operation.
To sort lexicographically by row or column index, use the sort_index method, which returns a new, sorted object.
'''
obj = Series(range(4), index=['d', 'a', 'b', 'c'])
obj.sort_index()
'''
With a DataFrame, you can sort by the index on either axis.
'''
frame = DataFrame(np.arange(8).reshape((2, 4)), index=['three', 'one'], columns=['d', 'a', 'b', 'c'])
frame.sort_index()        # sort by the row index
frame.sort_index(axis=1)  # sort by the column index
'''
The data is sorted in ascending order by default, but it can be sorted in descending order too.
'''
frame.sort_index(axis=1, ascending=False)
'''
To sort a Series by its values, use its sort_values method.
'''
obj = Series([4, 7, -3, 2])
obj.sort_values()
'''
Any missing values are sorted to the end of the Series by default.
'''
obj = Series([4, np.nan, 7, np.nan, -3, 2])
obj.sort_values()
'''
With a DataFrame you may want to sort by the values in one or more columns.
To do so, pass the column name to the by option.
'''
frame = DataFrame({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]})
frame
frame.sort_values(by='b')
'''
To sort by multiple columns, pass a list of column names.
'''
frame.sort_values(by=['a', 'b'])
'''
Ranking is closely related to sorting, assigning ranks from 1 through the number of valid data points in an array.
It is similar to the indirect sort indices produced by numpy.argsort, except that ties are broken differently.
By default, the rank method of Series and DataFrame assigns each tied group the mean rank.
'''
obj = Series([7, -5, 7, 4, 2, 0, 4])
obj.rank()
'''
Ranks can also be assigned according to the order in which the values are observed in the data.
'''
obj.rank(method='first')
'''
You can rank in descending order, too.
'''
obj.rank(ascending=False, method='max')
'''
DataFrame can compute ranks over the rows or over the columns.
'''
frame = DataFrame({'b': [4.3, 7, -3, 2], 'a': [0, 1, 0, 1], 'c': [-2, 5, 8, -2.5]})
frame
frame.rank(axis=1)
|
[
"pandas.DataFrame",
"numpy.abs",
"numpy.random.randn",
"pandas.isnull",
"pandas.notnull",
"numpy.arange",
"numpy.exp",
"pandas.Series"
] |
[((472, 493), 'pandas.Series', 'Series', (['[4, 7, -5, 3]'], {}), '([4, 7, -5, 3])\n', (478, 493), False, 'from pandas import Series, DataFrame\n'), ((791, 840), 'pandas.Series', 'Series', (['[4, 7, -5, 3]'], {'index': "['d', 'b', 'a', 'c']"}), "([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])\n", (797, 840), False, 'from pandas import Series, DataFrame\n'), ((1075, 1087), 'numpy.exp', 'np.exp', (['obj2'], {}), '(obj2)\n', (1081, 1087), True, 'import numpy as np\n'), ((1436, 1449), 'pandas.Series', 'Series', (['sdata'], {}), '(sdata)\n', (1442, 1449), False, 'from pandas import Series, DataFrame\n'), ((1591, 1618), 'pandas.Series', 'Series', (['sdata'], {'index': 'states'}), '(sdata, index=states)\n', (1597, 1618), False, 'from pandas import Series, DataFrame\n'), ((1885, 1900), 'pandas.isnull', 'pd.isnull', (['obj4'], {}), '(obj4)\n', (1894, 1900), True, 'import pandas as pd\n'), ((1901, 1917), 'pandas.notnull', 'pd.notnull', (['obj4'], {}), '(obj4)\n', (1911, 1917), True, 'import pandas as pd\n'), ((3118, 3133), 'pandas.DataFrame', 'DataFrame', (['data'], {}), '(data)\n', (3127, 3133), False, 'from pandas import Series, DataFrame\n'), ((3282, 3331), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': "['year', 'state', 'pop']"}), "(data, columns=['year', 'state', 'pop'])\n", (3291, 3331), False, 'from pandas import Series, DataFrame\n'), ((3397, 3505), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': "['year', 'state', 'pop', 'debt']", 'index': "['one', 'two', 'three', 'four', 'five']"}), "(data, columns=['year', 'state', 'pop', 'debt'], index=['one',\n 'two', 'three', 'four', 'five'])\n", (3406, 3505), False, 'from pandas import Series, DataFrame\n'), ((4035, 4049), 'numpy.arange', 'np.arange', (['(5.0)'], {}), '(5.0)\n', (4044, 4049), True, 'import numpy as np\n'), ((4198, 4255), 'pandas.Series', 'Series', (['[-1.2, -1.5, -1.7]'], {'index': "['two', 'four', 'five']"}), "([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])\n", (4204, 4255), False, 'from pandas import Series, DataFrame\n'), ((4832, 4846), 'pandas.DataFrame', 'DataFrame', (['pop'], {}), '(pop)\n', (4841, 4846), False, 'from pandas import Series, DataFrame\n'), ((5034, 5074), 'pandas.DataFrame', 'DataFrame', (['pop'], {'index': '[2001, 2002, 2003]'}), '(pop, index=[2001, 2002, 2003])\n', (5043, 5074), False, 'from pandas import Series, DataFrame\n'), ((5196, 5212), 'pandas.DataFrame', 'DataFrame', (['pdata'], {}), '(pdata)\n', (5205, 5212), False, 'from pandas import Series, DataFrame\n'), ((5921, 5956), 'pandas.Series', 'Series', (['[1.5, -2.5, 0]'], {'index': 'index'}), '([1.5, -2.5, 0], index=index)\n', (5927, 5956), False, 'from pandas import Series, DataFrame\n'), ((6598, 6655), 'pandas.Series', 'Series', (['[4.5, 7.2, -5.3, 3.6]'], {'index': "['d', 'b', 'a', 'c']"}), "([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])\n", (6604, 6655), False, 'from pandas import Series, DataFrame\n'), ((7012, 7065), 'pandas.Series', 'Series', (["['blue', 'purple', 'yellow']"], {'index': '[0, 2, 4]'}), "(['blue', 'purple', 'yellow'], index=[0, 2, 4])\n", (7018, 7065), False, 'from pandas import Series, DataFrame\n'), ((10260, 10317), 'pandas.Series', 'Series', (['[7.3, -2.5, 3.4, 1.5]'], {'index': "['a', 'c', 'd', 'e']"}), "([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])\n", (10266, 10317), False, 'from pandas import Series, DataFrame\n'), ((10323, 10389), 'pandas.Series', 'Series', (['[-2.1, 3.6, -1.5, 4, 3.1]'], {'index': "['a', 'c', 'e', 'f', 'g']"}), "([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g'])\n", 
(10329, 10389), False, 'from pandas import Series, DataFrame\n'), ((12491, 12504), 'numpy.abs', 'np.abs', (['frame'], {}), '(frame)\n', (12497, 12504), True, 'import numpy as np\n'), ((13805, 13826), 'pandas.Series', 'Series', (['[4, 7, -3, 2]'], {}), '([4, 7, -3, 2])\n', (13811, 13826), False, 'from pandas import Series, DataFrame\n'), ((13909, 13946), 'pandas.Series', 'Series', (['[4, np.nan, 7, np.nan, -3, 2]'], {}), '([4, np.nan, 7, np.nan, -3, 2])\n', (13915, 13946), False, 'from pandas import Series, DataFrame\n'), ((14068, 14118), 'pandas.DataFrame', 'DataFrame', (["{'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]}"], {}), "({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]})\n", (14077, 14118), False, 'from pandas import Series, DataFrame\n'), ((14443, 14473), 'pandas.Series', 'Series', (['[7, -5, 7, 4, 2, 0, 4]'], {}), '([7, -5, 7, 4, 2, 0, 4])\n', (14449, 14473), False, 'from pandas import Series, DataFrame\n'), ((14687, 14762), 'pandas.DataFrame', 'DataFrame', (["{'b': [4.3, 7, -3, 2], 'a': [0, 1, 0, 1], 'c': [-2, 5, 8, -2.5]}"], {}), "({'b': [4.3, 7, -3, 2], 'a': [0, 1, 0, 1], 'c': [-2, 5, 8, -2.5]})\n", (14696, 14762), False, 'from pandas import Series, DataFrame\n'), ((5900, 5912), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (5909, 5912), True, 'import numpy as np\n'), ((8196, 8210), 'numpy.arange', 'np.arange', (['(5.0)'], {}), '(5.0)\n', (8205, 8210), True, 'import numpy as np\n'), ((8787, 8801), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (8796, 8801), True, 'import numpy as np\n'), ((12398, 12419), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)'], {}), '(4, 3)\n', (12413, 12419), True, 'import numpy as np\n'), ((11508, 11523), 'numpy.arange', 'np.arange', (['(12.0)'], {}), '(12.0)\n', (11517, 11523), True, 'import numpy as np\n'), ((7414, 7426), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (7423, 7426), True, 'import numpy as np\n'), ((8365, 8378), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (8374, 8378), True, 'import numpy as np\n'), ((9139, 9152), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (9148, 9152), True, 'import numpy as np\n'), ((10511, 10525), 'numpy.arange', 'np.arange', (['(9.0)'], {}), '(9.0)\n', (10520, 10525), True, 'import numpy as np\n'), ((10616, 10631), 'numpy.arange', 'np.arange', (['(12.0)'], {}), '(12.0)\n', (10625, 10631), True, 'import numpy as np\n'), ((10958, 10973), 'numpy.arange', 'np.arange', (['(12.0)'], {}), '(12.0)\n', (10967, 10973), True, 'import numpy as np\n'), ((11028, 11043), 'numpy.arange', 'np.arange', (['(20.0)'], {}), '(20.0)\n', (11037, 11043), True, 'import numpy as np\n'), ((11678, 11693), 'numpy.arange', 'np.arange', (['(12.0)'], {}), '(12.0)\n', (11687, 11693), True, 'import numpy as np\n'), ((13480, 13492), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (13489, 13492), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
x=np.array(["Anmol","Sushant","Arardhya","Aniket","Deepak"])
y=np.array([70,56,83,54,75])
'''plt.plot(x,np.sin(x),"y*")
plt.plot(x,np.cos(x),"b^")
plt.plot(x,np.tan(x),"r--")
plt.plot(x,x*2,"b")
plt.title("Graph")
plt.xlabel("x")
plt.ylabel("y")'''
z=np.array([40,85,62,45,87])
l=np.array([87,89,30,45,75])
m=np.array([52,56,72,98,20])
f,n=plt.subplots(2,2)
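# plt.subplots(2, 2) returns a Figure and a 2x2 array of Axes; each subject below is drawn in its own panel.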
n[0][0].plot(x,y,"y--")
n[0][0].set_title("Maths")
n[0][1].plot(x,z,"b^")
n[0][1].set_title("English")
n[1][0].plot(x,l,"r*")
n[1][0].set_title("CSE")
n[1][1].plot(x,m,"g")
n[1][1].set_title("Science")
plt.show()
|
[
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((53, 115), 'numpy.array', 'np.array', (["['Anmol', 'Sushant', 'Arardhya', 'Aniket', 'Deepak']"], {}), "(['Anmol', 'Sushant', 'Arardhya', 'Aniket', 'Deepak'])\n", (61, 115), True, 'import numpy as np\n'), ((114, 144), 'numpy.array', 'np.array', (['[70, 56, 83, 54, 75]'], {}), '([70, 56, 83, 54, 75])\n', (122, 144), True, 'import numpy as np\n'), ((302, 332), 'numpy.array', 'np.array', (['[40, 85, 62, 45, 87]'], {}), '([40, 85, 62, 45, 87])\n', (310, 332), True, 'import numpy as np\n'), ((331, 361), 'numpy.array', 'np.array', (['[87, 89, 30, 45, 75]'], {}), '([87, 89, 30, 45, 75])\n', (339, 361), True, 'import numpy as np\n'), ((360, 390), 'numpy.array', 'np.array', (['[52, 56, 72, 98, 20]'], {}), '([52, 56, 72, 98, 20])\n', (368, 390), True, 'import numpy as np\n'), ((391, 409), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (403, 409), True, 'import matplotlib.pyplot as plt\n'), ((611, 621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (619, 621), True, 'import matplotlib.pyplot as plt\n')]
|
import colorsys
import copy
import os
import numpy as np
from PIL import Image
import cv2
from BASNET import ModelBASNet
# --------------------------------------------#
#   To run prediction with your own trained model, two parameters must be changed:
#   both model_path and num_classes need to be updated!
#   If a shape mismatch occurs, double-check that model_path and
#   num_classes match the values used during training.
# --------------------------------------------#
class BASNet(object):
_defaults = {
"model_path": 'Logs/ep014-loss0.678-val_loss0.562.h5',
"model_image_size": (256, 256, 3),
"num_classes": 2,
# --------------------------------#
        #   The blend flag controls whether the
        #   prediction is blended with the original image
# --------------------------------#
"blend": True,
}
# ---------------------------------------------------#
    #   Initialize BASNet
# ---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.generate()
# ---------------------------------------------------#
    #   Load the model
# ---------------------------------------------------#
def generate(self):
# -------------------------------#
        #   Load the network and its weights
# -------------------------------#
self.model = ModelBASNet(self.model_image_size)
self.model.load_weights(self.model_path)
print('{} model loaded.'.format(self.model_path))
if self.num_classes == 2:
self.colors = [(255, 255, 255), (0, 0, 0)]
elif self.num_classes <= 21:
self.colors = [(0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
(0, 128, 128),
(128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128),
(192, 0, 128),
(64, 128, 128), (192, 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0),
(0, 64, 128), (128, 64, 12)]
else:
            # assign a different color to each class
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
def letterbox_image(self, image, size):
image = image.convert("RGB")
iw, ih = image.size
w, h = size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
image = image.resize((nw, nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128, 128, 128))
new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
return new_image, nw, nh
# ---------------------------------------------------#
    #   Detect an image
# ---------------------------------------------------#
def detect_image(self, image):
# ---------------------------------------------------#
        #   Make a backup of the input image for blending later
# ---------------------------------------------------#
old_img = copy.deepcopy(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
# ---------------------------------------------------#
        #   Resize without distortion (letterbox with gray padding) and normalize the image
# ---------------------------------------------------#
img, nw, nh = self.letterbox_image(image, (self.model_image_size[1], self.model_image_size[0]))
img = np.asarray([np.array(img) / 255])
# ---------------------------------------------------#
        #   Feed the image to the network; the prediction is (256, 256, 8) with values in 0-1
# ---------------------------------------------------#
pr = self.model.predict(img)[0]
pr = pr[...,0].reshape([self.model_image_size[0], self.model_image_size[1]]) #0-1
        # De-normalize back to the 0-255 range
        # shape: (256, 256)
pr = (pr - pr.min())/(pr.max() - pr.min()) * 255.
# --------------------------------------#
        #   Crop away the gray padding
# --------------------------------------#
pr = pr[int((self.model_image_size[0] - nh) // 2):int((self.model_image_size[0] - nh) // 2 + nh),
int((self.model_image_size[1] - nw) // 2):int((self.model_image_size[1] - nw) // 2 + nw)]
# ------------------------------------------------#
        #   Expand to shape (h, w, 3)
# ------------------------------------------------#
seg_img = np.expand_dims(pr,axis=2).repeat(repeats=3,axis=2)
print(seg_img.shape)
# ------------------------------------------------#
        #   Convert the result back to a PIL Image
# ------------------------------------------------#
image = Image.fromarray(np.uint8(seg_img)).resize((orininal_w, orininal_h), Image.NEAREST)
# ------------------------------------------------#
        #   Blend the new image with the original image
# ------------------------------------------------#
if self.blend:
image = Image.blend(old_img, image, 0.7)
return image
basNet = BASNet()
path = "./Data/test/test_image/"
filepng = os.listdir(path)
for png in filepng:
image = Image.open(path + png)
r_image = basNet.detect_image(image)
r_image.save("./Data/predict/" + png)
|
[
"PIL.Image.new",
"copy.deepcopy",
"numpy.uint8",
"colorsys.hsv_to_rgb",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.array",
"BASNET.ModelBASNet",
"PIL.Image.blend",
"os.listdir"
] |
[((5045, 5061), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5055, 5061), False, 'import os\n'), ((5094, 5116), 'PIL.Image.open', 'Image.open', (['(path + png)'], {}), '(path + png)\n', (5104, 5116), False, 'from PIL import Image\n'), ((1192, 1226), 'BASNET.ModelBASNet', 'ModelBASNet', (['self.model_image_size'], {}), '(self.model_image_size)\n', (1203, 1226), False, 'from BASNET import ModelBASNet\n'), ((2631, 2670), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(128, 128, 128)'], {}), "('RGB', size, (128, 128, 128))\n", (2640, 2670), False, 'from PIL import Image\n'), ((3109, 3129), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (3122, 3129), False, 'import copy\n'), ((4926, 4958), 'PIL.Image.blend', 'Image.blend', (['old_img', 'image', '(0.7)'], {}), '(old_img, image, 0.7)\n', (4937, 4958), False, 'from PIL import Image\n'), ((3151, 3166), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3159, 3166), True, 'import numpy as np\n'), ((3197, 3212), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3205, 3212), True, 'import numpy as np\n'), ((4413, 4439), 'numpy.expand_dims', 'np.expand_dims', (['pr'], {'axis': '(2)'}), '(pr, axis=2)\n', (4427, 4439), True, 'import numpy as np\n'), ((3517, 3530), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3525, 3530), True, 'import numpy as np\n'), ((4673, 4690), 'numpy.uint8', 'np.uint8', (['seg_img'], {}), '(seg_img)\n', (4681, 4690), True, 'import numpy as np\n'), ((2145, 2168), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (2164, 2168), False, 'import colorsys\n')]
|
# ########################################################## ##
# FlatCAM: 2D Post-processing for Manufacturing #
# http://flatcam.org #
# Author: <NAME> (c) #
# Date: 2/5/2014 #
# MIT Licence #
# ########################################################## ##
from camlib import Geometry, grace
import shapely.affinity as affinity
from shapely.geometry import Point, LineString
import numpy as np
import re
import logging
import traceback
from copy import deepcopy
# import AppTranslation as fcTranslate
import gettext
import builtins
if '_' not in builtins.__dict__:
_ = gettext.gettext
log = logging.getLogger('base')
class Excellon(Geometry):
"""
    All the Excellon parsing is done here.
*ATTRIBUTES*
* ``tools`` (dict): The key is the tool name and the value is
a dictionary specifying the tool:
================ ====================================
Key Value
================ ====================================
tooldia Diameter of the tool
drills List that store the Shapely Points for drill points
slots List that store the Shapely Points for slots. Each is a tuple: (start_point, stop_point)
data dictionary which holds the options for each tool
solid_geometry Geometry list for each tool
================ ====================================
"""
defaults = {
"zeros": "L",
"excellon_format_upper_mm": '3',
"excellon_format_lower_mm": '3',
"excellon_format_upper_in": '2',
"excellon_format_lower_in": '4',
"excellon_units": 'INCH',
"geo_steps_per_circle": '64'
}
def __init__(self, zeros=None, excellon_format_upper_mm=None, excellon_format_lower_mm=None,
excellon_format_upper_in=None, excellon_format_lower_in=None, excellon_units=None,
geo_steps_per_circle=None):
"""
        The constructor takes the Excellon format settings as optional keyword parameters.
:return: Excellon object.
:rtype: Excellon
"""
self.decimals = self.app.decimals
if geo_steps_per_circle is None:
geo_steps_per_circle = int(Excellon.defaults['geo_steps_per_circle'])
self.geo_steps_per_circle = int(geo_steps_per_circle)
Geometry.__init__(self, geo_steps_per_circle=int(geo_steps_per_circle))
# dictionary to store tools, see above for description
self.tools = {}
self.source_file = ''
        # it serves to flag whether a routing start or a routing stop was encountered
        # if a stop is encountered and this flag is still 0 (so there is no stop for a previous start), issue an error
self.routing_flag = 1
self.match_routing_start = None
self.match_routing_stop = None
# ## IN|MM -> Units are inherited from Geometry
self.units = self.app.defaults['units']
self.units_found = self.app.defaults['units']
# Trailing "T" or leading "L" (default)
# self.zeros = "T"
self.zeros = zeros or self.defaults["zeros"]
self.zeros_found = deepcopy(self.zeros)
        # this will serve as a default if the Excellon file has no info regarding tool diameters (this info may be
        # in another file, as with the PCB Wizard ECAD software)
        self.toolless_diam = 1.0
        # signal that the Excellon file has no tool diameter information and the tools have bogus (random) diameters
self.diameterless = False
# Excellon format
self.excellon_format_upper_in = excellon_format_upper_in or self.defaults["excellon_format_upper_in"]
self.excellon_format_lower_in = excellon_format_lower_in or self.defaults["excellon_format_lower_in"]
self.excellon_format_upper_mm = excellon_format_upper_mm or self.defaults["excellon_format_upper_mm"]
self.excellon_format_lower_mm = excellon_format_lower_mm or self.defaults["excellon_format_lower_mm"]
self.excellon_units = excellon_units or self.defaults["excellon_units"]
self.excellon_units_found = None
# detected Excellon format is stored here:
self.excellon_format = None
# Attributes to be included in serialization
# Always append to it because it carries contents
# from Geometry.
self.ser_attrs += ['zeros', 'excellon_format_upper_mm', 'excellon_format_lower_mm',
'excellon_format_upper_in', 'excellon_format_lower_in', 'excellon_units', 'source_file']
# ### Patterns ####
# Regex basics:
# ^ - beginning
# $ - end
# *: 0 or more, +: 1 or more, ?: 0 or 1
# M48 - Beginning of Part Program Header
self.hbegin_re = re.compile(r'^M48$')
# ;HEADER - Beginning of Allegro Program Header
self.allegro_hbegin_re = re.compile(r'\;\s*(HEADER)')
# M95 or % - End of Part Program Header
# NOTE: % has different meaning in the body
self.hend_re = re.compile(r'^(?:M95|%)$')
# FMAT Excellon format
# Ignored in the parser
# self.fmat_re = re.compile(r'^FMAT,([12])$')
# Uunits and possible Excellon zeros and possible Excellon format
# INCH uses 6 digits
# METRIC uses 5/6
self.units_re = re.compile(r'^(INCH|METRIC)(?:,([TL])Z)?,?(\d*\.\d+)?.*$')
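        # e.g. (illustrative lines, not from a real file): "INCH,LZ" -> groups ('INCH', 'L', None),
        # "METRIC,TZ,000.000" -> groups ('METRIC', 'T', '000.000')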
# Tool definition/parameters (?= is look-ahead
# NOTE: This might be an overkill!
# self.toolset_re = re.compile(r'^T(0?\d|\d\d)(?=.*C(\d*\.?\d*))?' +
# r'(?=.*F(\d*\.?\d*))?(?=.*S(\d*\.?\d*))?' +
# r'(?=.*B(\d*\.?\d*))?(?=.*H(\d*\.?\d*))?' +
# r'(?=.*Z([-\+]?\d*\.?\d*))?[CFSBHT]')
self.toolset_re = re.compile(r'^T(\d+)(?=.*C,?(\d*\.?\d*))?' +
r'(?=.*F(\d*\.?\d*))?(?=.*S(\d*\.?\d*))?' +
r'(?=.*B(\d*\.?\d*))?(?=.*H(\d*\.?\d*))?' +
r'(?=.*Z([-\+]?\d*\.?\d*))?[CFSBHT]')
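        # e.g. (illustrative): "T01C0.025" -> tool number '01' with a C (diameter) value of '0.025'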
self.detect_gcode_re = re.compile(r'^G2([01])$')
# Tool select
# Can have additional data after tool number but
# is ignored if present in the header.
# Warning: This will match toolset_re too.
# self.toolsel_re = re.compile(r'^T((?:\d\d)|(?:\d))')
self.toolsel_re = re.compile(r'^T(\d+)')
# Headerless toolset
# self.toolset_hl_re = re.compile(r'^T(\d+)(?=.*C(\d*\.?\d*))')
self.toolset_hl_re = re.compile(r'^T(\d+)(?:.?C(\d+\.?\d*))?')
# Comment
self.comm_re = re.compile(r'^;(.*)$')
# Absolute/Incremental G90/G91
self.absinc_re = re.compile(r'^G9([01])$')
# Modes of operation
# 1-linear, 2-circCW, 3-cirCCW, 4-vardwell, 5-Drill
self.modes_re = re.compile(r'^G0([012345])')
# Measuring mode
# 1-metric, 2-inch
self.meas_re = re.compile(r'^M7([12])$')
# Coordinates
# self.xcoord_re = re.compile(r'^X(\d*\.?\d*)(?:Y\d*\.?\d*)?$')
# self.ycoord_re = re.compile(r'^(?:X\d*\.?\d*)?Y(\d*\.?\d*)$')
coordsperiod_re_string = r'(?=.*X([-\+]?\d*\.\d*))?(?=.*Y([-\+]?\d*\.\d*))?[XY]'
self.coordsperiod_re = re.compile(coordsperiod_re_string)
coordsnoperiod_re_string = r'(?!.*\.)(?=.*X([-\+]?\d*))?(?=.*Y([-\+]?\d*))?[XY]'
self.coordsnoperiod_re = re.compile(coordsnoperiod_re_string)
# Slots parsing
slots_re_string = r'^([^G]+)G85(.*)$'
self.slots_re = re.compile(slots_re_string)
# R - Repeat hole (# times, X offset, Y offset)
self.rep_re = re.compile(r'^R(\d+)(?=.*[XY])+(?:X([-\+]?\d*\.?\d*))?(?:Y([-\+]?\d*\.?\d*))?$')
# Various stop/pause commands
self.stop_re = re.compile(r'^((G04)|(M09)|(M06)|(M00)|(M30))')
# Allegro Excellon format support
self.tool_units_re = re.compile(r'(\;\s*Holesize \d+.\s*\=\s*(\d+.\d+).*(MILS|MM))')
# Altium Excellon format support
# it's a comment like this: ";FILE_FORMAT=2:5"
self.altium_format = re.compile(r'^;\s*(?:FILE_FORMAT)?(?:Format)?[=|:]\s*(\d+)[:|.](\d+).*$')
# Parse coordinates
self.leadingzeros_re = re.compile(r'^[-\+]?(0*)(\d*)')
# Repeating command
self.repeat_re = re.compile(r'R(\d+)')
def parse_file(self, filename=None, file_obj=None):
"""
Reads the specified file as array of lines as passes it to ``parse_lines()``.
:param filename: The file to be read and parsed.
:param file_obj:
:type filename: str
:return: None
"""
if file_obj:
estr = file_obj
else:
if filename is None:
return "fail"
efile = open(filename, 'r')
estr = efile.readlines()
efile.close()
try:
self.parse_lines(estr)
except Exception:
return "fail"
def parse_lines(self, elines):
"""
Main Excellon parser.
:param elines: List of strings, each being a line of Excellon code.
:type elines: list
:return: None
"""
# State variables
current_tool = ""
in_header = False
headerless = False
current_x = None
current_y = None
slot_current_x = None
slot_current_y = None
name_tool = 0
allegro_warning = False
line_units_found = False
repeating_x = 0
repeating_y = 0
repeat = 0
line_units = ''
# ## Parsing starts here ## ##
line_num = 0 # Line number
eline = ""
try:
for eline in elines:
if self.app.abort_flag:
# graceful abort requested by the user
raise grace
line_num += 1
# log.debug("%3d %s" % (line_num, str(eline)))
self.source_file += eline
# Cleanup lines
eline = eline.strip(' \r\n')
# Excellon files and Gcode share some extensions therefore if we detect G20 or G21 it's GCODe
# and we need to exit from here
if self.detect_gcode_re.search(eline):
log.warning("This is GCODE mark: %s" % eline)
self.app.inform.emit('[ERROR_NOTCL] %s: %s' % (_('This is GCODE mark'), eline))
return
# Header Begin (M48) #
if self.hbegin_re.search(eline):
in_header = True
headerless = False
log.warning("Found start of the header: %s" % eline)
continue
# Allegro Header Begin (;HEADER) #
if self.allegro_hbegin_re.search(eline):
in_header = True
allegro_warning = True
log.warning("Found ALLEGRO start of the header: %s" % eline)
continue
# Search for Header End #
                # Since there might be comments in the header that include a header end char (% or M95)
                # we ignore the lines starting with ';' that contain such header end chars because they are not a
                # real header end.
if self.comm_re.search(eline):
match = self.tool_units_re.search(eline)
if match:
if line_units_found is False:
line_units_found = True
line_units = match.group(3)
self.convert_units({"MILS": "IN", "MM": "MM"}[line_units])
log.warning("Type of Allegro UNITS found inline in comments: %s" % line_units)
if match.group(2):
name_tool += 1
# ---------- add a TOOL ------------ #
if name_tool not in self.tools:
self.tools[name_tool] = {}
if line_units == 'MILS':
spec = {
'tooldia': (float(match.group(2)) / 1000)
}
self.tools[name_tool]['tooldia'] = (float(match.group(2)) / 1000)
log.debug("Tool definition: %d %s" % (name_tool, spec))
else:
spec = {
'tooldia': float(match.group(2))
}
self.tools[name_tool]['tooldia'] = float(match.group(2))
log.debug("Tool definition: %d %s" % (name_tool, spec))
spec['solid_geometry'] = []
continue
# search for Altium Excellon Format / Sprint Layout who is included as a comment
match = self.altium_format.search(eline)
if match:
self.excellon_format_upper_mm = match.group(1)
self.excellon_format_lower_mm = match.group(2)
self.excellon_format_upper_in = match.group(1)
self.excellon_format_lower_in = match.group(2)
log.warning("Excellon format preset found in comments: %s:%s" %
(match.group(1), match.group(2)))
continue
else:
log.warning("Line ignored, it's a comment: %s" % eline)
else:
if self.hend_re.search(eline):
if in_header is False or bool(self.tools) is False:
log.warning("Found end of the header but there is no header: %s" % eline)
log.warning("The only useful data in header are tools, units and format.")
log.warning("Therefore we will create units and format based on defaults.")
headerless = True
try:
self.convert_units({"INCH": "IN", "METRIC": "MM"}[self.excellon_units])
except Exception as e:
log.warning("Units could not be converted: %s" % str(e))
in_header = False
# for Allegro type of Excellons we reset name_tool variable so we can reuse it for toolchange
if allegro_warning is True:
name_tool = 0
log.warning("Found end of the header: %s" % eline)
'''
                    In case the units were not found in the header, we have two choices:
                    - one is to use the default value from the App Preferences
                    - the other is to make an estimation based on a threshold
                    Here we process the self.tools list and build one set with the tools whose diameter is less
                    than or equal to 0.1 and another set with the tools whose diameter is greater than 0.1,
                    0.1 being the threshold value. Most tools in an Excellon file are larger than 0.1 mm, therefore
                    if most of the tools are under this value it is safe to assume that the units are INCH.
'''
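                    # Illustrative example (diameters invented): tools with diameters {0.035, 0.059, 0.125}
                    # give two values <= 0.1 and one above it, so the units would be assumed to be INCH.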
greater_tools = set()
lower_tools = set()
if not self.excellon_units_found and self.tools:
for tool in self.tools:
tool_dia = float(self.tools[tool]['tooldia'])
lower_tools.add(tool_dia) if tool_dia <= 0.1 else greater_tools.add(tool_dia)
assumed_units = "IN" if len(lower_tools) > len(greater_tools) else "MM"
self.units = assumed_units
continue
# ## Alternative units format M71/M72
# Supposed to be just in the body (yes, the body)
# but some put it in the header (PADS for example).
# Will detect anywhere. Occurrence will change the
# object's units.
match = self.meas_re.match(eline)
if match:
self.units = {"1": "MM", "2": "IN"}[match.group(1)]
# Modified for issue #80
log.debug("ALternative M71/M72 units found, before conversion: %s" % self.units)
self.convert_units(self.units)
log.debug("ALternative M71/M72 units found, after conversion: %s" % self.units)
if self.units == 'MM':
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_mm), str(self.excellon_format_lower_mm)))
else:
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_in), str(self.excellon_format_lower_in)))
continue
# ### Body ####
if not in_header:
# ## Tool change ###
match = self.toolsel_re.search(eline)
if match:
current_tool = int(match.group(1))
log.debug("Tool change: %s" % current_tool)
if bool(headerless):
match = self.toolset_hl_re.search(eline)
if match:
name = int(match.group(1))
try:
diam = float(match.group(2))
except Exception:
# it's possible that tool definition has only tool number and no diameter info
                                # (those could be in another file, as PCB Wizard does)
# then match.group(2) = None and float(None) will create the exception
# the below construction is so each tool will have a slightly different diameter
# starting with a default value, to allow Excellon editing after that
self.diameterless = True
self.app.inform.emit('[WARNING] %s%s %s' %
(_("No tool diameter info's. See shell.\n"
"A tool change event: T"),
str(current_tool),
_("was found but the Excellon file "
"have no informations regarding the tool "
"diameters therefore the application will try to load it "
"by using some 'fake' diameters.\n"
"The user needs to edit the resulting Excellon object and "
"change the diameters to reflect the real diameters.")
)
)
if self.excellon_units == 'MM':
diam = self.toolless_diam + (int(current_tool) - 1) / 100
else:
diam = (self.toolless_diam + (int(current_tool) - 1) / 100) / 25.4
# ---------- add a TOOL ------------ #
spec = {"tooldia": diam, 'solid_geometry': []}
if name not in self.tools:
self.tools[name] = {}
self.tools[name]['tooldia'] = diam
self.tools[name]['solid_geometry'] = []
log.debug("Tool definition out of header: %s %s" % (name, spec))
continue
# ## Allegro Type Tool change ###
if allegro_warning is True:
match = self.absinc_re.search(eline)
match1 = self.stop_re.search(eline)
if match or match1:
name_tool += 1
current_tool = name_tool
log.debug("Tool change for Allegro type of Excellon: %d" % current_tool)
continue
# ## Slots parsing for drilled slots (contain G85)
                    # an Excellon drilled slot line may look like this:
# X01125Y0022244G85Y0027756
match = self.slots_re.search(eline)
if match:
# signal that there are milling slots operations
self.defaults['excellon_drills'] = False
# the slot start coordinates group is to the left of G85 command (group(1) )
# the slot stop coordinates group is to the right of G85 command (group(2) )
start_coords_match = match.group(1)
stop_coords_match = match.group(2)
# Slot coordinates without period # ##
# get the coordinates for slot start and for slot stop into variables
start_coords_noperiod = self.coordsnoperiod_re.search(start_coords_match)
stop_coords_noperiod = self.coordsnoperiod_re.search(stop_coords_match)
if start_coords_noperiod:
try:
slot_start_x = self.parse_number(start_coords_noperiod.group(1))
slot_current_x = slot_start_x
except TypeError:
slot_start_x = slot_current_x
except Exception:
return
try:
slot_start_y = self.parse_number(start_coords_noperiod.group(2))
slot_current_y = slot_start_y
except TypeError:
slot_start_y = slot_current_y
except Exception:
return
try:
slot_stop_x = self.parse_number(stop_coords_noperiod.group(1))
slot_current_x = slot_stop_x
except TypeError:
slot_stop_x = slot_current_x
except Exception:
return
try:
slot_stop_y = self.parse_number(stop_coords_noperiod.group(2))
slot_current_y = slot_stop_y
except TypeError:
slot_stop_y = slot_current_y
except Exception:
return
if (slot_start_x is None or slot_start_y is None or
slot_stop_x is None or slot_stop_y is None):
log.error("Slots are missing some or all coordinates.")
continue
# we have a slot
log.debug('Parsed a slot with coordinates: ' + str([slot_start_x,
slot_start_y, slot_stop_x,
slot_stop_y]))
# store current tool diameter as slot diameter
slot_dia = 0.05
try:
slot_dia = float(self.tools[current_tool]['tooldia'])
except Exception:
pass
log.debug(
'Milling/Drilling slot with tool %s, diam=%f' % (
current_tool,
slot_dia
)
)
# ---------- add a slot ------------ #
slot = (
Point(slot_start_x, slot_start_y),
Point(slot_stop_x, slot_stop_y)
)
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'slots' in self.tools[current_tool]:
self.tools[current_tool]['slots'].append(slot)
else:
self.tools[current_tool]['slots'] = [slot]
continue
# Slot coordinates with period: Use literally. ###
# get the coordinates for slot start and for slot stop into variables
start_coords_period = self.coordsperiod_re.search(start_coords_match)
stop_coords_period = self.coordsperiod_re.search(stop_coords_match)
if start_coords_period:
try:
slot_start_x = float(start_coords_period.group(1))
slot_current_x = slot_start_x
except TypeError:
slot_start_x = slot_current_x
except Exception:
return
try:
slot_start_y = float(start_coords_period.group(2))
slot_current_y = slot_start_y
except TypeError:
slot_start_y = slot_current_y
except Exception:
return
try:
slot_stop_x = float(stop_coords_period.group(1))
slot_current_x = slot_stop_x
except TypeError:
slot_stop_x = slot_current_x
except Exception:
return
try:
slot_stop_y = float(stop_coords_period.group(2))
slot_current_y = slot_stop_y
except TypeError:
slot_stop_y = slot_current_y
except Exception:
return
if (slot_start_x is None or slot_start_y is None or
slot_stop_x is None or slot_stop_y is None):
log.error("Slots are missing some or all coordinates.")
continue
# we have a slot
log.debug('Parsed a slot with coordinates: ' + str([slot_start_x,
slot_start_y, slot_stop_x,
slot_stop_y]))
# store current tool diameter as slot diameter
slot_dia = 0.05
try:
slot_dia = float(self.tools[current_tool]['tooldia'])
except Exception:
pass
log.debug(
'Milling/Drilling slot with tool %s, diam=%f' % (
current_tool,
slot_dia
)
)
# ---------- add a Slot ------------ #
slot = (
Point(slot_start_x, slot_start_y),
Point(slot_stop_x, slot_stop_y)
)
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'slots' in self.tools[current_tool]:
self.tools[current_tool]['slots'].append(slot)
else:
self.tools[current_tool]['slots'] = [slot]
continue
# ## Coordinates without period # ##
match = self.coordsnoperiod_re.search(eline)
if match:
matchr = self.repeat_re.search(eline)
if matchr: # if we have a repeat command
repeat = int(matchr.group(1))
if match.group(1):
repeating_x = self.parse_number(match.group(1))
else:
repeating_x = 0
if match.group(2):
repeating_y = self.parse_number(match.group(2))
else:
repeating_y = 0
coordx = current_x
coordy = current_y
while repeat > 0:
if repeating_x:
coordx += repeating_x
if repeating_y:
coordy += repeating_y
# ---------- add a Drill ------------ #
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'drills' in self.tools[current_tool]:
self.tools[current_tool]['drills'].append(Point((coordx, coordy)))
else:
self.tools[current_tool]['drills'] = [Point((coordx, coordy))]
repeat -= 1
current_x = coordx
current_y = coordy
continue
else: # those are normal coordinates
try:
x = self.parse_number(match.group(1))
current_x = x
except TypeError:
x = current_x
except Exception:
return
try:
y = self.parse_number(match.group(2))
current_y = y
except TypeError:
y = current_y
except Exception:
return
if x is None or y is None:
log.error("Missing coordinates")
continue
# ## Excellon Routing parse
if len(re.findall("G00", eline)) > 0:
self.match_routing_start = 'G00'
# signal that there are milling slots operations
self.defaults['excellon_drills'] = False
self.routing_flag = 0
slot_start_x = x
slot_start_y = y
continue
if self.routing_flag == 0:
if len(re.findall("G01", eline)) > 0:
self.match_routing_stop = 'G01'
# signal that there are milling slots operations
self.defaults['excellon_drills'] = False
self.routing_flag = 1
slot_stop_x = x
slot_stop_y = y
# ---------- add a Slot ------------ #
slot = (
Point(slot_start_x, slot_start_y),
Point(slot_stop_x, slot_stop_y)
)
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'slots' in self.tools[current_tool]:
self.tools[current_tool]['slots'].append(slot)
else:
self.tools[current_tool]['slots'] = [slot]
continue
if self.match_routing_start is None and self.match_routing_stop is None:
# signal that there are drill operations
self.defaults['excellon_drills'] = True
# ---------- add a Drill ------------ #
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'drills' in self.tools[current_tool]:
self.tools[current_tool]['drills'].append(Point((x, y)))
else:
self.tools[current_tool]['drills'] = [Point((x, y))]
# log.debug("{:15} {:8} {:8}".format(eline, x, y))
continue
# ## Coordinates with period: Use literally. # ##
match = self.coordsperiod_re.search(eline)
if match:
matchr = self.repeat_re.search(eline)
if matchr:
repeat = int(matchr.group(1))
if match:
# signal that there are drill operations
self.defaults['excellon_drills'] = True
try:
x = float(match.group(1))
repeating_x = current_x
current_x = x
except TypeError:
x = current_x
repeating_x = 0
try:
y = float(match.group(2))
repeating_y = current_y
current_y = y
except TypeError:
y = current_y
repeating_y = 0
if x is None or y is None:
log.error("Missing coordinates")
continue
# ## Excellon Routing parse
if len(re.findall("G00", eline)) > 0:
self.match_routing_start = 'G00'
# signal that there are milling slots operations
self.defaults['excellon_drills'] = False
self.routing_flag = 0
slot_start_x = x
slot_start_y = y
continue
if self.routing_flag == 0:
if len(re.findall("G01", eline)) > 0:
self.match_routing_stop = 'G01'
# signal that there are milling slots operations
self.defaults['excellon_drills'] = False
self.routing_flag = 1
slot_stop_x = x
slot_stop_y = y
# ---------- add a Slot ------------ #
slot = (
Point(slot_start_x, slot_start_y),
Point(slot_stop_x, slot_stop_y)
)
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'slots' in self.tools[current_tool]:
self.tools[current_tool]['slots'].append(slot)
else:
self.tools[current_tool]['slots'] = [slot]
continue
if self.match_routing_start is None and self.match_routing_stop is None:
# signal that there are drill operations
if repeat == 0:
# signal that there are drill operations
self.defaults['excellon_drills'] = True
# ---------- add a Drill ------------ #
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'drills' in self.tools[current_tool]:
self.tools[current_tool]['drills'].append(Point((x, y)))
else:
self.tools[current_tool]['drills'] = [Point((x, y))]
else:
coordx = x
coordy = y
while repeat > 0:
if repeating_x:
coordx = (repeat * x) + repeating_x
if repeating_y:
coordy = (repeat * y) + repeating_y
# ---------- add a Drill ------------ #
if current_tool not in self.tools:
self.tools[current_tool] = {}
if 'drills' in self.tools[current_tool]:
self.tools[current_tool]['drills'].append(Point((coordx, coordy)))
else:
self.tools[current_tool]['drills'] = [Point((coordx, coordy))]
repeat -= 1
repeating_x = repeating_y = 0
# log.debug("{:15} {:8} {:8}".format(eline, x, y))
continue
# ### Header ####
if in_header:
# ## Tool definitions # ##
match = self.toolset_re.search(eline)
if match:
# ---------- add a TOOL ------------ #
name = int(match.group(1))
spec = {"C": float(match.group(2)), 'solid_geometry': []}
if name not in self.tools:
self.tools[name] = {}
self.tools[name]['tooldia'] = float(match.group(2))
self.tools[name]['solid_geometry'] = []
log.debug("Tool definition: %s %s" % (name, spec))
continue
# ## Units and number format # ##
match = self.units_re.match(eline)
if match:
self.units = {"METRIC": "MM", "INCH": "IN"}[match.group(1)]
self.excellon_units_found = self.units
self.zeros = match.group(2) # "T" or "L". Might be empty
self.excellon_format = match.group(3)
if self.excellon_format:
upper = len(self.excellon_format.partition('.')[0])
lower = len(self.excellon_format.partition('.')[2])
if self.units == 'MM':
self.excellon_format_upper_mm = upper
self.excellon_format_lower_mm = lower
else:
self.excellon_format_upper_in = upper
self.excellon_format_lower_in = lower
# Modified for issue #80
log.warning("UNITS found inline - Value before conversion: %s" % self.units)
self.convert_units(self.units)
log.warning("UNITS found inline - Value after conversion: %s" % self.units)
if self.units == 'MM':
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_mm), str(self.excellon_format_lower_mm)))
else:
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_in), str(self.excellon_format_lower_in)))
log.warning("Type of ZEROS found inline, in header: %s" % self.zeros)
continue
# Search for units type again it might be alone on the line
if "INCH" in eline:
line_units = "IN"
# Modified for issue #80
log.warning("Type of UNITS found inline, in header, before conversion: %s" % line_units)
self.convert_units(line_units)
log.warning("Type of UNITS found inline, in header, after conversion: %s" % self.units)
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_in), str(self.excellon_format_lower_in)))
self.excellon_units_found = "IN"
continue
elif "METRIC" in eline:
line_units = "MM"
# Modified for issue #80
log.warning("Type of UNITS found inline, in header, before conversion: %s" % line_units)
self.convert_units(line_units)
log.warning("Type of UNITS found inline, in header, after conversion: %s" % self.units)
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_mm), str(self.excellon_format_lower_mm)))
self.excellon_units_found = "MM"
continue
# Search for zeros type again because it might be alone on the line
match = re.search(r'[LT]Z', eline)
if match:
self.zeros = match.group()
log.warning("Type of ZEROS found: %s" % self.zeros)
continue
# ## Units and number format outside header# ##
match = self.units_re.match(eline)
if match:
self.units = {"METRIC": "MM", "INCH": "IN"}[match.group(1)]
self.excellon_units_found = self.units
self.zeros = match.group(2) # "T" or "L". Might be empty
self.excellon_format = match.group(3)
if self.excellon_format:
upper = len(self.excellon_format.partition('.')[0])
lower = len(self.excellon_format.partition('.')[2])
if self.units == 'MM':
self.excellon_format_upper_mm = upper
self.excellon_format_lower_mm = lower
else:
self.excellon_format_upper_in = upper
self.excellon_format_lower_in = lower
# Modified for issue #80
log.warning("Type of UNITS found outside header, inline before conversion: %s" % self.units)
self.convert_units(self.units)
log.warning("Type of UNITS found outside header, inline after conversion: %s" % self.units)
if self.units == 'MM':
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_mm), str(self.excellon_format_lower_mm)))
else:
log.warning("Excellon format preset is: %s:%s" %
(str(self.excellon_format_upper_in), str(self.excellon_format_lower_in)))
log.warning("Type of ZEROS found outside header, inline: %s" % self.zeros)
continue
log.warning("Line ignored: %s" % eline)
# make sure that since we are in headerless mode, we convert the tools only after the file parsing
# is finished since the tools definitions are spread in the Excellon body. We use as units the value
# from self.defaults['excellon_units']
        # the data structure of the Excellon object has to include both the 'drills' and the 'slots' keys, otherwise
# I will need to test for them everywhere.
# Even if there are not drills or slots I just add the storage there with an empty list
for tool in self.tools:
if 'drills' not in self.tools[tool]:
self.tools[tool]['drills'] = []
if 'slots' not in self.tools[tool]:
self.tools[tool]['slots'] = []
log.info("Zeros: %s, Units %s." % (self.zeros, self.units))
except Exception:
log.error("Excellon PARSING FAILED. Line %d: %s" % (line_num, eline))
msg = '[ERROR_NOTCL] %s' % _("An internal error has occurred. See shell.\n")
msg += '{e_code} {tx} {l_nr}: {line}\n'.format(
e_code='[ERROR]',
tx=_("Excellon Parser error.\nParsing Failed. Line"),
l_nr=line_num,
line=eline)
msg += traceback.format_exc()
self.app.inform.emit(msg)
return "fail"
def parse_number(self, number_str):
"""
Parses coordinate numbers without period.
:param number_str: String representing the numerical value.
:type number_str: str
:return: Floating point representation of the number
:rtype: float
"""
match = self.leadingzeros_re.search(number_str)
nr_length = len(match.group(1)) + len(match.group(2))
try:
if self.zeros == "L" or self.zeros == "LZ": # Leading
# With leading zeros, when you type in a coordinate,
# the leading zeros must always be included. Trailing zeros
# are unneeded and may be left off. The CNC-7 will automatically add them.
# r'^[-\+]?(0*)(\d*)'
# 6 digits are divided by 10^4
                # If fewer than six digits are given, they are automatically added,
# 5 digits then are divided by 10^3 and so on.
if self.units.lower() == "in":
result = float(number_str) / (10 ** (float(nr_length) - float(self.excellon_format_upper_in)))
else:
result = float(number_str) / (10 ** (float(nr_length) - float(self.excellon_format_upper_mm)))
return result
else: # Trailing
# You must show all zeros to the right of the number and can omit
# all zeros to the left of the number. The CNC-7 will count the number
# of digits you typed and automatically fill in the missing zeros.
# ## flatCAM expects 6digits
# flatCAM expects the number of digits entered into the defaults
if self.units.lower() == "in": # Inches is 00.0000
result = float(number_str) / (10 ** (float(self.excellon_format_lower_in)))
else: # Metric is 000.000
result = float(number_str) / (10 ** (float(self.excellon_format_lower_mm)))
return result
except Exception as e:
log.error("Aborted. Operation could not be completed due of %s" % str(e))
return
def create_geometry(self):
"""
Creates circles of the tool diameter at every point
specified in self.tools[tool]['drills'].
Also creates geometries (polygons)
for the slots as specified in self.tools[tool]['slots']
        All the resulting geometry (drills and slots alike) is appended to the flat self.solid_geometry list;
        in addition, each tool keeps its own buffered polygons in self.tools[tool]['solid_geometry'].
:return: None
"""
log.debug("appParsers.ParseExcellon.Excellon.create_geometry()")
self.solid_geometry = []
try:
# clear the solid_geometry in self.tools
for tool in self.tools:
self.tools[tool]['solid_geometry'] = []
self.tools[tool]['data'] = {}
for tool in self.tools:
tooldia = self.tools[tool]['tooldia']
if 'drills' in self.tools[tool]:
for drill in self.tools[tool]['drills']:
poly = drill.buffer(tooldia / 2.0, int(int(self.geo_steps_per_circle) / 4))
# add poly in the tools geometry
self.tools[tool]['solid_geometry'].append(poly)
self.tools[tool]['data'] = deepcopy(self.default_data)
# add poly to the total solid geometry
self.solid_geometry.append(poly)
if 'slots' in self.tools[tool]:
for slot in self.tools[tool]['slots']:
start = slot[0]
stop = slot[1]
lines_string = LineString([start, stop])
poly = lines_string.buffer(tooldia / 2.0, int(int(self.geo_steps_per_circle) / 4))
# add poly in the tools geometry
self.tools[tool]['solid_geometry'].append(poly)
self.tools[tool]['data'] = deepcopy(self.default_data)
# add poly to the total solid geometry
self.solid_geometry.append(poly)
except Exception as e:
log.debug("appParsers.ParseExcellon.Excellon.create_geometry() -> "
"Excellon geometry creation failed due of ERROR: %s" % str(e))
return "fail"
def bounds(self, flatten=None):
"""
Returns coordinates of rectangular bounds
of Excellon geometry: (xmin, ymin, xmax, ymax).
        :param flatten: Not used
"""
log.debug("appParsers.ParseExcellon.Excellon.bounds()")
if self.solid_geometry is None or not self.tools:
log.debug("appParsers.ParseExcellon.Excellon -> solid_geometry is None")
return 0, 0, 0, 0
def bounds_rec(obj):
if type(obj) is list:
minx = np.Inf
miny = np.Inf
maxx = -np.Inf
maxy = -np.Inf
for k in obj:
if type(k) is dict:
for key in k:
minx_, miny_, maxx_, maxy_ = bounds_rec(k[key])
minx = min(minx, minx_)
miny = min(miny, miny_)
maxx = max(maxx, maxx_)
maxy = max(maxy, maxy_)
else:
minx_, miny_, maxx_, maxy_ = bounds_rec(k)
minx = min(minx, minx_)
miny = min(miny, miny_)
maxx = max(maxx, maxx_)
maxy = max(maxy, maxy_)
return minx, miny, maxx, maxy
else:
# it's a Shapely object, return it's bounds
return obj.bounds
minx_list = []
miny_list = []
maxx_list = []
maxy_list = []
for tool in self.tools:
eminx, eminy, emaxx, emaxy = bounds_rec(self.tools[tool]['solid_geometry'])
minx_list.append(eminx)
miny_list.append(eminy)
maxx_list.append(emaxx)
maxy_list.append(emaxy)
return min(minx_list), min(miny_list), max(maxx_list), max(maxy_list)
def convert_units(self, units):
"""
        This function first converts to the units found in the Excellon file, but since the tools are not
        parsed yet at that point it has no effect other than signalling that the units are the ones in the file.
        The true conversion is done on object creation, in app_obj.new_object(), because that happens at the end
        of the Excellon file parsing, when the tools are present and self.tools is really converted from the
        units found inside the file to the FlatCAM units.
        It is a somewhat convoluted way to make the conversion and it relies on the assumption that the Excellon
        file will have declared its units before the tools are parsed and stored in self.tools.
:param units: 'IN' or 'MM'. String
:return:
"""
# factor = Geometry.convert_units(self, units)
obj_units = units
if obj_units.upper() == self.units.upper():
factor = 1.0
elif obj_units.upper() == "MM":
factor = 25.4
elif obj_units.upper() == "IN":
factor = 1 / 25.4
else:
log.error("Unsupported units: %s" % str(obj_units))
factor = 1.0
log.debug("appParsers.ParseExcellon.Excellon.convert_units() --> Factor: %s" % str(factor))
self.units = obj_units
self.scale(factor, factor)
self.file_units_factor = factor
# Tools
for tname in self.tools:
self.tools[tname]["tooldia"] *= factor
self.create_geometry()
return factor
def scale(self, xfactor, yfactor=None, point=None):
"""
Scales geometry on the XY plane in the object by a given factor.
        Tool sizes, feedrates and Z-plane dimensions are untouched.
:param xfactor: Number by which to scale the object.
:type xfactor: float
:param yfactor: Number by which to scale the object.
:type yfactor: float
:param point: Origin point for scale
:return: None
:rtype: None
"""
log.debug("appParsers.ParseExcellon.Excellon.scale()")
if yfactor is None:
yfactor = xfactor
if point is None:
px = 0
py = 0
else:
px, py = point
if xfactor == 0 and yfactor == 0:
return
def scale_geom(obj):
if type(obj) is list:
new_obj = []
for g in obj:
new_obj.append(scale_geom(g))
return new_obj
else:
try:
return affinity.scale(obj, xfactor, yfactor, origin=(px, py))
except AttributeError:
return obj
# variables to display the percentage of work done
self.geo_len = 0
try:
self.geo_len = len(self.tools)
except TypeError:
self.geo_len = 1
self.old_disp_number = 0
self.el_count = 0
for tool in self.tools:
# Scale Drills
if 'drills' in self.tools[tool]:
new_drills = []
for drill in self.tools[tool]['drills']:
new_drills.append(affinity.scale(drill, xfactor, yfactor, origin=(px, py)))
self.tools[tool]['drills'] = new_drills
# Scale Slots
if 'slots' in self.tools[tool]:
new_slots = []
for slot in self.tools[tool]['slots']:
new_start = affinity.scale(slot[0], xfactor, yfactor, origin=(px, py))
new_stop = affinity.scale(slot[1], xfactor, yfactor, origin=(px, py))
new_slot = (new_start, new_stop)
new_slots.append(new_slot)
self.tools[tool]['slots'] = new_slots
# Scale solid_geometry
self.tools[tool]['solid_geometry'] = scale_geom(self.tools[tool]['solid_geometry'])
# update status display
self.el_count += 1
disp_number = int(np.interp(self.el_count, [0, self.geo_len], [0, 100]))
if self.old_disp_number < disp_number <= 100:
self.app.proc_container.update_view_text(' %d%%' % disp_number)
self.old_disp_number = disp_number
self.create_geometry()
self.app.proc_container.new_text = ''
def offset(self, vect):
"""
Offsets geometry on the XY plane in the object by a given vector.
:param vect: (x, y) offset vector.
:type vect: tuple
:return: None
"""
log.debug("appParsers.ParseExcellon.Excellon.offset()")
dx, dy = vect
if dx == 0 and dy == 0:
return
def offset_geom(obj):
try:
new_obj = []
for geo in obj:
new_obj.append(offset_geom(geo))
return new_obj
except TypeError:
try:
return affinity.translate(obj, xoff=dx, yoff=dy)
except AttributeError:
return obj
# variables to display the percentage of work done
self.geo_len = 0
try:
self.geo_len = len(self.tools)
except TypeError:
self.geo_len = 1
self.old_disp_number = 0
self.el_count = 0
for tool in self.tools:
# Offset Drills
if 'drills' in self.tools[tool]:
new_drills = []
for drill in self.tools[tool]['drills']:
new_drills.append(affinity.translate(drill, xoff=dx, yoff=dy))
self.tools[tool]['drills'] = new_drills
# Offset Slots
if 'slots' in self.tools[tool]:
new_slots = []
for slot in self.tools[tool]['slots']:
new_start = affinity.translate(slot[0], xoff=dx, yoff=dy)
new_stop = affinity.translate(slot[1], xoff=dx, yoff=dy)
new_slot = (new_start, new_stop)
new_slots.append(new_slot)
self.tools[tool]['slots'] = new_slots
# Offset solid_geometry
self.tools[tool]['solid_geometry'] = offset_geom(self.tools[tool]['solid_geometry'])
# update status display
self.el_count += 1
disp_number = int(np.interp(self.el_count, [0, self.geo_len], [0, 100]))
if self.old_disp_number < disp_number <= 100:
self.app.proc_container.update_view_text(' %d%%' % disp_number)
self.old_disp_number = disp_number
# Recreate geometry
self.create_geometry()
self.app.proc_container.new_text = ''
def mirror(self, axis, point):
"""
:param axis: "X" or "Y" indicates around which axis to mirror.
:type axis: str
:param point: [x, y] point belonging to the mirror axis.
:type point: list
:return: None
"""
log.debug("appParsers.ParseExcellon.Excellon.mirror()")
px, py = point
xscale, yscale = {"X": (1.0, -1.0), "Y": (-1.0, 1.0)}[axis]
def mirror_geom(obj):
try:
new_obj = []
for geo in obj:
new_obj.append(mirror_geom(geo))
return new_obj
except TypeError:
try:
return affinity.scale(obj, xscale, yscale, origin=(px, py))
except AttributeError:
return obj
# Modify data
# variables to display the percentage of work done
self.geo_len = 0
try:
self.geo_len = len(self.tools)
except TypeError:
self.geo_len = 1
self.old_disp_number = 0
self.el_count = 0
for tool in self.tools:
# Offset Drills
if 'drills' in self.tools[tool]:
new_drills = []
for drill in self.tools[tool]['drills']:
new_drills.append(affinity.scale(drill, xscale, yscale, origin=(px, py)))
self.tools[tool]['drills'] = new_drills
# Offset Slots
if 'slots' in self.tools[tool]:
new_slots = []
for slot in self.tools[tool]['slots']:
new_start = affinity.scale(slot[0], xscale, yscale, origin=(px, py))
new_stop = affinity.scale(slot[1], xscale, yscale, origin=(px, py))
new_slot = (new_start, new_stop)
new_slots.append(new_slot)
self.tools[tool]['slots'] = new_slots
# Offset solid_geometry
self.tools[tool]['solid_geometry'] = mirror_geom(self.tools[tool]['solid_geometry'])
# update status display
self.el_count += 1
disp_number = int(np.interp(self.el_count, [0, self.geo_len], [0, 100]))
if self.old_disp_number < disp_number <= 100:
self.app.proc_container.update_view_text(' %d%%' % disp_number)
self.old_disp_number = disp_number
# Recreate geometry
self.create_geometry()
self.app.proc_container.new_text = ''
def skew(self, angle_x=None, angle_y=None, point=None):
"""
Shear/Skew the geometries of an object by angles along x and y dimensions.
        Tool sizes, feedrates and Z-plane dimensions are untouched.
:param angle_x:
:param angle_y:
The shear angle(s) for the x and y axes respectively. These can be
specified in either degrees (default) or radians by setting
use_radians=True.
:param point: Origin point for Skew
See shapely manual for more information:
http://toblerity.org/shapely/manual.html#affine-transformations
"""
log.debug("appParsers.ParseExcellon.Excellon.skew()")
if angle_x is None:
angle_x = 0.0
if angle_y is None:
angle_y = 0.0
if angle_x == 0 and angle_y == 0:
return
def skew_geom(obj):
try:
new_obj = []
for g in obj:
new_obj.append(skew_geom(g))
return new_obj
except TypeError:
try:
return affinity.skew(obj, angle_x, angle_y, origin=(px, py))
except AttributeError:
return obj
# variables to display the percentage of work done
self.geo_len = 0
try:
self.geo_len = len(self.tools)
except TypeError:
self.geo_len = 1
self.old_disp_number = 0
self.el_count = 0
if point is None:
px, py = 0, 0
else:
px, py = point
for tool in self.tools:
# Offset Drills
if 'drills' in self.tools[tool]:
new_drills = []
for drill in self.tools[tool]['drills']:
new_drills.append(affinity.skew(drill, angle_x, angle_y, origin=(px, py)))
self.tools[tool]['drills'] = new_drills
# Offset Slots
if 'slots' in self.tools[tool]:
new_slots = []
for slot in self.tools[tool]['slots']:
new_start = affinity.skew(slot[0], angle_x, angle_y, origin=(px, py))
new_stop = affinity.skew(slot[1], angle_x, angle_y, origin=(px, py))
new_slot = (new_start, new_stop)
new_slots.append(new_slot)
self.tools[tool]['slots'] = new_slots
# Offset solid_geometry
self.tools[tool]['solid_geometry'] = skew_geom(self.tools[tool]['solid_geometry'])
# update status display
self.el_count += 1
disp_number = int(np.interp(self.el_count, [0, self.geo_len], [0, 100]))
if self.old_disp_number < disp_number <= 100:
self.app.proc_container.update_view_text(' %d%%' % disp_number)
self.old_disp_number = disp_number
self.create_geometry()
self.app.proc_container.new_text = ''
def rotate(self, angle, point=None):
"""
Rotate the geometry of an object by an angle around the 'point' coordinates
:param angle:
:param point: tuple of coordinates (x, y)
:return: None
"""
log.debug("appParsers.ParseExcellon.Excellon.rotate()")
if angle == 0:
return
def rotate_geom(obj, origin=None):
if type(obj) is list:
new_obj = []
for g in obj:
new_obj.append(rotate_geom(g))
return new_obj
else:
if origin:
try:
return affinity.rotate(obj, angle, origin=origin)
except AttributeError:
return obj
else:
try:
return affinity.rotate(obj, angle, origin=orig)
except AttributeError:
return obj
# variables to display the percentage of work done
self.geo_len = 0
try:
self.geo_len = len(self.tools)
except TypeError:
self.geo_len = 1
self.old_disp_number = 0
self.el_count = 0
if point is None:
orig = 'center'
else:
orig = point
for tool in self.tools:
# Offset Drills
if 'drills' in self.tools[tool]:
new_drills = []
for drill in self.tools[tool]['drills']:
new_drills.append(affinity.rotate(drill, angle, origin=orig))
self.tools[tool]['drills'] = new_drills
# Offset Slots
if 'slots' in self.tools[tool]:
new_slots = []
for slot in self.tools[tool]['slots']:
new_start = affinity.rotate(slot[0], angle, origin=orig)
new_stop = affinity.rotate(slot[1], angle, origin=orig)
new_slot = (new_start, new_stop)
new_slots.append(new_slot)
self.tools[tool]['slots'] = new_slots
# Offset solid_geometry
self.tools[tool]['solid_geometry'] = rotate_geom(self.tools[tool]['solid_geometry'], origin=orig)
# update status display
self.el_count += 1
disp_number = int(np.interp(self.el_count, [0, self.geo_len], [0, 100]))
if self.old_disp_number < disp_number <= 100:
self.app.proc_container.update_view_text(' %d%%' % disp_number)
self.old_disp_number = disp_number
self.create_geometry()
self.app.proc_container.new_text = ''
def buffer(self, distance, join, factor):
"""
:param distance: if 'factor' is True then distance is the factor
:param factor: True or False (None)
        :param join: The type of line join used by the shapely buffer method: round, square, bevel
:return: None
"""
log.debug("appParsers.ParseExcellon.Excellon.buffer()")
if distance == 0:
return
def buffer_geom(obj):
try:
new_obj = []
for g in obj:
new_obj.append(buffer_geom(g))
return new_obj
except TypeError:
try:
if factor is None:
return obj.buffer(distance, resolution=self.geo_steps_per_circle)
else:
return affinity.scale(obj, xfact=distance, yfact=distance, origin='center')
except AttributeError:
return obj
# buffer solid_geometry
for tool, tool_dict in list(self.tools.items()):
res = buffer_geom(tool_dict['solid_geometry'])
try:
__ = iter(res)
self.tools[tool]['solid_geometry'] = res
except TypeError:
self.tools[tool]['solid_geometry'] = [res]
if factor is None:
self.tools[tool]['tooldia'] += distance
else:
self.tools[tool]['tooldia'] *= distance
self.create_geometry()
|
[
"copy.deepcopy",
"shapely.geometry.Point",
"shapely.affinity.translate",
"shapely.affinity.scale",
"shapely.affinity.rotate",
"shapely.geometry.LineString",
"re.findall",
"traceback.format_exc",
"numpy.interp",
"shapely.affinity.skew",
"re.search",
"logging.getLogger",
"re.compile"
] |
[((783, 808), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (800, 808), False, 'import logging\n'), ((3281, 3301), 'copy.deepcopy', 'deepcopy', (['self.zeros'], {}), '(self.zeros)\n', (3289, 3301), False, 'from copy import deepcopy\n'), ((4904, 4923), 're.compile', 're.compile', (['"""^M48$"""'], {}), "('^M48$')\n", (4914, 4923), False, 'import re\n'), ((5015, 5044), 're.compile', 're.compile', (['"""\\\\;\\\\s*(HEADER)"""'], {}), "('\\\\;\\\\s*(HEADER)')\n", (5025, 5044), False, 'import re\n'), ((5168, 5193), 're.compile', 're.compile', (['"""^(?:M95|%)$"""'], {}), "('^(?:M95|%)$')\n", (5178, 5193), False, 'import re\n'), ((5467, 5527), 're.compile', 're.compile', (['"""^(INCH|METRIC)(?:,([TL])Z)?,?(\\\\d*\\\\.\\\\d+)?.*$"""'], {}), "('^(INCH|METRIC)(?:,([TL])Z)?,?(\\\\d*\\\\.\\\\d+)?.*$')\n", (5477, 5527), False, 'import re\n'), ((5971, 6169), 're.compile', 're.compile', (["('^T(\\\\d+)(?=.*C,?(\\\\d*\\\\.?\\\\d*))?' +\n '(?=.*F(\\\\d*\\\\.?\\\\d*))?(?=.*S(\\\\d*\\\\.?\\\\d*))?' +\n '(?=.*B(\\\\d*\\\\.?\\\\d*))?(?=.*H(\\\\d*\\\\.?\\\\d*))?' +\n '(?=.*Z([-\\\\+]?\\\\d*\\\\.?\\\\d*))?[CFSBHT]')"], {}), "('^T(\\\\d+)(?=.*C,?(\\\\d*\\\\.?\\\\d*))?' +\n '(?=.*F(\\\\d*\\\\.?\\\\d*))?(?=.*S(\\\\d*\\\\.?\\\\d*))?' +\n '(?=.*B(\\\\d*\\\\.?\\\\d*))?(?=.*H(\\\\d*\\\\.?\\\\d*))?' +\n '(?=.*Z([-\\\\+]?\\\\d*\\\\.?\\\\d*))?[CFSBHT]')\n", (5981, 6169), False, 'import re\n'), ((6285, 6309), 're.compile', 're.compile', (['"""^G2([01])$"""'], {}), "('^G2([01])$')\n", (6295, 6309), False, 'import re\n'), ((6578, 6600), 're.compile', 're.compile', (['"""^T(\\\\d+)"""'], {}), "('^T(\\\\d+)')\n", (6588, 6600), False, 'import re\n'), ((6732, 6776), 're.compile', 're.compile', (['"""^T(\\\\d+)(?:.?C(\\\\d+\\\\.?\\\\d*))?"""'], {}), "('^T(\\\\d+)(?:.?C(\\\\d+\\\\.?\\\\d*))?')\n", (6742, 6776), False, 'import re\n'), ((6816, 6837), 're.compile', 're.compile', (['"""^;(.*)$"""'], {}), "('^;(.*)$')\n", (6826, 6837), False, 'import re\n'), ((6904, 6928), 're.compile', 're.compile', (['"""^G9([01])$"""'], {}), "('^G9([01])$')\n", (6914, 6928), False, 'import re\n'), ((7044, 7071), 're.compile', 're.compile', (['"""^G0([012345])"""'], {}), "('^G0([012345])')\n", (7054, 7071), False, 'import re\n'), ((7149, 7173), 're.compile', 're.compile', (['"""^M7([12])$"""'], {}), "('^M7([12])$')\n", (7159, 7173), False, 'import re\n'), ((7462, 7496), 're.compile', 're.compile', (['coordsperiod_re_string'], {}), '(coordsperiod_re_string)\n', (7472, 7496), False, 'import re\n'), ((7620, 7656), 're.compile', 're.compile', (['coordsnoperiod_re_string'], {}), '(coordsnoperiod_re_string)\n', (7630, 7656), False, 'import re\n'), ((7752, 7779), 're.compile', 're.compile', (['slots_re_string'], {}), '(slots_re_string)\n', (7762, 7779), False, 'import re\n'), ((7859, 7957), 're.compile', 're.compile', (['"""^R(\\\\d+)(?=.*[XY])+(?:X([-\\\\+]?\\\\d*\\\\.?\\\\d*))?(?:Y([-\\\\+]?\\\\d*\\\\.?\\\\d*))?$"""'], {}), "(\n '^R(\\\\d+)(?=.*[XY])+(?:X([-\\\\+]?\\\\d*\\\\.?\\\\d*))?(?:Y([-\\\\+]?\\\\d*\\\\.?\\\\d*))?$'\n )\n", (7869, 7957), False, 'import re\n'), ((8002, 8048), 're.compile', 're.compile', (['"""^((G04)|(M09)|(M06)|(M00)|(M30))"""'], {}), "('^((G04)|(M09)|(M06)|(M00)|(M30))')\n", (8012, 8048), False, 'import re\n'), ((8122, 8192), 're.compile', 're.compile', (['"""(\\\\;\\\\s*Holesize \\\\d+.\\\\s*\\\\=\\\\s*(\\\\d+.\\\\d+).*(MILS|MM))"""'], {}), "('(\\\\;\\\\s*Holesize \\\\d+.\\\\s*\\\\=\\\\s*(\\\\d+.\\\\d+).*(MILS|MM))')\n", (8132, 8192), False, 'import re\n'), ((8312, 8388), 're.compile', 
're.compile', (['"""^;\\\\s*(?:FILE_FORMAT)?(?:Format)?[=|:]\\\\s*(\\\\d+)[:|.](\\\\d+).*$"""'], {}), "('^;\\\\s*(?:FILE_FORMAT)?(?:Format)?[=|:]\\\\s*(\\\\d+)[:|.](\\\\d+).*$')\n", (8322, 8388), False, 'import re\n'), ((8446, 8478), 're.compile', 're.compile', (['"""^[-\\\\+]?(0*)(\\\\d*)"""'], {}), "('^[-\\\\+]?(0*)(\\\\d*)')\n", (8456, 8478), False, 'import re\n'), ((8532, 8553), 're.compile', 're.compile', (['"""R(\\\\d+)"""'], {}), "('R(\\\\d+)')\n", (8542, 8553), False, 'import re\n'), ((47803, 47825), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (47823, 47825), False, 'import traceback\n'), ((58872, 58925), 'numpy.interp', 'np.interp', (['self.el_count', '[0, self.geo_len]', '[0, 100]'], {}), '(self.el_count, [0, self.geo_len], [0, 100])\n', (58881, 58925), True, 'import numpy as np\n'), ((61223, 61276), 'numpy.interp', 'np.interp', (['self.el_count', '[0, self.geo_len]', '[0, 100]'], {}), '(self.el_count, [0, self.geo_len], [0, 100])\n', (61232, 61276), True, 'import numpy as np\n'), ((63775, 63828), 'numpy.interp', 'np.interp', (['self.el_count', '[0, self.geo_len]', '[0, 100]'], {}), '(self.el_count, [0, self.geo_len], [0, 100])\n', (63784, 63828), True, 'import numpy as np\n'), ((66799, 66852), 'numpy.interp', 'np.interp', (['self.el_count', '[0, self.geo_len]', '[0, 100]'], {}), '(self.el_count, [0, self.geo_len], [0, 100])\n', (66808, 66852), True, 'import numpy as np\n'), ((69512, 69565), 'numpy.interp', 'np.interp', (['self.el_count', '[0, self.geo_len]', '[0, 100]'], {}), '(self.el_count, [0, self.geo_len], [0, 100])\n', (69521, 69565), True, 'import numpy as np\n'), ((44384, 44409), 're.search', 're.search', (['"""[LT]Z"""', 'eline'], {}), "('[LT]Z', eline)\n", (44393, 44409), False, 'import re\n'), ((57424, 57478), 'shapely.affinity.scale', 'affinity.scale', (['obj', 'xfactor', 'yfactor'], {'origin': '(px, py)'}), '(obj, xfactor, yfactor, origin=(px, py))\n', (57438, 57478), True, 'import shapely.affinity as affinity\n'), ((58339, 58397), 'shapely.affinity.scale', 'affinity.scale', (['slot[0]', 'xfactor', 'yfactor'], {'origin': '(px, py)'}), '(slot[0], xfactor, yfactor, origin=(px, py))\n', (58353, 58397), True, 'import shapely.affinity as affinity\n'), ((58429, 58487), 'shapely.affinity.scale', 'affinity.scale', (['slot[1]', 'xfactor', 'yfactor'], {'origin': '(px, py)'}), '(slot[1], xfactor, yfactor, origin=(px, py))\n', (58443, 58487), True, 'import shapely.affinity as affinity\n'), ((60714, 60759), 'shapely.affinity.translate', 'affinity.translate', (['slot[0]'], {'xoff': 'dx', 'yoff': 'dy'}), '(slot[0], xoff=dx, yoff=dy)\n', (60732, 60759), True, 'import shapely.affinity as affinity\n'), ((60791, 60836), 'shapely.affinity.translate', 'affinity.translate', (['slot[1]'], {'xoff': 'dx', 'yoff': 'dy'}), '(slot[1], xoff=dx, yoff=dy)\n', (60809, 60836), True, 'import shapely.affinity as affinity\n'), ((63244, 63300), 'shapely.affinity.scale', 'affinity.scale', (['slot[0]', 'xscale', 'yscale'], {'origin': '(px, py)'}), '(slot[0], xscale, yscale, origin=(px, py))\n', (63258, 63300), True, 'import shapely.affinity as affinity\n'), ((63332, 63388), 'shapely.affinity.scale', 'affinity.scale', (['slot[1]', 'xscale', 'yscale'], {'origin': '(px, py)'}), '(slot[1], xscale, yscale, origin=(px, py))\n', (63346, 63388), True, 'import shapely.affinity as affinity\n'), ((66268, 66325), 'shapely.affinity.skew', 'affinity.skew', (['slot[0]', 'angle_x', 'angle_y'], {'origin': '(px, py)'}), '(slot[0], angle_x, angle_y, origin=(px, py))\n', (66281, 66325), True, 'import 
shapely.affinity as affinity\n'), ((66357, 66414), 'shapely.affinity.skew', 'affinity.skew', (['slot[1]', 'angle_x', 'angle_y'], {'origin': '(px, py)'}), '(slot[1], angle_x, angle_y, origin=(px, py))\n', (66370, 66414), True, 'import shapely.affinity as affinity\n'), ((68992, 69036), 'shapely.affinity.rotate', 'affinity.rotate', (['slot[0]', 'angle'], {'origin': 'orig'}), '(slot[0], angle, origin=orig)\n', (69007, 69036), True, 'import shapely.affinity as affinity\n'), ((69068, 69112), 'shapely.affinity.rotate', 'affinity.rotate', (['slot[1]', 'angle'], {'origin': 'orig'}), '(slot[1], angle, origin=orig)\n', (69083, 69112), True, 'import shapely.affinity as affinity\n'), ((51791, 51818), 'copy.deepcopy', 'deepcopy', (['self.default_data'], {}), '(self.default_data)\n', (51799, 51818), False, 'from copy import deepcopy\n'), ((52167, 52192), 'shapely.geometry.LineString', 'LineString', (['[start, stop]'], {}), '([start, stop])\n', (52177, 52192), False, 'from shapely.geometry import Point, LineString\n'), ((52481, 52508), 'copy.deepcopy', 'deepcopy', (['self.default_data'], {}), '(self.default_data)\n', (52489, 52508), False, 'from copy import deepcopy\n'), ((58036, 58092), 'shapely.affinity.scale', 'affinity.scale', (['drill', 'xfactor', 'yfactor'], {'origin': '(px, py)'}), '(drill, xfactor, yfactor, origin=(px, py))\n', (58050, 58092), True, 'import shapely.affinity as affinity\n'), ((59823, 59864), 'shapely.affinity.translate', 'affinity.translate', (['obj'], {'xoff': 'dx', 'yoff': 'dy'}), '(obj, xoff=dx, yoff=dy)\n', (59841, 59864), True, 'import shapely.affinity as affinity\n'), ((60423, 60466), 'shapely.affinity.translate', 'affinity.translate', (['drill'], {'xoff': 'dx', 'yoff': 'dy'}), '(drill, xoff=dx, yoff=dy)\n', (60441, 60466), True, 'import shapely.affinity as affinity\n'), ((62308, 62360), 'shapely.affinity.scale', 'affinity.scale', (['obj', 'xscale', 'yscale'], {'origin': '(px, py)'}), '(obj, xscale, yscale, origin=(px, py))\n', (62322, 62360), True, 'import shapely.affinity as affinity\n'), ((62942, 62996), 'shapely.affinity.scale', 'affinity.scale', (['drill', 'xscale', 'yscale'], {'origin': '(px, py)'}), '(drill, xscale, yscale, origin=(px, py))\n', (62956, 62996), True, 'import shapely.affinity as affinity\n'), ((65259, 65312), 'shapely.affinity.skew', 'affinity.skew', (['obj', 'angle_x', 'angle_y'], {'origin': '(px, py)'}), '(obj, angle_x, angle_y, origin=(px, py))\n', (65272, 65312), True, 'import shapely.affinity as affinity\n'), ((65965, 66020), 'shapely.affinity.skew', 'affinity.skew', (['drill', 'angle_x', 'angle_y'], {'origin': '(px, py)'}), '(drill, angle_x, angle_y, origin=(px, py))\n', (65978, 66020), True, 'import shapely.affinity as affinity\n'), ((67802, 67844), 'shapely.affinity.rotate', 'affinity.rotate', (['obj', 'angle'], {'origin': 'origin'}), '(obj, angle, origin=origin)\n', (67817, 67844), True, 'import shapely.affinity as affinity\n'), ((68001, 68041), 'shapely.affinity.rotate', 'affinity.rotate', (['obj', 'angle'], {'origin': 'orig'}), '(obj, angle, origin=orig)\n', (68016, 68041), True, 'import shapely.affinity as affinity\n'), ((68702, 68744), 'shapely.affinity.rotate', 'affinity.rotate', (['drill', 'angle'], {'origin': 'orig'}), '(drill, angle, origin=orig)\n', (68717, 68744), True, 'import shapely.affinity as affinity\n'), ((70707, 70775), 'shapely.affinity.scale', 'affinity.scale', (['obj'], {'xfact': 'distance', 'yfact': 'distance', 'origin': '"""center"""'}), "(obj, xfact=distance, yfact=distance, origin='center')\n", (70721, 70775), True, 
'import shapely.affinity as affinity\n'), ((25401, 25434), 'shapely.geometry.Point', 'Point', (['slot_start_x', 'slot_start_y'], {}), '(slot_start_x, slot_start_y)\n', (25406, 25434), False, 'from shapely.geometry import Point, LineString\n'), ((25468, 25499), 'shapely.geometry.Point', 'Point', (['slot_stop_x', 'slot_stop_y'], {}), '(slot_stop_x, slot_stop_y)\n', (25473, 25499), False, 'from shapely.geometry import Point, LineString\n'), ((29206, 29239), 'shapely.geometry.Point', 'Point', (['slot_start_x', 'slot_start_y'], {}), '(slot_start_x, slot_start_y)\n', (29211, 29239), False, 'from shapely.geometry import Point, LineString\n'), ((29273, 29304), 'shapely.geometry.Point', 'Point', (['slot_stop_x', 'slot_stop_y'], {}), '(slot_stop_x, slot_stop_y)\n', (29278, 29304), False, 'from shapely.geometry import Point, LineString\n'), ((36473, 36497), 're.findall', 're.findall', (['"""G00"""', 'eline'], {}), "('G00', eline)\n", (36483, 36497), False, 'import re\n'), ((32530, 32554), 're.findall', 're.findall', (['"""G00"""', 'eline'], {}), "('G00', eline)\n", (32540, 32554), False, 'import re\n'), ((36977, 37001), 're.findall', 're.findall', (['"""G01"""', 'eline'], {}), "('G01', eline)\n", (36987, 37001), False, 'import re\n'), ((37529, 37562), 'shapely.geometry.Point', 'Point', (['slot_start_x', 'slot_start_y'], {}), '(slot_start_x, slot_start_y)\n', (37534, 37562), False, 'from shapely.geometry import Point, LineString\n'), ((37600, 37631), 'shapely.geometry.Point', 'Point', (['slot_stop_x', 'slot_stop_y'], {}), '(slot_stop_x, slot_stop_y)\n', (37605, 37631), False, 'from shapely.geometry import Point, LineString\n'), ((31221, 31244), 'shapely.geometry.Point', 'Point', (['(coordx, coordy)'], {}), '((coordx, coordy))\n', (31226, 31244), False, 'from shapely.geometry import Point, LineString\n'), ((31358, 31381), 'shapely.geometry.Point', 'Point', (['(coordx, coordy)'], {}), '((coordx, coordy))\n', (31363, 31381), False, 'from shapely.geometry import Point, LineString\n'), ((33070, 33094), 're.findall', 're.findall', (['"""G01"""', 'eline'], {}), "('G01', eline)\n", (33080, 33094), False, 'import re\n'), ((33658, 33691), 'shapely.geometry.Point', 'Point', (['slot_start_x', 'slot_start_y'], {}), '(slot_start_x, slot_start_y)\n', (33663, 33691), False, 'from shapely.geometry import Point, LineString\n'), ((33733, 33764), 'shapely.geometry.Point', 'Point', (['slot_stop_x', 'slot_stop_y'], {}), '(slot_stop_x, slot_stop_y)\n', (33738, 33764), False, 'from shapely.geometry import Point, LineString\n'), ((34883, 34896), 'shapely.geometry.Point', 'Point', (['(x, y)'], {}), '((x, y))\n', (34888, 34896), False, 'from shapely.geometry import Point, LineString\n'), ((35010, 35023), 'shapely.geometry.Point', 'Point', (['(x, y)'], {}), '((x, y))\n', (35015, 35023), False, 'from shapely.geometry import Point, LineString\n'), ((38827, 38840), 'shapely.geometry.Point', 'Point', (['(x, y)'], {}), '((x, y))\n', (38832, 38840), False, 'from shapely.geometry import Point, LineString\n'), ((38954, 38967), 'shapely.geometry.Point', 'Point', (['(x, y)'], {}), '((x, y))\n', (38959, 38967), False, 'from shapely.geometry import Point, LineString\n'), ((39774, 39797), 'shapely.geometry.Point', 'Point', (['(coordx, coordy)'], {}), '((coordx, coordy))\n', (39779, 39797), False, 'from shapely.geometry import Point, LineString\n'), ((39919, 39942), 'shapely.geometry.Point', 'Point', (['(coordx, coordy)'], {}), '((coordx, coordy))\n', (39924, 39942), False, 'from shapely.geometry import Point, LineString\n')]
|
#! /usr/bin/env python3
import rospy
import numpy as np
import sys
from geometry_msgs.msg import PoseWithCovarianceStamped
class SimBall:
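    """Simulate a ball moving in 2-D: the velocity follows a clipped random
    walk, and both the true position and a noise-corrupted measurement are
    published as PoseWithCovarianceStamped messages at the configured rate."""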
def __init__(self):
rospy.init_node('ball_sim')
self.pub_frequency = 20
if len(sys.argv) > 1:
self.pub_frequency = int(sys.argv[1])
self.dt = 1.0 / self.pub_frequency
self.max_acceleration = .8
self.max_error = np.array([2, 4])
self.velocity = np.zeros((2))
self.position = np.zeros((2))
self.p_pub = rospy.Publisher('position', PoseWithCovarianceStamped, queue_size=1)
self.p_err_pub = rospy.Publisher('ball_relative', PoseWithCovarianceStamped, queue_size=1)
rate = rospy.Rate(self.pub_frequency)
while not rospy.is_shutdown():
self.position += self.velocity * self.dt
self.velocity = np.clip(self.velocity + self.gen_acceleration(), -3, 3)
p_err = self.position + self.gen_error()
self.p_pub.publish(self.gen_msg(self.position[0], self.position[1]))
self.p_err_pub.publish(self.gen_msg(p_err[0], p_err[1]))
rate.sleep()
def gen_error(self):
return np.multiply(np.random.rand(2) * 2 - 1, self.max_error)
def gen_acceleration(self):
return np.multiply(np.random.randn(2), self.max_acceleration) * self.dt
def gen_msg(self, x, y):
msg = PoseWithCovarianceStamped()
msg.header.frame_id = 'world'
msg.pose.pose.position.x = x
msg.pose.pose.position.y = y
msg.pose.pose.orientation.w = 1
return msg
if __name__ == '__main__':
SimBall()
|
[
"numpy.random.randn",
"numpy.zeros",
"rospy.Publisher",
"rospy.Rate",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"geometry_msgs.msg.PoseWithCovarianceStamped",
"numpy.random.rand"
] |
[((173, 200), 'rospy.init_node', 'rospy.init_node', (['"""ball_sim"""'], {}), "('ball_sim')\n", (188, 200), False, 'import rospy\n'), ((418, 434), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (426, 434), True, 'import numpy as np\n'), ((460, 471), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (468, 471), True, 'import numpy as np\n'), ((498, 509), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (506, 509), True, 'import numpy as np\n'), ((533, 601), 'rospy.Publisher', 'rospy.Publisher', (['"""position"""', 'PoseWithCovarianceStamped'], {'queue_size': '(1)'}), "('position', PoseWithCovarianceStamped, queue_size=1)\n", (548, 601), False, 'import rospy\n'), ((627, 700), 'rospy.Publisher', 'rospy.Publisher', (['"""ball_relative"""', 'PoseWithCovarianceStamped'], {'queue_size': '(1)'}), "('ball_relative', PoseWithCovarianceStamped, queue_size=1)\n", (642, 700), False, 'import rospy\n'), ((716, 746), 'rospy.Rate', 'rospy.Rate', (['self.pub_frequency'], {}), '(self.pub_frequency)\n', (726, 746), False, 'import rospy\n'), ((1404, 1431), 'geometry_msgs.msg.PoseWithCovarianceStamped', 'PoseWithCovarianceStamped', ([], {}), '()\n', (1429, 1431), False, 'from geometry_msgs.msg import PoseWithCovarianceStamped\n'), ((765, 784), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (782, 784), False, 'import rospy\n'), ((1307, 1325), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (1322, 1325), True, 'import numpy as np\n'), ((1204, 1221), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1218, 1221), True, 'import numpy as np\n')]
|
"""
The way matplotlib does text layout is counter-intuitive to some, so
this example is designed to make it a little clearer. The text is
aligned by its bounding box (the rectangular box that surrounds the
ink rectangle). The order of operations is rotation then
alignment, rather than alignment then rotation: the text
is centered at your x,y location, rotated around this point, and then
aligned according to the bounding box of the rotated text.
So if you specify left, bottom alignment, the bottom left of the
bounding box of the rotated text will be at the x,y coordinate of the
text.
But a picture is worth a thousand words!
"""
import matplotlib.pyplot as plt
import numpy as np
def addtext(props):
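    # Place the same label at five anchor points along y = 0.5 with different
    # rotations, so the effect of the alignment settings passed in `props`
    # can be compared across rotation angles.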
plt.text(0.5, 0.5, 'text 0', props, rotation=0)
plt.text(1.5, 0.5, 'text 45', props, rotation=45)
plt.text(2.5, 0.5, 'text 135', props, rotation=135)
plt.text(3.5, 0.5, 'text 225', props, rotation=225)
plt.text(4.5, 0.5, 'text -45', props, rotation=-45)
plt.yticks([0, .5, 1])
plt.grid(True)
# the text bounding box
bbox = {'fc': '0.8', 'pad': 0}
plt.subplot(211)
addtext({'ha': 'center', 'va': 'center', 'bbox': bbox})
plt.xlim(0, 5)
plt.xticks(np.arange(0, 5.1, 0.5), [])
plt.ylabel('center / center')
plt.subplot(212)
addtext({'ha': 'left', 'va': 'bottom', 'bbox': bbox})
plt.xlim(0, 5)
plt.xticks(np.arange(0, 5.1, 0.5))
plt.ylabel('left / bottom')
plt.show()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.text",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid"
] |
[((1114, 1130), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1125, 1130), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1201), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(5)'], {}), '(0, 5)\n', (1195, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""center / center"""'], {}), "('center / center')\n", (1251, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1288), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1283, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1357), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(5)'], {}), '(0, 5)\n', (1351, 1357), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1420), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""left / bottom"""'], {}), "('left / bottom')\n", (1403, 1420), True, 'import matplotlib.pyplot as plt\n'), ((1421, 1431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1429, 1431), True, 'import matplotlib.pyplot as plt\n'), ((741, 788), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.5)', '"""text 0"""', 'props'], {'rotation': '(0)'}), "(0.5, 0.5, 'text 0', props, rotation=0)\n", (749, 788), True, 'import matplotlib.pyplot as plt\n'), ((793, 842), 'matplotlib.pyplot.text', 'plt.text', (['(1.5)', '(0.5)', '"""text 45"""', 'props'], {'rotation': '(45)'}), "(1.5, 0.5, 'text 45', props, rotation=45)\n", (801, 842), True, 'import matplotlib.pyplot as plt\n'), ((847, 898), 'matplotlib.pyplot.text', 'plt.text', (['(2.5)', '(0.5)', '"""text 135"""', 'props'], {'rotation': '(135)'}), "(2.5, 0.5, 'text 135', props, rotation=135)\n", (855, 898), True, 'import matplotlib.pyplot as plt\n'), ((903, 954), 'matplotlib.pyplot.text', 'plt.text', (['(3.5)', '(0.5)', '"""text 225"""', 'props'], {'rotation': '(225)'}), "(3.5, 0.5, 'text 225', props, rotation=225)\n", (911, 954), True, 'import matplotlib.pyplot as plt\n'), ((959, 1010), 'matplotlib.pyplot.text', 'plt.text', (['(4.5)', '(0.5)', '"""text -45"""', 'props'], {'rotation': '(-45)'}), "(4.5, 0.5, 'text -45', props, rotation=-45)\n", (967, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1038), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (1025, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1056), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1050, 1056), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1235), 'numpy.arange', 'np.arange', (['(0)', '(5.1)', '(0.5)'], {}), '(0, 5.1, 0.5)\n', (1222, 1235), True, 'import numpy as np\n'), ((1369, 1391), 'numpy.arange', 'np.arange', (['(0)', '(5.1)', '(0.5)'], {}), '(0, 5.1, 0.5)\n', (1378, 1391), True, 'import numpy as np\n')]
|
'''
k Bandit problem
'''
import numpy as np
import matplotlib.pyplot as plt
class Bandit():
def __init__(self, k):
self.k = k # number of bandits
self.true_val = np.random.normal(0,1,self.k) # q*(a)
def reward(self, t):
return np.random.normal(self.true_val[t],1,1) #R_t
class Agent():
def __init__(self, bandit, epsilon):
self.epsilon = epsilon
self.Q = np.zeros(bandit.k) # Q(a)
self.N = np.zeros(bandit.k, dtype=int) # N(a)
def select_action(self, bandit):
prob = np.random.random()
if prob <= self.epsilon:
return np.random.randint(bandit.k) # choose random action
else:
return np.argmax(self.Q) # argmax Q(a)
def update(self, action, reward):
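        # Incremental sample-average update: Q_{n+1} = Q_n + (1/n) * (R_n - Q_n),
        # equivalent to averaging all rewards observed so far for this action.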
self.N[action] += 1
self.Q[action] += (1/self.N[action]) * (reward - self.Q[action])
    def play(self, bandit):
rewards = []
for t_step in range(1000):
action = self.select_action(bandit)
reward = bandit.reward(action)
self.update(action, reward)
rewards.append(reward)
return np.array(rewards)
k = 10
epsilon = 0.01
rewards = np.zeros(1000)
for episode in range(2000):
bandit = Bandit(k)
agent = Agent(bandit, epsilon)
    reward = agent.play(bandit).reshape((1000,))
rewards += reward
rewards /= 2000 # average out the rewards
#plot average rewards against steps
plt.plot(rewards)
plt.xlabel("Steps")
plt.ylabel("Rewards")
plt.xlim([1,1000])
plt.show()
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.random.randint",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1187, 1201), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (1195, 1201), True, 'import numpy as np\n'), ((1435, 1452), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {}), '(rewards)\n', (1443, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1453, 1472), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {}), "('Steps')\n", (1463, 1472), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1494), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rewards"""'], {}), "('Rewards')\n", (1483, 1494), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1514), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[1, 1000]'], {}), '([1, 1000])\n', (1503, 1514), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1522, 1524), True, 'import matplotlib.pyplot as plt\n'), ((188, 218), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'self.k'], {}), '(0, 1, self.k)\n', (204, 218), True, 'import numpy as np\n'), ((266, 306), 'numpy.random.normal', 'np.random.normal', (['self.true_val[t]', '(1)', '(1)'], {}), '(self.true_val[t], 1, 1)\n', (282, 306), True, 'import numpy as np\n'), ((417, 435), 'numpy.zeros', 'np.zeros', (['bandit.k'], {}), '(bandit.k)\n', (425, 435), True, 'import numpy as np\n'), ((460, 489), 'numpy.zeros', 'np.zeros', (['bandit.k'], {'dtype': 'int'}), '(bandit.k, dtype=int)\n', (468, 489), True, 'import numpy as np\n'), ((550, 568), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (566, 568), True, 'import numpy as np\n'), ((1135, 1152), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (1143, 1152), True, 'import numpy as np\n'), ((621, 648), 'numpy.random.randint', 'np.random.randint', (['bandit.k'], {}), '(bandit.k)\n', (638, 648), True, 'import numpy as np\n'), ((705, 722), 'numpy.argmax', 'np.argmax', (['self.Q'], {}), '(self.Q)\n', (714, 722), True, 'import numpy as np\n')]
|
print('Importing dependencies...', end='')
import sys
sys.stdout.flush()
sys.path.append('/home/pi/.local/lib/python3.7/site-packages')
from time import sleep
import numpy as np
import board
import neopixel
import scara_arm as arm
import scara_imaging as imaging
print('done.')
def read_config(filename):
''' Read config file to dict, containing servo pulse ranges and
image warp matrix.
Inputs:
filename: name (and path, if necessary) of config file (str)
Returns: config dict (keys = config names,
vals = config values (as strings))
'''
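    # Each config line is expected to look like "<name>: <value>", for example
    # "Shoulder min pulse: 500" or
    # "Image warp matrix: [a b c] [d e f] [g h i]" (the values shown here are
    # placeholders, not real calibration data).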
config = {}
with open(filename) as config_file:
for line in config_file:
try:
(key, val) = line.split(': ')
config[key] = val
except ValueError:
pass
# Reformat config entries from strings to their proper types
warp_matrix = config['Image warp matrix']
warp_matrix = warp_matrix.strip('[ ]')
warp_matrix = warp_matrix.replace('] [', '; ')
config['Image warp matrix'] = np.array(np.mat(warp_matrix))
config['Shoulder min pulse'] = int(config['Shoulder min pulse'])
config['Shoulder max pulse'] = int(config['Shoulder max pulse'])
config['Elbow min pulse'] = int(config['Elbow min pulse'])
config['Elbow max pulse'] = int(config['Elbow max pulse'])
return config
def get_menu_input(commands, input_text=''):
''' Gets and validates user input for menu selections
Inputs:
commands (dict): valid commands.
keys (str): commands
values (str): modes
input_text (str, optional): text to print pre-input. Defaults to empty.
Returns: validated user input (str)
'''
while True:
user_input = input(input_text)
if user_input == 'ls':
print('Valid commands:')
for key in commands:
padded_key = key.ljust(14, '.')
print(f' {padded_key}{commands[key]}')
elif user_input not in commands.keys():
print('Invalid command. ', end='')
else:
return user_input
def angle_entry(shoulder, elbow):
''' Direct angle entry
Inputs:
shoulder, elbow: joint objects for interacting directly with joints
Returns: None
'''
commands = {'s': 'Shoulder', 'e': 'Elbow', 'x': 'Exit'}
print(
'\n*****ANGLE MODE*****\n'
'Enter angle, or "j" to select joint.\n'
' Shoulder joint range: ±90° \n Elbow joint range: ±158°\n'
'Enter "j" to select joint. Enter "x" to exit at any time.\n'
)
angles = [None, None]
while True:
joint = get_menu_input(commands, 'Select joint ("ls" for options): ')
if joint == 'x':
print('Exiting to menu.\n')
return
else:
print(f'{commands[joint]} joint selected.')
while True:
user_input = input('Enter angle: ')
if user_input == 'x':
print('Exiting to menu.\n')
return
elif user_input == 'j':
break
try:
if joint == 's':
angles[0] = float(user_input)
elif joint == 'e':
angles[1] = float(user_input)
shoulder.set_angle(angles[0])
elbow.set_angle(angles[1])
except (ValueError, TypeError):
print('Invalid input (Double-check valid angles).')
def coordinate_entry(shoulder, elbow, arm_lengths):
''' Direct coordinate entry
Inputs:
        shoulder, elbow: joint objects for interacting directly with joints
        arm_lengths (tuple of floats): upper and lower arm lengths (inches)
Returns: None
'''
def get_coord():
''' Generates coordinate pair based on user input.
Inputs: none
        Returns: either a list containing user-inputted coordinates,
            in format (x, y), or (None, None)
'''
while True:
user_input = input('Enter coordinates: ')
if user_input == 'x':
return None, None
else:
try:
user_input = user_input.split(',')
user_input = [float(i) for i in user_input]
return user_input
except ValueError:
print('Invalid input.')
print(
'\n*****COORDINATE MODE***** \nInput coordinates in format "x, y".'
'\n x range: 0 ≤ x ≤ 10.5\n y range: ±10.5'
'\nEnter "x" to exit at any time.\n'
)
angles0 = [0, 0]
while True:
x, y = get_coord()
if x is None:
print('Exiting to menu.\n')
return
# Attempt first solution, then attempt second if first is invalid
try:
angles = arm.find_angles(x, y, arm_lengths)
if (abs(angles[0][0]-angles0[0])
> abs(angles[1][0]-angles0[0])):
# Order IK solutions to prefer less shoulder movement
angles[0], angles[1] = angles[1], angles[0]
shoulder.set_angle(angles[0][0])
elbow.set_angle(angles[0][1])
angles0 = angles[0]
except ValueError:
try:
shoulder.set_angle(angles[1][0])
elbow.set_angle(angles[1][1])
angles0 = angles[1]
            except Exception:
print('Input error. Ensure coordinate is reachable.')
def target_finding(camera, warp_matrix, target_color, maps):
''' Finds target disk
Inputs:
camera: PiCamera camera object
warp_matrix (numpy array): transformation matrix defining image warp
target_color (tup): 2-element tuple housing numpy arrays. First element
            houses minimum HSV values of target, second houses maximum values.
        maps (tuple of arrays): fisheye correction arrays for remapping
    Returns: None
'''
print(
'\n*****TARGET-FINDING MODE*****\n'
'Place target disk within camera view, then press "enter" '
'to get coordinates. Type "x" to exit at any time.'
)
while True:
is_ready = input(': ')
if not is_ready:
x, y = imaging.find_target(
camera, warp_matrix, target_color, maps
)
print(f'Target coordinates: ({x}, {y})')
else:
return
def auto_operation(camera, warp_matrix, target_color, arm_lengths,
shoulder, elbow, maps):
''' Autonomous operation for SCARA arm. On loop, gets an image, detects if
there is a target present, and moves the arm to its location.
Inputs:
camera: PiCamera camera object.
warp_matrix (numpy array): transformation matrix defining image warp
target_color (tup): 2-element tuple housing numpy arrays. First element
houses minimum HSV values of target, second houses maximum values.
        arm_lengths (tuple of floats): upper and lower arm lengths (in inches)
        shoulder, elbow: joint objects for interacting directly with joints
maps (tuple of arrays): fisheye correction arrays for remapping
Returns: None
'''
print(
'\n*****AUTONOMOUS OPERATION*****\n'
'Place target disk within camera view.\n'
'Press ctrl-C to exit at any time.'
)
x = 0
y = 0
x0 = 0
y0 = 0
angles0 = [0, 0]
while True:
try:
sleep(0.2)
#input('Press enter when ready.')
try:
x, y = imaging.find_target(
camera, warp_matrix, target_color, maps
)
except ZeroDivisionError:
pass
if x is None:
pass
elif abs(x-x0) >= 0.1 and abs(y-y0) >= 0.1:
x0 = x
y0 = y
print(f'Target coordinates: ({x}, {y})')
try:
angles = arm.find_angles(x, y, arm_lengths)
if (abs(angles[0][0]-angles0[0])
> abs(angles[1][0]-angles0[0])):
# Order IK solutions to prefer less shoulder movement
angles[0], angles[1] = angles[1], angles[0]
shoulder.set_angle(angles[0][0])
elbow.set_angle(angles[0][1])
angles0 = angles[0]
except ValueError:
try:
shoulder.set_angle(angles[1][0])
elbow.set_angle(angles[1][1])
angles0 = angles[1]
                    except Exception:
print('Error. Ensure coordinate is reachable.')
except KeyboardInterrupt:
shoulder.set_angle(None)
elbow.set_angle(None)
return
def get_color():
''' Get user-input RGBW color tetrad, and return as tuple
Inputs:
None
Returns:
color (tup): tuple housing color data in form (R, G, B, W)
'''
while True:
try:
color = input('Enter color, in format (r, g, b, w): ')
color = color.strip('()')
color = color.split(',')
color = [int(i) for i in color]
color = tuple(color)
return color
except ValueError:
print('Invalid input')
print('Loading configuration........',end='')
sys.stdout.flush()
config = read_config('scara_config.txt')
warp_matrix = config['Image warp matrix']
# HSV bounds on target color to detect (lower bound, upper bound).
target_color = (np.array([1, 125, 50]), np.array([14, 255, 200]))
# Arm lengths in inches (shoulder, elbow)
arm_lengths = (6.0, 4.5)
print('done.')
print('Initializing........',end='')
sys.stdout.flush()
i2c, shoulder, elbow = arm.arm_init(config)
# Initialize ringlight and turn all LEDs on white
pixels = neopixel.NeoPixel(board.D18, 12, bpp=4)
rl_color = (255, 255, 255, 255)
pixels.fill(rl_color)
camera, maps = imaging.camera_init()
print('done.')
commands = {'main': {
'arm': 'Arm control','auto': 'Autonomous mode',
'cam': 'Camera control', 'light': 'Ringlight control',
'x': 'Exit'
},
'arm': {'ang': 'Angle entry', 'cal': 'Arm calibration',
'coord': 'Coordinate entry', 'k': 'Kill motors',
'x': 'Exit'
},
'auto': None,
'cam': {'cal': 'Camera calibration', 'img': 'Capture image',
'targ': 'Locate target', 'x': 'Exit'
},
'light': {'on': 'Turn on', 'off': 'Turn off',
'col': 'Change color', 'x': 'Exit'
}
}
print('\n*****WELCOME TO SCARA BOT 1.0*****\n')
print('***MAIN MENU***')
while True:
mode = get_menu_input(commands['main'],
'What would you like to do? ("ls" for valid commands): '
)
if mode == 'arm':
while True:
print('\n***MANUAL ARM CONTROL***')
mode = get_menu_input(commands['arm'],
'What would you like to do? ("ls" for valid commands): '
)
if mode == 'ang':
angle_entry(shoulder, elbow)
elif mode == 'cal':
config = arm.servo_calibration(shoulder, elbow, config)
elif mode == 'coord':
coordinate_entry(shoulder, elbow, arm_lengths)
elif mode == 'k':
print('Killing motor power........',end='')
shoulder.set_angle(None)
elbow.set_angle(None)
print('done.\n')
elif mode == 'x':
print('Returning to main menu.\n\n***MAIN MENU***')
break
elif mode == 'auto':
auto_operation(
camera, warp_matrix, target_color,
arm_lengths, shoulder, elbow, maps
)
print('Autonomous operation ended. Returning to main menu.'
'\n\n***MAIN MENU***'
)
elif mode == 'cam':
while True:
print('\n***CAMERA CONTROL***')
mode = get_menu_input(commands['cam'],
'What would you like to do? ("ls" for valid commands): '
)
if mode == 'cal':
warp_matrix = imaging.calibrate_camera(
camera, target_color, maps, shoulder, elbow
)
elif mode == 'img':
print('Image Capture Mode')
imaging.image_capture(camera)
elif mode == 'targ':
target_finding(
camera, warp_matrix, target_color, maps
)
elif mode == 'x':
print('Returning to main menu.\n\n***MAIN MENU***')
break
elif mode == 'light':
while True:
print('\n***RINGLIGHT CONTROL***')
mode = get_menu_input(commands['light'],
'What would you like to do? ("ls" for valid commands): '
)
if mode == 'on':
pixels.fill(rl_color)
elif mode == 'off':
pixels.fill((0, 0, 0, 0))
elif mode == 'col':
rl_color = get_color()
pixels.fill(rl_color)
elif mode == 'x':
print('Returning to main menu.\n\n***MAIN MENU***')
break
elif mode == 'x':
print('Shutting down........',end='')
arm.arm_deinit(i2c, shoulder, elbow)
pixels.deinit()
camera.close()
print('Goodbye.\n')
sys.exit()
else:
print('Invalid command. ', end='')
|
[
"sys.path.append",
"scara_arm.arm_deinit",
"scara_imaging.camera_init",
"scara_imaging.find_target",
"scara_arm.servo_calibration",
"time.sleep",
"scara_arm.arm_init",
"sys.stdout.flush",
"numpy.array",
"scara_imaging.calibrate_camera",
"scara_imaging.image_capture",
"neopixel.NeoPixel",
"numpy.mat",
"sys.exit",
"scara_arm.find_angles"
] |
[((56, 74), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (72, 74), False, 'import sys\n'), ((76, 138), 'sys.path.append', 'sys.path.append', (['"""/home/pi/.local/lib/python3.7/site-packages"""'], {}), "('/home/pi/.local/lib/python3.7/site-packages')\n", (91, 138), False, 'import sys\n'), ((9704, 9722), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9720, 9722), False, 'import sys\n'), ((10069, 10087), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10085, 10087), False, 'import sys\n'), ((10112, 10132), 'scara_arm.arm_init', 'arm.arm_init', (['config'], {}), '(config)\n', (10124, 10132), True, 'import scara_arm as arm\n'), ((10194, 10233), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['board.D18', '(12)'], {'bpp': '(4)'}), '(board.D18, 12, bpp=4)\n', (10211, 10233), False, 'import neopixel\n'), ((10306, 10327), 'scara_imaging.camera_init', 'imaging.camera_init', ([], {}), '()\n', (10325, 10327), True, 'import scara_imaging as imaging\n'), ((9893, 9915), 'numpy.array', 'np.array', (['[1, 125, 50]'], {}), '([1, 125, 50])\n', (9901, 9915), True, 'import numpy as np\n'), ((9917, 9941), 'numpy.array', 'np.array', (['[14, 255, 200]'], {}), '([14, 255, 200])\n', (9925, 9941), True, 'import numpy as np\n'), ((1126, 1145), 'numpy.mat', 'np.mat', (['warp_matrix'], {}), '(warp_matrix)\n', (1132, 1145), True, 'import numpy as np\n'), ((5082, 5116), 'scara_arm.find_angles', 'arm.find_angles', (['x', 'y', 'arm_lengths'], {}), '(x, y, arm_lengths)\n', (5097, 5116), True, 'import scara_arm as arm\n'), ((6445, 6505), 'scara_imaging.find_target', 'imaging.find_target', (['camera', 'warp_matrix', 'target_color', 'maps'], {}), '(camera, warp_matrix, target_color, maps)\n', (6464, 6505), True, 'import scara_imaging as imaging\n'), ((7682, 7692), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (7687, 7692), False, 'from time import sleep\n'), ((7782, 7842), 'scara_imaging.find_target', 'imaging.find_target', (['camera', 'warp_matrix', 'target_color', 'maps'], {}), '(camera, warp_matrix, target_color, maps)\n', (7801, 7842), True, 'import scara_imaging as imaging\n'), ((11693, 11739), 'scara_arm.servo_calibration', 'arm.servo_calibration', (['shoulder', 'elbow', 'config'], {}), '(shoulder, elbow, config)\n', (11714, 11739), True, 'import scara_arm as arm\n'), ((8212, 8246), 'scara_arm.find_angles', 'arm.find_angles', (['x', 'y', 'arm_lengths'], {}), '(x, y, arm_lengths)\n', (8227, 8246), True, 'import scara_arm as arm\n'), ((12752, 12821), 'scara_imaging.calibrate_camera', 'imaging.calibrate_camera', (['camera', 'target_color', 'maps', 'shoulder', 'elbow'], {}), '(camera, target_color, maps, shoulder, elbow)\n', (12776, 12821), True, 'import scara_imaging as imaging\n'), ((13967, 14003), 'scara_arm.arm_deinit', 'arm.arm_deinit', (['i2c', 'shoulder', 'elbow'], {}), '(i2c, shoulder, elbow)\n', (13981, 14003), True, 'import scara_arm as arm\n'), ((14091, 14101), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14099, 14101), False, 'import sys\n'), ((12961, 12990), 'scara_imaging.image_capture', 'imaging.image_capture', (['camera'], {}), '(camera)\n', (12982, 12990), True, 'import scara_imaging as imaging\n')]
|
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import time
import os
import argparse
from torch.autograd import Function
import ssenet_resnet
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torchvision
from PIL import Image
import SimpleITK as sitk
import xlwt
import math
import shutil
from ActivationMap import GradCam
def transform_image(img_path,mask_array):
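    # Load the DICOM slice, set everything outside the lung mask to -1024
    # (air in Hounsfield units), min-max normalise the slice to [0, 1] and
    # resize it to the 224x224 tensor expected by the network.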
image = sitk.ReadImage(img_path)
img = sitk.GetArrayFromImage(image)[0, :, :]
img[np.where(mask_array == 0)] = -1024
img_max = np.max(img)
img_min = np.min(img)
img_range = img_max - img_min
img = (img.astype('float32') - img_min) / img_range
PIL_image = Image.fromarray(img)
out = torchvision.transforms.Resize((224, 224))(PIL_image)
out_tensor = torchvision.transforms.ToTensor()(out)
return out_tensor
def show_cam_on_image(img, mask_pos, file_name = 'CAM.jpg'):
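    # Render the activation mask as a JET heatmap, overlay it on the input
    # slice, and save the grayscale slice and the overlay side by side.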
pos_heatmap = cv2.applyColorMap(np.uint8(255 * mask_pos), cv2.COLORMAP_JET)
pos_heatmap = np.float32(pos_heatmap) / 255
img = img.cpu().numpy()
img = np.transpose(img, (1, 2, 0))
pos_cam = pos_heatmap + np.float32(img)
pos_cam = pos_cam / np.max(pos_cam)
squeeze_img = np.squeeze(img)
plt.figure()
plt.subplot(121)
plt.imshow(squeeze_img, cmap='gray')
plt.subplot(122)
pos_cam = cv2.cvtColor(pos_cam, cv2.COLOR_BGR2RGB)
plt.imshow(pos_cam)
plt.savefig(file_name)
plt.close('all')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="PyTorch implementation of SENet")
parser.add_argument('--data-dir', type=str, default="/ImageNet")
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--num-class', type=int, default=2)
parser.add_argument('--num-epochs', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--num-workers', type=int, default=0)
    parser.add_argument('--gpus', type=str, default='1')
parser.add_argument('--print-freq', type=int, default=10)
parser.add_argument('--save-epoch-freq', type=int, default=1)
parser.add_argument('--save-path', type=str, default="output2")
parser.add_argument('--resume', type=str, default="./checkpoint/model.pth.tar", help="For training from one checkpoint")
parser.add_argument('--start-epoch', type=int, default=0, help="Corresponding to the epoch of resume ")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
# use gpu or not
use_gpu = torch.cuda.is_available()
print("use_gpu:{}".format(use_gpu))
# get model
model = getattr(ssenet_resnet, "SSe_resnet_18")(num_classes=2)
print(model)
if args.resume:
if os.path.isfile(args.resume):
print(("=> loading checkpoint '{}'".format(args.resume)))
checkpoint = torch.load(args.resume)
base_dict = {k.replace('module.',''): v for k, v in list(checkpoint.state_dict().items())}
model.load_state_dict(base_dict)
else:
print(("=> no checkpoint found at '{}'".format(args.resume)))
if use_gpu:
model = model.cuda()
torch.backends.cudnn.enabled = False
#model = torch.nn.DataParallel(model, device_ids=[int(i) for i in args.gpus.strip().split(',')])
grad_cam = GradCam(model=model, feature_module=model.layer4, \
target_layer_names=["1"], use_cuda=True)
parent_path = os.path.abspath(os.path.join(os.getcwd(), "../.."))
dataset_folder = os.path.join(parent_path,'Dataset')
result_folder = os.path.join(parent_path,'Result')
img_save_root_path = os.path.join(result_folder,'ActivationMap')
target_folder = os.path.join(result_folder,'SelectedSlices')
thresh_area = 800
thresh_heat =0.88
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('sheet1')
for idx,patient_folder in enumerate(os.listdir(dataset_folder)):
patient_path = os.path.join(dataset_folder, patient_folder)
img_save_patient_path = os.path.join(img_save_root_path, patient_folder)
patient_voxel_volume = [0,0,0,0,0]
patient_cam_area = [0,0,0,0,0]
prob_list = []
dcm_fnames = []
for file in os.listdir(patient_path):
if file.endswith('dcm'):
#read dcm slice and corresponding lung mask
dcm_fnames.append(file)
img_path = os.path.join(patient_path, file)
print('Processing ', img_path)
mask_path = os.path.join(patient_path,'lobes_' + file.replace('dcm','nii.gz'))
mask_image = sitk.ReadImage(mask_path)
mask_array = sitk.GetArrayFromImage(mask_image)[0, :, :]
out_tensor = transform_image(img_path,mask_array)
#activation map inference
image_variable = Variable(torch.unsqueeze(out_tensor, dim=0).float(), requires_grad=False)
mask_pos, class_output, prob_output = grad_cam(image_variable, 1)
if math.isnan(prob_output[0][1]):
prob_list.append(0)
else:
prob_list.append(prob_output[0][1])
#Save Activation Map result
save_img_path = os.path.join(img_save_patient_path,file[:-4] + '_' + str(class_output) + '.jpg')
if not os.path.exists(img_save_patient_path):
os.makedirs(img_save_patient_path)
print('saving ', save_img_path)
show_cam_on_image(out_tensor, mask_pos, save_img_path)
                # Calculate severity evaluation result
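                # For each of the five lung lobes, count the pixels whose activation
                # exceeds thresh_heat; the per-lobe ratio of activated area to lobe
                # area, accumulated over positive slices, is used as the severity score.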
total_area = [0,0,0,0,0]
grad_mask_area = [0,0,0,0,0]
mask_lobe = mask_array
mask_lobe = cv2.resize(mask_lobe,(224,224))
grad_mask_pos = np.zeros(mask_pos.shape[:2], np.uint8)
grad_mask_pos[np.where(mask_pos>thresh_heat)]=1
mask_1 = np.zeros(mask_lobe.shape[:2], np.uint8)
mask_1[np.where(mask_lobe == 1)]=1
cam_mask_1 = mask_1*grad_mask_pos
cam_area_1 = len(cam_mask_1.nonzero()[0])
total_area[0] = len(mask_1.nonzero()[0])
grad_mask_area[0] = cam_area_1
mask_2 = np.zeros(mask_lobe.shape[:2], np.uint8)
mask_2[np.where(mask_lobe == 2)]=1
cam_mask_2 = mask_2*grad_mask_pos
cam_area_2 = len(cam_mask_2.nonzero()[0])
total_area[1] = len(mask_2.nonzero()[0])
grad_mask_area[1] = cam_area_2
mask_3 = np.zeros(mask_lobe.shape[:2], np.uint8)
mask_3[np.where(mask_lobe == 3)]=1
cam_mask_3 = mask_3*grad_mask_pos
cam_area_3 = len(cam_mask_3.nonzero()[0])
total_area[2] = len(mask_3.nonzero()[0])
grad_mask_area[2] = cam_area_3
mask_4 = np.zeros(mask_lobe.shape[:2], np.uint8)
mask_4[np.where(mask_lobe == 4)]=1
cam_mask_4 = mask_4*grad_mask_pos
cam_area_4 = len(cam_mask_4.nonzero()[0])
total_area[3] = len(mask_4.nonzero()[0])
grad_mask_area[3] = cam_area_4
mask_5 = np.zeros(mask_lobe.shape[:2], np.uint8)
mask_5[np.where(mask_lobe == 5)]=1
cam_mask_5 = mask_5*grad_mask_pos
cam_area_5 = len(cam_mask_5.nonzero()[0])
total_area[4] = len(mask_5.nonzero()[0])
grad_mask_area[4] = cam_area_5
if class_output == 1: #Accumulate interest area for positive slices
for part in range(5):
if total_area[part] > thresh_area:
patient_cam_area[part] = patient_cam_area[part] + grad_mask_area[part]
patient_voxel_volume[part] = patient_voxel_volume[part] + total_area[part]
#Select layers with TOP 10 probability
prob_array = np.array(prob_list)
top_10_index = prob_array.argsort()[-10:][::-1]
print(np.array(dcm_fnames)[top_10_index])
target_patient_path = os.path.join(target_folder,patient_folder)
if not os.path.exists(target_patient_path):
os.mkdir(target_patient_path)
for index in top_10_index:
if prob_array[index] >= 0.995:
dcm_file = np.array(dcm_fnames)[index]
shutil.copyfile(os.path.join(patient_path,dcm_file),os.path.join(target_patient_path,dcm_file)) #Copy TOP 10 layers to target directory
        # Calculate and save severity evaluation result
ratio = np.array(patient_cam_area)/np.array(patient_voxel_volume)
for index,part_ratio in enumerate(ratio):
if math.isnan(part_ratio):
ratio[index] = 0
worksheet.write(idx, 0, patient_folder)
worksheet.write(idx, 2, ratio[0])
worksheet.write(idx, 5, ratio[1])
worksheet.write(idx, 8, ratio[2])
worksheet.write(idx, 11, ratio[3])
worksheet.write(idx, 14, ratio[4])
worksheet.write(idx, 17, np.sum(np.array(patient_cam_area)) / np.sum(np.array(patient_voxel_volume)))
    workbook.save(os.path.join(result_folder,'Serverity.xls')) # Save severity assessment result
|
[
"os.mkdir",
"xlwt.Workbook",
"argparse.ArgumentParser",
"ActivationMap.GradCam",
"matplotlib.pyplot.figure",
"os.path.isfile",
"os.path.join",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"SimpleITK.ReadImage",
"matplotlib.pyplot.close",
"numpy.transpose",
"torch.load",
"SimpleITK.GetArrayFromImage",
"os.path.exists",
"numpy.max",
"cv2.resize",
"math.isnan",
"numpy.uint8",
"numpy.min",
"torch.cuda.is_available",
"numpy.squeeze",
"torch.unsqueeze",
"os.listdir",
"torchvision.transforms.Resize",
"matplotlib.pyplot.subplot",
"os.makedirs",
"os.getcwd",
"numpy.float32",
"numpy.zeros",
"numpy.where",
"numpy.array",
"PIL.Image.fromarray",
"matplotlib.pyplot.savefig",
"torchvision.transforms.ToTensor"
] |
[((536, 560), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['img_path'], {}), '(img_path)\n', (550, 560), True, 'import SimpleITK as sitk\n'), ((667, 678), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (673, 678), True, 'import numpy as np\n'), ((693, 704), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (699, 704), True, 'import numpy as np\n'), ((811, 831), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (826, 831), False, 'from PIL import Image\n'), ((1202, 1230), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (1214, 1230), True, 'import numpy as np\n'), ((1334, 1349), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (1344, 1349), True, 'import numpy as np\n'), ((1355, 1367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1365, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1383, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1429), 'matplotlib.pyplot.imshow', 'plt.imshow', (['squeeze_img'], {'cmap': '"""gray"""'}), "(squeeze_img, cmap='gray')\n", (1403, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1450), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1445, 1450), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1505), 'cv2.cvtColor', 'cv2.cvtColor', (['pos_cam', 'cv2.COLOR_BGR2RGB'], {}), '(pos_cam, cv2.COLOR_BGR2RGB)\n', (1477, 1505), False, 'import cv2\n'), ((1510, 1529), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pos_cam'], {}), '(pos_cam)\n', (1520, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1556), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (1545, 1556), True, 'import matplotlib.pyplot as plt\n'), ((1561, 1577), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1570, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch implementation of SENet"""'}), "(description='PyTorch implementation of SENet')\n", (1643, 1690), False, 'import argparse\n'), ((2661, 2686), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2684, 2686), False, 'import torch\n'), ((3459, 3553), 'ActivationMap.GradCam', 'GradCam', ([], {'model': 'model', 'feature_module': 'model.layer4', 'target_layer_names': "['1']", 'use_cuda': '(True)'}), "(model=model, feature_module=model.layer4, target_layer_names=['1'],\n use_cuda=True)\n", (3466, 3553), False, 'from ActivationMap import GradCam\n'), ((3660, 3696), 'os.path.join', 'os.path.join', (['parent_path', '"""Dataset"""'], {}), "(parent_path, 'Dataset')\n", (3672, 3696), False, 'import os\n'), ((3716, 3751), 'os.path.join', 'os.path.join', (['parent_path', '"""Result"""'], {}), "(parent_path, 'Result')\n", (3728, 3751), False, 'import os\n'), ((3776, 3820), 'os.path.join', 'os.path.join', (['result_folder', '"""ActivationMap"""'], {}), "(result_folder, 'ActivationMap')\n", (3788, 3820), False, 'import os\n'), ((3840, 3885), 'os.path.join', 'os.path.join', (['result_folder', '"""SelectedSlices"""'], {}), "(result_folder, 'SelectedSlices')\n", (3852, 3885), False, 'import os\n'), ((3950, 3981), 'xlwt.Workbook', 'xlwt.Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (3963, 3981), False, 'import xlwt\n'), ((571, 600), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image'], {}), '(image)\n', (593, 600), True, 
'import SimpleITK as sitk\n'), ((618, 643), 'numpy.where', 'np.where', (['(mask_array == 0)'], {}), '(mask_array == 0)\n', (626, 643), True, 'import numpy as np\n'), ((842, 883), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (871, 883), False, 'import torchvision\n'), ((912, 945), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (943, 945), False, 'import torchvision\n'), ((1071, 1095), 'numpy.uint8', 'np.uint8', (['(255 * mask_pos)'], {}), '(255 * mask_pos)\n', (1079, 1095), True, 'import numpy as np\n'), ((1133, 1156), 'numpy.float32', 'np.float32', (['pos_heatmap'], {}), '(pos_heatmap)\n', (1143, 1156), True, 'import numpy as np\n'), ((1259, 1274), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (1269, 1274), True, 'import numpy as np\n'), ((1299, 1314), 'numpy.max', 'np.max', (['pos_cam'], {}), '(pos_cam)\n', (1305, 1314), True, 'import numpy as np\n'), ((2859, 2886), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (2873, 2886), False, 'import os\n'), ((4067, 4093), 'os.listdir', 'os.listdir', (['dataset_folder'], {}), '(dataset_folder)\n', (4077, 4093), False, 'import os\n'), ((4119, 4163), 'os.path.join', 'os.path.join', (['dataset_folder', 'patient_folder'], {}), '(dataset_folder, patient_folder)\n', (4131, 4163), False, 'import os\n'), ((4196, 4244), 'os.path.join', 'os.path.join', (['img_save_root_path', 'patient_folder'], {}), '(img_save_root_path, patient_folder)\n', (4208, 4244), False, 'import os\n'), ((4394, 4418), 'os.listdir', 'os.listdir', (['patient_path'], {}), '(patient_path)\n', (4404, 4418), False, 'import os\n'), ((8238, 8257), 'numpy.array', 'np.array', (['prob_list'], {}), '(prob_list)\n', (8246, 8257), True, 'import numpy as np\n'), ((8395, 8438), 'os.path.join', 'os.path.join', (['target_folder', 'patient_folder'], {}), '(target_folder, patient_folder)\n', (8407, 8438), False, 'import os\n'), ((9472, 9516), 'os.path.join', 'os.path.join', (['result_folder', '"""Serverity.xls"""'], {}), "(result_folder, 'Serverity.xls')\n", (9484, 9516), False, 'import os\n'), ((2983, 3006), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (2993, 3006), False, 'import torch\n'), ((3616, 3627), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3625, 3627), False, 'import os\n'), ((8453, 8488), 'os.path.exists', 'os.path.exists', (['target_patient_path'], {}), '(target_patient_path)\n', (8467, 8488), False, 'import os\n'), ((8502, 8531), 'os.mkdir', 'os.mkdir', (['target_patient_path'], {}), '(target_patient_path)\n', (8510, 8531), False, 'import os\n'), ((8898, 8924), 'numpy.array', 'np.array', (['patient_cam_area'], {}), '(patient_cam_area)\n', (8906, 8924), True, 'import numpy as np\n'), ((8925, 8955), 'numpy.array', 'np.array', (['patient_voxel_volume'], {}), '(patient_voxel_volume)\n', (8933, 8955), True, 'import numpy as np\n'), ((9021, 9043), 'math.isnan', 'math.isnan', (['part_ratio'], {}), '(part_ratio)\n', (9031, 9043), False, 'import math\n'), ((4585, 4617), 'os.path.join', 'os.path.join', (['patient_path', 'file'], {}), '(patient_path, file)\n', (4597, 4617), False, 'import os\n'), ((4789, 4814), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mask_path'], {}), '(mask_path)\n', (4803, 4814), True, 'import SimpleITK as sitk\n'), ((5206, 5235), 'math.isnan', 'math.isnan', (['prob_output[0][1]'], {}), '(prob_output[0][1])\n', (5216, 5235), False, 'import math\n'), ((5958, 5991), 'cv2.resize', 'cv2.resize', (['mask_lobe', 
'(224, 224)'], {}), '(mask_lobe, (224, 224))\n', (5968, 5991), False, 'import cv2\n'), ((6023, 6061), 'numpy.zeros', 'np.zeros', (['mask_pos.shape[:2]', 'np.uint8'], {}), '(mask_pos.shape[:2], np.uint8)\n', (6031, 6061), True, 'import numpy as np\n'), ((6152, 6191), 'numpy.zeros', 'np.zeros', (['mask_lobe.shape[:2]', 'np.uint8'], {}), '(mask_lobe.shape[:2], np.uint8)\n', (6160, 6191), True, 'import numpy as np\n'), ((6481, 6520), 'numpy.zeros', 'np.zeros', (['mask_lobe.shape[:2]', 'np.uint8'], {}), '(mask_lobe.shape[:2], np.uint8)\n', (6489, 6520), True, 'import numpy as np\n'), ((6810, 6849), 'numpy.zeros', 'np.zeros', (['mask_lobe.shape[:2]', 'np.uint8'], {}), '(mask_lobe.shape[:2], np.uint8)\n', (6818, 6849), True, 'import numpy as np\n'), ((7139, 7178), 'numpy.zeros', 'np.zeros', (['mask_lobe.shape[:2]', 'np.uint8'], {}), '(mask_lobe.shape[:2], np.uint8)\n', (7147, 7178), True, 'import numpy as np\n'), ((7468, 7507), 'numpy.zeros', 'np.zeros', (['mask_lobe.shape[:2]', 'np.uint8'], {}), '(mask_lobe.shape[:2], np.uint8)\n', (7476, 7507), True, 'import numpy as np\n'), ((8329, 8349), 'numpy.array', 'np.array', (['dcm_fnames'], {}), '(dcm_fnames)\n', (8337, 8349), True, 'import numpy as np\n'), ((4844, 4878), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['mask_image'], {}), '(mask_image)\n', (4866, 4878), True, 'import SimpleITK as sitk\n'), ((5536, 5573), 'os.path.exists', 'os.path.exists', (['img_save_patient_path'], {}), '(img_save_patient_path)\n', (5550, 5573), False, 'import os\n'), ((5595, 5629), 'os.makedirs', 'os.makedirs', (['img_save_patient_path'], {}), '(img_save_patient_path)\n', (5606, 5629), False, 'import os\n'), ((6092, 6124), 'numpy.where', 'np.where', (['(mask_pos > thresh_heat)'], {}), '(mask_pos > thresh_heat)\n', (6100, 6124), True, 'import numpy as np\n'), ((6215, 6239), 'numpy.where', 'np.where', (['(mask_lobe == 1)'], {}), '(mask_lobe == 1)\n', (6223, 6239), True, 'import numpy as np\n'), ((6544, 6568), 'numpy.where', 'np.where', (['(mask_lobe == 2)'], {}), '(mask_lobe == 2)\n', (6552, 6568), True, 'import numpy as np\n'), ((6873, 6897), 'numpy.where', 'np.where', (['(mask_lobe == 3)'], {}), '(mask_lobe == 3)\n', (6881, 6897), True, 'import numpy as np\n'), ((7202, 7226), 'numpy.where', 'np.where', (['(mask_lobe == 4)'], {}), '(mask_lobe == 4)\n', (7210, 7226), True, 'import numpy as np\n'), ((7531, 7555), 'numpy.where', 'np.where', (['(mask_lobe == 5)'], {}), '(mask_lobe == 5)\n', (7539, 7555), True, 'import numpy as np\n'), ((8637, 8657), 'numpy.array', 'np.array', (['dcm_fnames'], {}), '(dcm_fnames)\n', (8645, 8657), True, 'import numpy as np\n'), ((8697, 8733), 'os.path.join', 'os.path.join', (['patient_path', 'dcm_file'], {}), '(patient_path, dcm_file)\n', (8709, 8733), False, 'import os\n'), ((8733, 8776), 'os.path.join', 'os.path.join', (['target_patient_path', 'dcm_file'], {}), '(target_patient_path, dcm_file)\n', (8745, 8776), False, 'import os\n'), ((9379, 9405), 'numpy.array', 'np.array', (['patient_cam_area'], {}), '(patient_cam_area)\n', (9387, 9405), True, 'import numpy as np\n'), ((9416, 9446), 'numpy.array', 'np.array', (['patient_voxel_volume'], {}), '(patient_voxel_volume)\n', (9424, 9446), True, 'import numpy as np\n'), ((5040, 5074), 'torch.unsqueeze', 'torch.unsqueeze', (['out_tensor'], {'dim': '(0)'}), '(out_tensor, dim=0)\n', (5055, 5074), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import numpy as np
# <codecell>
def SetInitialConditions(ICs, ICset = 'Howell', ICtestcase = 0, numPoints = 2000):
# user inputs
#ICset = 'Howell' # 'Sharp' 'Howell' 'Barbee'
#ICtestcase = 0
#numPoints = 2000
# assign simulation variables using specified elements from dictionary of IC's
mu = ICs[ICset]['mu'][ICtestcase]
timespan = np.linspace(0, ICs[ICset]['T'][ICtestcase], numPoints)
initialstate1 = [ICs[ICset]['X'][ICtestcase], 0, ICs[ICset]['Z'][ICtestcase],
0, ICs[ICset]['Ydot'][ICtestcase], 0]
return mu, timespan, initialstate1
# <codecell>
def InputDataDictionary():
# create a dictionary for the initial conditions
ICs = dict()
# From Sharp, A Collection of Restricted Three-Body Test Problems
# For problems 1 to 15, mu = 0.012277471 and for problems 16 to 20, mu = 0.000953875
ICs['Sharp'] = {'mu': np.ones(20),
'X': np.zeros(20),
'Z': np.zeros(20),
'Ydot': np.zeros(20),
'T': np.zeros(20)}
Sharp_X_Z_Ydot_T = np.matrix([[0.994000E+00, 0.0, -0.21138987966945026683E+01, 0.54367954392601899690E+01],
[0.994000E+00, 0.0, -0.20317326295573368357E+01, 0.11124340337266085135E+02],
[0.994000E+00, 0.0, -0.20015851063790825224E+01, 0.17065216560157962559E+02],
[0.997000E+00, 0.0, -0.16251217072210773125E+01, 0.22929723423442969481E+02],
[0.879962E+00, 0.0, -0.66647197988564140807E+00, 0.63006757422352314657E+01],
[0.879962E+00, 0.0, -0.43965281709207999128E+00, 0.12729711861022426544E+02],
[0.879962E+00, 0.0, -0.38089067106386964470E+00, 0.19138746281183026809E+02],
[0.997000E+00, 0.0, -0.18445010489730401177E+01, 0.12353901248612092736E+02],
[0.100000E+01, 0.0, -0.16018768253456252603E+01, 0.12294387796695023304E+02],
[0.100300E+01, 0.0, -0.14465123738451062297E+01, 0.12267904265603897140E+02],
[0.120000E+01, 0.0, -0.71407169828407848921E+00, 0.18337451820715063383E+02],
[0.120000E+01, 0.0, -0.67985320356540547720E+00, 0.30753758552146029263E+02],
[0.120000E+01, 0.0, -0.67153130632829144331E+00, 0.43214375227857454128E+02],
[0.120000E+01, 0.0, -0.66998291305226832207E+00, 0.55672334134347612727E+02],
[0.120000E+01, 0.0, -0.66975741517271092087E+00, 0.68127906604713772763E+02],
[-0.102745E+01, 0.0, 0.40334488290490413053E-01, 0.18371316400018903965E+03],
[-0.976680E+00, 0.0, -0.61191623926410837000E-01, 0.17733241131524483004E+03],
[-0.766650E+00, 0.0, -0.51230158665978820282E+00, 0.17660722897242937108E+03],
[-0.109137E+01, 0.0, 0.14301959822238380020E+00, 0.82949461922342093092E+02],
[-0.110137E+01, 0.0, 0.15354250908611454510E+00, 0.60952121909407746612E+02]])
ICs['Sharp']['mu'][0:15] *= 0.012277471
ICs['Sharp']['mu'][15:20] *= 0.000953875
ICs['Sharp']['X'] = np.array(Sharp_X_Z_Ydot_T[:,0])
ICs['Sharp']['Z'] = np.array(Sharp_X_Z_Ydot_T[:,1])
ICs['Sharp']['Ydot'] = np.array(Sharp_X_Z_Ydot_T[:,2])
ICs['Sharp']['T'] = np.array(Sharp_X_Z_Ydot_T[:,3])
# From Howell, Three-Dimensional, Periodic, 'Halo' Orbits
ICs['Howell'] = {'mu': [0.04, 0.04],
'X': [0.723268, 0.723268],
'Z': [0.040000, -0.040000],
'Ydot': [0.198019, 0.198019],
'T': [1.300177*2.0, 1.300177*2.0]}
# From Barbee, Notional Mission 4 (Earth-Moon)
ICs['Barbee'] = {'mu': [0.012277471],
'X': [0.862307159058101],
'Z': [0.0],
'Ydot': [-0.187079489569182],
'T': [2.79101343456226]} # 12.135 days
return ICs
# <codecell>
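# A minimal usage sketch: build the dictionary of initial conditions, then
# extract mu, the integration time grid and the initial state vector for
# Howell's first halo-orbit test case (the variable names here are arbitrary).
ICs = InputDataDictionary()
mu, timespan, initialstate1 = SetInitialConditions(ICs, ICset='Howell',
                                                    ICtestcase=0, numPoints=2000)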
|
[
"numpy.matrix",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.linspace"
] |
[((439, 493), 'numpy.linspace', 'np.linspace', (['(0)', "ICs[ICset]['T'][ICtestcase]", 'numPoints'], {}), "(0, ICs[ICset]['T'][ICtestcase], numPoints)\n", (450, 493), True, 'import numpy as np\n'), ((1243, 2435), 'numpy.matrix', 'np.matrix', (['[[0.994, 0.0, -2.1138987966945026, 5.43679543926019], [0.994, 0.0, -\n 2.031732629557337, 11.124340337266085], [0.994, 0.0, -\n 2.0015851063790824, 17.065216560157964], [0.997, 0.0, -\n 1.6251217072210773, 22.92972342344297], [0.879962, 0.0, -\n 0.6664719798856414, 6.3006757422352315], [0.879962, 0.0, -\n 0.43965281709208, 12.729711861022427], [0.879962, 0.0, -\n 0.38089067106386965, 19.13874628118303], [0.997, 0.0, -1.84450104897304,\n 12.353901248612093], [1.0, 0.0, -1.6018768253456253, 12.294387796695023\n ], [1.003, 0.0, -1.4465123738451062, 12.267904265603898], [1.2, 0.0, -\n 0.7140716982840785, 18.337451820715064], [1.2, 0.0, -0.6798532035654055,\n 30.753758552146028], [1.2, 0.0, -0.6715313063282914, 43.21437522785745],\n [1.2, 0.0, -0.6699829130522683, 55.672334134347615], [1.2, 0.0, -\n 0.6697574151727109, 68.12790660471377], [-1.02745, 0.0, \n 0.04033448829049041, 183.71316400018904], [-0.97668, 0.0, -\n 0.06119162392641084, 177.33241131524483], [-0.76665, 0.0, -\n 0.5123015866597882, 176.60722897242937], [-1.09137, 0.0, \n 0.1430195982223838, 82.9494619223421], [-1.10137, 0.0, \n 0.15354250908611455, 60.952121909407744]]'], {}), '([[0.994, 0.0, -2.1138987966945026, 5.43679543926019], [0.994, 0.0,\n -2.031732629557337, 11.124340337266085], [0.994, 0.0, -\n 2.0015851063790824, 17.065216560157964], [0.997, 0.0, -\n 1.6251217072210773, 22.92972342344297], [0.879962, 0.0, -\n 0.6664719798856414, 6.3006757422352315], [0.879962, 0.0, -\n 0.43965281709208, 12.729711861022427], [0.879962, 0.0, -\n 0.38089067106386965, 19.13874628118303], [0.997, 0.0, -1.84450104897304,\n 12.353901248612093], [1.0, 0.0, -1.6018768253456253, 12.294387796695023\n ], [1.003, 0.0, -1.4465123738451062, 12.267904265603898], [1.2, 0.0, -\n 0.7140716982840785, 18.337451820715064], [1.2, 0.0, -0.6798532035654055,\n 30.753758552146028], [1.2, 0.0, -0.6715313063282914, 43.21437522785745],\n [1.2, 0.0, -0.6699829130522683, 55.672334134347615], [1.2, 0.0, -\n 0.6697574151727109, 68.12790660471377], [-1.02745, 0.0, \n 0.04033448829049041, 183.71316400018904], [-0.97668, 0.0, -\n 0.06119162392641084, 177.33241131524483], [-0.76665, 0.0, -\n 0.5123015866597882, 176.60722897242937], [-1.09137, 0.0, \n 0.1430195982223838, 82.9494619223421], [-1.10137, 0.0, \n 0.15354250908611455, 60.952121909407744]])\n', (1252, 2435), True, 'import numpy as np\n'), ((3580, 3612), 'numpy.array', 'np.array', (['Sharp_X_Z_Ydot_T[:, 0]'], {}), '(Sharp_X_Z_Ydot_T[:, 0])\n', (3588, 3612), True, 'import numpy as np\n'), ((3639, 3671), 'numpy.array', 'np.array', (['Sharp_X_Z_Ydot_T[:, 1]'], {}), '(Sharp_X_Z_Ydot_T[:, 1])\n', (3647, 3671), True, 'import numpy as np\n'), ((3698, 3730), 'numpy.array', 'np.array', (['Sharp_X_Z_Ydot_T[:, 2]'], {}), '(Sharp_X_Z_Ydot_T[:, 2])\n', (3706, 3730), True, 'import numpy as np\n'), ((3757, 3789), 'numpy.array', 'np.array', (['Sharp_X_Z_Ydot_T[:, 3]'], {}), '(Sharp_X_Z_Ydot_T[:, 3])\n', (3765, 3789), True, 'import numpy as np\n'), ((1042, 1053), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (1049, 1053), True, 'import numpy as np\n'), ((1081, 1093), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (1089, 1093), True, 'import numpy as np\n'), ((1121, 1133), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (1129, 1133), True, 'import numpy as np\n'), ((1163, 
1175), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (1171, 1175), True, 'import numpy as np\n'), ((1205, 1217), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (1213, 1217), True, 'import numpy as np\n')]
|
"""
python test_PETA.py -g 0 -c 61 -b 256 -m GoogLeNetSPP -w ../models/
python test_PETA.py -g 0 -c 61 -b 256 -m GoogLeNet -w ../models/
python test_PETA.py -g 0 -c 61 -b 256 -m GoogLeNet -w ../models/imagenet_models/GoogLeNet_PETA/binary61_depth
python test_PETA.py -g 1 -c 68 -b 256 -m OEDCGoogLeNetSPP -w ../models/xxxxx.hdf5
python test_PETA.py -g 1 -c 14 -b 64 -m OEDCGoogLeNetSPP_lowerBody -w ../models/xxxxx.hdf5
"""
from network.GoogLenetSPP import GoogLeNetSPP
from network.GoogleLenet import GoogLeNet
from network.Inception_v4 import Inception_v4
from network.GoogleLenet_gap import GoogLeNet as GoogLeNet_gap
from network.OEDC_GoogLenetSPP import OEDCGoogLeNetSPP
from network.OEDC_GoogLenetSPP_lowerBody import OEDCGoogLeNetSPP_lowerBody
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.utils import multi_gpu_model
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping, TensorBoard, CSVLogger
from keras import Model, Sequential
import sys
import os
import argparse
import json
import numpy as np
import pandas as pd
import re
import tqdm
from keras import backend as K
from angular_losses import weighted_categorical_crossentropy, coarse_to_fine_categorical_crossentropy_lowerbody
alpha = []
def parse_arg():
models = ['GoogLeNet', 'GoogLeNet_gap', 'Inception_v4', 'GoogLeNetSPP', 'OEDCGoogLeNetSPP', 'OEDCGoogLeNetSPP_lowerBody']
parser = argparse.ArgumentParser(description='training of the WPAL...')
parser.add_argument('-g', '--gpus', type=str, default='',
                        help='The GPU device IDs to be used')
parser.add_argument('-c', '--classes', type=int, default=65,
help='The total number of classes to be predicted')
parser.add_argument('-b', '--batch', type=int, default=64,
help='The batch size of the training process')
parser.add_argument('-wd', '--width', type=int, default=160,
                        help='The width of the picture')
parser.add_argument('-hg', '--height', type=int, default=75,
help='The height of the picture')
parser.add_argument('-w', '--weight', type=str, default='',
help='The weights file of the pre-training')
parser.add_argument('-m', '--model', type=str, default='',
help='The model including: '+str(models))
parser.add_argument('-d', '--depth', type=int, default=9,
help='The model depth')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
return args
if __name__ == "__main__":
save_name = "binary61"
args = parse_arg()
class_num = args.classes
alpha = np.zeros((class_num,))
    # Data augmentation for pre-processing
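    # Note: as written, both branches below construct an identical
    # ImageDataGenerator with every normalisation option disabled, so the
    # heavy_augmentation flag has no effect in this test script.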
heavy_augmentation = True
if heavy_augmentation:
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False)
else:
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False)
image_width = args.width
image_height = args.height
filename = r"../results/PETA.csv"
data = np.array(pd.read_csv(filename))[:, 1:]
length = len(data)
data_x = np.zeros((length, image_width, image_height, 3))
data_y = np.zeros((length, class_num))
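    # Only rows 11400 onward are filled below; that slice forms the test split used later.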
for i in range(11400, length):
#img = image.load_img(path + m)
img = image.load_img(data[i, 0], target_size=(image_width, image_height, 3))
data_x[i] = image.img_to_array(img)
data_y[i] = np.array(data[i, 1:1+class_num], dtype="float32")
X_test = data_x[11400:]
y_test = data_y[11400:]
print("The shape of the X_test is: ", X_test.shape)
print("The shape of the y_test is: ", y_test.shape)
    # GoogLeNet expects 32*32 input images by default
if args.model == "GoogLeNetSPP":
model_dir = "GoogLeNetSPP_PETA/"
model = GoogLeNetSPP.build(None, None, 3, class_num)
loss_func = 'binary_crossentropy'
loss_weights = None
metrics=['accuracy']
elif args.model == "GoogLeNet":
model_dir = "GoogLeNet_PETA/"
model = GoogLeNet.build(image_width, image_height, 3, class_num, model_depth=args.depth)
#model = Model(inputs=model.input, outputs=model.get_layer('dense_1').output)
loss_func = 'binary_crossentropy'
loss_weights = None
metrics=['accuracy']
elif args.model == "Inception_v4":
model_dir = "InceptionV4_PETA/"
model = Inception_v4(image_width, image_height, 3, class_num)
loss_func = 'binary_crossentropy'
loss_weights = None
metrics=['accuracy']
elif args.model == "GoogLeNet_gap":
model = GoogLeNet_gap.build(image_width, image_height, 3, class_num, model_depth=args.depth)
#model = Model(inputs=model.input, outputs=model.get_layer('dense_1').output)
loss_func = 'binary_crossentropy'
loss_weights = None
metrics=['accuracy']
elif args.model == "OEDCGoogLeNetSPP":
model = OEDCGoogLeNetSPP.build(None, None, 3, 7, [3, 7, 11, 6, 7, 12, 15])#[4, 7, 11, 7, 7, 13, 16]
loss_func = 'binary_crossentropy'
loss_weights = None
metrics=['accuracy']
elif args.model == "OEDCGoogLeNetSPP_lowerBody":
model = OEDCGoogLeNetSPP_lowerBody.build(None, None, 3, 2, 7, 5)
loss_func = 'binary_crossentropy'#coarse_to_fine_categorical_crossentropy_lowerbody(alpha)#['categorical_crossentropy', lambda y_true,y_pred: y_pred]
loss_weights=None#[1.,1.]
metrics={'softmax_labels':'accuracy'}
gpus_num = len(args.gpus.split(','))
if gpus_num != 1:
        model = multi_gpu_model(model, gpus=gpus_num)
#model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
model.compile(loss=loss_func, optimizer='adam', loss_weights=loss_weights, metrics=metrics)
model.summary()
reg = args.weight + "_(e|f)1*"
print(reg)
weights = [s for s in os.listdir("../models/imagenet_models/" + model_dir)
if re.match(reg, s)]
print(weights)
for w in tqdm.tqdm(weights):
model.load_weights("../models/imagenet_models/" + model_dir + w, by_name=True)
predictions = model.predict(X_test)
print("The shape of the predictions_test is: ", predictions.shape)
np.save("../results/predictions/" + args.model + "_" + w + ".npy", predictions)
print("../results/predictions/" + args.model + "_" + w + ".npy")
#np.save("../results/predictions/" + args.model+'_depth'+str(args.depth) + '_' + save_name + "_predictions50_imagenet_test7600.npy", predictions)
|
[
"keras.preprocessing.image.ImageDataGenerator",
"tqdm.tqdm",
"network.GoogLenetSPP.GoogLeNetSPP.build",
"numpy.save",
"argparse.ArgumentParser",
"network.GoogleLenet.GoogLeNet.build",
"keras.utils.multi_gpu_model",
"pandas.read_csv",
"numpy.zeros",
"re.match",
"network.OEDC_GoogLenetSPP.OEDCGoogLeNetSPP.build",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"network.GoogleLenet_gap.GoogLeNet.build",
"numpy.array",
"network.OEDC_GoogLenetSPP_lowerBody.OEDCGoogLeNetSPP_lowerBody.build",
"network.Inception_v4.Inception_v4",
"os.listdir"
] |
[((1526, 1588), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""training of the WPAL..."""'}), "(description='training of the WPAL...')\n", (1549, 1588), False, 'import argparse\n'), ((2846, 2868), 'numpy.zeros', 'np.zeros', (['(class_num,)'], {}), '((class_num,))\n', (2854, 2868), True, 'import numpy as np\n'), ((3584, 3632), 'numpy.zeros', 'np.zeros', (['(length, image_width, image_height, 3)'], {}), '((length, image_width, image_height, 3))\n', (3592, 3632), True, 'import numpy as np\n'), ((3646, 3675), 'numpy.zeros', 'np.zeros', (['(length, class_num)'], {}), '((length, class_num))\n', (3654, 3675), True, 'import numpy as np\n'), ((6452, 6470), 'tqdm.tqdm', 'tqdm.tqdm', (['weights'], {}), '(weights)\n', (6461, 6470), False, 'import tqdm\n'), ((2988, 3134), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(False)', 'samplewise_center': '(False)', 'featurewise_std_normalization': '(False)', 'samplewise_std_normalization': '(False)'}), '(featurewise_center=False, samplewise_center=False,\n featurewise_std_normalization=False, samplewise_std_normalization=False)\n', (3006, 3134), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((3208, 3354), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(False)', 'samplewise_center': '(False)', 'featurewise_std_normalization': '(False)', 'samplewise_std_normalization': '(False)'}), '(featurewise_center=False, samplewise_center=False,\n featurewise_std_normalization=False, samplewise_std_normalization=False)\n', (3226, 3354), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((3765, 3835), 'keras.preprocessing.image.load_img', 'image.load_img', (['data[i, 0]'], {'target_size': '(image_width, image_height, 3)'}), '(data[i, 0], target_size=(image_width, image_height, 3))\n', (3779, 3835), False, 'from keras.preprocessing import image\n'), ((3856, 3879), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (3874, 3879), False, 'from keras.preprocessing import image\n'), ((3900, 3951), 'numpy.array', 'np.array', (['data[i, 1:1 + class_num]'], {'dtype': '"""float32"""'}), "(data[i, 1:1 + class_num], dtype='float32')\n", (3908, 3951), True, 'import numpy as np\n'), ((4249, 4293), 'network.GoogLenetSPP.GoogLeNetSPP.build', 'GoogLeNetSPP.build', (['None', 'None', '(3)', 'class_num'], {}), '(None, None, 3, class_num)\n', (4267, 4293), False, 'from network.GoogLenetSPP import GoogLeNetSPP\n'), ((6008, 6045), 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['model'], {'gpus': 'gpus_num'}), '(model, gpus=gpus_num)\n', (6023, 6045), False, 'from keras.utils import multi_gpu_model\n'), ((6686, 6765), 'numpy.save', 'np.save', (["('../results/predictions/' + args.model + '_' + w + '.npy')", 'predictions'], {}), "('../results/predictions/' + args.model + '_' + w + '.npy', predictions)\n", (6693, 6765), True, 'import numpy as np\n'), ((3518, 3539), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (3529, 3539), True, 'import pandas as pd\n'), ((4483, 4568), 'network.GoogleLenet.GoogLeNet.build', 'GoogLeNet.build', (['image_width', 'image_height', '(3)', 'class_num'], {'model_depth': 'args.depth'}), '(image_width, image_height, 3, class_num, model_depth=args.depth\n )\n', (4498, 4568), False, 'from network.GoogleLenet import GoogLeNet\n'), ((6335, 6387), 'os.listdir', 'os.listdir', (["('../models/imagenet_models/' + model_dir)"], {}), 
"('../models/imagenet_models/' + model_dir)\n", (6345, 6387), False, 'import os\n'), ((6402, 6418), 're.match', 're.match', (['reg', 's'], {}), '(reg, s)\n', (6410, 6418), False, 'import re\n'), ((4844, 4897), 'network.Inception_v4.Inception_v4', 'Inception_v4', (['image_width', 'image_height', '(3)', 'class_num'], {}), '(image_width, image_height, 3, class_num)\n', (4856, 4897), False, 'from network.Inception_v4 import Inception_v4\n'), ((5053, 5142), 'network.GoogleLenet_gap.GoogLeNet.build', 'GoogLeNet_gap.build', (['image_width', 'image_height', '(3)', 'class_num'], {'model_depth': 'args.depth'}), '(image_width, image_height, 3, class_num, model_depth=\n args.depth)\n', (5072, 5142), True, 'from network.GoogleLenet_gap import GoogLeNet as GoogLeNet_gap\n'), ((5382, 5448), 'network.OEDC_GoogLenetSPP.OEDCGoogLeNetSPP.build', 'OEDCGoogLeNetSPP.build', (['None', 'None', '(3)', '(7)', '[3, 7, 11, 6, 7, 12, 15]'], {}), '(None, None, 3, 7, [3, 7, 11, 6, 7, 12, 15])\n', (5404, 5448), False, 'from network.OEDC_GoogLenetSPP import OEDCGoogLeNetSPP\n'), ((5642, 5698), 'network.OEDC_GoogLenetSPP_lowerBody.OEDCGoogLeNetSPP_lowerBody.build', 'OEDCGoogLeNetSPP_lowerBody.build', (['None', 'None', '(3)', '(2)', '(7)', '(5)'], {}), '(None, None, 3, 2, 7, 5)\n', (5674, 5698), False, 'from network.OEDC_GoogLenetSPP_lowerBody import OEDCGoogLeNetSPP_lowerBody\n')]
|
from __future__ import division
from itertools import product
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
__all__ = [
"datasets_from_frame", "categorical_longform", "continuous_longform",
"plot_categorical_diff", "plot_continuous_diff", "TrainTestDiff"
]
def _check_features_presence(datasets, features):
for feature in features:
for name, dataset in datasets.items():
if feature not in dataset.columns:
message = "`{}` feature missing in `{}`".format(feature, name)
raise KeyError(message)
def datasets_from_frame(dataframe, feature):
"""Creates a dict dataset from a dataframe
Given a categorical feature it creates a dict where each key is
a level of the feature and each value is a dataframe, then you
can use this datasets dict to plot graphs
Args:
dataframe (pandas.DataFrame): the frame that you're going to
use to create a dict datasets
feature (str): this feature will be used for grouping and
creating the datasets dict
Returns:
dict: A :class:`dict` where keys are levels of ``feature`` and values
are :class:`pandas.core.frame.DataFrame` from a ``dataframe.groupby(feature)``
Raises:
KeyError: if ``feature`` is not present in ``dataframe``
"""
grouped = dataframe.groupby([feature])
datasets = dict(e for e in grouped)
return datasets
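# Hypothetical usage sketch (added for illustration; the frame and the 'sex' column below
# are made-up examples, not part of the original module):
def _example_datasets_from_frame():
    df = pd.DataFrame({'sex': ['m', 'f', 'm', 'f'], 'age': [30, 25, 41, 37]})
    datasets = datasets_from_frame(df, 'sex')
    # one sub-frame per level of 'sex'; keys follow the pandas groupby keys
    return {key: frame.shape for key, frame in datasets.items()}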
# Long Form Data
def _cat_longform(dataset, name, feature):
data = dataset[feature].value_counts().reset_index()
data.columns = ['level', 'count']
data['feature'] = feature
data['dataset'] = name
data['prop'] = data['count'] / dataset.shape[0]
return data
def _cont_longform(dataset, name, feature):
data = pd.DataFrame()
data['dataset'] = np.repeat(name, dataset.shape[0])
data['feature'] = np.repeat(feature, dataset.shape[0])
data['value'] = dataset[feature]
return data
def _longform_frame(datasets, features, func):
_check_features_presence(datasets, features)
data_grid = product(datasets.items(), features)
data = [func(d, n, f) for (n, d), f in data_grid]
data = pd.concat(data)
return data
def categorical_longform(datasets, features):
"""Given datasets and features it returns a long form representation of it
Args:
datasets (dict): each key is a dataset name and each value is a ``pandas.DataFrame``
features (list): a list of string features present in the datasets
Returns:
        pandas.core.frame.DataFrame: a tidy long-form data frame
Raises:
KeyError: if any of the ``features`` isn't present in the ``datasets`` dict
"""
longform = _longform_frame(datasets, features, _cat_longform)
return longform
def continuous_longform(datasets, features):
"""Given datasets and features it returns a long form representation of it
Args:
datasets (dict): each key is a dataset name and each value is a ``pandas.DataFrame``
features (list): a list of string features present in the datasets
Returns:
        pandas.core.frame.DataFrame: a tidy long-form data frame
Raises:
KeyError: if any of the ``features`` isn't present in the ``datasets`` dict
"""
longform = _longform_frame(datasets, features, _cont_longform)
return longform
# Plot Style config
TITLE_FONTSIZE = 20
TITLE_YSPACE = 1.06
def plot_continuous_diff(datasets,
features,
kind="box",
col_wrap=3,
size=4,
aspect=1,
title=None):
"""Plots the distribution differences of continuous features in each dataset
Args:
datasets (dict): a dict where the keys are names and the values
are ``pandas.DataFrame``
features (list): a list of continuous features present in every
dataset of ``datasets``
kind (str): {point, bar, box, violin, strip}
The kind of plot to draw.
col_wrap (int): how many charts you want per row
size (float): Height (in inches)
aspect (float): Aspect ratio of each facet, so that aspect * size gives the width
of each facet in inches
title (str): the title of the figure
Returns:
        (pandas.core.frame.DataFrame, matplotlib.Figure): a tuple with a long-form data frame
and matplotlib figure to customize
Raises:
KeyError: if any of the ``features`` isn't present in the ``datasets`` dict
"""
if title is None:
title = "{} differences".format("/".join(datasets.keys()))
data = continuous_longform(datasets, features)
grid = sns.factorplot(
x="dataset",
y="value",
col="feature",
data=data,
kind=kind,
sharey=False,
size=size,
aspect=aspect,
col_wrap=col_wrap)
grid.fig.suptitle(title, y=TITLE_YSPACE, fontsize=TITLE_FONTSIZE)
return data, grid.fig
def plot_categorical_diff(datasets,
features,
kind="prop",
col_wrap=4,
size=4,
aspect=1,
title=None):
"""Plots the distribution differences of categorical features in each dataset
Args:
datasets (dict): a dict where the keys are names and the values
are ``pandas.DataFrame``
features (list): a list of categorical features present in every
dataset of ``datasets``
kind (Optional[str]): {count, prop}
Use "count" for count of unique values for every level of a feature
in every dataset present in ``datasets``
Use "prop" for the proportion of that level of a feature
col_wrap (int): how many charts you want per row
size (float): Height (in inches)
aspect (float): Aspect ratio of each facet, so that aspect * size gives the
width of each facet in inches
title (str): the title of the figure
Returns:
        (pandas.core.frame.DataFrame, matplotlib.Figure): a tuple with a long-form data frame
and matplotlib figure to customize
Raises:
KeyError: if any of the ``features`` isn't present in the ``datasets`` dict
"""
if title is None:
title = "{} differences".format("/".join(datasets.keys()))
longform_data = categorical_longform(datasets, features)
# Group longform and sort by `features` order
grouped_features = longform_data.groupby(['feature'])
grouped_features = sorted(
grouped_features, key=lambda x: features.index(x[0]))
ncol = col_wrap
n_axes = len(features)
nrow = int(np.ceil(n_axes / col_wrap))
figsize = (ncol * size * aspect, nrow * size)
fig = plt.figure(figsize=figsize)
fig.suptitle(title, y=TITLE_YSPACE, fontsize=TITLE_FONTSIZE)
plt.subplots_adjust(wspace=0.5, hspace=0.35)
axes = np.empty(n_axes, object)
for i in range(n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1)
data_grid = zip(grouped_features, axes)
for (name, data), ax in data_grid:
sns.barplot(
x="level", y=kind, hue="dataset", data=data, ax=ax).set_title(name)
return longform_data, fig
class TrainTestDiff(object):
""" Helper class to ease distribution analysis on the same datasets"""
def __init__(self, datasets):
self.datasets = datasets
def plot_cont_diff(self,
features,
kind="box",
col_wrap=3,
size=4,
aspect=1,
title=None):
""" See :func:`plot_continuous_diff`"""
return plot_continuous_diff(self.datasets, features, kind, col_wrap,
size, aspect, title)
def plot_cat_diff(self, features, col_wrap=3, kind="prop", title=None):
""" See :func:`plot_categorical_diff`"""
return plot_categorical_diff(
self.datasets, features, kind=kind, col_wrap=col_wrap, title=title)
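# Hypothetical usage sketch (added for illustration): `train_df`/`test_df` and the
# feature name below are assumptions; both frames must contain the listed features.
def _example_train_test_diff(train_df, test_df):
    diff = TrainTestDiff({'train': train_df, 'test': test_df})
    longform, fig = diff.plot_cat_diff(['sex'], kind='prop')
    return longform, fig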
|
[
"pandas.DataFrame",
"numpy.ceil",
"seaborn.factorplot",
"numpy.empty",
"seaborn.barplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots_adjust",
"pandas.concat",
"numpy.repeat"
] |
[((1823, 1837), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1835, 1837), True, 'import pandas as pd\n'), ((1860, 1893), 'numpy.repeat', 'np.repeat', (['name', 'dataset.shape[0]'], {}), '(name, dataset.shape[0])\n', (1869, 1893), True, 'import numpy as np\n'), ((1916, 1952), 'numpy.repeat', 'np.repeat', (['feature', 'dataset.shape[0]'], {}), '(feature, dataset.shape[0])\n', (1925, 1952), True, 'import numpy as np\n'), ((2223, 2238), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (2232, 2238), True, 'import pandas as pd\n'), ((4797, 4935), 'seaborn.factorplot', 'sns.factorplot', ([], {'x': '"""dataset"""', 'y': '"""value"""', 'col': '"""feature"""', 'data': 'data', 'kind': 'kind', 'sharey': '(False)', 'size': 'size', 'aspect': 'aspect', 'col_wrap': 'col_wrap'}), "(x='dataset', y='value', col='feature', data=data, kind=kind,\n sharey=False, size=size, aspect=aspect, col_wrap=col_wrap)\n", (4811, 4935), True, 'import seaborn as sns\n'), ((6950, 6977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6960, 6977), True, 'import matplotlib.pyplot as plt\n'), ((7047, 7091), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)', 'hspace': '(0.35)'}), '(wspace=0.5, hspace=0.35)\n', (7066, 7091), True, 'import matplotlib.pyplot as plt\n'), ((7104, 7128), 'numpy.empty', 'np.empty', (['n_axes', 'object'], {}), '(n_axes, object)\n', (7112, 7128), True, 'import numpy as np\n'), ((6861, 6887), 'numpy.ceil', 'np.ceil', (['(n_axes / col_wrap)'], {}), '(n_axes / col_wrap)\n', (6868, 6887), True, 'import numpy as np\n'), ((7302, 7365), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""level"""', 'y': 'kind', 'hue': '"""dataset"""', 'data': 'data', 'ax': 'ax'}), "(x='level', y=kind, hue='dataset', data=data, ax=ax)\n", (7313, 7365), True, 'import seaborn as sns\n')]
|
import numpy as np
def load_train_data(dim):
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train_convert = [convert_pic(data,dim) for data in x_train]
x_test_convert = [convert_pic(data,dim) for data in x_test]
return (np.array(x_train_convert),y_train),(np.array(x_test_convert),y_test)
def convert_pic(data,dim):
import cv2
return cv2.resize(data, dim, interpolation=cv2.INTER_AREA)
def load_test_data(path,dim):
import cv2
img = cv2.imread(path)
img_convert = convert_pic(img, dim)
return img_convert
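# Hypothetical usage sketch (added for illustration): resize the MNIST digits to 14x14.
def _example_load_train_data():
    (x_train, y_train), (x_test, y_test) = load_train_data((14, 14))
    return x_train.shape, x_test.shape  # expected: (60000, 14, 14), (10000, 14, 14)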
|
[
"cv2.imread",
"numpy.array",
"cv2.resize",
"tensorflow.keras.datasets.mnist.load_data"
] |
[((132, 149), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (147, 149), False, 'from tensorflow.keras.datasets import mnist\n'), ((402, 453), 'cv2.resize', 'cv2.resize', (['data', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(data, dim, interpolation=cv2.INTER_AREA)\n', (412, 453), False, 'import cv2\n'), ((505, 521), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (515, 521), False, 'import cv2\n'), ((284, 309), 'numpy.array', 'np.array', (['x_train_convert'], {}), '(x_train_convert)\n', (292, 309), True, 'import numpy as np\n'), ((320, 344), 'numpy.array', 'np.array', (['x_test_convert'], {}), '(x_test_convert)\n', (328, 344), True, 'import numpy as np\n')]
|
'''Functions for estimating an adjustment to the posterior prediction
over subtypes when making predictions online.
Author: <NAME>
'''
import numpy as np
import logging
from scipy.optimize import minimize
from scipy.misc import logsumexp
class OnlineAdjustment:
def __init__(self, model, penalty, seed=0):
self.model = model
self.penalty = penalty
self.seed = seed
def fit(self, training_data, w0=None, **options):
self.objective = OnlineLoss(training_data, self.model, self.penalty)
f = self.objective.value
g = self.objective.gradient
if w0 is None:
num_feat = self.objective.encoder.num_features
w0 = np.ones(num_feat)
#w0 = np.random.RandomState(self.seed).normal(size=num_feat)
self.solution = minimize(f, w0, jac=g, method='BFGS', options=options)
self.w = self.solution.x
return self
def proba(self, histories):
p = [self.objective.engine.run(X, self.w) for X in histories]
return np.array(p)
def log_proba(self, histories):
p = self.proba(histories)
return np.log(p)
class OnlineLoss:
def __init__(self, training_data, model, penalty):
self.training_data = training_data
self.model = model
self.penalty = penalty
self.encoder = OnlineFeatureEncoder(model)
self.engine = InferenceEngine(self.encoder)
def value(self, w):
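        # Average negative marginal log-likelihood of the observed outcomes: for each
        # history X, the subtype z is marginalised under the adjusted prediction
        # p(z) = engine.run(X, w); an L2 penalty on w is then added.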
v = 0.0
n = 0
for X, y in self.training_data:
if len(y[1][0][0]) < 1:
continue
lp = np.log(self.engine.run(X, w))
ll = self.model.likelihood(*y[1][0])
v -= logsumexp(lp + ll)
n += 1
v /= n
v += self.penalty / 2.0 * np.dot(w, w)
logging.info('f(w) = {:.06f}'.format(v))
return v
def gradient(self, w):
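        # Gradient of the loss above: features expected under the adjusted prediction
        # minus features weighted by the posterior over z given the outcome, averaged
        # over examples, plus the derivative of the L2 penalty.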
g = np.zeros_like(w)
n = 0
for X, y in self.training_data:
if len(y[1][0][0]) < 1:
continue
lp = np.log(self.engine.run(X, w))
ll = self.model.likelihood(*y[1][0])
lj = lp + ll
wt = np.exp(lj - logsumexp(lj))
logging.debug('Predicted {}'.format(np.round(np.exp(lp), 2)))
logging.debug('Posterior {}'.format(np.round(wt, 2)))
f_exp = self.encoder.expected_encoding(np.exp(lp), X)
g_i = f_exp
for z, _ in enumerate(wt):
f_obs = self.encoder.encode(z, X)
g_i -= wt[z] * f_obs
# g += wt[z] * (f_exp - f_obs)
logging.debug('Gradient {}'.format(np.round(g_i, 2)))
g += g_i
n += 1
g /= n
g += self.penalty * w
logging.info('||g(w)||_inf = {:.06f}'.format(g.max()))
return g
class OnlineFeatureEncoder:
def __init__(self, model):
self.num_subtypes = model.num_subtypes
self.model = model
@property
def num_features(self):
# return 2
return 2 * self.num_subtypes
@property
def num_outputs(self):
return self.num_subtypes
# def encode(self, z, history):
# 'Single weight encoding.'
# d = history[0]
# lp = self.model.prior(*d)
# ll = self.model.likelihood(*d)
# f = np.zeros(2)
# f[0] = lp[z]
# f[1] = ll[z]
# return f
def encode(self, z, history):
'Likelihood ratio encoding.'
d = history[0]
lp = self.model.prior(*d)
ll = self.model.likelihood(*d)
lpr = lp - lp[0]
llr = ll - ll[0]
k = self.num_subtypes
f = np.zeros(2 * k)
f[z] = lpr[z]
f[k + z] = llr[z]
return f
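    # Layout of the likelihood-ratio encoding above: for subtype z, f[z] holds the
    # log-prior ratio against subtype 0 and f[k + z] the log-likelihood ratio, so each
    # subtype gets its own pair of weights in w.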
# def encode(self, z, history):
# d = history[0]
# lp = self.model.prior(*d)
# ll = self.model.likelihood(*d)
# k = self.num_subtypes
# f = np.zeros(2 * k)
# f[z] = lp[z]
# f[k + z] = ll[z]
# return f
# def expected_encoding(self, pz, history):
# 'Single weight encoding.'
# d = history[0]
# lp = self.model.prior(*d)
# ll = self.model.likelihood(*d)
# f = np.zeros(2)
# f[0] = (pz * lp).sum()
# f[1] = (pz * ll).sum()
# return f
def expected_encoding(self, pz, history):
'Likelihood ratio encoding.'
d = history[0]
lp = self.model.prior(*d)
ll = self.model.likelihood(*d)
lpr = lp - lp[0]
llr = ll - ll[0]
k = self.num_subtypes
f = np.zeros(2 * k)
f[:k] = pz * lpr
f[k:] = pz * llr
return f
# def expected_encoding(self, pz, history):
# d = history[0]
# lp = self.model.prior(*d)
# ll = self.model.likelihood(*d)
# k = self.num_subtypes
# f = np.zeros(2 * k)
# f[:k] = pz * lp
# f[k:] = pz * ll
# return f
class InferenceEngine:
def __init__(self, encoder):
self.encoder = encoder
def run(self, history, w):
s = np.zeros(self.encoder.num_outputs)
for z, _ in enumerate(s):
f = self.encoder.encode(z, history)
s[z] = np.dot(f, w)
p = np.exp(s - logsumexp(s))
return p
|
[
"scipy.optimize.minimize",
"numpy.zeros_like",
"numpy.log",
"numpy.zeros",
"numpy.ones",
"scipy.misc.logsumexp",
"numpy.array",
"numpy.exp",
"numpy.dot",
"numpy.round"
] |
[((827, 881), 'scipy.optimize.minimize', 'minimize', (['f', 'w0'], {'jac': 'g', 'method': '"""BFGS"""', 'options': 'options'}), "(f, w0, jac=g, method='BFGS', options=options)\n", (835, 881), False, 'from scipy.optimize import minimize\n'), ((1054, 1065), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (1062, 1065), True, 'import numpy as np\n'), ((1152, 1161), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (1158, 1161), True, 'import numpy as np\n'), ((1929, 1945), 'numpy.zeros_like', 'np.zeros_like', (['w'], {}), '(w)\n', (1942, 1945), True, 'import numpy as np\n'), ((3708, 3723), 'numpy.zeros', 'np.zeros', (['(2 * k)'], {}), '(2 * k)\n', (3716, 3723), True, 'import numpy as np\n'), ((4636, 4651), 'numpy.zeros', 'np.zeros', (['(2 * k)'], {}), '(2 * k)\n', (4644, 4651), True, 'import numpy as np\n'), ((5139, 5173), 'numpy.zeros', 'np.zeros', (['self.encoder.num_outputs'], {}), '(self.encoder.num_outputs)\n', (5147, 5173), True, 'import numpy as np\n'), ((699, 716), 'numpy.ones', 'np.ones', (['num_feat'], {}), '(num_feat)\n', (706, 716), True, 'import numpy as np\n'), ((1720, 1738), 'scipy.misc.logsumexp', 'logsumexp', (['(lp + ll)'], {}), '(lp + ll)\n', (1729, 1738), False, 'from scipy.misc import logsumexp\n'), ((1808, 1820), 'numpy.dot', 'np.dot', (['w', 'w'], {}), '(w, w)\n', (1814, 1820), True, 'import numpy as np\n'), ((5275, 5287), 'numpy.dot', 'np.dot', (['f', 'w'], {}), '(f, w)\n', (5281, 5287), True, 'import numpy as np\n'), ((2421, 2431), 'numpy.exp', 'np.exp', (['lp'], {}), '(lp)\n', (2427, 2431), True, 'import numpy as np\n'), ((5311, 5323), 'scipy.misc.logsumexp', 'logsumexp', (['s'], {}), '(s)\n', (5320, 5323), False, 'from scipy.misc import logsumexp\n'), ((2213, 2226), 'scipy.misc.logsumexp', 'logsumexp', (['lj'], {}), '(lj)\n', (2222, 2226), False, 'from scipy.misc import logsumexp\n'), ((2351, 2366), 'numpy.round', 'np.round', (['wt', '(2)'], {}), '(wt, 2)\n', (2359, 2366), True, 'import numpy as np\n'), ((2681, 2697), 'numpy.round', 'np.round', (['g_i', '(2)'], {}), '(g_i, 2)\n', (2689, 2697), True, 'import numpy as np\n'), ((2286, 2296), 'numpy.exp', 'np.exp', (['lp'], {}), '(lp)\n', (2292, 2296), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import xml.etree.ElementTree as et
CLASSES = ('ore carrier', 'bulk cargo carrier', 'container ship',
'general cargo ship', 'fishing boat', 'passenger ship')
class BboxAnalyze:
def __init__(self, data_root, *, idx_file='trainval'):
"""
:param data_root:
"""
self.root = os.path.join(data_root)
idx_file_ = open(os.path.join(self.root, 'ImageSets',
'Main', f'{idx_file}.txt')).readlines()
self.idx_list = [i.rstrip('\n') for i in idx_file_]
self.label_names = CLASSES
@property
def get_collect_data(self):
collected = {'img_size': [],
'img_path': [],
'bbox': [],
'labels': []}
for idx in self.idx_list:
img_size, img_path, bbox, labels = self._get_collect(idx)
collected['img_size'].append(img_size)
collected['bbox'].append(bbox)
collected['labels'].append(labels)
collected['img_path'].append(img_path)
return collected
    # Anchor box aspect ratio and area size distribution
def analyze_bbox_dist(self, bbox: np.array):
h, w = bbox[:, 3] - bbox[:, 1], bbox[:, 2] - bbox[:, 0]
area = h * w
area_count, area_bins = np.histogram(area)
try:
ratio = h / w
except Warning:
            print(f'{w}: divide by zero, height: {h}, width: {w}')
n, bins, _, = plt.hist(area, bins=50)
        # Annotate the count of the tallest bar
n_max = np.argmax(n)
bins_max = bins[n_max]
plt.text(bins[n_max] + (bins[1] - bins[0]) / 2, n[n_max] * 1.01, int(n[n_max]), ha='center', va='bottom')
plt.text(bins[n_max] + (bins[1] - bins[0]) / 2, -0.01, int(bins_max), ha='center', va='bottom')
        # Set the xticks to the bin edges returned by plt.hist; each edge marks the left side of a bar, and there are n+1 edges, including both the left and right edges of the last bin
plt.xticks(bins[:-1:10])
plt.xlabel = 'area'
plt.show()
n, bins, _ = plt.hist(ratio, bins=10)
for i in range(len(n)):
plt.text(bins[i] + (bins[1] - bins[0]) / 2, n[i] * 1.01, int(n[i]), ha='center', va='bottom')
plt.xticks(bins[:-1])
plt.xlabel = 'ratio'
plt.show()
def analyze_label_dist(self, labels: np.array):
n, bins, _ = plt.hist(labels, bins=len(self.label_names), align='mid')
for i in range(len(n)):
plt.text(bins[i] + (bins[1] - bins[0]) / 2, n[i] * 1.01, int(n[i]), ha='center', va='bottom')
plt.xlabel = 'label distribution'
plt.ylabel = 'times'
plt.xticks(bins[:-1],
list(self.label_names),
color='blue',
rotation=60)
plt.show()
def analyze_bboxnums(self, bbox: list):
bbox_bincount = np.zeros(len(bbox))
for idx, bbox_tmp in enumerate(bbox):
bbox_bincount[idx] = len(bbox_tmp)
n, bins, _ = plt.hist(bbox_bincount, bins=np.array(bbox_bincount, dtype=np.uint8).max(),
align='mid')
for i in range(len(n)):
plt.text(bins[i] + (bins[1] - bins[0]) / 2, n[i] * 1.01, int(n[i]), ha='center', va='bottom')
plt.xticks(bins[:-1])
plt.xlabel = 'bbox_nums'
plt.show()
def bbox_vision(self, img_path: str, bbox: np.array, labels: np.array):
pass
def _get_collect(self, id_):
bbox = []
labels = []
annotation = et.parse(os.path.join(self.root, 'Annotations', f'{id_}.xml'))
size = annotation.find('size')
img_width = size.find('width').text
img_height = size.find('height').text
img_channel = size.find('depth').text
img_size = [img_height, img_width, img_channel]
for anno in annotation.findall('object'):
name = anno.find('name').text
try:
labels.append(self.label_names.index(name))
except ValueError as e:
print(f'error xml name: {name}')
# difficult.append(anno.find('difficult').text)
bbox_info = anno.find('bndbox')
xmin = bbox_info.find('xmin').text
ymin = bbox_info.find('ymin').text
xmax = bbox_info.find('xmax').text
ymax = bbox_info.find('ymax').text
bbox.append([xmin, ymin, xmax, ymax])
bbox = np.array(bbox, dtype=np.uint16)
labels = np.array(labels, dtype=np.uint16)
img_path = os.path.join(self.root, 'JPEGImages', f'{id_}.jpg')
return img_size, img_path, bbox, labels
if __name__ == '__main__':
bb_analy = BboxAnalyze('../../SeaShips', idx_file='test')
collect_data = bb_analy.get_collect_data
bbox = collect_data['bbox']
label = collect_data['labels']
    # Concatenate the bboxes and labels across all images
bbox_cat = np.concatenate(bbox, axis=0)
label_cat = np.concatenate(label, axis=0)
bb_analy.analyze_bbox_dist(bbox_cat)
bb_analy.analyze_label_dist(label_cat)
bb_analy.analyze_bboxnums(bbox)
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"numpy.argmax",
"numpy.histogram",
"numpy.array",
"matplotlib.pyplot.xticks",
"os.path.join",
"numpy.concatenate"
] |
[((4887, 4915), 'numpy.concatenate', 'np.concatenate', (['bbox'], {'axis': '(0)'}), '(bbox, axis=0)\n', (4901, 4915), True, 'import numpy as np\n'), ((4932, 4961), 'numpy.concatenate', 'np.concatenate', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (4946, 4961), True, 'import numpy as np\n'), ((416, 439), 'os.path.join', 'os.path.join', (['data_root'], {}), '(data_root)\n', (428, 439), False, 'import os\n'), ((1371, 1389), 'numpy.histogram', 'np.histogram', (['area'], {}), '(area)\n', (1383, 1389), True, 'import numpy as np\n'), ((1544, 1567), 'matplotlib.pyplot.hist', 'plt.hist', (['area'], {'bins': '(50)'}), '(area, bins=50)\n', (1552, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1621), 'numpy.argmax', 'np.argmax', (['n'], {}), '(n)\n', (1618, 1621), True, 'import numpy as np\n'), ((1966, 1990), 'matplotlib.pyplot.xticks', 'plt.xticks', (['bins[:-1:10]'], {}), '(bins[:-1:10])\n', (1976, 1990), True, 'import matplotlib.pyplot as plt\n'), ((2027, 2037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2035, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2083), 'matplotlib.pyplot.hist', 'plt.hist', (['ratio'], {'bins': '(10)'}), '(ratio, bins=10)\n', (2067, 2083), True, 'import matplotlib.pyplot as plt\n'), ((2230, 2251), 'matplotlib.pyplot.xticks', 'plt.xticks', (['bins[:-1]'], {}), '(bins[:-1])\n', (2240, 2251), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2297, 2299), True, 'import matplotlib.pyplot as plt\n'), ((2788, 2798), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2796, 2798), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3289), 'matplotlib.pyplot.xticks', 'plt.xticks', (['bins[:-1]'], {}), '(bins[:-1])\n', (3278, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3339, 3341), True, 'import matplotlib.pyplot as plt\n'), ((4432, 4463), 'numpy.array', 'np.array', (['bbox'], {'dtype': 'np.uint16'}), '(bbox, dtype=np.uint16)\n', (4440, 4463), True, 'import numpy as np\n'), ((4481, 4514), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.uint16'}), '(labels, dtype=np.uint16)\n', (4489, 4514), True, 'import numpy as np\n'), ((4534, 4585), 'os.path.join', 'os.path.join', (['self.root', '"""JPEGImages"""', 'f"""{id_}.jpg"""'], {}), "(self.root, 'JPEGImages', f'{id_}.jpg')\n", (4546, 4585), False, 'import os\n'), ((3535, 3587), 'os.path.join', 'os.path.join', (['self.root', '"""Annotations"""', 'f"""{id_}.xml"""'], {}), "(self.root, 'Annotations', f'{id_}.xml')\n", (3547, 3587), False, 'import os\n'), ((465, 528), 'os.path.join', 'os.path.join', (['self.root', '"""ImageSets"""', '"""Main"""', 'f"""{idx_file}.txt"""'], {}), "(self.root, 'ImageSets', 'Main', f'{idx_file}.txt')\n", (477, 528), False, 'import os\n'), ((3032, 3071), 'numpy.array', 'np.array', (['bbox_bincount'], {'dtype': 'np.uint8'}), '(bbox_bincount, dtype=np.uint8)\n', (3040, 3071), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
# some helper methods mostly to improve legibility in main notebooks
def load_wdi(data_path='../data/wdi/WDIData.csv', series_path='../data/wdi/WDISeries.csv'):
return pd.read_csv(data_path, low_memory=False), pd.read_csv(series_path, low_memory=False)
def project_size_USD_calculated(row):
donor = row['donor_name']
if donor == "AsianDB":
return row['asdb_approvedamount'] * 1e6
elif donor == "DFID":
return row['dfid_projectbudgetcurrent'] * 1.51
elif donor == "GFATM":
return row['gfatm_projectdisbconst_amount']
elif donor == "GiZ":
return row['giz_projectsize'] * 1306.5
elif donor == "IFAD":
return row['ifad_projectsize'] * 1e6
elif donor == "JICA":
return row['jica_projectsize'] * 10687
elif donor == "KfW":
return row['kfw_projectsize'] * 1.28
elif donor == 'WB':
return row['wb_lendingproject_cost']
else:
return 0
def load_projects(data_path="../data/aid_projects.csv", convert_dt=True, merge_purpose=True):
df = pd.read_csv(data_path, low_memory=False)
if convert_dt:
df['start_dt'] = pd.to_datetime(df['start_date'], format='%d%b%Y', errors='coerce')
df['start_year'] = df['start_dt'].dt.year
df['completion_dt'] = pd.to_datetime(df['completion_date'], format='%d%b%Y', errors='coerce')
df['end_year'] = df['completion_dt'].dt.year
if merge_purpose:
sector_mapping_table = pd.read_csv('../data/sector_mapping.csv')
df = df.merge(
sector_mapping_table[['mmg_purpose_sector', 'sector']],
left_on='mmg_purpose_sector',
right_on='mmg_purpose_sector',
how='left'
)
df['project_size_USD_calculated'] = df.apply(project_size_USD_calculated, axis=1)
return df
def narrow_convert_project_data(df, relevant_cols, start_date_col='start_date', end_date_col='completion_date', drop_na_end_date=True):
df = df[relevant_cols]
df['start_dt'] = pd.to_datetime(df[start_date_col], format='%d%b%Y', errors='coerce')
df['start_year'] = df['start_dt'].dt.year
df['completion_dt'] = pd.to_datetime(df[end_date_col], format='%d%b%Y', errors='coerce')
df['end_year'] = df['completion_dt'].dt.year
if drop_na_end_date:
df = df.dropna(subset=['end_year'])
return df
def extract_wb_projects(project_df):
wb_df = project_df[project_df.donor_name == 'WB']
wb_df['start_date'] = pd.to_datetime(wb_df.start_date)
    wb_df['created_year'] = wb_df.start_date.dt.year
    return wb_df
def assemble_sector_ratings(project_df, sector):
sector_df = project_df[project_df.sector == sector]
end_years_ids = sector_df.groupby(['country_code', 'end_year', 'ppd_project_id'], as_index=False).agg(
mean_proj_rating=('six_overall_rating', 'mean'),
total_proj_size=('project_size_USD_calculated', 'sum'),
project_rating=('six_overall_rating', 'mean')
)
def wm(series):
project_sizes = end_years_ids.iloc[series.index]['total_proj_size']
if np.any(project_sizes == 0):
return np.average(series)
else:
return np.average(series, weights=end_years_ids.iloc[series.index]['total_proj_size'])
treatment_df = end_years_ids.groupby(['country_code', 'end_year'], as_index=False).agg(
num_projs=('ppd_project_id', 'nunique'),
total_proj_size=('total_proj_size', 'sum'),
w_avg_rating=('project_rating', wm),
min_rating=('project_rating', min),
max_rating=('project_rating', max)
)
return treatment_df
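# Illustrative call chain (hypothetical sector name; not part of the original module):
# load the project table, then aggregate one sector per country and end year.
#   projects = load_projects("../data/aid_projects.csv")
#   health = assemble_sector_ratings(projects, sector="Health")
# `health` holds one row per (country_code, end_year) with the project count, the total
# project size in USD and the size-weighted average of the overall ratings.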
|
[
"pandas.read_csv",
"numpy.any",
"numpy.average",
"pandas.to_datetime"
] |
[((1083, 1123), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'low_memory': '(False)'}), '(data_path, low_memory=False)\n', (1094, 1123), True, 'import pandas as pd\n'), ((1981, 2049), 'pandas.to_datetime', 'pd.to_datetime', (['df[start_date_col]'], {'format': '"""%d%b%Y"""', 'errors': '"""coerce"""'}), "(df[start_date_col], format='%d%b%Y', errors='coerce')\n", (1995, 2049), True, 'import pandas as pd\n'), ((2118, 2184), 'pandas.to_datetime', 'pd.to_datetime', (['df[end_date_col]'], {'format': '"""%d%b%Y"""', 'errors': '"""coerce"""'}), "(df[end_date_col], format='%d%b%Y', errors='coerce')\n", (2132, 2184), True, 'import pandas as pd\n'), ((2421, 2453), 'pandas.to_datetime', 'pd.to_datetime', (['wb_df.start_date'], {}), '(wb_df.start_date)\n', (2435, 2453), True, 'import pandas as pd\n'), ((210, 250), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'low_memory': '(False)'}), '(data_path, low_memory=False)\n', (221, 250), True, 'import pandas as pd\n'), ((252, 294), 'pandas.read_csv', 'pd.read_csv', (['series_path'], {'low_memory': '(False)'}), '(series_path, low_memory=False)\n', (263, 294), True, 'import pandas as pd\n'), ((1163, 1229), 'pandas.to_datetime', 'pd.to_datetime', (["df['start_date']"], {'format': '"""%d%b%Y"""', 'errors': '"""coerce"""'}), "(df['start_date'], format='%d%b%Y', errors='coerce')\n", (1177, 1229), True, 'import pandas as pd\n'), ((1302, 1373), 'pandas.to_datetime', 'pd.to_datetime', (["df['completion_date']"], {'format': '"""%d%b%Y"""', 'errors': '"""coerce"""'}), "(df['completion_date'], format='%d%b%Y', errors='coerce')\n", (1316, 1373), True, 'import pandas as pd\n'), ((1471, 1512), 'pandas.read_csv', 'pd.read_csv', (['"""../data/sector_mapping.csv"""'], {}), "('../data/sector_mapping.csv')\n", (1482, 1512), True, 'import pandas as pd\n'), ((2982, 3008), 'numpy.any', 'np.any', (['(project_sizes == 0)'], {}), '(project_sizes == 0)\n', (2988, 3008), True, 'import numpy as np\n'), ((3025, 3043), 'numpy.average', 'np.average', (['series'], {}), '(series)\n', (3035, 3043), True, 'import numpy as np\n'), ((3069, 3148), 'numpy.average', 'np.average', (['series'], {'weights': "end_years_ids.iloc[series.index]['total_proj_size']"}), "(series, weights=end_years_ids.iloc[series.index]['total_proj_size'])\n", (3079, 3148), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 01:26:31 2019
@author: f.divruno
"""
import numpy as np
import pycraf as pycraf
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.time import Time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cartopy.crs as ccrs
import astropy.constants as const
import os as os
import astropy.units as u
from numba import jit
#%% Functions
def antenna_gain_times(angle):
'''
    SKA antenna gain, assumed to be constant in azimuth and to vary only with
    elevation (off-axis angle).
'''
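    # Piecewise gain envelope in linear (power) units:
    #   |angle| <= 0.1 deg           -> 1e6                 (60 dBi main beam)
    #   0.1 deg < |angle| <= 3.5 deg -> 1.58e3*angle**-2.5  (~ 32 - 25*log10(angle) dBi)
    #   |angle| > 3.5 deg            -> 1                   (0 dBi far side lobes)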
N = len(angle)
G = np.zeros(N)
angle = np.abs(angle)
G[angle<=0.1] = 10**6
G[(angle>0.1) & (angle<=3.5)] = 1.58e3*(angle[(angle>0.1) & (angle<=3.5)]**(-2.5))
G[angle> 3.5] = 1
return G # gain in the beam
def antenna_gain_RA1631(angle,do_bessel=False):
'''
Using pycraf to implement the antenna pattern in RA.1631
'''
D = 14.5*u.m
lda = 3e8/11e9*u.m
GdB = pycraf.antenna.ras_pattern(angle*u.deg,D,lda,do_bessel=do_bessel).value
G = 10**(GdB/10)+1e-4
return G
def create_orbits(N_planes=24, Sats_per_plane=66, orbit_incl=53*u.deg,
orbit_height=550*u.km, orbit_period=96*u.min, total_time=1*u.h,
time_steps=1000, Obs_Lat=-30.7*u.deg,
Obs_Lon=21.44*u.deg, Obs_height=1000*u.m,
plot_flag=0,
rand_seed=0):
'''
    Creates the orbits of all the satellites and propagates them for the indicated amount of time,
    then interpolates the XYZ position of each satellite in the AltAz reference frame
    to refine the propagation without excessive computational overhead.
Inputs: (all quantities from astropy.quantity)
N_planes : number or orbital planes
Sats_per_plane: num of satellites in an orbital plane
orbit_incl: inclination of the orbit wrt the equator in u.deg
orbit_height: height of the orbits in u.m or u.km
orbit_period:period in u.s
total_time: total time of the orbit generation in u.s
time_steps: number of steps to compute the position of the sats in the AzAlt frame.
Obs_Lat: observer latitude
Obs_Lon: observer longitude
Obs_height: observer altitude wrt sea level.
rand_seed: random seed to generate the offset in time
output:
sat_pos: coordinates in AltAz coord system
c_AltAz: Original Skycoord elements with the calculated positions.
'''
period = orbit_period
steps = (int(total_time/orbit_period)+1)*100 #100 steps as a standard for the first orbit generation.
if steps == 0:
raise SystemExit
t = np.linspace(0,1,steps)*total_time
N_orbits = (total_time.to(u.s)/period.to(u.s)).value
#generate the random offset in time
np.random.seed(rand_seed)
delta = np.random.random(1)*60*60*24*u.s
epoch0 = Time("2015-01-01 00:00:00", scale="utc")+delta
epoch = epoch0 + t
phi_max = 2*np.pi*N_orbits
# N_planes = 24
# Sats_per_plane = 66
N_sats = int(N_planes*Sats_per_plane)
    raa_step = 360/N_planes*u.deg # Right ascension steps
mA_step = 360/Sats_per_plane*u.deg #meanAnomaly steps
height = orbit_height + const.R_earth
incl = orbit_incl
if plot_flag:
# Plot the trajectory of the satellites
plt.figure(figsize=[20,10])
ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()
# Plot the trajectory of the satellites
Pos = np.zeros([N_sats,3,steps])
indSat = 0
c = list()
for i in range(N_planes):
raa = raa_step*i
for j in range(Sats_per_plane):
# position every satellite in the corresponding mean Anomaly
mA = mA_step*j #mean anomaly of the satellite (i,j)
phi = np.linspace(mA.to(u.rad).value,mA.to(u.rad).value+phi_max,steps)*u.rad # angle as a parameter
theta = 0
x = height*np.cos(phi)*np.cos(theta)
y = height*np.cos(phi)*np.sin(theta)
z = height*np.sin(phi)
#rotation about X axis (equivalent to inclination)
alpha = -(90*u.deg-incl)
C = np.cos(alpha)
S = np.sin(alpha)
INC_T = np.array([[1, 0, 0],[0, C, -S],[0, S, C]])
x1,y1,z1 = np.matmul(INC_T,np.array([x,y,z]))
#rotation about Z axis (equivalent to right ascension)
C = np.cos(raa)
S = np.sin(raa)
RA_T = np.array([[C, -S, 0],[S, C, 0],[0, 0, 1]])
Pos[indSat] = np.matmul(RA_T,np.array([x1,y1,z1]))
#generate the Skycoord element for each satellite
c.append(SkyCoord(Pos[indSat,0],Pos[indSat,1],Pos[indSat,2],unit='km', representation_type='cartesian', frame='gcrs', obstime = epoch))
indSat += 1
            print ('Generating orbit, sat num: ' + str(indSat))
if plot_flag:
#conversion to spherical coordinaes to plot in the world map view.
ax.plot(c[-1].spherical.lon.wrap_at(180*u.deg),c[-1].spherical.lat,'o')
    # Convert the GCRS coordinates to Horizontal (or AltAz) coordinates
Obs_location = EarthLocation.from_geodetic(lon=Obs_Lon,lat=Obs_Lat,height=Obs_height)
Obs_frame = AltAz(obstime=epoch,location=Obs_location)
c_AltAz = [None]*N_sats
for i in range(N_sats):
c_AltAz[i] = c[i].transform_to(Obs_frame)
print ('Transformation GCRS to AzAlt, sat num: ' + str(i))
# convert the list to a numpy array to operate easily
    # Interpolate the position obtained with the orbit propagator
# Interpolate the cartesian coordinates then recalculate the polar coords
steps = int(time_steps)
t2 = np.linspace(t[0],t[-1],steps)
sat_pos = np.zeros([N_sats,steps,6]) #[az,alt,distance,x,y,z]
# @jit(nopython=True, parallel=True)
def interpolation_sat():
for i in range(N_sats):
sat_pos[i,:,3] = np.interp(t2,t,c_AltAz[i].cartesian.x)
sat_pos[i,:,4] = np.interp(t2,t,c_AltAz[i].cartesian.y)
sat_pos[i,:,5] = np.interp(t2,t,c_AltAz[i].cartesian.z)
sat_pos[i,:,0] = np.arctan2(sat_pos[i,:,4],sat_pos[i,:,3])*180/np.pi*u.deg
sat_pos[i,:,1] = np.arctan2(sat_pos[i,:,5],np.sqrt(sat_pos[i,:,3]**2+sat_pos[i,:,4]**2))*180/np.pi*u.deg
sat_pos[i,:,2] = np.sqrt(sat_pos[i,:,3]**2+sat_pos[i,:,4]**2+sat_pos[i,:,5]**2)
print('Interpolating sat num: ' + str(i))
interpolation_sat()
return sat_pos, c_AltAz
def plot_orbit_AltAz(sat_pos, AzPoint=0, AltPoint=0):
'''
Plots the orbits in 2d rectangular plot
'''
N_sats = np.size(sat_pos,0)
plt.figure()
for i in range(N_sats):
        plt.plot(sat_pos[i,sat_pos[i,:,1]>=0,0],sat_pos[i,sat_pos[i,:,1]>=0,1],'o')  # plot only the times when the elevation is greater than 0 (visible sat)
plt.plot(AzPoint,AltPoint,'xr',markersize=5)
def plot_visible_sats(sat_pos,indT=40,):
ind = np.where(sat_pos[:,indT,1]>=0)[0]
plt.figure()
plt.plot(sat_pos[ind,indT,0],sat_pos[ind,indT,1],'o')
plt.title('visible satellites in time index : ' + str(indT))
plt.xlabel('Azimuth')
plt.ylabel('Altitude')
plt.figure()
plt.plot(sat_pos[ind,indT,2],'o')
plt.title('distance to visible satellites in time index : ' + str(indT))
plt.ylabel('Distance in km')
print('Number of visible satellites in %d is: %d'%(indT,len(ind)))
def generate_az_el_grid(el_min=15*u.deg,el_max=90*u.deg,el_step=1*u.deg):
'''
Generates the el az grid according to the ITU-R recommendation on
epfd calculation.
'''
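    # The azimuth step of each elevation ring is widened by 1/cos(elevation), so the
    # grid cells keep a roughly constant solid angle across the sky.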
# Creating the elev az grid
#Pointing parameters
elev_min = el_min
elev_max = el_max
elev_step = el_step
elev = np.linspace(elev_min.value,elev_max.value,int((elev_max.value-elev_min.value)/elev_step.value))*u.deg
N_elev = len(elev)
el = list()
az = list()
for i in range(len(elev)):
width = (elev[-1] - elev[0]).value/N_elev/np.cos(elev[i]*np.pi/180)
Az = (np.linspace(-180,180,int(360/width)))*u.deg
for j in range(len(Az)):
el.append(elev[i].value)
az.append(Az[j].value)
el = np.array(el)
az = np.array(az)
return el, az
def receive_power2(EIRP,fo,sat_pos,el,az):
'''
calculates the received power in the grid of pointings from el,az
input:
EIRP: in dBm
fo: centre frequency in astropy units
sat_pos: (sat index, time step, 6)
el: elevation array as numpy array
az: az arrayas numpy array
'''
EIRP_lin = 10**(EIRP/10)
fo = fo.to(u.MHz).value
steps = np.size(sat_pos,1)
#prepare the loop
avePrx = np.zeros(len(el))
maxPrx = np.zeros(len(el))
#To test with only one pointing
#el = np.array([ 32.7832])
#az = np.array([ 147.152])
ind = np.where(sat_pos[:,:,1]>=0)
visible_sats = np.unique(ind[0])
Prx = np.zeros([len(el),time_steps])
for i in range(len(el)):
# pointing in cartesian coords [3]
Po = np.array([np.cos(az[i]*u.deg)*np.cos(el[i]*u.deg),np.sin(az[i]*u.deg)*np.cos(el[i]*u.deg), np.sin(el[i]*u.deg)])
for sat_ind in visible_sats:
# Get the time indices where the satellite is visible
time_ind = ind[1][ind[0] == sat_ind]
# meshgrid to match the shape of the visible satellites
blk,P = np.meshgrid(np.ones(len(time_ind)),Po)
# Distance in metres
d = sat_pos[sat_ind,time_ind,2]
# satellite versor in AzAlt frame, cartesian coords
P_sats = np.array([sat_pos[sat_ind,time_ind,3], sat_pos[sat_ind,time_ind,4], sat_pos[sat_ind,time_ind,5]])/d
#angle between pointing vector and position of the satellite in AltAz frame
eff_alt = np.arccos(np.einsum('ij,ij->j', P, P_sats)) * 180 / np.pi
# Linear gain
# G_lin = antenna_gain_times(eff_alt)
G_lin = antenna_gain_RA1631(eff_alt,do_bessel=False)
# FSPL in linear units
# FSPL = 20*log10(f_MHz) + 20*log10(d_m) - 27.55 # in dB
FSPL_lin = (d**2)*(fo**2)*0.0017579236139586931 # d in metres, fo in MHz
#Power received from the satellite for each time step
Prx_sat = EIRP_lin * G_lin / FSPL_lin
# Accumulate the received power from each satellite in each time step
Prx[i,time_ind] += Prx_sat
# calculate the maximum power in each timestep, is for debugging
# Prx_max_time = np.maximum(Prx_max_time, Prx)
# Average for all the time calculated
avePrx[i] = np.sum(Prx[i])/steps
# Maximum power in all the time considered
maxPrx[i] = np.max(Prx[i])
print('Received power, point %d of %d' %(i,len(el)))
# return Prx, Prx_time, avePrx, maxPrx
return Prx,avePrx, maxPrx
@jit(nopython=True,parallel=True)
def offbeam_angle(ska_el,ska_az,sat_el,sat_az):
#angle between pointing vector and position of the satellite in AltAz frame
# angles need to be in radians
CEa = np.cos(ska_el)
CAa = np.cos(ska_az)
SEa = np.sin(ska_el)
SAa = np.sin(ska_az)
CEb = np.cos(sat_el)
CAb = np.cos(sat_az)
SEb = np.sin(sat_el)
SAb = np.sin(sat_az)
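    # Angular separation on the unit sphere, in degrees:
    #   cos(theta) = cos(el_a)*cos(el_b)*cos(az_a - az_b) + sin(el_a)*sin(el_b),
    # with cos(az_a - az_b) expanded into CAa*CAb + SAa*SAb.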
eff_alt = np.arccos(CEa*CAa*CEb*CAb+CEa*SAa*CEb*SAb+SEa*SEb)*180/np.pi
# eff_alt = np.arccos(np.einsum('ij,ij->j', P, P_sats)) * 180 / np.pi
return eff_alt
def receive_power3(EIRP,fo,sat_pos,el,az):
'''
calculates the received power in the grid of pointings from el,az
input:
EIRP: in dBm
fo: centre frequency in astropy units
sat_pos: (sat index, time step, 6)
el: elevation array as numpy array
        az: az array as a numpy array
'''
EIRP_lin = 10**(EIRP/10)
fo = fo.to(u.MHz).value
steps = np.size(sat_pos,1)
#prepare the loop
avePrx = np.zeros(len(el))
maxPrx = np.zeros(len(el))
#To test with only one pointing
#el = np.array([ 32.7832])
#az = np.array([ 147.152])
#looks for the time steps where the elevation is >=0
ind = np.where(sat_pos[:,:,1]>=0)
visible_sats = np.unique(ind[0])
Prx = np.zeros([len(el),time_steps])
for i in range(len(el)):
for sat_ind in visible_sats:
# Get the time indices where the satellite is visible
time_ind = ind[1][ind[0] == sat_ind]
# generate vectors of pointing angles
ska_el = np.ones(len(time_ind))*el[i]*np.pi/180
ska_az = np.ones(len(time_ind))*az[i]*np.pi/180
# Distance in metres
d = sat_pos[sat_ind,time_ind,2]
# satellite versor in AzAlt frame, cartesian coords
sat_el = (sat_pos[sat_ind,time_ind,1])*np.pi/180
sat_az = (sat_pos[sat_ind,time_ind,0])*np.pi/180
eff_alt = offbeam_angle(ska_el,ska_az,sat_el,sat_az)
# Linear gain
G_lin = antenna_gain_RA1631(eff_alt,do_bessel=False)
# FSPL in linear units
FSPL_lin = (d**2)*(fo**2)*0.0017579236139586931 # d in metres, fo in MHz
#Power received from the satellite for each time step
Prx_sat = EIRP_lin * G_lin / FSPL_lin
# Accumulate the received power from each satellite in each time step
Prx[i,time_ind] += Prx_sat
print('Received power, point %d of %d' %(i,len(el)))
# Average for all the time calculated
avePrx = np.sum(Prx,1)/steps
# Maximum power in all the time considered
maxPrx = np.max(Prx,1)
return Prx,avePrx, maxPrx
def plot_trimesh(el,az,Z,title='', view_el = 60, view_az = -90):
'''
'''
import matplotlib.tri as mtri
y = np.array(el)
x = np.array(az)
z = 10*np.log10(np.array(Z))
triang = mtri.Triangulation(x, y)
fig = plt.figure(figsize=[15,9])
ax = fig.add_subplot(1,1,1,projection='3d')
ax.plot_trisurf(triang, z, cmap='jet', edgecolor='none', linewidth=0, antialiased=False)
ax.view_init(elev = view_el, azim = view_az)
ax.set_xlabel('Azimuth')
ax.set_ylabel('Elevation')
ax.set_zlabel('Averaged power dBm ')
plt.title(title)
return fig,ax
def plot_rx_power_in_time(Point_az, Point_el, Prx_time,fig=[]):
ind = np.where((el>=Point_el) & (az>= Point_az))[0][0]
if fig==[]:
plt.figure()
else:
plt.figure(fig)
plt.plot(10*np.log10(Prx_time[ind]+1e-20))
    plt.title('received power in time pointing: el= %.1f deg, az = %f deg'%(Point_el, Point_az))
# plt.title('time domain received power in Az:%f , El: %f' %(Point_az,Point_el))
#%% Start the calculation
if __name__ == '__main__':
max_time = 3600*u.s #1*u.h
time_steps = 4000
N_planes = 24
Sats_per_plane = 66
RS = 5 # random seed for create_orbits
el_min = 20*u.deg
el_max = 90*u.deg
el_step = 1*u.deg
# Calculating received power from satellites
el,az = generate_az_el_grid(el_min, el_max, el_step)
N_trys = 1
avePrx = np.zeros([N_trys,len(el)])
maxPrx = np.zeros([N_trys,len(el)])
for i in range(N_trys):
np.random.seed()
RS = int(np.random.random()*10000)
identifier = '- %d planes - %d sats pp - seed %d'%(N_planes,Sats_per_plane,RS) # for plotting and saving
# Generate orbits
sat_pos,c_AltAz = create_orbits(N_planes=N_planes,
Sats_per_plane=Sats_per_plane,
rand_seed=RS,plot_flag=0,
total_time=max_time,
time_steps=time_steps)
# Plot the orbits in the AltAz frame
plot_orbit_AltAz(sat_pos)
plt.title('Orbits'+identifier)
plt.savefig('../satellite_results/Orbits in az el '+identifier+'-side.png')
# plot the visible sats in a determined time
#plot_visible_sats(sat_pos,indT=40)
# Radiated power
EIRP_4kHz = -15 #dBW/4kHz
#in 250 MHz channel
EIRP = EIRP_4kHz + 10*np.log10(250e6/4e3)
#Calculate the received power
Prx, avePrx[i], maxPrx[i] = receive_power3(EIRP,11e9*u.Hz,sat_pos,el,az)
# plot the received power in the sky
blk,ax = plot_trimesh(el,az,maxPrx[i],'Maximum received power '+ identifier ,0,180)
plt.savefig('../satellite_results/Max received power - full sky '+identifier+'-side.png')
ax.view_init(90,-90)
plt.draw()
plt.savefig('../satellite_results/Max received power - full sky '+identifier+'-front.png')
blk,ax = plot_trimesh(el,az,avePrx[i], 'Average received power in %s'%(max_time)+ identifier ,0,180)
plt.savefig('../satellite_results/Avg received power - full sky '+identifier+'-side.png')
ax.view_init(90,-90)
plt.draw()
plt.savefig('../satellite_results/Avg received power - full sky '+identifier+'-front.png')
#max in time domain:
k = np.where(maxPrx[i]==np.max(maxPrx[i]))
plot_rx_power_in_time(az[k],el[k],Prx)
plt.savefig('../satellite_results/Instantaneous received power - el %.2f Az %.2f'%(el[k],az[k])+identifier+'.png')
#save the max and averaged power
savefile = 0
if savefile :
files = os.listdir('../satellite_results')
filename = '../satellite_results/Satellites '+identifier
j=0
filename2 = filename + ' - ' + str(j)
while filename2 in files:
j+=1
filename2 = filename + ' - ' + str(j)
np.savez(filename2,el=el,az=az,Prx=Prx,maxPrx=maxPrx[i],avePrx=avePrx[i])
|
[
"matplotlib.pyplot.title",
"numpy.abs",
"numpy.random.seed",
"numpy.sum",
"numpy.arctan2",
"astropy.coordinates.AltAz",
"numpy.einsum",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.interp",
"numpy.unique",
"matplotlib.pyplot.draw",
"numpy.max",
"numpy.linspace",
"numpy.log10",
"numpy.arccos",
"astropy.coordinates.EarthLocation.from_geodetic",
"numpy.size",
"astropy.time.Time",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"numpy.savez",
"os.listdir",
"matplotlib.pyplot.plot",
"numpy.zeros",
"pycraf.antenna.ras_pattern",
"numpy.where",
"numba.jit",
"numpy.array",
"numpy.random.random",
"matplotlib.tri.Triangulation",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.xlabel",
"astropy.coordinates.SkyCoord",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((11518, 11551), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (11521, 11551), False, 'from numba import jit\n'), ((614, 625), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (622, 625), True, 'import numpy as np\n'), ((639, 652), 'numpy.abs', 'np.abs', (['angle'], {}), '(angle)\n', (645, 652), True, 'import numpy as np\n'), ((2932, 2957), 'numpy.random.seed', 'np.random.seed', (['rand_seed'], {}), '(rand_seed)\n', (2946, 2957), True, 'import numpy as np\n'), ((3659, 3687), 'numpy.zeros', 'np.zeros', (['[N_sats, 3, steps]'], {}), '([N_sats, 3, steps])\n', (3667, 3687), True, 'import numpy as np\n'), ((5415, 5487), 'astropy.coordinates.EarthLocation.from_geodetic', 'EarthLocation.from_geodetic', ([], {'lon': 'Obs_Lon', 'lat': 'Obs_Lat', 'height': 'Obs_height'}), '(lon=Obs_Lon, lat=Obs_Lat, height=Obs_height)\n', (5442, 5487), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((5502, 5545), 'astropy.coordinates.AltAz', 'AltAz', ([], {'obstime': 'epoch', 'location': 'Obs_location'}), '(obstime=epoch, location=Obs_location)\n', (5507, 5545), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((5975, 6006), 'numpy.linspace', 'np.linspace', (['t[0]', 't[-1]', 'steps'], {}), '(t[0], t[-1], steps)\n', (5986, 6006), True, 'import numpy as np\n'), ((6023, 6051), 'numpy.zeros', 'np.zeros', (['[N_sats, steps, 6]'], {}), '([N_sats, steps, 6])\n', (6031, 6051), True, 'import numpy as np\n'), ((6942, 6961), 'numpy.size', 'np.size', (['sat_pos', '(0)'], {}), '(sat_pos, 0)\n', (6949, 6961), True, 'import numpy as np\n'), ((6965, 6977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6975, 6977), True, 'import matplotlib.pyplot as plt\n'), ((7184, 7231), 'matplotlib.pyplot.plot', 'plt.plot', (['AzPoint', 'AltPoint', '"""xr"""'], {'markersize': '(5)'}), "(AzPoint, AltPoint, 'xr', markersize=5)\n", (7192, 7231), True, 'import matplotlib.pyplot as plt\n'), ((7326, 7338), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7336, 7338), True, 'import matplotlib.pyplot as plt\n'), ((7343, 7402), 'matplotlib.pyplot.plot', 'plt.plot', (['sat_pos[ind, indT, 0]', 'sat_pos[ind, indT, 1]', '"""o"""'], {}), "(sat_pos[ind, indT, 0], sat_pos[ind, indT, 1], 'o')\n", (7351, 7402), True, 'import matplotlib.pyplot as plt\n'), ((7466, 7487), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Azimuth"""'], {}), "('Azimuth')\n", (7476, 7487), True, 'import matplotlib.pyplot as plt\n'), ((7492, 7514), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (7502, 7514), True, 'import matplotlib.pyplot as plt\n'), ((7524, 7536), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7534, 7536), True, 'import matplotlib.pyplot as plt\n'), ((7541, 7577), 'matplotlib.pyplot.plot', 'plt.plot', (['sat_pos[ind, indT, 2]', '"""o"""'], {}), "(sat_pos[ind, indT, 2], 'o')\n", (7549, 7577), True, 'import matplotlib.pyplot as plt\n'), ((7656, 7684), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance in km"""'], {}), "('Distance in km')\n", (7666, 7684), True, 'import matplotlib.pyplot as plt\n'), ((8544, 8556), 'numpy.array', 'np.array', (['el'], {}), '(el)\n', (8552, 8556), True, 'import numpy as np\n'), ((8566, 8578), 'numpy.array', 'np.array', (['az'], {}), '(az)\n', (8574, 8578), True, 'import numpy as np\n'), ((9046, 9065), 'numpy.size', 'np.size', (['sat_pos', '(1)'], {}), '(sat_pos, 1)\n', (9053, 9065), True, 'import numpy as np\n'), ((9273, 9304), 
'numpy.where', 'np.where', (['(sat_pos[:, :, 1] >= 0)'], {}), '(sat_pos[:, :, 1] >= 0)\n', (9281, 9304), True, 'import numpy as np\n'), ((9320, 9337), 'numpy.unique', 'np.unique', (['ind[0]'], {}), '(ind[0])\n', (9329, 9337), True, 'import numpy as np\n'), ((11729, 11743), 'numpy.cos', 'np.cos', (['ska_el'], {}), '(ska_el)\n', (11735, 11743), True, 'import numpy as np\n'), ((11754, 11768), 'numpy.cos', 'np.cos', (['ska_az'], {}), '(ska_az)\n', (11760, 11768), True, 'import numpy as np\n'), ((11779, 11793), 'numpy.sin', 'np.sin', (['ska_el'], {}), '(ska_el)\n', (11785, 11793), True, 'import numpy as np\n'), ((11804, 11818), 'numpy.sin', 'np.sin', (['ska_az'], {}), '(ska_az)\n', (11810, 11818), True, 'import numpy as np\n'), ((11829, 11843), 'numpy.cos', 'np.cos', (['sat_el'], {}), '(sat_el)\n', (11835, 11843), True, 'import numpy as np\n'), ((11854, 11868), 'numpy.cos', 'np.cos', (['sat_az'], {}), '(sat_az)\n', (11860, 11868), True, 'import numpy as np\n'), ((11879, 11893), 'numpy.sin', 'np.sin', (['sat_el'], {}), '(sat_el)\n', (11885, 11893), True, 'import numpy as np\n'), ((11904, 11918), 'numpy.sin', 'np.sin', (['sat_az'], {}), '(sat_az)\n', (11910, 11918), True, 'import numpy as np\n'), ((12538, 12557), 'numpy.size', 'np.size', (['sat_pos', '(1)'], {}), '(sat_pos, 1)\n', (12545, 12557), True, 'import numpy as np\n'), ((12822, 12853), 'numpy.where', 'np.where', (['(sat_pos[:, :, 1] >= 0)'], {}), '(sat_pos[:, :, 1] >= 0)\n', (12830, 12853), True, 'import numpy as np\n'), ((12869, 12886), 'numpy.unique', 'np.unique', (['ind[0]'], {}), '(ind[0])\n', (12878, 12886), True, 'import numpy as np\n'), ((14388, 14402), 'numpy.max', 'np.max', (['Prx', '(1)'], {}), '(Prx, 1)\n', (14394, 14402), True, 'import numpy as np\n'), ((14584, 14596), 'numpy.array', 'np.array', (['el'], {}), '(el)\n', (14592, 14596), True, 'import numpy as np\n'), ((14605, 14617), 'numpy.array', 'np.array', (['az'], {}), '(az)\n', (14613, 14617), True, 'import numpy as np\n'), ((14670, 14694), 'matplotlib.tri.Triangulation', 'mtri.Triangulation', (['x', 'y'], {}), '(x, y)\n', (14688, 14694), True, 'import matplotlib.tri as mtri\n'), ((14710, 14737), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[15, 9]'}), '(figsize=[15, 9])\n', (14720, 14737), True, 'import matplotlib.pyplot as plt\n'), ((15047, 15063), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (15056, 15063), True, 'import matplotlib.pyplot as plt\n'), ((15343, 15441), 'matplotlib.pyplot.title', 'plt.title', (["('received power in time poining: el= %.1f deg, az = %f deg' % (Point_el,\n Point_az))"], {}), "('received power in time poining: el= %.1f deg, az = %f deg' % (\n Point_el, Point_az))\n", (15352, 15441), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1074), 'pycraf.antenna.ras_pattern', 'pycraf.antenna.ras_pattern', (['(angle * u.deg)', 'D', 'lda'], {'do_bessel': 'do_bessel'}), '(angle * u.deg, D, lda, do_bessel=do_bessel)\n', (1030, 1074), True, 'import pycraf as pycraf\n'), ((2792, 2816), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'steps'], {}), '(0, 1, steps)\n', (2803, 2816), True, 'import numpy as np\n'), ((3016, 3056), 'astropy.time.Time', 'Time', (['"""2015-01-01 00:00:00"""'], {'scale': '"""utc"""'}), "('2015-01-01 00:00:00', scale='utc')\n", (3020, 3056), False, 'from astropy.time import Time\n'), ((3490, 3518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 10]'}), '(figsize=[20, 10])\n', (3500, 3518), True, 'import matplotlib.pyplot as plt\n'), ((7014, 7107), 'matplotlib.pyplot.plot', 
'plt.plot', (['sat_pos[i, sat_pos[i, :, 1] >= 0, 0]', 'sat_pos[i, sat_pos[i, :, 1] >= 0, 1]', '"""o"""'], {}), "(sat_pos[i, sat_pos[i, :, 1] >= 0, 0], sat_pos[i, sat_pos[i, :, 1] >=\n 0, 1], 'o')\n", (7022, 7107), True, 'import matplotlib.pyplot as plt\n'), ((7287, 7321), 'numpy.where', 'np.where', (['(sat_pos[:, indT, 1] >= 0)'], {}), '(sat_pos[:, indT, 1] >= 0)\n', (7295, 7321), True, 'import numpy as np\n'), ((11354, 11368), 'numpy.max', 'np.max', (['Prx[i]'], {}), '(Prx[i])\n', (11360, 11368), True, 'import numpy as np\n'), ((14303, 14317), 'numpy.sum', 'np.sum', (['Prx', '(1)'], {}), '(Prx, 1)\n', (14309, 14317), True, 'import numpy as np\n'), ((15245, 15257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15255, 15257), True, 'import matplotlib.pyplot as plt\n'), ((15276, 15291), 'matplotlib.pyplot.figure', 'plt.figure', (['fig'], {}), '(fig)\n', (15286, 15291), True, 'import matplotlib.pyplot as plt\n'), ((16037, 16053), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (16051, 16053), True, 'import numpy as np\n'), ((16661, 16693), 'matplotlib.pyplot.title', 'plt.title', (["('Orbits' + identifier)"], {}), "('Orbits' + identifier)\n", (16670, 16693), True, 'import matplotlib.pyplot as plt\n'), ((16700, 16779), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../satellite_results/Orbits in az el ' + identifier + '-side.png')"], {}), "('../satellite_results/Orbits in az el ' + identifier + '-side.png')\n", (16711, 16779), True, 'import matplotlib.pyplot as plt\n'), ((17345, 17442), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../satellite_results/Max received power - full sky ' + identifier +\n '-side.png')"], {}), "('../satellite_results/Max received power - full sky ' +\n identifier + '-side.png')\n", (17356, 17442), True, 'import matplotlib.pyplot as plt\n'), ((17472, 17482), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (17480, 17482), True, 'import matplotlib.pyplot as plt\n'), ((17491, 17589), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../satellite_results/Max received power - full sky ' + identifier +\n '-front.png')"], {}), "('../satellite_results/Max received power - full sky ' +\n identifier + '-front.png')\n", (17502, 17589), True, 'import matplotlib.pyplot as plt\n'), ((17716, 17813), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../satellite_results/Avg received power - full sky ' + identifier +\n '-side.png')"], {}), "('../satellite_results/Avg received power - full sky ' +\n identifier + '-side.png')\n", (17727, 17813), True, 'import matplotlib.pyplot as plt\n'), ((17843, 17853), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (17851, 17853), True, 'import matplotlib.pyplot as plt\n'), ((17862, 17960), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../satellite_results/Avg received power - full sky ' + identifier +\n '-front.png')"], {}), "('../satellite_results/Avg received power - full sky ' +\n identifier + '-front.png')\n", (17873, 17960), True, 'import matplotlib.pyplot as plt\n'), ((18097, 18227), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../satellite_results/Instantaneous received power - el %.2f Az %.2f' % (\n el[k], az[k]) + identifier + '.png')"], {}), "(\n '../satellite_results/Instantaneous received power - el %.2f Az %.2f' %\n (el[k], az[k]) + identifier + '.png')\n", (18108, 18227), True, 'import matplotlib.pyplot as plt\n'), ((4370, 4383), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (4376, 4383), True, 'import numpy as np\n'), ((4400, 4413), 'numpy.sin', 'np.sin', (['alpha'], {}), 
'(alpha)\n', (4406, 4413), True, 'import numpy as np\n'), ((4434, 4478), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, C, -S], [0, S, C]]'], {}), '([[1, 0, 0], [0, C, -S], [0, S, C]])\n', (4442, 4478), True, 'import numpy as np\n'), ((4623, 4634), 'numpy.cos', 'np.cos', (['raa'], {}), '(raa)\n', (4629, 4634), True, 'import numpy as np\n'), ((4651, 4662), 'numpy.sin', 'np.sin', (['raa'], {}), '(raa)\n', (4657, 4662), True, 'import numpy as np\n'), ((4682, 4726), 'numpy.array', 'np.array', (['[[C, -S, 0], [S, C, 0], [0, 0, 1]]'], {}), '([[C, -S, 0], [S, C, 0], [0, 0, 1]])\n', (4690, 4726), True, 'import numpy as np\n'), ((6215, 6255), 'numpy.interp', 'np.interp', (['t2', 't', 'c_AltAz[i].cartesian.x'], {}), '(t2, t, c_AltAz[i].cartesian.x)\n', (6224, 6255), True, 'import numpy as np\n'), ((6283, 6323), 'numpy.interp', 'np.interp', (['t2', 't', 'c_AltAz[i].cartesian.y'], {}), '(t2, t, c_AltAz[i].cartesian.y)\n', (6292, 6323), True, 'import numpy as np\n'), ((6351, 6391), 'numpy.interp', 'np.interp', (['t2', 't', 'c_AltAz[i].cartesian.z'], {}), '(t2, t, c_AltAz[i].cartesian.z)\n', (6360, 6391), True, 'import numpy as np\n'), ((6623, 6701), 'numpy.sqrt', 'np.sqrt', (['(sat_pos[i, :, 3] ** 2 + sat_pos[i, :, 4] ** 2 + sat_pos[i, :, 5] ** 2)'], {}), '(sat_pos[i, :, 3] ** 2 + sat_pos[i, :, 4] ** 2 + sat_pos[i, :, 5] ** 2)\n', (6630, 6701), True, 'import numpy as np\n'), ((8341, 8370), 'numpy.cos', 'np.cos', (['(elev[i] * np.pi / 180)'], {}), '(elev[i] * np.pi / 180)\n', (8347, 8370), True, 'import numpy as np\n'), ((11253, 11267), 'numpy.sum', 'np.sum', (['Prx[i]'], {}), '(Prx[i])\n', (11259, 11267), True, 'import numpy as np\n'), ((11938, 12006), 'numpy.arccos', 'np.arccos', (['(CEa * CAa * CEb * CAb + CEa * SAa * CEb * SAb + SEa * SEb)'], {}), '(CEa * CAa * CEb * CAb + CEa * SAa * CEb * SAb + SEa * SEb)\n', (11947, 12006), True, 'import numpy as np\n'), ((14638, 14649), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (14646, 14649), True, 'import numpy as np\n'), ((15167, 15212), 'numpy.where', 'np.where', (['((el >= Point_el) & (az >= Point_az))'], {}), '((el >= Point_el) & (az >= Point_az))\n', (15175, 15212), True, 'import numpy as np\n'), ((15308, 15339), 'numpy.log10', 'np.log10', (['(Prx_time[ind] + 1e-20)'], {}), '(Prx_time[ind] + 1e-20)\n', (15316, 15339), True, 'import numpy as np\n'), ((18325, 18359), 'os.listdir', 'os.listdir', (['"""../satellite_results"""'], {}), "('../satellite_results')\n", (18335, 18359), True, 'import os as os\n'), ((18621, 18699), 'numpy.savez', 'np.savez', (['filename2'], {'el': 'el', 'az': 'az', 'Prx': 'Prx', 'maxPrx': 'maxPrx[i]', 'avePrx': 'avePrx[i]'}), '(filename2, el=el, az=az, Prx=Prx, maxPrx=maxPrx[i], avePrx=avePrx[i])\n', (18629, 18699), True, 'import numpy as np\n'), ((3551, 3569), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3567, 3569), True, 'import cartopy.crs as ccrs\n'), ((4146, 4159), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4152, 4159), True, 'import numpy as np\n'), ((4195, 4208), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4201, 4208), True, 'import numpy as np\n'), ((4232, 4243), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4238, 4243), True, 'import numpy as np\n'), ((4516, 4535), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (4524, 4535), True, 'import numpy as np\n'), ((4766, 4788), 'numpy.array', 'np.array', (['[x1, y1, z1]'], {}), '([x1, y1, z1])\n', (4774, 4788), True, 'import numpy as np\n'), ((4884, 5017), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['Pos[indSat, 
0]', 'Pos[indSat, 1]', 'Pos[indSat, 2]'], {'unit': '"""km"""', 'representation_type': '"""cartesian"""', 'frame': '"""gcrs"""', 'obstime': 'epoch'}), "(Pos[indSat, 0], Pos[indSat, 1], Pos[indSat, 2], unit='km',\n representation_type='cartesian', frame='gcrs', obstime=epoch)\n", (4892, 5017), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((9566, 9587), 'numpy.sin', 'np.sin', (['(el[i] * u.deg)'], {}), '(el[i] * u.deg)\n', (9572, 9587), True, 'import numpy as np\n'), ((10091, 10198), 'numpy.array', 'np.array', (['[sat_pos[sat_ind, time_ind, 3], sat_pos[sat_ind, time_ind, 4], sat_pos[\n sat_ind, time_ind, 5]]'], {}), '([sat_pos[sat_ind, time_ind, 3], sat_pos[sat_ind, time_ind, 4],\n sat_pos[sat_ind, time_ind, 5]])\n', (10099, 10198), True, 'import numpy as np\n'), ((16071, 16089), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (16087, 16089), True, 'import numpy as np\n'), ((17025, 17055), 'numpy.log10', 'np.log10', (['(250000000.0 / 4000.0)'], {}), '(250000000.0 / 4000.0)\n', (17033, 17055), True, 'import numpy as np\n'), ((18023, 18040), 'numpy.max', 'np.max', (['maxPrx[i]'], {}), '(maxPrx[i])\n', (18029, 18040), True, 'import numpy as np\n'), ((2970, 2989), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (2986, 2989), True, 'import numpy as np\n'), ((4134, 4145), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4140, 4145), True, 'import numpy as np\n'), ((4183, 4194), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4189, 4194), True, 'import numpy as np\n'), ((9485, 9506), 'numpy.cos', 'np.cos', (['(az[i] * u.deg)'], {}), '(az[i] * u.deg)\n', (9491, 9506), True, 'import numpy as np\n'), ((9505, 9526), 'numpy.cos', 'np.cos', (['(el[i] * u.deg)'], {}), '(el[i] * u.deg)\n', (9511, 9526), True, 'import numpy as np\n'), ((9525, 9546), 'numpy.sin', 'np.sin', (['(az[i] * u.deg)'], {}), '(az[i] * u.deg)\n', (9531, 9546), True, 'import numpy as np\n'), ((9545, 9566), 'numpy.cos', 'np.cos', (['(el[i] * u.deg)'], {}), '(el[i] * u.deg)\n', (9551, 9566), True, 'import numpy as np\n'), ((6419, 6465), 'numpy.arctan2', 'np.arctan2', (['sat_pos[i, :, 4]', 'sat_pos[i, :, 3]'], {}), '(sat_pos[i, :, 4], sat_pos[i, :, 3])\n', (6429, 6465), True, 'import numpy as np\n'), ((10336, 10368), 'numpy.einsum', 'np.einsum', (['"""ij,ij->j"""', 'P', 'P_sats'], {}), "('ij,ij->j', P, P_sats)\n", (10345, 10368), True, 'import numpy as np\n'), ((6532, 6586), 'numpy.sqrt', 'np.sqrt', (['(sat_pos[i, :, 3] ** 2 + sat_pos[i, :, 4] ** 2)'], {}), '(sat_pos[i, :, 3] ** 2 + sat_pos[i, :, 4] ** 2)\n', (6539, 6586), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.special import kv, iv # Needed for K1 in Well class, and in CircInhom
from .aquifer import AquiferData
from .element import Element
from .equation import InhomEquation
# param_maq and param_3d build the layered-aquifer parameter arrays used by the
# Maq/3D wrappers below; they are assumed to live in .aquifer_parameters as in
# the rest of the package.
from .aquifer_parameters import param_maq, param_3d
class CircInhomData(AquiferData):
    def __init__(self, model, x0=0, y0=0, R=1, kaq=[1], Haq=[1], Hll=[1],
                 c=[1], Saq=[.1], Sll=[.1], topboundary='imp',
                 phreatictop=False):
        AquiferData.__init__(self, model, kaq, Haq, Hll, c, Saq, Sll,
                             topboundary, phreatictop)
self.x0 = float(x0)
self.y0 = float(y0)
self.R = float(R)
self.Rsq = self.R ** 2
self.area = np.pi * self.Rsq
self.model.addInhom(self)
def isInside(self, x, y):
rv = False
if (x - self.x0) ** 2 + (y - self.y0) ** 2 < self.Rsq:
rv = True
return rv
class CircInhomDataMaq(CircInhomData):
def __init__(self, model, x0=0, y0=0, R=1, kaq=[1], z=[1, 0], c=[],
Saq=[0.001], Sll=[0], topboundary='imp', phreatictop=False):
kaq, Haq, Hll, c, Saq, Sll = param_maq(kaq, z, c, Saq, Sll,
topboundary, phreatictop)
        CircInhomData.__init__(self, model, x0, y0, R, kaq, Haq, Hll, c, Saq,
                               Sll, topboundary, phreatictop)
class CircInhomData3D(CircInhomData):
def __init__(self, model, x0=0, y0=0, R=1, kaq=1, z=[4, 3, 2, 1],
Saq=[0.3, 0.001, 0.001], kzoverkh=0.1, phreatictop=True,
topboundary='conf', topres=0, topthick=0, topSll=0):
kaq, Haq, Hll, c, Saq, Sll = param_3d(kaq, z, Saq, kzoverkh,
phreatictop, topboundary, topres,
topthick, topSll)
        CircInhomData.__init__(self, model, x0, y0, R, kaq, Haq, Hll, c, Saq,
                               Sll, 'imp', phreatictop)
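# Hedged usage sketch (added, not part of the original module): a circular
# inhomogeneity is set up by registering the aquifer data first and then adding
# the matching element with the same centre and radius; ModelMaq comes from the
# surrounding ttim package and is assumed to be importable where this is run.
#
#   ml = ModelMaq(kaq=[4, 5], z=[4, 2, 1, 0], c=[100], Saq=[1e-3, 1e-4],
#                 Sll=[1e-6], tmin=1, tmax=10)
#   CircInhomDataMaq(ml, x0=0, y0=0, R=2.0, kaq=[10, 2], z=[4, 2, 1, 0],
#                    c=[200], Saq=[2e-3, 2e-4], Sll=[1e-5])
#   CircInhomRadial(ml, x0=0, y0=0, R=2.0)
#   ml.solve()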
class BesselRatioApprox:
# Never fully debugged
def __init__(self, Norder, Nterms):
        self.Norder = Norder + 1
        self.Nterms = Nterms + 1
        self.krange = np.arange(self.Nterms)
        self.minonek = (-np.ones(self.Nterms)) ** self.krange
        self.hankeltot = np.ones((self.Norder, 2 * self.Nterms), 'd')
        self.muk = np.ones((self.Norder, self.Nterms), 'd')
        self.nuk = np.ones((self.Norder, self.Nterms), 'd')
for n in range(self.Norder):
mu = 4.0*n**2
for k in range(1,self.Nterms):
self.hankeltot[n,k] = self.hankeltot[n,k-1] * (mu - (2*k-1)**2) / ( 4.0 * k )
for k in range(self.Nterms):
self.muk[n,k] = ( 4.0 * n**2 + 16.0 * k**2 - 1.0 ) / ( 4.0 * n**2 - (4.0*k - 1.0)**2 )
self.nuk[n,k] = ( 4.0 * n**2 + 4.0 * (2.0*k+1.0)**2 - 1.0 ) / ( 4.0 * n**2 - (4.0*k + 1.0)**2 )
self.hankelnk = self.hankeltot[:,:self.Nterms]
self.hankeln2k = self.hankeltot[:,::2]
self.hankeln2kp1 = self.hankeltot[:,1::2]
def ivratio( self, rho, R, lab):
lab = np.atleast_1d(lab)
rv = np.empty((self.Norder,len(lab)),'D')
for k in range(len(lab)):
top = np.sum( self.minonek * self.hankelnk / ( 2.0 * rho / lab[k] )**self.krange, 1 )
bot = np.sum( self.minonek * self.hankelnk / ( 2.0 * R / lab[k] )**self.krange, 1 )
rv[:,k] = top / bot * np.sqrt ( float(R) / rho ) * np.exp( (rho-R)/ lab[k] )
return rv
def kvratio( self, rho, R, lab ):
lab = np.atleast_1d(lab)
rv = np.empty((self.Norder,len(lab)),'D')
for k in range(len(lab)):
top = np.sum( self.hankelnk / ( 2.0 * rho / lab[k] )**self.krange, 1 )
bot = np.sum( self.hankelnk / ( 2.0 * R / lab[k] )**self.krange, 1 )
rv[:,k] = top / bot * np.sqrt ( float(R) / rho ) * np.exp( (R-rho)/ lab[k] )
return rv
def ivratiop( self, rho, R, lab ):
lab = np.atleast_1d(lab)
rv = np.empty((self.Norder,len(lab)),'D')
for k in range(len(lab)):
top = np.sum( self.muk * self.hankeln2k / ( 2.0 * rho / lab[k] )**(2*self.krange), 1 ) - \
np.sum( self.nuk * self.hankeln2kp1 / ( 2.0 * rho / lab[k] )**(2*self.krange+1), 1 )
bot = np.sum( self.minonek * self.hankelnk / ( 2.0 * R / lab[k] )**self.krange, 1 )
rv[:,k] = top / bot * np.sqrt ( float(R) / rho ) * np.exp( (rho-R)/ lab[k] )
return rv
def kvratiop( self, rho, R, lab ):
lab = np.atleast_1d(lab)
rv = np.empty((self.Norder,len(lab)),'D')
for k in range(len(lab)):
top = np.sum( self.muk * self.hankeln2k / ( 2.0 * rho / lab[k] )**(2*self.krange), 1 ) + \
np.sum( self.nuk * self.hankeln2kp1 / ( 2.0 * rho / lab[k] )**(2*self.krange+1), 1 )
bot = np.sum( self.hankelnk / ( 2.0 * R / lab[k] )**self.krange, 1 )
rv[:,k] = -top / bot * np.sqrt ( float(R) / rho ) * np.exp( (R-rho)/ lab[k] )
return rv
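# Hedged diagnostic (added, not original): BesselRatioApprox is meant to return
# the same ratios that the elements below otherwise compute directly with
# scipy.special (e.g. iv(0, rho/lab) / iv(0, R/lab)), but the class is flagged
# as never fully debugged, so this helper only compares the two routes for one
# illustrative (rho, R, lab); the numbers are placeholders, not test values.
def _compare_ivratio_with_scipy(rho=90.0, R=100.0, lab=2.0 + 1.0j):
    approx = BesselRatioApprox(0, 3)
    direct = iv(0, rho / lab) / iv(0, R / lab)
    series = approx.ivratio(rho, R, lab)[0, 0]
    return direct, series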
class CircInhomRadial(Element, InhomEquation):
def __init__(self, model, x0=0, y0=0, R=1.0, label=None):
Element.__init__(self, model, nparam=2 * model.aq.naq,
nunknowns=2 * model.aq.naq,
layers=range(model.aq.naq), type='z',
name='CircInhom', label=label)
self.x0 = float(x0)
self.y0 = float(y0)
self.R = float(R)
self.model.addElement(self)
self.approx = BesselRatioApprox(0, 2)
def __repr__(self):
return self.name + ' at ' + str((self.x0, self.y0))
def initialize(self):
        self.xc = np.array([self.x0 + self.R])
        self.yc = np.array([self.y0])
self.thetacp = np.zeros(1)
self.ncp = 1
self.aqin = self.model.aq.findAquiferData(
self.x0 + (1 - 1e-8) * self.R, self.y0)
assert self.aqin.R == self.R, (
'Radius of CircInhom and CircInhomData must be equal')
self.aqout = self.model.aq.findAquiferData(
self.x0 + (1 + 1e-8) * self.R, self.y0)
self.setbc()
self.facin = np.ones_like(self.aqin.lab2)
self.facout = np.ones_like(self.aqout.lab2)
        # To keep track of which circles are small
        self.circ_in_small = np.ones((self.aqin.naq, self.model.nin), dtype='i')
        self.circ_out_small = np.ones((self.aqout.naq, self.model.nin), dtype='i')
self.Rbig = 700
#for i in range(self.aqin.Naq):
# for j in range(self.model.Nin):
# assert self.R / abs(self.aqin.lab2[i,j,0]) < self.Rbig, 'TTim input error, Radius too big'
# assert self.R / abs(self.aqout.lab2[i,j,0]) < self.Rbig, 'TTim input error, Radius too big'
# if self.R / abs(self.aqin.lab2[i,j,0]) < self.Rbig:
# self.circ_in_small[i,j] = 1
# self.facin[i,j,:] = 1.0 / iv(0, self.R / self.aqin.lab2[i,j,:])
# if self.R / abs(self.aqout.lab2[i,j,0]) < self.Rbig:
# self.circ_out_small[i,j] = 1
# self.facout[i,j,:] = 1.0 / kv(0, self.R / self.aqout.lab2[i,j,:])
#for i in range(self.aqin.Naq):
# for j in range(self.model.Nin):
# assert self.R / abs(self.aqin.lab2[i,j,0]) < 900, 'radius too large compared to aqin lab2[i,j,0] '+str((i,j))
# assert self.R / abs(self.aqout.lab2[i,j,0]) < 900, 'radius too large compared to aqin lab2[i,j,0] '+str((i,j))
#self.facin = 1.0 / iv(0, self.R / self.aqin.lab2)
#self.facout = 1.0 / kv(0, self.R / self.aqout.lab2)
self.parameters = np.zeros((self.model.Ngvbc, self.Nparam,
self.model.Np), 'D')
def potinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None: aq = self.model.aq.findAquiferData(x, y)
rv = np.zeros((self.nparam, aq.naq, self.model.nin,
self.model.npin), 'D')
if aq == self.aqin:
r = np.sqrt((x - self.x0) ** 2 + (y - self.y0) ** 2)
for i in range(self.aqin.Naq):
for j in range(self.model.Nin):
if abs(r - self.R) / abs(self.aqin.lab2[i, j, 0]) < self.Rzero:
if self.circ_in_small[i, j]:
rv[i, i, j, :] = self.facin[i, j, :] * \
iv(0, r / self.aqin.lab2[i, j, :])
else:
print('using approx')
rv[i, i, j, :] = self.approx.ivratio(
r, self.R, self.aqin.lab2[i, j, :])
if aq == self.aqout:
r = np.sqrt( (x - self.x0) ** 2 + (y - self.y0) ** 2)
for i in range(self.aqout.Naq):
for j in range(self.model.Nin):
if abs(r - self.R) / abs(self.aqout.lab2[i, j, 0]) < self.Rzero:
if self.circ_out_small[i, j]:
rv[self.aqin.Naq + i, i, j, :] = \
self.facin[i, j, :] * \
kv(0, r / self.aqout.lab2[i, j, :])
else:
print('using approx')
rv[self.aqin.Naq + i, i, j, :] = \
self.approx.kvratio(r, self.R,
self.aqout.lab2[i, j, :])
rv.shape = (self.Nparam, aq.Naq, self.model.Np)
return rv
    def disinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None:
aq = self.model.aq.findAquiferData(x, y)
qx = np.zeros((self.nparam, aq.naq, self.model.np), 'D')
qy = np.zeros((self.nparam, aq.naq, self.model.np), 'D')
if aq == self.aqin:
qr = np.zeros((self.nparam, aq.naq, self.model.nin,
self.model.npin), 'D')
r = np.sqrt((x - self.x0) ** 2 + (y - self.y0) ** 2)
if r < 1e-20:
r = 1e-20 # As we divide by that on the return
for i in range(self.aqin.Naq):
for j in range(self.model.Nin):
if abs(r - self.R) / abs(self.aqin.lab2[i, j, 0]) < self.Rzero:
if self.circ_in_small[i, j]:
qr[i, i, j, :] = -self.facin[i, j, :] * \
iv(1, r / self.aqin.lab2[i, j, :] ) / \
self.aqin.lab2[i, j, :]
else:
qr[i, i, j, :] = -self.approx.ivratiop(r, self.R,
self.aqin.lab2[i, j, :]) / \
self.aqin.lab2[i, j, :]
qr.shape = (self.nparam, aq.naq, self.model.np)
            qx[:] = qr * (x - self.x0) / r
            qy[:] = qr * (y - self.y0) / r
if aq == self.aqout:
qr = np.zeros((self.Nparam, aq.Naq,
self.model.Nin, self.model.Npin), 'D')
r = np.sqrt((x-self.x0) ** 2 + (y - self.y0) ** 2)
for i in range(self.aqout.Naq):
for j in range(self.model.Nin):
if abs(r - self.R) / abs(self.aqout.lab2[i, j, 0]) < self.Rzero:
if self.circ_out_small[i,j]:
qr[self.aqin.Naq + i, i, j, :] = \
self.facin[i, j, :] * \
kv(1, r / self.aqout.lab2[i, j, :]) / \
self.aqout.lab2[i, j, :]
else:
qr[self.aqin.Naq + i, i, j, :] = \
self.approx.kvratiop(r, self.R,
self.aqout.lab2[i, j, :]) / \
self.aqout.lab2[i, j, :]
qr.shape = (self.Nparam, aq.Naq, self.model.Np)
qx[:] = qr * (x - self.x0) / r
qy[:] = qr * (y - self.y0) / r
return qx, qy
def layout(self):
alpha = np.linspace(0, 2 * np.pi, 100)
return 'line', self.x0 + self.R * np.cos(alpha), \
self.y0 + self.R * np.sin(alpha)
# class CircInhom(Element,InhomEquation):
# def __init__(self,model,x0=0,y0=0,R=1.0,order=0,label=None,test=False):
# Element.__init__(self, model, Nparam=2*model.aq.Naq*(2*order+1), Nunknowns=2*model.aq.Naq*(2*order+1), layers=range(model.aq.Naq), type='z', name='CircInhom', label=label)
# self.x0 = float(x0); self.y0 = float(y0); self.R = float(R)
# self.order = order
# self.approx = BesselRatioApprox(0,3)
# self.test=test
# self.model.addElement(self)
# def __repr__(self):
# return self.name + ' at ' + str((self.x0,self.y0))
# def initialize(self):
# self.Ncp = 2*self.order + 1
# self.thetacp = np.arange(0,2*np.pi,(2*np.pi)/self.Ncp)
# self.xc = self.x0 + self.R * np.cos( self.thetacp )
# self.yc = self.y0 + self.R * np.sin( self.thetacp )
# self.aqin = self.model.aq.findAquiferData(self.x0 + (1-1e-10)*self.R,self.y0)
# self.aqout = self.model.aq.findAquiferData(self.x0+(1.0+1e-8)*self.R,self.y0)
# assert self.aqin.Naq == self.aqout.Naq, 'TTim input error: Number of layers needs to be the same inside and outside circular inhomogeneity'
# # Now that aqin is known, check that radii of circles are the same
# assert self.aqin.R == self.R, 'TTim Input Error: Radius of CircInhom and CircInhomData must be equal'
# self.setbc()
# self.facin = np.zeros((self.order+1,self.aqin.Naq,self.model.Nin,self.model.Npin),dtype='D')
# self.facout = np.zeros((self.order+1,self.aqin.Naq,self.model.Nin,self.model.Npin),dtype='D')
# self.circ_in_small = np.zeros((self.aqin.Naq,self.model.Nin),dtype='i') # To keep track which circles are small
# self.circ_out_small = np.zeros((self.aqout.Naq,self.model.Nin),dtype='i')
# self.besapprox = BesselRatioApprox(self.order,2) # Nterms = 2 is probably enough
# self.Rbig = 200
# for i in range(self.aqin.Naq):
# for j in range(self.model.Nin):
# # When the circle is too big, an assertion is thrown. In the future, the approximation of the ratio of bessel functions needs to be completed
# # For now, the logic is there, but not used
# if self.test:
# print('inside relative radius: ',self.R / abs(self.aqin.lab2[i,j,0]))
# print('outside relative radius: ',self.R / abs(self.aqout.lab2[i,j,0]))
# #assert self.R / abs(self.aqin.lab2[i,j,0]) < self.Rbig, 'TTim input error, Radius too big'
# #assert self.R / abs(self.aqout.lab2[i,j,0]) < self.Rbig, 'TTim input error, Radius too big'
# if self.R / abs(self.aqin.lab2[i,j,0]) < self.Rbig:
# self.circ_in_small[i,j] = 1
# for n in range(self.order+1):
# self.facin[n,i,j,:] = 1.0 / iv(n, self.R / self.aqin.lab2[i,j,:])
# if self.R / abs(self.aqout.lab2[i,j,0]) < self.Rbig:
# self.circ_out_small[i,j] = 1
# for n in range(self.order+1):
# self.facout[n,i,j,:] = 1.0 / kv(n, self.R / self.aqout.lab2[i,j,:])
# self.parameters = np.zeros( (self.model.Ngvbc, self.Nparam, self.model.Np), 'D' )
# def potinf(self,x,y,aq=None):
# '''Can be called with only one x,y value'''
# if aq is None: aq = self.model.aq.findAquiferData( x, y )
# rv = np.zeros((2*aq.Naq,1+2*self.order,aq.Naq,self.model.Nin,self.model.Npin),'D')
# if aq == self.aqin:
# r = np.sqrt( (x-self.x0)**2 + (y-self.y0)**2 )
# alpha = np.arctan2(y-self.y0, x-self.x0)
# for i in range(self.aqin.Naq):
# for j in range(self.model.Nin):
# if abs(r-self.R) / abs(self.aqin.lab2[i,j,0]) < self.Rzero:
# if self.circ_in_small[i,j]:
# pot = np.zeros((self.model.Npin),'D')
# rv[i,0,i,j,:] = iv( 0, r / self.aqin.lab2[i,j,:] ) * self.facin[0,i,j,:]
# for n in range(1,self.order+1):
# pot[:] = iv( n, r / self.aqin.lab2[i,j,:] ) * self.facin[n,i,j,:]
# rv[i,2*n-1,i,j,:] = pot * np.cos(n*alpha)
# rv[i,2*n ,i,j,:] = pot * np.sin(n*alpha)
# else:
# pot = self.besapprox.ivratio(r,self.R,self.aqin.lab2[i,j,:])
# rv[i,0,i,j,:] = pot[0]
# for n in range(1,self.order+1):
# rv[i,2*n-1,i,j,:] = pot[n] * np.cos(n*alpha)
# rv[i,2*n ,i,j,:] = pot[n] * np.sin(n*alpha)
# if aq == self.aqout:
# r = np.sqrt( (x-self.x0)**2 + (y-self.y0)**2 )
# alpha = np.arctan2(y-self.y0, x-self.x0)
# for i in range(self.aqout.Naq):
# for j in range(self.model.Nin):
# if abs(r-self.R) / abs(self.aqout.lab2[i,j,0]) < self.Rzero:
# if self.circ_out_small[i,j]:
# pot = np.zeros((self.model.Npin),'D')
# rv[aq.Naq+i,0,i,j,:] = kv( 0, r / self.aqout.lab2[i,j,:] ) * self.facout[0,i,j,:]
# for n in range(1,self.order+1):
# pot[:] = kv( n, r / self.aqout.lab2[i,j,:] ) * self.facout[n,i,j,:]
# rv[aq.Naq+i,2*n-1,i,j,:] = pot * np.cos(n*alpha)
# rv[aq.Naq+i,2*n ,i,j,:] = pot * np.sin(n*alpha)
# else:
# pot = self.besapprox.kvratio(r,self.R,self.aqout.lab2[i,j,:])
# rv[aq.Naq+i,0,i,j,:] = pot[0]
# for n in range(1,self.order+1):
# rv[aq.Naq+i,2*n-1,i,j,:] = pot[n] * np.cos(n*alpha)
# rv[aq.Naq+i,2*n ,i,j,:] = pot[n] * np.sin(n*alpha)
# rv.shape = (self.Nparam,aq.Naq,self.model.Np)
# return rv
# def disinf(self,x,y,aq=None):
# '''Can be called with only one x,y value'''
# if aq is None: aq = self.model.aq.findAquiferData( x, y )
# qx = np.zeros((self.Nparam,aq.Naq,self.model.Np),'D')
# qy = np.zeros((self.Nparam,aq.Naq,self.model.Np),'D')
# if aq == self.aqin:
# r = np.sqrt( (x-self.x0)**2 + (y-self.y0)**2 )
# alpha = np.arctan2(y-self.y0, x-self.x0)
# qr = np.zeros((aq.Naq,1+2*self.order,aq.Naq,self.model.Nin,self.model.Npin),'D')
# qt = np.zeros((aq.Naq,1+2*self.order,aq.Naq,self.model.Nin,self.model.Npin),'D')
# if r < 1e-20: r = 1e-20 # As we divide by that on the return
# for i in range(self.aqin.Naq):
# for j in range(self.model.Nin):
# if abs(r-self.R) / abs(self.aqin.lab2[i,j,0]) < self.Rzero:
# if self.circ_in_small[i,j]:
# pot = np.zeros((self.order+2,self.model.Npin),'D')
# for n in range(self.order+2):
# pot[n] = iv( n, r / self.aqin.lab2[i,j,:] )
# qr[i,0,i,j,:] = -pot[1] / self.aqin.lab2[i,j,:] * self.facin[0,i,j,:]
# for n in range(1,self.order+1):
# qr[i,2*n-1,i,j,:] = -(pot[n-1] + pot[n+1]) / 2 / self.aqin.lab2[i,j,:] * np.cos(n*alpha) * self.facin[n,i,j,:]
# qr[i,2*n ,i,j,:] = -(pot[n-1] + pot[n+1]) / 2 / self.aqin.lab2[i,j,:] * np.sin(n*alpha) * self.facin[n,i,j,:]
# qt[i,2*n-1,i,j,:] = pot[n] * np.sin(n*alpha) * n / r * self.facin[n,i,j,:]
# qt[i,2*n ,i,j,:] = -pot[n] * np.cos(n*alpha) * n / r * self.facin[n,i,j,:]
# else:
# pot = self.besapprox.ivratio(r,self.R,self.aqin.lab2[i,j,:])
# potp = self.besapprox.ivratiop(r,self.R,self.aqin.lab2[i,j,:])
# qr[i,0,i,j,:] = -potp[0] / self.aqin.lab2[i,j,:]
# for n in range(1,self.order+1):
# qr[i,2*n-1,i,j,:] = -potp[n] / self.aqin.lab2[i,j,:] * np.cos(n*alpha)
# qr[i,2*n ,i,j,:] = -potp[n] / 2 / self.aqin.lab2[i,j,:] * np.sin(n*alpha)
# qt[i,2*n-1,i,j,:] = pot[n] * np.sin(n*alpha) * n / r
# qt[i,2*n ,i,j,:] = -pot[n] * np.cos(n*alpha) * n / r
# qr.shape = (self.Nparam/2,aq.Naq,self.model.Np)
# qt.shape = (self.Nparam/2,aq.Naq,self.model.Np)
# qx[:self.Nparam/2,:,:] = qr * np.cos(alpha) - qt * np.sin(alpha);
# qy[:self.Nparam/2,:,:] = qr * np.sin(alpha) + qt * np.cos(alpha);
# if aq == self.aqout:
# r = np.sqrt( (x-self.x0)**2 + (y-self.y0)**2 )
# alpha = np.arctan2(y-self.y0, x-self.x0)
# qr = np.zeros((aq.Naq,1+2*self.order,aq.Naq,self.model.Nin,self.model.Npin),'D')
# qt = np.zeros((aq.Naq,1+2*self.order,aq.Naq,self.model.Nin,self.model.Npin),'D')
# if r < 1e-20: r = 1e-20 # As we divide by that on the return
# for i in range(self.aqout.Naq):
# for j in range(self.model.Nin):
# if abs(r-self.R) / abs(self.aqout.lab2[i,j,0]) < self.Rzero:
# if self.circ_out_small[i,j]:
# pot = np.zeros((self.order+2,self.model.Npin),'D')
# for n in range(self.order+2):
# pot[n] = kv( n, r / self.aqout.lab2[i,j,:] )
# qr[i,0,i,j,:] = pot[1] / self.aqout.lab2[i,j,:] * self.facout[0,i,j,:]
# for n in range(1,self.order+1):
# qr[i,2*n-1,i,j,:] = (pot[n-1] + pot[n+1]) / 2 / self.aqout.lab2[i,j,:] * np.cos(n*alpha) * self.facout[n,i,j,:]
# qr[i,2*n ,i,j,:] = (pot[n-1] + pot[n+1]) / 2 / self.aqout.lab2[i,j,:] * np.sin(n*alpha) * self.facout[n,i,j,:]
# qt[i,2*n-1,i,j,:] = pot[n] * np.sin(n*alpha) * n / r * self.facout[n,i,j,:]
# qt[i,2*n ,i,j,:] = -pot[n] * np.cos(n*alpha) * n / r * self.facout[n,i,j,:]
# else:
# pot = self.besapprox.kvratio(r,self.R,self.aqout.lab2[i,j,:])
# potp = self.besapprox.kvratiop(r,self.R,self.aqout.lab2[i,j,:])
# qr[i,0,i,j,:] = -potp[0] / self.aqout.lab2[i,j,:]
# for n in range(1,self.order+1):
# qr[i,2*n-1,i,j,:] = -potp[n] / self.aqout.lab2[i,j,:] * np.cos(n*alpha)
# qr[i,2*n ,i,j,:] = -potp[n] / self.aqout.lab2[i,j,:] * np.sin(n*alpha)
# qt[i,2*n-1,i,j,:] = pot[n] * np.sin(n*alpha) * n / r
# qt[i,2*n ,i,j,:] = -pot[n] * np.cos(n*alpha) * n / r
# qr.shape = (self.Nparam/2,aq.Naq,self.model.Np)
# qt.shape = (self.Nparam/2,aq.Naq,self.model.Np)
# qx[self.Nparam/2:,:,:] = qr * np.cos(alpha) - qt * np.sin(alpha);
# qy[self.Nparam/2:,:,:] = qr * np.sin(alpha) + qt * np.cos(alpha);
# return qx,qy
# def layout(self):
# return 'line', self.x0 + self.R * np.cos(np.linspace(0,2*np.pi,100)), self.y0 + self.R * np.sin(np.linspace(0,2*np.pi,100))
# def CircInhomMaq(model,x0=0,y0=0,R=1,order=1,kaq=[1],z=[1,0],c=[],Saq=[0.001],Sll=[0],topboundary='imp',phreatictop=False,label=None,test=False):
# CircInhomDataMaq(model,x0,y0,R,kaq,z,c,Saq,Sll,topboundary,phreatictop)
# return CircInhom(model,x0,y0,R,order,label,test)
# def CircInhom3D(model,x0=0,y0=0,R=1,order=1,kaq=[1,1,1],z=[4,3,2,1],Saq=[0.3,0.001,0.001],kzoverkh=[.1,.1,.1],phreatictop=True,label=None):
# CircInhomData3D(model,x0,y0,R,kaq,z,Saq,kzoverkh,phreatictop)
# return CircInhom(model,x0,y0,R,order,label)
#
#ml = ModelMaq(kaq=[4,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=1,tmax=10,M=20)
##ls = MscreenLineSinkDitchString(ml,[(-1,0),(0,0),(1,0)],tsandQ=[(0.0,1.0)],layers=[2])
#e1a = EllipseInhomDataMaq(ml,0,0,along=2.0,bshort=1.0,angle=0.0,kaq=[10,2],z=[4,2,1,0],c=[200],Saq=[2e-3,2e-4],Sll=[1e-5])
#e1 = EllipseInhom(ml,0,0,along=2.0,bshort=1.0,angle=0.0,order=5)
#e1 = EllipseInhomMaq(ml,0,0,along=2.0,bshort=1.0,angle=0.0,order=5,kaq=[10,2],z=[4,2,1,0],c=[200],Saq=[2e-3,2e-4],Sll=[1e-5])
## Same inside and outside
#c1 = CircInhomMaq(ml,0,0,2.0,order=5,kaq=[4,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6])
#c1 = CircInhomMaq(ml,0,0,2.0,order=5,kaq=[10,.1],z=[4,2,1,0],c=[200],Saq=[2e-3,2e-4],Sll=[1e-5])
##c2 = CircInhomMaq(ml,0,0,5000.0,order=1,kaq=[10,2],z=[4,2,1,0],c=[200],Saq=[2e-3,2e-4],Sll=[1e-5])
##ml.initialize()
##c2.circ_in_small[:] = 0
##c2.circ_out_small[:] = 0
#w = DischargeWell(ml,xw=.5,yw=0,rw=.1,tsandQ=[0,5.0],layers=1)
#ml.solve()
#ml.solve()
#h1,h2 = np.zeros((2,e1.Ncp)), np.zeros((2,e1.Ncp))
#qn1,qn2 = np.zeros((2,e1.Ncp)), np.zeros((2,e1.Ncp))
#for i in range(e1.Ncp):
# h1[:,i] = ml.head(e1.xc[i],e1.yc[i],2,aq=e1.aqin)[:,0]
# h2[:,i] = ml.head(e1.xc[i],e1.yc[i],2,aq=e1.aqout)[:,0]
# qx1,qy1 = ml.discharge(e1.xc[i],e1.yc[i],2,aq=e1.aqin)
# qx2,qy2 = ml.discharge(e1.xc[i],e1.yc[i],2,aq=e1.aqout)
# a = e1a.outwardnormalangle(e1.xc[i],e1.yc[i])
# qn1[:,i] = qx1[:,0]*np.cos(a) + qy1[:,0]*np.sin(a)
# qn2[:,i] = qx2[:,0]*np.cos(a) + qy2[:,0]*np.sin(a)
#ml = ModelMaq(kaq=[10,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=.1,tmax=10)
#w1 = Well(ml,0,2,.1,tsandQ=[(0,10)],layers=[1])
#ls2 = ZeroHeadLineSinkString(ml,xy=[(-10,-2),(0,-4),(4,0)],layers=[1])
#ls1 = MscreenLineSinkDitchString(ml,xy=[(-10,0),(0,0),(10,10)],tsandQ=[(0.0,7.0)],res=0.0,wh='H',layers=[2],label=None)
#ml.solve()
#ml = ModelMaq([1,20,2],[25,20,18,10,8,0],c=[1000,2000],Saq=[0.1,1e-4,1e-4],Sll=[0,0],phreatictop=True,tmin=1e-6,tmax=10,M=30)
#w1 = Well(ml,0,0,.1,tsandQ=[(0,1000)],layers=[2])
#ls1 = ZeroMscreenLineSink(ml,10,-5,10,5,layers=[1,2,3],res=0.5,wh=1,vres=3,wv=1)
#w2 = ZeroMscreenWell(ml,10,0,res=1.0,layers=[1,2,3],vres=1.0)
#w3 = Well(ml,0,-10,.1,tsandQ=[(0,700)],layers=[2])
#ml.solve()
##ml1 = ModelMaq([1,20,2],[25,20,18,10,8,0],c=[1000,2000],Saq=[1e-4,1e-4,1e-4],Sll=[0,0],tmin=0.1,tmax=10000,M=30)
##w1 = Well(ml1,0,0,.1,tsandQ=[(0,1000)],layers=[2],res=0.1)
##ml1.solve()
#t = np.logspace(-1,3,100)
#h0 = ml.head(50,0,t)
##h1 = ml1.head(50,0,t)
##w = MscreenWell(ml,0,0,.1,tsandQ=[(0,1000),(100,0),(365,1000),(465,0)],layers=[2,3])
##w2 = HeadWell(ml,50,0,.2,tsandh=[(0,1)],layers=[2])
##y = [-500,-300,-200,-100,-50,0,50,100,200,300,500]
##x = 50 * np.ones(len(y))
##ls = ZeroHeadLineSinkString(ml,xy=zip(x,y),layers=[1])
##w = Well(ml,0,0,.1,tsandQ=[(0,1000),(100,0)],layers=[2])
##ml.solve()
#ml = Model3D( kaq=[2,1,5,10,4], z=[10,8,6,4,2,0], Saq=[.1,.0001,.0002,.0002,.0001], phreatictop=True, kzoverkh=0.1, tmin=1e-3, tmax=1e3 )
#w = MscreenWell(ml,0,-25,rw=.3,tsandQ=[(0,100),(100,50)],layers=[2,3])
#ml.solve()
##ml = Model3D(kaq=2.0,z=[10,5,0],Saq=[.002,.001],kzoverkh=0.2,phreatictop=False,tmin=.1,tmax=10,M=15)
#ml = ModelMaq(kaq=[10,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=100,tmax=300,M=50)
#w = HeadWellNew(ml,0,0,.1,tsandh=[(0.0,1.0)],layers=1)
#ml.solve()
##L1 = np.sqrt(10**2+5**2)
##ls1 = LineSink(ml,-10,-10,0,-5,tsandQ=[(0,.05*L1),(1,.02*L1)],res=1.0,layers=[1,2],label='mark1')
#w = MscreenWell(ml,-5,-5,.1,[0,5],layers=[1,2])
#L2 = np.sqrt(10**2+15**2)
#ls2 = LineSink(ml,0,-5,10,10,tsandQ=[(0,.03*L2),(2,.07*L2)],layers=[1],label='mark2')
##ls3a = ZeroHeadLineSink(ml,-10,5,-5,5,res=1.0,layers=[1,2])
##ls3b = ZeroHeadLineSink(ml,-5,5,0,5,res=1.0,layers=[1,2])
##ls3c = ZeroHeadLineSink(ml,0,5,5,5,res=1.0,layers=[1,2])
##lss = HeadLineSinkString(ml,[(-10,5),(-5,5),(0,5)],tsandh=[(0,0.02),(3,0.01)],res=1.0,layers=[1,2])
#lss = ZeroHeadLineSinkString(ml,[(-10,5),(-5,5),(0,5),(5,5)],res=1.0,layers=[1,2])
##lss = MscreenLineSinkString(ml,[(-10,5),(-5,5),(0,5)],tsandQ=[(0,0.2),(3,0.1)],res=1.0,layers=[1,2])
##lss = ZeroMscreenLineSinkString(ml,[(-10,5),(-5,5),(0,5)],res=1.0,layers=[1,2])
##ml.initialize()
#ml.solve()
#print ml.potential(50,50,[0.5,5])
#ml2 = ModelMaq(kaq=[10,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=.1,tmax=10,M=15)
#L1 = np.sqrt(10**2+5**2)
#ls1b = LineSink(ml2,-10,-10,0,-5,tsandQ=[(0,.05*L1),(1,.02*L1)],res=1.0,layers=[1,2],label='mark1')
#L2 = np.sqrt(10**2+15**2)
#ls2b = LineSink(ml2,0,-5,10,10,tsandQ=[(0,.03*L2),(2,.07*L2)],layers=[1],label='mark2')
##ls3a = HeadLineSink(ml2,-10,5,-5,5,tsandh=[(0,0.02),(3,0.01)],res=1.0,layers=[1,2])
##ls3b = HeadLineSink(ml2,-5,5,0,5,tsandh=[(0,0.02),(3,0.01)],res=1.0,layers=[1,2])
##ls3a = ZeroHeadLineSink(ml2,-10,5,-5,5,res=1.0,layers=[1,2])
##ls3b = ZeroHeadLineSink(ml2,-5,5,0,5,res=1.0,layers=[1,2])
##ls3a = MscreenLineSink(ml2,-10,5,-5,5,tsandQ=[(0,0.2),(3,0.1)],res=1.0,layers=[1,2])
##ls3b = MscreenLineSink(ml2,-5,5,0,5,tsandQ=[(0,0.2),(3,0.1)],res=1.0,layers=[1,2])
#ls3a = ZeroMscreenLineSink(ml2,-10,5,-5,5,res=1.0,layers=[1,2])
#ls3b = ZeroMscreenLineSink(ml2,-5,5,0,5,res=1.0,layers=[1,2])
##lssb = HeadLineSinkStringOld(ml2,[(-10,5),(-5,5),(0,5)],tsandh=[(0,0.02),(3,0.01)],res=0.0,layers=[1,2])
#ml2.solve()
#print ml2.potential(50,50,[0.5,5])
#lss = HeadLineSinkString(ml,[(-10,5),(-5,5),(0,5)],tsandh=[(0,0.02),(3,0.01)],res=1.0,layers=[1,2])
#lss = MscreenLineSinkString(ml,[(-10,5),(-5,5),(0,5)],tsandQ=[(0,.03*5),(2,.07*5)],res=0.5,layers=[1,2])
#ls3a = MscreenLineSink(ml,-10,5,-5,5,tsandQ=[(0,.03*5),(2,.07*5)],res=0.5,layers=[1,2])
#ls3b = MscreenLineSink(ml,-5,5,0,5,tsandQ=[(0,.03*5),(2,.07*5)],res=0.5,layers=[1,2])
#
#ml2 = ModelMaq(kaq=[10,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=.1,tmax=10,M=15)
#L1 = np.sqrt(10**2+5**2)
#ls1a = LineSink(ml2,-10,-10,0,-5,tsandQ=[(0,.05*L1),(1,.02*L1)],res=1.0,layers=[1,2],label='mark1')
#L2 = np.sqrt(10**2+15**2)
#ls2a = LineSink(ml2,0,-5,10,10,tsandQ=[(0,.03*L2),(2,.07*L2)],layers=[1],label='mark2')
#ls3a = HeadLineSink(ml2,-10,5,-5,5,tsandh=[(0,0.02),(3,0.01)],res=1.0,layers=[1,2])
#ls3b = HeadLineSink(ml2,-5,5,0,5,tsandh=[(0,0.02),(3,0.01)],res=1.0,layers=[1,2])
##lss = HeadLineSinkString(ml,[(-10,5),(-5,5),(0,5)],tsandh=[(0,0.02),(3,0.01)],res=0.0,layers=[1,2])
##ls3 = ZeroMscreenLineSink(ml,-10,5,0,5,res=1.0,layers=[1,2])
#ml = ModelMaq(kaq=[10,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=.1,tmax=10,M=15)
#w1 = Well(ml,0,0,.1,tsandQ=[(0,5),(1,2)],res=1.0,layers=[1,2])
#w2 = Well(ml,100,0,.1,tsandQ=[(0,3),(2,7)],layers=[1])
##w3 = MscreenWell(ml,0,100,.1,tsandQ=[(0,2),(3,1)],res=2.0,layers=[1,2])
#w3 = ZeroMscreenWell(ml,0,100,.1,res=2.0,layers=[1,2])
##w3 = ZeroHeadWell(ml,0,100,.1,res=1.0,layers=[1,2])
##w3 = HeadWell(ml,0,100,.1,tsandh=[(0,2),(3,1)],res=1.0,layers=[1,2])
#ml.solve()
###print ml.potential(2,3,[.5,5])
#print ml.potential(50,50,[0.5,5])
#ml2.solve()
#print ml2.potential(50,50,[.5,5])
#print lss.strength([.5,5])
#
#ml2 = ModelMaq(kaq=[10,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=0.1,tmax=10,M=15)
#ls1a = LineSink(ml2,-10,-10,0,-5,tsandsig=[(0,.05),(1,.02)],res=1.0,layers=[1,2],label='mark1')
#ls2a = LineSink(ml2,0,-5,10,10,tsandsig=[(0,.03),(2,.07)],layers=[1],label='mark2')
#ls3a = HeadLineSinkStringOld(ml2,[(-10,5),(-5,5),(0,5)],tsandh=[(0,0.02),(3,0.01)],res=0.0,layers=[1,2])
#ml2.solve()
#print ml2.potential(50,50,[0.5,5])
#print 'Q from strength: ',w3.strength(.5)
#print 'Q from head diff: ',(ml.head(w3.xc,w3.yc,.5)-w3.headinside(.5))/w3.res*2*np.pi*w3.rw*ml.aq.Haq[:,np.newaxis]
#print 'Q from head diff: ',(ml.head(w3.xc,w3.yc,.5)-2.0)/w3.res*2*np.pi*w3.rw*ml.aq.Haq[:,np.newaxis]
#print w3.strength([.5,5])
#print ls3.strength([.5,5])
#print sum(ls3.strength([.5,5]),0)
#Q = w3.strength([.5,5])
#print sum(Q,0)
#print ml.potential(w3.xc,w3.yc,[.5,5])
|
[
"numpy.ones_like",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"scipy.special.iv",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.exp",
"numpy.cos",
"scipy.special.kv",
"numpy.atleast_1d",
"numpy.sqrt"
] |
[((2073, 2095), 'numpy.arange', 'np.arange', (['self.Nterms'], {}), '(self.Nterms)\n', (2082, 2095), True, 'import numpy as np\n'), ((2183, 2227), 'numpy.ones', 'np.ones', (['(self.Norder, 2 * self.Nterms)', '"""d"""'], {}), "((self.Norder, 2 * self.Nterms), 'd')\n", (2190, 2227), True, 'import numpy as np\n'), ((2246, 2286), 'numpy.ones', 'np.ones', (['(self.Norder, self.Nterms)', '"""d"""'], {}), "((self.Norder, self.Nterms), 'd')\n", (2253, 2286), True, 'import numpy as np\n'), ((2307, 2347), 'numpy.ones', 'np.ones', (['(self.Norder, self.Nterms)', '"""d"""'], {}), "((self.Norder, self.Nterms), 'd')\n", (2314, 2347), True, 'import numpy as np\n'), ((3008, 3026), 'numpy.atleast_1d', 'np.atleast_1d', (['lab'], {}), '(lab)\n', (3021, 3026), True, 'import numpy as np\n'), ((3464, 3482), 'numpy.atleast_1d', 'np.atleast_1d', (['lab'], {}), '(lab)\n', (3477, 3482), True, 'import numpy as np\n'), ((3895, 3913), 'numpy.atleast_1d', 'np.atleast_1d', (['lab'], {}), '(lab)\n', (3908, 3913), True, 'import numpy as np\n'), ((4460, 4478), 'numpy.atleast_1d', 'np.atleast_1d', (['lab'], {}), '(lab)\n', (4473, 4478), True, 'import numpy as np\n'), ((5615, 5643), 'numpy.array', 'np.array', (['[self.x0 + self.R]'], {}), '([self.x0 + self.R])\n', (5623, 5643), True, 'import numpy as np\n'), ((5655, 5674), 'numpy.array', 'np.array', (['[self.y0]'], {}), '([self.y0])\n', (5663, 5674), True, 'import numpy as np\n'), ((5698, 5709), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (5706, 5709), True, 'import numpy as np\n'), ((6087, 6115), 'numpy.ones_like', 'np.ones_like', (['self.aqin.lab2'], {}), '(self.aqin.lab2)\n', (6099, 6115), True, 'import numpy as np\n'), ((6138, 6167), 'numpy.ones_like', 'np.ones_like', (['self.aqout.lab2'], {}), '(self.aqout.lab2)\n', (6150, 6167), True, 'import numpy as np\n'), ((6246, 6297), 'numpy.ones', 'np.ones', (['(self.aqin.naq, self.model.nin)'], {'dtype': '"""i"""'}), "((self.aqin.naq, self.model.nin), dtype='i')\n", (6253, 6297), True, 'import numpy as np\n'), ((6328, 6380), 'numpy.ones', 'np.ones', (['(self.aqout.naq, self.model.nin)'], {'dtype': '"""i"""'}), "((self.aqout.naq, self.model.nin), dtype='i')\n", (6335, 6380), True, 'import numpy as np\n'), ((7601, 7662), 'numpy.zeros', 'np.zeros', (['(self.model.Ngvbc, self.Nparam, self.model.Np)', '"""D"""'], {}), "((self.model.Ngvbc, self.Nparam, self.model.Np), 'D')\n", (7609, 7662), True, 'import numpy as np\n'), ((7866, 7935), 'numpy.zeros', 'np.zeros', (['(self.nparam, aq.naq, self.model.nin, self.model.npin)', '"""D"""'], {}), "((self.nparam, aq.naq, self.model.nin, self.model.npin), 'D')\n", (7874, 7935), True, 'import numpy as np\n'), ((9684, 9735), 'numpy.zeros', 'np.zeros', (['(self.nparam, aq.naq, self.model.np)', '"""D"""'], {}), "((self.nparam, aq.naq, self.model.np), 'D')\n", (9692, 9735), True, 'import numpy as np\n'), ((9749, 9800), 'numpy.zeros', 'np.zeros', (['(self.nparam, aq.naq, self.model.np)', '"""D"""'], {}), "((self.nparam, aq.naq, self.model.np), 'D')\n", (9757, 9800), True, 'import numpy as np\n'), ((12079, 12109), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (12090, 12109), True, 'import numpy as np\n'), ((3129, 3206), 'numpy.sum', 'np.sum', (['(self.minonek * self.hankelnk / (2.0 * rho / lab[k]) ** self.krange)', '(1)'], {}), '(self.minonek * self.hankelnk / (2.0 * rho / lab[k]) ** self.krange, 1)\n', (3135, 3206), True, 'import numpy as np\n'), ((3227, 3302), 'numpy.sum', 'np.sum', (['(self.minonek * self.hankelnk / (2.0 * R / lab[k]) ** 
self.krange)', '(1)'], {}), '(self.minonek * self.hankelnk / (2.0 * R / lab[k]) ** self.krange, 1)\n', (3233, 3302), True, 'import numpy as np\n'), ((3585, 3647), 'numpy.sum', 'np.sum', (['(self.hankelnk / (2.0 * rho / lab[k]) ** self.krange)', '(1)'], {}), '(self.hankelnk / (2.0 * rho / lab[k]) ** self.krange, 1)\n', (3591, 3647), True, 'import numpy as np\n'), ((3668, 3728), 'numpy.sum', 'np.sum', (['(self.hankelnk / (2.0 * R / lab[k]) ** self.krange)', '(1)'], {}), '(self.hankelnk / (2.0 * R / lab[k]) ** self.krange, 1)\n', (3674, 3728), True, 'import numpy as np\n'), ((4222, 4297), 'numpy.sum', 'np.sum', (['(self.minonek * self.hankelnk / (2.0 * R / lab[k]) ** self.krange)', '(1)'], {}), '(self.minonek * self.hankelnk / (2.0 * R / lab[k]) ** self.krange, 1)\n', (4228, 4297), True, 'import numpy as np\n'), ((4787, 4847), 'numpy.sum', 'np.sum', (['(self.hankelnk / (2.0 * R / lab[k]) ** self.krange)', '(1)'], {}), '(self.hankelnk / (2.0 * R / lab[k]) ** self.krange, 1)\n', (4793, 4847), True, 'import numpy as np\n'), ((8004, 8052), 'numpy.sqrt', 'np.sqrt', (['((x - self.x0) ** 2 + (y - self.y0) ** 2)'], {}), '((x - self.x0) ** 2 + (y - self.y0) ** 2)\n', (8011, 8052), True, 'import numpy as np\n'), ((8676, 8724), 'numpy.sqrt', 'np.sqrt', (['((x - self.x0) ** 2 + (y - self.y0) ** 2)'], {}), '((x - self.x0) ** 2 + (y - self.y0) ** 2)\n', (8683, 8724), True, 'import numpy as np\n'), ((9846, 9915), 'numpy.zeros', 'np.zeros', (['(self.nparam, aq.naq, self.model.nin, self.model.npin)', '"""D"""'], {}), "((self.nparam, aq.naq, self.model.nin, self.model.npin), 'D')\n", (9854, 9915), True, 'import numpy as np\n'), ((9960, 10008), 'numpy.sqrt', 'np.sqrt', (['((x - self.x0) ** 2 + (y - self.y0) ** 2)'], {}), '((x - self.x0) ** 2 + (y - self.y0) ** 2)\n', (9967, 10008), True, 'import numpy as np\n'), ((10954, 11023), 'numpy.zeros', 'np.zeros', (['(self.Nparam, aq.Naq, self.model.Nin, self.model.Npin)', '"""D"""'], {}), "((self.Nparam, aq.Naq, self.model.Nin, self.model.Npin), 'D')\n", (10962, 11023), True, 'import numpy as np\n'), ((11067, 11115), 'numpy.sqrt', 'np.sqrt', (['((x - self.x0) ** 2 + (y - self.y0) ** 2)'], {}), '((x - self.x0) ** 2 + (y - self.y0) ** 2)\n', (11074, 11115), True, 'import numpy as np\n'), ((2121, 2141), 'numpy.ones', 'np.ones', (['self.Nterms'], {}), '(self.Nterms)\n', (2128, 2141), True, 'import numpy as np\n'), ((3368, 3394), 'numpy.exp', 'np.exp', (['((rho - R) / lab[k])'], {}), '((rho - R) / lab[k])\n', (3374, 3394), True, 'import numpy as np\n'), ((3794, 3820), 'numpy.exp', 'np.exp', (['((R - rho) / lab[k])'], {}), '((R - rho) / lab[k])\n', (3800, 3820), True, 'import numpy as np\n'), ((4016, 4101), 'numpy.sum', 'np.sum', (['(self.muk * self.hankeln2k / (2.0 * rho / lab[k]) ** (2 * self.krange))', '(1)'], {}), '(self.muk * self.hankeln2k / (2.0 * rho / lab[k]) ** (2 * self.krange), 1\n )\n', (4022, 4101), True, 'import numpy as np\n'), ((4119, 4210), 'numpy.sum', 'np.sum', (['(self.nuk * self.hankeln2kp1 / (2.0 * rho / lab[k]) ** (2 * self.krange + 1))', '(1)'], {}), '(self.nuk * self.hankeln2kp1 / (2.0 * rho / lab[k]) ** (2 * self.\n krange + 1), 1)\n', (4125, 4210), True, 'import numpy as np\n'), ((4363, 4389), 'numpy.exp', 'np.exp', (['((rho - R) / lab[k])'], {}), '((rho - R) / lab[k])\n', (4369, 4389), True, 'import numpy as np\n'), ((4581, 4666), 'numpy.sum', 'np.sum', (['(self.muk * self.hankeln2k / (2.0 * rho / lab[k]) ** (2 * self.krange))', '(1)'], {}), '(self.muk * self.hankeln2k / (2.0 * rho / lab[k]) ** (2 * self.krange), 1\n )\n', (4587, 4666), True, 
'import numpy as np\n'), ((4684, 4775), 'numpy.sum', 'np.sum', (['(self.nuk * self.hankeln2kp1 / (2.0 * rho / lab[k]) ** (2 * self.krange + 1))', '(1)'], {}), '(self.nuk * self.hankeln2kp1 / (2.0 * rho / lab[k]) ** (2 * self.\n krange + 1), 1)\n', (4690, 4775), True, 'import numpy as np\n'), ((4914, 4940), 'numpy.exp', 'np.exp', (['((R - rho) / lab[k])'], {}), '((R - rho) / lab[k])\n', (4920, 4940), True, 'import numpy as np\n'), ((12152, 12165), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (12158, 12165), True, 'import numpy as np\n'), ((12211, 12224), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (12217, 12224), True, 'import numpy as np\n'), ((8382, 8416), 'scipy.special.iv', 'iv', (['(0)', '(r / self.aqin.lab2[i, j, :])'], {}), '(0, r / self.aqin.lab2[i, j, :])\n', (8384, 8416), False, 'from scipy.special import kv, iv\n'), ((9108, 9143), 'scipy.special.kv', 'kv', (['(0)', '(r / self.aqout.lab2[i, j, :])'], {}), '(0, r / self.aqout.lab2[i, j, :])\n', (9110, 9143), False, 'from scipy.special import kv, iv\n'), ((10430, 10464), 'scipy.special.iv', 'iv', (['(1)', '(r / self.aqin.lab2[i, j, :])'], {}), '(1, r / self.aqin.lab2[i, j, :])\n', (10432, 10464), False, 'from scipy.special import kv, iv\n'), ((11495, 11530), 'scipy.special.kv', 'kv', (['(1)', '(r / self.aqout.lab2[i, j, :])'], {}), '(1, r / self.aqout.lab2[i, j, :])\n', (11497, 11530), False, 'from scipy.special import kv, iv\n')]
|
'''
Created on 9 de nov de 2020
@author: klaus
'''
import jsonlines
from folders import DATA_DIR, SUBMISSIONS_DIR
import os
from os import path
import pandas as pd
import numpy as np
import urllib
import igraph as ig
from input.read_input import read_item_data, get_emb
def create_ratio(mode = 'train',CUTOFF=50, which='domain_id',alternate=False):
assert mode in ['train','val']
assert which in ['domain_id','category_id','item_id','price','condition']
df = read_item_data()
df['price'] = pd.qcut(df['price'].values,100)
dct_attr = df[which].to_dict()
dct_dom = df['domain_id'].to_dict()
if mode == 'train':
check = lambda x: x <= np.round(413163*0.8).astype(np.int32)
elif mode == 'val':
check = lambda x: x > np.round(413163*0.8).astype(np.int32)
else:
raise Exception("mode must be train or val")
DATA_PATH = path.join(DATA_DIR,'train_dataset.jl')
i = 0
""" Create dictionary holding domain counts (searched, bought) """
attr_s = dict([(k,0) for k in pd.unique(df[which])])
attr_b = dict([(k,0) for k in pd.unique(df[which])])
attr_o = dict([(k,0) for k in pd.unique(df[which])])
with jsonlines.open(DATA_PATH) as reader:
for obj in reader:
if check(i):
#print(i)
L = []
for h in obj['user_history']:
if h['event_type'] == 'view':
#print("Viewed {}".format(dct[h['event_info']]))
L.append(h['event_info'])
elif h['event_type'] == 'search':
#print("Searched {}".format(h['event_info']))
pass
temp = pd.Series(L,index=range(len(L)),dtype=np.float64)
L_k = pd.unique(L[::-1])[::-1]
attr_unique = list(pd.unique([dct_attr[k] for k in L_k]))
for dom in attr_unique:
if dom in attr_s:
attr_s[dom] += 1
if alternate:
for attr in attr_unique:
if dct_dom[attr] == dct_dom[obj['item_bought']]:
attr_b[attr] += 1
else:
attr_o[attr] += 1
else:
if dct_attr[obj['item_bought']] in attr_unique:
attr_b[dct_attr[obj['item_bought']]] += 1
else:
attr_o[dct_attr[obj['item_bought']]] += 1
i += 1
#L.append(obj)
    attr_b = pd.DataFrame.from_dict(attr_b, orient='index')
    attr_s = pd.DataFrame.from_dict(attr_s, orient='index')
    attr_o = pd.DataFrame.from_dict(attr_o, orient='index')
attr_b.columns, attr_s.columns, attr_o.columns = ['bought'],['searched'], ['out_bought']
attr_b['bought'] = attr_b['bought'].values.astype(np.float32)
attr_s['searched'] = attr_s['searched'].values.astype(np.float32)
rat = attr_b['bought'].values/(1.0+attr_s['searched'].values)
rat[attr_s['searched'].values < CUTOFF] = np.mean(rat[attr_s['searched'].values >= CUTOFF])
rat2 = attr_o['out_bought'].values/(1.0+attr_b['bought'].values)
rat2[attr_s['searched'].values < CUTOFF] = np.mean(rat2[attr_s['searched'].values >= CUTOFF])
rat = pd.DataFrame({"rat":np.array(rat)},index=attr_b.index)
rat2 = pd.DataFrame({"rat2":np.array(rat2)},index=attr_b.index)
res = pd.concat([attr_s,attr_b,attr_o,rat,rat2],axis=1)
if alternate:
res.to_csv(path.join(DATA_DIR,'{}_ratio_alternate.csv'.format(which)))
else:
res.to_csv(path.join(DATA_DIR,'{}_ratio.csv'.format(which)))
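# Worked illustration with made-up numbers (added, not part of the pipeline):
# if a domain was viewed in 400 sessions and the bought item fell in that
# domain in 100 of them, rat = 100 / (1 + 400) ~= 0.25; attributes viewed fewer
# than CUTOFF times are given the mean ratio of the well-sampled attributes
# instead, so rarely seen values do not receive noisy scores.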
def create_language():
df = read_item_data()
import fasttext
model_fname = path.join(DATA_DIR,"lid.176.bin")
if not path.isfile(model_fname):
print("Did not find fasttext model at {}".format(model_fname))
print("Trying to download from the web...")
try:
urllib.request.urlretrieve ("https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin", model_fname)
except:
raise Exception("Could not get fasttext model")
if not path.isfile(model_fname):
raise Exception("Could not get fasttext model")
else:
print("Fasttext model found at {}".format(model_fname))
lid_model = fasttext.FastText.load_model(model_fname)
def get_language(i,x):
print(i)
languages, scores = lid_model.predict(str(x),k=999,threshold=-1.0)
languages = np.array(languages)
return scores[np.where(languages=='__label__es')[0][0]], scores[np.where(languages=='__label__pt')[0][0]], scores[np.where(languages=='__label__en')[0][0]]
X = np.array([get_language(i,x) for i,x in enumerate(df['title'].values)])
for i,c in enumerate(['score_es','score_pt','score_en']):
df[c] = X[:,i]
df.loc[:,['score_es','score_pt','score_en']].to_csv(path.join(DATA_DIR,'language_identification.csv'))
def load_language_df():
return pd.read_csv(path.join(DATA_DIR,'language_identification.csv'),index_col=0)
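# Hypothetical convenience helper (added, not in the original script): the
# scores written by create_language() keep read_item_data()'s index, so they
# can be joined straight back onto the item table as extra columns.
def _items_with_language_scores():
    return read_item_data().join(load_language_df())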
def get_ratio(which='domain_id',full=False,standardize=False,alternate=False):
assert which in ['domain_id','category_id','item_id','used']
if alternate:
fname = path.join(DATA_DIR,'{}_ratio_alternate.csv'.format(which))
else:
fname = path.join(DATA_DIR,'{}_ratio.csv'.format(which))
df = pd.read_csv(fname,index_col=0)
if standardize:
for c in df.columns:
df[c] = (df[c] - np.mean(df[c].values))/np.std(df[c].values)
if full:
return df
return df['rat'].to_dict()
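# Hypothetical usage sketch (added, not in the original script): get_ratio()
# returns a plain {attribute value -> bought/(1+searched)} mapping, so a
# downstream model would typically look the ratio up per item/domain, falling
# back to 0.0 for unseen keys; 'domain_value' below is a placeholder name.
def _domain_ratio_feature(domain_value, ratios=None):
    ratios = get_ratio('domain_id') if ratios is None else ratios
    return ratios.get(domain_value, 0.0)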
def create_all():
print("Creating language classification DataFrame...")
create_language()
print("Creating item_id feature DataFrame...")
create_ratio('train',CUTOFF=0,which='item_id',alternate=False)
print("Creating domain_id feature DataFrame...")
create_ratio('train',CUTOFF=0,which='domain_id')
print("Creating category_id feature DataFrame...")
create_ratio('train',CUTOFF=0,which='category_id')
print("Creating (alternate) item id feature DataFrame...")
create_ratio('train',CUTOFF=0,which='item_id',alternate=True)
if __name__ == "__main__":
create_all()
|
[
"fasttext.FastText.load_model",
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"numpy.std",
"numpy.round",
"input.read_input.read_item_data",
"pandas.unique",
"urllib.request.urlretrieve",
"os.path.isfile",
"numpy.mean",
"numpy.array",
"numpy.where",
"pandas.qcut",
"jsonlines.open",
"os.path.join",
"pandas.concat"
] |
[((475, 491), 'input.read_input.read_item_data', 'read_item_data', ([], {}), '()\n', (489, 491), False, 'from input.read_input import read_item_data, get_emb\n'), ((515, 547), 'pandas.qcut', 'pd.qcut', (["df['price'].values", '(100)'], {}), "(df['price'].values, 100)\n", (522, 547), True, 'import pandas as pd\n'), ((898, 937), 'os.path.join', 'path.join', (['DATA_DIR', '"""train_dataset.jl"""'], {}), "(DATA_DIR, 'train_dataset.jl')\n", (907, 937), False, 'from os import path\n'), ((2809, 2855), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['attr_o'], {'orient': '"""index"""'}), "(attr_o, orient='index')\n", (2831, 2855), True, 'import pandas as pd\n'), ((3303, 3352), 'numpy.mean', 'np.mean', (["rat[attr_s['searched'].values >= CUTOFF]"], {}), "(rat[attr_s['searched'].values >= CUTOFF])\n", (3310, 3352), True, 'import numpy as np\n'), ((3470, 3520), 'numpy.mean', 'np.mean', (["rat2[attr_s['searched'].values >= CUTOFF]"], {}), "(rat2[attr_s['searched'].values >= CUTOFF])\n", (3477, 3520), True, 'import numpy as np\n'), ((3671, 3725), 'pandas.concat', 'pd.concat', (['[attr_s, attr_b, attr_o, rat, rat2]'], {'axis': '(1)'}), '([attr_s, attr_b, attr_o, rat, rat2], axis=1)\n', (3680, 3725), True, 'import pandas as pd\n'), ((3939, 3955), 'input.read_input.read_item_data', 'read_item_data', ([], {}), '()\n', (3953, 3955), False, 'from input.read_input import read_item_data, get_emb\n'), ((3994, 4028), 'os.path.join', 'path.join', (['DATA_DIR', '"""lid.176.bin"""'], {}), "(DATA_DIR, 'lid.176.bin')\n", (4003, 4028), False, 'from os import path\n'), ((4597, 4638), 'fasttext.FastText.load_model', 'fasttext.FastText.load_model', (['model_fname'], {}), '(model_fname)\n', (4625, 4638), False, 'import fasttext\n'), ((5697, 5728), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'index_col': '(0)'}), '(fname, index_col=0)\n', (5708, 5728), True, 'import pandas as pd\n'), ((1199, 1224), 'jsonlines.open', 'jsonlines.open', (['DATA_PATH'], {}), '(DATA_PATH)\n', (1213, 1224), False, 'import jsonlines\n'), ((2672, 2718), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['attr_b'], {'orient': '"""index"""'}), "(attr_b, orient='index')\n", (2694, 2718), True, 'import pandas as pd\n'), ((2748, 2794), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['attr_s'], {'orient': '"""index"""'}), "(attr_s, orient='index')\n", (2770, 2794), True, 'import pandas as pd\n'), ((4039, 4063), 'os.path.isfile', 'path.isfile', (['model_fname'], {}), '(model_fname)\n', (4050, 4063), False, 'from os import path\n'), ((4415, 4439), 'os.path.isfile', 'path.isfile', (['model_fname'], {}), '(model_fname)\n', (4426, 4439), False, 'from os import path\n'), ((4790, 4809), 'numpy.array', 'np.array', (['languages'], {}), '(languages)\n', (4798, 4809), True, 'import numpy as np\n'), ((5208, 5258), 'os.path.join', 'path.join', (['DATA_DIR', '"""language_identification.csv"""'], {}), "(DATA_DIR, 'language_identification.csv')\n", (5217, 5258), False, 'from os import path\n'), ((5311, 5361), 'os.path.join', 'path.join', (['DATA_DIR', '"""language_identification.csv"""'], {}), "(DATA_DIR, 'language_identification.csv')\n", (5320, 5361), False, 'from os import path\n'), ((3553, 3566), 'numpy.array', 'np.array', (['rat'], {}), '(rat)\n', (3561, 3566), True, 'import numpy as np\n'), ((3620, 3634), 'numpy.array', 'np.array', (['rat2'], {}), '(rat2)\n', (3628, 3634), True, 'import numpy as np\n'), ((4213, 4334), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', 
(['"""https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin"""', 'model_fname'], {}), "(\n 'https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin',\n model_fname)\n", (4239, 4334), False, 'import urllib\n'), ((1053, 1073), 'pandas.unique', 'pd.unique', (['df[which]'], {}), '(df[which])\n', (1062, 1073), True, 'import pandas as pd\n'), ((1110, 1130), 'pandas.unique', 'pd.unique', (['df[which]'], {}), '(df[which])\n', (1119, 1130), True, 'import pandas as pd\n'), ((1167, 1187), 'pandas.unique', 'pd.unique', (['df[which]'], {}), '(df[which])\n', (1176, 1187), True, 'import pandas as pd\n'), ((5829, 5849), 'numpy.std', 'np.std', (['df[c].values'], {}), '(df[c].values)\n', (5835, 5849), True, 'import numpy as np\n'), ((1822, 1840), 'pandas.unique', 'pd.unique', (['L[::-1]'], {}), '(L[::-1])\n', (1831, 1840), True, 'import pandas as pd\n'), ((1899, 1936), 'pandas.unique', 'pd.unique', (['[dct_attr[k] for k in L_k]'], {}), '([dct_attr[k] for k in L_k])\n', (1908, 1936), True, 'import pandas as pd\n'), ((5806, 5827), 'numpy.mean', 'np.mean', (['df[c].values'], {}), '(df[c].values)\n', (5813, 5827), True, 'import numpy as np\n'), ((684, 706), 'numpy.round', 'np.round', (['(413163 * 0.8)'], {}), '(413163 * 0.8)\n', (692, 706), True, 'import numpy as np\n'), ((4841, 4877), 'numpy.where', 'np.where', (["(languages == '__label__es')"], {}), "(languages == '__label__es')\n", (4849, 4877), True, 'import numpy as np\n'), ((4891, 4927), 'numpy.where', 'np.where', (["(languages == '__label__pt')"], {}), "(languages == '__label__pt')\n", (4899, 4927), True, 'import numpy as np\n'), ((4941, 4977), 'numpy.where', 'np.where', (["(languages == '__label__en')"], {}), "(languages == '__label__en')\n", (4949, 4977), True, 'import numpy as np\n'), ((776, 798), 'numpy.round', 'np.round', (['(413163 * 0.8)'], {}), '(413163 * 0.8)\n', (784, 798), True, 'import numpy as np\n')]
|
import os
from math import sqrt
import numpy as np
import scipy.optimize
from scipy.stats import chi2
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 15})
matplotlib.rc('text', usetex=False)
import matplotlib.pyplot as plt
from enrico import utils
from enrico import plotting
from enrico import environ
from enrico.config import get_config
from enrico.constants import LightcurvePath,FoldedLCPath,DAY_IN_SECOND
from enrico.submit import call
from enrico.RunGTlike import run, GenAnalysisObjects
from enrico import Loggin
from enrico.plotting import plot_errorbar_withuls
import astropy.io.fits as pyfits
pol0 = lambda x,p1: p1
pol1 = lambda x,p1,p2: p1+p2*x
class LightCurve(Loggin.Message):
"""Class to calculate light curves and variability indexes."""
def __init__(self, config, parent_filename=""):
super(LightCurve,self).__init__()
Loggin.Message.__init__(self)
self.parent_filename = os.path.abspath(parent_filename)
self.config = get_config(config)
self.generalconfig = get_config(config)
print((self.generalconfig))
#Read the config
self.srcname = self.config['target']['name'] #src name
self.Tag = self.config['file']['tag']
self.tmin = self.config['time']['tmin']
self.tmax = self.config['time']['tmax']
self.submit = self.config['Submit']
# One point of the LC will be computed as a spectrum plot.
# enrico_sed will be used
        # Whether FITS files are generated follows the LightCurve setting below
#self.config['target']['spectrum'] = 'PowerLaw' # simplify the spectrum
self.config['Spectrum']['FitsGeneration'] = self.config['LightCurve']['FitsGeneration']
#Freeze the Spectral index at a value of self.config['LightCurve']['SpectralIndex'] (no effect if 0)
self.config['Spectrum']['FrozenSpectralIndex'] = self.config['LightCurve']['SpectralIndex']
if (self.config['LightCurve']['SpectralIndex'] != 0):
self.config['UpperLimit']['SpectralIndex'] = self.config['LightCurve']['SpectralIndex']
#TS limit. Compute an UL if the TS is below TSLightCurve
self.config['UpperLimit']['TSlimit'] = self.config['LightCurve']['TSLightCurve']
self.folder = self.config['out']
# Do not create plots
self.config['Spectrum']['ResultPlots'] = 'no' # no
self.config['Spectrum']['ResultParentPlots'] = 'no' # no
self.config['Ebin']['NumEnergyBins'] = 0
self.config['energy']['decorrelation_energy'] = 'yes' # no
self.config['UpperLimit']['envelope'] = 'no'
        # Submission will be handled directly by this software
self.config['Submit'] = 'no'
# self.config['verbose'] ='no' #Be quiet
# Speed-up the analysis by reusing the evt file from the main analysis
self._RecycleEvtCoarse()
self.configfile = [] #All the config file in the disk are stored in a list
def _RecycleEvtCoarse(self):
''' Try to guess if there's an EvtCoarse file with the events extracted, reuse it '''
import os.path
evtcoarsefile = str("%s/%s_%s_EvtCoarse.fits"%(self.folder,self.srcname,self.Tag))
if os.path.isfile(evtcoarsefile):
print(("reusing %s as event file to speed-up the analysis" %evtcoarsefile))
self.config['file']['event'] = evtcoarsefile
def _MakeTimeBins(self):
self.time_array = np.zeros(0)
self.Nbin = 0
self.gtifile = []
if self.config['time']['file'] != '':
if ".fit" not in self.config['time']['file']:
# Assume it is a text file
print(("use "+self.config['time']['file']))
self.gtifile.append(self.config['time']['file'])
times = np.genfromtxt(self.gtifile[0],dtype="float",unpack=True)
self.Nbin = int(times.size/2)
self.time_array=np.reshape(times,times.size,'F')
if self.config['time']['type']=='MJD':
self.time_array = utils.MJD_to_met(self.time_array)
elif self.config['time']['type']=='JD':
self.time_array = utils.JD_to_met(self.time_array)
else:
# Assume it is a catalog.fits file
# get from the header the BEGIN and END time
with pyfits.open(self.config['time']['file']) as catfile:
self.tmin = catfile[1].header['TSTART']
self.tmax = catfile[1].header['TSTOP']
self.Nbin = self.config['LightCurve']['NLCbin']
self.time_array = np.zeros(self.Nbin*2)
t = np.arange(self.tmin,self.tmax+1e-5,\
(self.tmax - self.tmin) / self.Nbin)
for i in range(self.Nbin):
self.time_array[2*i] = t[i]
self.time_array[2*i+1]= t[i+1]
else:
self.Nbin = int(self.config['LightCurve']['NLCbin'])
self.time_array = np.zeros(self.Nbin*2)
# self.dt = (self.tmax - self.tmin) / self.Nbin
t = np.arange(self.tmin,self.tmax+0.000001,(self.tmax - self.tmin) / self.Nbin)
for i in range(self.Nbin):
self.time_array[2*i] = t[i]
self.time_array[2*i+1]= t[i+1]
self.info("Running LC with "+str(self.Nbin)+" bins")
for i in range(self.Nbin):
print(("Bin ",i," Start=",self.time_array[2*i]," Stop=",self.time_array[2*i+1]))
print()
def _errorReading(self,message,i):
self.warning(message+" : "+self.configfile[i])
print(("Job Number : {}".format(i)))
self.warning("Please have a look at this job log file")
def _ManageFolder(self,path):
""" All files will be stored in a subfolder name path + NLCbin
Create a subfolder"""
self.LCfolder = self.folder+"/LightCurve_"+str(self.Nbin)+"bins/"
utils.mkdir_p(self.LCfolder)
self.config['out'] = self.LCfolder
def PrepareLC(self,write = 'no'):
"""Simple function to prepare the LC generation : generate and write the config files"""
for i in range(self.Nbin):
self.config['time']['tmin'] = self.time_array[2*i]
self.config['time']['tmax'] = self.time_array[2*i+1]
self.config['file']['tag'] = self.Tag + '_LC_' + str(i)
self.config['target']['spectrum'] = 'PowerLaw' # simplify the spectrum
filename = (self.config['out'] + "Config_" + str(i) + "_" +
str(self.config['time']['tmin']) + "_" +
str(self.config['time']['tmax']))#Name of the config file
xmlfilename = (self.config['out'] + "" + str(i) + "_" +
str(self.config['time']['tmin']) + "_" +
str(self.config['time']['tmax'])) + ".xml" #Name of the xml file
self.config['file']['xml'] = xmlfilename
# Do not produce spectral plots
if len(self.gtifile)==1:
self.config['time']['file']=self.gtifile[0]
elif len(self.gtifile)>1:
print(('Time selection file for bin {0} = {1}'.format(i,self.gtifile[i])))
self.config['time']['file']=self.gtifile[i]
if write == 'yes':
self.config.filename = filename
#self.config.write(open(filename, 'wb'))
self.config.write()
self.configfile.append(filename)
def _MakeLC(self,Path=LightcurvePath) :
#import gc
import os
#gc.enable()
'''Main function of the Lightcurve script. Read the config file and run the gtlike analysis'''
enricodir = environ.DIRS.get('ENRICO_DIR')
fermidir = environ.DIRS.get('FERMI_DIR')
self.PrepareLC(self.config['LightCurve']['MakeConfFile'])#Get the config file
for i in range(self.Nbin):
#gc.collect()
cmd = str("enrico_sed %s && enrico_plot_lc %s" %(self.configfile[i], self.parent_filename))
if self.submit == 'yes':
scriptname = self.LCfolder+"LC_Script_"+str(i)+".sh"
JobLog = self.LCfolder+"LC_Job_"+str(i)+".log"
JobName = (self.config['target']['name'] + "_" +
self.config['analysis']['likelihood'] +
"_LC_" + self.config['file']['tag'])+"_"+str(i)+".log"
call(cmd,enricodir,fermidir,scriptname,JobLog,JobName)#Submit the job
else :
os.system(cmd)
#run(self.configfile[i])#run in command line
def _MakePhasebin(self):
"""document me """
self.time_array = np.zeros(self.Nbin*2)
self.config['time']['type'] = 'MJD'
T0 = self.config['FoldedLC']['epoch']
Period = self.config['FoldedLC']['Period']
t1 = utils.met_to_MJD(self.config['time']['tmin'])
t2 = utils.met_to_MJD(self.config['time']['tmax'])
if T0==0:
T0=t1
elif t1 < T0:
T0 -= np.ceil((T0-t1)/Period)*Period
# find orbit numbers covered by range (t1,t2)
norbt1 = int(np.floor((t1-T0)/Period))
norbt2 = int(np.ceil((t2-T0)/Period))
phase = np.linspace(0,1,self.Nbin+1)
self.gtifile=[] #reset gtifiles
for i in range(self.Nbin):
self.time_array[2*i] = self.config['time']['tmin']
self.time_array[2*i+1] = self.config['time']['tmax']
gtifn = os.path.join(self.LCfolder,"TimeSelection_{0:02.0f}.dat".format(i))
ints=[]
for norb in range(norbt1,norbt2+1):
ints.append(((T0+(norb+phase[i])*Period),(T0+(norb+phase[i+1])*Period)))
tsel = np.array(ints)
np.savetxt(gtifn,tsel)
self.gtifile.append(gtifn)
def MakeLC(self) :
"""Run a std lc """
self._MakeTimeBins()
self._ManageFolder(LightcurvePath)
self._MakeLC()
def MakeFoldedLC(self):
"""run a folded lc """
self.Nbin = self.config['FoldedLC']['NLCbin']
self._ManageFolder(FoldedLCPath)
self._MakePhasebin()
self._MakeLC()
def PlotLC(self):
'''Plot a lightcurve which have been generated previously'''
self._MakeTimeBins()
self._ManageFolder(LightcurvePath)
self.PrepareLC()#Get the config file
self._PlotLC()
def PlotFoldedLC(self):
'''Plot a lightcurve which have been generated previously'''
self.Nbin = self.config['FoldedLC']['NLCbin']
self._ManageFolder(FoldedLCPath)
self._MakePhasebin()
self.PrepareLC()#Get the config file
self._PlotLC(True)
def _PlotLC(self,folded=False):
self.info("Reading files produced by enrico")
LcOutPath = self.LCfolder + self.config['target']['name']
        #Results are stored in lists. This allows dropping the bins which failed.
Time = []
TimeErr = []
Flux = []
FluxErr = []
# FluxErrChi2 = []
Index = []
IndexErr = []
Cutoff = []
CutoffErr = []
FluxForNpred = []
# FluxErrForNpred = []
Npred = []
Npred_detected_indices = []
TS = []
uplim = []
# Find name used for index parameter
if ((self.config['target']['spectrum'] == 'PowerLaw' or
self.config['target']['spectrum'] == 'PowerLaw2') and
self.config['target']['redshift'] == 0):
IndexName = 'Index'
CutoffName = None
elif (self.config['target']['spectrum'] == 'PLExpCutoff' or
self.config['target']['spectrum'] == 'PLSuperExpCutoff'):
IndexName = 'Index1'
CutoffName = 'Cutoff'
CutoffErrName = 'dCutoff'
else:
IndexName = 'alpha'
CutoffName = None
IndexErrName = 'd' + IndexName
Nfail = 0
for i in range(self.Nbin):
CurConfig = get_config(self.configfile[i])
            #Read the result. If it fails, it means that the bin has not been computed. A warning message is printed
try :
ResultDic = utils.ReadResult(CurConfig)
if ResultDic == {}:
raise(ValueError)
except :
self._errorReading("Fail reading config file",i)
Nfail+=1
continue
#Update the time and time error array
Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.)
TimeErr.append((ResultDic.get("tmax")-ResultDic.get("tmin"))/2.)
            #Check if an UL has been computed. The error is set to zero for the TGraph.
if 'Ulvalue' in ResultDic :
uplim.append(1)
Flux.append(ResultDic.get("Ulvalue"))
# FluxErr.append(0)
# FluxErrChi2.append(ResultDic.get("dFlux"))
# Index.append(ResultDic.get(IndexName))
# IndexErr.append(0)
else :
uplim.append(0)
Flux.append(ResultDic.get("Flux"))
FluxErr.append(ResultDic.get("dFlux"))
# FluxErrChi2.append(ResultDic.get("dFlux"))
Index.append(ResultDic.get(IndexName))
IndexErr.append(ResultDic.get(IndexErrName))
# if CutoffName is not None:
# Cutoff.append(ResultDic.get(CutoffName))
# CutoffErr.append(ResultDic.get(CutoffErrName))
# FluxErrForNpred.append(ResultDic.get("dFlux"))
FluxForNpred.append(ResultDic.get("Flux"))
#Get the Npred and TS values
Npred.append(ResultDic.get("Npred"))
TS.append(ResultDic.get("TS"))
if (CurConfig['LightCurve']['TSLightCurve']<float(ResultDic.get("TS"))):
Npred_detected_indices.append(i-Nfail)
# #change the list into np array
# TS = np.array(TS)
Npred = np.asarray(Npred)
Npred_detected = np.asarray(Npred[Npred_detected_indices])
Time = np.asarray(Time)
TimeErr = np.asarray(TimeErr)
if self.config['time']['type']=='MJD':
Time = utils.MJD_to_met(Time)
TimeErr = TimeErr*DAY_IN_SECOND
elif self.config['time']['type']=='JD':
Time = utils.JD_to_met(Time)
TimeErr = TimeErr*DAY_IN_SECOND
Flux = np.asarray(Flux)
FluxErr = np.asarray(FluxErr)
# Index = np.array(Index)
# IndexErr = np.array(IndexErr)
# Cutoff = np.array(Cutoff)
# CutoffErr = np.array(CutoffErr)
FluxForNpred = np.asarray(FluxForNpred)
# FluxErrForNpred = np.array(FluxErrForNpred)
uplim = np.asarray(uplim,dtype=bool)
#Plots the diagnostic plots is asked
# Plots are : Npred vs flux
# TS vs Time
if self.config['LightCurve']['DiagnosticPlots'] == 'yes' and len(Npred)>0:
#plot Npred vs flux
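            # Diagnostic: for well-estimated errors, Flux/dFlux should grow roughly
            # linearly with Npred/sqrt(Npred); bins lying far above the fitted line
            # are flagged below as having suspicious flux uncertainties.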
plt.figure()
NdN = np.asarray(Npred) /np.sqrt(Npred)
FdF = np.asarray(FluxForNpred) / (np.asarray(FluxErr) + 1e-20)
plt.errorbar(NdN, FdF,fmt='+',color='black')
if len(Npred_detected)>2:
NdN = np.asarray(Npred_detected) /np.sqrt(Npred_detected)
FdF = np.asarray(FluxForNpred[Npred_detected_indices]) / (np.asarray(FluxErr[Npred_detected_indices]) + 1e-20)
plt.errorbar(NdN, FdF,fmt='+',color='red')
popt,_ = scipy.optimize.curve_fit(pol1, NdN, FdF, p0=[0,1])#, sigma=dydata)
for i in range(len(FluxForNpred)):
if FluxForNpred[i]/FluxErr[i]>2*pol1(sqrt(Npred[i]),popt[0],popt[1]):
self._errorReading("problem in errors calculation for",i)
print(("Flux +/- error = ",FluxForNpred[i]," +/- ",FluxErr[i]))
print(("V(Npred) = ",sqrt(Npred[i])))
print()
plt.plot(np.array([0,max(NdN)]),pol1(np.array([0,max(NdN)]),popt[0],popt[1]),'--',color='black')
plt.xlabel(r"${\rm Npred/\sqrt{Npred}}$")
plt.ylabel(r"${\rm Flux/\Delta Flux}$")
plt.savefig(LcOutPath+"_Npred.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
else :
print("No Npred Plot produced")
#plot TS vs Time
plt.figure()
plt.xlabel(r"Time (s)")
plt.ylabel(r"Test Statistic")
plt.errorbar(x=Time,y=TS,xerr=TimeErr,fmt='+',color='black',ls='None')
plt.ylim(ymin=min(TS)*0.8,ymax=max(TS)*1.2)
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time)))
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_TS.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
# Plot the LC itself. This function return a TH2F for a nice plot
# a TGraph and a list of TArrow for the ULs
# if folded:
# phase = np.linspace(0,1,self.Nbin+1)
# Time = (phase[1:]+phase[:-1])/2.
# TimeErr = (phase[1:]-phase[:-1])/2.
# gTHLC,TgrLC,ArrowLC = plotting.PlotFoldedLC(Time,TimeErr,Flux,FluxErr)
# gTHIndex,TgrIndex,ArrowIndex = plotting.PlotFoldedLC(Time,TimeErr,Index,IndexErr)
# if CutoffName is not None:
# gTHCutoff,TgrCutoff,ArrowCutoff = plotting.PlotFoldedLC(Time,TimeErr,Cutoff,CutoffErr)
# else :
# gTHLC,TgrLC,ArrowLC = plotting.PlotLC(Time,TimeErr,Flux,FluxErr)
# gTHIndex,TgrIndex,ArrowIndex = plotting.PlotLC(Time,TimeErr,Index,IndexErr)
# if CutoffName is not None:
# gTHCutoff,TgrCutoff,ArrowCutoff = plotting.PlotFoldedLC(Time,TimeErr,Cutoff,CutoffErr)
# xmin = min(Time) - max(TimeErr) * 10
# xmax = max(Time) + max(TimeErr) * 10
# ymin = min(Flux) - max(FluxErr) * 1.3
# ymax = max(Flux) + max(FluxErr) * 1.3
plt.figure()
plt.xlabel(r"Time (s)")
plt.ylabel(r"${\rm Flux\ (photon\ cm^{-2}\ s^{-1})}$")
# plt.ylim(ymin=ymin,ymax=ymax)
# plt.xlim(xmin=xmin,xmax=xmax)
#plt.errorbar(Time,Flux,xerr=TimeErr,yerr=FluxErr,i
# fmt='o',color='black',ls='None',uplims=uplim)
plot_errorbar_withuls(Time,TimeErr,TimeErr,Flux,FluxErr,FluxErr,
uplim,bblocks=True)
try:
plt.ylim(ymin=max(plt.ylim()[0],np.percentile(Flux[~uplim],1)*0.1),
ymax=min(plt.ylim()[1],np.percentile(Flux[~uplim],99)*2.0))
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),
xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time)))
except IndexError:
pass
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) \
for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\
r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_LC.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
if self.config["LightCurve"]["SpectralIndex"] == 0 :
plt.figure()
plt.xlabel(r"Time (s)")
plt.ylabel(r"${\rm Index}$")
Index = np.asarray(Index)
IndexErr = np.asarray(IndexErr)
uplimIndex = uplim #+ Index<0.55
plot_errorbar_withuls(Time[~uplimIndex],
TimeErr[~uplimIndex],
TimeErr[~uplimIndex],
Index[~uplimIndex],
IndexErr[~uplimIndex],
IndexErr[~uplimIndex],
uplimIndex[~uplimIndex],
bblocks=True)
plt.ylim(ymin=max(plt.ylim()[0],np.percentile(Index[~uplimIndex],1)*0.1),
ymax=min(plt.ylim()[1],np.percentile(Index[~uplimIndex],99)*2.0))
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),
xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time)))
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) \
for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\
r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_Index.png", dpi=150,
facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
# compute Fvar and probability of being cst
self.info("Flux vs Time: infos")
self.FitWithCst(Time,Flux,FluxErr)
self.Fvar(Flux,FluxErr)
# ### plot and save the Index LC
# CanvIndex = ROOT.TCanvas()
# gTHIndex.Draw()
# TgrIndex.Draw('zP')
# #plot the ul as arrow
# for i in xrange(len(ArrowIndex)):
# ArrowIndex[i].Draw()
# #Save the canvas in the LightCurve subfolder
# if self.config["LightCurve"]["SpectralIndex"] == 0 :
# self.info("Index vs Time")
# self.FitWithCst(Time,Index,IndexErr)
# CanvIndex.Print(LcOutPath+'_Index.png')
# CanvIndex.Print(LcOutPath+'_Index.eps')
# CanvIndex.Print(LcOutPath+'_Index.C')
#Dump into ascii
lcfilename = LcOutPath+"_results.dat"
self.info("Write to Ascii file : "+lcfilename)
WriteToAscii(Time,TimeErr,Flux,FluxErr,Index,IndexErr,
Cutoff,CutoffErr,TS,Npred,lcfilename)
if self.config["LightCurve"]['ComputeVarIndex'] == 'yes':
self.VariabilityIndex()
def Fvar(self,Flux,FluxErr):
"""Compute the Fvar as defined in Vaughan et al."""
moy=np.average(Flux)
var=np.var(Flux)
expvar=np.average((np.array(FluxErr))**2)
intvar=var-expvar #Correct for errors
try :
fvar=sqrt(intvar)/moy
err_fvar = sqrt( ( 1./sqrt(2*len(Flux))*expvar/moy**2/fvar)**2 + (sqrt(expvar/len(Flux))*1./moy)**2)
self.info("Calculation of Fvar (Vaughan et al. 2003)")
print(("\tFvar = ",fvar," +/- ",err_fvar))
except :
print(("\tFvar is negative, Fvar**2 = %2.2e +/- %2.2e"%(intvar/(moy*moy), ((1./sqrt(2*len(Flux))*expvar/moy**2)**2/(intvar/(moy*moy)) + (sqrt(expvar/len(Flux))*1./moy)**2))))
print()
def FitWithCst(self,x,y,dy):
"""Fit the LC with a constant function an
print the chi2 and proba"""
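        # chi2 = sum(((c - y_i)/dy_i)^2) for the best-fit constant c, with
        # NDF = len(y) - 1; the quoted probability is 1 - CDF_chi2(chi2, NDF).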
res,_ = scipy.optimize.curve_fit(pol0,x,y,p0=[np.mean(y)],sigma=dy)
#print res
#print y
#print dy
cost = np.sum(((pol0(x,res[0])-y)/dy)**2)
self.info("Fit with a constant function")
print(('\tChi2 = ',cost," NDF = ",len(y)-1))
print(('\tprobability of being cst = ',1 - chi2.cdf(cost,len(y)-1)))
print()
def VariabilityIndex(self):
"""Compute the variability index as in the 2FGL catalogue. (see Nolan et al, 2012)"""
LcOutPath = self.LCfolder + self.config['target']['name']
utils._log('Computing Variability index ')
self.config['Spectrum']['FitsGeneration'] = 'no'
try :
ResultDicDC = utils.ReadResult(self.generalconfig)
except :
self.warning("No results file found; please run enrico_sed first.")
return
LogL1 = []
LogL0 = []
Time = []
for i in range(self.Nbin):
CurConfig = get_config(self.configfile[i])
            #Read the result. If it fails, it means that the bin has not been computed. A warning message is printed
try :
ResultDic = utils.ReadResult(CurConfig)
except :
self._errorReading("Fail reading the config file ",i)
continue
# LogL1.append(ResultDic.get("log_like"))
#Update the time and time error array
Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.)
##############################################################
# Compute the loglike value using the DC flux or prefactor
##############################################################
# Create one obs instance
CurConfig['Spectrum']['FitsGeneration'] = 'no'
_,Fit = GenAnalysisObjects(CurConfig,verbose=0)#be quiet
Fit.ftol = float(self.config['fitting']['ftol'])
#Spectral index management!
parameters = dict()
parameters['Index'] = -2.
parameters['alpha'] = +2.
parameters['Index1'] = -2.
parameters['beta'] = 0
parameters['Index2'] = 2.
parameters['Cutoff'] = 100000. # set the cutoff to be high
for key in list(parameters.keys()):
try:
utils.FreezeParams(Fit, self.srcname, key, parameters[key])
except:
continue
LogL1.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer']))
for key in ["norm","Prefactor","Integral"]:
try:
                    utils.FreezeParams(Fit,self.srcname,key, utils.fluxNorm(ResultDicDC[key]))
except:
continue
LogL0.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer']))
del Fit #Clean memory
plt.figure()
plt.xlabel("Time")
plt.ylabel("Log(Like) Variability")
plt.errorbar(Time,LogL0,fmt='o',color='black',ls='None')
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),
xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time)))
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) \
for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\
r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_VarIndex.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
self.info("Variability index calculation")
print(("\t TSvar = ",2*(sum(LogL1)-sum(LogL0))))
print(("\t NDF = ",len(LogL0)-1))
print(("\t Chi2 prob = ",1 - chi2.cdf(2*(sum(LogL1)-sum(LogL0)),len(LogL0)-1)))
print()
def WriteToAscii(Time, TimeErr, Flux, FluxErr, Index, IndexErr, Cutoff, CutoffErr, TS, Npred, filename):
"""Write the results of the LC in a Ascii file"""
flc = open(filename, 'w')
if len(Cutoff) == 0:
flc.write('# Time (MET) Delta_Time Flux(ph cm-2 s-1) '
'Delta_Flux Index Delta_Index TS Npred\n')
for i in range(len(Time)):
flc.write(str(Time[i]) + "\t" + str(TimeErr[i]) + "\t" +
str(Flux[i]) + "\t" + str(FluxErr[i]) + "\t" +
str(Index[i]) + "\t" + str(IndexErr[i]) + "\t" +
str(TS[i]) + "\t" + str(Npred[i]) + "\n")
else:
flc.write('# Time (MET) Delta_Time Flux(ph cm-2 s-1) '
'Delta_Flux Index Delta_Index Cutoff Delta_Cutoff TS Npred\n')
for i in range(len(Time)):
flc.write(str(Time[i]) + "\t" + str(TimeErr[i]) + "\t" +
str(Flux[i]) + "\t" + str(FluxErr[i]) + "\t" +
str(Index[i]) + "\t" + str(IndexErr[i]) + "\t" +
str(Cutoff[i]) + "\t" + str(CutoffErr[i]) + "\t" +
str(TS[i]) + "\t" + str(Npred[i]) + "\n")
flc.close()
|
[
"matplotlib.rc",
"numpy.floor",
"enrico.config.get_config",
"enrico.submit.call",
"os.path.isfile",
"matplotlib.pyplot.figure",
"enrico.utils._log",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"enrico.environ.DIRS.get",
"enrico.utils.MJD_to_met",
"matplotlib.ticker.ScalarFormatter",
"os.path.abspath",
"enrico.Loggin.Message.__init__",
"numpy.savetxt",
"numpy.genfromtxt",
"enrico.utils.met_to_MJD",
"numpy.reshape",
"numpy.linspace",
"enrico.utils.JD_to_met",
"numpy.var",
"matplotlib.pyplot.errorbar",
"enrico.plotting.plot_errorbar_withuls",
"numpy.average",
"numpy.ceil",
"math.sqrt",
"matplotlib.pyplot.ylim",
"numpy.asarray",
"os.system",
"numpy.percentile",
"matplotlib.use",
"astropy.io.fits.open",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"enrico.utils.fluxNorm",
"enrico.utils.ReadResult",
"numpy.zeros",
"enrico.utils.mkdir_p",
"enrico.RunGTlike.GenAnalysisObjects",
"numpy.array",
"enrico.utils.FreezeParams",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((120, 141), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (134, 141), False, 'import matplotlib\n'), ((142, 232), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Computer Modern'],\n 'size': 15})\n", (155, 232), False, 'import matplotlib\n'), ((229, 264), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (242, 264), False, 'import matplotlib\n'), ((938, 967), 'enrico.Loggin.Message.__init__', 'Loggin.Message.__init__', (['self'], {}), '(self)\n', (961, 967), False, 'from enrico import Loggin\n'), ((999, 1031), 'os.path.abspath', 'os.path.abspath', (['parent_filename'], {}), '(parent_filename)\n', (1014, 1031), False, 'import os\n'), ((1061, 1079), 'enrico.config.get_config', 'get_config', (['config'], {}), '(config)\n', (1071, 1079), False, 'from enrico.config import get_config\n'), ((1109, 1127), 'enrico.config.get_config', 'get_config', (['config'], {}), '(config)\n', (1119, 1127), False, 'from enrico.config import get_config\n'), ((3242, 3271), 'os.path.isfile', 'os.path.isfile', (['evtcoarsefile'], {}), '(evtcoarsefile)\n', (3256, 3271), False, 'import os\n'), ((3474, 3485), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3482, 3485), True, 'import numpy as np\n'), ((6057, 6085), 'enrico.utils.mkdir_p', 'utils.mkdir_p', (['self.LCfolder'], {}), '(self.LCfolder)\n', (6070, 6085), False, 'from enrico import utils\n'), ((7833, 7863), 'enrico.environ.DIRS.get', 'environ.DIRS.get', (['"""ENRICO_DIR"""'], {}), "('ENRICO_DIR')\n", (7849, 7863), False, 'from enrico import environ\n'), ((7883, 7912), 'enrico.environ.DIRS.get', 'environ.DIRS.get', (['"""FERMI_DIR"""'], {}), "('FERMI_DIR')\n", (7899, 7912), False, 'from enrico import environ\n'), ((8824, 8847), 'numpy.zeros', 'np.zeros', (['(self.Nbin * 2)'], {}), '(self.Nbin * 2)\n', (8832, 8847), True, 'import numpy as np\n'), ((9000, 9045), 'enrico.utils.met_to_MJD', 'utils.met_to_MJD', (["self.config['time']['tmin']"], {}), "(self.config['time']['tmin'])\n", (9016, 9045), False, 'from enrico import utils\n'), ((9059, 9104), 'enrico.utils.met_to_MJD', 'utils.met_to_MJD', (["self.config['time']['tmax']"], {}), "(self.config['time']['tmax'])\n", (9075, 9104), False, 'from enrico import utils\n'), ((9379, 9411), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(self.Nbin + 1)'], {}), '(0, 1, self.Nbin + 1)\n', (9390, 9411), True, 'import numpy as np\n'), ((14177, 14194), 'numpy.asarray', 'np.asarray', (['Npred'], {}), '(Npred)\n', (14187, 14194), True, 'import numpy as np\n'), ((14220, 14261), 'numpy.asarray', 'np.asarray', (['Npred[Npred_detected_indices]'], {}), '(Npred[Npred_detected_indices])\n', (14230, 14261), True, 'import numpy as np\n'), ((14277, 14293), 'numpy.asarray', 'np.asarray', (['Time'], {}), '(Time)\n', (14287, 14293), True, 'import numpy as np\n'), ((14312, 14331), 'numpy.asarray', 'np.asarray', (['TimeErr'], {}), '(TimeErr)\n', (14322, 14331), True, 'import numpy as np\n'), ((14622, 14638), 'numpy.asarray', 'np.asarray', (['Flux'], {}), '(Flux)\n', (14632, 14638), True, 'import numpy as np\n'), ((14657, 14676), 'numpy.asarray', 'np.asarray', (['FluxErr'], {}), '(FluxErr)\n', (14667, 14676), True, 'import numpy as np\n'), ((14852, 14876), 'numpy.asarray', 'np.asarray', (['FluxForNpred'], {}), '(FluxForNpred)\n', (14862, 14876), True, 'import numpy as np\n'), ((14947, 14976), 'numpy.asarray', 'np.asarray', (['uplim'], {'dtype': 'bool'}), '(uplim, dtype=bool)\n', (14957, 14976), True, 'import 
numpy as np\n'), ((19454, 19466), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19464, 19466), True, 'import matplotlib.pyplot as plt\n'), ((19475, 19497), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (19485, 19497), True, 'import matplotlib.pyplot as plt\n'), ((19507, 19564), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\rm Flux\\\\ (photon\\\\ cm^{-2}\\\\ s^{-1})}$"""'], {}), "('${\\\\rm Flux\\\\ (photon\\\\ cm^{-2}\\\\ s^{-1})}$')\n", (19517, 19564), True, 'import matplotlib.pyplot as plt\n'), ((19778, 19872), 'enrico.plotting.plot_errorbar_withuls', 'plot_errorbar_withuls', (['Time', 'TimeErr', 'TimeErr', 'Flux', 'FluxErr', 'FluxErr', 'uplim'], {'bblocks': '(True)'}), '(Time, TimeErr, TimeErr, Flux, FluxErr, FluxErr, uplim,\n bblocks=True)\n', (19799, 19872), False, 'from enrico.plotting import plot_errorbar_withuls\n'), ((20330, 20339), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20337, 20339), True, 'import matplotlib.pyplot as plt\n'), ((21091, 21109), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21107, 21109), True, 'import matplotlib.pyplot as plt\n'), ((21119, 21320), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(LcOutPath + '_LC.png')"], {'dpi': '(150)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': 'None', 'pad_inches': '(0.1)', 'frameon': 'None'}), "(LcOutPath + '_LC.png', dpi=150, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None, transparent=False,\n bbox_inches=None, pad_inches=0.1, frameon=None)\n", (21130, 21320), True, 'import matplotlib.pyplot as plt\n'), ((24839, 24855), 'numpy.average', 'np.average', (['Flux'], {}), '(Flux)\n', (24849, 24855), True, 'import numpy as np\n'), ((24868, 24880), 'numpy.var', 'np.var', (['Flux'], {}), '(Flux)\n', (24874, 24880), True, 'import numpy as np\n'), ((26181, 26223), 'enrico.utils._log', 'utils._log', (['"""Computing Variability index """'], {}), "('Computing Variability index ')\n", (26191, 26223), False, 'from enrico import utils\n'), ((28536, 28548), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28546, 28548), True, 'import matplotlib.pyplot as plt\n'), ((28557, 28575), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (28567, 28575), True, 'import matplotlib.pyplot as plt\n'), ((28584, 28619), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log(Like) Variability"""'], {}), "('Log(Like) Variability')\n", (28594, 28619), True, 'import matplotlib.pyplot as plt\n'), ((28628, 28688), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['Time', 'LogL0'], {'fmt': '"""o"""', 'color': '"""black"""', 'ls': '"""None"""'}), "(Time, LogL0, fmt='o', color='black', ls='None')\n", (28640, 28688), True, 'import matplotlib.pyplot as plt\n'), ((28887, 28896), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (28894, 28896), True, 'import matplotlib.pyplot as plt\n'), ((29649, 29667), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29665, 29667), True, 'import matplotlib.pyplot as plt\n'), ((29677, 29886), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(LcOutPath + '_VarIndex.png')"], {'dpi': '(150)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': 'None', 'pad_inches': '(0.1)', 'frameon': 'None'}), "(LcOutPath + 
'_VarIndex.png', dpi=150, facecolor='w', edgecolor=\n 'w', orientation='portrait', papertype=None, format=None, transparent=\n False, bbox_inches=None, pad_inches=0.1, frameon=None)\n", (29688, 29886), True, 'import matplotlib.pyplot as plt\n'), ((5122, 5145), 'numpy.zeros', 'np.zeros', (['(self.Nbin * 2)'], {}), '(self.Nbin * 2)\n', (5130, 5145), True, 'import numpy as np\n'), ((5219, 5295), 'numpy.arange', 'np.arange', (['self.tmin', '(self.tmax + 1e-06)', '((self.tmax - self.tmin) / self.Nbin)'], {}), '(self.tmin, self.tmax + 1e-06, (self.tmax - self.tmin) / self.Nbin)\n', (5228, 5295), True, 'import numpy as np\n'), ((9290, 9318), 'numpy.floor', 'np.floor', (['((t1 - T0) / Period)'], {}), '((t1 - T0) / Period)\n', (9298, 9318), True, 'import numpy as np\n'), ((9337, 9364), 'numpy.ceil', 'np.ceil', (['((t2 - T0) / Period)'], {}), '((t2 - T0) / Period)\n', (9344, 9364), True, 'import numpy as np\n'), ((9877, 9891), 'numpy.array', 'np.array', (['ints'], {}), '(ints)\n', (9885, 9891), True, 'import numpy as np\n'), ((9904, 9927), 'numpy.savetxt', 'np.savetxt', (['gtifn', 'tsel'], {}), '(gtifn, tsel)\n', (9914, 9927), True, 'import numpy as np\n'), ((12167, 12197), 'enrico.config.get_config', 'get_config', (['self.configfile[i]'], {}), '(self.configfile[i])\n', (12177, 12197), False, 'from enrico.config import get_config\n'), ((14402, 14424), 'enrico.utils.MJD_to_met', 'utils.MJD_to_met', (['Time'], {}), '(Time)\n', (14418, 14424), False, 'from enrico import utils\n'), ((15217, 15229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15227, 15229), True, 'import matplotlib.pyplot as plt\n'), ((15369, 15415), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['NdN', 'FdF'], {'fmt': '"""+"""', 'color': '"""black"""'}), "(NdN, FdF, fmt='+', color='black')\n", (15381, 15415), True, 'import matplotlib.pyplot as plt\n'), ((16821, 16833), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16831, 16833), True, 'import matplotlib.pyplot as plt\n'), ((16846, 16868), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (16856, 16868), True, 'import matplotlib.pyplot as plt\n'), ((16882, 16910), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test Statistic"""'], {}), "('Test Statistic')\n", (16892, 16910), True, 'import matplotlib.pyplot as plt\n'), ((16924, 16999), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'Time', 'y': 'TS', 'xerr': 'TimeErr', 'fmt': '"""+"""', 'color': '"""black"""', 'ls': '"""None"""'}), "(x=Time, y=TS, xerr=TimeErr, fmt='+', color='black', ls='None')\n", (16936, 16999), True, 'import matplotlib.pyplot as plt\n'), ((17247, 17256), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (17254, 17256), True, 'import matplotlib.pyplot as plt\n'), ((18025, 18043), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18041, 18043), True, 'import matplotlib.pyplot as plt\n'), ((18057, 18258), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(LcOutPath + '_TS.png')"], {'dpi': '(150)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': 'None', 'pad_inches': '(0.1)', 'frameon': 'None'}), "(LcOutPath + '_TS.png', dpi=150, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None, transparent=False,\n bbox_inches=None, pad_inches=0.1, frameon=None)\n", (18068, 18258), True, 'import matplotlib.pyplot as plt\n'), ((20962, 21012), 'matplotlib.ticker.ScalarFormatter', 
'matplotlib.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (20995, 21012), False, 'import matplotlib\n'), ((21433, 21445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21443, 21445), True, 'import matplotlib.pyplot as plt\n'), ((21458, 21480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (21468, 21480), True, 'import matplotlib.pyplot as plt\n'), ((21494, 21522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\rm Index}$"""'], {}), "('${\\\\rm Index}$')\n", (21504, 21522), True, 'import matplotlib.pyplot as plt\n'), ((21543, 21560), 'numpy.asarray', 'np.asarray', (['Index'], {}), '(Index)\n', (21553, 21560), True, 'import numpy as np\n'), ((21584, 21604), 'numpy.asarray', 'np.asarray', (['IndexErr'], {}), '(IndexErr)\n', (21594, 21604), True, 'import numpy as np\n'), ((21662, 21861), 'enrico.plotting.plot_errorbar_withuls', 'plot_errorbar_withuls', (['Time[~uplimIndex]', 'TimeErr[~uplimIndex]', 'TimeErr[~uplimIndex]', 'Index[~uplimIndex]', 'IndexErr[~uplimIndex]', 'IndexErr[~uplimIndex]', 'uplimIndex[~uplimIndex]'], {'bblocks': '(True)'}), '(Time[~uplimIndex], TimeErr[~uplimIndex], TimeErr[~\n uplimIndex], Index[~uplimIndex], IndexErr[~uplimIndex], IndexErr[~\n uplimIndex], uplimIndex[~uplimIndex], bblocks=True)\n', (21683, 21861), False, 'from enrico.plotting import plot_errorbar_withuls\n'), ((22482, 22491), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22489, 22491), True, 'import matplotlib.pyplot as plt\n'), ((23300, 23318), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23316, 23318), True, 'import matplotlib.pyplot as plt\n'), ((23331, 23535), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(LcOutPath + '_Index.png')"], {'dpi': '(150)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': 'None', 'pad_inches': '(0.1)', 'frameon': 'None'}), "(LcOutPath + '_Index.png', dpi=150, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None, transparent=False,\n bbox_inches=None, pad_inches=0.1, frameon=None)\n", (23342, 23535), True, 'import matplotlib.pyplot as plt\n'), ((26323, 26359), 'enrico.utils.ReadResult', 'utils.ReadResult', (['self.generalconfig'], {}), '(self.generalconfig)\n', (26339, 26359), False, 'from enrico import utils\n'), ((26592, 26622), 'enrico.config.get_config', 'get_config', (['self.configfile[i]'], {}), '(self.configfile[i])\n', (26602, 26622), False, 'from enrico.config import get_config\n'), ((27449, 27489), 'enrico.RunGTlike.GenAnalysisObjects', 'GenAnalysisObjects', (['CurConfig'], {'verbose': '(0)'}), '(CurConfig, verbose=0)\n', (27467, 27489), False, 'from enrico.RunGTlike import run, GenAnalysisObjects\n'), ((29520, 29570), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (29553, 29570), False, 'import matplotlib\n'), ((3831, 3889), 'numpy.genfromtxt', 'np.genfromtxt', (['self.gtifile[0]'], {'dtype': '"""float"""', 'unpack': '(True)'}), "(self.gtifile[0], dtype='float', unpack=True)\n", (3844, 3889), True, 'import numpy as np\n'), ((3966, 4000), 'numpy.reshape', 'np.reshape', (['times', 'times.size', '"""F"""'], {}), "(times, times.size, 'F')\n", (3976, 4000), True, 'import numpy as np\n'), ((8558, 8617), 'enrico.submit.call', 'call', (['cmd', 'enricodir', 'fermidir', 'scriptname', 'JobLog', 'JobName'], {}), '(cmd, 
enricodir, fermidir, scriptname, JobLog, JobName)\n', (8562, 8617), False, 'from enrico.submit import call\n'), ((8663, 8677), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (8672, 8677), False, 'import os\n'), ((12361, 12388), 'enrico.utils.ReadResult', 'utils.ReadResult', (['CurConfig'], {}), '(CurConfig)\n', (12377, 12388), False, 'from enrico import utils\n'), ((14539, 14560), 'enrico.utils.JD_to_met', 'utils.JD_to_met', (['Time'], {}), '(Time)\n', (14554, 14560), False, 'from enrico import utils\n'), ((15248, 15265), 'numpy.asarray', 'np.asarray', (['Npred'], {}), '(Npred)\n', (15258, 15265), True, 'import numpy as np\n'), ((15267, 15281), 'numpy.sqrt', 'np.sqrt', (['Npred'], {}), '(Npred)\n', (15274, 15281), True, 'import numpy as np\n'), ((15300, 15324), 'numpy.asarray', 'np.asarray', (['FluxForNpred'], {}), '(FluxForNpred)\n', (15310, 15324), True, 'import numpy as np\n'), ((15670, 15714), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['NdN', 'FdF'], {'fmt': '"""+"""', 'color': '"""red"""'}), "(NdN, FdF, fmt='+', color='red')\n", (15682, 15714), True, 'import matplotlib.pyplot as plt\n'), ((16343, 16385), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\rm Npred/\\\\sqrt{Npred}}$"""'], {}), "('${\\\\rm Npred/\\\\sqrt{Npred}}$')\n", (16353, 16385), True, 'import matplotlib.pyplot as plt\n'), ((16401, 16441), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""${\\\\rm Flux/\\\\Delta Flux}$"""'], {}), "('${\\\\rm Flux/\\\\Delta Flux}$')\n", (16411, 16441), True, 'import matplotlib.pyplot as plt\n'), ((16457, 16661), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(LcOutPath + '_Npred.png')"], {'dpi': '(150)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': 'None', 'pad_inches': '(0.1)', 'frameon': 'None'}), "(LcOutPath + '_Npred.png', dpi=150, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None, transparent=False,\n bbox_inches=None, pad_inches=0.1, frameon=None)\n", (16468, 16661), True, 'import matplotlib.pyplot as plt\n'), ((17888, 17938), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (17921, 17938), False, 'import matplotlib\n'), ((20833, 20852), 'enrico.utils.met_to_MJD', 'utils.met_to_MJD', (['k'], {}), '(k)\n', (20849, 20852), False, 'from enrico import utils\n'), ((23163, 23213), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (23196, 23213), False, 'import matplotlib\n'), ((24908, 24925), 'numpy.array', 'np.array', (['FluxErr'], {}), '(FluxErr)\n', (24916, 24925), True, 'import numpy as np\n'), ((25008, 25020), 'math.sqrt', 'sqrt', (['intvar'], {}), '(intvar)\n', (25012, 25020), False, 'from math import sqrt\n'), ((26786, 26813), 'enrico.utils.ReadResult', 'utils.ReadResult', (['CurConfig'], {}), '(CurConfig)\n', (26802, 26813), False, 'from enrico import utils\n'), ((29391, 29410), 'enrico.utils.met_to_MJD', 'utils.met_to_MJD', (['k'], {}), '(k)\n', (29407, 29410), False, 'from enrico import utils\n'), ((4114, 4147), 'enrico.utils.MJD_to_met', 'utils.MJD_to_met', (['self.time_array'], {}), '(self.time_array)\n', (4130, 4147), False, 'from enrico import utils\n'), ((4428, 4468), 'astropy.io.fits.open', 'pyfits.open', (["self.config['time']['file']"], {}), "(self.config['time']['file'])\n", (4439, 4468), True, 'import astropy.io.fits as pyfits\n'), ((4706, 4729), 
'numpy.zeros', 'np.zeros', (['(self.Nbin * 2)'], {}), '(self.Nbin * 2)\n', (4714, 4729), True, 'import numpy as np\n'), ((4752, 4828), 'numpy.arange', 'np.arange', (['self.tmin', '(self.tmax + 1e-05)', '((self.tmax - self.tmin) / self.Nbin)'], {}), '(self.tmin, self.tmax + 1e-05, (self.tmax - self.tmin) / self.Nbin)\n', (4761, 4828), True, 'import numpy as np\n'), ((9181, 9208), 'numpy.ceil', 'np.ceil', (['((T0 - t1) / Period)'], {}), '((T0 - t1) / Period)\n', (9188, 9208), True, 'import numpy as np\n'), ((15328, 15347), 'numpy.asarray', 'np.asarray', (['FluxErr'], {}), '(FluxErr)\n', (15338, 15347), True, 'import numpy as np\n'), ((15475, 15501), 'numpy.asarray', 'np.asarray', (['Npred_detected'], {}), '(Npred_detected)\n', (15485, 15501), True, 'import numpy as np\n'), ((15503, 15526), 'numpy.sqrt', 'np.sqrt', (['Npred_detected'], {}), '(Npred_detected)\n', (15510, 15526), True, 'import numpy as np\n'), ((15549, 15597), 'numpy.asarray', 'np.asarray', (['FluxForNpred[Npred_detected_indices]'], {}), '(FluxForNpred[Npred_detected_indices])\n', (15559, 15597), True, 'import numpy as np\n'), ((17751, 17770), 'enrico.utils.met_to_MJD', 'utils.met_to_MJD', (['k'], {}), '(k)\n', (17767, 17770), False, 'from enrico import utils\n'), ((23026, 23045), 'enrico.utils.met_to_MJD', 'utils.met_to_MJD', (['k'], {}), '(k)\n', (23042, 23045), False, 'from enrico import utils\n'), ((25657, 25667), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (25664, 25667), True, 'import numpy as np\n'), ((27985, 28044), 'enrico.utils.FreezeParams', 'utils.FreezeParams', (['Fit', 'self.srcname', 'key', 'parameters[key]'], {}), '(Fit, self.srcname, key, parameters[key])\n', (28003, 28044), False, 'from enrico import utils\n'), ((4243, 4275), 'enrico.utils.JD_to_met', 'utils.JD_to_met', (['self.time_array'], {}), '(self.time_array)\n', (4258, 4275), False, 'from enrico import utils\n'), ((15601, 15644), 'numpy.asarray', 'np.asarray', (['FluxErr[Npred_detected_indices]'], {}), '(FluxErr[Npred_detected_indices])\n', (15611, 15644), True, 'import numpy as np\n'), ((28320, 28353), 'enrico.utils.fluxNorm', 'utils.fluxNorm', (['ResultsDicDC[key]'], {}), '(ResultsDicDC[key])\n', (28334, 28353), False, 'from enrico import utils\n'), ((28711, 28721), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (28719, 28721), True, 'import matplotlib.pyplot as plt\n'), ((28783, 28793), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (28791, 28793), True, 'import matplotlib.pyplot as plt\n'), ((17081, 17091), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (17089, 17091), True, 'import matplotlib.pyplot as plt\n'), ((17135, 17145), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (17143, 17145), True, 'import matplotlib.pyplot as plt\n'), ((19945, 19955), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (19953, 19955), True, 'import matplotlib.pyplot as plt\n'), ((19959, 19989), 'numpy.percentile', 'np.percentile', (['Flux[~uplim]', '(1)'], {}), '(Flux[~uplim], 1)\n', (19972, 19989), True, 'import numpy as np\n'), ((20025, 20035), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (20033, 20035), True, 'import matplotlib.pyplot as plt\n'), ((20039, 20070), 'numpy.percentile', 'np.percentile', (['Flux[~uplim]', '(99)'], {}), '(Flux[~uplim], 99)\n', (20052, 20070), True, 'import numpy as np\n'), ((20106, 20116), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (20114, 20116), True, 'import matplotlib.pyplot as plt\n'), ((20182, 20192), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (20190, 
20192), True, 'import matplotlib.pyplot as plt\n'), ((22121, 22131), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (22129, 22131), True, 'import matplotlib.pyplot as plt\n'), ((22135, 22171), 'numpy.percentile', 'np.percentile', (['Index[~uplimIndex]', '(1)'], {}), '(Index[~uplimIndex], 1)\n', (22148, 22171), True, 'import numpy as np\n'), ((22207, 22217), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (22215, 22217), True, 'import matplotlib.pyplot as plt\n'), ((22221, 22258), 'numpy.percentile', 'np.percentile', (['Index[~uplimIndex]', '(99)'], {}), '(Index[~uplimIndex], 99)\n', (22234, 22258), True, 'import numpy as np\n'), ((22294, 22304), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (22302, 22304), True, 'import matplotlib.pyplot as plt\n'), ((22370, 22380), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (22378, 22380), True, 'import matplotlib.pyplot as plt\n'), ((15916, 15930), 'math.sqrt', 'sqrt', (['Npred[i]'], {}), '(Npred[i])\n', (15920, 15930), False, 'from math import sqrt\n'), ((16164, 16178), 'math.sqrt', 'sqrt', (['Npred[i]'], {}), '(Npred[i])\n', (16168, 16178), False, 'from math import sqrt\n')]
|
import subprocess
import numpy as np
from rsbeams.rsdata.SDDS import readSDDS
def obj_f(_):
analysis_command = ["sddsanalyzebeam", "run_setup.output.sdds", "output.anb"]
subprocess.call(analysis_command)
anb = readSDDS("output.anb")
anb.read()
betax, betax_target = anb.columns['betax'].squeeze(), 10.
betay, betay_target = anb.columns['betay'].squeeze(), 10.
alphax, alphax_target = anb.columns['alphax'].squeeze(), 1.
alphay, alphay_target = anb.columns['alphay'].squeeze(), 1.
obj_val = np.sqrt((betax - betax_target)**2 +
(betay - betay_target)**2 +
(alphax - alphax_target)**2 +
(alphay - alphay_target)**2)
return obj_val
def obj_f_dfols(_):
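    # Same objective as obj_f, but also returns the vector of residuals:
    # DFO-LS minimises the sum of squares of this vector, so it needs the
    # individual residuals rather than a single scalar value.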
analysis_command = ["sddsanalyzebeam", "run_setup.output.sdds", "output.anb"]
subprocess.call(analysis_command)
anb = readSDDS("output.anb")
anb.read()
betax, betax_target = anb.columns['betax'].squeeze(), 10.
betay, betay_target = anb.columns['betay'].squeeze(), 10.
alphax, alphax_target = anb.columns['alphax'].squeeze(), 1.
alphay, alphay_target = anb.columns['alphay'].squeeze(), 1.
obj_val = (betax - betax_target)**2 + \
(betay - betay_target)**2 + \
(alphax - alphax_target)**2 + \
(alphay - alphay_target)**2
obj_vec = np.array([betax - betax_target,
betay - betay_target,
alphax - alphax_target,
alphay - alphay_target])
return obj_val, obj_vec
|
[
"numpy.array",
"numpy.sqrt",
"subprocess.call",
"rsbeams.rsdata.SDDS.readSDDS"
] |
[((180, 213), 'subprocess.call', 'subprocess.call', (['analysis_command'], {}), '(analysis_command)\n', (195, 213), False, 'import subprocess\n'), ((225, 247), 'rsbeams.rsdata.SDDS.readSDDS', 'readSDDS', (['"""output.anb"""'], {}), "('output.anb')\n", (233, 247), False, 'from rsbeams.rsdata.SDDS import readSDDS\n'), ((531, 665), 'numpy.sqrt', 'np.sqrt', (['((betax - betax_target) ** 2 + (betay - betay_target) ** 2 + (alphax -\n alphax_target) ** 2 + (alphay - alphay_target) ** 2)'], {}), '((betax - betax_target) ** 2 + (betay - betay_target) ** 2 + (alphax -\n alphax_target) ** 2 + (alphay - alphay_target) ** 2)\n', (538, 665), True, 'import numpy as np\n'), ((848, 881), 'subprocess.call', 'subprocess.call', (['analysis_command'], {}), '(analysis_command)\n', (863, 881), False, 'import subprocess\n'), ((893, 915), 'rsbeams.rsdata.SDDS.readSDDS', 'readSDDS', (['"""output.anb"""'], {}), "('output.anb')\n", (901, 915), False, 'from rsbeams.rsdata.SDDS import readSDDS\n'), ((1392, 1498), 'numpy.array', 'np.array', (['[betax - betax_target, betay - betay_target, alphax - alphax_target, alphay -\n alphay_target]'], {}), '([betax - betax_target, betay - betay_target, alphax -\n alphax_target, alphay - alphay_target])\n', (1400, 1498), True, 'import numpy as np\n')]
|
import numpy as np
from ..utils.darray import DependArray
class NonPBC(object):
def __init__(self, rij, charges, cell_basis=None, exclusion=None, *, dij_inverse=None, dij_inverse_gradient=None):
self.charges = charges
self.qmmm_coulomb_tensor = DependArray(
name="qmmm_coulomb_tensor",
func=NonPBC._get_qmmm_coulomb_tensor,
dependencies=[dij_inverse, exclusion],
)
self.qmmm_coulomb_tensor_gradient = DependArray(
name="qmmm_coulomb_tensor_gradient",
func=NonPBC._get_qmmm_coulomb_tensor_gradient,
dependencies=[dij_inverse_gradient, exclusion],
)
self.qm_total_esp = DependArray(
name="qm_total_esp",
func=NonPBC._get_qm_total_esp,
dependencies=[
self.qmmm_coulomb_tensor,
self.qmmm_coulomb_tensor_gradient,
charges,
],
)
@staticmethod
def _get_qmmm_coulomb_tensor(dij_inverse, exclusion=None):
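        # Work on a copy so the cached distance tensor is not modified, and zero
        # the columns of excluded MM atoms so they do not contribute to the QM ESP.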
if exclusion is not None:
dij_inverse = np.copy(dij_inverse)
dij_inverse[:, np.asarray(exclusion)] = 0.
return dij_inverse
@staticmethod
def _get_qmmm_coulomb_tensor_gradient(dij_inverse_gradient, exclusion=None):
if exclusion is not None:
dij_inverse_gradient = np.copy(dij_inverse_gradient)
dij_inverse_gradient[:, :, np.asarray(exclusion)] = 0.
return dij_inverse_gradient
@staticmethod
def _get_qm_total_esp(t, t_grad, charges):
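        # Row 0 holds the Coulomb potential kernel (1/r) and rows 1-3 minus its
        # gradient, so contracting with the MM charges gives the electrostatic
        # potential and field at each QM atom in a single matrix product.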
coulomb_tensor = np.zeros((4, t.shape[0], t.shape[1]))
coulomb_tensor[0] = t
coulomb_tensor[1:] = -t_grad
return coulomb_tensor @ charges
def _get_total_espc_gradient(self, qm_esp_charges):
return qm_esp_charges @ self.qmmm_coulomb_tensor_gradient * self.charges
|
[
"numpy.asarray",
"numpy.zeros",
"numpy.copy"
] |
[((1598, 1635), 'numpy.zeros', 'np.zeros', (['(4, t.shape[0], t.shape[1])'], {}), '((4, t.shape[0], t.shape[1]))\n', (1606, 1635), True, 'import numpy as np\n'), ((1099, 1119), 'numpy.copy', 'np.copy', (['dij_inverse'], {}), '(dij_inverse)\n', (1106, 1119), True, 'import numpy as np\n'), ((1373, 1402), 'numpy.copy', 'np.copy', (['dij_inverse_gradient'], {}), '(dij_inverse_gradient)\n', (1380, 1402), True, 'import numpy as np\n'), ((1147, 1168), 'numpy.asarray', 'np.asarray', (['exclusion'], {}), '(exclusion)\n', (1157, 1168), True, 'import numpy as np\n'), ((1442, 1463), 'numpy.asarray', 'np.asarray', (['exclusion'], {}), '(exclusion)\n', (1452, 1463), True, 'import numpy as np\n')]
|
#
import numpy
import pandas
#
import torch
from torch import nn
#
N = 10_000
data_raw = pandas.DataFrame(data={'A': numpy.array(numpy.arange(N)),
# 'A': numpy.random.choice([0, 1], size=(N,)),
'B': numpy.array(numpy.arange(N)) + numpy.random.normal(size=(N,)),
'C': numpy.random.normal(size=(N,))})
target = 'A'
quantitative = ['B', 'C']
qualitative = []
# cast columns to the expected dtypes
data_set = data_raw.copy()
for category in qualitative:
data_set[category] = data_set[category].astype('category')
for numeric in quantitative:
data_set[numeric] = data_set[numeric].astype('float64')
data_set[target] = data_set[target].astype('float64')
# data_set[target] = data_set[target].astype('category')
from new_siege.data import DataHandler
data = DataHandler(data_set, target)
print(data.data)
from new_siege.neuro import Gene
# model-params
layers = [nn.Linear, nn.Linear]
layers_dimensions = [100, 1]
# layers_dimensions = [100, 2]
# activators = [nn.Sigmoid, nn.Sigmoid]
activators = [nn.ReLU, nn.ReLU]
activators_args = {}
# preprocessors = nn.BatchNorm1d
preprocessors = None
embeddingdrop = 0.0
drops = 0.0
model = Gene(data, layers, layers_dimensions, activators, activators_args, preprocessors, embeddingdrop, drops)
print(model)
# Define Optimisation
loss_function = nn.MSELoss()
# loss_function = nn.CrossEntropyLoss()
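# The commented-out alternatives (categorical target, output size 2,
# CrossEntropyLoss) correspond to a two-class classification variant of this
# regression setup.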
optimiser = torch.optim.Adam(model.parameters(), lr=0.001)
# Train the Model
epochs = 500
model.fit(optimiser, loss_function, epochs)
model.fit_plot()
model.summary(loss_function=loss_function, show_confusion_matrix=False)
|
[
"new_siege.neuro.Gene",
"torch.nn.MSELoss",
"new_siege.data.DataHandler",
"numpy.arange",
"numpy.random.normal"
] |
[((803, 832), 'new_siege.data.DataHandler', 'DataHandler', (['data_set', 'target'], {}), '(data_set, target)\n', (814, 832), False, 'from new_siege.data import DataHandler\n'), ((1181, 1288), 'new_siege.neuro.Gene', 'Gene', (['data', 'layers', 'layers_dimensions', 'activators', 'activators_args', 'preprocessors', 'embeddingdrop', 'drops'], {}), '(data, layers, layers_dimensions, activators, activators_args,\n preprocessors, embeddingdrop, drops)\n', (1185, 1288), False, 'from new_siege.neuro import Gene\n'), ((1338, 1350), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1348, 1350), False, 'from torch import nn\n'), ((333, 363), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(N,)'}), '(size=(N,))\n', (352, 363), False, 'import numpy\n'), ((135, 150), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (147, 150), False, 'import numpy\n'), ((266, 296), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(N,)'}), '(size=(N,))\n', (285, 296), False, 'import numpy\n'), ((247, 262), 'numpy.arange', 'numpy.arange', (['N'], {}), '(N)\n', (259, 262), False, 'import numpy\n')]
|
import faiss
import numpy as np
from scipy import sparse, special
def estimate_pdf(target, emb, C=0.1):
"""Estimate the density of entities at the given target locations in the
embedding space using the density estimator based on the k-nearest
neighbors.
:param target: Target location at which the density is calculated.
:type target: numpy.array, shape=(num_target, dim)
:param emb: Embedding vectors for the entities
:type emb: numpy.ndarray, (num_entities, dim)
:param C: Bandwidth for kernels. Ranges between (0,1]. Roughly C * num_entities nearest neighbors will be used for estimating the density at a single target location.
:type C: float, optional
:return: Log-density of points at the target locations.
:rtype: numpy.ndarray (num_target,)
Reference
https://faculty.washington.edu/yenchic/18W_425/Lec7_knn_basis.pdf
.. highlight:: python
.. code-block:: python
>>> import emlens
>>> import numpy as np
>>> emb = np.random.randn(100, 20)
>>> target = np.random.randn(10, 20)
>>> density = emlens.estimate_pdf(target=target, emb = emb)
"""
if len(emb.shape) != 2:
raise TypeError(
"emb should be 2D numpy array of size (number of points, dimensions)"
)
if len(target.shape) != 2:
raise TypeError(
"target should be 2D numpy array of size (number of points, dimensions)"
)
n = emb.shape[0]
dim = emb.shape[1]
k = np.maximum(1, np.round(C * np.power(n, 4 / 5)))
k = int(k)
# Construct the knn graph
index = faiss.IndexFlatL2(dim)
index.add(emb.astype(np.float32))
distances, indices = index.search(target.astype(np.float32), k=k)
#
# KNN density estimator
# https://faculty.washington.edu/yenchic/18W_425/Lec7_knn_basis.pdf
#
logVd = np.log(np.pi) * (dim / 2.0) - special.loggamma(dim / 2.0 + 1)
Rk = np.max(distances, axis=1)
density = np.log(k) - np.log(n) - dim * np.log(Rk) - logVd
return density
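# --- Added sketch (not part of the original module) ---
# The value returned above is the log of the standard k-NN density estimate
#     p_hat(x) = k / (n * V_d * R_k(x)**d),    V_d = pi**(d/2) / Gamma(d/2 + 1),
# where R_k(x) is the k-th neighbour distance reported by faiss (note IndexFlatL2
# reports squared L2 distances) and roughly k = C * n**(4/5) neighbours are used.
# Hypothetical usage, assuming faiss is installed:
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    emb_demo = rng.standard_normal((1000, 8))
    target_demo = rng.standard_normal((5, 8))
    log_density = estimate_pdf(target_demo, emb_demo, C=0.1)
    print(log_density.shape)  # expected: (5,)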
|
[
"scipy.special.loggamma",
"numpy.log",
"numpy.power",
"numpy.max",
"faiss.IndexFlatL2"
] |
[((1615, 1637), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['dim'], {}), '(dim)\n', (1632, 1637), False, 'import faiss\n'), ((1942, 1967), 'numpy.max', 'np.max', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (1948, 1967), True, 'import numpy as np\n'), ((1901, 1932), 'scipy.special.loggamma', 'special.loggamma', (['(dim / 2.0 + 1)'], {}), '(dim / 2.0 + 1)\n', (1917, 1932), False, 'from scipy import sparse, special\n'), ((1871, 1884), 'numpy.log', 'np.log', (['np.pi'], {}), '(np.pi)\n', (1877, 1884), True, 'import numpy as np\n'), ((1536, 1554), 'numpy.power', 'np.power', (['n', '(4 / 5)'], {}), '(n, 4 / 5)\n', (1544, 1554), True, 'import numpy as np\n'), ((1982, 1991), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (1988, 1991), True, 'import numpy as np\n'), ((1994, 2003), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (2000, 2003), True, 'import numpy as np\n'), ((2012, 2022), 'numpy.log', 'np.log', (['Rk'], {}), '(Rk)\n', (2018, 2022), True, 'import numpy as np\n')]
|
import time
# from flask import Flask, request
import flask
from flask import request, Response, render_template, jsonify, send_file, make_response
import os
import subprocess
import sys
import pandas
from shelljob import proc
import csv
import json
import pandas as pd
from copy import deepcopy
from ruamel import yaml
import re
app = flask.Flask(__name__)
app.run(threaded=True)
# Model evaluation
@app.route('/stream/',methods=['GET', 'POST'])
def stream():
def read_process():
arr = ["18","2","3","9","10"]
for i in arr:
time.sleep(1)
print(i)
yield i + '\n'
# yield i
sentence = request.args.get('sentence')
# sentence = "happy"
prefix= "python main.py --all --config config.yml --type \""
cmdEvaluation = prefix + sentence + "\"" + " --verbose"
# cmdEvaluation = "python main.py --train --all --config config.yml --verbose"
def inner():
proc = subprocess.Popen(
[cmdEvaluation], #call something with a lot of output so we can see it
shell=True,
stdout=subprocess.PIPE,
            universal_newlines=True # important....
)
for line in iter(proc.stdout.readline,''):
# time.sleep(1) # Don't need this just shows the text streaming
print(line.rstrip())
yield line.rstrip() + '\n'
resp = flask.Response(inner(),
mimetype='text/plain'
)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Credentials'] = 'false'
    resp.headers['Access-Control-Allow-Headers'] = 'Content-Type,Connection,Server,Date'
resp.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH'
resp.headers['X-Content-Type-Options'] = 'nosniff'
resp.headers['Vary'] = '*'
resp.headers['Accept-encoding'] = 'identity'
resp.headers['Content-encoding'] = 'identity'
resp.headers['Content-Encoding'] = 'compress'
resp.headers['Transfer-encoding'] = 'identity'
resp.headers['X-Powered-By'] = 'Express'
return resp
# Model training
@app.route('/train/',methods=['GET', 'POST'])
def train():
def read_process():
arr = ["18","2","3","9","10"]
for i in arr:
time.sleep(1)
print(i)
yield i + '\n'
sentence = request.args.get('sentence')
cmdEvaluation = "python main.py --train --all --config config.yml --verbose"
def inner():
proc = subprocess.Popen(
[cmdEvaluation], #call something with a lot of output so we can see it
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True # important....
)
for line in iter(proc.stdout.readline,''):
# time.sleep(1) # Don't need this just shows the text streaming
print(line.rstrip())
yield line.rstrip() + '\n'
resp = flask.Response(inner(),
mimetype='text/plain'
)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Credentials'] = 'false'
    resp.headers['Access-Control-Allow-Headers'] = 'Content-Type,Connection,Server,Date'
resp.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH'
resp.headers['X-Content-Type-Options'] = 'nosniff'
resp.headers['Vary'] = '*'
resp.headers['Accept-encoding'] = 'identity'
resp.headers['Content-encoding'] = 'identity'
resp.headers['Content-Encoding'] = 'compress'
resp.headers['Transfer-encoding'] = 'identity'
resp.headers['X-Powered-By'] = 'Express'
return resp
# Result - Get sentence level data
@app.route('/dataS')
def dataS():
print(os.getcwd())
# TODO: Change the correct path for reading the output files
# Now it’s '/data/output1/…
# Should be '/data/output/…
path=os.getcwd()+'/data/output1/SupervisorAgent_sentence_aggregate.csv'
print(path)
    csv_col_name = list(pd.read_csv(path).columns) # column names
dict_csv_data = csv.DictReader(open(path, 'r'), csv_col_name) # read csv as dict
csv_col_name[0] = "sentence"
csv_col_name[csv_col_name.index("sentence.id")] = "id"
csv_col_name[csv_col_name.index("gaze.shift")] = "gazeShift"
csv_col_name[csv_col_name.index("immediate.bs")] = "imBs"
csv_col_name[csv_col_name.index("delayed.bs")] = "dlBs"
csv_col_name[csv_col_name.index("gaze.keyboard.ratio")] = "gazeRatio"
csv_col_name[csv_col_name.index("fix.count")] = "fixNum"
csv_col_name[csv_col_name.index("correct.error")] = "corErr"
csv_col_name[csv_col_name.index("uncorrected.error")] = "unErr"
csv_col_name[csv_col_name.index("fix.duration")] = "fixDur"
csv_col_name[csv_col_name.index("iki.1")] = "ikiSD"
csv_col_name[csv_col_name.index("wpm.1")] = "wpmSD"
csv_col_name[csv_col_name.index("bs.1")] = "bsSD"
csv_col_name[csv_col_name.index("gaze.shift.1")] = "gazeShiftSD"
csv_col_name[csv_col_name.index("immediate.bs.1")] = "imBsSD"
csv_col_name[csv_col_name.index("delayed.bs.1")] = "dlBsSD"
csv_col_name[csv_col_name.index("gaze.keyboard.ratio.1")] = "gazeRatioSD"
csv_col_name[csv_col_name.index("fix.count.1")] = "fixNumSD"
csv_col_name[csv_col_name.index("correct.error.1")] = "corErrSD"
csv_col_name[csv_col_name.index("uncorrected.error.1")] = "unErrSD"
csv_col_name[csv_col_name.index("fix.duration.1")] = "fixDurSD"
print(csv_col_name)
next(dict_csv_data) # pop header out
next(dict_csv_data)
next(dict_csv_data)
dict1 = {}
dict2 = {}
for rows in dict_csv_data:
dict2 = deepcopy(rows)
dict2.pop('sentence.id.1')
dict2.pop('agent.id')
dict2.pop('agent.id.1')
print(rows['id'])
dict1[rows['id']] = dict2 # sentence.id is the key of each row: 1,2,3...
return dict1
# Result - Get trial level data
@app.route('/dataT')
def dataT():
print(os.getcwd())
# TODO: Change the correct path for reading the output files
path=os.getcwd()+'/data/output1/SupervisorAgent_sentence_test.csv'
print(path)
csv_col_name = list(pd.read_csv(path).columns)
dict_csv_data = csv.DictReader(open(path, 'r'), csv_col_name)
print(csv_col_name)
csv_col_name[csv_col_name.index("sentence.id")] = "sid"
csv_col_name[csv_col_name.index("agent.id")] = "aid"
csv_col_name[csv_col_name.index("target.sentence")] = "sentence"
csv_col_name[csv_col_name.index("gaze.shift")] = "gazeShift"
csv_col_name[csv_col_name.index("immediate.bs")] = "imBs"
csv_col_name[csv_col_name.index("delayed.bs")] = "dlBs"
csv_col_name[csv_col_name.index("gaze.keyboard.ratio")] = "gazeRatio"
csv_col_name[csv_col_name.index("fix.count")] = "fixNum"
csv_col_name[csv_col_name.index("correct.error")] = "corErr"
csv_col_name[csv_col_name.index("uncorrected.error")] = "unErr"
csv_col_name[csv_col_name.index("fix.duration")] = "fixDur"
print(csv_col_name)
next(dict_csv_data)
dict1 = {}
dict2 = {}
dict3 = {}
for rows in dict_csv_data:
dict2 = deepcopy(rows)
if dict1.get(rows['sid']): # if sid exist, push the new trial
dict1[rows['sid']][rows['aid']] = dict2
else:
dict1[rows['sid']] = {}
dict1[rows['sid']][rows['aid']] = dict2
return dict1
# Result - Calculate general info for result data
@app.route('/dataG')
def dataG():
print(os.getcwd())
# TODO: Change the correct path for reading the output files
path=os.getcwd()+'/data/output1/SupervisorAgent_sentence_test.csv'
print(path)
csv_col_name = list(pd.read_csv(path).columns)
dict_csv_data = csv.DictReader(open(path, 'r'), csv_col_name)
print(csv_col_name)
csv_col_name[csv_col_name.index("sentence.id")] = "sid"
csv_col_name[csv_col_name.index("agent.id")] = "aid"
csv_col_name[csv_col_name.index("target.sentence")] = "sentence"
csv_col_name[csv_col_name.index("gaze.shift")] = "gazeShift"
csv_col_name[csv_col_name.index("immediate.bs")] = "imBs"
csv_col_name[csv_col_name.index("delayed.bs")] = "dlBs"
csv_col_name[csv_col_name.index("gaze.keyboard.ratio")] = "gazeRatio"
csv_col_name[csv_col_name.index("fix.count")] = "fixNum"
csv_col_name[csv_col_name.index("correct.error")] = "corErr"
csv_col_name[csv_col_name.index("uncorrected.error")] = "unErr"
csv_col_name[csv_col_name.index("fix.duration")] = "fixDur"
print(csv_col_name)
next(dict_csv_data) # pop out header
dict1 = {}
dict2 = {}
dict3 = {}
dictResult = {}
iki = []
wpm = []
bs = []
imBs = []
dlBs = []
gazeShift = []
gazeRatio = []
fixNum = []
fixDur = []
corErr = []
unErr = []
for rows in dict_csv_data:
iki.append(float(rows["iki"]))
wpm.append(float(rows["wpm"]))
bs.append(float(rows["bs"]))
imBs.append(float(rows["imBs"]))
dlBs.append(float(rows["dlBs"]))
gazeShift.append(float(rows["gazeShift"]))
gazeRatio.append(float(rows["gazeRatio"]))
fixNum.append(float(rows["fixNum"]))
fixDur.append(float(rows["fixDur"]))
corErr.append(float(rows["corErr"]))
unErr.append(float(rows["unErr"]))
dict2 = deepcopy(rows)
if dict1.get(rows['sid']): # if sid exist, push the new trial
dict1[rows['sid']][rows['aid']] = dict2
else:
dict1[rows['sid']] = {}
dict1[rows['sid']][rows['aid']] = dict2
import numpy as np
dictResult["sNum"] = len(dict1)
dictResult["tNum"] = len(dict1["1"])
dictResult["ikiMean"] = np.mean(iki)
dictResult["ikiSD"] = np.std(iki)
dictResult["wpmMean"] = np.mean(wpm)
dictResult["wpmSD"] = np.std(wpm)
dictResult["bsMean"] = np.mean(bs)
dictResult["bsSD"] = np.std(bs)
dictResult["imBsMean"] = np.mean(imBs)
dictResult["imBsSD"] = np.std(imBs)
dictResult["dlBsMean"] = np.mean(dlBs)
dictResult["dlBsSD"] = np.std(dlBs)
dictResult["gazeShiftMean"] = np.mean(gazeShift)
dictResult["gazeShiftSD"] = np.std(gazeShift)
dictResult["gazeRatioMean"] = np.mean(gazeRatio)
dictResult["gazeRatioSD"] = np.std(gazeRatio)
dictResult["fixNumMean"] = np.mean(fixNum)
dictResult["fuxNumSD"] = np.std(fixNum)
dictResult["fixDurMean"] = np.mean(fixDur)
dictResult["fixDurSD"] = np.std(fixDur)
dictResult["corErrMean"] = np.mean(corErr)
dictResult["corErrSD"] = np.std(corErr)
dictResult["unErrMean"] = np.mean(unErr)
dictResult["unErrSD"] = np.std(unErr)
print(len(dict1))
return dictResult
# Result - Save model
@app.route('/saveModel/',methods=['GET', 'POST'])
def saveModel():
name = request.args.get('name')
path=os.getcwd()+"/data/models_saved"
newpath=path+"/Model_"+name
currentModel=os.getcwd()+"/data/models"
import shutil
shutil.copytree(currentModel, newpath)
return "saved"
# Evaluate/Model - Read built-in and saved models' name
@app.route('/readNames')
def readNames():
path=os.getcwd()+"/data/models_saved"
fileList = os.listdir(path)
nameList = []
for name in fileList:
nameList.append({
"value": name,
})
nameList.insert(0,{
"value": "Default",
})
print(nameList)
return jsonify(nameList)
# Evaluate/Model - Set evaluation model onClick
@app.route('/setModel/',methods=['GET', 'POST'])
def setModel():
def update_yml(name):
path=os.getcwd()+"/configs/training_config.yml"
with open(path, encoding="utf-8") as f:
content = yaml.load(f, Loader=yaml.RoundTripLoader)
# modify the parameters in yml
content['finger']['save_path'] = "data/models_saved/" + name + "/finger"
content['supervisor']['save_path'] = "data/models_saved/" + name + "/supervisor"
with open(path, 'w', encoding="utf-8") as nf:
yaml.dump(content, nf, Dumper=yaml.RoundTripDumper)
def reset_yml():
path=os.getcwd()+"/configs/training_config.yml"
with open(path, encoding="utf-8") as f:
content = yaml.load(f, Loader=yaml.RoundTripLoader)
content['finger']['save_path'] = "data/models/finger"
content['supervisor']['save_path'] = "data/models/supervisor"
with open(path, 'w', encoding="utf-8") as nf:
yaml.dump(content, nf, Dumper=yaml.RoundTripDumper)
name = request.args.get('name')
if name != "Default":
update_yml(name)
else:
reset_yml()
return "done"
# Result - Generate video on trial level
@app.route('/generateVideo/',methods=['GET', 'POST'])
def video():
def read_process():
arr = ["18","2","3","9","10"]
for i in arr:
time.sleep(1)
print(i)
yield i + '\n'
# yield i
sentence = request.args.get('sentence')
prefix= "python main.py --supervisor --config config.yml --type \""
cmdEvaluation = prefix + sentence + "\""
# cmdEvaluation = "python main.py --train --all --config config.yml --verbose"
def inner():
proc = subprocess.Popen(
[cmdEvaluation], #call something with a lot of output so we can see it
shell=True,
stdout=subprocess.PIPE,
            universal_newlines=True # important....
)
for line in iter(proc.stdout.readline,''):
# time.sleep(1) # Don't need this just shows the text streaming
print(line.rstrip())
yield line.rstrip() + '\n'
resp = flask.Response(inner(),
mimetype='text/plain'
)
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Credentials'] = 'false'
    resp.headers['Access-Control-Allow-Headers'] = 'Content-Type,Connection,Server,Date'
resp.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, POST, PUT, DELETE, CONNECT, OPTIONS, TRACE, PATCH'
resp.headers['X-Content-Type-Options'] = 'nosniff'
resp.headers['Vary'] = '*'
resp.headers['Accept-encoding'] = 'identity'
resp.headers['Content-encoding'] = 'identity'
resp.headers['Content-Encoding'] = 'compress'
resp.headers['Transfer-encoding'] = 'identity'
resp.headers['X-Powered-By'] = 'Express'
return resp
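# Added usage sketch (not part of the original app): how a client could consume the
# streamed plain-text responses served above; host and port are assumptions (Flask defaults).
#   import requests
#   with requests.get("http://127.0.0.1:5000/stream/",
#                     params={"sentence": "happy"}, stream=True) as r:
#       for line in r.iter_lines(decode_unicode=True):
#           print(line)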
|
[
"subprocess.Popen",
"copy.deepcopy",
"flask.request.args.get",
"ruamel.yaml.dump",
"numpy.std",
"os.getcwd",
"pandas.read_csv",
"flask.Flask",
"time.sleep",
"flask.jsonify",
"numpy.mean",
"ruamel.yaml.load",
"shutil.copytree",
"os.listdir"
] |
[((340, 361), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (351, 361), False, 'import flask\n'), ((667, 695), 'flask.request.args.get', 'request.args.get', (['"""sentence"""'], {}), "('sentence')\n", (683, 695), False, 'from flask import request, Response, render_template, jsonify, send_file, make_response\n'), ((2427, 2455), 'flask.request.args.get', 'request.args.get', (['"""sentence"""'], {}), "('sentence')\n", (2443, 2455), False, 'from flask import request, Response, render_template, jsonify, send_file, make_response\n'), ((9856, 9868), 'numpy.mean', 'np.mean', (['iki'], {}), '(iki)\n', (9863, 9868), True, 'import numpy as np\n'), ((9896, 9907), 'numpy.std', 'np.std', (['iki'], {}), '(iki)\n', (9902, 9907), True, 'import numpy as np\n'), ((9937, 9949), 'numpy.mean', 'np.mean', (['wpm'], {}), '(wpm)\n', (9944, 9949), True, 'import numpy as np\n'), ((9977, 9988), 'numpy.std', 'np.std', (['wpm'], {}), '(wpm)\n', (9983, 9988), True, 'import numpy as np\n'), ((10017, 10028), 'numpy.mean', 'np.mean', (['bs'], {}), '(bs)\n', (10024, 10028), True, 'import numpy as np\n'), ((10055, 10065), 'numpy.std', 'np.std', (['bs'], {}), '(bs)\n', (10061, 10065), True, 'import numpy as np\n'), ((10096, 10109), 'numpy.mean', 'np.mean', (['imBs'], {}), '(imBs)\n', (10103, 10109), True, 'import numpy as np\n'), ((10138, 10150), 'numpy.std', 'np.std', (['imBs'], {}), '(imBs)\n', (10144, 10150), True, 'import numpy as np\n'), ((10181, 10194), 'numpy.mean', 'np.mean', (['dlBs'], {}), '(dlBs)\n', (10188, 10194), True, 'import numpy as np\n'), ((10223, 10235), 'numpy.std', 'np.std', (['dlBs'], {}), '(dlBs)\n', (10229, 10235), True, 'import numpy as np\n'), ((10271, 10289), 'numpy.mean', 'np.mean', (['gazeShift'], {}), '(gazeShift)\n', (10278, 10289), True, 'import numpy as np\n'), ((10323, 10340), 'numpy.std', 'np.std', (['gazeShift'], {}), '(gazeShift)\n', (10329, 10340), True, 'import numpy as np\n'), ((10376, 10394), 'numpy.mean', 'np.mean', (['gazeRatio'], {}), '(gazeRatio)\n', (10383, 10394), True, 'import numpy as np\n'), ((10428, 10445), 'numpy.std', 'np.std', (['gazeRatio'], {}), '(gazeRatio)\n', (10434, 10445), True, 'import numpy as np\n'), ((10478, 10493), 'numpy.mean', 'np.mean', (['fixNum'], {}), '(fixNum)\n', (10485, 10493), True, 'import numpy as np\n'), ((10524, 10538), 'numpy.std', 'np.std', (['fixNum'], {}), '(fixNum)\n', (10530, 10538), True, 'import numpy as np\n'), ((10571, 10586), 'numpy.mean', 'np.mean', (['fixDur'], {}), '(fixDur)\n', (10578, 10586), True, 'import numpy as np\n'), ((10617, 10631), 'numpy.std', 'np.std', (['fixDur'], {}), '(fixDur)\n', (10623, 10631), True, 'import numpy as np\n'), ((10664, 10679), 'numpy.mean', 'np.mean', (['corErr'], {}), '(corErr)\n', (10671, 10679), True, 'import numpy as np\n'), ((10710, 10724), 'numpy.std', 'np.std', (['corErr'], {}), '(corErr)\n', (10716, 10724), True, 'import numpy as np\n'), ((10756, 10770), 'numpy.mean', 'np.mean', (['unErr'], {}), '(unErr)\n', (10763, 10770), True, 'import numpy as np\n'), ((10800, 10813), 'numpy.std', 'np.std', (['unErr'], {}), '(unErr)\n', (10806, 10813), True, 'import numpy as np\n'), ((10962, 10986), 'flask.request.args.get', 'request.args.get', (['"""name"""'], {}), "('name')\n", (10978, 10986), False, 'from flask import request, Response, render_template, jsonify, send_file, make_response\n'), ((11128, 11166), 'shutil.copytree', 'shutil.copytree', (['currentModel', 'newpath'], {}), '(currentModel, newpath)\n', (11143, 11166), False, 'import shutil\n'), ((11342, 11358), 'os.listdir', 
'os.listdir', (['path'], {}), '(path)\n', (11352, 11358), False, 'import os\n'), ((11563, 11580), 'flask.jsonify', 'jsonify', (['nameList'], {}), '(nameList)\n', (11570, 11580), False, 'from flask import request, Response, render_template, jsonify, send_file, make_response\n'), ((12689, 12713), 'flask.request.args.get', 'request.args.get', (['"""name"""'], {}), "('name')\n", (12705, 12713), False, 'from flask import request, Response, render_template, jsonify, send_file, make_response\n'), ((13124, 13152), 'flask.request.args.get', 'request.args.get', (['"""sentence"""'], {}), "('sentence')\n", (13140, 13152), False, 'from flask import request, Response, render_template, jsonify, send_file, make_response\n'), ((964, 1062), 'subprocess.Popen', 'subprocess.Popen', (['[cmdEvaluation]'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '([cmdEvaluation], shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n', (980, 1062), False, 'import subprocess\n'), ((2570, 2668), 'subprocess.Popen', 'subprocess.Popen', (['[cmdEvaluation]'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '([cmdEvaluation], shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n', (2586, 2668), False, 'import subprocess\n'), ((3858, 3869), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3867, 3869), False, 'import os\n'), ((4010, 4021), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4019, 4021), False, 'import os\n'), ((5782, 5796), 'copy.deepcopy', 'deepcopy', (['rows'], {}), '(rows)\n', (5790, 5796), False, 'from copy import deepcopy\n'), ((6098, 6109), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6107, 6109), False, 'import os\n'), ((6185, 6196), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6194, 6196), False, 'import os\n'), ((7267, 7281), 'copy.deepcopy', 'deepcopy', (['rows'], {}), '(rows)\n', (7275, 7281), False, 'from copy import deepcopy\n'), ((7621, 7632), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7630, 7632), False, 'import os\n'), ((7708, 7719), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7717, 7719), False, 'import os\n'), ((9480, 9494), 'copy.deepcopy', 'deepcopy', (['rows'], {}), '(rows)\n', (9488, 9494), False, 'from copy import deepcopy\n'), ((10996, 11007), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11005, 11007), False, 'import os\n'), ((11079, 11090), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11088, 11090), False, 'import os\n'), ((11294, 11305), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11303, 11305), False, 'import os\n'), ((13387, 13485), 'subprocess.Popen', 'subprocess.Popen', (['[cmdEvaluation]'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '([cmdEvaluation], shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n', (13403, 13485), False, 'import subprocess\n'), ((563, 576), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (573, 576), False, 'import time\n'), ((2345, 2358), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2355, 2358), False, 'import time\n'), ((4117, 4134), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (4128, 4134), True, 'import pandas as pd\n'), ((6287, 6304), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (6298, 6304), True, 'import pandas as pd\n'), ((7810, 7827), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (7821, 7827), True, 'import pandas as pd\n'), ((11734, 11745), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11743, 11745), False, 'import os\n'), ((11847, 11888), 
'ruamel.yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.RoundTripLoader'}), '(f, Loader=yaml.RoundTripLoader)\n', (11856, 11888), False, 'from ruamel import yaml\n'), ((12177, 12228), 'ruamel.yaml.dump', 'yaml.dump', (['content', 'nf'], {'Dumper': 'yaml.RoundTripDumper'}), '(content, nf, Dumper=yaml.RoundTripDumper)\n', (12186, 12228), False, 'from ruamel import yaml\n'), ((12264, 12275), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12273, 12275), False, 'import os\n'), ((12377, 12418), 'ruamel.yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.RoundTripLoader'}), '(f, Loader=yaml.RoundTripLoader)\n', (12386, 12418), False, 'from ruamel import yaml\n'), ((12625, 12676), 'ruamel.yaml.dump', 'yaml.dump', (['content', 'nf'], {'Dumper': 'yaml.RoundTripDumper'}), '(content, nf, Dumper=yaml.RoundTripDumper)\n', (12634, 12676), False, 'from ruamel import yaml\n'), ((13020, 13033), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13030, 13033), False, 'import time\n')]
|
from . import Instrument
from collections import deque
import mido
import numpy
import itertools
import operator
SCALE_WEIGHTS = [6, 2, 4, 2, 4, 1, 3]
MAX_JUMP = 7
EOLIAN_SCALE = [0, 2, 3, 5, 7, 8, 10]
class SynthLead(Instrument):
TRACK_NAME_BASE = "Lead Synth"
def __init__(self, live_set, role, recording_ended_callback):
super(SynthLead, self).__init__(live_set, role, recording_ended_callback)
self.pending_messages = deque()
self.output_port = mido.open_output("IAC Driver Melody %s" % self.role)
def set_volume(self, value):
self.get_track().volume = value
def tick(self, tick_count):
super(SynthLead, self).tick(tick_count)
if self.player and not self.pending_messages:
melody = self.get_notes()
#print "Generated melody:", melody
self.pending_messages.extend(melody)
if self.pending_messages:
#print "SYNTH LEAD SENDING MESSAGES"
messages = self.pending_messages.popleft()
#print "Sending messages:", messages
for message in messages:
self.output_port.send(message)
# print tick_count
# if tick_count % 96 == 0:
# print "on"
# self.output_port.send(mido.Message("note_on", note=57))
# elif tick_count % 96 == 48:
# print "off"
# self.output_port.send(mido.Message("note_off", note=57))
def get_notes(self):
if self.player.mean_joint_distance < 0.5:
# No movement or almost no movement. Generate silent beat.
return [[], [], [], []]
# More motion = shorter note sequence & faster rhythm.
# Movement value is normalized to range 0-1.
normalized_mean_joint_distance = min((self.player.mean_joint_distance - 0.5) / 3, 1)
inverse_mean_joint_distance = 1 - normalized_mean_joint_distance
pattern_length_index = scale_to_range(inverse_mean_joint_distance, 0, 2)
# Pattern can be [1, 2, 4] quarters long.
pattern_length_beats = 2 ** pattern_length_index
rh_y, lh_y, head_y, body_z, distance_between_hands = self.player.param_values
number_of_notes = scale_to_range(distance_between_hands, 1, 12)
# Shortest note is 16th note.
if number_of_notes > pattern_length_beats * 4:
number_of_notes = pattern_length_beats * 4
rhythmic_pattern = self.choose_note_start_locations(pattern_length_beats, number_of_notes)
# Lowest note in range corresponds to rh_y. A1 - A3.
# Notes are numbered as "offset from A1".
lowest_note = scale_to_range(rh_y, 0, 14)
# Size of melody range corresponds to y distance between hands.
note_range_size = scale_to_range(abs(rh_y - lh_y), 0, 7)
# Melody start note corresponds to y position of left hand (offset from range start, in mode).
melody_start = scale_to_range(lh_y, 0, note_range_size) + lowest_note
pitches = self.get_melody_pitches(number_of_notes, note_range_size, lowest_note, melody_start)
previous_start_location = 0
previous_pitch = None
previous_note_end = None
messages = []
midi_pitches = [self.get_midi_value(pitch) for pitch in pitches]
melody_notes = deque(zip(rhythmic_pattern, midi_pitches))
next_note_start, next_note_pitch = melody_notes.popleft()
for tick in range(4 * pattern_length_beats):
tick_messages = []
if tick == next_note_start:
tick_messages.append(mido.Message("note_on", note=next_note_pitch))
# When starting a new note, end the previous one, if there is one still playing,
# so that notes don't overlap.
if previous_note_end:
tick_messages.append(mido.Message("note_off", note=previous_pitch))
previous_note_end = None
previous_pitch = None
# Current note should end in 4 ticks maximum.
previous_note_end = tick + 4
previous_pitch = next_note_pitch
# Advance to next note.
if melody_notes:
next_note_start, next_note_pitch = melody_notes.popleft()
else:
next_note_start = next_note_pitch = None
elif tick == previous_note_end:
tick_messages.append(mido.Message("note_off", note=previous_pitch))
previous_pitch = None
previous_note_end = None
messages.append(tick_messages)
return messages
@staticmethod
def get_midi_value(offset_from_a1):
A1 = 57
octave = itertools.count(0, 12)
octave_offset = itertools.chain.from_iterable(itertools.repeat(item, 7) for item in octave)
infinite_eolian = itertools.imap(operator.add, itertools.cycle(EOLIAN_SCALE), octave_offset)
semitone_offset_from_a1 = next(itertools.islice(infinite_eolian, offset_from_a1, offset_from_a1 + 1))
return A1 + semitone_offset_from_a1
def choose_note_start_locations(self, pattern_length_beats, number_of_notes):
weights = [1] * pattern_length_beats * 4
for i in xrange(len(weights)):
for power in xrange(1, 5):
if i % (2 ** power) == 0:
weights[i] += 1
total = sum(weights)
normalized_weights = [float(w) / total for w in weights]
#print normalized_weights
choices = numpy.random.choice(pattern_length_beats * 4, number_of_notes, False, normalized_weights)
choices.sort()
return choices
def get_melody_pitches(self, number_of_notes, note_range_size, lowest_note, melody_start):
current_note = melody_start
highest_note = lowest_note + note_range_size
pitches = []
possible_notes = range(lowest_note, highest_note + 1)
# note_scale_weights = [1] * len(possible_notes)
# for i, note in range(len(possible_notes)):
# pitch_class = note % 7
# note_scale_weights[i] += self.SCALE_WEIGHTS[pitch_class]
for i in xrange(number_of_notes):
pitches.append(current_note)
possible_next_notes = [
i for i in xrange(current_note - MAX_JUMP, current_note + MAX_JUMP + 1)
if lowest_note <= i <= highest_note
]
next_note_weights = [0] * len(possible_next_notes)
for i, note in enumerate(possible_next_notes):
pitch_class = note % 7
# Notes that are "strong" scale notes have a higher chance.
weight = 2 * SCALE_WEIGHTS[pitch_class]
# Notes that are far away have a lower chance.
weight -= abs(current_note - note)
# No notes has no chance!
weight = max(1, weight)
next_note_weights[i] = weight
#print "N:", possible_next_notes
#print "W:", next_note_weights
total = sum(next_note_weights)
next_note_weights_normalized = [float(w) / total for w in next_note_weights]
current_note = numpy.random.choice(possible_next_notes, p=next_note_weights_normalized)
return pitches
def scale_to_range(value, min_value, max_value):
""" Convert 0-1 value to value between min_value and max_value (inclusive) """
scaled_value = min_value + int(value * (max_value - min_value + 1))
return scaled_value if scaled_value <= max_value else max_value
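# Worked example (added for clarity; not part of the original module):
#   SynthLead.get_midi_value(0) == 57    # A1, scale offset 0
#   SynthLead.get_midi_value(2) == 60    # C2, two eolian steps above A1
#   SynthLead.get_midi_value(7) == 69    # A2, one full octave above A1
#   scale_to_range(0.5, 0, 14) == 7      # a 0-1 value mapped onto the range 0..14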
|
[
"itertools.repeat",
"mido.Message",
"itertools.count",
"itertools.islice",
"numpy.random.choice",
"itertools.cycle",
"mido.open_output",
"collections.deque"
] |
[((454, 461), 'collections.deque', 'deque', ([], {}), '()\n', (459, 461), False, 'from collections import deque\n'), ((489, 541), 'mido.open_output', 'mido.open_output', (["('IAC Driver Melody %s' % self.role)"], {}), "('IAC Driver Melody %s' % self.role)\n", (505, 541), False, 'import mido\n'), ((4843, 4865), 'itertools.count', 'itertools.count', (['(0)', '(12)'], {}), '(0, 12)\n', (4858, 4865), False, 'import itertools\n'), ((5693, 5786), 'numpy.random.choice', 'numpy.random.choice', (['(pattern_length_beats * 4)', 'number_of_notes', '(False)', 'normalized_weights'], {}), '(pattern_length_beats * 4, number_of_notes, False,\n normalized_weights)\n', (5712, 5786), False, 'import numpy\n'), ((5021, 5050), 'itertools.cycle', 'itertools.cycle', (['EOLIAN_SCALE'], {}), '(EOLIAN_SCALE)\n', (5036, 5050), False, 'import itertools\n'), ((5106, 5175), 'itertools.islice', 'itertools.islice', (['infinite_eolian', 'offset_from_a1', '(offset_from_a1 + 1)'], {}), '(infinite_eolian, offset_from_a1, offset_from_a1 + 1)\n', (5122, 5175), False, 'import itertools\n'), ((7397, 7469), 'numpy.random.choice', 'numpy.random.choice', (['possible_next_notes'], {'p': 'next_note_weights_normalized'}), '(possible_next_notes, p=next_note_weights_normalized)\n', (7416, 7469), False, 'import numpy\n'), ((4920, 4945), 'itertools.repeat', 'itertools.repeat', (['item', '(7)'], {}), '(item, 7)\n', (4936, 4945), False, 'import itertools\n'), ((3653, 3698), 'mido.Message', 'mido.Message', (['"""note_on"""'], {'note': 'next_note_pitch'}), "('note_on', note=next_note_pitch)\n", (3665, 3698), False, 'import mido\n'), ((3923, 3968), 'mido.Message', 'mido.Message', (['"""note_off"""'], {'note': 'previous_pitch'}), "('note_off', note=previous_pitch)\n", (3935, 3968), False, 'import mido\n'), ((4528, 4573), 'mido.Message', 'mido.Message', (['"""note_off"""'], {'note': 'previous_pitch'}), "('note_off', note=previous_pitch)\n", (4540, 4573), False, 'import mido\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import functools
import numpy as np
from ....tests.helper import pytest
from .. import iers
from ....table import Table
from ....time import Time
allclose_sec = functools.partial(np.allclose, rtol=1e-15, atol=1e-9)
# 1 nanosec atol
try:
iers.IERS_A.open() # check if IERS_A is available
except IOError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
def test_simple(self):
iers.IERS.close()
assert iers.IERS.iers_table is None
iers_tab = iers.IERS.open()
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, Table)
jd1 = np.array([2456108.5, 2456108.5, 2456108.5,
2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert allclose_sec(ut1_utc, np.array([-0.5868211, -0.5868184,
-0.5868184, 0.4131816,
0.41328895]))
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert allclose_sec(ut1_utc3, np.array([-0.5868211, -0.5868184,
-0.5868184, 0.4131816,
0.41328895]))
@pytest.mark.skipif(str('not HAS_IERS_A'))
class TestIERS_A():
def test_simple(self):
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5,
2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert allclose_sec(ut1_utc, np.array([-0.5868211, -0.5868184,
-0.5868184, 0.4131816,
0.41328895]))
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
|
[
"functools.partial",
"numpy.array",
"numpy.all"
] |
[((337, 391), 'functools.partial', 'functools.partial', (['np.allclose'], {'rtol': '(1e-15)', 'atol': '(1e-09)'}), '(np.allclose, rtol=1e-15, atol=1e-09)\n', (354, 391), False, 'import functools\n'), ((865, 930), 'numpy.array', 'np.array', (['[2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5]'], {}), '([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])\n', (873, 930), True, 'import numpy as np\n'), ((969, 1025), 'numpy.array', 'np.array', (['[0.49999421, 0.99997685, 0.99998843, 0.0, 0.5]'], {}), '([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])\n', (977, 1025), True, 'import numpy as np\n'), ((1589, 1624), 'numpy.all', 'np.all', (['(status2 == iers.FROM_IERS_B)'], {}), '(status2 == iers.FROM_IERS_B)\n', (1595, 1624), True, 'import numpy as np\n'), ((2234, 2299), 'numpy.array', 'np.array', (['[2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5]'], {}), '([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])\n', (2242, 2299), True, 'import numpy as np\n'), ((2338, 2394), 'numpy.array', 'np.array', (['[0.49999421, 0.99997685, 0.99998843, 0.0, 0.5]'], {}), '([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])\n', (2346, 2394), True, 'import numpy as np\n'), ((2482, 2516), 'numpy.all', 'np.all', (['(status == iers.FROM_IERS_B)'], {}), '(status == iers.FROM_IERS_B)\n', (2488, 2516), True, 'import numpy as np\n'), ((1107, 1176), 'numpy.array', 'np.array', (['[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895]'], {}), '([-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895])\n', (1115, 1176), True, 'import numpy as np\n'), ((1923, 1992), 'numpy.array', 'np.array', (['[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895]'], {}), '([-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895])\n', (1931, 1992), True, 'import numpy as np\n'), ((2554, 2623), 'numpy.array', 'np.array', (['[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895]'], {}), '([-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895])\n', (2562, 2623), True, 'import numpy as np\n')]
|
import socket
from time import sleep, strftime, time
from typing import Any, List, Optional
from numpy import random
def log(*args: Any) -> None:
print(strftime('%Y-%m-%d %H:%M:%S - hlp - INFO -'), *args)
def get_random_bytes(size: int, seed: int) -> bytes:
random.seed(seed)
result = random.bytes(size)
assert isinstance(result, bytes)
return result
def get_random_text(size: int, seed: int) -> str:
random.seed(seed)
items: List[str] = []
for char in range(ord('A'), ord('Z') + 1):
items += chr(char)
for char in range(ord('a'), ord('z') + 1):
items += chr(char)
for char in range(ord('0'), ord('9') + 1):
items += chr(char)
items.extend(('.', ',', ';', ':', '!',))
items.extend((' ', ' ', ' ', ' ',))
items += '\n'
return ''.join(random.choice(items, size))
# http://code.activestate.com/recipes/576655-wait-for-network-service-to-appear/
def wait_net_service(host: str, port: int, timeout: Optional[int] = None) -> bool:
log(f'Waiting for web server: {host}:{port}')
sock = socket.socket()
end = time() + timeout if timeout else 0
while True:
try:
if timeout:
if time() > end:
log('ERROR! Network sockets connect waiting timeout!')
return False
sock.connect((host, port))
except socket.timeout:
sleep(0.1)
except socket.error:
sleep(0.1)
else:
sock.close()
return True
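# Hypothetical usage sketch (added; the port and timeout values are assumptions):
if __name__ == '__main__':
    if wait_net_service('127.0.0.1', 8080, timeout=30):
        log('web server is up')
    else:
        log('gave up waiting for the web server')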
|
[
"numpy.random.seed",
"socket.socket",
"time.strftime",
"time.time",
"time.sleep",
"numpy.random.bytes",
"numpy.random.choice"
] |
[((275, 292), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (286, 292), False, 'from numpy import random\n'), ((306, 324), 'numpy.random.bytes', 'random.bytes', (['size'], {}), '(size)\n', (318, 324), False, 'from numpy import random\n'), ((436, 453), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (447, 453), False, 'from numpy import random\n'), ((1080, 1095), 'socket.socket', 'socket.socket', ([], {}), '()\n', (1093, 1095), False, 'import socket\n'), ((159, 207), 'time.strftime', 'strftime', (['"""%Y-%m-%d %H:%M:%S - hlp - INFO -"""'], {}), "('%Y-%m-%d %H:%M:%S - hlp - INFO -')\n", (167, 207), False, 'from time import sleep, strftime, time\n'), ((824, 850), 'numpy.random.choice', 'random.choice', (['items', 'size'], {}), '(items, size)\n', (837, 850), False, 'from numpy import random\n'), ((1106, 1112), 'time.time', 'time', ([], {}), '()\n', (1110, 1112), False, 'from time import sleep, strftime, time\n'), ((1420, 1430), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (1425, 1430), False, 'from time import sleep, strftime, time\n'), ((1472, 1482), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (1477, 1482), False, 'from time import sleep, strftime, time\n'), ((1214, 1220), 'time.time', 'time', ([], {}), '()\n', (1218, 1220), False, 'from time import sleep, strftime, time\n')]
|
"""
2021 <NAME>, ETHZ, MPI IS
Application to train generative model.
"""
import numpy as np
import healthgen.apps.global_parameters
from healthgen.apps.base_app import BaseApplication
from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel
import torch
import wandb
from absl import flags, app
FLAGS = flags.FLAGS
class GenApplication(BaseApplication):
def __init__(self):
super().__init__()
self.gen_model = self._get_gen_model()
def _get_gen_model(self):
if FLAGS.gen_model == 'vae':
return VAEGenModel(seed=FLAGS.seed, x_dim=FLAGS.x_dim, z_dim=FLAGS.z_dim, seq_len=FLAGS.seq_len,
activation=FLAGS.activation, dropout=FLAGS.dropout,
dense_x_z=FLAGS.dense_x_z, dense_z_x=FLAGS.dense_z_x,
conv_x_z=FLAGS.conv_x_z, conv_z_x=FLAGS.conv_z_x,
encoder=FLAGS.encoder, beta=FLAGS.beta, data_mode=FLAGS.data_mode,
mask_loss=FLAGS.mask_loss)
elif FLAGS.gen_model == 'multi_vae':
return MultiVAEGenModel(seed=FLAGS.seed, x_dim=FLAGS.x_dim, z_dim=FLAGS.z_dim, seq_len=FLAGS.seq_len,
activation=FLAGS.activation, dropout=FLAGS.dropout,
dense_x_z=FLAGS.dense_x_z, dense_z_x=FLAGS.dense_z_x,
conv_x_z=FLAGS.conv_x_z, conv_z_x=FLAGS.conv_z_x,
encoder=FLAGS.encoder, decoder=FLAGS.decoder, beta=FLAGS.beta, data_mode=FLAGS.data_mode,
mask_loss=FLAGS.mask_loss)
elif FLAGS.gen_model == 'healthgen':
return HealthGenModel(seed=FLAGS.seed, x_dim=FLAGS.x_dim, y_dim=FLAGS.y_dim, v_dim=FLAGS.v_dim,
z_dim=FLAGS.z_dim, seq_len=FLAGS.seq_len, activation=FLAGS.activation,
dropout=FLAGS.dropout, dense_x_v=FLAGS.dense_x_v, dense_x_h=FLAGS.dense_x_h,
dense_hx_g=FLAGS.dense_hx_g, dense_gz_z=FLAGS.dense_gz_z,
dim_RNN_h=FLAGS.dim_rnn_h, num_RNN_h=FLAGS.num_rnn_h, dim_RNN_g=FLAGS.dim_rnn_g,
num_RNN_g=FLAGS.num_rnn_g, dense_hz_z=FLAGS.dense_hz_z, dense_hz_x=FLAGS.dense_hz_x, dense_v_m=FLAGS.dense_v_m,
beta=FLAGS.beta)
elif FLAGS.gen_model == 'srnn':
return SRNNGenModel(seed=FLAGS.seed, x_dim=FLAGS.x_dim, z_dim=FLAGS.z_dim,
activation=FLAGS.activation, dropout=FLAGS.dropout,
dense_x_h=FLAGS.dense_x_h, dim_rnn_h=FLAGS.dim_rnn_h,
num_rnn_h=FLAGS.num_rnn_h, dense_hx_g=FLAGS.dense_hx_g,
dim_rnn_g=FLAGS.dim_rnn_g, num_rnn_g=FLAGS.num_rnn_g,
dense_gz_z=FLAGS.dense_gz_z, dense_hz_z=FLAGS.dense_hz_z,
dense_hz_x=FLAGS.dense_hz_x, beta=FLAGS.beta)
elif FLAGS.gen_model == 'kvae':
return KVAEGenModel(seed=FLAGS.seed, u_dim=FLAGS.y_dim, x_dim=FLAGS.x_dim, a_dim=FLAGS.a_dim,
z_dim=FLAGS.z_dim, activation=FLAGS.activation,
dropout=FLAGS.dropout, dense_x_a=FLAGS.dense_x_a,
dense_a_x=FLAGS.dense_a_x, init_kf_mat=FLAGS.init_kf_mat,
noise_transition=FLAGS.noise_transition,
noise_emission=FLAGS.noise_emission, init_cov=FLAGS.init_cov,
K=FLAGS.K, dim_rnn_alpha=FLAGS.dim_rnn_alpha,
num_rnn_alpha=FLAGS.num_rnn_alpha, scale_recon=FLAGS.scale_recon,
use_smoothed_a=FLAGS.use_smoothed_a)
elif FLAGS.gen_model == 'kvae_miss':
return KVAEMissGenModel(seed=FLAGS.seed, u_dim=FLAGS.u_dim, x_dim=FLAGS.x_dim, m_dim=FLAGS.m_dim, a_dim=FLAGS.a_dim,
z_dim=FLAGS.z_dim, activation=FLAGS.activation,
dropout=FLAGS.dropout, dense_x_a=FLAGS.dense_x_a,
dense_a_x=FLAGS.dense_a_x, init_kf_mat=FLAGS.init_kf_mat,
noise_transition=FLAGS.noise_transition,
noise_emission=FLAGS.noise_emission, init_cov=FLAGS.init_cov,
K=FLAGS.K, dim_rnn_alpha=FLAGS.dim_rnn_alpha,
num_rnn_alpha=FLAGS.num_rnn_alpha, scale_recon=FLAGS.scale_recon,
use_smoothed_a=FLAGS.use_smoothed_a, sample_m=FLAGS.sample_m,
learn_scale=FLAGS.learn_scale)
def _get_cond_labels(self, y_dict):
"""
Returns: cond_labels, labels used for conditional generation.
"""
        cond_labels = np.concatenate((y_dict['y_train'], y_dict['y_val']))
        return cond_labels
def run(self):
# Load data
X_real, y_real = self.data_loader.get_data() # X: {dict}, y: {dict}
# Train generative model
self.gen_model.train_model(X_real, y_real)
def main(argv):
# init wandb logging
config = dict(
seed=FLAGS.seed,
learning_rate=FLAGS.gen_lr,
batch_size=FLAGS.gen_batch_size,
hidden_size=FLAGS.dim_rnn_h,
dataset="MIMIC-III",
model=FLAGS.gen_model,
pred_task="vent_bin"
)
use_cuda = torch.cuda.is_available()
wandb.init(
project='wand_project',
entity='wandb_user',
group=FLAGS.gen_model,
job_type='cluster' if use_cuda else 'local',
mode='online' if use_cuda else 'offline',
config=config
)
application = GenApplication()
application.run()
if __name__ == '__main__':
app.run(main)
|
[
"healthgen.generation.VAEGenModel",
"healthgen.generation.HealthGenModel",
"healthgen.generation.KVAEMissGenModel",
"healthgen.generation.MultiVAEGenModel",
"absl.app.run",
"wandb.init",
"torch.cuda.is_available",
"healthgen.generation.SRNNGenModel",
"healthgen.generation.KVAEGenModel",
"numpy.concatenate"
] |
[((5588, 5613), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5611, 5613), False, 'import torch\n'), ((5619, 5808), 'wandb.init', 'wandb.init', ([], {'project': '"""wand_project"""', 'entity': '"""wandb_user"""', 'group': 'FLAGS.gen_model', 'job_type': "('cluster' if use_cuda else 'local')", 'mode': "('online' if use_cuda else 'offline')", 'config': 'config'}), "(project='wand_project', entity='wandb_user', group=FLAGS.\n gen_model, job_type='cluster' if use_cuda else 'local', mode='online' if\n use_cuda else 'offline', config=config)\n", (5629, 5808), False, 'import wandb\n'), ((5945, 5958), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (5952, 5958), False, 'from absl import flags, app\n'), ((5022, 5074), 'numpy.concatenate', 'np.concatenate', (["(y_dict['y_train'], y_dict['y_val'])"], {}), "((y_dict['y_train'], y_dict['y_val']))\n", (5036, 5074), True, 'import numpy as np\n'), ((610, 968), 'healthgen.generation.VAEGenModel', 'VAEGenModel', ([], {'seed': 'FLAGS.seed', 'x_dim': 'FLAGS.x_dim', 'z_dim': 'FLAGS.z_dim', 'seq_len': 'FLAGS.seq_len', 'activation': 'FLAGS.activation', 'dropout': 'FLAGS.dropout', 'dense_x_z': 'FLAGS.dense_x_z', 'dense_z_x': 'FLAGS.dense_z_x', 'conv_x_z': 'FLAGS.conv_x_z', 'conv_z_x': 'FLAGS.conv_z_x', 'encoder': 'FLAGS.encoder', 'beta': 'FLAGS.beta', 'data_mode': 'FLAGS.data_mode', 'mask_loss': 'FLAGS.mask_loss'}), '(seed=FLAGS.seed, x_dim=FLAGS.x_dim, z_dim=FLAGS.z_dim, seq_len=\n FLAGS.seq_len, activation=FLAGS.activation, dropout=FLAGS.dropout,\n dense_x_z=FLAGS.dense_x_z, dense_z_x=FLAGS.dense_z_x, conv_x_z=FLAGS.\n conv_x_z, conv_z_x=FLAGS.conv_z_x, encoder=FLAGS.encoder, beta=FLAGS.\n beta, data_mode=FLAGS.data_mode, mask_loss=FLAGS.mask_loss)\n', (621, 968), False, 'from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel\n'), ((1169, 1558), 'healthgen.generation.MultiVAEGenModel', 'MultiVAEGenModel', ([], {'seed': 'FLAGS.seed', 'x_dim': 'FLAGS.x_dim', 'z_dim': 'FLAGS.z_dim', 'seq_len': 'FLAGS.seq_len', 'activation': 'FLAGS.activation', 'dropout': 'FLAGS.dropout', 'dense_x_z': 'FLAGS.dense_x_z', 'dense_z_x': 'FLAGS.dense_z_x', 'conv_x_z': 'FLAGS.conv_x_z', 'conv_z_x': 'FLAGS.conv_z_x', 'encoder': 'FLAGS.encoder', 'decoder': 'FLAGS.decoder', 'beta': 'FLAGS.beta', 'data_mode': 'FLAGS.data_mode', 'mask_loss': 'FLAGS.mask_loss'}), '(seed=FLAGS.seed, x_dim=FLAGS.x_dim, z_dim=FLAGS.z_dim,\n seq_len=FLAGS.seq_len, activation=FLAGS.activation, dropout=FLAGS.\n dropout, dense_x_z=FLAGS.dense_x_z, dense_z_x=FLAGS.dense_z_x, conv_x_z\n =FLAGS.conv_x_z, conv_z_x=FLAGS.conv_z_x, encoder=FLAGS.encoder,\n decoder=FLAGS.decoder, beta=FLAGS.beta, data_mode=FLAGS.data_mode,\n mask_loss=FLAGS.mask_loss)\n', (1185, 1558), False, 'from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel\n'), ((1781, 2317), 'healthgen.generation.HealthGenModel', 'HealthGenModel', ([], {'seed': 'FLAGS.seed', 'x_dim': 'FLAGS.x_dim', 'y_dim': 'FLAGS.y_dim', 'v_dim': 'FLAGS.v_dim', 'z_dim': 'FLAGS.z_dim', 'seq_len': 'FLAGS.seq_len', 'activation': 'FLAGS.activation', 'dropout': 'FLAGS.dropout', 'dense_x_v': 'FLAGS.dense_x_v', 'dense_x_h': 'FLAGS.dense_x_h', 'dense_hx_g': 'FLAGS.dense_hx_g', 'dense_gz_z': 'FLAGS.dense_gz_z', 'dim_RNN_h': 'FLAGS.dim_rnn_h', 'num_RNN_h': 'FLAGS.num_rnn_h', 'dim_RNN_g': 'FLAGS.dim_rnn_g', 'num_RNN_g': 'FLAGS.num_rnn_g', 'dense_hz_z': 'FLAGS.dense_hz_z', 'dense_hz_x': 'FLAGS.dense_hz_x', 'dense_v_m': 
'FLAGS.dense_v_m', 'beta': 'FLAGS.beta'}), '(seed=FLAGS.seed, x_dim=FLAGS.x_dim, y_dim=FLAGS.y_dim, v_dim\n =FLAGS.v_dim, z_dim=FLAGS.z_dim, seq_len=FLAGS.seq_len, activation=\n FLAGS.activation, dropout=FLAGS.dropout, dense_x_v=FLAGS.dense_x_v,\n dense_x_h=FLAGS.dense_x_h, dense_hx_g=FLAGS.dense_hx_g, dense_gz_z=\n FLAGS.dense_gz_z, dim_RNN_h=FLAGS.dim_rnn_h, num_RNN_h=FLAGS.num_rnn_h,\n dim_RNN_g=FLAGS.dim_rnn_g, num_RNN_g=FLAGS.num_rnn_g, dense_hz_z=FLAGS.\n dense_hz_z, dense_hz_x=FLAGS.dense_hz_x, dense_v_m=FLAGS.dense_v_m,\n beta=FLAGS.beta)\n', (1795, 2317), False, 'from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel\n'), ((2549, 2958), 'healthgen.generation.SRNNGenModel', 'SRNNGenModel', ([], {'seed': 'FLAGS.seed', 'x_dim': 'FLAGS.x_dim', 'z_dim': 'FLAGS.z_dim', 'activation': 'FLAGS.activation', 'dropout': 'FLAGS.dropout', 'dense_x_h': 'FLAGS.dense_x_h', 'dim_rnn_h': 'FLAGS.dim_rnn_h', 'num_rnn_h': 'FLAGS.num_rnn_h', 'dense_hx_g': 'FLAGS.dense_hx_g', 'dim_rnn_g': 'FLAGS.dim_rnn_g', 'num_rnn_g': 'FLAGS.num_rnn_g', 'dense_gz_z': 'FLAGS.dense_gz_z', 'dense_hz_z': 'FLAGS.dense_hz_z', 'dense_hz_x': 'FLAGS.dense_hz_x', 'beta': 'FLAGS.beta'}), '(seed=FLAGS.seed, x_dim=FLAGS.x_dim, z_dim=FLAGS.z_dim,\n activation=FLAGS.activation, dropout=FLAGS.dropout, dense_x_h=FLAGS.\n dense_x_h, dim_rnn_h=FLAGS.dim_rnn_h, num_rnn_h=FLAGS.num_rnn_h,\n dense_hx_g=FLAGS.dense_hx_g, dim_rnn_g=FLAGS.dim_rnn_g, num_rnn_g=FLAGS\n .num_rnn_g, dense_gz_z=FLAGS.dense_gz_z, dense_hz_z=FLAGS.dense_hz_z,\n dense_hz_x=FLAGS.dense_hz_x, beta=FLAGS.beta)\n', (2561, 2958), False, 'from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel\n'), ((3188, 3708), 'healthgen.generation.KVAEGenModel', 'KVAEGenModel', ([], {'seed': 'FLAGS.seed', 'u_dim': 'FLAGS.y_dim', 'x_dim': 'FLAGS.x_dim', 'a_dim': 'FLAGS.a_dim', 'z_dim': 'FLAGS.z_dim', 'activation': 'FLAGS.activation', 'dropout': 'FLAGS.dropout', 'dense_x_a': 'FLAGS.dense_x_a', 'dense_a_x': 'FLAGS.dense_a_x', 'init_kf_mat': 'FLAGS.init_kf_mat', 'noise_transition': 'FLAGS.noise_transition', 'noise_emission': 'FLAGS.noise_emission', 'init_cov': 'FLAGS.init_cov', 'K': 'FLAGS.K', 'dim_rnn_alpha': 'FLAGS.dim_rnn_alpha', 'num_rnn_alpha': 'FLAGS.num_rnn_alpha', 'scale_recon': 'FLAGS.scale_recon', 'use_smoothed_a': 'FLAGS.use_smoothed_a'}), '(seed=FLAGS.seed, u_dim=FLAGS.y_dim, x_dim=FLAGS.x_dim, a_dim=\n FLAGS.a_dim, z_dim=FLAGS.z_dim, activation=FLAGS.activation, dropout=\n FLAGS.dropout, dense_x_a=FLAGS.dense_x_a, dense_a_x=FLAGS.dense_a_x,\n init_kf_mat=FLAGS.init_kf_mat, noise_transition=FLAGS.noise_transition,\n noise_emission=FLAGS.noise_emission, init_cov=FLAGS.init_cov, K=FLAGS.K,\n dim_rnn_alpha=FLAGS.dim_rnn_alpha, num_rnn_alpha=FLAGS.num_rnn_alpha,\n scale_recon=FLAGS.scale_recon, use_smoothed_a=FLAGS.use_smoothed_a)\n', (3200, 3708), False, 'from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel\n'), ((4003, 4613), 'healthgen.generation.KVAEMissGenModel', 'KVAEMissGenModel', ([], {'seed': 'FLAGS.seed', 'u_dim': 'FLAGS.u_dim', 'x_dim': 'FLAGS.x_dim', 'm_dim': 'FLAGS.m_dim', 'a_dim': 'FLAGS.a_dim', 'z_dim': 'FLAGS.z_dim', 'activation': 'FLAGS.activation', 'dropout': 'FLAGS.dropout', 'dense_x_a': 'FLAGS.dense_x_a', 'dense_a_x': 'FLAGS.dense_a_x', 'init_kf_mat': 'FLAGS.init_kf_mat', 'noise_transition': 'FLAGS.noise_transition', 'noise_emission': 
'FLAGS.noise_emission', 'init_cov': 'FLAGS.init_cov', 'K': 'FLAGS.K', 'dim_rnn_alpha': 'FLAGS.dim_rnn_alpha', 'num_rnn_alpha': 'FLAGS.num_rnn_alpha', 'scale_recon': 'FLAGS.scale_recon', 'use_smoothed_a': 'FLAGS.use_smoothed_a', 'sample_m': 'FLAGS.sample_m', 'learn_scale': 'FLAGS.learn_scale'}), '(seed=FLAGS.seed, u_dim=FLAGS.u_dim, x_dim=FLAGS.x_dim,\n m_dim=FLAGS.m_dim, a_dim=FLAGS.a_dim, z_dim=FLAGS.z_dim, activation=\n FLAGS.activation, dropout=FLAGS.dropout, dense_x_a=FLAGS.dense_x_a,\n dense_a_x=FLAGS.dense_a_x, init_kf_mat=FLAGS.init_kf_mat,\n noise_transition=FLAGS.noise_transition, noise_emission=FLAGS.\n noise_emission, init_cov=FLAGS.init_cov, K=FLAGS.K, dim_rnn_alpha=FLAGS\n .dim_rnn_alpha, num_rnn_alpha=FLAGS.num_rnn_alpha, scale_recon=FLAGS.\n scale_recon, use_smoothed_a=FLAGS.use_smoothed_a, sample_m=FLAGS.\n sample_m, learn_scale=FLAGS.learn_scale)\n', (4019, 4613), False, 'from healthgen.generation import VAEGenModel, MultiVAEGenModel, SRNNGenModel, KVAEGenModel, KVAEMissGenModel, HealthGenModel\n')]
|
# -*- coding: UTF-8 -*-
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from bokeh.plotting import output_file, figure, show
class NeuralNetwork:
def __init__(self, input_shape, stock_or_return):
self.input_shape = input_shape
self.stock_or_return = stock_or_return
def create_model(self, input_shape):
        # This must be an integer
        # input_shape = 20
        # The original implementation was built with Keras; apart from the values singled out below, all other arguments keep the Keras defaults
        model = tf.keras.models.Sequential()
        # Input layer
        model.add(tf.keras.layers.InputLayer(input_shape=(1, input_shape)))
        # First LSTM layer
        model.add(tf.keras.layers.LSTM(
            units=5, # specified as 5
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
            recurrent_regularizer=tf.keras.regularizers.l2(l=0), # specified as 0
bias_regularizer=None,
            activity_regularizer=tf.keras.regularizers.l2(l=0.003), # specified as 0.003
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
            dropout=0.2, # specified as 0.2
            recurrent_dropout=0.2, # specified as 0.2
            implementation=1,
            return_sequences=True, # specified as False in the original note, but True is required here so the following layers receive the full sequence
return_state=False,
go_backwards=False,
stateful=False,
unroll=False
))
        # Dense layer
        model.add(tf.keras.layers.Dense(
            units=5, # specified as 5
            activation='sigmoid', # specified as sigmoid
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
            activity_regularizer=tf.keras.regularizers.l2(l=0.005), # specified as 0.005
kernel_constraint=None,
bias_constraint=None
))
        # Second LSTM layer
        model.add(tf.keras.layers.LSTM(
            units=2, # specified as 2
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
            recurrent_regularizer=tf.keras.regularizers.l2(l=0.001), # specified as 0.001
            bias_regularizer=None,
            activity_regularizer=tf.keras.regularizers.l2(l=0.01), # specified as 0.003 (note: 0.01 is used here)
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
            dropout=0.2, # the paper specifies 0.2
            recurrent_dropout=0.2, # the paper specifies 0.2
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False
))
        # Dense layer
        model.add(tf.keras.layers.Dense(
            units=1, # specified as 1
            activation='sigmoid', # specified as sigmoid
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
            activity_regularizer=tf.keras.regularizers.l2(l=0.001), # specified as 0.005 (note: 0.001 is used here)
kernel_constraint=None,
bias_constraint=None
))
return model
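    # Added sketch (not part of the original class): hypothetical smoke test of the model
    # built above; the network maps a (batch, 1, input_shape) window to a single sigmoid
    # output per sample.
    #   net = NeuralNetwork(20, False)
    #   m = net.create_model(20)
    #   m.compile(optimizer="adam", loss="mean_squared_error")
    #   assert m.predict(np.zeros((4, 1, 20))).shape == (4, 1)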
def make_train_model(self):
model = self.create_model(self.input_shape)
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["mse"])
# load data
train = np.reshape(np.array(pd.read_csv("features/autoencoded_train_data.csv", index_col=0)),
(len(np.array(pd.read_csv("features/autoencoded_train_data.csv"))), 1, self.input_shape))
train_y = np.array(pd.read_csv("features/autoencoded_train_y.csv", index_col=0))
# train_stock = np.array(pd.read_csv("train_stock.csv"))
# train model
model.fit(train, train_y, epochs=2000)
test_x = np.reshape(np.array(pd.read_csv("features/autoencoded_test_data.csv", index_col=0)),
(len(np.array(pd.read_csv("features/autoencoded_test_data.csv"))), 1, self.input_shape))
test_y = np.array(pd.read_csv("features/autoencoded_test_y.csv", index_col=0))
# test_stock = np.array(pd.read_csv("test_stock.csv"))
stock_data_test = np.array(pd.read_csv("stock_data_test.csv", index_col=0))
print(model.evaluate(test_x, test_y))
prediction_data = []
stock_data = []
for i in range(len(test_y)):
prediction = (model.predict(np.reshape(test_x[i], (1, 1, self.input_shape))))
prediction_data.append(np.reshape(prediction, (1,)))
std = np.std(prediction_data)
if 0 == std:
std = 0.0001
prediction_corrected = (np.array(prediction_data) - np.mean(prediction_data)) * (1.0 / std)
stock_price = np.exp(np.reshape(prediction, (1,)))*stock_data_test[i]
stock_data.append(stock_price[0])
stock_data[:] = [i - (float(stock_data[0])-float(stock_data_test[0])) for i in stock_data]
# stock_data = stock_data - stock_data[0]
if self.stock_or_return:
plt.plot(stock_data)
plt.plot(stock_data_test)
stock = pd.DataFrame(stock_data, index=None)
stock_test = pd.DataFrame(stock_data_test, index=None)
# print(stock_data)
plt.show()
else:
# plt.plot(prediction_corrected)
plt.plot(prediction_data)
# print(prediction_data)
plt.plot(test_y)
plt.show()
if __name__ == "__main__":
model = NeuralNetwork(20, False)
model.make_train_model()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"numpy.std",
"tensorflow.keras.layers.InputLayer",
"numpy.mean",
"numpy.array",
"tensorflow.keras.models.Sequential",
"numpy.reshape",
"tensorflow.keras.regularizers.l2"
] |
[((501, 529), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (527, 529), True, 'import tensorflow as tf\n'), ((562, 618), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(1, input_shape)'}), '(input_shape=(1, input_shape))\n', (588, 618), True, 'import tensorflow as tf\n'), ((3993, 4053), 'pandas.read_csv', 'pd.read_csv', (['"""features/autoencoded_train_y.csv"""'], {'index_col': '(0)'}), "('features/autoencoded_train_y.csv', index_col=0)\n", (4004, 4053), True, 'import pandas as pd\n'), ((4437, 4496), 'pandas.read_csv', 'pd.read_csv', (['"""features/autoencoded_test_y.csv"""'], {'index_col': '(0)'}), "('features/autoencoded_test_y.csv', index_col=0)\n", (4448, 4496), True, 'import pandas as pd\n'), ((4597, 4644), 'pandas.read_csv', 'pd.read_csv', (['"""stock_data_test.csv"""'], {'index_col': '(0)'}), "('stock_data_test.csv', index_col=0)\n", (4608, 4644), True, 'import pandas as pd\n'), ((4956, 4979), 'numpy.std', 'np.std', (['prediction_data'], {}), '(prediction_data)\n', (4962, 4979), True, 'import numpy as np\n'), ((5460, 5480), 'matplotlib.pyplot.plot', 'plt.plot', (['stock_data'], {}), '(stock_data)\n', (5468, 5480), True, 'import matplotlib.pyplot as plt\n'), ((5493, 5518), 'matplotlib.pyplot.plot', 'plt.plot', (['stock_data_test'], {}), '(stock_data_test)\n', (5501, 5518), True, 'import matplotlib.pyplot as plt\n'), ((5539, 5575), 'pandas.DataFrame', 'pd.DataFrame', (['stock_data'], {'index': 'None'}), '(stock_data, index=None)\n', (5551, 5575), True, 'import pandas as pd\n'), ((5601, 5642), 'pandas.DataFrame', 'pd.DataFrame', (['stock_data_test'], {'index': 'None'}), '(stock_data_test, index=None)\n', (5613, 5642), True, 'import pandas as pd\n'), ((5687, 5697), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5695, 5697), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5794), 'matplotlib.pyplot.plot', 'plt.plot', (['prediction_data'], {}), '(prediction_data)\n', (5777, 5794), True, 'import matplotlib.pyplot as plt\n'), ((5844, 5860), 'matplotlib.pyplot.plot', 'plt.plot', (['test_y'], {}), '(test_y)\n', (5852, 5860), True, 'import matplotlib.pyplot as plt\n'), ((5873, 5883), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5881, 5883), True, 'import matplotlib.pyplot as plt\n'), ((3783, 3846), 'pandas.read_csv', 'pd.read_csv', (['"""features/autoencoded_train_data.csv"""'], {'index_col': '(0)'}), "('features/autoencoded_train_data.csv', index_col=0)\n", (3794, 3846), True, 'import pandas as pd\n'), ((4229, 4291), 'pandas.read_csv', 'pd.read_csv', (['"""features/autoencoded_test_data.csv"""'], {'index_col': '(0)'}), "('features/autoencoded_test_data.csv', index_col=0)\n", (4240, 4291), True, 'import pandas as pd\n'), ((4823, 4870), 'numpy.reshape', 'np.reshape', (['test_x[i]', '(1, 1, self.input_shape)'], {}), '(test_x[i], (1, 1, self.input_shape))\n', (4833, 4870), True, 'import numpy as np\n'), ((4908, 4936), 'numpy.reshape', 'np.reshape', (['prediction', '(1,)'], {}), '(prediction, (1,))\n', (4918, 4936), True, 'import numpy as np\n'), ((1055, 1084), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': '(0)'}), '(l=0)\n', (1079, 1084), True, 'import tensorflow as tf\n'), ((1161, 1194), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': '(0.003)'}), '(l=0.003)\n', (1185, 1194), True, 'import tensorflow as tf\n'), ((1951, 1984), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': '(0.005)'}), '(l=0.005)\n', (1975, 
1984), True, 'import tensorflow as tf\n'), ((2512, 2545), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': '(0.001)'}), '(l=0.001)\n', (2536, 2545), True, 'import tensorflow as tf\n'), ((2626, 2658), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': '(0.01)'}), '(l=0.01)\n', (2650, 2658), True, 'import tensorflow as tf\n'), ((3409, 3442), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': '(0.001)'}), '(l=0.001)\n', (3433, 3442), True, 'import tensorflow as tf\n'), ((5070, 5095), 'numpy.array', 'np.array', (['prediction_data'], {}), '(prediction_data)\n', (5078, 5095), True, 'import numpy as np\n'), ((5098, 5122), 'numpy.mean', 'np.mean', (['prediction_data'], {}), '(prediction_data)\n', (5105, 5122), True, 'import numpy as np\n'), ((5171, 5199), 'numpy.reshape', 'np.reshape', (['prediction', '(1,)'], {}), '(prediction, (1,))\n', (5181, 5199), True, 'import numpy as np\n'), ((3890, 3940), 'pandas.read_csv', 'pd.read_csv', (['"""features/autoencoded_train_data.csv"""'], {}), "('features/autoencoded_train_data.csv')\n", (3901, 3940), True, 'import pandas as pd\n'), ((4336, 4385), 'pandas.read_csv', 'pd.read_csv', (['"""features/autoencoded_test_data.csv"""'], {}), "('features/autoencoded_test_data.csv')\n", (4347, 4385), True, 'import pandas as pd\n')]
|
import networkx as nx
import numpy as np
def _graph2dag(graph):
"""Converts nx.Graph to an directed, acyclic form. Returns the adjancency matrix"""
adj = nx.adj_matrix(graph).todense()
adj = adj + adj.T
adj = (adj != 0).astype(int)
adj = np.tril(adj)
assert nx.is_directed_acyclic_graph(nx.from_numpy_matrix(adj, create_using=nx.DiGraph))
return adj
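# After symmetrizing, np.tril keeps only entries whose row index is >= the
# column index, so when the result is read as a directed adjacency matrix
# every edge points "down" the node ordering and no cycle is possible; the
# nx.is_directed_acyclic_graph assertion simply double-checks this property.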
def erdos_renyi_dag(n, p, seed=None):
"""
Simulates an Erdos Renyi random DAG on n vertices
with expected degree p. Each node has the same expected
degree.
If p is an integer, it is the expected
number of connected edges. Else, it is the expected degree
fraction relative to n.
"""
if p > 1 or isinstance(p, int):
p = p / (n - 1)
G = nx.erdos_renyi_graph(n, p, seed, directed=False)
return _graph2dag(G)
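# Minimal usage sketch (illustrative values only):
#   adj = erdos_renyi_dag(10, 3, seed=0)    # integer p: roughly 3 expected neighbors per node
#   adj = erdos_renyi_dag(10, 0.3, seed=0)  # float p <= 1: used directly as the edge probability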
def connected_erdos_renyi_dag(n, p, seed=None):
"""
Simulates an Erdos Renyi random DAG on n vertices
with expected degree p. Each node has the same expected
    degree and the graph is guaranteed connected, with
a deterministic number of edges.
If p is an integer, it is the expected
number of connected edges. Else, it is the expected degree
fraction relative to n.
"""
if p <= 1 and isinstance(p, float):
p = p * n
if int(p) != p:
import warnings
warnings.warn(f'Number of neighbors {p:.1f} will be rounded')
G = nx.connected_watts_strogatz_graph(
n, k=round(p), p=1 - 1/n, seed=seed
)
return _graph2dag(G)
def barabasi_albert_dag(n, p, seed=None):
"""
    Simulates a Barabasi Albert DAG on n vertices
with expected degree p. The degree distribution follows
a power law, and the graph is guaranteed to be connected.
If p is an integer, it is the expected
number of connected edges. Else, it is the expected degree
    fraction relative to n. Important: p must be <= 0.5
    (or the integer equivalent) to be guaranteed to succeed on all graphs.
"""
if p > 1 or isinstance(p, int):
p = p / (n - 1)
# BA model input m leads to K=(1+...+m) + m*(n-m) total edges
    # set the expected number of edges implied by p equal to K and solve for m
m = 0.5*(2*n - 1 - np.sqrt(4*n**2 - 4*n + 1 - 4*p*n**2 + 4*p*n))
if int(m) != m:
import warnings
warnings.warn(f'Number of neighbors {m:.1f} will be rounded')
G = nx.barabasi_albert_graph(n, round(m), seed)
return _graph2dag(G)
def complete_dag(n, p=None, seed=None):
"""
Returns a complete DAG over n variables
"""
G = np.ones((n, n)) - np.eye(n)
return np.tril(G)
|
[
"networkx.from_numpy_matrix",
"numpy.eye",
"networkx.erdos_renyi_graph",
"numpy.tril",
"networkx.adj_matrix",
"numpy.ones",
"warnings.warn",
"numpy.sqrt"
] |
[((260, 272), 'numpy.tril', 'np.tril', (['adj'], {}), '(adj)\n', (267, 272), True, 'import numpy as np\n'), ((767, 815), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', (['n', 'p', 'seed'], {'directed': '(False)'}), '(n, p, seed, directed=False)\n', (787, 815), True, 'import networkx as nx\n'), ((2573, 2583), 'numpy.tril', 'np.tril', (['G'], {}), '(G)\n', (2580, 2583), True, 'import numpy as np\n'), ((314, 364), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj'], {'create_using': 'nx.DiGraph'}), '(adj, create_using=nx.DiGraph)\n', (334, 364), True, 'import networkx as nx\n'), ((2284, 2345), 'warnings.warn', 'warnings.warn', (['f"""Number of neighbors {m:.1f} will be rounded"""'], {}), "(f'Number of neighbors {m:.1f} will be rounded')\n", (2297, 2345), False, 'import warnings\n'), ((2534, 2549), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (2541, 2549), True, 'import numpy as np\n'), ((2552, 2561), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2558, 2561), True, 'import numpy as np\n'), ((164, 184), 'networkx.adj_matrix', 'nx.adj_matrix', (['graph'], {}), '(graph)\n', (177, 184), True, 'import networkx as nx\n'), ((1370, 1431), 'warnings.warn', 'warnings.warn', (['f"""Number of neighbors {p:.1f} will be rounded"""'], {}), "(f'Number of neighbors {p:.1f} will be rounded')\n", (1383, 1431), False, 'import warnings\n'), ((2186, 2246), 'numpy.sqrt', 'np.sqrt', (['(4 * n ** 2 - 4 * n + 1 - 4 * p * n ** 2 + 4 * p * n)'], {}), '(4 * n ** 2 - 4 * n + 1 - 4 * p * n ** 2 + 4 * p * n)\n', (2193, 2246), True, 'import numpy as np\n')]
|
'''
sample test of pycoq.serlib
'''
import logging
import os
import json
import pkg_resources
import numpy
import pytest
import pycoq.log
import serlib.parser
def with_prefix(s: str) -> str:
''' adds package path as prefix '''
return os.path.join(pkg_resources.resource_filename('pycoq', 'tests'), s)
def test_hash_bytestring0():
''' tests serlib.hash bytestring function len 0 '''
assert serlib.parser.hash_bytestring(b'') == 5371
def test_hash_bytestring1():
''' tests serlib.hash bytestring function len 1 '''
assert serlib.parser.hash_bytestring(b'\0') == 257*5371
def test_hash_bytestring2():
''' tests serlib.hash bytestring function len 2 '''
assert serlib.parser.hash_bytestring(b'\0\0') == 257*257*5371
def test_hash_bytestring4():
''' tests serlib.hash bytestring function len 4 '''
assert serlib.parser.hash_bytestring(b'test') == 23432804277179
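# Taken together, the assertions above suggest hash_bytestring is a polynomial
# rolling hash with seed 5371 and base 257 (apparently modulo 2**64, judging
# from the 5 GB test at the bottom of this file, which relies on the closed
# form 5371 * 257**n mod 2**64 for a string of n zero bytes).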
def test_sexpparser_parse_string0():
''' tests parsing single bytestring into postfix '''
parser = serlib.parser.SExpParser()
res = parser.postfix_of_sexp('((a b c)d(a b c)(e f) g)')
ans = numpy.array([1,2,3,-3,4,1,2,3,-3,5,6,-2,7,-5],
dtype=numpy.intc)
assert all(res == ans)
def test_sexpparser_parse_string1():
''' tests parsing two sequential bytestrings into postfix '''
parser = serlib.parser.SExpParser()
res0 = parser.postfix_of_sexp('((a b c)d(a b c)(e f) g)')
ans0 = numpy.array([1,2,3,-3,4,1,2,3,-3,5,6,-2,7,-5],
dtype=numpy.intc)
ans0_ann = serlib.cparser.annotate(ans0)
res0_ann = numpy.array([0, 1, 2, 0, 4, 5, 6, 7, 5, 9, 10, 9, 12, 0], dtype=numpy.intc)
assert all(res0 == ans0)
#assert all(ans0_ann == res0_ann)
def test_sexpparser_parse_string2():
parser = serlib.parser.SExpParser()
res0 = parser.postfix_of_sexp('((a b c)d(a b c)(e f) g)')
ans0 = numpy.array([1,2,3,-3,4,1,2,3,-3,5,6,-2,7,-5],
dtype=numpy.intc)
res1 = parser.postfix_of_sexp('(g g a b Ф)')
ans1 = numpy.array([7,7,1,2,8,-5], dtype=numpy.intc)
res2 = parser.dict
ans_string2 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'Ф': 8}
ans2 = {key.encode('utf8'): value for key, value in ans_string2.items()}
assert all(res1 == ans1)
#assert res2 == ans2
def aux_serlib_parse_bytestring_new(s, address):
sparser = serlib.parser.SExpParser()
res0a = sparser.postfix_of_bytestring(s, address)
res0b = sparser.parse_bytestring_new(s, address)
pycoq.log.info(f"test {res0a} == {res0b}")
assert all(res0a == res0b)
def test_serlib_parse_bytestring_new0():
s = b'(((a b)(c d))((e f)(g h)))'
address = []
aux_serlib_parse_bytestring_new(s, address)
def test_serlib_parse_bytestring_new1():
s = b'(((a b)(c d))((e f)(g h)))'
address = [1, 0, 1]
aux_serlib_parse_bytestring_new(s, address)
def test_serlib_parse_bytestring_new2():
s = b'(((a b)(c))((e f)(g h)))'
address = [0, 1]
aux_serlib_parse_bytestring_new(s, address)
def test_serlib_parse_bytestring_new3():
''' OK '''
s = b'(((a b)(c))((e f)(g h))(k l)(p q))'
address = [3, 1]
aux_serlib_parse_bytestring_new(s, address)
def test_serlib_parse_bytestring_new4():
''' expected error for badly formed address '''
s = b'(((a b)(c))((e f)(g h))(k l)(p q))'
address = [4, 0]
with pytest.raises(serlib.cparser.IndexError):
aux_serlib_parse_bytestring_new(s, address)
def test_serlib_parse_bytestring_new5():
''' expected error for badly formed address '''
s = b'(((a b)(c))((e f)(g h))(k l)(p q))'
address = [3, 2]
with pytest.raises(serlib.cparser.IndexError):
aux_serlib_parse_bytestring_new(s, address)
def test_serlib_children():
s = b'(((a b)(c))((e f)(g h))(k l)(p q))'
parser = serlib.parser.SExpParser()
res = parser.postfix_of_bytestring(s)
ann = serlib.cparser.annotate(res)
root = res.shape[0] - 1
children = serlib.cparser.children(res, ann, root)
pycoq.log.info(f"root node {root} has children {children}")
assert all(children == [5,12,15,18])
def aux_parse_bytestring(name: str, write=False):
s = open(with_prefix(f"serlib/{name}.in")).read().strip().encode()
p = serlib.parser.SExpParser()
res = numpy.ndarray.tolist(p.postfix_of_bytestring(s))
if write:
json.dump(res, open(with_prefix(f"serlib/{name}.out"), 'w'))
else:
assert res == json.load(open(with_prefix(f"serlib/{name}.out")))
def test_parse_bytestring():
aux_parse_bytestring("input0")
def test_parse_inverse2():
s = open(with_prefix(f"serlib/input2.in")).read()
p = serlib.parser.SExpParser()
r = p.postfix_of_sexp(s)
sprime = p.to_sexp(r)
sn = numpy.array(list(s))
sprimen = numpy.array(list(sprime))
if not (all(sprimen == sn)):
logging.info(f"{sn[sprimen != sn]}")
logging.info(f"{sprimen[sprimen != sn]}")
assert all(sprimen == sn)
def test_hash_bytestring_5gb():
''' tests serlib.hash function len 5 Gb '''
n = 5*2**30
test = b'\0'*n
ans = 5371*pow(257, n, 2**64) % 2**64
assert serlib.parser.hash_bytestring(test) == ans
|
[
"logging.info",
"pytest.raises",
"numpy.array",
"pkg_resources.resource_filename"
] |
[((1118, 1195), 'numpy.array', 'numpy.array', (['[1, 2, 3, -3, 4, 1, 2, 3, -3, 5, 6, -2, 7, -5]'], {'dtype': 'numpy.intc'}), '([1, 2, 3, -3, 4, 1, 2, 3, -3, 5, 6, -2, 7, -5], dtype=numpy.intc)\n', (1129, 1195), False, 'import numpy\n'), ((1451, 1528), 'numpy.array', 'numpy.array', (['[1, 2, 3, -3, 4, 1, 2, 3, -3, 5, 6, -2, 7, -5]'], {'dtype': 'numpy.intc'}), '([1, 2, 3, -3, 4, 1, 2, 3, -3, 5, 6, -2, 7, -5], dtype=numpy.intc)\n', (1462, 1528), False, 'import numpy\n'), ((1598, 1673), 'numpy.array', 'numpy.array', (['[0, 1, 2, 0, 4, 5, 6, 7, 5, 9, 10, 9, 12, 0]'], {'dtype': 'numpy.intc'}), '([0, 1, 2, 0, 4, 5, 6, 7, 5, 9, 10, 9, 12, 0], dtype=numpy.intc)\n', (1609, 1673), False, 'import numpy\n'), ((1904, 1981), 'numpy.array', 'numpy.array', (['[1, 2, 3, -3, 4, 1, 2, 3, -3, 5, 6, -2, 7, -5]'], {'dtype': 'numpy.intc'}), '([1, 2, 3, -3, 4, 1, 2, 3, -3, 5, 6, -2, 7, -5], dtype=numpy.intc)\n', (1915, 1981), False, 'import numpy\n'), ((2052, 2102), 'numpy.array', 'numpy.array', (['[7, 7, 1, 2, 8, -5]'], {'dtype': 'numpy.intc'}), '([7, 7, 1, 2, 8, -5], dtype=numpy.intc)\n', (2063, 2102), False, 'import numpy\n'), ((263, 312), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""pycoq"""', '"""tests"""'], {}), "('pycoq', 'tests')\n", (294, 312), False, 'import pkg_resources\n'), ((3405, 3445), 'pytest.raises', 'pytest.raises', (['serlib.cparser.IndexError'], {}), '(serlib.cparser.IndexError)\n', (3418, 3445), False, 'import pytest\n'), ((3673, 3713), 'pytest.raises', 'pytest.raises', (['serlib.cparser.IndexError'], {}), '(serlib.cparser.IndexError)\n', (3686, 3713), False, 'import pytest\n'), ((4887, 4923), 'logging.info', 'logging.info', (['f"""{sn[sprimen != sn]}"""'], {}), "(f'{sn[sprimen != sn]}')\n", (4899, 4923), False, 'import logging\n'), ((4932, 4973), 'logging.info', 'logging.info', (['f"""{sprimen[sprimen != sn]}"""'], {}), "(f'{sprimen[sprimen != sn]}')\n", (4944, 4973), False, 'import logging\n')]
|
from __future__ import print_function
import torch
import cv2
import time
import numpy as np
import os
from imutils.video import FPS, WebcamVideoStream
from data import BaseTransform
from ssd import build_ssd
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
THRESHOLD = 0.2
def sliding_window(image, step_size, window_size):
# slide a window across the image
for y in range(0, image.shape[0], step_size):
for x in range(0, image.shape[1], step_size):
# yield the current window
yield (x, y, image[y:y + window_size[1], x:x + window_size[0]])
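# sliding_window lazily yields (x, y, tile) triples, so the caller below can
# run the detector tile by tile instead of on the full frame; tiles at the
# right/bottom border may be smaller than window_size, which is why detect()
# clamps toX/toY with min() before pasting results back into `combined`.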
def detect(img, out_file, steps):
def predict(frame):
height, width = frame.shape[:2]
x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
x = torch.autograd.Variable(x.unsqueeze(0))
y = net(x) # forward pass
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor([width, height, width, height])
# 15 is the index of the person class in the VOC label map
person_class_idx = 15
j = 0
while detections[0, person_class_idx, j, 0] >= THRESHOLD:
pt = (detections[0, person_class_idx, j, 1:] * scale).cpu().numpy()
cv2.rectangle(frame, (int(pt[0]), int(pt[1])), (int(pt[2]), int(pt[3])), (255, 128, 0), 1)
cv2.putText(frame, str((detections[0, person_class_idx, j, 0]).cpu().numpy()), (int(pt[0]), int(pt[1])), FONT, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
j += 1
return frame
net = build_ssd('test', 300, 21) # initialize SSD
net.load_state_dict(torch.load('data/weights/ssd_300_VOC0712.pth'))
transform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))
(winW, winH) = (steps, steps)
combined = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
print(combined.shape)
# Find lowest non-existing index
# person_index = 0
# new_fname = '/people/' + str(person_index) + '.png'
# while os.path.exists(new_fname):
# person_index += 1
# new_fname = '/people/' + str(person_index) + '.png'
for (fromX, fromY, window) in sliding_window(img, step_size=steps, window_size=(winW, winH)):
toX = int(min(fromX+winW, img.shape[1]))
toY = int(min(fromY+winH, img.shape[0]))
print('Processing (' + str(fromX) + ', ' + str(fromY) + ') to (' + str(toX) + ', ' + str(toY) + ')...')
window = predict(window)
combined[int(fromY):toY, int(fromX):toX] = window
cv2.imwrite(str(out_file), combined)
resized = cv2.resize(combined, (800, 450))
cv2.imshow('output', resized)
k = cv2.waitKey(1)
if k == 0xFF & ord("q"):
break
if __name__ == "__main__":
detect(cv2.imread('./frames/1560698544.png'), 'out/9_1.png', 608)
|
[
"data.BaseTransform",
"cv2.waitKey",
"torch.load",
"numpy.zeros",
"ssd.build_ssd",
"cv2.imread",
"torch.Tensor",
"cv2.imshow",
"cv2.resize"
] |
[((1564, 1590), 'ssd.build_ssd', 'build_ssd', (['"""test"""', '(300)', '(21)'], {}), "('test', 300, 21)\n", (1573, 1590), False, 'from ssd import build_ssd\n'), ((1699, 1763), 'data.BaseTransform', 'BaseTransform', (['net.size', '(104 / 256.0, 117 / 256.0, 123 / 256.0)'], {}), '(net.size, (104 / 256.0, 117 / 256.0, 123 / 256.0))\n', (1712, 1763), False, 'from data import BaseTransform\n'), ((1809, 1860), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1], 3)', 'np.uint8'], {}), '((img.shape[0], img.shape[1], 3), np.uint8)\n', (1817, 1860), True, 'import numpy as np\n'), ((938, 982), 'torch.Tensor', 'torch.Tensor', (['[width, height, width, height]'], {}), '([width, height, width, height])\n', (950, 982), False, 'import torch\n'), ((1635, 1681), 'torch.load', 'torch.load', (['"""data/weights/ssd_300_VOC0712.pth"""'], {}), "('data/weights/ssd_300_VOC0712.pth')\n", (1645, 1681), False, 'import torch\n'), ((2601, 2633), 'cv2.resize', 'cv2.resize', (['combined', '(800, 450)'], {}), '(combined, (800, 450))\n', (2611, 2633), False, 'import cv2\n'), ((2642, 2671), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'resized'], {}), "('output', resized)\n", (2652, 2671), False, 'import cv2\n'), ((2685, 2699), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2696, 2699), False, 'import cv2\n'), ((2790, 2827), 'cv2.imread', 'cv2.imread', (['"""./frames/1560698544.png"""'], {}), "('./frames/1560698544.png')\n", (2800, 2827), False, 'import cv2\n')]
|
"""
<NAME>, 2020
"""
import numpy as np
def ara_backward_induction(energy_cost_current = 1,energy_cost_future = 1,energy_bonus = 0):
"""
This function implements the backward induction algorithm.
The inputs energy_cost_future and energy_cost_current specify the energy
costs of the current and future segments. Energy bonus specifies how much
value is assigned per remaining energy unit after the final trial.
"""
n_energy = 7 # number of energy states
n_offer = 4 # number of offers
n_action = 2 # number of actions
n_trial = 8 # number of trials
max_energy = 6 # maximum energy state
op = np.array([0.25,0.25,0.25,0.25]) # offer probability
V = np.zeros((n_energy,n_offer,n_trial+1)) # state value function
Q = np.zeros((n_energy,n_offer,n_action,n_trial)) # state action value function
final_reward = np.tile(np.arange(n_energy),(n_offer,1)).T * energy_bonus # terminal reward
V[:,:,0] = final_reward
    # loop through the state space and timesteps
    # Note1: If speed is needed, vectorize and use a transition matrix
# Note2: Since we are implicitly looping backwards, the future
# segment comes first
for t in range(n_trial):
for e in range(n_energy):
for a in range(n_action):
for o in range(n_offer):
if t < n_trial/2: # Future segment
if a == 0: # accept
if e >= energy_cost_future: # enough energy
Q[e,o,a,t] = o+1 + np.sum(V[e-energy_cost_future,:,t]*op)
elif e < energy_cost_future: # not enough energy
Q[e,o,a,t] = np.sum(V[0,:,t]*op) # energy goes to 0
elif a == 1: # reject
if e < max_energy: # not max energy
Q[e,o,a,t] = np.sum(V[e+1,:,t]*op)
if e == max_energy: # max energy
Q[e,o,a,t] = np.sum(V[e,:,t]*op)
elif t >= n_trial/2: # Current segment
if a == 0: # accept
if e >= energy_cost_current: # enough energy
Q[e,o,a,t] = o+1 + np.sum(V[e-energy_cost_current,:,t]*op)
elif e < energy_cost_current: # not enough energy
Q[e,o,a,t] = np.sum(V[0,:,t]*op) # energy goes to 0
elif a == 1: # reject
if e < max_energy: # not max energy
Q[e,o,a,t] = np.sum(V[e+1,:,t]*op)
if e == max_energy: # max energy
Q[e,o,a,t] = np.sum(V[e,:,t]*op)
# Value of the maximizing action given energy state e,
# offer o and trial t
Qmax = np.maximum(Q[e,o,0,t],Q[e,o,1,t])
# Update state value function
V[e,o,t+1] = Qmax
# Flip V and Q such that the first trial corresponds to te first index
V = V[:,:,::-1]
Q = Q[:,:,:,::-1]
return V, Q
#%% Run backward induction
# Calculate decision variable DV and conflict C
costs = np.array([[1,1], [2,1],[1,2],[2,2]]) # The four possible transitions
energy_bonus=2.5/1.5
V = np.zeros((7,4,9,4)) # State-value function
Q = np.zeros((7,4,2,8,4)) # State-action function
for i in range(4):
V[:,:,:,i], Q[:,:,:,:,i] = ara_backward_induction(energy_cost_current = costs[i][0],energy_cost_future = costs[i][1],energy_bonus=energy_bonus)
DV = Q[:,:,0,:,:] - Q[:,:,1,:,:]
C = np.absolute(DV) * -1
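# DV is the value difference Q(accept) - Q(reject) for every energy state,
# offer, trial and cost condition (action index 0 is "accept" in the loop
# above).  C negates |DV|, presumably so that trials where both actions are
# almost equally valuable (|DV| near zero) score as the highest conflict.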
|
[
"numpy.absolute",
"numpy.maximum",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.arange"
] |
[((3498, 3540), 'numpy.array', 'np.array', (['[[1, 1], [2, 1], [1, 2], [2, 2]]'], {}), '([[1, 1], [2, 1], [1, 2], [2, 2]])\n', (3506, 3540), True, 'import numpy as np\n'), ((3592, 3614), 'numpy.zeros', 'np.zeros', (['(7, 4, 9, 4)'], {}), '((7, 4, 9, 4))\n', (3600, 3614), True, 'import numpy as np\n'), ((3640, 3665), 'numpy.zeros', 'np.zeros', (['(7, 4, 2, 8, 4)'], {}), '((7, 4, 2, 8, 4))\n', (3648, 3665), True, 'import numpy as np\n'), ((721, 755), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25, 0.25])\n', (729, 755), True, 'import numpy as np\n'), ((786, 828), 'numpy.zeros', 'np.zeros', (['(n_energy, n_offer, n_trial + 1)'], {}), '((n_energy, n_offer, n_trial + 1))\n', (794, 828), True, 'import numpy as np\n'), ((857, 905), 'numpy.zeros', 'np.zeros', (['(n_energy, n_offer, n_action, n_trial)'], {}), '((n_energy, n_offer, n_action, n_trial))\n', (865, 905), True, 'import numpy as np\n'), ((3893, 3908), 'numpy.absolute', 'np.absolute', (['DV'], {}), '(DV)\n', (3904, 3908), True, 'import numpy as np\n'), ((965, 984), 'numpy.arange', 'np.arange', (['n_energy'], {}), '(n_energy)\n', (974, 984), True, 'import numpy as np\n'), ((3091, 3131), 'numpy.maximum', 'np.maximum', (['Q[e, o, 0, t]', 'Q[e, o, 1, t]'], {}), '(Q[e, o, 0, t], Q[e, o, 1, t])\n', (3101, 3131), True, 'import numpy as np\n'), ((1663, 1707), 'numpy.sum', 'np.sum', (['(V[e - energy_cost_future, :, t] * op)'], {}), '(V[e - energy_cost_future, :, t] * op)\n', (1669, 1707), True, 'import numpy as np\n'), ((1824, 1847), 'numpy.sum', 'np.sum', (['(V[0, :, t] * op)'], {}), '(V[0, :, t] * op)\n', (1830, 1847), True, 'import numpy as np\n'), ((2018, 2045), 'numpy.sum', 'np.sum', (['(V[e + 1, :, t] * op)'], {}), '(V[e + 1, :, t] * op)\n', (2024, 2045), True, 'import numpy as np\n'), ((2146, 2169), 'numpy.sum', 'np.sum', (['(V[e, :, t] * op)'], {}), '(V[e, :, t] * op)\n', (2152, 2169), True, 'import numpy as np\n'), ((2424, 2469), 'numpy.sum', 'np.sum', (['(V[e - energy_cost_current, :, t] * op)'], {}), '(V[e - energy_cost_current, :, t] * op)\n', (2430, 2469), True, 'import numpy as np\n'), ((2587, 2610), 'numpy.sum', 'np.sum', (['(V[0, :, t] * op)'], {}), '(V[0, :, t] * op)\n', (2593, 2610), True, 'import numpy as np\n'), ((2781, 2808), 'numpy.sum', 'np.sum', (['(V[e + 1, :, t] * op)'], {}), '(V[e + 1, :, t] * op)\n', (2787, 2808), True, 'import numpy as np\n'), ((2909, 2932), 'numpy.sum', 'np.sum', (['(V[e, :, t] * op)'], {}), '(V[e, :, t] * op)\n', (2915, 2932), True, 'import numpy as np\n')]
|
import os
import pickle
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.externals import joblib
from nmapy.classification import *
def main():
#n = dataset.Neighborhoods()
working_dir = "/mnt/GATES/UserDirs/4ja/data/image_chips_IGARSS_svms/dar_es_salaam"
#n.load_dataset(working_dir)
#train, test = n.strat_split_train_test(0.20)
run_desc = "singlescale"
train = dataset.Training()
test = dataset.Test()
train.load_dataset(working_dir + "/train")
test.load_dataset(working_dir + "/test")
print("computing features")
params = [{'feature': 'glcm',
'params': {'scales': ['120 meters'],
'prop': 'energy',
'distances': [1, 2],
'angles': [0.0,
0.5235987755982988,
0.7853981633974483,
1.0471975511965976,
1.5707963267948966,
2.0943951023931953,
2.356194490192345,
2.6179938779914944],
'smooth_factor': None,
'levels': None,
'stat': None}},
{'feature': 'gabor',
'params': {'scales': ['120 meters'],
'thetas': [0, 0.7853981633974483, 1.5707963267948966, 2.356194490192345],
'sigmas': [1, 3, 7],
'frequencies': [0.9],
'n_clusters': 32,
'mean_var_method': False}},
{'feature': 'glcm',
'params': {'scales': ['120 meters'],
'prop': 'ASM',
'distances': [2, 4, 6, 8, 10],
'angles': [0.0,
0.5235987755982988,
0.7853981633974483,
1.0471975511965976,
1.5707963267948966,
2.0943951023931953,
2.356194490192345,
2.6179938779914944],
'smooth_factor': None,
'levels': 200,
'stat': None}},
{'feature': 'w_hog', 'params': {'scales': ['120 meters']}}]
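    # Each entry above selects one texture-feature family (two GLCM variants, a
    # Gabor filter bank, and a w_hog feature) computed at a single 120 m scale,
    # which is presumably why this run is labelled "singlescale".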
#train.set_feature_hyperparams(params)
#test.set_feature_hyperparams(params)
train.compute_features(params, n_jobs=16)
test.compute_features(params, n_jobs=16)
print("done computing features")
scaler = StandardScaler(copy=False)
scaler.fit(train.data)
scaler.transform(train.data, copy=False)
scaler.transform(test.data, copy=False)
# grid search SVM
# print("starting grid search")
# svm_params = {'kernel':['linear','rbf','poly'], 'C':[2**-4, 2**-3, 2**-2, 2**-1, 2, 2**2, 2**3, 2**4, 2**5, 2**6, 2**7], 'gamma':[2**-5, 2**-4, 2**-3, 2**-2, 2**-1, 2, 2**2, 2**3, 2**4, 2**5, 2**6, 2**7], 'degree':[3]}
# svmsvc = SVC()
# clf = GridSearchCV(estimator=svmsvc, param_grid=svm_params)
# clf.fit(train.data, train.labels)
# print("done with grid search")
# print(clf.best_estimator_)
# print(clf.best_score_)
# print("making predictions")
# test_pred = clf.predict(test.data)
# print(clf.score(test.data, test.labels))
# cnf_matrix = confusion_matrix(test.labels, test_pred)
# print(cnf_matrix)
# clf = clf.best_estimator
# with setting params explicitly
clf = SVC(C=12.041044961603584, kernel='linear', gamma=4.958572644482876)
clf.fit(train.data, train.labels)
print("training accuracy")
print(clf.score(train.data, train.labels))
print("making predictions")
test_pred = clf.predict(test.data)
print(clf.score(test.data, test.labels))
cnf_matrix = confusion_matrix(test.labels, test_pred)
print(cnf_matrix)
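    # In scikit-learn's convention the confusion matrix has true labels along
    # the rows and predicted labels along the columns, so the diagonal holds
    # the per-class counts of correct test predictions.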
# save the confusion matrix, the classifier, the feature parameters,
# and write human readable metadata about the training and testing datasets
np.save(os.path.join(working_dir, run_desc + "_conf_matrix.npy"), cnf_matrix)
with open(os.path.join(working_dir, run_desc + "_svm_model.pkl"), "wb") as clf_output:
pickle.dump(clf, clf_output, -1)
#joblib.dump(clf, "C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_model.pkl")
train.save_feature_hyperparams(os.path.join(working_dir, run_desc + "_feature_params.pkl"))
with open(os.path.join(working_dir, run_desc + "_data_scaler.pkl"), "wb") as output:
pickle.dump(scaler, output, -1)
write_metadata(working_dir, run_desc, train, test, clf, scaler)
def write_metadata(working_dir, run_desc, train, test, clf, scaler):
metadata_file = open(os.path.join(working_dir, run_desc + "_metadata.txt"), "w")
labelnames = str(train.label_names[0])
for n in train.label_names[1:]:
labelnames += ","+str(n)
metadata_file.write("CLASS NAMES : " + labelnames + "\n")
trainclasses = len(train.label_names)
traincounts = np.histogram(train.labels, bins=trainclasses)[0]
trainclasscount = str(traincounts[0])
for n in traincounts[1:]:
trainclasscount+=","+str(n)
metadata_file.write("TRAIN SAMPLES : " + trainclasscount + "\n")
testclasses = len(test.label_names)
testcounts = np.histogram(test.labels, bins=testclasses)[0]
testclasscount = str(testcounts[0])
for n in testcounts[1:]:
testclasscount+=","+str(n)
metadata_file.write("TEST SAMPLES : " + testclasscount + "\n")
metadata_file.write("-------------------------------------- Features ------------------------------------\n")
metadata_file.write("FEATURE VECTOR LENGTH : " + str(len(train.data[0])) + "\n")
feature_means = str(scaler.mean_[0])
feature_vars = str(scaler.var_[0])
for n in range(1,len(scaler.mean_[:])):
feature_means += "," + str(scaler.mean_[n])
feature_vars += "," + str(scaler.var_[n])
metadata_file.write("FEATURE MEANS : " + feature_means + "\n")
metadata_file.write("FEATURE VARIANCES : " + feature_vars + "\n")
featurenames = train.feature_names[0]
for n in train.feature_names[1:]:
featurenames += "," + n
metadata_file.write("PRIMARY FEATURES : " + featurenames + "\n")
metadata_file.write("--------------------------------- Feature Parameters ----------------------------------\n")
for n in train.txt_feature_hyperparams:
metadata_file.write(str(n) + "\n")
metadata_file.write("--------------------------------- Class Means -----------------------------------------\n")
preds = clf.predict(train.data)
p = 0
mean_dict = {}
while p < len(preds):
if train.label_names[preds[p]] not in mean_dict:
mean_dict[train.label_names[preds[p]]] = [[train.data[p]]]
else:
mean_dict[train.label_names[preds[p]]].append([train.data[p]])
p+=1
for k in mean_dict:
mean_dict[k] = np.mean(np.array(mean_dict[k]), axis=0)
values_string = k + " : " + str(mean_dict[k][0][0])
for val in mean_dict[k][0][1:]:
values_string+= "," + str(val)
metadata_file.write(values_string + "\n")
metadata_file.write("-------------------------------------------------------------------\n")
metadata_file.write("SVM PARAMS : " + str(clf.get_params()) + "\n")
# np.save("C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_support.npy",
# np.array(clf.best_estimator_.support_))
# np.save("C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_support_vectors.npy",
# np.array(clf.best_estimator_.support_vectors_))
# np.save("C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_n_support.npy",
# np.array(clf.best_estimator_.n_support_))
# np.save("C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_dual_coef.npy",
# np.array(clf.best_estimator_.dual_coef_))
# np.save("C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_intercept.npy",
# np.array(clf.best_estimator_.intercept_))
# if clf.best_estimator_.get_params()["kernel"] == "linear":
# np.save("C:/Users/4ja/data/neighborhood_mapping/image_chips/johannesburg/svm_coef.npy",
# np.array(clf.best_estimator_.coef_))
# metadata_file.write("support_ : " + np.array(clf.best_estimator_.support_) + "\n")
# metadata_file.write("support_vectors_ : " + np.array(clf.best_estimator_.support_vectors_) + "\n")
# metadata_file.write("n_support_ : " + np.array(clf.best_estimator_.n_support_) + "\n")
# metadata_file.write("dual_coef_ : " + np.array(clf.best_estimator_.dual_coef_) + "\n")
# metadata_file.write("intercept_ : " + np.array(clf.best_estimator_.intercept_) + "\n")
# if clf.best_estimator_.get_params()["kernel"] == "linear":
# metadata_file.write("coef_ : " + np.array(clf.best_estimator_.coef_) + "\n")
metadata_file.close()
if __name__ == "__main__":
main()
|
[
"pickle.dump",
"sklearn.preprocessing.StandardScaler",
"numpy.histogram",
"numpy.array",
"sklearn.svm.SVC",
"sklearn.metrics.confusion_matrix",
"os.path.join"
] |
[((2094, 2120), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'copy': '(False)'}), '(copy=False)\n', (2108, 2120), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3058, 3125), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(12.041044961603584)', 'kernel': '"""linear"""', 'gamma': '(4.958572644482876)'}), "(C=12.041044961603584, kernel='linear', gamma=4.958572644482876)\n", (3061, 3125), False, 'from sklearn.svm import SVC\n'), ((3384, 3424), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test.labels', 'test_pred'], {}), '(test.labels, test_pred)\n', (3400, 3424), False, 'from sklearn.metrics import confusion_matrix\n'), ((3619, 3675), 'os.path.join', 'os.path.join', (['working_dir', "(run_desc + '_conf_matrix.npy')"], {}), "(working_dir, run_desc + '_conf_matrix.npy')\n", (3631, 3675), False, 'import os\n'), ((3790, 3822), 'pickle.dump', 'pickle.dump', (['clf', 'clf_output', '(-1)'], {}), '(clf, clf_output, -1)\n', (3801, 3822), False, 'import pickle\n'), ((3963, 4022), 'os.path.join', 'os.path.join', (['working_dir', "(run_desc + '_feature_params.pkl')"], {}), "(working_dir, run_desc + '_feature_params.pkl')\n", (3975, 4022), False, 'import os\n'), ((4123, 4154), 'pickle.dump', 'pickle.dump', (['scaler', 'output', '(-1)'], {}), '(scaler, output, -1)\n', (4134, 4154), False, 'import pickle\n'), ((4312, 4365), 'os.path.join', 'os.path.join', (['working_dir', "(run_desc + '_metadata.txt')"], {}), "(working_dir, run_desc + '_metadata.txt')\n", (4324, 4365), False, 'import os\n'), ((4630, 4675), 'numpy.histogram', 'np.histogram', (['train.labels'], {'bins': 'trainclasses'}), '(train.labels, bins=trainclasses)\n', (4642, 4675), True, 'import numpy as np\n'), ((4933, 4976), 'numpy.histogram', 'np.histogram', (['test.labels'], {'bins': 'testclasses'}), '(test.labels, bins=testclasses)\n', (4945, 4976), True, 'import numpy as np\n'), ((3704, 3758), 'os.path.join', 'os.path.join', (['working_dir', "(run_desc + '_svm_model.pkl')"], {}), "(working_dir, run_desc + '_svm_model.pkl')\n", (3716, 3758), False, 'import os\n'), ((4039, 4095), 'os.path.join', 'os.path.join', (['working_dir', "(run_desc + '_data_scaler.pkl')"], {}), "(working_dir, run_desc + '_data_scaler.pkl')\n", (4051, 4095), False, 'import os\n'), ((6655, 6677), 'numpy.array', 'np.array', (['mean_dict[k]'], {}), '(mean_dict[k])\n', (6663, 6677), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch_struct as ts
def nk2ts(chart):
# chart: batch x time x time x num_classes
# need to make indices inclusive to be compatible with torchstruct
# and add 1st dimension corresponding to size of semiring
return chart[:,:-1, 1:]
def batch_marg(chart, semiring=ts.MaxSemiring, lengths=None):
chart_ts = nk2ts(chart).clone()
# is lengths-1 correct?
chart_ts[:,0,lengths-1,0].fill_(-1e8)
model = ts.CKY_CRF
struct = model(semiring)
# is lengths-1 correct?
return struct.marginals(chart_ts, lengths=lengths-1)
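# The fill_(-1e8) above acts as a soft minus-infinity: under the max semiring
# it effectively rules out the single entry chart_ts[:, 0, lengths-1, 0]
# while leaving every other span score untouched.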
def exclusive_spans(spans):
# change endpoints of spans: batch x n x n x *
# to be exclusive
spans[:,2] += 1
return spans
def pad(x, batch_idxs):
len_padded = batch_idxs.max_len
bsz = batch_idxs.batch_size
lens = batch_idxs.seq_lens_np
H = x.shape[-1]
# filter out sentence boundaries, maybe
xmask = np.zeros(x.shape[0])
start = 0
for l in lens:
xmask[start:start+l-1] = True
start += l
xmask = torch.BoolTensor(xmask).to(x.device)
x = x[xmask]
# flatten padded tensor and index copy in
padded = x.new(
bsz, len_padded, H,
device = x.device,
)
padded.fill_(0)
mask = np.zeros(bsz * len_padded)
for i, l in enumerate(lens):
mask[i * len_padded: i * len_padded + l - 1] = 1
mask = torch.BoolTensor(mask).to(x.device)
index = torch.arange(0, mask.shape[0], device=x.device)[mask]
padded = (padded
.view(-1, H)
.index_copy(
0,
index,
x,
)
)
return padded.view(bsz, len_padded, H)
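# pad() assumes batch_idxs is a duck-typed batch descriptor exposing max_len,
# batch_size and seq_lens_np (a per-sentence length array); only the first
# len-1 vectors of each sentence are copied into the padded output, which
# matches the "filter out sentence boundaries" intent noted above.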
def from_parts():
# convert from tree to chart representation
pass
|
[
"torch.BoolTensor",
"numpy.zeros",
"torch.arange"
] |
[((937, 957), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (945, 957), True, 'import numpy as np\n'), ((1273, 1299), 'numpy.zeros', 'np.zeros', (['(bsz * len_padded)'], {}), '(bsz * len_padded)\n', (1281, 1299), True, 'import numpy as np\n'), ((1449, 1496), 'torch.arange', 'torch.arange', (['(0)', 'mask.shape[0]'], {'device': 'x.device'}), '(0, mask.shape[0], device=x.device)\n', (1461, 1496), False, 'import torch\n'), ((1060, 1083), 'torch.BoolTensor', 'torch.BoolTensor', (['xmask'], {}), '(xmask)\n', (1076, 1083), False, 'import torch\n'), ((1401, 1423), 'torch.BoolTensor', 'torch.BoolTensor', (['mask'], {}), '(mask)\n', (1417, 1423), False, 'import torch\n')]
|
__author__ = ('<NAME>', '<NAME>')
import sys
import unittest
import os
import platform
import numpy as np
import pandas as pd
import tables as pt
from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, \
make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, \
SharedVLArray
from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite
from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest
from pypet.tests.testutils.data import TrajectoryComparator
from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager
class MyTable(pt.IsDescription):
id = pt.Int32Col()
name = pt.StringCol(15)
surname = pt.StringCol(15)
weight = pt.FloatCol()
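# MyTable is a standard PyTables IsDescription: each class attribute declares a
# column (name and dtype) of the on-disk table that the SharedTable tests below
# create, fill row by row, and read back after re-loading the trajectory.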
class StorageDataTrajectoryTests(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5'
def test_conversions(self):
filename = make_temp_dir('hdf5manipulation.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
traj.v_standard_result = SharedResult
traj.f_store(only_init=True)
traj.f_add_result('shared_data')
thedata = np.zeros((1000, 1000))
myarray = SharedArray('array', traj.shared_data, trajectory=traj)
traj.shared_data['array'] = myarray
mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
traj.shared_data['t1'] = mytable
dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
dadict2 = {'answer': [42]}
res = traj.f_add_result('shared.dfs')
res['df'] = SharedPandasFrame()
res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
frame = SharedPandasFrame('df1', traj.f_get('shared.dfs'), trajectory=traj,
add_to_parent=True)
frame.create_shared_data(data=pd.DataFrame(dadict2),)
res['df1'] = frame
traj.f_add_result('mylist', [1, 2, 3])
traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
traj.f_add_result('my.myarray', np.zeros((50, 50)))
traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
traj.f_add_result('my.mytable', ObjectTable(data=dadict2))
myarray.create_shared_data(data=thedata)
mytable.create_shared_data(first_row={'hi': 'hi'.encode('utf-8'), 'huhu': np.ones(3)})
traj.f_store()
data = myarray.read()
myarray.get_data_node()
self.assertTrue(np.all(data == thedata))
with StorageContextManager(traj):
myarray[2, 2] = 10
data = myarray.read()
self.assertTrue(data[2, 2] == 10)
self.assertTrue(data[2, 2] == 10)
self.assertFalse(traj.v_storage_service.is_open)
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
array = traj.shared_data.array
self.assertTrue(isinstance(array, np.ndarray))
thedata[2, 2] = 10
self.assertTrue(np.all(array == thedata))
make_ordinary_result(traj.shared_data, 't1', trajectory=traj,)
t1 = traj.shared_data.t1
self.assertTrue(isinstance(t1, ObjectTable))
self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))
dfs = traj.shared.dfs
make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
theframe = dfs.f_get('df')
self.assertTrue(isinstance(dfs, Result))
self.assertTrue(isinstance(theframe, pd.DataFrame))
self.assertTrue(theframe['hi'][0] == 1)
listres = traj.f_get('mylist')
listres = make_shared_result(listres, 0, trajectory=traj)
with StorageContextManager(traj):
self.assertTrue(listres[0][2] == 3)
listres[0][0] = 4
self.assertTrue(listres[0][0] == 4)
listres = make_ordinary_result(listres, 0, trajectory=traj)
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
mylist = traj.mylist
self.assertTrue(isinstance(listres, Result))
self.assertTrue(mylist[0] == 4)
self.assertTrue(isinstance(mylist, list))
mytuple = traj.mytuple
with self.assertRaises(AttributeError):
mytuple = make_shared_result(mytuple, 'mylist', traj, new_class=SharedArray)
mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
self.assertTrue(mytuple.k[1] == 2)
mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
self.assertTrue(isinstance(mytuple.k, tuple))
self.assertTrue(mytuple.k[2] == 3)
myframe = traj.myframe
myframe = make_shared_result(myframe, 'data', traj)
theframe = myframe.data.read()
self.assertTrue(theframe['answer'][0] == 42)
myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
traj.f_load_item(myframe)
self.assertTrue(myframe.data['answer'][0] == 42)
mytable = traj.f_get('mytable')
mytable = make_shared_result(mytable, 0, traj)
self.assertTrue(isinstance(mytable[0], SharedTable))
rows = mytable.mytable.read()
self.assertTrue(rows[0][0] == 42)
mytable = make_ordinary_result(mytable, 0, trajectory=traj)
self.assertTrue(isinstance(mytable, Result))
self.assertTrue(mytable[0]['answer'][0] == 42)
def test_storing_and_manipulating(self):
filename = make_temp_dir('hdf5manipulation.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
thedata = np.zeros((1000, 1000))
res = traj.f_add_result(SharedResult, 'shared')
myarray = SharedArray('array', res, trajectory=traj, add_to_parent=True)
mytable = SharedTable('t1', res, trajectory=traj, add_to_parent=True)
mytable2 = SharedTable('t2', res, trajectory=traj, add_to_parent=True)
mytable3 = SharedTable('t3', res, trajectory=traj, add_to_parent=True)
traj.f_store(only_init=True)
myarray.create_shared_data(data=thedata)
mytable.create_shared_data(first_row={'hi': 'hi'.encode('utf-8'), 'huhu': np.ones(3)})
mytable2.create_shared_data(description={'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1)})
mytable3.create_shared_data(description={'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1)})
traj.f_store()
newrow = {'ha': 'hu', 'haha': 4.0}
with self.assertRaises(TypeError):
traj.shared.t2.row
with StorageContextManager(traj) as cm:
row = traj.shared.t2.row
for irun in range(11):
for key, val in newrow.items():
row[key] = val
row.append()
traj.shared.t3.flush()
data = myarray.read()
myarray.get_data_node()
self.assertTrue(np.all(data == thedata))
with StorageContextManager(traj):
myarray[2, 2] = 10
data = myarray.read()
self.assertTrue(data[2, 2] == 10)
self.assertTrue(data[2, 2] == 10)
self.assertFalse(traj.v_storage_service.is_open)
traj = load_trajectory(name=trajname, filename=filename)
traj.f_load(load_data=2)
traj.shared.t2.traj = traj
traj.shared.t1.traj = traj
traj.shared.array.traj = traj
self.assertTrue(traj.shared.t2.nrows == 11, '%s != 11' % str(traj.shared.t2.nrows))
self.assertTrue(traj.shared.t2[0]['ha'] == 'hu'.encode('utf-8'), traj.shared.t2[0]['ha'])
self.assertTrue(traj.shared.t2[1]['ha'] == 'hu'.encode('utf-8'), traj.shared.t2[1]['ha'])
self.assertTrue('huhu' in traj.shared.t1.colnames)
self.assertTrue(traj.shared.array[2, 2] == 10)
@unittest.skipIf(platform.system() == 'Windows', 'Not supported under Windows')
def test_compacting(self):
filename = make_temp_dir('hdf5compacting.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
traj.v_storage_service.complevel = 7
first_row = {'ha': 'hi'.encode('utf-8'), 'haha': np.zeros((3, 3))}
traj.f_store(only_init=True)
traj.f_add_result('My.Tree.Will.Be.Deleted', 42)
traj.f_add_result('Mine.Too.HomeBoy', 42, comment='Don`t cry for me!')
res = traj.f_add_result(SharedResult, 'myres')
res['myres'] = SharedTable()
res['myres'].create_shared_data(first_row=first_row)
with StorageContextManager(traj):
traj.myres
for irun in range(10000):
row = traj.myres.row
for key in first_row:
row[key] = first_row[key]
row.append()
traj.f_store()
del traj
traj = load_trajectory(name=trajname, filename=filename, load_all=2)
with StorageContextManager(traj) as cm:
tb = traj.myres.get_data_node()
tb.remove_rows(1000, 10000)
cm.flush_store()
self.assertTrue(traj.myres.nrows == 1001)
traj.f_delete_item(traj.My, recursive=True)
traj.f_delete_item(traj.Mine, recursive=True)
size = os.path.getsize(filename)
get_root_logger().info('Filesize is %s' % str(size))
name_wo_ext, ext = os.path.splitext(filename)
backup_file_name = name_wo_ext + '_backup' + ext
code = compact_hdf5_file(filename, keep_backup=True)
if code != 0:
raise RuntimeError('ptrepack fail')
backup_size = os.path.getsize(backup_file_name)
self.assertTrue(backup_size == size)
new_size = os.path.getsize(filename)
get_root_logger().info('New filesize is %s' % str(new_size))
self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def test_all_arrays(self):
filename = make_temp_dir('hdf5arrays.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
npearray = np.ones((2, 10, 3), dtype=np.float)
thevlarray = np.array(['j'.encode('utf-8'), 22.2, 'gutter'.encode('utf-8')])
traj.f_store(only_init=True)
res = traj.f_add_result(SharedResult, 'arrays')
res['carray'] = SharedCArray()
res['carray'].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
res['earray'] = SharedEArray()
res['earray'].create_shared_data(obj=npearray)
res['vlarray'] = SharedVLArray()
res['vlarray'].create_shared_data(obj=thevlarray)
res['array'] = SharedArray()
res['array'].create_shared_data(data=npearray)
traj.f_store()
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
toappned = [44, 'k'.encode('utf-8')]
with StorageContextManager(traj):
a1 = traj.arrays.array
a1[0, 0, 0] = 4.0
a2 = traj.arrays.carray
a2[0, 1] = 4
a4 = traj.arrays.vlarray
a4.append(toappned)
a3 = traj.arrays.earray
a3.append(np.zeros((1, 10, 3)))
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
with StorageContextManager(traj):
a1 = traj.arrays.array
self.assertTrue(a1[0, 0, 0] == 4.0)
a2 = traj.arrays.carray
self.assertTrue(a2[0, 1] == 4)
a3 = traj.arrays.earray
self.assertTrue(a3.read().shape == (3, 10, 3))
a4 = traj.arrays.vlarray
for idx, x in enumerate(a4):
if idx == 0:
self.assertTrue(np.all(x == np.array(thevlarray)))
elif idx == 1:
self.assertTrue(np.all(x == np.array(toappned)))
else:
raise RuntimeError()
def test_df(self):
filename = make_temp_dir('hdf5errors.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
traj.f_store()
dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
dadict2 = {'answer': [42]}
traj.f_add_result(SharedResult, 'dfs.df', SharedPandasFrame()).create_shared_data(data=pd.DataFrame(dadict))
traj.f_add_result(SharedResult, 'dfs.df1', SharedPandasFrame()).create_shared_data(data=pd.DataFrame(dadict2))
traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())
for irun in range(10):
traj.df3.append(traj.df1.read())
dframe = traj.df3.read()
self.assertTrue(len(dframe) == 10)
what = traj.df.select(where='index == 2')
self.assertTrue(len(what) == 1)
def test_errors(self):
filename = make_temp_dir('hdf5errors.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
npearray = np.ones((2, 10, 3), dtype=np.float)
thevlarray = np.array(['j'.encode('utf-8'), 22.2, 'gutter'.encode('utf-8')])
with self.assertRaises(TypeError):
traj.f_add_result(SharedResult, 'arrays.vlarray', SharedVLArray()).create_shared_data(obj=thevlarray)
traj.f_store()
traj.arrays.vlarray.create_shared_data(obj=thevlarray)
traj.f_add_result(SharedResult, 'arrays.array', SharedArray()).create_shared_data(data=npearray)
traj.arrays.f_add_result(SharedResult, 'super.carray', SharedCArray(),
comment='carray').create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
traj.arrays.f_add_result(SharedResult, 'earray', SharedEArray()).create_shared_data('earray',
obj=npearray)
traj.f_store()
with self.assertRaises(TypeError):
traj.arrays.array.iterrows()
with StorageContextManager(traj):
with self.assertRaises(RuntimeError):
with StorageContextManager(traj):
pass
self.assertTrue(traj.v_storage_service.is_open)
with self.assertRaises(RuntimeError):
StorageContextManager(traj).open_store()
self.assertFalse(traj.v_storage_service.is_open)
class SharedTableTest(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'table', 'mehmet'
def setUp(self):
self.filename = make_temp_dir('shared_table_test.hdf5')
self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
self.traj.v_standard_result = SharedResult
self.traj.f_store(only_init=True)
self.traj.f_add_result('shared_data')
self.shared_table = SharedTable(name='table',
parent=self.traj.shared_data,
trajectory=self.traj,
add_to_parent=True)
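        # Each test below writes through self.shared_table, stores the
        # trajectory, and then re-opens it with load_trajectory to check that
        # the rows really reached the HDF5 file rather than only an in-memory
        # buffer.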
def test_table_read(self):
the_reading_table = self.traj.results.shared_data.table
self.assertTrue(the_reading_table is self.shared_table)
the_reading_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_reading_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_reading_table.flush()
for idx, row in enumerate(the_reading_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_reading_table = traj2.results.shared_data.table
self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))
second_reading_table.append([(21, 'aaa', 'bbb', 100)])
self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_reading_table = traj3.results.shared_data.table
self.assertTrue(np.all(the_reading_table.read() == third_reading_table.read()))
def test_table_append(self):
the_append_table = self.traj.results.shared_data.table
self.assertTrue(the_append_table is self.shared_table)
the_append_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_append_table.row
for i in range(15):
row['id'] = i * 2
row['name'] = 'name %d' % i
row['surname'] = '%d surname' % i
row['weight'] = (i*0.5 + 50.0)
row.append()
the_append_table.flush()
for idx, row in enumerate(the_append_table.iterrows()):
self.assertEqual(row['id'], idx * 2)
self.assertEqual(row['name'], ('name %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], ('%d surname' % idx).encode('utf-8'))
self.assertEqual(row['weight'], idx*0.5+50.0)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_append_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_append_table.iterrows()):
self.assertEqual(row['id'], idx * 2)
self.assertEqual(row['name'], ('name %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], ('%d surname' % idx).encode('utf-8'))
self.assertEqual(row['weight'], idx*0.5+50.0)
second_append_table.append([(30, 'mehmet', 'timur', 65.5)])
self.assertEqual(second_append_table.read(field='id')[-1], 30)
self.assertEqual(second_append_table.read(field='name')[-1], 'mehmet'.encode('utf-8'))
self.assertEqual(second_append_table.read(field='surname')[-1], 'timur'.encode('utf-8'))
self.assertEqual(second_append_table.read(field='weight')[-1], 65.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_append_table = traj3.results.shared_data.table
self.assertEqual((third_append_table.read(field='id')[-1]), 30)
self.assertEqual((third_append_table.read(field='name')[-1]), 'mehmet'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='surname')[-1]), 'timur'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='weight')[-1]), 65.5)
third_append_table.append([(33, 'Harrison', 'Ford', 95.5)])
self.assertEqual((third_append_table.read(field='id')[-1]), 33)
self.assertEqual((third_append_table.read(field='name')[-1]), 'Harrison'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='surname')[-1]), 'Ford'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='weight')[-1]), 95.5)
def test_table_iterrows(self):
the_iterrows_table = self.traj.results.shared_data.table
self.assertTrue(the_iterrows_table is self.shared_table)
the_iterrows_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_iterrows_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_iterrows_table.flush()
for idx, row in enumerate(the_iterrows_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_table.iterrows()):
self.assertEqual(row['id'], idx)
def test_table_col(self):
the_col_table = self.traj.results.shared_data.table
self.assertTrue(the_col_table is self.shared_table)
the_col_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_col_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_col_table.flush()
for idx, row in enumerate(the_col_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_col_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_col_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertTrue(np.all(second_col_table.read(field='id') == second_col_table.col('id')))
self.assertTrue(np.all(second_col_table.read(field='name') == second_col_table.col('name')))
self.assertTrue(np.all(second_col_table.read(field='surname') == second_col_table.col('surname')))
self.assertTrue(np.all(second_col_table.read(field='weight') == second_col_table.col('weight')))
# def test_table_itersequence(self):
# pass
#
# def test_table_itersorted(self):
# pass
#
# def test_table_read_coordinates(self):
# pass
#
# def test_table_read_sorted(self):
# pass
def test_table_getitem(self):
the_getitem_table = self.traj.results.shared_data.table
self.assertTrue(the_getitem_table is self.shared_table)
the_getitem_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_getitem_table.row
for i in range(10):
row['id'] = i
row['name'] = '<NAME>' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_getitem_table.flush()
for idx, row in enumerate(the_getitem_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_getitem_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_getitem_table.iterrows()):
self.assertTrue(np.all(second_getitem_table.read()[idx] == second_getitem_table[idx]))
second_getitem_table.append([(30, '<NAME>', 'timur', 65.5)])
for idx, row in enumerate(second_getitem_table.iterrows(-1)):
self.assertEqual(row['id'], 30)
self.assertEqual(row['name'], '<NAME>'.encode('utf-8'))
self.assertEqual(row['surname'], 'timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_getitem_table = traj3.results.shared_data.table
with StorageContextManager(traj3):
for idx, row in enumerate(third_getitem_table.iterrows()):
self.assertTrue(np.all(third_getitem_table.read()[idx] == third_getitem_table[idx]))
# def test_table_iter(self):
# pass
#
# def test_table_modify_column(self):
# pass
#
# def test_table_modify_columns(self):
# pass
#
# def test_table_modify_coordinates(self):
# pass
#
# def test_table_modify_rows(self):
# pass
#
# def test_table_remove_rows(self):
# pass
#
# def test_table_remove_row(self):
# pass
def test_table_setitem(self):
the_setitem_table = self.traj.results.shared_data.table
self.assertTrue(the_setitem_table is self.shared_table)
the_setitem_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_setitem_table.row
for i in range(10):
row['id'] = i
row['name'] = '<NAME>' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_setitem_table.flush()
for idx, row in enumerate(the_setitem_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_setitem_table = traj2.results.shared_data.table
second_setitem_table[0] = [(100, '<NAME>', 'TIMUR', 75.5)]
self.assertEqual(second_setitem_table.read(field='id')[0], 100)
self.assertEqual(second_setitem_table.read(field='name')[0], '<NAME>'.encode('utf-8'))
self.assertEqual(second_setitem_table.read(field='surname')[0], 'TIMUR'.encode('utf-8'))
self.assertEqual(second_setitem_table.read(field='weight')[0], 75.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_setitem_table = traj3.results.shared_data.table
self.assertEqual(third_setitem_table.read(field='id')[0], 100)
self.assertEqual(third_setitem_table.read(field='name')[0], '<NAME>'.encode('utf-8'))
self.assertEqual(third_setitem_table.read(field='surname')[0], 'TIMUR'.encode('utf-8'))
self.assertEqual(third_setitem_table.read(field='weight')[0], 75.5)
# def test_table_get_where_list(self):
# pass
#
# def test_table_read_where(self):
# pass
def test_table_where(self):
the_where_table = self.traj.results.shared_data.table
self.assertTrue(the_where_table is self.shared_table)
the_where_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_where_table.row
for i in range(10):
row['id'] = i
row['name'] = '<NAME>' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
the_where_table.flush()
for idx, row in enumerate(the_where_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_where_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
result = second_where_table.where('(id == 2)&(name == b"<NAME>")&(surname ==b"Timur")&(weight == 67.5)')
there = False
for row in result:
there = True
self.assertTrue(there)
# def test_table_append_where(self):
# pass
#
# def test_table_will_query_use_indexing(self):
# pass
#
# def test_table_copy(self):
# pass
#
# def test_table_flush_rows_to_index(self):
# pass
#
# def test_table_get_enum(self):
# pass
#
# def test_table_reindex(self):
# pass
#
# def test_table_reindex_dirty(self):
# pass
#
# def test_table_remove_index(self):
# pass
#
# def test_table_create_index(self):
# pass
#
# def test_table_create_cindex(self):
# pass
#
# def test_table_colindexes(self):
# pass
#
# def test_table_cols(self):
# pass
#
# def test_table_row(self):
# pass
def test_table_flush(self):
the_flush_table = self.traj.results.shared_data.table
self.assertTrue(the_flush_table is self.shared_table)
the_flush_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_flush_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
the_flush_table.flush()
for idx, row in enumerate(the_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_flush_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
row = second_flush_table.row
for i in range(10, 11):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
second_flush_table.flush()
for idx, row in enumerate(second_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
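# Tests for the SharedArray wrapper: reading, item access, get_enum behaviour, iteration,
# item assignment and length queries on HDF5-backed arrays stored in a trajectory.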
class SharedArrayTest(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'array', 'mehmet'
def setUp(self):
self.filename = make_temp_dir('shared_table_test.hdf5')
self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
self.traj.v_standard_result = SharedResult
self.traj.f_store(only_init=True)
self.traj.f_add_result('shared_data')
self.shared_array = SharedArray(name='array',
parent=self.traj.shared_data,
trajectory=self.traj,
add_to_parent=True)
def test_array_read(self):
the_reading_array = np.ones((100, 100)) * 4
first_reading_array = self.traj.results.shared_data.array
self.assertTrue(first_reading_array is self.shared_array)
first_reading_array.create_shared_data(obj=the_reading_array)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_reading_array = traj2.shared_data.array.read()
self.assertTrue(np.all(the_reading_array == second_reading_array),
'%s != %s' % (str(the_reading_array), str(second_reading_array)))
def test_array_getitem(self):
the_getitem_array = np.array(range(100))
first_getitem_array = self.traj.results.shared_data.array
first_getitem_array.create_shared_data(obj=the_getitem_array)
for k in range(len(the_getitem_array)):
self.assertEqual(the_getitem_array[k], first_getitem_array[k])
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
for j in range(len(the_getitem_array)):
self.assertEqual(the_getitem_array[j], traj2.results.shared_data.array[j])
def test_array_getenum(self):
the_getenum_array = np.array(range(100))
first_getenum_array = self.traj.results.shared_data.array
first_getenum_array.create_shared_data(obj=the_getenum_array)
with self.assertRaises(TypeError):
first_getenum_array.get_enum()
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_enum_array = traj2.results.shared_data.array
with self.assertRaises(TypeError):
second_enum_array.get_enum()
def test_array_iterrows(self):
the_iterrows_array = np.random.randint(0, 100, (100, 100))
first_iterrows_array = self.traj.results.shared_data.array
first_iterrows_array.create_shared_data(obj=the_iterrows_array)
with StorageContextManager(self.traj):
for idx, row in enumerate(first_iterrows_array.iterrows()):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_array = traj2.results.shared_data.array
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_array.iterrows()):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
def test_array_setitem(self):
the_setitem_array = np.zeros((50, 50))
first_setitem_array = self.traj.results.shared_data.array
first_setitem_array.create_shared_data(obj=the_setitem_array)
first_setitem_array[2, 2] = 10
self.assertEqual(first_setitem_array[2, 2], 10)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_setitem_array = traj2.results.shared_data.array
self.assertEqual(second_setitem_array[2, 2], 10)
second_setitem_array[3, 3] = 17
self.assertEqual(second_setitem_array[3, 3], 17)
def test_array_iter(self):
the_iterrows_array = np.random.randint(0, 100, (100, 100))
first_iterrows_array = self.traj.results.shared_data.array
first_iterrows_array.create_shared_data(obj=the_iterrows_array)
with StorageContextManager(self.traj):
for idx, row in enumerate(first_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.assertTrue(np.all(the_iterrows_array == first_iterrows_array.read()))
for idx, row in enumerate(the_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_array = traj2.results.shared_data.array
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.assertTrue(np.all(the_iterrows_array == second_iterrows_array.read()))
for idx, row in enumerate(second_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
def test_array_len(self):
the_len_array = np.ones((100, 100))
first_len_array = self.traj.results.shared_data.array
self.assertTrue(first_len_array is self.shared_array)
first_len_array.create_shared_data(obj=the_len_array)
self.assertEqual(len(first_len_array), 100)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_len_array = traj2.results.shared_data.array
self.assertEqual(len(second_len_array), 100)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args)
|
[
"pypet.SharedTable",
"pypet.make_shared_result",
"numpy.ones",
"pypet.SharedEArray",
"pypet.ObjectTable",
"numpy.random.randint",
"pypet.StorageContextManager",
"pypet.compact_hdf5_file",
"pandas.DataFrame",
"tables.atom.FloatAtom",
"tables.Int32Col",
"pypet.load_trajectory",
"pypet.SharedArray",
"pypet.tests.testutils.ioutils.make_temp_dir",
"os.path.getsize",
"pypet.tests.testutils.ioutils.get_root_logger",
"pypet.tests.testutils.ioutils.parse_args",
"pypet.tests.testutils.ioutils.run_suite",
"tables.StringCol",
"platform.system",
"pypet.SharedCArray",
"pypet.make_ordinary_result",
"numpy.all",
"pypet.SharedPandasFrame",
"numpy.zeros",
"tables.FloatCol",
"numpy.array",
"os.path.splitext",
"pypet.SharedVLArray",
"pypet.tests.testutils.ioutils.make_trajectory_name"
] |
[((707, 720), 'tables.Int32Col', 'pt.Int32Col', ([], {}), '()\n', (718, 720), True, 'import tables as pt\n'), ((732, 748), 'tables.StringCol', 'pt.StringCol', (['(15)'], {}), '(15)\n', (744, 748), True, 'import tables as pt\n'), ((763, 779), 'tables.StringCol', 'pt.StringCol', (['(15)'], {}), '(15)\n', (775, 779), True, 'import tables as pt\n'), ((793, 806), 'tables.FloatCol', 'pt.FloatCol', ([], {}), '()\n', (804, 806), True, 'import tables as pt\n'), ((36977, 36989), 'pypet.tests.testutils.ioutils.parse_args', 'parse_args', ([], {}), '()\n', (36987, 36989), False, 'from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite\n'), ((36994, 37015), 'pypet.tests.testutils.ioutils.run_suite', 'run_suite', ([], {}), '(**opt_args)\n', (37003, 37015), False, 'from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite\n'), ((972, 1010), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""hdf5manipulation.hdf5"""'], {}), "('hdf5manipulation.hdf5')\n", (985, 1010), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((1266, 1288), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {}), '((1000, 1000))\n', (1274, 1288), True, 'import numpy as np\n'), ((1307, 1362), 'pypet.SharedArray', 'SharedArray', (['"""array"""', 'traj.shared_data'], {'trajectory': 'traj'}), "('array', traj.shared_data, trajectory=traj)\n", (1318, 1362), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((1425, 1477), 'pypet.SharedTable', 'SharedTable', (['"""t1"""', 'traj.shared_data'], {'trajectory': 'traj'}), "('t1', traj.shared_data, trajectory=traj)\n", (1436, 1477), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((1701, 1720), 'pypet.SharedPandasFrame', 'SharedPandasFrame', ([], {}), '()\n', (1718, 1720), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((2883, 2978), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'trajname', 'filename': 'filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=trajname, filename=filename, load_all=2,\n dynamic_imports=SharedResult)\n', (2898, 2978), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((3015, 3079), 'pypet.make_ordinary_result', 'make_ordinary_result', (['traj.shared_data', '"""array"""'], {'trajectory': 'traj'}), "(traj.shared_data, 'array', trajectory=traj)\n", (3035, 3079), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((3260, 3321), 'pypet.make_ordinary_result', 'make_ordinary_result', (['traj.shared_data', '"""t1"""'], {'trajectory': 'traj'}), "(traj.shared_data, 't1', trajectory=traj)\n", (3280, 3321), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((3509, 3569), 'pypet.make_ordinary_result', 'make_ordinary_result', (['traj.shared.dfs', '"""df"""'], {'trajectory': 'traj'}), "(traj.shared.dfs, 'df', trajectory=traj)\n", (3529, 3569), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, 
compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((3820, 3867), 'pypet.make_shared_result', 'make_shared_result', (['listres', '(0)'], {'trajectory': 'traj'}), '(listres, 0, trajectory=traj)\n', (3838, 3867), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((4051, 4100), 'pypet.make_ordinary_result', 'make_ordinary_result', (['listres', '(0)'], {'trajectory': 'traj'}), '(listres, 0, trajectory=traj)\n', (4071, 4100), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((4116, 4211), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'trajname', 'filename': 'filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=trajname, filename=filename, load_all=2,\n dynamic_imports=SharedResult)\n', (4131, 4211), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((4600, 4661), 'pypet.make_shared_result', 'make_shared_result', (['mytuple', '"""k"""', 'traj'], {'new_class': 'SharedArray'}), "(mytuple, 'k', traj, new_class=SharedArray)\n", (4618, 4661), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((4724, 4775), 'pypet.make_ordinary_result', 'make_ordinary_result', (['mytuple', '"""k"""'], {'trajectory': 'traj'}), "(mytuple, 'k', trajectory=traj)\n", (4744, 4775), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((4923, 4964), 'pypet.make_shared_result', 'make_shared_result', (['myframe', '"""data"""', 'traj'], {}), "(myframe, 'data', traj)\n", (4941, 4964), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((5077, 5131), 'pypet.make_ordinary_result', 'make_ordinary_result', (['myframe', '"""data"""'], {'trajectory': 'traj'}), "(myframe, 'data', trajectory=traj)\n", (5097, 5131), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((5282, 5318), 'pypet.make_shared_result', 'make_shared_result', (['mytable', '(0)', 'traj'], {}), '(mytable, 0, traj)\n', (5300, 5318), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((5481, 5530), 'pypet.make_ordinary_result', 'make_ordinary_result', (['mytable', '(0)'], {'trajectory': 'traj'}), '(mytable, 0, trajectory=traj)\n', (5501, 5530), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((5705, 5743), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""hdf5manipulation.hdf5"""'], {}), "('hdf5manipulation.hdf5')\n", (5718, 5743), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((5872, 5894), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {}), '((1000, 1000))\n', (5880, 5894), True, 'import numpy as np\n'), 
((5969, 6031), 'pypet.SharedArray', 'SharedArray', (['"""array"""', 'res'], {'trajectory': 'traj', 'add_to_parent': '(True)'}), "('array', res, trajectory=traj, add_to_parent=True)\n", (5980, 6031), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((6050, 6109), 'pypet.SharedTable', 'SharedTable', (['"""t1"""', 'res'], {'trajectory': 'traj', 'add_to_parent': '(True)'}), "('t1', res, trajectory=traj, add_to_parent=True)\n", (6061, 6109), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((6129, 6188), 'pypet.SharedTable', 'SharedTable', (['"""t2"""', 'res'], {'trajectory': 'traj', 'add_to_parent': '(True)'}), "('t2', res, trajectory=traj, add_to_parent=True)\n", (6140, 6188), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((6208, 6267), 'pypet.SharedTable', 'SharedTable', (['"""t3"""', 'res'], {'trajectory': 'traj', 'add_to_parent': '(True)'}), "('t3', res, trajectory=traj, add_to_parent=True)\n", (6219, 6267), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((7459, 7508), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'trajname', 'filename': 'filename'}), '(name=trajname, filename=filename)\n', (7474, 7508), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((8190, 8226), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""hdf5compacting.hdf5"""'], {}), "('hdf5compacting.hdf5')\n", (8203, 8226), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((8712, 8725), 'pypet.SharedTable', 'SharedTable', ([], {}), '()\n', (8723, 8725), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((9097, 9158), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'trajname', 'filename': 'filename', 'load_all': '(2)'}), '(name=trajname, filename=filename, load_all=2)\n', (9112, 9158), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((9498, 9523), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (9513, 9523), False, 'import os\n'), ((9612, 9638), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (9628, 9638), False, 'import os\n'), ((9711, 9756), 'pypet.compact_hdf5_file', 'compact_hdf5_file', (['filename'], {'keep_backup': '(True)'}), '(filename, keep_backup=True)\n', (9728, 9756), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((9849, 9882), 'os.path.getsize', 'os.path.getsize', (['backup_file_name'], {}), '(backup_file_name)\n', (9864, 9882), False, 'import os\n'), ((9947, 9972), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (9962, 9972), False, 'import os\n'), ((10174, 10206), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""hdf5arrays.hdf5"""'], {}), "('hdf5arrays.hdf5')\n", (10187, 10206), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((10336, 10371), 'numpy.ones', 'np.ones', (['(2, 10, 3)'], {'dtype': 'np.float'}), '((2, 10, 3), 
dtype=np.float)\n', (10343, 10371), True, 'import numpy as np\n'), ((10574, 10588), 'pypet.SharedCArray', 'SharedCArray', ([], {}), '()\n', (10586, 10588), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((10696, 10710), 'pypet.SharedEArray', 'SharedEArray', ([], {}), '()\n', (10708, 10710), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((10791, 10806), 'pypet.SharedVLArray', 'SharedVLArray', ([], {}), '()\n', (10804, 10806), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((10888, 10901), 'pypet.SharedArray', 'SharedArray', ([], {}), '()\n', (10899, 10901), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((10997, 11092), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'trajname', 'filename': 'filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=trajname, filename=filename, load_all=2,\n dynamic_imports=SharedResult)\n', (11012, 11092), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((11502, 11597), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'trajname', 'filename': 'filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=trajname, filename=filename, load_all=2,\n dynamic_imports=SharedResult)\n', (11517, 11597), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((12312, 12344), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""hdf5errors.hdf5"""'], {}), "('hdf5errors.hdf5')\n", (12325, 12344), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((13163, 13195), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""hdf5errors.hdf5"""'], {}), "('hdf5errors.hdf5')\n", (13176, 13195), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((13294, 13329), 'numpy.ones', 'np.ones', (['(2, 10, 3)'], {'dtype': 'np.float'}), '((2, 10, 3), dtype=np.float)\n', (13301, 13329), True, 'import numpy as np\n'), ((14832, 14871), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""shared_table_test.hdf5"""'], {}), "('shared_table_test.hdf5')\n", (14845, 14871), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((15132, 15234), 'pypet.SharedTable', 'SharedTable', ([], {'name': '"""table"""', 'parent': 'self.traj.shared_data', 'trajectory': 'self.traj', 'add_to_parent': '(True)'}), "(name='table', parent=self.traj.shared_data, trajectory=self.\n traj, add_to_parent=True)\n", (15143, 15234), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((16092, 16200), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (16107, 16200), False, 'from pypet import Trajectory, SharedResult, SharedTable, 
SharedArray, load_trajectory, StorageContextManager\n'), ((16522, 16630), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (16537, 16630), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((17762, 17870), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (17777, 17870), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((18800, 18908), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (18815, 18908), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((20479, 20587), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (20494, 20587), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((21534, 21642), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (21549, 21642), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((23259, 23367), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (23274, 23367), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((24086, 24194), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (24101, 24194), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((25640, 25748), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (25655, 25748), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((26261, 26369), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 
'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (26276, 26369), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((27614, 27722), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (27629, 27722), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((29790, 29898), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (29805, 29898), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((31141, 31180), 'pypet.tests.testutils.ioutils.make_temp_dir', 'make_temp_dir', (['"""shared_table_test.hdf5"""'], {}), "('shared_table_test.hdf5')\n", (31154, 31180), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((31441, 31543), 'pypet.SharedArray', 'SharedArray', ([], {'name': '"""array"""', 'parent': 'self.traj.shared_data', 'trajectory': 'self.traj', 'add_to_parent': '(True)'}), "(name='array', parent=self.traj.shared_data, trajectory=self.\n traj, add_to_parent=True)\n", (31452, 31543), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((31994, 32102), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (32009, 32102), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((32720, 32828), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (32735, 32828), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((33316, 33424), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (33331, 33424), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((33632, 33669), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(100, 100)'], {}), '(0, 100, (100, 100))\n', (33649, 33669), True, 'import numpy as np\n'), ((34052, 34160), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (34067, 34160), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, 
load_trajectory, StorageContextManager\n'), ((34477, 34495), 'numpy.zeros', 'np.zeros', (['(50, 50)'], {}), '((50, 50))\n', (34485, 34495), True, 'import numpy as np\n'), ((34777, 34885), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (34792, 34885), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((35165, 35202), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(100, 100)'], {}), '(0, 100, (100, 100))\n', (35182, 35202), True, 'import numpy as np\n'), ((35785, 35893), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (35800, 35893), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((36406, 36425), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (36413, 36425), True, 'import numpy as np\n'), ((36714, 36822), 'pypet.load_trajectory', 'load_trajectory', ([], {'name': 'self.traj.v_name', 'filename': 'self.filename', 'load_all': '(2)', 'dynamic_imports': 'SharedResult'}), '(name=self.traj.v_name, filename=self.filename, load_all=2,\n dynamic_imports=SharedResult)\n', (36729, 36822), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((2177, 2195), 'numpy.zeros', 'np.zeros', (['(50, 50)'], {}), '((50, 50))\n', (2185, 2195), True, 'import numpy as np\n'), ((2305, 2330), 'pypet.ObjectTable', 'ObjectTable', ([], {'data': 'dadict2'}), '(data=dadict2)\n', (2316, 2330), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((2588, 2611), 'numpy.all', 'np.all', (['(data == thedata)'], {}), '(data == thedata)\n', (2594, 2611), True, 'import numpy as np\n'), ((2627, 2654), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (2648, 2654), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((3225, 3249), 'numpy.all', 'np.all', (['(array == thedata)'], {}), '(array == thedata)\n', (3231, 3249), True, 'import numpy as np\n'), ((3881, 3908), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (3902, 3908), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((4514, 4580), 'pypet.make_shared_result', 'make_shared_result', (['mytuple', '"""mylist"""', 'traj'], {'new_class': 'SharedArray'}), "(mytuple, 'mylist', traj, new_class=SharedArray)\n", (4532, 4580), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((6823, 6850), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (6844, 6850), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((7164, 7187), 'numpy.all', 'np.all', (['(data == thedata)'], {}), '(data 
== thedata)\n', (7170, 7187), True, 'import numpy as np\n'), ((7203, 7230), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (7224, 7230), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((8439, 8455), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (8447, 8455), True, 'import numpy as np\n'), ((8802, 8829), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (8823, 8829), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((9172, 9199), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (9193, 9199), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((8077, 8094), 'platform.system', 'platform.system', ([], {}), '()\n', (8092, 8094), False, 'import platform\n'), ((11179, 11206), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (11200, 11206), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((11639, 11666), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (11660, 11666), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((12849, 12868), 'pypet.SharedPandasFrame', 'SharedPandasFrame', ([], {}), '()\n', (12866, 12868), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((14286, 14313), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (14307, 14313), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((15590, 15622), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (15611, 15622), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((17018, 17050), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (17039, 17050), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((17944, 17972), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (17965, 17972), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((19974, 20006), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (19995, 20006), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((20663, 20691), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (20684, 20691), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((21044, 21076), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (21065, 21076), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((21713, 21741), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], 
{}), '(traj2)\n', (21734, 21741), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((22760, 22792), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (22781, 22792), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((23442, 23470), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (23463, 23470), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((24268, 24296), 'pypet.StorageContextManager', 'StorageContextManager', (['traj3'], {}), '(traj3)\n', (24289, 24296), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((25141, 25173), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (25162, 25173), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((27123, 27155), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (27144, 27155), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((27795, 27823), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (27816, 27823), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((29085, 29117), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (29106, 29117), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((29971, 29999), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (29992, 29999), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((31719, 31738), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (31726, 31738), True, 'import numpy as np\n'), ((32187, 32236), 'numpy.all', 'np.all', (['(the_reading_array == second_reading_array)'], {}), '(the_reading_array == second_reading_array)\n', (32193, 32236), True, 'import numpy as np\n'), ((33825, 33857), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (33846, 33857), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((34236, 34264), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (34257, 34264), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((35358, 35390), 'pypet.StorageContextManager', 'StorageContextManager', (['self.traj'], {}), '(self.traj)\n', (35379, 35390), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((35969, 35997), 'pypet.StorageContextManager', 'StorageContextManager', (['traj2'], {}), '(traj2)\n', (35990, 35997), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((1042, 1068), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (1062, 1068), 
False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((1763, 1783), 'pandas.DataFrame', 'pd.DataFrame', (['dadict'], {}), '(dadict)\n', (1775, 1783), True, 'import pandas as pd\n'), ((1978, 1999), 'pandas.DataFrame', 'pd.DataFrame', (['dadict2'], {}), '(dadict2)\n', (1990, 1999), True, 'import pandas as pd\n'), ((2242, 2263), 'pandas.DataFrame', 'pd.DataFrame', (['dadict2'], {}), '(dadict2)\n', (2254, 2263), True, 'import pandas as pd\n'), ((5775, 5801), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (5795, 5801), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((8258, 8284), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (8278, 8284), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((9532, 9549), 'pypet.tests.testutils.ioutils.get_root_logger', 'get_root_logger', ([], {}), '()\n', (9547, 9549), False, 'from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite\n'), ((9981, 9998), 'pypet.tests.testutils.ioutils.get_root_logger', 'get_root_logger', ([], {}), '()\n', (9996, 9998), False, 'from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite\n'), ((10238, 10264), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (10258, 10264), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((10651, 10670), 'tables.atom.FloatAtom', 'pt.atom.FloatAtom', ([], {}), '()\n', (10668, 10670), True, 'import tables as pt\n'), ((11464, 11484), 'numpy.zeros', 'np.zeros', (['(1, 10, 3)'], {}), '((1, 10, 3))\n', (11472, 11484), True, 'import numpy as np\n'), ((12376, 12402), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (12396, 12402), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((12657, 12677), 'pandas.DataFrame', 'pd.DataFrame', (['dadict'], {}), '(dadict)\n', (12669, 12677), True, 'import pandas as pd\n'), ((12775, 12796), 'pandas.DataFrame', 'pd.DataFrame', (['dadict2'], {}), '(dadict2)\n', (12787, 12796), True, 'import pandas as pd\n'), ((13227, 13253), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (13247, 13253), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((13934, 13953), 'tables.atom.FloatAtom', 'pt.atom.FloatAtom', ([], {}), '()\n', (13951, 13953), True, 'import tables as pt\n'), ((14909, 14935), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (14929, 14935), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((31218, 31244), 'pypet.tests.testutils.ioutils.make_trajectory_name', 'make_trajectory_name', (['self'], {}), '(self)\n', (31238, 31244), False, 'from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest\n'), ((35696, 35737), 'numpy.all', 'np.all', (['(row == the_iterrows_array[idx, :])'], {}), '(row == the_iterrows_array[idx, :])\n', (35702, 35737), True, 'import numpy as np\n'), ((36308, 36349), 'numpy.all', 'np.all', (['(row == the_iterrows_array[idx, :])'], {}), '(row == the_iterrows_array[idx, :])\n', (36314, 
36349), True, 'import numpy as np\n'), ((2464, 2474), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2471, 2474), True, 'import numpy as np\n'), ((3457, 3467), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3464, 3467), True, 'import numpy as np\n'), ((6437, 6447), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (6444, 6447), True, 'import numpy as np\n'), ((6505, 6527), 'tables.StringCol', 'pt.StringCol', (['(2)'], {'pos': '(0)'}), '(2, pos=0)\n', (6517, 6527), True, 'import tables as pt\n'), ((6537, 6555), 'tables.FloatCol', 'pt.FloatCol', ([], {'pos': '(1)'}), '(pos=1)\n', (6548, 6555), True, 'import tables as pt\n'), ((6613, 6635), 'tables.StringCol', 'pt.StringCol', (['(2)'], {'pos': '(0)'}), '(2, pos=0)\n', (6625, 6635), True, 'import tables as pt\n'), ((6645, 6663), 'tables.FloatCol', 'pt.FloatCol', ([], {'pos': '(1)'}), '(pos=1)\n', (6656, 6663), True, 'import tables as pt\n'), ((12612, 12631), 'pypet.SharedPandasFrame', 'SharedPandasFrame', ([], {}), '()\n', (12629, 12631), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((12730, 12749), 'pypet.SharedPandasFrame', 'SharedPandasFrame', ([], {}), '()\n', (12747, 12749), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((13715, 13728), 'pypet.SharedArray', 'SharedArray', ([], {}), '()\n', (13726, 13728), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((13827, 13841), 'pypet.SharedCArray', 'SharedCArray', ([], {}), '()\n', (13839, 13841), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((14012, 14026), 'pypet.SharedEArray', 'SharedEArray', ([], {}), '()\n', (14024, 14026), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((14386, 14413), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (14407, 14413), False, 'from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((33963, 34004), 'numpy.all', 'np.all', (['(row == the_iterrows_array[idx, :])'], {}), '(row == the_iterrows_array[idx, :])\n', (33969, 34004), True, 'import numpy as np\n'), ((34371, 34412), 'numpy.all', 'np.all', (['(row == the_iterrows_array[idx, :])'], {}), '(row == the_iterrows_array[idx, :])\n', (34377, 34412), True, 'import numpy as np\n'), ((35485, 35526), 'numpy.all', 'np.all', (['(row == the_iterrows_array[idx, :])'], {}), '(row == the_iterrows_array[idx, :])\n', (35491, 35526), True, 'import numpy as np\n'), ((36093, 36134), 'numpy.all', 'np.all', (['(row == the_iterrows_array[idx, :])'], {}), '(row == the_iterrows_array[idx, :])\n', (36099, 36134), True, 'import numpy as np\n'), ((13521, 13536), 'pypet.SharedVLArray', 'SharedVLArray', ([], {}), '()\n', (13534, 13536), False, 'from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, SharedVLArray\n'), ((14566, 14593), 'pypet.StorageContextManager', 'StorageContextManager', (['traj'], {}), '(traj)\n', (14587, 14593), False, 'from pypet import Trajectory, 
SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager\n'), ((12083, 12103), 'numpy.array', 'np.array', (['thevlarray'], {}), '(thevlarray)\n', (12091, 12103), True, 'import numpy as np\n'), ((12185, 12203), 'numpy.array', 'np.array', (['toappned'], {}), '(toappned)\n', (12193, 12203), True, 'import numpy as np\n')]
|
from collections import Counter, OrderedDict
from logging import getLogger
from typing import List
from typing import OrderedDict as OrderedDictType
from typing import Tuple
import numpy as np
from ordered_set import OrderedSet
from pandas import DataFrame
from recording_script_generator.core.types import (ReadingPassages,
Representations, Selection,
Utterance, Utterances,
get_utterance_duration_s,
utterance_to_str,
utterance_to_symbols)
from text_utils import get_ngrams
from tqdm import tqdm
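# Helper routines for logging statistics (estimated durations, character counts, word counts
# and n-gram coverage) about selected vs. deselected reading passages of a recording script.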
def log_general_stats(selection: Selection, reading_passages: ReadingPassages, representations: Representations, avg_chars_per_s: float) -> None:
assert avg_chars_per_s > 0
logger = getLogger(__name__)
selected_reading_passages = [v for k, v in tqdm(reading_passages.items()) if k in selection]
selected_representations = [v for k, v in tqdm(representations.items()) if k in selection]
deselected_reading_passages = [v for k, v in tqdm(reading_passages.items()) if k not in selection]
deselected_representations = [v for k, v in tqdm(representations.items()) if k not in selection]
selected_read_chars_len = sum([1 for read in selected_reading_passages for _ in read])
deselected_read_chars_len = sum([1 for read in deselected_reading_passages for _ in read])
selected_word_counts = __get_utterances_word_counts(selected_reading_passages)
deselected_word_counts = __get_utterances_word_counts(
deselected_reading_passages)
selected_durations_s = __get_utterances_durations(selected_reading_passages, avg_chars_per_s)
deselected_durations_s = __get_utterances_durations(deselected_reading_passages, avg_chars_per_s)
selected_durations_s_distribution = __get_utterance_duration_distribution(selected_durations_s)
non_selected_durations_s_distribution = __get_utterance_duration_distribution(
deselected_durations_s)
if len(selected_durations_s_distribution) > 0:
logger.info("Duration distribution for selected utterances:")
__log_distribution(selected_durations_s_distribution)
if len(non_selected_durations_s_distribution) > 0:
logger.info("Duration distribution for deselected utterances:")
__log_distribution(non_selected_durations_s_distribution)
logger.info(
f"Selected: {len(selected_reading_passages)} entries / {selected_read_chars_len} chars / ca. {selected_read_chars_len/avg_chars_per_s/60:.2f}min / ca. {selected_read_chars_len/avg_chars_per_s/60/60:.2f}h")
logger.info(
f"Deselected: {len(deselected_reading_passages)} entries / {deselected_read_chars_len} chars / ca. {deselected_read_chars_len/avg_chars_per_s/60:.2f}min / ca. {deselected_read_chars_len/avg_chars_per_s/60/60:.2f}h")
selected_chars = {x for rep in tqdm(selected_representations) for x in rep}
rest_chars = {x for rep in tqdm(deselected_representations) for x in rep}
if len(selected_chars) > 0:
logger.info(f"Selected chars ({len(selected_chars)}):\t{' '.join(sorted(selected_chars))}")
if len(rest_chars) > 0:
logger.info(f"Deselected chars ({len(rest_chars)}):\t{' '.join(sorted(rest_chars))}")
if len(selected_word_counts) > 0:
logger.info(
f"Selected words count: {min(selected_word_counts)} (min), {np.mean(selected_word_counts):.0f} (mean), {np.median(selected_word_counts):.0f} (median), {max(selected_word_counts)} (max)")
if len(deselected_word_counts) > 0:
logger.info(
f"Deselected words count: {min(deselected_word_counts)} (min), {np.mean(deselected_word_counts):.0f} (mean), {np.median(deselected_word_counts):.0f} (median), {max(deselected_word_counts)} (max)")
if len(selected_durations_s) > 0:
x = selected_durations_s
logger.info(
f"Selected utterance durations: {min(x):.0f}s (min), {np.mean(x):.0f}s (mean), {np.median(x):.0f}s (median), {max(x):.0f}s (max)")
if len(deselected_durations_s) > 0:
x = deselected_durations_s
logger.info(
f"Deselected utterance durations: {min(x):.0f}s (min), {np.mean(x):.0f}s (mean), {np.median(x):.0f}s (median), {max(x):.0f}s (max)")
def get_n_gram_stats_df(utterances: Utterances, selection: Selection, n: int) -> DataFrame:
columns = [
f"{n}-gram",
"Selected #",
"Not selected #",
"Total #",
"Selected %",
"Not selected %",
"Total %",
]
if len(utterances) == 0:
empty_stats_df = DataFrame(data=[], columns=columns)
return empty_stats_df
selected = [
utterance_to_symbols(
utterance,
utterances.symbol_format,
utterances.language
)
for utterance_id, utterance in tqdm(utterances.items()) if utterance_id in selection
]
deselected = [
utterance_to_symbols(
utterance,
utterances.symbol_format,
utterances.language
)
for utterance_id, utterance in tqdm(utterances.items()) if utterance_id not in selection
]
selected_n_grams = [n_gram for utterance_symbols in tqdm(selected)
for n_gram in get_ngrams(utterance_symbols, n)]
deselected_n_grams = [n_gram for utterance_symbols in tqdm(deselected)
for n_gram in get_ngrams(utterance_symbols, n)]
total_n_grams = selected_n_grams + deselected_n_grams
total_n_grams_ordered = OrderedSet(list(sorted(total_n_grams)))
all_n_grams_str = tuple(''.join(n_grams) for n_grams in total_n_grams_ordered)
total_n_grams_count = __get_ngram_stats_count(total_n_grams, total_n_grams_ordered)
total_n_grams_percent = __get_ngram_stats_percent(total_n_grams_count)
selected_n_grams_count = __get_ngram_stats_count(selected_n_grams, total_n_grams_ordered)
selected_n_grams_percent = __get_ngram_stats_percent(selected_n_grams_count)
deselected_n_grams_count = __get_ngram_stats_count(deselected_n_grams, total_n_grams_ordered)
deselected_n_grams_percent = __get_ngram_stats_percent(deselected_n_grams_count)
utterances = np.array([
all_n_grams_str,
selected_n_grams_count,
deselected_n_grams_count,
total_n_grams_count,
selected_n_grams_percent,
deselected_n_grams_percent,
total_n_grams_percent
]).T
n_gram_stats_df = DataFrame(data=utterances, columns=columns)
return n_gram_stats_df
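# Counts how often each n-gram occurs, reported in the fixed order given by ngrams_order.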
def __get_ngram_stats_count(ngrams: List[Tuple[str, ...]], ngrams_order: OrderedSet[Tuple[str, ...]]) -> Tuple[int, ...]:
counter = Counter(ngrams)
res = tuple(counter[n_gram] if n_gram in counter else 0 for n_gram in ngrams_order)
return res
def __get_ngram_stats_percent(ngram_stats_count: Tuple[int, ...]) -> Tuple[float, ...]:
total_count = sum(ngram_stats_count)
if total_count == 0:
ngram_stats_percent = tuple(np.nan for _ in ngram_stats_count)
else:
ngram_stats_percent = tuple(count / total_count * 100 for count in ngram_stats_count)
return ngram_stats_percent
def __get_utterances_word_counts(utterances: List[Utterance]) -> List[int]:
word_counts = []
for utterance in tqdm(utterances):
words = utterance_to_str(utterance).split(" ")
word_counts.append(len(words))
return word_counts
def __get_utterances_durations(utterances: List[Utterance], avg_chars_per_s: float) -> List[float]:
durations = [
get_utterance_duration_s(utterance, avg_chars_per_s)
for utterance in tqdm(utterances)
]
return durations
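# Buckets utterance durations into whole-second bins and counts the utterances per bin.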
def __get_utterance_duration_distribution(utterance_durations: List[float]) -> OrderedDictType[int, float]:
duration_distribution: OrderedDictType[int, float] = OrderedDict()
current_step_duration = 0
# unsorted_utterances_rounded = [round(x, 1) for x in utterance_durations]
unsorted_utterances_rounded = [round(x) for x in utterance_durations]
while len(unsorted_utterances_rounded) > 0:
to_remove = []
for current_duration in unsorted_utterances_rounded:
# if current_duration == round(current_step_duration, 1):
if current_duration == round(current_step_duration):
if current_step_duration not in duration_distribution:
duration_distribution[current_step_duration] = 1
else:
duration_distribution[current_step_duration] += 1
to_remove.append(current_duration)
for item in to_remove:
unsorted_utterances_rounded.remove(item)
# current_step_duration += 0.1
current_step_duration += 1
return duration_distribution
def __log_distribution(distribution: OrderedDictType[int, float]) -> None:
logger = getLogger(__name__)
total_length = sum([step_duration * step_occurrences for step_duration,
step_occurrences in distribution.items()])
# total_count = sum(distribution.values())
current_summed_occurrences = 0
current_summed_lengths = 0
for step_duration, step_occurrences in distribution.items():
current_length = step_duration * step_occurrences
current_summed_lengths += current_length
current_summed_occurrences += step_occurrences
# logger.info(f"{step_duration:.1f}s: {step_occurrences} ({step_occurrences/total_count*100:.2f}%) ({current_summed_occurrences/total_count*100:.2f}%)")
logger.info(f"{step_duration:.0f}s: {step_occurrences} ({current_length/total_length*100:.2f}%) ({current_summed_lengths/total_length*100:.2f}%)")
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.median",
"recording_script_generator.core.types.get_utterance_duration_s",
"text_utils.get_ngrams",
"recording_script_generator.core.types.utterance_to_symbols",
"recording_script_generator.core.types.utterance_to_str",
"numpy.mean",
"numpy.array",
"collections.OrderedDict",
"collections.Counter",
"logging.getLogger"
] |
[((942, 961), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (951, 961), False, 'from logging import getLogger\n'), ((6309, 6352), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'utterances', 'columns': 'columns'}), '(data=utterances, columns=columns)\n', (6318, 6352), False, 'from pandas import DataFrame\n'), ((6515, 6530), 'collections.Counter', 'Counter', (['ngrams'], {}), '(ngrams)\n', (6522, 6530), False, 'from collections import Counter, OrderedDict\n'), ((7092, 7108), 'tqdm.tqdm', 'tqdm', (['utterances'], {}), '(utterances)\n', (7096, 7108), False, 'from tqdm import tqdm\n'), ((7618, 7631), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7629, 7631), False, 'from collections import Counter, OrderedDict\n'), ((8553, 8572), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (8562, 8572), False, 'from logging import getLogger\n'), ((4564, 4599), 'pandas.DataFrame', 'DataFrame', ([], {'data': '[]', 'columns': 'columns'}), '(data=[], columns=columns)\n', (4573, 4599), False, 'from pandas import DataFrame\n'), ((4646, 4724), 'recording_script_generator.core.types.utterance_to_symbols', 'utterance_to_symbols', (['utterance', 'utterances.symbol_format', 'utterances.language'], {}), '(utterance, utterances.symbol_format, utterances.language)\n', (4666, 4724), False, 'from recording_script_generator.core.types import ReadingPassages, Representations, Selection, Utterance, Utterances, get_utterance_duration_s, utterance_to_str, utterance_to_symbols\n'), ((4864, 4942), 'recording_script_generator.core.types.utterance_to_symbols', 'utterance_to_symbols', (['utterance', 'utterances.symbol_format', 'utterances.language'], {}), '(utterance, utterances.symbol_format, utterances.language)\n', (4884, 4942), False, 'from recording_script_generator.core.types import ReadingPassages, Representations, Selection, Utterance, Utterances, get_utterance_duration_s, utterance_to_str, utterance_to_symbols\n'), ((6078, 6261), 'numpy.array', 'np.array', (['[all_n_grams_str, selected_n_grams_count, deselected_n_grams_count,\n total_n_grams_count, selected_n_grams_percent,\n deselected_n_grams_percent, total_n_grams_percent]'], {}), '([all_n_grams_str, selected_n_grams_count, deselected_n_grams_count,\n total_n_grams_count, selected_n_grams_percent,\n deselected_n_grams_percent, total_n_grams_percent])\n', (6086, 6261), True, 'import numpy as np\n'), ((7339, 7391), 'recording_script_generator.core.types.get_utterance_duration_s', 'get_utterance_duration_s', (['utterance', 'avg_chars_per_s'], {}), '(utterance, avg_chars_per_s)\n', (7363, 7391), False, 'from recording_script_generator.core.types import ReadingPassages, Representations, Selection, Utterance, Utterances, get_utterance_duration_s, utterance_to_str, utterance_to_symbols\n'), ((2963, 2993), 'tqdm.tqdm', 'tqdm', (['selected_representations'], {}), '(selected_representations)\n', (2967, 2993), False, 'from tqdm import tqdm\n'), ((3037, 3069), 'tqdm.tqdm', 'tqdm', (['deselected_representations'], {}), '(deselected_representations)\n', (3041, 3069), False, 'from tqdm import tqdm\n'), ((5119, 5133), 'tqdm.tqdm', 'tqdm', (['selected'], {}), '(selected)\n', (5123, 5133), False, 'from tqdm import tqdm\n'), ((5170, 5202), 'text_utils.get_ngrams', 'get_ngrams', (['utterance_symbols', 'n'], {}), '(utterance_symbols, n)\n', (5180, 5202), False, 'from text_utils import get_ngrams\n'), ((5260, 5276), 'tqdm.tqdm', 'tqdm', (['deselected'], {}), '(deselected)\n', (5264, 5276), False, 'from tqdm import tqdm\n'), ((5315, 
5347), 'text_utils.get_ngrams', 'get_ngrams', (['utterance_symbols', 'n'], {}), '(utterance_symbols, n)\n', (5325, 5347), False, 'from text_utils import get_ngrams\n'), ((7413, 7429), 'tqdm.tqdm', 'tqdm', (['utterances'], {}), '(utterances)\n', (7417, 7429), False, 'from tqdm import tqdm\n'), ((7122, 7149), 'recording_script_generator.core.types.utterance_to_str', 'utterance_to_str', (['utterance'], {}), '(utterance)\n', (7138, 7149), False, 'from recording_script_generator.core.types import ReadingPassages, Representations, Selection, Utterance, Utterances, get_utterance_duration_s, utterance_to_str, utterance_to_symbols\n'), ((3446, 3475), 'numpy.mean', 'np.mean', (['selected_word_counts'], {}), '(selected_word_counts)\n', (3453, 3475), True, 'import numpy as np\n'), ((3490, 3521), 'numpy.median', 'np.median', (['selected_word_counts'], {}), '(selected_word_counts)\n', (3499, 3521), True, 'import numpy as np\n'), ((3698, 3729), 'numpy.mean', 'np.mean', (['deselected_word_counts'], {}), '(deselected_word_counts)\n', (3705, 3729), True, 'import numpy as np\n'), ((3744, 3777), 'numpy.median', 'np.median', (['deselected_word_counts'], {}), '(deselected_word_counts)\n', (3753, 3777), True, 'import numpy as np\n'), ((3974, 3984), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3981, 3984), True, 'import numpy as np\n'), ((4000, 4012), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4009, 4012), True, 'import numpy as np\n'), ((4199, 4209), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (4206, 4209), True, 'import numpy as np\n'), ((4225, 4237), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4234, 4237), True, 'import numpy as np\n')]
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import struct
import numpy as np
import tensorflow as tf
def floor_divide_f32():
para = []
# init the input data and parameters
batch = int(np.random.randint(1, high=4, size=1))
in_channel = int(np.random.randint(16, high=64, size=1))
in_height = int(np.random.randint(64, high=128, size=1))
in_width = int(np.random.randint(64, high=128, size=1))
zero_point1 = int(np.random.randint(2000, high=6000, size=1))
std1 = int(np.random.randint(1000, high=1500, size=1))
zero_point2 = int(np.random.randint(200, high=400, size=1))
std2 = int(np.random.randint(10, high=20, size=1))
size_all = batch * in_channel * in_height * in_width
src_in1 = np.random.normal(zero_point1, std1, (batch, in_channel, in_height, in_width))
src_in1 = src_in1.astype(np.float32)
src_in2 = np.random.normal(zero_point2, std2, (batch, in_channel, in_height, in_width))
src_in2 = src_in2.astype(np.float32)
    # src_out = np.floor_divide(src_in1, src_in2)
    # compute the reference (golden) output with TensorFlow's floordiv
    out_calcu = tf.math.floordiv(src_in1, src_in2)
sess = tf.Session()
src_out = sess.run(out_calcu)
src_in_1 = src_in1.reshape(size_all)
src_in_2 = src_in2.reshape(size_all)
src_out_1 = src_out.reshape(size_all)
total_size = (len(src_in_1) + len(src_in_2) + len(src_out_1)) + 4
para.append(total_size)
para.append(batch)
para.append(in_channel)
para.append(in_height)
para.append(in_width)
with open("floor_div_data_f32.bin", "wb") as fp:
data = struct.pack(('%di' % len(para)), *para)
fp.write(data)
data = struct.pack(('%df' % len(src_in_1)), *src_in_1)
fp.write(data)
data = struct.pack(('%df' % len(src_in_2)), *src_in_2)
fp.write(data)
data = struct.pack(('%df' % len(src_out_1)), *src_out_1)
fp.write(data)
fp.close()
return 0
if __name__ == '__main__':
floor_divide_f32()
print("end")
|
[
"numpy.random.randint",
"tensorflow.Session",
"numpy.random.normal",
"tensorflow.math.floordiv"
] |
[((771, 848), 'numpy.random.normal', 'np.random.normal', (['zero_point1', 'std1', '(batch, in_channel, in_height, in_width)'], {}), '(zero_point1, std1, (batch, in_channel, in_height, in_width))\n', (787, 848), True, 'import numpy as np\n'), ((904, 981), 'numpy.random.normal', 'np.random.normal', (['zero_point2', 'std2', '(batch, in_channel, in_height, in_width)'], {}), '(zero_point2, std2, (batch, in_channel, in_height, in_width))\n', (920, 981), True, 'import numpy as np\n'), ((1090, 1124), 'tensorflow.math.floordiv', 'tf.math.floordiv', (['src_in1', 'src_in2'], {}), '(src_in1, src_in2)\n', (1106, 1124), True, 'import tensorflow as tf\n'), ((1137, 1149), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1147, 1149), True, 'import tensorflow as tf\n'), ((211, 247), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': '(4)', 'size': '(1)'}), '(1, high=4, size=1)\n', (228, 247), True, 'import numpy as np\n'), ((271, 309), 'numpy.random.randint', 'np.random.randint', (['(16)'], {'high': '(64)', 'size': '(1)'}), '(16, high=64, size=1)\n', (288, 309), True, 'import numpy as np\n'), ((334, 373), 'numpy.random.randint', 'np.random.randint', (['(64)'], {'high': '(128)', 'size': '(1)'}), '(64, high=128, size=1)\n', (351, 373), True, 'import numpy as np\n'), ((397, 436), 'numpy.random.randint', 'np.random.randint', (['(64)'], {'high': '(128)', 'size': '(1)'}), '(64, high=128, size=1)\n', (414, 436), True, 'import numpy as np\n'), ((461, 503), 'numpy.random.randint', 'np.random.randint', (['(2000)'], {'high': '(6000)', 'size': '(1)'}), '(2000, high=6000, size=1)\n', (478, 503), True, 'import numpy as np\n'), ((527, 569), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'high': '(1500)', 'size': '(1)'}), '(1000, high=1500, size=1)\n', (544, 569), True, 'import numpy as np\n'), ((593, 633), 'numpy.random.randint', 'np.random.randint', (['(200)'], {'high': '(400)', 'size': '(1)'}), '(200, high=400, size=1)\n', (610, 633), True, 'import numpy as np\n'), ((657, 695), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'high': '(20)', 'size': '(1)'}), '(10, high=20, size=1)\n', (674, 695), True, 'import numpy as np\n')]
|
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This tutorial provides an example of how to load CSV data from a file
into a `tf.data.Dataset`.
"""
import numpy as np
import tensorflow as tf
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
"""## Load data
So we know what we're doing, lets look at the top of the CSV file we're working with.
"""
"""As you can see, the columns in the CSV are labeled. We need the list later on, so let's read it out of the file."""
# CSV columns in the input file.
with open(train_file_path, 'r') as f:
names_row = f.readline()
CSV_COLUMNS = names_row.rstrip('\n').split(',')
print(CSV_COLUMNS)
"""The dataset constructor will pick these labels up automatically.
If the file you are working with does not contain the column names in the first line, pass them in a list of strings to the `column_names` argument in the `make_csv_dataset` function.
```python
CSV_COLUMNS = ['survived', 'sex', 'age', 'n_siblings_spouses', 'parch', 'fare', 'class', 'deck', 'embark_town', 'alone']
dataset = tf.data.experimental.make_csv_dataset(
...,
column_names=CSV_COLUMNS,
...)
```
This example is going to use all the available columns. If you need to omit some columns from the dataset, create a list of just the columns you plan to use, and pass it into the (optional) `select_columns` argument of the constructor.
```python
drop_columns = ['fare', 'embark_town']
columns_to_use = [col for col in CSV_COLUMNS if col not in drop_columns]
dataset = tf.data.experimental.make_csv_dataset(
...,
select_columns = columns_to_use,
...)
```
We also have to identify which column will serve as the labels for each example, and what those labels are.
"""
LABELS = [0, 1]
LABEL_COLUMN = 'survived'
FEATURE_COLUMNS = [column for column in CSV_COLUMNS if column != LABEL_COLUMN]
"""Now that these constructor argument values are in place, read the CSV data from the file and create a dataset.
(For the full documentation, see `tf.data.experimental.make_csv_dataset`)
"""
def get_dataset(file_path):
dataset = tf.data.experimental.make_csv_dataset(
file_path,
batch_size=12, # Artificially small to make examples easier to show.
label_name=LABEL_COLUMN,
na_value="?",
num_epochs=1,
ignore_errors=True)
return dataset
raw_train_data = get_dataset(train_file_path)
raw_test_data = get_dataset(test_file_path)
"""Each item in the dataset is a batch, represented as a tuple of (*many examples*, *many labels*). The data from the examples is organized in column-based tensors (rather than row-based tensors), each with as many elements as the batch size (12 in this case).
It might help to see this yourself.
"""
examples, labels = next(iter(raw_train_data)) # Just the first batch.
print("EXAMPLES: \n", examples, "\n")
print("LABELS: \n", labels)
"""## Data preprocessing
### Categorical data
Some of the columns in the CSV data are categorical columns. That is, the content should be one of a limited set of options.
In the CSV, these options are represented as text. This text needs to be converted to numbers before the model can be trained. To facilitate that, we need to create a list of categorical columns, along with a list of the options available in each column.
"""
CATEGORIES = {
'sex': ['male', 'female'],
'class': ['First', 'Second', 'Third'],
'deck': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
'embark_town': ['Cherbourg', 'Southhampton', 'Queenstown'],
'alone': ['y', 'n']
}
"""Write a function that takes a tensor of categorical values, matches it to a list of value names, and then performs a one-hot encoding."""
def process_categorical_data(data, categories):
"""Returns a one-hot encoded tensor representing categorical values."""
# Remove leading ' '.
data = tf.strings.regex_replace(data, '^ ', '')
# Remove trailing '.'.
data = tf.strings.regex_replace(data, r'\.$', '')
# ONE HOT ENCODE
# Reshape data from 1d (a list) to a 2d (a list of one-element lists)
data = tf.reshape(data, [-1, 1])
# For each element, create a new list of boolean values the length of categories,
# where the truth value is element == category label
data = tf.equal(categories, data)
# Cast booleans to floats.
data = tf.cast(data, tf.float32)
# The entire encoding can fit on one line:
# data = tf.cast(tf.equal(categories, tf.reshape(data, [-1, 1])), tf.float32)
return data
"""To help you visualize this, we'll take a single category-column tensor from the first batch, preprocess it, and show the before and after state."""
class_tensor = examples['class']
class_categories = CATEGORIES['class']
processed_class = process_categorical_data(class_tensor, class_categories)
"""Notice the relationship between the lengths of the two inputs and the shape of the output."""
print("Size of batch: ", len(class_tensor.numpy()))
print("Number of category labels: ", len(class_categories))
print("Shape of one-hot encoded tensor: ", processed_class.shape)
"""### Continuous data
Continuous data needs to be normalized, so that the values fall between 0 and 1. To do that, write a function that multiplies each value by 1 over twice the mean of the column values.
The function should also reshape the data into a two dimensional tensor.
"""
def process_continuous_data(data, mean):
# Normalize data
data = tf.cast(data, tf.float32) * 1 / (2 * mean)
return tf.reshape(data, [-1, 1])
"""To do this calculation, you need the column means. You would obviously need to compute these in real life, but for this example we'll just provide them."""
MEANS = {
'age': 29.631308,
'n_siblings_spouses': 0.545455,
'parch': 0.379585,
'fare': 34.385399
}
"""Again, to see what this function is actually doing, we'll take a single tensor of continuous data and show it before and after processing."""
age_tensor = examples['age']
process_continuous_data(age_tensor, MEANS['age'])
"""### Preprocess the data
Now assemble these preprocessing tasks into a single function that can be mapped to each batch in the dataset.
"""
def preprocess(features, labels):
# Process categorial features.
for feature in CATEGORIES.keys():
features[feature] = process_categorical_data(features[feature],
CATEGORIES[feature])
# Process continuous features.
for feature in MEANS.keys():
features[feature] = process_continuous_data(features[feature],
MEANS[feature])
# Assemble features into a single tensor.
features = tf.concat([features[column] for column in FEATURE_COLUMNS], 1)
return features, labels
"""Now apply that function with `tf.Dataset.map`, and shuffle the dataset to avoid overfitting."""
train_data = raw_train_data.map(preprocess).shuffle(500)
test_data = raw_test_data.map(preprocess)
"""And let's see what a single example looks like."""
examples, labels = next(iter(train_data))
"""The examples are in a two dimensional arrays of 12 items each (the batch size). Each item represents a single row in the original CSV file. The labels are a 1d tensor of 12 values.
## Build the model
This example uses the [Keras Functional API](https://www.tensorflow.org/alpha/guide/keras/functional) wrapped in a `get_model` constructor to build up a simple model.
"""
def get_model(input_dim, hidden_units=None):
"""Create a Keras model with layers.
Args:
input_dim: (int) The shape of an item in a batch.
hidden_units: [int] the layer sizes of the DNN (input layer first)
Returns:
A Keras model.
"""
if hidden_units is None:
hidden_units = [100]
inputs = tf.keras.Input(shape=(input_dim,))
x = inputs
for units in hidden_units:
x = tf.keras.layers.Dense(units, activation='relu')(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
return model
"""The `get_model` constructor needs to know the input shape of your data (not including the batch size)."""
input_shape, output_shape = train_data.output_shapes
input_dimension = input_shape.dims[1] # [0] is the batch size
"""## Train, evaluate, and predict
Now the model can be instantiated and trained.
"""
model = get_model(input_dimension)
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(train_data, epochs=20)
"""Once the model is trained, we can check its accuracy on the `test_data` set."""
test_loss, test_accuracy = model.evaluate(test_data)
print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
"""Use `tf.keras.Model.predict` to infer labels on a batch or a dataset of batches."""
predictions = model.predict(test_data)
# Show some results
for prediction, survived in zip(predictions[:10], list(test_data)[0][1][:10]):
print("Predicted survival: {:.2%}".format(prediction[0]),
" | Actual outcome: ",
("SURVIVED" if bool(survived) else "DIED"))
|
[
"numpy.set_printoptions",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.keras.Model",
"tensorflow.cast",
"tensorflow.keras.utils.get_file",
"tensorflow.strings.regex_replace",
"tensorflow.equal",
"tensorflow.data.experimental.make_csv_dataset"
] |
[((1016, 1068), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""train.csv"""', 'TRAIN_DATA_URL'], {}), "('train.csv', TRAIN_DATA_URL)\n", (1039, 1068), True, 'import tensorflow as tf\n'), ((1086, 1136), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""eval.csv"""', 'TEST_DATA_URL'], {}), "('eval.csv', TEST_DATA_URL)\n", (1109, 1136), True, 'import tensorflow as tf\n'), ((1174, 1221), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (1193, 1221), True, 'import numpy as np\n'), ((3041, 3182), 'tensorflow.data.experimental.make_csv_dataset', 'tf.data.experimental.make_csv_dataset', (['file_path'], {'batch_size': '(12)', 'label_name': 'LABEL_COLUMN', 'na_value': '"""?"""', 'num_epochs': '(1)', 'ignore_errors': '(True)'}), "(file_path, batch_size=12, label_name=\n LABEL_COLUMN, na_value='?', num_epochs=1, ignore_errors=True)\n", (3078, 3182), True, 'import tensorflow as tf\n'), ((4788, 4828), 'tensorflow.strings.regex_replace', 'tf.strings.regex_replace', (['data', '"""^ """', '""""""'], {}), "(data, '^ ', '')\n", (4812, 4828), True, 'import tensorflow as tf\n'), ((4863, 4905), 'tensorflow.strings.regex_replace', 'tf.strings.regex_replace', (['data', '"""\\\\.$"""', '""""""'], {}), "(data, '\\\\.$', '')\n", (4887, 4905), True, 'import tensorflow as tf\n'), ((5007, 5032), 'tensorflow.reshape', 'tf.reshape', (['data', '[-1, 1]'], {}), '(data, [-1, 1])\n', (5017, 5032), True, 'import tensorflow as tf\n'), ((5181, 5207), 'tensorflow.equal', 'tf.equal', (['categories', 'data'], {}), '(categories, data)\n', (5189, 5207), True, 'import tensorflow as tf\n'), ((5246, 5271), 'tensorflow.cast', 'tf.cast', (['data', 'tf.float32'], {}), '(data, tf.float32)\n', (5253, 5271), True, 'import tensorflow as tf\n'), ((6402, 6427), 'tensorflow.reshape', 'tf.reshape', (['data', '[-1, 1]'], {}), '(data, [-1, 1])\n', (6412, 6427), True, 'import tensorflow as tf\n'), ((7573, 7635), 'tensorflow.concat', 'tf.concat', (['[features[column] for column in FEATURE_COLUMNS]', '(1)'], {}), '([features[column] for column in FEATURE_COLUMNS], 1)\n', (7582, 7635), True, 'import tensorflow as tf\n'), ((8664, 8698), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (8678, 8698), True, 'import tensorflow as tf\n'), ((8876, 8907), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (8890, 8907), True, 'import tensorflow as tf\n'), ((8815, 8861), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (8836, 8861), True, 'import tensorflow as tf\n'), ((6350, 6375), 'tensorflow.cast', 'tf.cast', (['data', 'tf.float32'], {}), '(data, tf.float32)\n', (6357, 6375), True, 'import tensorflow as tf\n'), ((8752, 8799), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units'], {'activation': '"""relu"""'}), "(units, activation='relu')\n", (8773, 8799), True, 'import tensorflow as tf\n')]
|
"""
Script name: MalGAN_v2.py
Reproduced for reader's convenience from the original code available at:
https://github.com/yanminglai/Malware-GAN/blob/master/MalGAN_v2.py
Released under GPL 3.0 LICENSE: https://github.com/yanminglai/Malware-GAN/blob/master/LICENSE
"""
from keras.layers import Input, Dense, Activation
from keras.layers.merge import Maximum, Concatenate
from keras.models import Model
from keras.optimizers import Adam
from numpy.lib import format
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model, svm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from load_data import *
import numpy as np
class MalGAN():
def __init__(self):
self.apifeature_dims = 74
self.z_dims = 10
self.hide_layers = 256
self.generator_layers = [self.apifeature_dims+self.z_dims, self.hide_layers, self.apifeature_dims]
self.substitute_detector_layers = [self.apifeature_dims, self.hide_layers, 1]
self.blackbox = 'RF'
optimizer = Adam(lr=0.001)
# Build and Train blackbox_detector
self.blackbox_detector = self.build_blackbox_detector()
# Build and compile the substitute_detector
self.substitute_detector = self.build_substitute_detector()
self.substitute_detector.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes malware and noise as input and generates adversarial malware examples
example = Input(shape=(self.apifeature_dims,))
noise = Input(shape=(self.z_dims,))
input = [example, noise]
malware_examples = self.generator(input)
# For the combined model we will only train the generator
self.substitute_detector.trainable = False
# The discriminator takes generated images as input and determines validity
validity = self.substitute_detector(malware_examples)
# The combined model (stacked generator and substitute_detector)
# Trains the generator to fool the discriminator
self.combined = Model(input, validity)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_blackbox_detector(self):
        # Only the random-forest black-box detector is supported here.
        if self.blackbox == 'RF':
blackbox_detector = RandomForestClassifier(n_estimators=50, max_depth=5, random_state=1)
return blackbox_detector
def build_generator(self):
example = Input(shape=(self.apifeature_dims,))
noise = Input(shape=(self.z_dims,))
x = Concatenate(axis=1)([example, noise])
for dim in self.generator_layers[1:]:
x = Dense(dim)(x)
x = Activation(activation='sigmoid')(x)
x = Maximum()([example, x])
generator = Model([example, noise], x, name='generator')
generator.summary()
return generator
def build_substitute_detector(self):
input = Input(shape=(self.substitute_detector_layers[0],))
x = input
for dim in self.substitute_detector_layers[1:]:
x = Dense(dim)(x)
x = Activation(activation='sigmoid')(x)
substitute_detector = Model(input, x, name='substitute_detector')
substitute_detector.summary()
return substitute_detector
def load_data(self, filename):
data = load(filename)
xmal, ymal, xben, yben = data['xmal'], data['ymal'], data['xben'], data['yben']
# np.savez('mydata.npz', xmal=xmal, ymal=ymal, xben=xben, yben=yben,
# xmal_=xmal, ymal_=ymal, xben_=xmal, yben_=ymal, t=8)
return (xmal, ymal), (xben, yben)
def train(self, epochs, batch_size=32):
# Load the dataset
(xmal, ymal), (xben, yben) = self.load_data('mydata.npz')
xtrain_mal, xtest_mal, ytrain_mal, ytest_mal = train_test_split(xmal, ymal, test_size=0.20)
xtrain_ben, xtest_ben, ytrain_ben, ytest_ben = train_test_split(xben, yben, test_size=0.20)
# Train blackbox_detctor
self.blackbox_detector.fit(np.concatenate([xmal, xben]),
np.concatenate([ymal, yben]))
ytrain_ben_blackbox = self.blackbox_detector.predict(xtrain_ben)
Original_Train_TPR = self.blackbox_detector.score(xtrain_mal, ytrain_mal)
Original_Test_TPR = self.blackbox_detector.score(xtest_mal, ytest_mal)
Train_TPR, Test_TPR = [Original_Train_TPR], [Original_Test_TPR]
best_TPR = 1.0
for epoch in range(epochs):
for step in range(xtrain_mal.shape[0] // batch_size):
# ---------------------
# Train substitute_detector
# ---------------------
# Select a random batch of malware examples
idx = np.random.randint(0, xtrain_mal.shape[0], batch_size)
xmal_batch = xtrain_mal[idx]
noise = np.random.uniform(0, 1, (batch_size, self.z_dims)) #noise as random uniform
idx = np.random.randint(0, xmal_batch.shape[0], batch_size)
xben_batch = xtrain_ben[idx]
yben_batch = ytrain_ben_blackbox[idx]
# Generate a batch of new malware examples
gen_examples = self.generator.predict([xmal_batch, noise])
ymal_batch = self.blackbox_detector.predict(np.ones(gen_examples.shape)*(gen_examples > 0.5))
# Train the substitute_detector
d_loss_real = self.substitute_detector.train_on_batch(gen_examples, ymal_batch)
d_loss_fake = self.substitute_detector.train_on_batch(xben_batch, yben_batch)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
idx = np.random.randint(0, xtrain_mal.shape[0], batch_size)
xmal_batch = xtrain_mal[idx]
noise = np.random.uniform(0, 1, (batch_size, self.z_dims))
# Train the generator
g_loss = self.combined.train_on_batch([xmal_batch, noise], np.zeros((batch_size, 1)))
# Compute Train TPR
noise = np.random.uniform(0, 1, (xtrain_mal.shape[0], self.z_dims))
gen_examples = self.generator.predict([xtrain_mal, noise])
TPR = self.blackbox_detector.score(np.ones(gen_examples.shape) * (gen_examples > 0.5), ytrain_mal)
Train_TPR.append(TPR)
# Compute Test TPR
noise = np.random.uniform(0, 1, (xtest_mal.shape[0], self.z_dims))
gen_examples = self.generator.predict([xtest_mal, noise])
TPR = self.blackbox_detector.score(np.ones(gen_examples.shape) * (gen_examples > 0.5), ytest_mal)
Test_TPR.append(TPR)
# Save best model
if TPR < best_TPR:
self.combined.save_weights('saves/malgan.h5')
best_TPR = TPR
# Plot the progress
print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
print('Original_Train_TPR: {0}, Adver_Train_TPR: {1}'.format(Original_Train_TPR, Train_TPR[-1]))
print('Original_Test_TPR: {0}, Adver_Test_TPR: {1}'.format(Original_Test_TPR, Test_TPR[-1]))
# Plot TPR
plt.figure()
plt.plot(range(len(Train_TPR)), Train_TPR, c='r', label='Training Set', linewidth=2)
plt.plot(range(len(Test_TPR)), Test_TPR, c='g', linestyle='--', label='Validation Set', linewidth=2)
plt.xlabel('Epoch')
plt.ylabel('TPR')
plt.legend()
plt.savefig('saves/Epoch_TPR.png')
plt.show()
if __name__ == '__main__':
malgan = MalGAN()
malgan.train(epochs=50, batch_size=64)
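

# Optional usage sketch, not part of the original script: once training has
# finished, the generator can be reused on its own to emit adversarial API
# feature vectors. The helper below assumes the best weights were saved by
# train() to 'saves/malgan.h5' and that `xmal_features` is a binary feature
# matrix shaped (n_samples, 74), matching the training data.
def generate_adversarial(malgan_instance, xmal_features, weights_path='saves/malgan.h5'):
    malgan_instance.combined.load_weights(weights_path)
    noise = np.random.uniform(0, 1, (xmal_features.shape[0], malgan_instance.z_dims))
    gen_examples = malgan_instance.generator.predict([xmal_features, noise])
    # Binarize with the same 0.5 threshold used against the black-box detector.
    return (gen_examples > 0.5).astype(np.float32)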
|
[
"sklearn.model_selection.train_test_split",
"numpy.ones",
"keras.models.Model",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"keras.layers.Input",
"numpy.add",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"keras.optimizers.Adam",
"keras.layers.merge.Concatenate",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"numpy.random.uniform",
"keras.layers.Activation",
"keras.layers.merge.Maximum",
"numpy.zeros",
"keras.layers.Dense",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1089, 1103), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1093, 1103), False, 'from keras.optimizers import Adam\n'), ((1657, 1693), 'keras.layers.Input', 'Input', ([], {'shape': '(self.apifeature_dims,)'}), '(shape=(self.apifeature_dims,))\n', (1662, 1693), False, 'from keras.layers import Input, Dense, Activation\n'), ((1711, 1738), 'keras.layers.Input', 'Input', ([], {'shape': '(self.z_dims,)'}), '(shape=(self.z_dims,))\n', (1716, 1738), False, 'from keras.layers import Input, Dense, Activation\n'), ((2254, 2276), 'keras.models.Model', 'Model', (['input', 'validity'], {}), '(input, validity)\n', (2259, 2276), False, 'from keras.models import Model\n'), ((2627, 2663), 'keras.layers.Input', 'Input', ([], {'shape': '(self.apifeature_dims,)'}), '(shape=(self.apifeature_dims,))\n', (2632, 2663), False, 'from keras.layers import Input, Dense, Activation\n'), ((2681, 2708), 'keras.layers.Input', 'Input', ([], {'shape': '(self.z_dims,)'}), '(shape=(self.z_dims,))\n', (2686, 2708), False, 'from keras.layers import Input, Dense, Activation\n'), ((2945, 2989), 'keras.models.Model', 'Model', (['[example, noise]', 'x'], {'name': '"""generator"""'}), "([example, noise], x, name='generator')\n", (2950, 2989), False, 'from keras.models import Model\n'), ((3108, 3158), 'keras.layers.Input', 'Input', ([], {'shape': '(self.substitute_detector_layers[0],)'}), '(shape=(self.substitute_detector_layers[0],))\n', (3113, 3158), False, 'from keras.layers import Input, Dense, Activation\n'), ((3346, 3389), 'keras.models.Model', 'Model', (['input', 'x'], {'name': '"""substitute_detector"""'}), "(input, x, name='substitute_detector')\n", (3351, 3389), False, 'from keras.models import Model\n'), ((4019, 4062), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xmal', 'ymal'], {'test_size': '(0.2)'}), '(xmal, ymal, test_size=0.2)\n', (4035, 4062), False, 'from sklearn.model_selection import train_test_split\n'), ((4120, 4163), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xben', 'yben'], {'test_size': '(0.2)'}), '(xben, yben, test_size=0.2)\n', (4136, 4163), False, 'from sklearn.model_selection import train_test_split\n'), ((7602, 7614), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7612, 7614), True, 'import matplotlib.pyplot as plt\n'), ((7828, 7847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7838, 7847), True, 'import matplotlib.pyplot as plt\n'), ((7857, 7874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (7867, 7874), True, 'import matplotlib.pyplot as plt\n'), ((7884, 7896), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7894, 7896), True, 'import matplotlib.pyplot as plt\n'), ((7906, 7940), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""saves/Epoch_TPR.png"""'], {}), "('saves/Epoch_TPR.png')\n", (7917, 7940), True, 'import matplotlib.pyplot as plt\n'), ((7950, 7960), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7958, 7960), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2537), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)', 'max_depth': '(5)', 'random_state': '(1)'}), '(n_estimators=50, max_depth=5, random_state=1)\n', (2491, 2537), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2722, 2741), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (2733, 2741), False, 'from keras.layers.merge import Maximum, Concatenate\n'), 
((2851, 2883), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""sigmoid"""'}), "(activation='sigmoid')\n", (2861, 2883), False, 'from keras.layers import Input, Dense, Activation\n'), ((2900, 2909), 'keras.layers.merge.Maximum', 'Maximum', ([], {}), '()\n', (2907, 2909), False, 'from keras.layers.merge import Maximum, Concatenate\n'), ((3279, 3311), 'keras.layers.Activation', 'Activation', ([], {'activation': '"""sigmoid"""'}), "(activation='sigmoid')\n", (3289, 3311), False, 'from keras.layers import Input, Dense, Activation\n'), ((4237, 4265), 'numpy.concatenate', 'np.concatenate', (['[xmal, xben]'], {}), '([xmal, xben])\n', (4251, 4265), True, 'import numpy as np\n'), ((4303, 4331), 'numpy.concatenate', 'np.concatenate', (['[ymal, yben]'], {}), '([ymal, yben])\n', (4317, 4331), True, 'import numpy as np\n'), ((6448, 6507), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(xtrain_mal.shape[0], self.z_dims)'], {}), '(0, 1, (xtrain_mal.shape[0], self.z_dims))\n', (6465, 6507), True, 'import numpy as np\n'), ((6782, 6840), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(xtest_mal.shape[0], self.z_dims)'], {}), '(0, 1, (xtest_mal.shape[0], self.z_dims))\n', (6799, 6840), True, 'import numpy as np\n'), ((2824, 2834), 'keras.layers.Dense', 'Dense', (['dim'], {}), '(dim)\n', (2829, 2834), False, 'from keras.layers import Input, Dense, Activation\n'), ((3252, 3262), 'keras.layers.Dense', 'Dense', (['dim'], {}), '(dim)\n', (3257, 3262), False, 'from keras.layers import Input, Dense, Activation\n'), ((4989, 5042), 'numpy.random.randint', 'np.random.randint', (['(0)', 'xtrain_mal.shape[0]', 'batch_size'], {}), '(0, xtrain_mal.shape[0], batch_size)\n', (5006, 5042), True, 'import numpy as np\n'), ((5114, 5164), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(batch_size, self.z_dims)'], {}), '(0, 1, (batch_size, self.z_dims))\n', (5131, 5164), True, 'import numpy as np\n'), ((5215, 5268), 'numpy.random.randint', 'np.random.randint', (['(0)', 'xmal_batch.shape[0]', 'batch_size'], {}), '(0, xmal_batch.shape[0], batch_size)\n', (5232, 5268), True, 'import numpy as np\n'), ((6072, 6125), 'numpy.random.randint', 'np.random.randint', (['(0)', 'xtrain_mal.shape[0]', 'batch_size'], {}), '(0, xtrain_mal.shape[0], batch_size)\n', (6089, 6125), True, 'import numpy as np\n'), ((6197, 6247), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(batch_size, self.z_dims)'], {}), '(0, 1, (batch_size, self.z_dims))\n', (6214, 6247), True, 'import numpy as np\n'), ((5894, 5926), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (5900, 5926), True, 'import numpy as np\n'), ((6365, 6390), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (6373, 6390), True, 'import numpy as np\n'), ((6628, 6655), 'numpy.ones', 'np.ones', (['gen_examples.shape'], {}), '(gen_examples.shape)\n', (6635, 6655), True, 'import numpy as np\n'), ((6960, 6987), 'numpy.ones', 'np.ones', (['gen_examples.shape'], {}), '(gen_examples.shape)\n', (6967, 6987), True, 'import numpy as np\n'), ((5569, 5596), 'numpy.ones', 'np.ones', (['gen_examples.shape'], {}), '(gen_examples.shape)\n', (5576, 5596), True, 'import numpy as np\n')]
|
import csv
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.axes(projection='3d')
# each CSV cell in columns 0-3 holds a stringified 2-element position array
# such as "[0.1 0.2]"; column 4 holds the time stamp
f = open('consensus_line_2.csv')
csv_f = csv.reader(f)
agent2 = []
agent4 = []
agent5 = []
agent7 = []
dt = []
i = 0
for row in csv_f:
a=(row[0])
a=a[1:-1]
a.strip()
a=np.fromstring(a, dtype=float, sep=' ')
agent2.append(a)
a=(row[1])
a=a[1:-1]
a.strip()
a=np.fromstring(a, dtype=float, sep=' ')
agent4.append(a)
a=(row[2])
a=a[1:-1]
a.strip()
a=np.fromstring(a, dtype=float, sep=' ')
agent5.append(a)
a=(row[3])
a=a[1:-1]
a.strip()
a=np.fromstring(a, dtype=float, sep=' ')
agent7.append(a)
dt.append(float(row[4]))
i=i+1
print(i)
if i>(1770-2):
break
dt=np.array(dt)
agent5=np.array(agent5)
agent2=np.array(agent2)
agent4=np.array(agent4)
agent7=np.array(agent7)
ax.plot3D((0.2)*dt,(0.2)*dt,dt,'green')
ax.plot3D(agent2[:,0],agent2[:,1],dt,'gray')
ax.plot3D(agent4[:,0],agent4[:,1],dt,'gray')
ax.plot3D(agent5[:,0],agent5[:,1],dt,'gray')
ax.plot3D(agent7[:,0],agent7[:,1],dt,'gray')
ax.plot([agent2[0,0],agent4[0,0]],[agent2[0,1],agent4[0,1]],[dt[0],dt[0]],'blue')
ax.plot([agent4[0,0],agent5[0,0]],[agent4[0,1],agent5[0,1]],[dt[0],dt[0]],'blue')
ax.plot([agent2[0,0],agent7[0,0]],[agent2[0,1],agent7[0,1]],[dt[0],dt[0]],'blue')
ax.plot([agent2[500,0],agent4[500,0]],[agent2[500,1],agent4[500,1]],[dt[500],dt[500]],'blue')
ax.plot([agent4[500,0],agent5[500,0]],[agent4[500,1],agent5[500,1]],[dt[500],dt[500]],'blue')
ax.plot([agent2[500,0],agent7[500,0]],[agent2[500,1],agent7[500,1]],[dt[500],dt[500]],'blue')
ax.plot3D((agent2[:,0]+agent4[:,0]+agent5[:,0]+agent7[:,0])/4,(agent2[:,1]+agent4[:,1]+agent5[:,1]+agent7[:,1])/4,dt,'red')
plt.show()
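
# Optional refactor sketch, not part of the original script: the four
# identical parsing blocks in the loop above can be collapsed into a helper.
# It assumes the row layout used above -- columns 0-3 hold stringified
# 2-element position arrays such as "[0.1 0.2]" and column 4 holds time.
# (The bare a.strip() calls above discard their result; np.fromstring
# tolerates the stray whitespace either way.)
def parse_row(row):
    agents = [np.fromstring(cell[1:-1].strip(), dtype=float, sep=' ') for cell in row[:4]]
    return agents, float(row[4])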
|
[
"csv.reader",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.fromstring"
] |
[((108, 120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (118, 120), True, 'import matplotlib.pyplot as plt\n'), ((126, 151), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (134, 151), True, 'import matplotlib.pyplot as plt\n'), ((194, 207), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (204, 207), False, 'import csv\n'), ((266, 279), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (276, 279), False, 'import csv\n'), ((779, 791), 'numpy.array', 'np.array', (['dt'], {}), '(dt)\n', (787, 791), True, 'import numpy as np\n'), ((799, 815), 'numpy.array', 'np.array', (['agent5'], {}), '(agent5)\n', (807, 815), True, 'import numpy as np\n'), ((823, 839), 'numpy.array', 'np.array', (['agent2'], {}), '(agent2)\n', (831, 839), True, 'import numpy as np\n'), ((847, 863), 'numpy.array', 'np.array', (['agent4'], {}), '(agent4)\n', (855, 863), True, 'import numpy as np\n'), ((871, 887), 'numpy.array', 'np.array', (['agent7'], {}), '(agent7)\n', (879, 887), True, 'import numpy as np\n'), ((1763, 1773), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1771, 1773), True, 'import matplotlib.pyplot as plt\n'), ((340, 378), 'numpy.fromstring', 'np.fromstring', (['a'], {'dtype': 'float', 'sep': '""" """'}), "(a, dtype=float, sep=' ')\n", (353, 378), True, 'import numpy as np\n'), ((439, 477), 'numpy.fromstring', 'np.fromstring', (['a'], {'dtype': 'float', 'sep': '""" """'}), "(a, dtype=float, sep=' ')\n", (452, 477), True, 'import numpy as np\n'), ((538, 576), 'numpy.fromstring', 'np.fromstring', (['a'], {'dtype': 'float', 'sep': '""" """'}), "(a, dtype=float, sep=' ')\n", (551, 576), True, 'import numpy as np\n'), ((637, 675), 'numpy.fromstring', 'np.fromstring', (['a'], {'dtype': 'float', 'sep': '""" """'}), "(a, dtype=float, sep=' ')\n", (650, 675), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import streamlit as st
from PIL import Image
import cv2
def app():
filters = st.sidebar.selectbox("Select filters",("Pencil Sketch","Detail Enhancement","Bilateral Filter","Pencil Edges","White Box"))
st.write(filters)
def resize_crop(image):
h, w, c = np.shape(image)
if min(h, w) > 720:
if h > w:
h, w = int(720*h/w), 720
else:
h, w = 720, int(720*w/h)
image = cv2.resize(image, (w, h),interpolation=cv2.INTER_AREA)
h, w = (h//8)*8, (w//8)*8
image = image[:h, :w, :]
return image
image = st.file_uploader("Choose an image...", type="jpg")
if image is not None:
image = Image.open(image)
image = np.array(image)
if filters == "Pencil Sketch":
st.write("Image will be converted into a sketch as if your image has been drawn using a pencil. ")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_blur = cv2.GaussianBlur(gray, (25, 25), 0)
cartoon = cv2.divide(gray, gray_blur, scale=250.0)
st.image(cartoon)
elif filters == "Detail Enhancement":
st.write("A cartoon effect by sharpening the image, smoothing the colors, and enhancing the edges. ")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_blur = cv2.medianBlur(gray, 3)
edges = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
color = cv2.detailEnhance(image, sigma_s=5, sigma_r=0.5)
cartoon = cv2.bitwise_and(color, color, mask=edges)
st.image(cartoon)
elif filters == "Bilateral Filter":
st.write("smooth the image and the colors while preserving the edge at the same time.")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_blur = cv2.medianBlur(gray, 3)
edges = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
color = cv2.bilateralFilter(image, 5, 50, 5)
cartoon = cv2.bitwise_and(color, color, mask=edges)
st.image(cartoon)
elif filters == "Pencil Edges":
st.write("Pencil Edges filter creates a new image that contains only significant edges and white background. ")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 25)
edges = cv2.Laplacian(gray, -1, ksize=3)
edges_inv = 255-edges
dummy, cartoon = cv2.threshold(edges_inv, 170, 255, cv2.THRESH_BINARY)
st.image(cartoon)
else:
interpreter = tf.lite.Interpreter(model_path="cartoon_gan.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
image = cv2.resize(image, (256, 256),interpolation=cv2.INTER_CUBIC)
image = image.astype(np.float32)/127.5 - 1
image = image.reshape(input_shape)
interpreter.set_tensor(input_details[0]['index'], image)
interpreter.invoke()
output = interpreter.get_tensor(output_details[0]['index'])
output = (np.squeeze(output)+1)*127.5
output = np.clip(output, 0, 255)
cv2.imwrite('image.jpg',output)
st.image('image.jpg')
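

# Optional standalone sketch, not part of the original app: the TFLite branch
# above, extracted into a plain function so it can be tested without Streamlit.
# It keeps the app's assumptions -- a 256x256 RGB input scaled to [-1, 1] and a
# "cartoon_gan.tflite" model file next to the script.
def cartoonize(image, model_path="cartoon_gan.tflite"):
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    resized = cv2.resize(image, (256, 256), interpolation=cv2.INTER_CUBIC)
    scaled = resized.astype(np.float32) / 127.5 - 1
    scaled = scaled.reshape(input_details[0]['shape'])
    interpreter.set_tensor(input_details[0]['index'], scaled)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details[0]['index'])
    output = (np.squeeze(output) + 1) * 127.5
    return np.clip(output, 0, 255).astype(np.uint8)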
|
[
"cv2.GaussianBlur",
"streamlit.image",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.adaptiveThreshold",
"numpy.clip",
"numpy.shape",
"cv2.bilateralFilter",
"streamlit.sidebar.selectbox",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.detailEnhance",
"cv2.divide",
"cv2.resize",
"cv2.Laplacian",
"streamlit.file_uploader",
"tensorflow.lite.Interpreter",
"numpy.squeeze",
"cv2.threshold",
"streamlit.write",
"PIL.Image.open",
"numpy.array"
] |
[((149, 281), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select filters"""', "('Pencil Sketch', 'Detail Enhancement', 'Bilateral Filter', 'Pencil Edges',\n 'White Box')"], {}), "('Select filters', ('Pencil Sketch',\n 'Detail Enhancement', 'Bilateral Filter', 'Pencil Edges', 'White Box'))\n", (169, 281), True, 'import streamlit as st\n'), ((277, 294), 'streamlit.write', 'st.write', (['filters'], {}), '(filters)\n', (285, 294), True, 'import streamlit as st\n'), ((678, 728), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose an image..."""'], {'type': '"""jpg"""'}), "('Choose an image...', type='jpg')\n", (694, 728), True, 'import streamlit as st\n'), ((341, 356), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (349, 356), True, 'import numpy as np\n'), ((523, 578), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (w, h), interpolation=cv2.INTER_AREA)\n', (533, 578), False, 'import cv2\n'), ((772, 789), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (782, 789), False, 'from PIL import Image\n'), ((806, 821), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (814, 821), True, 'import numpy as np\n'), ((879, 987), 'streamlit.write', 'st.write', (['"""Image will be converted into a sketch as if your image has been drawn using a pencil. """'], {}), "(\n 'Image will be converted into a sketch as if your image has been drawn using a pencil. '\n )\n", (887, 987), True, 'import streamlit as st\n'), ((1006, 1045), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1018, 1045), False, 'import cv2\n'), ((1070, 1105), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(25, 25)', '(0)'], {}), '(gray, (25, 25), 0)\n', (1086, 1105), False, 'import cv2\n'), ((1128, 1168), 'cv2.divide', 'cv2.divide', (['gray', 'gray_blur'], {'scale': '(250.0)'}), '(gray, gray_blur, scale=250.0)\n', (1138, 1168), False, 'import cv2\n'), ((1181, 1198), 'streamlit.image', 'st.image', (['cartoon'], {}), '(cartoon)\n', (1189, 1198), True, 'import streamlit as st\n'), ((1277, 1388), 'streamlit.write', 'st.write', (['"""A cartoon effect by sharpening the image, smoothing the colors, and enhancing the edges. """'], {}), "(\n 'A cartoon effect by sharpening the image, smoothing the colors, and enhancing the edges. 
'\n )\n", (1285, 1388), True, 'import streamlit as st\n'), ((1398, 1437), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1410, 1437), False, 'import cv2\n'), ((1462, 1485), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(3)'], {}), '(gray, 3)\n', (1476, 1485), False, 'import cv2\n'), ((1506, 1601), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray_blur', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', '(9)', '(9)'], {}), '(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 9, 9)\n', (1527, 1601), False, 'import cv2\n'), ((1617, 1665), 'cv2.detailEnhance', 'cv2.detailEnhance', (['image'], {'sigma_s': '(5)', 'sigma_r': '(0.5)'}), '(image, sigma_s=5, sigma_r=0.5)\n', (1634, 1665), False, 'import cv2\n'), ((1688, 1729), 'cv2.bitwise_and', 'cv2.bitwise_and', (['color', 'color'], {'mask': 'edges'}), '(color, color, mask=edges)\n', (1703, 1729), False, 'import cv2\n'), ((1743, 1760), 'streamlit.image', 'st.image', (['cartoon'], {}), '(cartoon)\n', (1751, 1760), True, 'import streamlit as st\n'), ((1846, 1943), 'streamlit.write', 'st.write', (['"""smooth the image and the colors while preserving the edge at the same time."""'], {}), "(\n 'smooth the image and the colors while preserving the edge at the same time.'\n )\n", (1854, 1943), True, 'import streamlit as st\n'), ((1953, 1992), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1965, 1992), False, 'import cv2\n'), ((2017, 2040), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(3)'], {}), '(gray, 3)\n', (2031, 2040), False, 'import cv2\n'), ((2061, 2156), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray_blur', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', '(9)', '(9)'], {}), '(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 9, 9)\n', (2082, 2156), False, 'import cv2\n'), ((2172, 2208), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['image', '(5)', '(50)', '(5)'], {}), '(image, 5, 50, 5)\n', (2191, 2208), False, 'import cv2\n'), ((2231, 2272), 'cv2.bitwise_and', 'cv2.bitwise_and', (['color', 'color'], {'mask': 'edges'}), '(color, color, mask=edges)\n', (2246, 2272), False, 'import cv2\n'), ((2286, 2303), 'streamlit.image', 'st.image', (['cartoon'], {}), '(cartoon)\n', (2294, 2303), True, 'import streamlit as st\n'), ((2396, 2517), 'streamlit.write', 'st.write', (['"""Pencil Edges filter creates a new image that contains only significant edges and white background. """'], {}), "(\n 'Pencil Edges filter creates a new image that contains only significant edges and white background. 
'\n )\n", (2404, 2517), True, 'import streamlit as st\n'), ((2527, 2566), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2539, 2566), False, 'import cv2\n'), ((2586, 2610), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(25)'], {}), '(gray, 25)\n', (2600, 2610), False, 'import cv2\n'), ((2632, 2664), 'cv2.Laplacian', 'cv2.Laplacian', (['gray', '(-1)'], {'ksize': '(3)'}), '(gray, -1, ksize=3)\n', (2645, 2664), False, 'import cv2\n'), ((2728, 2781), 'cv2.threshold', 'cv2.threshold', (['edges_inv', '(170)', '(255)', 'cv2.THRESH_BINARY'], {}), '(edges_inv, 170, 255, cv2.THRESH_BINARY)\n', (2741, 2781), False, 'import cv2\n'), ((2794, 2811), 'streamlit.image', 'st.image', (['cartoon'], {}), '(cartoon)\n', (2802, 2811), True, 'import streamlit as st\n'), ((2868, 2920), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""cartoon_gan.tflite"""'}), "(model_path='cartoon_gan.tflite')\n", (2887, 2920), True, 'import tensorflow as tf\n'), ((3158, 3218), 'cv2.resize', 'cv2.resize', (['image', '(256, 256)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (256, 256), interpolation=cv2.INTER_CUBIC)\n', (3168, 3218), False, 'import cv2\n'), ((3565, 3588), 'numpy.clip', 'np.clip', (['output', '(0)', '(255)'], {}), '(output, 0, 255)\n', (3572, 3588), True, 'import numpy as np\n'), ((3601, 3633), 'cv2.imwrite', 'cv2.imwrite', (['"""image.jpg"""', 'output'], {}), "('image.jpg', output)\n", (3612, 3633), False, 'import cv2\n'), ((3645, 3666), 'streamlit.image', 'st.image', (['"""image.jpg"""'], {}), "('image.jpg')\n", (3653, 3666), True, 'import streamlit as st\n'), ((3516, 3534), 'numpy.squeeze', 'np.squeeze', (['output'], {}), '(output)\n', (3526, 3534), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
import json
from typing import List
import pandas as pd
import re
import ir_thermography.thermometry as irt
import matplotlib.ticker as ticker
import matplotlib.gridspec as gridspec
# base_path = r'C:\Users\erick\OneDrive\Documents\ucsd\Postdoc\research\data\firing_tests\IR_VS_POWER'
base_path = r'C:\Users\erick\OneDrive\Documents\ucsd\Postdoc\research\data\firing_tests\IR_VS_POWER\graphite'
# csv_database = r'R3N21_firing_database.csv'
csv_database = 'GT001688_firing_database.csv'
chamber_volume = 31.57 # L
max_time = 4.0 # s
heat_flux_at_100pct = 25.2 # MW/m2
def plot_pressure(base_path: str, filelist: List, legends: List, output_filename: str, colors, display=False,
plot_title=None):
with open('plot_style.json', 'r') as file:
json_file = json.load(file)
plot_style = json_file['defaultPlotStyle']
mpl.rcParams.update(plot_style)
fig, ax = plt.subplots()
fig.set_size_inches(4.5, 3.0)
base_pressures = np.empty_like(filelist, dtype=np.float64)
peak_pressures = np.empty_like(filelist, dtype=np.float64)
peak_dt = np.empty_like(filelist, dtype=np.float64)
for fn, leg, c, i in zip(filelist, legends, colors, range(len(filelist))):
params = get_experiment_params(base_path, fn)
pressure_csv = f'{fn}_pressure.csv'
print(pressure_csv)
pressure_data = pd.read_csv(filepath_or_buffer=os.path.join(base_path, pressure_csv))
pressure_data = pressure_data.apply(pd.to_numeric)
time_s = pressure_data['Time (s)'].values
time_s -= time_s.min()
pressure = 1000 * pressure_data['Pressure (Torr)'].values
base_pressures[i] = pressure[0]
peak_pressures[i] = pressure.max()
idx_peak = (np.abs(pressure - peak_pressures[i])).argmin()
peak_dt[i] = time_s[idx_peak]
# title_str = 'Sample ' + params['Sample Name']['value'] + ', '
# params_title = params
# params_title.pop('Sample Name')
#
# for i, p in enumerate(params_title):
# title_str += f"{params_title[p]['value']}{params_title[p]['units']}"
# if i + 1 < len(params_title):
# title_str += ', '
line = ax.plot(time_s, pressure, label=leg, color=c)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Pressure (mTorr)')
# colors.append(line[0].get_color())
leg = ax.legend(frameon=True, loc='best', fontsize=8)
for color, text in zip(colors, leg.get_texts()):
text.set_color(color)
if plot_title is not None:
ax.set_title(plot_title)
outgassing_rate = chamber_volume * (peak_pressures - base_pressures) * 1E-3 / peak_dt
outgas_df = pd.DataFrame(data={
'Sample': legends,
'Base Pressure (mTorr)': base_pressures,
'Peak Pressure (mTorr)': peak_pressures,
'Peak dt (s)': peak_dt,
'Outgassing Rate (Torr L / s)': outgassing_rate
})
print(outgas_df)
outgas_df.to_csv(os.path.join(base_path, f'{output_filename}_OUTGASSING.csv'), index=False)
fig.tight_layout()
fig.savefig(os.path.join(base_path, f'{output_filename}_PRESSURE.png'), dpi=600)
if display:
fig.show()
def get_experiment_params(relative_path: str, filename: str):
# Read the experiment parameters
results_csv = os.path.join(relative_path, f'{filename}.csv')
count = 0
params = {}
with open(results_csv) as f:
for line in f:
if line.startswith('#'):
if count > 1:
l = line.strip()
print(l)
if l == '#Data:':
break
                    pattern1 = re.compile(r"\s+(.*?):\s(.*?)\s(.*?)$")
                    pattern2 = re.compile(r"\s+(.*?):\s(.*?)$")
matches1 = pattern1.findall(l)
matches2 = pattern2.findall(l)
if len(matches1) > 0:
params[matches1[0][0]] = {
'value': matches1[0][1],
'units': matches1[0][2]
}
elif len(matches2) > 0:
params[matches2[0][0]] = {
'value': matches2[0][1],
'units': ''
}
count += 1
return params
if __name__ == '__main__':
thermometry = irt.PDThermometer()
database_df = pd.read_csv(
os.path.join(base_path, csv_database)
)
database_df['Laser Power Setting (%)'] = database_df['Laser Power Setting (%)'].apply(pd.to_numeric)
database_df.sort_values(by='Laser Power Setting (%)', ascending=True)
# print(database_df)
filelist = database_df['csv']
n = len(filelist)
colors = plt.cm.jet(np.linspace(0, 1, n))
base_pressures = np.empty(n, dtype=np.float64)
peak_pressures = np.empty(n, dtype=np.float64)
peak_dt = np.empty(n, dtype=np.float64)
with open('plot_style.json', 'r') as file:
json_file = json.load(file)
plot_style = json_file['defaultPlotStyle']
mpl.rcParams.update(plot_style)
fig = plt.figure(tight_layout=True)
fig.set_size_inches(5.0, 5.5)
gs = gridspec.GridSpec(ncols=1, nrows=2, figure=fig)#, height_ratios=[1.618, 1.618, 1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
laser_power_setpoint_list = []
max_temperature = np.empty(len(filelist), dtype=np.float64)
heat_flux = np.empty_like(max_temperature)
for i, file in enumerate(filelist):
file = file.strip()
csv_file = file + '_irdata.csv'
print(f"Processing file {i+1:d} of {n}: {file}")
experiment_params = get_experiment_params(relative_path=base_path, filename=file)
photodiode_gain = experiment_params['Photodiode Gain']['value']
laser_power_setting = experiment_params['Laser Power Setpoint']['value']
laser_power_setpoint_list.append(laser_power_setting)
heat_flux[i] = heat_flux_at_100pct * float(laser_power_setting) * 1E-2
ir_df = pd.read_csv(os.path.join(base_path, csv_file)).apply(pd.to_numeric)
ir_df = ir_df[ir_df['Time (s)'] <= max_time]
ir_df = ir_df[ir_df['Voltage (V)'] > 0.0]
thermometry.gain = int(photodiode_gain)
print(f"Calibration Factor: {thermometry.calibration_factor}")
time_s = ir_df['Time (s)'].values
dt = 0.103 if float(laser_power_setting) < 70 else 0.0270
time_s -= dt #time_s.min()
if int(laser_power_setting) == 80:
time_s -= 0.067
voltage = ir_df['Voltage (V)'].values
temperature_c = thermometry.get_temperature(voltage=voltage) - 273.15
if len(temperature_c) > 0:
max_temperature[i] = temperature_c.max()
print(temperature_c)
else:
max_temperature[i] = 20.0
lbl = f'{float(laser_power_setting):3.0f} % Power'
ax1.plot(
time_s,
temperature_c,
color=colors[i], lw=1.75,
label=lbl
)
pressure_csv = f'{file}_pressure.csv'
pressure_data = pd.read_csv(filepath_or_buffer=os.path.join(base_path, pressure_csv))
pressure_data = pressure_data.apply(pd.to_numeric)
time_s = pressure_data['Time (s)'].values
time_s -= time_s.min() + 0.5
time_msk = time_s >= 0.0
time_s = time_s[time_msk]
pressure = 1000 * pressure_data['Pressure (Torr)'].values
pressure = pressure[time_msk]
base_pressures[i] = pressure[0]
peak_pressures[i] = pressure.max()
idx_peak = (np.abs(pressure - peak_pressures[i])).argmin()
peak_dt[i] = time_s[idx_peak]
ax2.plot(time_s, pressure, label=lbl, color=colors[i], lw=1.75)
ax1.set_xlabel('Time (s)')
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Chamber pressure (mTorr)')
ax1.set_ylabel('Surface temperature (°C)')
ax1.set_ylim(top=3000)
# ax2.set_ylim(top=40)
# ax1.set_xlabel('Time (s)')
ax1.set_xlim(0.0, max_time)
ax2.set_xlim(left=0.0, right=max_time)
# ax1.legend(loc="upper right", prop={'size': 9}, frameon=False, ncol=3)
ax1.legend(
bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=4, mode="expand", borderaxespad=0.,
prop={'size': 8}
)
ax1.ticklabel_format(useMathText=True)
# ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.25))
# ax1.xaxis.set_minor_locator(ticker.MultipleLocator(0.125))
ax1.yaxis.set_major_locator(ticker.MultipleLocator(500))
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(250))
ax2.ticklabel_format(useMathText=True)
ax2.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax2.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
# ax2.yaxis.set_major_locator(ticker.MultipleLocator(10))
# ax2.yaxis.set_minor_locator(ticker.MultipleLocator(5))
# ax1.set_title('IR Thermography')
# ax2.set_title('Chamber Pressure')
filetag = os.path.splitext(csv_database)[0]
outgassing_rate = chamber_volume * (peak_pressures - base_pressures) * 1E-3 / peak_dt
outgas_df = pd.DataFrame(data={
'Laser Power Setpoint': laser_power_setpoint_list,
'Base Pressure (mTorr)': base_pressures,
'Peak Pressure (mTorr)': peak_pressures,
'Peak dt (s)': peak_dt,
'Outgassing Rate (Torr L / s)': outgassing_rate
})
temperature_vs_power_df = pd.DataFrame(data={
'Laser power setpoint (%)': laser_power_setpoint_list,
'Heat flux (MW/m^2)': heat_flux,
'Max surface temperature (C)': max_temperature
})
temperature_vs_power_df.to_csv(os.path.join(base_path, f'{filetag}_surface_temperature.csv'), index=False)
print(outgas_df)
outgas_df.to_csv(os.path.join(base_path, f'{filetag}_OUTGASSING.csv'), index=False)
fig.savefig(os.path.join(base_path, filetag + "_ir-pressure_plot.png"), dpi=600)
fig.savefig(os.path.join(base_path, filetag + "_ir-pressure_plot.eps"), dpi=600)
fig.savefig(os.path.join(base_path, filetag + "_ir-pressure_plot.svg"), dpi=600)
plt.show()
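
# Worked example, not taken from the measurement data: with the 31.57 L chamber
# defined at the top of this script, a rise from a 10 mTorr base pressure to a
# 40 mTorr peak reached 0.5 s after the shot gives
#   Q = V * delta_p / delta_t = 31.57 L * (40 - 10) mTorr * 1e-3 / 0.5 s
#     ~ 1.9 Torr L / s,
# which is the same expression evaluated vectorized for `outgassing_rate` above.
example_rate = chamber_volume * (40.0 - 10.0) * 1e-3 / 0.5
print(f'example outgassing rate: {example_rate:.2f} Torr L / s')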
|
[
"pandas.DataFrame",
"json.load",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.abs",
"numpy.empty",
"matplotlib.rcParams.update",
"numpy.empty_like",
"matplotlib.pyplot.figure",
"os.path.splitext",
"numpy.linspace",
"ir_thermography.thermometry.PDThermometer",
"matplotlib.ticker.MultipleLocator",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplots",
"re.compile"
] |
[((943, 974), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['plot_style'], {}), '(plot_style)\n', (962, 974), True, 'import matplotlib as mpl\n'), ((990, 1004), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1102), 'numpy.empty_like', 'np.empty_like', (['filelist'], {'dtype': 'np.float64'}), '(filelist, dtype=np.float64)\n', (1074, 1102), True, 'import numpy as np\n'), ((1124, 1165), 'numpy.empty_like', 'np.empty_like', (['filelist'], {'dtype': 'np.float64'}), '(filelist, dtype=np.float64)\n', (1137, 1165), True, 'import numpy as np\n'), ((1180, 1221), 'numpy.empty_like', 'np.empty_like', (['filelist'], {'dtype': 'np.float64'}), '(filelist, dtype=np.float64)\n', (1193, 1221), True, 'import numpy as np\n'), ((2781, 2982), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Sample': legends, 'Base Pressure (mTorr)': base_pressures,\n 'Peak Pressure (mTorr)': peak_pressures, 'Peak dt (s)': peak_dt,\n 'Outgassing Rate (Torr L / s)': outgassing_rate}"}), "(data={'Sample': legends, 'Base Pressure (mTorr)':\n base_pressures, 'Peak Pressure (mTorr)': peak_pressures, 'Peak dt (s)':\n peak_dt, 'Outgassing Rate (Torr L / s)': outgassing_rate})\n", (2793, 2982), True, 'import pandas as pd\n'), ((3402, 3448), 'os.path.join', 'os.path.join', (['relative_path', 'f"""{filename}.csv"""'], {}), "(relative_path, f'{filename}.csv')\n", (3414, 3448), False, 'import os\n'), ((4501, 4520), 'ir_thermography.thermometry.PDThermometer', 'irt.PDThermometer', ([], {}), '()\n', (4518, 4520), True, 'import ir_thermography.thermometry as irt\n'), ((4932, 4961), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (4940, 4961), True, 'import numpy as np\n'), ((4983, 5012), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (4991, 5012), True, 'import numpy as np\n'), ((5027, 5056), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (5035, 5056), True, 'import numpy as np\n'), ((5196, 5227), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['plot_style'], {}), '(plot_style)\n', (5215, 5227), True, 'import matplotlib as mpl\n'), ((5239, 5268), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': '(True)'}), '(tight_layout=True)\n', (5249, 5268), True, 'import matplotlib.pyplot as plt\n'), ((5313, 5360), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(1)', 'nrows': '(2)', 'figure': 'fig'}), '(ncols=1, nrows=2, figure=fig)\n', (5330, 5360), True, 'import matplotlib.gridspec as gridspec\n'), ((5579, 5609), 'numpy.empty_like', 'np.empty_like', (['max_temperature'], {}), '(max_temperature)\n', (5592, 5609), True, 'import numpy as np\n'), ((9263, 9500), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Laser Power Setpoint': laser_power_setpoint_list, 'Base Pressure (mTorr)':\n base_pressures, 'Peak Pressure (mTorr)': peak_pressures, 'Peak dt (s)':\n peak_dt, 'Outgassing Rate (Torr L / s)': outgassing_rate}"}), "(data={'Laser Power Setpoint': laser_power_setpoint_list,\n 'Base Pressure (mTorr)': base_pressures, 'Peak Pressure (mTorr)':\n peak_pressures, 'Peak dt (s)': peak_dt, 'Outgassing Rate (Torr L / s)':\n outgassing_rate})\n", (9275, 9500), True, 'import pandas as pd\n'), ((9566, 9729), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Laser power setpoint (%)': laser_power_setpoint_list,\n 'Heat flux (MW/m^2)': heat_flux, 'Max surface temperature (C)':\n max_temperature}"}), 
"(data={'Laser power setpoint (%)': laser_power_setpoint_list,\n 'Heat flux (MW/m^2)': heat_flux, 'Max surface temperature (C)':\n max_temperature})\n", (9578, 9729), True, 'import pandas as pd\n'), ((10235, 10245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10243, 10245), True, 'import matplotlib.pyplot as plt\n'), ((872, 887), 'json.load', 'json.load', (['file'], {}), '(file)\n', (881, 887), False, 'import json\n'), ((3064, 3124), 'os.path.join', 'os.path.join', (['base_path', 'f"""{output_filename}_OUTGASSING.csv"""'], {}), "(base_path, f'{output_filename}_OUTGASSING.csv')\n", (3076, 3124), False, 'import os\n'), ((3179, 3237), 'os.path.join', 'os.path.join', (['base_path', 'f"""{output_filename}_PRESSURE.png"""'], {}), "(base_path, f'{output_filename}_PRESSURE.png')\n", (3191, 3237), False, 'import os\n'), ((4560, 4597), 'os.path.join', 'os.path.join', (['base_path', 'csv_database'], {}), '(base_path, csv_database)\n', (4572, 4597), False, 'import os\n'), ((4888, 4908), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (4899, 4908), True, 'import numpy as np\n'), ((5125, 5140), 'json.load', 'json.load', (['file'], {}), '(file)\n', (5134, 5140), False, 'import json\n'), ((8646, 8673), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(500)'], {}), '(500)\n', (8668, 8673), True, 'import matplotlib.ticker as ticker\n'), ((8707, 8734), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(250)'], {}), '(250)\n', (8729, 8734), True, 'import matplotlib.ticker as ticker\n'), ((8812, 8839), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.5)'], {}), '(0.5)\n', (8834, 8839), True, 'import matplotlib.ticker as ticker\n'), ((8873, 8901), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.25)'], {}), '(0.25)\n', (8895, 8901), True, 'import matplotlib.ticker as ticker\n'), ((9121, 9151), 'os.path.splitext', 'os.path.splitext', (['csv_database'], {}), '(csv_database)\n', (9137, 9151), False, 'import os\n'), ((9788, 9849), 'os.path.join', 'os.path.join', (['base_path', 'f"""{filetag}_surface_temperature.csv"""'], {}), "(base_path, f'{filetag}_surface_temperature.csv')\n", (9800, 9849), False, 'import os\n'), ((9907, 9959), 'os.path.join', 'os.path.join', (['base_path', 'f"""{filetag}_OUTGASSING.csv"""'], {}), "(base_path, f'{filetag}_OUTGASSING.csv')\n", (9919, 9959), False, 'import os\n'), ((9992, 10050), 'os.path.join', 'os.path.join', (['base_path', "(filetag + '_ir-pressure_plot.png')"], {}), "(base_path, filetag + '_ir-pressure_plot.png')\n", (10004, 10050), False, 'import os\n'), ((10077, 10135), 'os.path.join', 'os.path.join', (['base_path', "(filetag + '_ir-pressure_plot.eps')"], {}), "(base_path, filetag + '_ir-pressure_plot.eps')\n", (10089, 10135), False, 'import os\n'), ((10162, 10220), 'os.path.join', 'os.path.join', (['base_path', "(filetag + '_ir-pressure_plot.svg')"], {}), "(base_path, filetag + '_ir-pressure_plot.svg')\n", (10174, 10220), False, 'import os\n'), ((1483, 1520), 'os.path.join', 'os.path.join', (['base_path', 'pressure_csv'], {}), '(base_path, pressure_csv)\n', (1495, 1520), False, 'import os\n'), ((1831, 1867), 'numpy.abs', 'np.abs', (['(pressure - peak_pressures[i])'], {}), '(pressure - peak_pressures[i])\n', (1837, 1867), True, 'import numpy as np\n'), ((7275, 7312), 'os.path.join', 'os.path.join', (['base_path', 'pressure_csv'], {}), '(base_path, pressure_csv)\n', (7287, 7312), False, 'import os\n'), ((7734, 7770), 'numpy.abs', 'np.abs', (['(pressure - 
peak_pressures[i])'], {}), '(pressure - peak_pressures[i])\n', (7740, 7770), True, 'import numpy as np\n'), ((3767, 3808), 're.compile', 're.compile', (['"""\\\\s+(.*?):\\\\s(.*?)\\\\s(.*?)$"""'], {}), "('\\\\s+(.*?):\\\\s(.*?)\\\\s(.*?)$')\n", (3777, 3808), False, 'import re\n'), ((3837, 3870), 're.compile', 're.compile', (['"""\\\\s+(.*?):\\\\s(.*?)$"""'], {}), "('\\\\s+(.*?):\\\\s(.*?)$')\n", (3847, 3870), False, 'import re\n'), ((6188, 6221), 'os.path.join', 'os.path.join', (['base_path', 'csv_file'], {}), '(base_path, csv_file)\n', (6200, 6221), False, 'import os\n')]
|
# flake8: noqa
import pandas as pd
import numpy
import anndata
import os
from scipy import sparse
# -----------------------------------------------------------------#
# General example information
SCHEMA_VERSION = "2.0.0"
FIXTURES_ROOT = os.path.join(os.path.dirname(__file__))
# -----------------------------------------------------------------#
# Pre-made example files
h5ad_dir = os.path.join(FIXTURES_ROOT, "h5ads")
h5ad_valid = os.path.join(h5ad_dir, "example_valid.h5ad")
h5ad_invalid = os.path.join(h5ad_dir, "example_invalid_CL.h5ad")
# -----------------------------------------------------------------#
# Manually creating minimal anndata objects.
#
# The valid objects mentioned below contain all valid cases covered in the schema, including multiple examples for
# fields that allow multiple valid options.
#
# This process entails:
# 1. Creating individual obs components: one valid dataframe, and one with labels (extra columns that are supposed
#    to be added by the validator)
# 2. Creating individual var components: valid, and one with labels
# 3. Creating individual uns valid component
# 4. Creating expression matrices
# 5. Creating valid obsm
# 6. Putting all the components created in the previous steps into minimal anndata that used for testing in
# the unittests
# Valid obs per schema
good_obs = pd.DataFrame(
[
[
"CL:0000066",
"EFO:0009899",
"MONDO:0100096",
"NCBITaxon:9606",
"PATO:0000383",
"UBERON:0002048",
True,
"HANCESTRO:0575",
"HsapDv:0000003",
],
[
"CL:0000192",
"EFO:0010183 (sci-plex)",
"PATO:0000461",
"NCBITaxon:10090",
"unknown",
"CL:0000192 (cell culture)",
False,
"na",
"MmusDv:0000003",
],
],
index=["X", "Y"],
columns=[
"cell_type_ontology_term_id",
"assay_ontology_term_id",
"disease_ontology_term_id",
"organism_ontology_term_id",
"sex_ontology_term_id",
"tissue_ontology_term_id",
"is_primary_data",
"ethnicity_ontology_term_id",
"development_stage_ontology_term_id",
],
)
# Expected obs: this is what the obs above should look like after the validator adds the necessary columns;
# these columns are defined in the schema
obs_expected = pd.DataFrame(
[
[
"epithelial cell",
"10x 3' v2",
"COVID-19",
"Homo sapiens",
"female",
"lung",
"Yoruban",
"Carnegie stage 01",
],
[
"smooth muscle cell",
"single cell library construction (sci-plex)",
"normal",
"Mus musculus",
"unknown",
"smooth muscle cell (cell culture)",
"na",
"Theiler stage 01",
],
],
index=["X", "Y"],
columns=[
"cell_type",
"assay",
"disease",
"organism",
"sex",
"tissue",
"ethnicity",
"development_stage",
],
)
# ---
# 2. Creating individual var components: a valid object and one with labels
# Valid var per schema
good_var = pd.DataFrame(
[
["spike-in", False],
["gene", False],
["gene", False],
["gene", False],
],
index=["ERCC-00002", "ENSG00000127603", "ENSMUSG00000059552", "ENSSASG00005000004"],
columns=["feature_biotype", "feature_is_filtered"],
)
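# Store feature_biotype as a pandas categorical column.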
good_var.loc[:, ["feature_biotype"]] = good_var.astype("category")
# Expected var: this is what the var above should look like after the validator adds the necessary columns;
# these columns are defined in the schema
var_expected = pd.DataFrame(
[
["spike-in", False, "ERCC-00002 (spike-in control)", "NCBITaxon:32630"],
["gene", False, "MACF1", "NCBITaxon:9606"],
["gene", False, "Trp53", "NCBITaxon:10090"],
["gene", False, "S_ENSSASG00005000004", "NCBITaxon:2697049"],
],
index=["ERCC-00002", "ENSG00000127603", "ENSMUSG00000059552", "ENSSASG00005000004"],
columns=[
"feature_biotype",
"feature_is_filtered",
"feature_name",
"feature_reference",
],
)
var_expected.loc[:, ["feature_biotype"]] = var_expected.astype("category")
# ---
# 3. Creating individual uns component
good_uns = {
"schema_version": SCHEMA_VERSION,
"title": "A title",
"default_embedding": "X_umap",
"X_normalization": "CPM",
"X_approximate_distribution": "normal",
"batch_condition": ["is_primary_data"],
}
# ---
# 4. Creating expression matrix,
# X has integer values and non_raw_X has real values
X = numpy.zeros([good_obs.shape[0], good_var.shape[0]])
non_raw_X = sparse.csr_matrix(X.copy())
non_raw_X[0, 0] = 1.5
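# The single fractional entry means non_raw_X no longer looks like raw integer counts (see the "no raw values" fixtures below).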
# ---
# 5.Creating valid obsm
good_obsm = {"X_umap": numpy.zeros([X.shape[0], 2])}
# ---
# 6. Putting all the components created in the previous steps into minimal anndata that used for testing in
# the unittests
# Valid anndata
adata = anndata.AnnData(
X=sparse.csr_matrix(X), obs=good_obs, uns=good_uns, obsm=good_obsm, var=good_var
)
adata.raw = adata
adata.X = non_raw_X
adata.raw.var.drop("feature_is_filtered", axis=1, inplace=True)
# Anndata with "X" and "raw.X" but neither has actual raw values
adata_no_raw_values = anndata.AnnData(
X=sparse.csr_matrix(non_raw_X), obs=good_obs, uns=good_uns, obsm=good_obsm, var=good_var
)
adata_no_raw_values.raw = adata_no_raw_values
adata_no_raw_values.raw.var.drop("feature_is_filtered", axis=1, inplace=True)
# Anndata with no obs nor var
adata_minimal = anndata.AnnData(X=sparse.csr_matrix(X), uns=good_uns, obsm=good_obsm)
# Anndata with a expression matrix that is not raw
adata_non_raw = anndata.AnnData(
X=sparse.csr_matrix(non_raw_X),
obs=good_obs,
uns=good_uns,
obsm=good_obsm,
var=good_var,
)
# Expected anndata with labels that the validator must write in obs and var
adata_with_labels = anndata.AnnData(
X=sparse.csr_matrix(X),
obs=pd.concat([good_obs, obs_expected], axis=1),
var=pd.concat([good_var, var_expected], axis=1),
uns=good_uns,
obsm=good_obsm,
)
|
[
"pandas.DataFrame",
"os.path.dirname",
"numpy.zeros",
"scipy.sparse.csr_matrix",
"os.path.join",
"pandas.concat"
] |
[((385, 421), 'os.path.join', 'os.path.join', (['FIXTURES_ROOT', '"""h5ads"""'], {}), "(FIXTURES_ROOT, 'h5ads')\n", (397, 421), False, 'import os\n'), ((435, 479), 'os.path.join', 'os.path.join', (['h5ad_dir', '"""example_valid.h5ad"""'], {}), "(h5ad_dir, 'example_valid.h5ad')\n", (447, 479), False, 'import os\n'), ((495, 544), 'os.path.join', 'os.path.join', (['h5ad_dir', '"""example_invalid_CL.h5ad"""'], {}), "(h5ad_dir, 'example_invalid_CL.h5ad')\n", (507, 544), False, 'import os\n'), ((1325, 1945), 'pandas.DataFrame', 'pd.DataFrame', (["[['CL:0000066', 'EFO:0009899', 'MONDO:0100096', 'NCBITaxon:9606',\n 'PATO:0000383', 'UBERON:0002048', True, 'HANCESTRO:0575',\n 'HsapDv:0000003'], ['CL:0000192', 'EFO:0010183 (sci-plex)',\n 'PATO:0000461', 'NCBITaxon:10090', 'unknown',\n 'CL:0000192 (cell culture)', False, 'na', 'MmusDv:0000003']]"], {'index': "['X', 'Y']", 'columns': "['cell_type_ontology_term_id', 'assay_ontology_term_id',\n 'disease_ontology_term_id', 'organism_ontology_term_id',\n 'sex_ontology_term_id', 'tissue_ontology_term_id', 'is_primary_data',\n 'ethnicity_ontology_term_id', 'development_stage_ontology_term_id']"}), "([['CL:0000066', 'EFO:0009899', 'MONDO:0100096',\n 'NCBITaxon:9606', 'PATO:0000383', 'UBERON:0002048', True,\n 'HANCESTRO:0575', 'HsapDv:0000003'], ['CL:0000192',\n 'EFO:0010183 (sci-plex)', 'PATO:0000461', 'NCBITaxon:10090', 'unknown',\n 'CL:0000192 (cell culture)', False, 'na', 'MmusDv:0000003']], index=[\n 'X', 'Y'], columns=['cell_type_ontology_term_id',\n 'assay_ontology_term_id', 'disease_ontology_term_id',\n 'organism_ontology_term_id', 'sex_ontology_term_id',\n 'tissue_ontology_term_id', 'is_primary_data',\n 'ethnicity_ontology_term_id', 'development_stage_ontology_term_id'])\n", (1337, 1945), True, 'import pandas as pd\n'), ((2437, 2884), 'pandas.DataFrame', 'pd.DataFrame', (['[[\'epithelial cell\', "10x 3\' v2", \'COVID-19\', \'Homo sapiens\', \'female\',\n \'lung\', \'Yoruban\', \'Carnegie stage 01\'], [\'smooth muscle cell\',\n \'single cell library construction (sci-plex)\', \'normal\', \'Mus musculus\',\n \'unknown\', \'smooth muscle cell (cell culture)\', \'na\', \'Theiler stage 01\']]'], {'index': "['X', 'Y']", 'columns': "['cell_type', 'assay', 'disease', 'organism', 'sex', 'tissue', 'ethnicity',\n 'development_stage']"}), '([[\'epithelial cell\', "10x 3\' v2", \'COVID-19\', \'Homo sapiens\',\n \'female\', \'lung\', \'Yoruban\', \'Carnegie stage 01\'], [\n \'smooth muscle cell\', \'single cell library construction (sci-plex)\',\n \'normal\', \'Mus musculus\', \'unknown\',\n \'smooth muscle cell (cell culture)\', \'na\', \'Theiler stage 01\']], index=\n [\'X\', \'Y\'], columns=[\'cell_type\', \'assay\', \'disease\', \'organism\', \'sex\',\n \'tissue\', \'ethnicity\', \'development_stage\'])\n', (2449, 2884), True, 'import pandas as pd\n'), ((3311, 3547), 'pandas.DataFrame', 'pd.DataFrame', (["[['spike-in', False], ['gene', False], ['gene', False], ['gene', False]]"], {'index': "['ERCC-00002', 'ENSG00000127603', 'ENSMUSG00000059552', 'ENSSASG00005000004']", 'columns': "['feature_biotype', 'feature_is_filtered']"}), "([['spike-in', False], ['gene', False], ['gene', False], [\n 'gene', False]], index=['ERCC-00002', 'ENSG00000127603',\n 'ENSMUSG00000059552', 'ENSSASG00005000004'], columns=['feature_biotype',\n 'feature_is_filtered'])\n", (3323, 3547), True, 'import pandas as pd\n'), ((3829, 4266), 'pandas.DataFrame', 'pd.DataFrame', (["[['spike-in', False, 'ERCC-00002 (spike-in control)', 'NCBITaxon:32630'], [\n 'gene', False, 'MACF1', 
'NCBITaxon:9606'], ['gene', False, 'Trp53',\n 'NCBITaxon:10090'], ['gene', False, 'S_ENSSASG00005000004',\n 'NCBITaxon:2697049']]"], {'index': "['ERCC-00002', 'ENSG00000127603', 'ENSMUSG00000059552', 'ENSSASG00005000004']", 'columns': "['feature_biotype', 'feature_is_filtered', 'feature_name', 'feature_reference']"}), "([['spike-in', False, 'ERCC-00002 (spike-in control)',\n 'NCBITaxon:32630'], ['gene', False, 'MACF1', 'NCBITaxon:9606'], ['gene',\n False, 'Trp53', 'NCBITaxon:10090'], ['gene', False,\n 'S_ENSSASG00005000004', 'NCBITaxon:2697049']], index=['ERCC-00002',\n 'ENSG00000127603', 'ENSMUSG00000059552', 'ENSSASG00005000004'], columns\n =['feature_biotype', 'feature_is_filtered', 'feature_name',\n 'feature_reference'])\n", (3841, 4266), True, 'import pandas as pd\n'), ((4783, 4834), 'numpy.zeros', 'numpy.zeros', (['[good_obs.shape[0], good_var.shape[0]]'], {}), '([good_obs.shape[0], good_var.shape[0]])\n', (4794, 4834), False, 'import numpy\n'), ((252, 277), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (267, 277), False, 'import os\n'), ((4951, 4979), 'numpy.zeros', 'numpy.zeros', (['[X.shape[0], 2]'], {}), '([X.shape[0], 2])\n', (4962, 4979), False, 'import numpy\n'), ((5163, 5183), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X'], {}), '(X)\n', (5180, 5183), False, 'from scipy import sparse\n'), ((5457, 5485), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['non_raw_X'], {}), '(non_raw_X)\n', (5474, 5485), False, 'from scipy import sparse\n'), ((5735, 5755), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X'], {}), '(X)\n', (5752, 5755), False, 'from scipy import sparse\n'), ((5878, 5906), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['non_raw_X'], {}), '(non_raw_X)\n', (5895, 5906), False, 'from scipy import sparse\n'), ((6104, 6124), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X'], {}), '(X)\n', (6121, 6124), False, 'from scipy import sparse\n'), ((6134, 6177), 'pandas.concat', 'pd.concat', (['[good_obs, obs_expected]'], {'axis': '(1)'}), '([good_obs, obs_expected], axis=1)\n', (6143, 6177), True, 'import pandas as pd\n'), ((6187, 6230), 'pandas.concat', 'pd.concat', (['[good_var, var_expected]'], {'axis': '(1)'}), '([good_var, var_expected], axis=1)\n', (6196, 6230), True, 'import pandas as pd\n')]
|
import os
import json
import joblib
import numpy as np
from scipy.stats import rankdata
import scipy.stats
from sklearn.metrics import mean_squared_error
from .... import logger
from ...setup.setup import Session
from .... import MODELS_PATH
MAX_N = 10
class Predictor(object):
def __init__(self, mdl):
self.mdl = mdl
def predict(self, X):
return self.mdl.predict_proba(X)[:, 1]
class DistributionCoincidence(object):
def __init__(self, p, q):
self.p = np.array(p)
self.q = np.array(q)
def jensen_shannon_distance(self):
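        # Jensen-Shannon distance: square root of the mean KL divergence of p and q to their midpoint m.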
p = self.p
q = self.q
m = (p + q) / 2
divergence = (scipy.stats.entropy(p, m) + scipy.stats.entropy(q, m)) / 2
distance = np.sqrt(divergence)
return distance
def mean_squared_error(self):
p = sorted(self.p)
q = sorted(self.q)
mse = mean_squared_error(p, q)
return mse
class ModelEnsembler(object):
def __init__(self):
self.output_path = Session().get_output_path()
self.tags_path = os.path.join(MODELS_PATH, "linkage", "results")
this_mdl_path = os.path.join(self.output_path, "score", "mdl.pkl")
if os.path.exists(this_mdl_path):
logger.debug("Model exists for this dataset")
self.mdl_paths = [this_mdl_path]
self.weights = [1.0]
self.has_mdl = True
else:
logger.debug("Model does not exist for this dataset")
self.mdl_paths = []
self.weights = []
self.has_mdl = False
def _read_columns(self, path):
with open(os.path.join(path, "compare", "columns.json"), "r") as f:
columns = json.load(f)
return columns
def _read_C(self, path):
with open(os.path.join(path, "compare", "C.npy"), "rb") as f:
C = np.load(f)
return C
def _find_pretrained_models_with_same_columns(self):
columns = self._read_columns(self.output_path)
for tag in os.listdir(self.tags_path):
if len(tag) != 36:
continue
pretrained_columns = self._read_columns(os.path.join(self.tags_path, tag))
if columns != pretrained_columns:
continue
yield tag
def _measure_C_coincidence(self, C_0, C_1):
values = []
        n = np.min([C_0.shape[0], C_1.shape[0]])
        idxs_0 = np.random.choice(C_0.shape[0], n, replace=False)
        idxs_1 = np.random.choice(C_1.shape[0], n, replace=False)
for i in range(C_0.shape[1]):
p = C_0[:, i]
q = C_1[:, i]
p = p[idxs_0]
q = q[idxs_1]
values += [DistributionCoincidence(p, q).mean_squared_error()]
return np.array(values)
def _scan_pretrained_models(self):
logger.debug("Scan pretrained models")
C_0 = self._read_C(self.output_path)
R = []
tags = []
for tag in self._find_pretrained_models_with_same_columns():
C_1 = self._read_C(os.path.join(self.tags_path, tag))
coincidence = self._measure_C_coincidence(C_0, C_1)
R += [coincidence]
tags += [tag]
R = np.array(R)
X = np.zeros(R.shape)
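        # Rank candidate models per coincidence column (lower MSE is better), normalise the ranks to [0, 1],
        # and keep the MAX_N models with the smallest median rank.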
for i in range(R.shape[1]):
r = rankdata(R[:, i], method="ordinal")
X[:, i] = r / np.max(r)
S = np.median(X, axis=1)
idxs = np.argsort(S)[:MAX_N]
tags = [tags[i] for i in idxs]
scores = [len(idxs) - i for i in range(len(idxs))] # TODO: Refine score
scores = np.array(scores) / np.max(scores)
for tag, score in zip(tags, scores):
yield tag, score
def _load_model_by_tag(self, tag):
tag_path = os.path.join(self.tags_path, tag)
mdl_path = os.path.join(tag_path, "score", "mdl.pkl")
mdl = joblib.load(mdl_path)
return mdl
def _load_cv_results_by_tag(self, tag):
tag_path = os.path.join(self.tags_path, tag)
cv_results_path = os.path.join(tag_path, "score", "cv_results.json")
with open(cv_results_path, "r") as f:
cv_results = json.load(f)
return cv_results
def items(self):
for tag, score in self._scan_pretrained_models():
mdl = self._load_model_by_tag(tag)
cv_results = self._load_cv_results_by_tag(tag)
prd = Predictor(mdl)
results = {
"tag": tag,
"predictor": prd,
"weight": score,
"cv_results": cv_results,
}
yield results
|
[
"os.listdir",
"numpy.load",
"json.load",
"numpy.median",
"os.path.exists",
"numpy.zeros",
"scipy.stats.rankdata",
"numpy.argsort",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.random.choice",
"joblib.load",
"os.path.join",
"sklearn.metrics.mean_squared_error",
"numpy.sqrt"
] |
[((498, 509), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (506, 509), True, 'import numpy as np\n'), ((527, 538), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (535, 538), True, 'import numpy as np\n'), ((741, 760), 'numpy.sqrt', 'np.sqrt', (['divergence'], {}), '(divergence)\n', (748, 760), True, 'import numpy as np\n'), ((888, 912), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['p', 'q'], {}), '(p, q)\n', (906, 912), False, 'from sklearn.metrics import mean_squared_error\n'), ((1068, 1115), 'os.path.join', 'os.path.join', (['MODELS_PATH', '"""linkage"""', '"""results"""'], {}), "(MODELS_PATH, 'linkage', 'results')\n", (1080, 1115), False, 'import os\n'), ((1140, 1190), 'os.path.join', 'os.path.join', (['self.output_path', '"""score"""', '"""mdl.pkl"""'], {}), "(self.output_path, 'score', 'mdl.pkl')\n", (1152, 1190), False, 'import os\n'), ((1202, 1231), 'os.path.exists', 'os.path.exists', (['this_mdl_path'], {}), '(this_mdl_path)\n', (1216, 1231), False, 'import os\n'), ((2022, 2048), 'os.listdir', 'os.listdir', (['self.tags_path'], {}), '(self.tags_path)\n', (2032, 2048), False, 'import os\n'), ((2367, 2403), 'numpy.min', 'np.min', (['[C_0.shape[0], C_1.shape[1]]'], {}), '([C_0.shape[0], C_1.shape[1]])\n', (2373, 2403), True, 'import numpy as np\n'), ((2421, 2469), 'numpy.random.choice', 'np.random.choice', (['C_0.shape[0]', 'n'], {'replace': '(False)'}), '(C_0.shape[0], n, replace=False)\n', (2437, 2469), True, 'import numpy as np\n'), ((2487, 2535), 'numpy.random.choice', 'np.random.choice', (['C_1.shape[1]', 'n'], {'replace': '(False)'}), '(C_1.shape[1], n, replace=False)\n', (2503, 2535), True, 'import numpy as np\n'), ((2768, 2784), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2776, 2784), True, 'import numpy as np\n'), ((3218, 3229), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (3226, 3229), True, 'import numpy as np\n'), ((3242, 3259), 'numpy.zeros', 'np.zeros', (['R.shape'], {}), '(R.shape)\n', (3250, 3259), True, 'import numpy as np\n'), ((3396, 3416), 'numpy.median', 'np.median', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (3405, 3416), True, 'import numpy as np\n'), ((3759, 3792), 'os.path.join', 'os.path.join', (['self.tags_path', 'tag'], {}), '(self.tags_path, tag)\n', (3771, 3792), False, 'import os\n'), ((3812, 3854), 'os.path.join', 'os.path.join', (['tag_path', '"""score"""', '"""mdl.pkl"""'], {}), "(tag_path, 'score', 'mdl.pkl')\n", (3824, 3854), False, 'import os\n'), ((3869, 3890), 'joblib.load', 'joblib.load', (['mdl_path'], {}), '(mdl_path)\n', (3880, 3890), False, 'import joblib\n'), ((3974, 4007), 'os.path.join', 'os.path.join', (['self.tags_path', 'tag'], {}), '(self.tags_path, tag)\n', (3986, 4007), False, 'import os\n'), ((4034, 4084), 'os.path.join', 'os.path.join', (['tag_path', '"""score"""', '"""cv_results.json"""'], {}), "(tag_path, 'score', 'cv_results.json')\n", (4046, 4084), False, 'import os\n'), ((1710, 1722), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1719, 1722), False, 'import json\n'), ((1862, 1872), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1869, 1872), True, 'import numpy as np\n'), ((3312, 3347), 'scipy.stats.rankdata', 'rankdata', (['R[:, i]'], {'method': '"""ordinal"""'}), "(R[:, i], method='ordinal')\n", (3320, 3347), False, 'from scipy.stats import rankdata\n'), ((3432, 3445), 'numpy.argsort', 'np.argsort', (['S'], {}), '(S)\n', (3442, 3445), True, 'import numpy as np\n'), ((3592, 3608), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3600, 3608), True, 'import numpy as 
np\n'), ((3611, 3625), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (3617, 3625), True, 'import numpy as np\n'), ((4156, 4168), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4165, 4168), False, 'import json\n'), ((1630, 1675), 'os.path.join', 'os.path.join', (['path', '"""compare"""', '"""columns.json"""'], {}), "(path, 'compare', 'columns.json')\n", (1642, 1675), False, 'import os\n'), ((1794, 1832), 'os.path.join', 'os.path.join', (['path', '"""compare"""', '"""C.npy"""'], {}), "(path, 'compare', 'C.npy')\n", (1806, 1832), False, 'import os\n'), ((2158, 2191), 'os.path.join', 'os.path.join', (['self.tags_path', 'tag'], {}), '(self.tags_path, tag)\n', (2170, 2191), False, 'import os\n'), ((3050, 3083), 'os.path.join', 'os.path.join', (['self.tags_path', 'tag'], {}), '(self.tags_path, tag)\n', (3062, 3083), False, 'import os\n'), ((3374, 3383), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (3380, 3383), True, 'import numpy as np\n')]
|
from random import random
import gym
# os.chdir(os.path.join(os.path.abspath(os.path.curdir), "hw1/"))
import load_policy
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tf_util
from keras import Sequential
from keras.layers import Dense
def roll_out(env_name, policy_fn, render=False, max_timesteps=1000):
env = gym.make(env_name)
max_steps = max_timesteps or env.spec.timestep_limit
observations = []
actions = []
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = policy_fn(obs[None, :])
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if steps % 100 == 0: print("%i/%i" % (steps, max_steps))
if render:
env.render()
if steps >= max_steps:
break
print('return: ', totalr)
return {'observations': np.array(observations),
'actions': np.array(actions),
'returns': np.array([totalr])}
def build_network(output_units, hidden_layers, units_per_layer):
model = Sequential()
for _ in range(hidden_layers):
model.add(Dense(units=units_per_layer, activation='relu'))
model.add(Dense(units=output_units))
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['accuracy'])
return model
def report_results(expert_returns, novice_returns, config):
plt.plot(novice_returns)
plt.plot(expert_returns)
plt.xlabel('Rollout')
plt.ylabel('Return')
plt.title('{}: {} demonstrations, network size {}, {} epochs \n'.format(config['env'], config['demos'],
config['nn_size'], config['epochs']))
plt.savefig('dagger_result.png')
def dagger(env_name, num_rollouts=20, max_timesteps=1000, beta_base=0.5, nn_hidden_layers=2, nn_units_per_layer=64,
epochs=5):
def get_beta():
return pow(beta_base, i)
data_set = None
print('loading and building expert policy')
expert_policy = load_policy.load_policy('experts/{}-v1.pkl'.format(env_name.split('-')[0]))
print('loaded and built')
env = gym.make(env_name)
action_dim = env.action_space.shape[0]
novice_policy = build_network(action_dim, nn_hidden_layers, nn_units_per_layer)
def policy(x):
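        # Mixture policy: act with the expert with probability beta = beta_base**i, otherwise with the novice network.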
if get_beta() >= random():
return expert_policy(x)
else:
return novice_policy.predict(x)
with tf.Session():
tf_util.initialize()
# Run DAgger
for i in range(num_rollouts):
print("Rollout: %i/%i" % (i, num_rollouts))
data = roll_out(env_name, policy, max_timesteps=max_timesteps)
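            # DAgger: relabel the visited observations with the expert's actions before aggregating them.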
data['actions'] = expert_policy(data['observations'])
# Aggregate data
if data_set is None:
data_set = data
else:
for key in ('observations', 'actions', 'returns'):
data_set[key] = np.vstack((data_set[key], data[key]))
# Update the novice policy
novice_policy.fit(data_set['observations'], data_set['actions'], verbose=2, epochs=epochs, batch_size=64)
print('reporting results')
config = {'env': env_name,
'demos': data_set['observations'].shape[0],
'nn_size': '({}x{})'.format(nn_hidden_layers, nn_units_per_layer),
'epochs': epochs}
report_results(data_set['returns'][0], data_set['returns'], config)
return novice_policy
if __name__ == "__main__":
pi = dagger('Humanoid-v2', num_rollouts=200, nn_units_per_layer=256)
|
[
"gym.make",
"matplotlib.pyplot.plot",
"keras.Sequential",
"tensorflow.Session",
"random.random",
"numpy.vstack",
"keras.layers.Dense",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"tf_util.initialize"
] |
[((354, 372), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (362, 372), False, 'import gym\n'), ((1153, 1165), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (1163, 1165), False, 'from keras import Sequential\n'), ((1515, 1539), 'matplotlib.pyplot.plot', 'plt.plot', (['novice_returns'], {}), '(novice_returns)\n', (1523, 1539), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1568), 'matplotlib.pyplot.plot', 'plt.plot', (['expert_returns'], {}), '(expert_returns)\n', (1552, 1568), True, 'import matplotlib.pyplot as plt\n'), ((1573, 1594), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rollout"""'], {}), "('Rollout')\n", (1583, 1594), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1619), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Return"""'], {}), "('Return')\n", (1609, 1619), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1880), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dagger_result.png"""'], {}), "('dagger_result.png')\n", (1859, 1880), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2299), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (2289, 2299), False, 'import gym\n'), ((965, 987), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (973, 987), True, 'import numpy as np\n'), ((1012, 1029), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1020, 1029), True, 'import numpy as np\n'), ((1054, 1072), 'numpy.array', 'np.array', (['[totalr]'], {}), '([totalr])\n', (1062, 1072), True, 'import numpy as np\n'), ((1283, 1308), 'keras.layers.Dense', 'Dense', ([], {'units': 'output_units'}), '(units=output_units)\n', (1288, 1308), False, 'from keras.layers import Dense\n'), ((2586, 2598), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2596, 2598), True, 'import tensorflow as tf\n'), ((2608, 2628), 'tf_util.initialize', 'tf_util.initialize', ([], {}), '()\n', (2626, 2628), False, 'import tf_util\n'), ((1219, 1266), 'keras.layers.Dense', 'Dense', ([], {'units': 'units_per_layer', 'activation': '"""relu"""'}), "(units=units_per_layer, activation='relu')\n", (1224, 1266), False, 'from keras.layers import Dense\n'), ((2472, 2480), 'random.random', 'random', ([], {}), '()\n', (2478, 2480), False, 'from random import random\n'), ((3104, 3141), 'numpy.vstack', 'np.vstack', (['(data_set[key], data[key])'], {}), '((data_set[key], data[key]))\n', (3113, 3141), True, 'import numpy as np\n')]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module for interfacing with a remote extractor."""
import logging
from typing import Optional, Callable, List
import numpy as np
from .utils import generate_wsr, bitarray_to_bytes
from .baserandomservice import BaseRandomService
from .cqcextractorjob import CQCExtractorJob
logger = logging.getLogger(__name__)
class CQCExtractor(BaseRandomService):
"""Class for interfacing with a CQC remote extractor.
There are two extractor methods - Dodis (extractor 1) and Hayashi (extractor 2).
These methods can be invoked synchronously or asynchronously.
To invoke them synchronously::
random_bits = extractor.run(*cqc_parameters)
To invoke them asynchronously::
import numpy as np
extractor1_out = extractor.run_async_ext1(*ext1_parameters).block_until_ready()
extractor2_out = extractor.run_async_ext2(
ext2_seed=extractor1_out, *ext2_parameters).block_until_ready()
random_bits = np.append(extractor1_out, extractor2_out)
Running them asynchronously takes more steps because extractor 2 uses the
output of extractor 1 as its seed, so it must wait for extractor 1 to finish first.
"""
def run( # type: ignore[override]
self,
ext1_input_num_bits: int,
ext1_output_num_bits: int,
ext1_raw_bytes: bytes,
ext1_wsr_bytes: bytes,
ext2_seed_num_bits: int,
ext2_wsr_multiplier: int,
ext2_wsr_generator: Optional[Callable] = None
) -> List[int]:
"""Process input data synchronously.
Args:
ext1_input_num_bits: Number of input bits, for extractor 1.
ext1_output_num_bits: Number of output bits, for extractor 1.
ext1_raw_bytes: Initial random numbers, in bytes, for extractor 1.
ext1_wsr_bytes: Initial WSRs, in bytes, for extractor 1.
ext2_seed_num_bits: Number of bits in the seed, for extractor 2.
ext2_wsr_multiplier: WSR multiplier, for extractor 2. The number of
bits used by extractor 2 is ext2_seed_num_bits*ext2_wsr_multiplier.
ext2_wsr_generator: WSR generator used for extractor 2. It must take the
number of bits as the input and a list of random bits (0s and 1s)
                as the output. If ``None``, :func:`generate_wsr` is used.
Returns:
An instance of ``CQCExtractorJob`` which can be used to retrieve the
results later.
"""
# pylint: disable=arguments-differ
# Run ext1
output = self.run_async_ext1(ext1_input_num_bits, ext1_output_num_bits,
ext1_raw_bytes, ext1_wsr_bytes).block_until_ready()
# Run ext2 if requested.
if ext2_wsr_multiplier != 0:
ext2_out = self.run_async_ext2(
output, ext2_seed_num_bits, ext2_wsr_multiplier,
ext2_wsr_generator).block_until_ready()
output = np.append(output, ext2_out).tolist()
return output
def run_async_ext1(
self,
ext1_input_num_bits: int,
ext1_output_num_bits: int,
ext1_raw_bytes: bytes,
ext1_wsr_bytes: bytes
) -> CQCExtractorJob:
"""Run the first extractor asynchronously.
Args:
ext1_input_num_bits: Number of input bits, for extractor 1.
ext1_output_num_bits: Number of output bits, for extractor 1.
ext1_raw_bytes: Initial random numbers, in bytes, for extractor 1.
ext1_wsr_bytes: Initial WSRs, in bytes, for extractor 1.
Returns:
An instance of ``CQCExtractorJob`` which can be used to retrieve the
results later.
Raises:
            ValueError: If invalid argument values are specified.
"""
if not ext1_input_num_bits or not ext1_output_num_bits:
raise ValueError("Invalid input arguments. ext1_input_num_bits and "
"ext1_output_num_bits must be non-zero.")
logger.info("Starting first extraction.")
# Run ext1
ext1_data = {"n": ext1_input_num_bits,
"m": ext1_output_num_bits}
ext1_files = {"x": ext1_raw_bytes,
"y": ext1_wsr_bytes}
response = self._client.extract(
name='cqc', method='ext1', data=ext1_data, files=ext1_files)
parameters = {'ext1_input_num_bits': ext1_input_num_bits,
'ext1_output_num_bits': ext1_output_num_bits,
'ext1_raw_bytes': ext1_raw_bytes,
'ext1_wsr_bytes': ext1_wsr_bytes}
return CQCExtractorJob(job_id=response['id'], client=self._client, parameters=parameters)
def run_async_ext2(
self,
ext2_seed: List[int],
ext2_seed_num_bits: int,
ext2_wsr_multiplier: int,
ext2_wsr_generator: Optional[Callable] = None
) -> CQCExtractorJob:
"""Run the second extractor asynchronously.
Args:
ext2_seed: Seed used for extractor 2, such as the output of extractor 1.
ext2_seed_num_bits: Number of bits in the seed, for extractor 2.
ext2_wsr_multiplier: WSR multiplier, for extractor 2. The number of
bits used by extractor 2 is ext2_seed_num_bits*ext2_wsr_multiplier.
ext2_wsr_generator: WSR generator used for extractor 2. It must take the
number of bits as the input and a list of random bits (0s and 1s)
                as the output. If ``None``, :func:`generate_wsr` is used.
Returns:
An instance of ``CQCExtractorJob`` which can be used to retrieve the
results later.
Raises:
ValueError: If an invalid argument values are specified.
"""
if not ext2_seed_num_bits or not ext2_wsr_multiplier:
raise ValueError("Invalid input arguments. ext2_seed_num_bits and "
"ext2_wsr_multiplier must be non-zero.")
logger.info("Starting second extraction.")
ext2_seed = bitarray_to_bytes(ext2_seed[:ext2_seed_num_bits]) # type: ignore[assignment]
if ext2_wsr_generator is None:
ext2_wsr_generator = generate_wsr
ext2_wsr = ext2_wsr_generator(ext2_seed_num_bits*ext2_wsr_multiplier)
ext2_wsr = bitarray_to_bytes(ext2_wsr)
ext2_data = {"a": ext2_seed_num_bits,
"b": ext2_wsr_multiplier}
ext2_files = {"r": ext2_seed,
"x": ext2_wsr}
response = self._client.extract(name='cqc', method='ext2',
data=ext2_data, files=ext2_files)
parameters = {'ext2_seed_num_bits': ext2_seed_num_bits,
'ext2_wsr_multiplier': ext2_wsr_multiplier,
'ext2_seed_bytes': ext2_seed,
'ext2_wsr': ext2_wsr}
return CQCExtractorJob(job_id=response['id'], client=self._client, parameters=parameters)
def retrieve_job(self, job_id: str) -> CQCExtractorJob:
"""Retrieve a previously submitted job.
Args:
job_id: Job ID.
Returns:
A ``CQCExtractorJob`` instance.
"""
return CQCExtractorJob(job_id, self._client)
def __repr__(self) -> str:
return "<{}('{}') from {}>".format(self.__class__.__name__,
self.name,
self._provider)
|
[
"numpy.append",
"logging.getLogger"
] |
[((768, 795), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (785, 795), False, 'import logging\n'), ((3478, 3505), 'numpy.append', 'np.append', (['output', 'ext2_out'], {}), '(output, ext2_out)\n', (3487, 3505), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
def plot_curve(data, plot_file, keys=None,
clip=True, label_min=True, label_end=True):
if not keys:
keys = data.keys()
plt.figure()
for i,key in enumerate(keys):
plt.subplot(len(keys),1,i+1)
if clip:
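            # Clip to twice the mean absolute value so isolated spikes do not dominate the axis range.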
limit = 2*np.mean(np.abs(data[key]))
y = np.clip(data[key],-limit,limit)
else:
y = data[key]
plt.plot(y, linewidth=1.,label=key)
if label_min:
plt.plot(np.argmin(data[key]),np.min(data[key]),'o',
label="min: {:.3g}".format(np.min(data[key])))
if label_end:
plt.plot(len(data[key])-1,data[key][-1],'o',
label="end: {:.3g}".format(data[key][-1]))
plt.legend()
plt.savefig(plot_file)
plt.close()
def plot_sample(data, plot_file, groups, num_points=20):
plt.figure()
for i,keys in enumerate(groups):
plt.subplot(len(groups),1,i+1)
for key in keys:
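            # Subsample each series to roughly num_points evenly spaced values before plotting.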
interval = int(data[key].shape[0]/num_points)
y = data[key][::interval]
plt.plot(y, linewidth=1., label=key)
plt.legend()
plt.savefig(plot_file)
plt.close()
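# Illustrative usage (a sketch only; the variable names below are hypothetical):
# plot_curve({'train_loss': train_loss, 'val_loss': val_loss}, 'curves.png')
# plot_sample(history, 'samples.png', groups=[['pred', 'target']], num_points=20)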
|
[
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.clip",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.savefig"
] |
[((204, 216), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (214, 216), True, 'import matplotlib.pyplot as plt\n'), ((807, 829), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_file'], {}), '(plot_file)\n', (818, 829), True, 'import matplotlib.pyplot as plt\n'), ((834, 845), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (843, 845), True, 'import matplotlib.pyplot as plt\n'), ((908, 920), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (918, 920), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1214), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_file'], {}), '(plot_file)\n', (1203, 1214), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1230), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1228, 1230), True, 'import matplotlib.pyplot as plt\n'), ((450, 487), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'linewidth': '(1.0)', 'label': 'key'}), '(y, linewidth=1.0, label=key)\n', (458, 487), True, 'import matplotlib.pyplot as plt\n'), ((790, 802), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (800, 802), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1187), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1185, 1187), True, 'import matplotlib.pyplot as plt\n'), ((370, 403), 'numpy.clip', 'np.clip', (['data[key]', '(-limit)', 'limit'], {}), '(data[key], -limit, limit)\n', (377, 403), True, 'import numpy as np\n'), ((1130, 1167), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'linewidth': '(1.0)', 'label': 'key'}), '(y, linewidth=1.0, label=key)\n', (1138, 1167), True, 'import matplotlib.pyplot as plt\n'), ((529, 549), 'numpy.argmin', 'np.argmin', (['data[key]'], {}), '(data[key])\n', (538, 549), True, 'import numpy as np\n'), ((550, 567), 'numpy.min', 'np.min', (['data[key]'], {}), '(data[key])\n', (556, 567), True, 'import numpy as np\n'), ((335, 352), 'numpy.abs', 'np.abs', (['data[key]'], {}), '(data[key])\n', (341, 352), True, 'import numpy as np\n'), ((620, 637), 'numpy.min', 'np.min', (['data[key]'], {}), '(data[key])\n', (626, 637), True, 'import numpy as np\n')]
|
'''
Date: Feb, 2020
Author: <NAME>, <NAME>
This file is originally from "Double DIP" (https://github.com/yossigandelsman/DoubleDIP).
Some modifications were made to define the baselines.
'''
import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageFilter
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
# import skvideo.io
import os
import math
matplotlib.use('agg')
def gaussian_kernel(size, sigma=2., dim=2, channels=3):
#https://github.com/kechan/FastaiPlayground/blob/master/Quick%20Tour%20of%20Data%20Augmentation.ipynb
# The gaussian kernel is the product of the gaussian function of each dimension.
# kernel_size should be an odd number.
kernel_size = 2*size + 1
kernel_size = [kernel_size] * dim
sigma = [sigma] * dim
kernel = 1
meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
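    # Multiply the 1-D Gaussian of each dimension over the meshgrid to form the separable N-D kernel.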
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2)
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
return kernel
def smooth_image_torch(x, k_size=3):
kernel = gaussian_kernel(size=k_size).to(str(x.device))
kernel_size = 2*k_size + 1
padding = (kernel_size - 1) // 2
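    # Reflect-pad so the depthwise (groups=3) Gaussian convolution preserves the spatial size.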
x = F.pad(x, (padding, padding, padding, padding), mode='reflect')
x = F.conv2d(x, kernel, groups=3)
return x
def renormalize(img):
if torch.is_tensor(img):
min_val = torch.min(img.reshape(-1))
max_val = torch.max(img.reshape(-1))
img = (img-min_val)*(1 / (max_val-min_val + 1e-20))
else: #numpy
min_val = np.min(img.flatten())
max_val = np.max(img.flatten())
new_max = 255 if max_val > 1 else 1
img = (img-min_val)*(new_max / (max_val-min_val + 1e-20))
return img
def edge_image(img_np):
img = np_to_pil(img_np)
img = img.filter(ImageFilter.FIND_EDGES)
return pil_to_np(img)
def blur_image(img_np, r=2):
img = np_to_pil(img_np)
img = img.filter(ImageFilter.GaussianBlur(radius=r))
return pil_to_np(img)
def compute_psnr(img1_np, img2_np):
if img1_np is None or img2_np is None:
return -1.0
else:
return peak_signal_noise_ratio(img1_np, img2_np)
def compute_ssim(img1_np, img2_np):
if img1_np is None or img2_np is None:
return -1.0
assert(img1_np.shape == img2_np.shape)
if len(img1_np.shape) < 3:
        return structural_similarity(img1_np, img2_np)
else:
# matplotlib input img shape (W, H, C)
if img1_np.shape[0] == 1 or img1_np.shape[0] == 3:
img1_np = img1_np.transpose(1,2,0)
if img2_np.shape[0] == 1 or img2_np.shape[0] == 3:
img2_np = img2_np.transpose(1,2,0)
return structural_similarity(img1_np, img2_np, multichannel=True)
def rgb_to_gray(rgb):
if len(rgb.shape) == 4: # tensor
assert(rgb.shape[1]== 3)
gray = rgb[:, [0], :, :] * 299/1000 + rgb[:, [1], :, :] * 587/1000 +rgb[:, [2], :, :] * 114/1000
else: # np
assert(rgb.shape[0]== 3)
gray = rgb[[0], :, :] * 299/1000 + rgb[[1], :, :] * 587/1000 +rgb[[2], :, :] * 114/1000
return gray
def crop_image(img, d=32):
"""
Make dimensions divisible by d
:param pil img:
:param d:
:return:
"""
new_size = (img.size[0] - img.size[0] % d,
img.size[1] - img.size[1] % d)
bbox = [
int((img.size[0] - new_size[0]) / 2),
int((img.size[1] - new_size[1]) / 2),
int((img.size[0] + new_size[0]) / 2),
int((img.size[1] + new_size[1]) / 2),
]
img_cropped = img.crop(bbox)
return img_cropped
def crop_np_image(img_np, d=32):
return torch_to_np(crop_torch_image(np_to_torch(img_np), d))
def crop_torch_image(img, d=32):
"""
Make dimensions divisible by d
image is [1, 3, W, H] or [3, W, H]
:param pil img:
:param d:
:return:
"""
new_size = (img.shape[-2] - img.shape[-2] % d,
img.shape[-1] - img.shape[-1] % d)
pad = ((img.shape[-2] - new_size[-2]) // 2, (img.shape[-1] - new_size[-1]) // 2)
if len(img.shape) == 4:
return img[:, :, pad[-2]: pad[-2] + new_size[-2], pad[-1]: pad[-1] + new_size[-1]]
assert len(img.shape) == 3
return img[:, pad[-2]: pad[-2] + new_size[-2], pad[-1]: pad[-1] + new_size[-1]]
def get_params(opt_over, net, net_input, downsampler=None):
"""
Returns parameters that we want to optimize over.
:param opt_over: comma separated list, e.g. "net,input" or "net"
:param net: network
:param net_input: torch.Tensor that stores input `z`
:param downsampler:
:return:
"""
opt_over_list = opt_over.split(',')
params = []
for opt in opt_over_list:
if opt == 'net':
params += [x for x in net.parameters()]
elif opt == 'down':
assert downsampler is not None
params = [x for x in downsampler.parameters()]
elif opt == 'input':
net_input.requires_grad = True
params += [net_input]
else:
assert False, 'what is it?'
return params
def get_image_grid(images_np, nrow=8):
"""
Creates a grid from a list of images by concatenating them.
:param images_np:
:param nrow:
:return:
"""
images_torch = [torch.from_numpy(x).type(torch.FloatTensor) for x in images_np]
torch_grid = torchvision.utils.make_grid(images_torch, nrow)
return torch_grid.numpy()
def plot_image_grid(name, images_np, interpolation='lanczos', image_mode='rgb', output_path=None, show=True):
"""
Draws images in a grid
Args:
images_np: list of images, each image is np.array of size 3xHxW or 1xHxW
nrow: how many images will be in one row
interpolation: interpolation used in plt.imshow
"""
# assert len(images_np) == 2
n_channels = max(x.shape[0] for x in images_np)
assert (n_channels == 3) or (n_channels == 1), "images should have 1 or 3 channels"
if image_mode=='lab':
for i in range(len(images_np)):
im_p = np_to_pil(images_np[i], image_mode)
im_p = lab_to_rgb(im_p)
images_np[i] = pil_to_np(im_p)
images_np = [x if (x.shape[0] == n_channels) else np.concatenate([x, x, x], axis=0) for x in images_np]
grid = get_image_grid(images_np, len(images_np))
if images_np[0].shape[0] == 1:
plt.imshow(grid[0], cmap='gray', interpolation=interpolation)
else:
plt.imshow(grid.transpose(1, 2, 0), interpolation=interpolation)
plt.title(name)
plt.axis('off')
if output_path != None:
if not os.path.exists(output_path):
os.makedirs(output_path)
plt.savefig(os.path.join(output_path,"{}.png".format(name)))
if show:
plt.show()
plt.close()
def save_image(name, image_np, image_mode='rgb', output_path="output", show=False):
p = np_to_pil(image_np, image_mode)
p = lab_to_rgb(p) if p.mode.lower()=='lab' else p
if show:
plt.imshow(p)
plt.show()
if output_path != None:
if not os.path.exists(output_path):
os.makedirs(output_path)
p.save(os.path.join(output_path,"{}.jpg".format(name)))
plt.close()
def video_to_images(file_name, name):
video = prepare_video(file_name)
for i, f in enumerate(video):
save_image(name + "_{0:03d}".format(i), f)
def images_to_video(images_dir ,name, gray=True):
num = len(glob.glob(images_dir +"/*.jpg"))
c = []
for i in range(num):
if gray:
img = prepare_gray_image(images_dir + "/"+ name +"_{}.jpg".format(i))
else:
img = prepare_image(images_dir + "/"+name+"_{}.jpg".format(i))
print(img.shape)
c.append(img)
save_video(name, np.array(c))
def save_heatmap(name, image_np):
cmap = plt.get_cmap('jet')
rgba_img = cmap(image_np)
rgb_img = np.delete(rgba_img, 3, 2)
save_image(name, rgb_img.transpose(2, 0, 1))
def save_graph(name, graph_list, output_path="output", show=True):
plt.clf()
plt.plot(graph_list)
if show:
plt.show()
if output_path != None:
if not os.path.exists(output_path):
os.makedirs(output_path)
plt.savefig(os.path.join(output_path, name + ".png"))
plt.close()
def create_augmentations(np_image):
"""
    convention: original, its 90/180/270-degree rotations, then the horizontal flip and its three rotations
:param np_image:
:return:
"""
aug = [np_image.copy(), np.rot90(np_image, 1, (1, 2)).copy(),
np.rot90(np_image, 2, (1, 2)).copy(), np.rot90(np_image, 3, (1, 2)).copy()]
flipped = np_image[:,::-1, :].copy()
aug += [flipped.copy(), np.rot90(flipped, 1, (1, 2)).copy(), np.rot90(flipped, 2, (1, 2)).copy(), np.rot90(flipped, 3, (1, 2)).copy()]
return aug
def create_video_augmentations(np_video):
"""
convention: original, left, upside-down, right, rot1, rot2, rot3
:param np_video:
:return:
"""
aug = [np_video.copy(), np.rot90(np_video, 1, (2, 3)).copy(),
np.rot90(np_video, 2, (2, 3)).copy(), np.rot90(np_video, 3, (2, 3)).copy()]
flipped = np_video[:, :, ::-1, :].copy()
aug += [flipped.copy(), np.rot90(flipped, 1, (2, 3)).copy(), np.rot90(flipped, 2, (2, 3)).copy(),
np.rot90(flipped, 3, (2, 3)).copy()]
return aug
def save_graphs(name, graph_dict, output_path="output/"):
"""
:param name:
:param dict graph_dict: a dict from the name of the list to the list itself.
:return:
"""
plt.clf()
fig, ax = plt.subplots()
for k, v in graph_dict.items():
ax.plot(v, label=k)
# ax.semilogy(v, label=k)
ax.set_xlabel('iterations')
# ax.set_ylabel(name)
ax.set_ylabel('MSE-loss')
# ax.set_ylabel('PSNR')
plt.legend()
plt.savefig(output_path + name + ".png")
def load(path):
"""Load PIL image."""
img = Image.open(path)
return img
def get_image(path, imsize=-1):
"""Load an image and resize to a cpecific size.
Args:
path: path to image
imsize: tuple or scalar with dimensions; -1 for `no resize`
"""
img = load(path)
if isinstance(imsize, int):
imsize = (imsize, imsize)
if imsize[0] != -1 and img.size != imsize:
if imsize[0] > img.size[0]:
img = img.resize(imsize, Image.BICUBIC)
else:
img = img.resize(imsize, Image.ANTIALIAS)
img_np = pil_to_np(img)
return img, img_np
# def prepare_image(file_name, imsize=-1):
# """
# loads makes it divisible
# :param file_name:
# :return: the numpy representation of the image
# """
# img_pil = crop_image(get_image(file_name, imsize)[0], d=32)
# return pil_to_np(img_pil)
def prepare_image(file_name, imsize=-1, image_mode='rgb'):
"""
loads makes it divisible
:param file_name:
:return: the numpy representation of the image
"""
try:
img_pil = crop_image(get_image(file_name, imsize)[0], d=32)
img_pil = img_pil.convert('L') if image_mode == 'L' else img_pil
img_pil = rgb_to_lab(img_pil) if image_mode=='lab' else img_pil
return pil_to_np(img_pil)
except:
print("Cannot find or open the file: ", file_name)
return None
def prepare_video(file_name, folder="output/"):
data = skvideo.io.vread(folder + file_name)
return crop_torch_image(data.transpose(0, 3, 1, 2).astype(np.float32) / 255.)[:35]
def save_video(name, video_np, output_path="output/"):
outputdata = video_np * 255
outputdata = outputdata.astype(np.uint8)
skvideo.io.vwrite(output_path + "{}.mp4".format(name), outputdata.transpose(0, 2, 3, 1))
def prepare_gray_image(file_name):
img = prepare_image(file_name)
return np.array([np.mean(img, axis=0)])
def rgb_to_lab(rgb_pil):
from PIL import Image, ImageCms
assert(rgb_pil.mode != 'rgb')
srgb_p = ImageCms.createProfile("sRGB")
lab_p = ImageCms.createProfile("LAB")
rgb2lab = ImageCms.buildTransformFromOpenProfiles(srgb_p, lab_p, "RGB", "LAB")
lab_pil = ImageCms.applyTransform(rgb_pil, rgb2lab)
del lab_p, srgb_p, rgb2lab
return lab_pil
def lab_to_rgb(lab_pil):
from PIL import Image, ImageCms
assert(lab_pil.mode != 'lab')
lab_p2 = ImageCms.createProfile("LAB")
srgb_p2 = ImageCms.createProfile("sRGB")
lab2rgb = ImageCms.buildTransformFromOpenProfiles(lab_p2, srgb_p2, "LAB", "RGB")
rgb_pil = ImageCms.applyTransform(lab_pil, lab2rgb)
del lab_p2, srgb_p2, lab2rgb
return rgb_pil
def pil_to_np(img_PIL, with_transpose=True):
"""
Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
"""
ar = np.array(img_PIL)
if len(ar.shape) == 3 and ar.shape[-1] == 4:
ar = ar[:, :, :3]
# this is alpha channel
if with_transpose:
if len(ar.shape) == 3:
ar = ar.transpose(2, 0, 1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def median(img_np_list):
"""
assumes C x W x H [0..1]
:param img_np_list:
:return:
"""
assert len(img_np_list) > 0
l = len(img_np_list)
shape = img_np_list[0].shape
result = np.zeros(shape)
for c in range(shape[0]):
for w in range(shape[1]):
for h in range(shape[2]):
result[c, w, h] = sorted(i[c, w, h] for i in img_np_list)[l//2]
return result
def average(img_np_list):
"""
assumes C x W x H [0..1]
:param img_np_list:
:return:
"""
assert len(img_np_list) > 0
l = len(img_np_list)
shape = img_np_list[0].shape
result = np.zeros(shape)
for i in img_np_list:
result += i
return result / l
def np_to_pil(img_np, img_mode='rgb'):
"""
Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
:param img_np:
:return:
"""
ar = np.clip(img_np * 255, 0, 255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
mode = 'L'
else:
assert img_np.shape[0] == 3, img_np.shape
ar = ar.transpose(1, 2, 0)
mode = img_mode.upper()
return Image.fromarray(ar, mode=mode)
def np_to_torch(img_np):
"""
Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
:param img_np:
:return:
"""
return torch.from_numpy(img_np)[None, :]
def torch_to_np(img_var):
"""
Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
:param img_var:
:return:
"""
return img_var.detach().cpu().numpy()[0]
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"PIL.ImageCms.applyTransform",
"numpy.clip",
"numpy.mean",
"numpy.rot90",
"torch.arange",
"glob.glob",
"os.path.join",
"torch.nn.functional.pad",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
"os.path.exists",
"torch.exp",
"torch.is_tensor",
"matplotlib.pyplot.subplots",
"PIL.ImageFilter.GaussianBlur",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"PIL.ImageCms.buildTransformFromOpenProfiles",
"math.sqrt",
"matplotlib.pyplot.legend",
"torch.nn.functional.conv2d",
"matplotlib.use",
"torch.sum",
"numpy.delete",
"numpy.concatenate",
"torch.from_numpy",
"os.makedirs",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.axis",
"PIL.Image.open",
"torchvision.utils.make_grid",
"skimage.metrics.structural_similarity",
"numpy.array",
"PIL.Image.fromarray",
"PIL.ImageCms.createProfile",
"skimage.metrics.peak_signal_noise_ratio",
"matplotlib.pyplot.savefig"
] |
[((517, 538), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (531, 538), False, 'import matplotlib\n'), ((1683, 1745), 'torch.nn.functional.pad', 'F.pad', (['x', '(padding, padding, padding, padding)'], {'mode': '"""reflect"""'}), "(x, (padding, padding, padding, padding), mode='reflect')\n", (1688, 1745), True, 'import torch.nn.functional as F\n'), ((1751, 1780), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'kernel'], {'groups': '(3)'}), '(x, kernel, groups=3)\n', (1759, 1780), True, 'import torch.nn.functional as F\n'), ((1823, 1843), 'torch.is_tensor', 'torch.is_tensor', (['img'], {}), '(img)\n', (1838, 1843), False, 'import torch\n'), ((5845, 5892), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images_torch', 'nrow'], {}), '(images_torch, nrow)\n', (5872, 5892), False, 'import torchvision\n'), ((7014, 7029), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (7023, 7029), True, 'import matplotlib.pyplot as plt\n'), ((7034, 7049), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7042, 7049), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7278), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7276, 7278), True, 'import matplotlib.pyplot as plt\n'), ((7698, 7709), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7707, 7709), True, 'import matplotlib.pyplot as plt\n'), ((8321, 8340), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (8333, 8340), True, 'import matplotlib.pyplot as plt\n'), ((8386, 8411), 'numpy.delete', 'np.delete', (['rgba_img', '(3)', '(2)'], {}), '(rgba_img, 3, 2)\n', (8395, 8411), True, 'import numpy as np\n'), ((8534, 8543), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8541, 8543), True, 'import matplotlib.pyplot as plt\n'), ((8548, 8568), 'matplotlib.pyplot.plot', 'plt.plot', (['graph_list'], {}), '(graph_list)\n', (8556, 8568), True, 'import matplotlib.pyplot as plt\n'), ((8779, 8790), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8788, 8790), True, 'import matplotlib.pyplot as plt\n'), ((10030, 10039), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10037, 10039), True, 'import matplotlib.pyplot as plt\n'), ((10054, 10068), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10066, 10068), True, 'import matplotlib.pyplot as plt\n'), ((10287, 10299), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10297, 10299), True, 'import matplotlib.pyplot as plt\n'), ((10304, 10344), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_path + name + '.png')"], {}), "(output_path + name + '.png')\n", (10315, 10344), True, 'import matplotlib.pyplot as plt\n'), ((10399, 10415), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (10409, 10415), False, 'from PIL import Image, ImageCms\n'), ((12417, 12447), 'PIL.ImageCms.createProfile', 'ImageCms.createProfile', (['"""sRGB"""'], {}), "('sRGB')\n", (12439, 12447), False, 'from PIL import Image, ImageCms\n'), ((12461, 12490), 'PIL.ImageCms.createProfile', 'ImageCms.createProfile', (['"""LAB"""'], {}), "('LAB')\n", (12483, 12490), False, 'from PIL import Image, ImageCms\n'), ((12505, 12573), 'PIL.ImageCms.buildTransformFromOpenProfiles', 'ImageCms.buildTransformFromOpenProfiles', (['srgb_p', 'lab_p', '"""RGB"""', '"""LAB"""'], {}), "(srgb_p, lab_p, 'RGB', 'LAB')\n", (12544, 12573), False, 'from PIL import Image, ImageCms\n'), ((12588, 12629), 'PIL.ImageCms.applyTransform', 'ImageCms.applyTransform', (['rgb_pil', 'rgb2lab'], {}), 
'(rgb_pil, rgb2lab)\n', (12611, 12629), False, 'from PIL import Image, ImageCms\n'), ((12791, 12820), 'PIL.ImageCms.createProfile', 'ImageCms.createProfile', (['"""LAB"""'], {}), "('LAB')\n", (12813, 12820), False, 'from PIL import Image, ImageCms\n'), ((12838, 12868), 'PIL.ImageCms.createProfile', 'ImageCms.createProfile', (['"""sRGB"""'], {}), "('sRGB')\n", (12860, 12868), False, 'from PIL import Image, ImageCms\n'), ((12883, 12953), 'PIL.ImageCms.buildTransformFromOpenProfiles', 'ImageCms.buildTransformFromOpenProfiles', (['lab_p2', 'srgb_p2', '"""LAB"""', '"""RGB"""'], {}), "(lab_p2, srgb_p2, 'LAB', 'RGB')\n", (12922, 12953), False, 'from PIL import Image, ImageCms\n'), ((12968, 13009), 'PIL.ImageCms.applyTransform', 'ImageCms.applyTransform', (['lab_pil', 'lab2rgb'], {}), '(lab_pil, lab2rgb)\n', (12991, 13009), False, 'from PIL import Image, ImageCms\n'), ((13229, 13246), 'numpy.array', 'np.array', (['img_PIL'], {}), '(img_PIL)\n', (13237, 13246), True, 'import numpy as np\n'), ((13745, 13760), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (13753, 13760), True, 'import numpy as np\n'), ((14174, 14189), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (14182, 14189), True, 'import numpy as np\n'), ((14713, 14743), 'PIL.Image.fromarray', 'Image.fromarray', (['ar'], {'mode': 'mode'}), '(ar, mode=mode)\n', (14728, 14743), False, 'from PIL import Image, ImageCms\n'), ((1321, 1338), 'torch.sum', 'torch.sum', (['kernel'], {}), '(kernel)\n', (1330, 1338), False, 'import torch\n'), ((2425, 2459), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], {'radius': 'r'}), '(radius=r)\n', (2449, 2459), False, 'from PIL import Image, ImageFilter\n'), ((2615, 2656), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['img1_np', 'img2_np'], {}), '(img1_np, img2_np)\n', (2638, 2656), False, 'from skimage.metrics import peak_signal_noise_ratio, structural_similarity\n'), ((2854, 2893), 'skimage.metrics.structural_similarity', 'structural_similarity', (['sim1_np', 'img2_np'], {}), '(sim1_np, img2_np)\n', (2875, 2893), False, 'from skimage.metrics import peak_signal_noise_ratio, structural_similarity\n'), ((3182, 3240), 'skimage.metrics.structural_similarity', 'structural_similarity', (['img1_np', 'img2_np'], {'multichannel': '(True)'}), '(img1_np, img2_np, multichannel=True)\n', (3203, 3240), False, 'from skimage.metrics import peak_signal_noise_ratio, structural_similarity\n'), ((6860, 6921), 'matplotlib.pyplot.imshow', 'plt.imshow', (['grid[0]'], {'cmap': '"""gray"""', 'interpolation': 'interpolation'}), "(grid[0], cmap='gray', interpolation=interpolation)\n", (6870, 6921), True, 'import matplotlib.pyplot as plt\n'), ((7252, 7262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7260, 7262), True, 'import matplotlib.pyplot as plt\n'), ((7486, 7499), 'matplotlib.pyplot.imshow', 'plt.imshow', (['p'], {}), '(p)\n', (7496, 7499), True, 'import matplotlib.pyplot as plt\n'), ((7508, 7518), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7516, 7518), True, 'import matplotlib.pyplot as plt\n'), ((7936, 7968), 'glob.glob', 'glob.glob', (["(images_dir + '/*.jpg')"], {}), "(images_dir + '/*.jpg')\n", (7945, 7968), False, 'import glob\n'), ((8262, 8273), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (8270, 8273), True, 'import numpy as np\n'), ((8591, 8601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8599, 8601), True, 'import matplotlib.pyplot as plt\n'), ((14930, 14954), 'torch.from_numpy', 'torch.from_numpy', (['img_np'], {}), 
'(img_np)\n', (14946, 14954), False, 'import torch\n'), ((973, 1012), 'torch.arange', 'torch.arange', (['size'], {'dtype': 'torch.float32'}), '(size, dtype=torch.float32)\n', (985, 1012), False, 'import torch\n'), ((1193, 1238), 'torch.exp', 'torch.exp', (['(-((mgrid - mean) / (2 * std)) ** 2)'], {}), '(-((mgrid - mean) / (2 * std)) ** 2)\n', (1202, 1238), False, 'import torch\n'), ((6708, 6741), 'numpy.concatenate', 'np.concatenate', (['[x, x, x]'], {'axis': '(0)'}), '([x, x, x], axis=0)\n', (6722, 6741), True, 'import numpy as np\n'), ((7094, 7121), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (7108, 7121), False, 'import os\n'), ((7136, 7160), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (7147, 7160), False, 'import os\n'), ((7563, 7590), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (7577, 7590), False, 'import os\n'), ((7605, 7629), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (7616, 7629), False, 'import os\n'), ((8646, 8673), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (8660, 8673), False, 'import os\n'), ((8688, 8712), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (8699, 8712), False, 'import os\n'), ((8733, 8773), 'os.path.join', 'os.path.join', (['output_path', "(name + '.png')"], {}), "(output_path, name + '.png')\n", (8745, 8773), False, 'import os\n'), ((12284, 12304), 'numpy.mean', 'np.mean', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (12291, 12304), True, 'import numpy as np\n'), ((14459, 14488), 'numpy.clip', 'np.clip', (['(img_np * 255)', '(0)', '(255)'], {}), '(img_np * 255, 0, 255)\n', (14466, 14488), True, 'import numpy as np\n'), ((5764, 5783), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5780, 5783), False, 'import torch\n'), ((8975, 9004), 'numpy.rot90', 'np.rot90', (['np_image', '(1)', '(1, 2)'], {}), '(np_image, 1, (1, 2))\n', (8983, 9004), True, 'import numpy as np\n'), ((9024, 9053), 'numpy.rot90', 'np.rot90', (['np_image', '(2)', '(1, 2)'], {}), '(np_image, 2, (1, 2))\n', (9032, 9053), True, 'import numpy as np\n'), ((9062, 9091), 'numpy.rot90', 'np.rot90', (['np_image', '(3)', '(1, 2)'], {}), '(np_image, 3, (1, 2))\n', (9070, 9091), True, 'import numpy as np\n'), ((9169, 9197), 'numpy.rot90', 'np.rot90', (['flipped', '(1)', '(1, 2)'], {}), '(flipped, 1, (1, 2))\n', (9177, 9197), True, 'import numpy as np\n'), ((9206, 9234), 'numpy.rot90', 'np.rot90', (['flipped', '(2)', '(1, 2)'], {}), '(flipped, 2, (1, 2))\n', (9214, 9234), True, 'import numpy as np\n'), ((9243, 9271), 'numpy.rot90', 'np.rot90', (['flipped', '(3)', '(1, 2)'], {}), '(flipped, 3, (1, 2))\n', (9251, 9271), True, 'import numpy as np\n'), ((9502, 9531), 'numpy.rot90', 'np.rot90', (['np_video', '(1)', '(2, 3)'], {}), '(np_video, 1, (2, 3))\n', (9510, 9531), True, 'import numpy as np\n'), ((9551, 9580), 'numpy.rot90', 'np.rot90', (['np_video', '(2)', '(2, 3)'], {}), '(np_video, 2, (2, 3))\n', (9559, 9580), True, 'import numpy as np\n'), ((9589, 9618), 'numpy.rot90', 'np.rot90', (['np_video', '(3)', '(2, 3)'], {}), '(np_video, 3, (2, 3))\n', (9597, 9618), True, 'import numpy as np\n'), ((9700, 9728), 'numpy.rot90', 'np.rot90', (['flipped', '(1)', '(2, 3)'], {}), '(flipped, 1, (2, 3))\n', (9708, 9728), True, 'import numpy as np\n'), ((9737, 9765), 'numpy.rot90', 'np.rot90', (['flipped', '(2)', '(2, 3)'], {}), '(flipped, 2, (2, 3))\n', (9745, 9765), True, 'import numpy as np\n'), ((9786, 9814), 
'numpy.rot90', 'np.rot90', (['flipped', '(3)', '(2, 3)'], {}), '(flipped, 3, (2, 3))\n', (9794, 9814), True, 'import numpy as np\n'), ((1167, 1189), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (1176, 1189), False, 'import math\n')]
|
# BUILD THE PROJECT WITH THE CORRECT PYTHON VERSION
# pip install wheel
# python setup.py bdist_wheel
# OR python setup.py sdist bdist_wheel (to include the source)
# for python 3.8
# C:\Users\yoann\AppData\Roaming\Python\Python38\Scripts\twine upload
# --verbose --repository testpypi dist/IndexMapping-1.0.3-cp38-cp38-win_amd64.whl
# for python 3.6
# C:\Users\yoann\AppData\Roaming\Python\Python36\Scripts\twine upload
# --verbose --repository testpypi dist/IndexMapping-1.0.3-cp36-cp36-win_amd64.whl
# python setup.py bdist_wheel
# twine upload --verbose --repository testpypi dist/*
# PRODUCTION v:
# version 1.0.2
# C:\Users\yoann\AppData\Roaming\Python\Python38\Scripts\twine upload --verbose dist/IndexMapping-1.0.2*
# CREATING EXECUTABLE
# pyinstaller --onefile pyinstaller_config.spec
# NUMPY IS REQUIRED
try:
import numpy
except ImportError:
raise ImportError("\n<numpy> library is missing on your system."
"\nTry: \n C:\\pip install numpy on a window command prompt.")
import setuptools
from Cython.Build import cythonize
from setuptools import setup, Extension
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name ="IndexMapping",
version ="1.0.2", # prod 1.0.2 / test 1.0.20 next 1.0.21
author ="<NAME>",
author_email ="<EMAIL>",
description ="1D array transpose/conversion",
long_description =long_description,
long_description_content_type="text/markdown",
url ="https://github.com/yoyoberenguer/IndexMapping",
packages =setuptools.find_packages(),
ext_modules =cythonize([
Extension("IndexMapping.mapping", ["mapping.pyx"],
extra_compile_args=["/Qpar", "/fp:fast", "/O2", "/Oy", "/Ot"], language="c"),
Extension("IndexMapping.mapcfunctions", ["mapcfunctions.pyx"],
extra_compile_args=["/Qpar", "/fp:fast", "/O2", "/Oy", "/Ot"], language="c")]),
include_dirs=[numpy.get_include()],
define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
license ='MIT',
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Cython',
'Programming Language :: C',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate you support Python 3. These classifiers are *not*
# checked by 'pip install'. See instead 'python_requires' below.
# 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires=[
'setuptools>=49.2.1',
'Cython>=0.28'
],
python_requires ='>=3.0',
platforms =['any'],
include_package_data =True,
data_files=[('./lib/site-packages/IndexMapping',
['__init__.pxd',
'__init__.py',
'pyproject.toml',
'setup_mapping.py',
'mapcfunctions.pyx',
'mapping.pxd',
'mapping.pyx',
'LICENSE',
'README.md',
'requirements.txt',
'mapc.c',
'setup.cfg'
]),
('./lib/site-packages/IndexMapping/test',
[
'test/__init__.py',
'test/test_mapping.py',
'test/test_split.py',
'test/profiling.py'
]),
('./lib/site-packages/IndexMapping/Assets',
[
'Assets/A1.png',
])
],
project_urls = { # Optional
'Bug Reports': 'https://github.com/yoyoberenguer/IndexMapping/issues',
'Source' : 'https://github.com/yoyoberenguer/IndexMapping',
},
)
|
[
"setuptools.Extension",
"numpy.get_include",
"setuptools.find_packages",
"warnings.filterwarnings"
] |
[((1154, 1216), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (1177, 1216), False, 'import warnings\n'), ((1218, 1275), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (1241, 1275), False, 'import warnings\n'), ((1918, 1944), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1942, 1944), False, 'import setuptools\n'), ((2340, 2359), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2357, 2359), False, 'import numpy\n'), ((2002, 2134), 'setuptools.Extension', 'Extension', (['"""IndexMapping.mapping"""', "['mapping.pyx']"], {'extra_compile_args': "['/Qpar', '/fp:fast', '/O2', '/Oy', '/Ot']", 'language': '"""c"""'}), "('IndexMapping.mapping', ['mapping.pyx'], extra_compile_args=[\n '/Qpar', '/fp:fast', '/O2', '/Oy', '/Ot'], language='c')\n", (2011, 2134), False, 'from setuptools import setup, Extension\n'), ((2159, 2307), 'setuptools.Extension', 'Extension', (['"""IndexMapping.mapcfunctions"""', "['mapcfunctions.pyx']"], {'extra_compile_args': "['/Qpar', '/fp:fast', '/O2', '/Oy', '/Ot']", 'language': '"""c"""'}), "('IndexMapping.mapcfunctions', ['mapcfunctions.pyx'],\n extra_compile_args=['/Qpar', '/fp:fast', '/O2', '/Oy', '/Ot'], language='c'\n )\n", (2168, 2307), False, 'from setuptools import setup, Extension\n')]
|
"""
Configuration to get the same results every time.
https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
"""
import os
import random as rn
import numpy as np
import tensorflow as tf
from keras import backend as K
# Set up random seed to
# get the same results every
# time you train your model.
rs = 5
# We want to silence some of the
# tensorflow log messages for the clarity
# of the output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(rs)
rn.seed(rs)
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.compat.v1.set_random_seed(rs)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
K.set_session(sess)
print("Session configured!")
|
[
"numpy.random.seed",
"tensorflow.compat.v1.get_default_graph",
"keras.backend.set_session",
"tensorflow.compat.v1.set_random_seed",
"random.seed",
"tensorflow.compat.v1.ConfigProto"
] |
[((532, 550), 'numpy.random.seed', 'np.random.seed', (['rs'], {}), '(rs)\n', (546, 550), True, 'import numpy as np\n'), ((551, 562), 'random.seed', 'rn.seed', (['rs'], {}), '(rs)\n', (558, 562), True, 'import random as rn\n'), ((579, 671), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n', (603, 671), True, 'import tensorflow as tf\n'), ((668, 700), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['rs'], {}), '(rs)\n', (696, 700), True, 'import tensorflow as tf\n'), ((791, 810), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (804, 810), True, 'from keras import backend as K\n'), ((736, 768), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (766, 768), True, 'import tensorflow as tf\n')]
|
import sys
sys.path.insert(0, ".")
# from trimesh.exchange.obj import load_obj
import trimesh
import cv2
import numpy as np
from nara.camera import Camera
from nara.vis import Plot
from nara.rasterizing import rasterizing
camera = 10
frame = 250
person = "377"
# extri_fpath = f"data/easymocap/extri_{camera}.yml"
# intri_fpath = f"data/easymocap/intri_{camera}.yml"
# mesh_fpath = "data/easymocap/000074.obj"
# extri_fpath = "data/extri.yml"
# intri_fpath = "data/intri.yml"
# mesh_fpath = "data/000000_2.obj"
uv_fpath = "data/uv_table.npy"
extri_fpath = f"/home/group-cvg/datasets/easymocap/cameras/{person}/extri.yml"
intri_fpath = f"/home/group-cvg/datasets/easymocap/cameras/{person}/intri.yml"
mesh_fpath = f"/home/group-cvg/datasets/easymocap/meshes/{person}/%06d.obj" % frame
# image_fpath = f"data/AllViewImage_000000/{camera}_000000.jpg"
image_fpath = (
f"/home/group-cvg/datasets/easymocap/images/{person}/{camera}/%06d.jpg" % frame
)
intri_param = cv2.FileStorage(intri_fpath, flags=0)
extri_param = cv2.FileStorage(extri_fpath, flags=0)
im = cv2.cvtColor(cv2.imread(image_fpath), cv2.COLOR_BGR2RGB)
camera = "%02d" % camera
w = 1024
h = 1024
K = intri_param.getNode(f"K_{camera}").mat()
dist = intri_param.getNode(f"dist_{camera}").mat()
rvec = extri_param.getNode(f"R_{camera}").mat() # 3x1 np.array
tvec = extri_param.getNode(f"T_{camera}").mat() # 3x1 np.array
plot = Plot(w, h)
plot.imshow(im)
cam = Camera(rvec, tvec, K, dist, w, h)
mesh = trimesh.load(mesh_fpath)
V = mesh.vertices
F = mesh.faces
T = np.load(uv_fpath)
zbuffer, uv_image, normal_image = rasterizing(V, F, T, cam, calculate_normals=True)
print("normal_image", normal_image.shape, np.min(normal_image), np.max(normal_image))
normal_image = (normal_image + 1) / 2
plot.ax.imshow(im)
plot.ax.imshow(normal_image, alpha=0.5)
plot.save(f"output/eazmopca{person}_{camera}.png")
|
[
"numpy.load",
"trimesh.load",
"nara.vis.Plot",
"sys.path.insert",
"nara.rasterizing.rasterizing",
"cv2.imread",
"cv2.FileStorage",
"numpy.min",
"numpy.max",
"nara.camera.Camera"
] |
[((12, 35), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (27, 35), False, 'import sys\n'), ((975, 1012), 'cv2.FileStorage', 'cv2.FileStorage', (['intri_fpath'], {'flags': '(0)'}), '(intri_fpath, flags=0)\n', (990, 1012), False, 'import cv2\n'), ((1027, 1064), 'cv2.FileStorage', 'cv2.FileStorage', (['extri_fpath'], {'flags': '(0)'}), '(extri_fpath, flags=0)\n', (1042, 1064), False, 'import cv2\n'), ((1405, 1415), 'nara.vis.Plot', 'Plot', (['w', 'h'], {}), '(w, h)\n', (1409, 1415), False, 'from nara.vis import Plot\n'), ((1440, 1473), 'nara.camera.Camera', 'Camera', (['rvec', 'tvec', 'K', 'dist', 'w', 'h'], {}), '(rvec, tvec, K, dist, w, h)\n', (1446, 1473), False, 'from nara.camera import Camera\n'), ((1482, 1506), 'trimesh.load', 'trimesh.load', (['mesh_fpath'], {}), '(mesh_fpath)\n', (1494, 1506), False, 'import trimesh\n'), ((1544, 1561), 'numpy.load', 'np.load', (['uv_fpath'], {}), '(uv_fpath)\n', (1551, 1561), True, 'import numpy as np\n'), ((1598, 1647), 'nara.rasterizing.rasterizing', 'rasterizing', (['V', 'F', 'T', 'cam'], {'calculate_normals': '(True)'}), '(V, F, T, cam, calculate_normals=True)\n', (1609, 1647), False, 'from nara.rasterizing import rasterizing\n'), ((1084, 1107), 'cv2.imread', 'cv2.imread', (['image_fpath'], {}), '(image_fpath)\n', (1094, 1107), False, 'import cv2\n'), ((1692, 1712), 'numpy.min', 'np.min', (['normal_image'], {}), '(normal_image)\n', (1698, 1712), True, 'import numpy as np\n'), ((1714, 1734), 'numpy.max', 'np.max', (['normal_image'], {}), '(normal_image)\n', (1720, 1734), True, 'import numpy as np\n')]
|
import argparse
import joblib
import os
import numpy as np
import pandas as pd
# sklearn
from sklearn.compose import ColumnTransformer
from sklearn.compose import make_column_selector as selector
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report, accuracy_score, balanced_accuracy_score, f1_score, roc_auc_score, precision_score, recall_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.feature_selection import RFECV
# azureml
from azureml.core.run import Run
from azureml.core import Workspace
def main():
# Get script arguments
parser = argparse.ArgumentParser()
# Input dataset
parser.add_argument("--input-data", type=str,
dest='input_data', help='training dataset')
# Hyperparameters
parser.add_argument('--C', type=float, default=1,
help="Inverse of regularization strength. Smaller values cause stronger regularization")
parser.add_argument('--max_iter', type=int, default=100,
help="Maximum number of iterations to converge")
parser.add_argument('--min_features', type=int, default=10,
help="RFE - Min features to select")
args = parser.parse_args()
print("start experiment")
run = Run.get_context()
run.log("arg_C", np.float(args.C))
run.log("arg_max_iter", np.int(args.max_iter))
run.log("arg_min_features", np.int(args.min_features))
# load the dataset
print("loading data")
# Get the training data from the estimator input
dataset = run.input_datasets['training_data'].to_pandas_dataframe()
# change objects to category to impute
for col in dataset.select_dtypes(object):
dataset[col] = dataset[col].astype('category')
print("data loaded")
X = dataset.drop(columns=['target'], axis=1)
y = np.array(dataset['target'])
print("start test_train_split")
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42)
print("ended test_train_split")
print("start model")
# Setting up the sklean pipeline
# imputer
imp = IterativeImputer(max_iter=10, random_state=0)
# RFE
svc = SVC(kernel="linear")
min_features_to_select = args.min_features
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy',
min_features_to_select=min_features_to_select)
# model
model = LogisticRegression(
C=args.C,
max_iter=args.max_iter,
class_weight='balanced',
solver="liblinear",
random_state=42)
# transformer
numeric_transformer = Pipeline(steps=[
('imp', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('imp', SimpleImputer(strategy='most_frequent')),
('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False))
])
# preprocessor
preprocessor = ColumnTransformer(transformers=[
('num', numeric_transformer, selector(dtype_exclude="category")),
('cat', categorical_transformer, selector(dtype_include="category"))
])
# pipeline
pipe = Pipeline(
steps=[
('preprocessor', preprocessor),
('rfecv', rfecv),
('model', model)])
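    # The fitted pipeline runs three stages in order:
    #   1. preprocessor - median-impute + standard-scale the numeric columns,
    #      most-frequent-impute + one-hot-encode the category columns
    #      (columns are selected by dtype via make_column_selector);
    #   2. rfecv - recursive feature elimination with a linear SVC, scored by accuracy
    #      over 2-fold stratified CV, keeping at least args.min_features features;
    #   3. model - class-weight-balanced liblinear logistic regression.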
pipe.fit(X_train, y_train)
print("end model")
print("start logging metrics")
y_pred = pipe.predict(X_test)
for metric in [balanced_accuracy_score, accuracy_score, f1_score, roc_auc_score, precision_score, recall_score]:
run.log(f"{metric.__name__}", np.float(metric(y_test, y_pred)))
print("end logging metrics")
print("start output")
# files saved in the "outputs" folder are automatically uploaded into run history
# The outputs folder is however never created locally, just uploaded to the run instance
os.makedirs('outputs', exist_ok=True)
joblib.dump(pipe, 'outputs/model.pkl')
print("end output")
print("end experiment")
run.complete()
if __name__ == '__main__':
main()
|
[
"sklearn.impute.SimpleImputer",
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"os.makedirs",
"sklearn.compose.make_column_selector",
"sklearn.model_selection.train_test_split",
"azureml.core.run.Run.get_context",
"joblib.dump",
"numpy.float",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"numpy.int",
"sklearn.model_selection.StratifiedKFold",
"sklearn.svm.SVC",
"sklearn.pipeline.Pipeline",
"sklearn.impute.IterativeImputer"
] |
[((969, 994), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (992, 994), False, 'import argparse\n'), ((1659, 1676), 'azureml.core.run.Run.get_context', 'Run.get_context', ([], {}), '()\n', (1674, 1676), False, 'from azureml.core.run import Run\n'), ((2232, 2259), 'numpy.array', 'np.array', (["dataset['target']"], {}), "(dataset['target'])\n", (2240, 2259), True, 'import numpy as np\n'), ((2336, 2390), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X, y, test_size=0.3, random_state=42)\n', (2352, 2390), False, 'from sklearn.model_selection import train_test_split\n'), ((2525, 2570), 'sklearn.impute.IterativeImputer', 'IterativeImputer', ([], {'max_iter': '(10)', 'random_state': '(0)'}), '(max_iter=10, random_state=0)\n', (2541, 2570), False, 'from sklearn.impute import IterativeImputer\n'), ((2592, 2612), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (2595, 2612), False, 'from sklearn.svm import SVC\n'), ((2852, 2971), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'args.C', 'max_iter': 'args.max_iter', 'class_weight': '"""balanced"""', 'solver': '"""liblinear"""', 'random_state': '(42)'}), "(C=args.C, max_iter=args.max_iter, class_weight=\n 'balanced', solver='liblinear', random_state=42)\n", (2870, 2971), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3600, 3688), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('preprocessor', preprocessor), ('rfecv', rfecv), ('model', model)]"}), "(steps=[('preprocessor', preprocessor), ('rfecv', rfecv), ('model',\n model)])\n", (3608, 3688), False, 'from sklearn.pipeline import Pipeline\n'), ((4292, 4329), 'os.makedirs', 'os.makedirs', (['"""outputs"""'], {'exist_ok': '(True)'}), "('outputs', exist_ok=True)\n", (4303, 4329), False, 'import os\n'), ((4334, 4372), 'joblib.dump', 'joblib.dump', (['pipe', '"""outputs/model.pkl"""'], {}), "(pipe, 'outputs/model.pkl')\n", (4345, 4372), False, 'import joblib\n'), ((1699, 1715), 'numpy.float', 'np.float', (['args.C'], {}), '(args.C)\n', (1707, 1715), True, 'import numpy as np\n'), ((1745, 1766), 'numpy.int', 'np.int', (['args.max_iter'], {}), '(args.max_iter)\n', (1751, 1766), True, 'import numpy as np\n'), ((1800, 1825), 'numpy.int', 'np.int', (['args.min_features'], {}), '(args.min_features)\n', (1806, 1825), True, 'import numpy as np\n'), ((2704, 2722), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(2)'], {}), '(2)\n', (2719, 2722), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((3086, 3118), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (3099, 3118), False, 'from sklearn.impute import SimpleImputer\n'), ((3140, 3156), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3154, 3156), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((3224, 3263), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (3237, 3263), False, 'from sklearn.impute import SimpleImputer\n'), ((3282, 3334), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'sparse': '(False)'}), "(handle_unknown='ignore', sparse=False)\n", (3295, 3334), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((3452, 3486), 'sklearn.compose.make_column_selector', 
'selector', ([], {'dtype_exclude': '"""category"""'}), "(dtype_exclude='category')\n", (3460, 3486), True, 'from sklearn.compose import make_column_selector as selector\n'), ((3530, 3564), 'sklearn.compose.make_column_selector', 'selector', ([], {'dtype_include': '"""category"""'}), "(dtype_include='category')\n", (3538, 3564), True, 'from sklearn.compose import make_column_selector as selector\n')]
|
#!/usr/bin/python3.7
import argparse, json, gc, matplotlib, numpy, os, random, sys
import tensorflow, tensorly, sklearn.linear_model
import tensorflow.keras as keras
tensorflow.config.experimental.set_visible_devices (devices=[], device_type='GPU')
matplotlib.use ('Agg')
import matplotlib.pyplot
import scipy.cluster.hierarchy
import scipy.spatial.distance
def main ():
parser = argparse.ArgumentParser (description='Find anomalies from deviation matrices')
# inputs
parser.add_argument ('--input-dir', help='Specify input directories', nargs='+')
parser.add_argument ('--input-list', help='Specify lists of deviations', nargs='+')
parser.add_argument ('--input-user', help='Specify a list of users', nargs='+')
parser.add_argument ('--input-date', help='Specify a list of dates', nargs='+')
# features
parser.add_argument ('--features', help='Specify features to work on with', nargs='+')
    parser.add_argument ('--no-level-zero', help='Remove benign-level features (e.g., alexa-0)', action='store_true')
# plot heatmap
parser.add_argument ('--image-dir', help='Specify output image directory')
parser.add_argument ('--image-prefix', help='Specify output image filename prefix')
parser.add_argument ('--image-sigma', help='Specify an upper bound of deviation', type=float)
parser.add_argument ('--image-window', help='Specify a window-size for images', type=int)
parser.add_argument ('--image-negweight', help='Specify a weight for negative pixels', type=float)
# mode
parser.add_argument ('--verbose', help='Verbose mode', action='store_true')
parser.add_argument ('--test-module', help='Test Model and then exit', action='store_true')
parser.add_argument ('--model', help='Specify a model for anomaly detection')
parser.add_argument ('--top', help='Only output top N anomalies', type=int)
parser.add_argument ('--synthesis', help='Synthesize images and plot anomalous pixels', action='store_true')
parser.add_argument ('--train-on-date', help='Specify dates for training set', nargs='+')
parser.add_argument ('--test-on-date', help='Specify dates for testing set', nargs='+')
parser.add_argument ('--use-gpu', help='Use GPU (default off)', action='store_true')
# outputs
parser.add_argument ('-o', '--output', help='Write results to file')
parser.add_argument ('--output-dir', help='Arrange results in directory')
args = parser.parse_args ()
# configuration
args.input_dir = [] if args.input_dir is None else args.input_dir
args.input_list = [] if args.input_list is None else args.input_list
args.features = ['all'] if args.features is None else args.features
args.image_prefix = '' if args.image_prefix is None else args.image_prefix
args.image_sigma = 3.0 if args.image_sigma is None else args.image_sigma
args.image_window = 15 if args.image_window is None or args.image_window < 1 else args.image_window
args.image_negweight = 1.0 if args.image_negweight is None or args.image_negweight < 0.0 else args.image_negweight
if args.use_gpu:
gpus = tensorflow.config.experimental.list_physical_devices ('GPU')
tensorflow.config.experimental.set_visible_devices (devices=gpus, device_type='GPU')
for i in range (0, len (gpus)): tensorflow.config.experimental.set_memory_growth (gpus [i], True)
if args.test_module:
if args.model == 'autoencoder1': Autoencoder1.testModule (args)
if args.model == 'autoencoder2': Autoencoder2.testModule (args)
if args.model == 'autoencoderR1': AutoencoderR.testModule (args, Autoencoder1)
if args.model == 'autoencoderR2': AutoencoderR.testModule (args, Autoencoder2)
if args.model == 'anogan': AnoGAN.testModule (args)
if args.model == 'decomposition': TensorDecomposition.testModule (args)
exit (0)
# read inputs
users = UserManager (args)
for l in args.input_list:
for f in open (l, 'r'):
users.append (Deviation (f.strip (), args))
for _d in args.input_dir:
for user in os.listdir (_d):
if args.input_user is None or user in args.input_user:
userdir = os.path.join (_d, user)
for date in os.listdir (userdir):
if args.input_date is None or date in args.input_date:
datedir = os.path.join (userdir, date)
users.append (Deviation (datedir, args))
# get image for common behaviors (average)
if len (users) < 1: exit (0)
mean = users.mean () if len (users) > 1 else None
dates = mean.getDateRange () if len (users) > 1 else users.sample ().getDateRange ()
# apply anomaly detection
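    # For each user, the loops below slide a window of args.image_window dates over the
    # observation period and combine the user's deviation heatmap with the population-mean
    # heatmap into one image per window (hence width = args.image_window * 2 further down).
    # The selected model is trained on windows ending on --train-on-date dates, scored on
    # windows ending on --test-on-date dates, and user/date pairs are ranked by score.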
if args.model is not None:
evaluation = []
# build a model for each individual user
for u in users:
# build training set
trainX = []
for index in range (0, len (dates) - args.image_window + 1):
dateLabels = dates [index: index + args.image_window]
if args.train_on_date is not None and dateLabels [-1] not in args.train_on_date: continue
meanmap = mean.heatmap (dates=dateLabels)
heatmap = users [u].heatmap (dateLabels)
if heatmap is not None:
trainX.append (Figure.Heatmap ([heatmap, meanmap], args=args, tags=[u, dateLabels [-1]]))
# build testing set
testX = []
for index in range (0, len (dates) - args.image_window + 1):
dateLabels = dates [index: index + args.image_window]
if args.test_on_date is not None and dateLabels [-1] not in args.test_on_date: continue
meanmap = mean.heatmap (dates=dateLabels)
heatmap = users [u].heatmap (dateLabels)
if heatmap is not None:
testX.append (Figure.Heatmap ([heatmap, meanmap], args=args, tags=[u, dateLabels [-1]]))
# get dimensions
if len (trainX) == 0 or len (testX) == 0: exit (0)
width = args.image_window * 2
height = trainX [0].size / width
# autoencoder1
if args.model == 'autoencoder1':
model = Autoencoder1 (height=height, width=width, args=args)
model.train (X=list (trainX), epochs=64, batchsize=2048)
scores, syns = model.test (X=list (testX))
# autoencoder2
if args.model == 'autoencoder2':
model = Autoencoder2 (height=height, width=width, args=args)
model.train (X=list (trainX), epochs=16, batchsize=32)
scores, syns = model.test (X=list (testX))
# autoencoderR1
if args.model == 'autoencoderR1':
model = AutoencoderR (height=height, width=width, args=args, model=Autoencoder1)
model.train (X=list (trainX), epochs=4, batchsize=32)
scores, syns = model.test (X=list (testX))
# autoencoderR2
if args.model == 'autoencoderR2':
model = AutoencoderR (height=height, width=width, args=args, model=Autoencoder2)
model.train (X=list (trainX), epochs=4, batchsize=32)
scores, syns = model.test (X=list (testX))
# anogan
if args.model == 'anogan':
model = AnoGAN (height=height, width=width, args=args)
model.train (X=list (trainX), epochs=4, batchsize=32)
scores, syns = model.test (X=list (testX))
# decomposition
if args.model == 'decomposition':
model = TensorDecomposition (args=args)
model.train (X=list (trainX), epochs=2048)
scores, syns = model.test (X=list (testX))
# delete model
if model is not None:
keras.backend.clear_session ()
del (model); gc.collect ()
# attach results to evaluation
for index, score in enumerate (list (scores)):
fig = testX [index]
user = fig.tags () [0]
date = fig.tags () [1]
evaluation.append ([user, score, date])
# evaluate
evaluation = sorted (evaluation, key=lambda obj: obj [1], reverse=True)
# output evaluation
if args.output is None and args.output_dir is None: print (json.dumps (evaluation))
if args.output is not None:
with open (args.output, 'w') as fout: fout.write (json.dumps (evaluation) + '\n')
if args.output_dir is not None:
filename = 'result.log'
for date in args.test_on_date:
datedir = os.path.join (args.output_dir, date)
logfile = os.path.join (datedir, filename)
os.makedirs (os.path.join (args.output_dir, date), exist_ok=True)
if os.path.isfile (logfile): os.remove (logfile)
for user, score, date in evaluation:
with open (os.path.join (args.output_dir, date, filename), 'a') as fout:
fout.write (json.dumps ([user, score, date, args.model]) + '\n')
# plot summary
if args.image_dir is not None: # plot summary
title = '_'.join ([args.image_prefix, args.model, 'summary', dates [0], dates [-1]])
figure = matplotlib.pyplot.figure (1)
matplotlib.pyplot.yscale ('linear')
matplotlib.pyplot.grid (True)
matplotlib.pyplot.plot (numpy.arange (len (evaluation)), [tup [1] for tup in evaluation], '-', lw=2)
figure.suptitle (title)
figure.tight_layout ()
matplotlib.pyplot.savefig (os.path.join (args.image_dir, title + '.png'), dpi=600)
matplotlib.pyplot.close (figure)
# plot users
if args.image_dir:
order = list (sorted (users.keys ()))
# set risks
if args.model is not None and len (evaluation) > 1:
order = []
for user, score, date in evaluation:
if user not in order: order.append (user)
for i, u in enumerate (order):
users [u].setRisk (i)
# plot
for i, u in enumerate (order):
if args.top is None or i < args.top:
users [u].heatmap (plot=mean)
################################################
### Tensor Decomposition ###
################################################
class TensorDecomposition (object):
def __init__ (self, args=None):
gc.collect()
self.args = args
self.k = 1
self.lambda_u = 0.1
self.lambda_v = 0.1
self.lambda_w = 0.1
self.X = None
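    # standard_parafac fits a rank-k CP/PARAFAC model
    #     X[i, j, t]  ~=  sum_r A[i, r] * B[j, r] * C[t, r]
    # by alternating least squares: with two factors held fixed, each update solves
    #     unfold(X, n).T  ~=  khatri_rao(other two factors) @ F_n.T
    # for the remaining factor F_n via a ridge regression (fit_intercept=False),
    # cycling over A, B, C for the requested number of epochs.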
def standard_parafac (self, X, epochs=2048):
A = numpy.ones ((X.shape [0], self.k))
B = numpy.ones ((X.shape [1], self.k))
C = numpy.ones ((X.shape [2], self.k))
        clf = sklearn.linear_model.Ridge (fit_intercept=False, alpha=0.1)  # ridge regressor shared by the three ALS sub-problems
for epoch in range (0, epochs):
# estimate A
mttrpA = tensorly.tenalg.khatri_rao ([B, C])
destA = tensorly.transpose (tensorly.base.unfold (X, 0))
clf.fit (mttrpA, destA)
A = clf.coef_
# estimate B
mttrpB = tensorly.tenalg.khatri_rao ([A, C])
destB = tensorly.transpose (tensorly.base.unfold (X, 1))
clf.fit (mttrpB, destB)
B = clf.coef_
# estimate C
mttrpC = tensorly.tenalg.khatri_rao ([A, B])
destC = tensorly.transpose (tensorly.base.unfold (X, 2))
clf.fit (mttrpC, destC)
C = clf.coef_
if self.args.verbose:
Y = tensorly.kruskal_tensor.kruskal_to_tensor ([A, B, C])
print ('standard_parafac-' + str (epochs - epoch - 1) + ': ' + str (numpy.mean (self.compare (X, Y))))
return [A, B, C]
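    # regularized_parafac repeats the same alternating scheme, but each factor update is
    # a closed-form normal-equation solve pulled toward the reference factors (cA, cB, cC)
    # by the lambda_u / lambda_v / lambda_w terms (roughly an l2 penalty of the form
    # lambda * ||A - cA||^2), so the test-window factors cannot drift arbitrarily far from
    # the training-window decomposition.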
def regularized_parafac (self, X, constraints, epochs=2048):
cA, cB, cC = constraints
A = numpy.ones ((X.shape [0], self.k))
B = numpy.ones ((X.shape [1], self.k))
C = numpy.ones ((X.shape [2], self.k))
for epoch in range (0, epochs):
# estimate A
mttrpA = numpy.dot (tensorly.base.unfold (X, 0), tensorly.tenalg.khatri_rao ([B, C]))
destA = numpy.multiply (numpy.dot (B.T, B), numpy.dot (C.T, C))
A = tensorly.transpose (tensorly.solve (
tensorly.transpose (numpy.dot (destA, destA.T) + self.lambda_u * numpy.eye (self.k)),
tensorly.transpose (self.lambda_u * cA + numpy.dot (mttrpA, destA.T))))
# estimate B
mttrpB = numpy.dot (tensorly.base.unfold (X, 1), tensorly.tenalg.khatri_rao ([A, C]))
destB = numpy.multiply (numpy.dot (A.T, A), numpy.dot (C.T, C))
B = tensorly.transpose (tensorly.solve (
tensorly.transpose (numpy.dot (destB, destB.T) + self.lambda_v * numpy.eye (self.k)),
tensorly.transpose (self.lambda_v * cB + numpy.dot (mttrpB, destB.T))))
# estimate C: B*(CA)=X2, C*(BA)=X3
mttrpC = numpy.dot (tensorly.base.unfold (X, 2), tensorly.tenalg.khatri_rao ([A, B]))
destC = numpy.multiply (numpy.dot (A.T, A), numpy.dot (B.T, B))
C = tensorly.transpose (tensorly.solve (
tensorly.transpose (numpy.dot (destC, destC.T) + self.lambda_w * numpy.eye (self.k)),
tensorly.transpose (self.lambda_w * cC + numpy.dot (mttrpC, destC.T))))
if self.args.verbose:
Y = tensorly.kruskal_tensor.kruskal_to_tensor ([A, B, C])
print ('regularized_parafac-' + str (epochs - epoch - 1) + ': ' + str (numpy.mean (self.compare (X, Y))))
return [A, B, C]
def fit (self, X, epochs=2048):
self.X = X
self.epochs = epochs
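    # Detection scheme used by evaluate(): the last training window X0 is decomposed with
    # the unconstrained ALS, each test window X1 is decomposed with the regularized ALS
    # anchored to those training factors, and the per-slice anomaly score is the
    # element-wise maximum of three Frobenius-style distances computed by compare():
    # train reconstruction vs test reconstruction, train reconstruction vs raw test slice,
    # and raw test slice vs test reconstruction.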
def evaluate (self, X, epochs=None, synthesis=False):
scores, syns = [], []
numCubes = len (self.args.test_on_date)
lenCubes = int (len (X) / numCubes)
epochs = self.epochs if epochs is None else epochs
# decomposition of the last training cube
X0 = self.X if self.X is not None else X [: lenCubes]
X0 = self.checkArray (X0 [0 - lenCubes: ])
F0 = self.standard_parafac (X0, epochs)
Y0 = tensorly.kruskal_tensor.kruskal_to_tensor (F0)
# for each testing cube
for cubeIndex in range (0, numCubes):
# reconstruction
X1 = self.checkArray (X [cubeIndex * lenCubes: (cubeIndex + 1) * lenCubes])
F1 = self.regularized_parafac (X1, F0, epochs)
Y1 = tensorly.kruskal_tensor.kruskal_to_tensor (F1)
# scoring
scores1 = self.compare (Y0, Y1)
scores2 = self.compare (Y0, X1)
scores3 = self.compare (X1, Y1)
scores += list (numpy.max ([scores1, scores2, scores3], 0))
# synthesis
if synthesis: syns += list (numpy.reshape (numpy.array (Y1), X1.shape))
return scores, syns
def train (self, X, epochs=2048):
return self.fit (X, epochs)
def test (self, X, epochs=None, synthesis=False):
return self.evaluate (X, epochs, synthesis)
@staticmethod
def checkArray (array):
array = numpy.array (array)
return array
@staticmethod
def compare (X, Y):
ret = []
for d in range (0, X.shape [0]):
ret.append (numpy.sqrt (numpy.sum (numpy.power (X [d] - Y [d], 2))))
return ret
@classmethod
def testModule (cls, args=None):
train = [Figure.getNormalFigure (12, 14) for i in range (0, 1024*4)]
test1 = [Figure.getNormalFigure (12, 14) for i in range (0, 8)]
test2 = [Figure.getAbnormalFigure (12, 14) for i in range (0, 8)]
decomposition = cls (args=args)
decomposition.fit (train)
scores1, syns1 = decomposition.evaluate (test1, synthesis=True, epochs=128)
scores2, syns2 = decomposition.evaluate (test2, synthesis=True)
print ('Normal: ' + str (numpy.mean (scores1)))
print ('Abnormal: ' + str (numpy.mean (scores2)))
print (syns1 [0])
################################################
### AnoGAN ###
################################################
class AnoGAN (object):
def __init__ (self, height, width, args=None, loss='mean_squared_error'):
gc.collect()
# variables
self.args = args
self.width = (int ((width - 1) / 4) + 1) * 4
self.height = (int ((height - 1) / 4) + 1) * 4
self.col = int (self.width / 4)
self.row = int (self.height / 4)
self.channels = 1
self.zdim = 100
self.units = 128
self.kernels = (5, 5)
self.strides = (2, 2)
self.stddev = 0.02
self.relu = 1.0
self.dropout = 0.3
self.activation = 'tanh'
self.loss = loss
# generator
self.generator = keras.models.Sequential ()
self.generator.add (keras.layers.Dense (self.row * self.col * self.units, input_dim=self.zdim,
kernel_initializer=keras.initializers.RandomNormal (stddev=self.stddev)))
self.generator.add (keras.layers.LeakyReLU (self.relu))
self.generator.add (keras.layers.BatchNormalization ())
self.generator.add (keras.layers.Reshape ((self.row, self.col, self.units)))
self.generator.add (keras.layers.UpSampling2D (size=self.strides))
self.generator.add (keras.layers.Conv2D (int (self.units / 2), kernel_size=self.kernels, padding='same'))
self.generator.add (keras.layers.LeakyReLU (self.relu))
self.generator.add (keras.layers.BatchNormalization ())
self.generator.add (keras.layers.UpSampling2D (size=self.strides))
self.generator.add (keras.layers.Conv2D (self.channels, kernel_size=self.kernels, padding='same', activation='tanh'))
self.generator.compile (loss=self.loss, optimizer='adam')
if self.args.verbose: print ('\nGenerator: '); self.generator.summary ()
# discriminator
self.discriminator = keras.models.Sequential ()
self.discriminator.add (keras.layers.Conv2D (int (self.units / 2), kernel_size=self.kernels, strides=self.strides, padding='same',
input_shape=(self.height, self.width, self.channels), kernel_initializer=keras.initializers.RandomNormal (stddev=self.stddev)))
self.discriminator.add (keras.layers.LeakyReLU (self.relu))
self.discriminator.add (keras.layers.BatchNormalization ())
self.discriminator.add (keras.layers.Dropout (self.dropout))
self.discriminator.add (keras.layers.Conv2D (self.units, kernel_size=self.kernels, strides=self.strides, padding='same')) # layer [-5]
self.discriminator.add (keras.layers.LeakyReLU (self.relu))
self.discriminator.add (keras.layers.BatchNormalization ())
self.discriminator.add (keras.layers.Dropout (self.dropout))
self.discriminator.add (keras.layers.Flatten ())
self.discriminator.add (keras.layers.Dense (1, activation=self.activation))
self.discriminator.compile (loss=self.loss, optimizer='adam')
if self.args.verbose: print ('\nDiscriminator: '); self.discriminator.summary ()
# gan
self.ganInput = keras.layers.Input (shape=(self.zdim, ))
self.ganMidput = self.generator (self.ganInput)
self.ganOutput = self.discriminator (self.ganMidput)
self.GAN = keras.models.Model (inputs=self.ganInput, outputs=self.ganOutput)
self.GAN.compile (loss=self.loss, optimizer='adam')
if self.args.verbose: print ('\nGAN: '); self.GAN.summary ()
# intermediate feature extractor
self.intermediate = keras.models.Model (inputs=self.discriminator.layers [0].input, outputs=self.discriminator.layers [-5].output)
# self.intermediate.compile (loss=self.loss, optimizer='adam')
self.intermediate.compile (loss='binary_crossentropy', optimizer='adam')
if self.args.verbose: print ('\nFeature Extractor: '); self.intermediate.summary ()
# AnoGAN detector
self.anoInput = keras.layers.Input (shape=(self.zdim, ))
self.anoMidput = self.generator (keras.layers.Activation (self.activation) (keras.layers.Dense ((self.zdim)) (self.anoInput)))
self.anoOutput = self.intermediate (self.anoMidput)
self.detector = keras.models.Model (inputs=self.anoInput, outputs=[self.anoMidput, self.anoOutput])
self.detector.compile (loss=self.sum_of_residual, loss_weights=[0.9, 0.1], optimizer='adam')
if self.args.verbose: print ('\nAnoGAN Detector: '); self.detector.summary ()
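    # Training (fit) is a plain DCGAN loop: per batch, the discriminator is updated on real
    # heatmap windows (label 1) against generator samples (label 0), then the stacked GAN is
    # updated with self.discriminator.trainable switched off so that generated samples are
    # pushed toward label 1.  Scoring (evaluate) follows the AnoGAN idea: for each test image
    # a random latent vector is drawn and self.detector is fitted so that the generated image
    # matches the query both in pixel space and in the discriminator's intermediate feature
    # space (loss weights 0.9 / 0.1); the final weighted loss is the anomaly score and the
    # generated image is the synthesis.  (In the original AnoGAN formulation only the latent
    # code is optimised with G and D frozen; this variant learns a dense map into the latent
    # space, and the generator / feature extractor are not explicitly frozen here.)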
def fit (self, X, epochs=4, batchsize=512):
X = self.checkArray (X)
for epoch in range (0, epochs):
# shuffle
indices = [i for i in range (0, X.shape [0])]
random.shuffle (indices)
# train
while len (indices) > 0:
# prepare batches
batch_images = [X [indices.pop ()] for i in range (0, min (batchsize, len (indices)))]
generated_images = self.generator.predict (numpy.random.uniform (0, 1, (len (batch_images), self.zdim)), verbose=0)
# train discriminator
batch_X = numpy.concatenate ((batch_images, generated_images))
batch_Y = numpy.array ([1] * len (batch_images) + [0] * len (generated_images))
discriminator_loss = self.discriminator.train_on_batch (batch_X, batch_Y)
# train generator
self.discriminator.trainable = False
batch_X = numpy.random.uniform (0, 1, (len (batch_images), self.zdim))
batch_Y = numpy.array ([1] * len (batch_images))
generator_loss = self.GAN.train_on_batch (batch_X, batch_Y)
self.discriminator.trainable = True
# verbose
if self.args.verbose:
print ('epoch ' + str (epochs - epoch - 1) + ' index ' + str (len (indices)), end=' ')
print ('discriminator_loss=' + str (discriminator_loss), end=' ')
print ('generator_loss=' + str (generator_loss))
def evaluate (self, X, epochs=32, synthesis=False):
scores = []
syns = []
for i, x in enumerate (X):
x = self.checkArray ([x])
z = numpy.random.uniform (0, 1, (1, self.zdim))
dx = self.intermediate.predict (x)
# learning for changing latent
loss = self.detector.fit (z, [x, dx], epochs=epochs, verbose=0)
# synthesis image
if synthesis:
syn, _ = self.detector.predict (z)
syns.append (syn [0])
# scoring
score = loss.history ['loss'][-1]
scores.append (score)
if self.args.verbose: print ('Test-' + str (len (X) - i - 1) + '=' + str (score))
shape = numpy.array (X).shape
if synthesis: syns = numpy.reshape (numpy.array (syns)[:, :shape [1], :shape [2]], shape)
return scores, syns
def train (self, X, epochs=4, batchsize=512):
return self.fit (X, epochs, batchsize)
def test (self, X, epochs=32, synthesis=False):
return self.evaluate (X, epochs, synthesis)
def checkArray (self, array):
# pad to fit dimensions
for i in range (0, len (array)):
height, width= array [i].shape
for h in range (height, self.height):
array [i] = numpy.append (array [i], [[0.0] * width], axis=0)
for w in range (width, self.width):
array [i] = numpy.append (array [i], [[0.0]] * (self.height), axis=1)
# reshape array
array = numpy.array ([x.reshape (self.height, self.width, self.channels) for x in array])
return array
@staticmethod
def sum_of_residual (y_true, y_pred):
# return tensorflow.reduce_sum (abs (y_true - y_pred))
return tensorflow.reduce_sum (tensorflow.abs (y_true - y_pred))
@classmethod
def testModule (cls, args=None):
train = [Figure.getNormalFigure (12, 14) for i in range (0, 1024*4)]
test1 = [Figure.getNormalFigure (12, 14) for i in range (0, 8)]
test2 = [Figure.getAbnormalFigure (12, 14) for i in range (0, 8)]
anogan = cls (12, 14, args=args)
anogan.fit (train)
scores1, syns1 = anogan.evaluate (test1, synthesis=True, epochs=128)
scores2, syns2 = anogan.evaluate (test2, synthesis=True)
print ('Normal: ' + str (numpy.mean (scores1)))
print ('Abnormal: ' + str (numpy.mean (scores2)))
print (syns1 [0])
################################################
### Autoencoders ###
################################################
class Autoencoder1 (object):
def __init__ (self, length=None, height=None, width=None, args=None, loss='mean_squared_error'):
gc.collect()
self.args = args
self.length = length if length is not None else int (width * height)
self.units = 512
self.relu = 1.0
self.stddev = 0.02
self.loss = loss
self.activation = 'tanh'
# encoder: code-(512 / 8 =64)
self.encoder = keras.models.Sequential ()
self.encoder.add (keras.layers.Dense (self.units, input_dim=self.length,
kernel_initializer=keras.initializers.RandomNormal (stddev=self.stddev)))
self.encoder.add (keras.layers.LeakyReLU (self.relu))
self.encoder.add (keras.layers.BatchNormalization ())
self.encoder.add (keras.layers.Dense (int (self.units / 2)))
self.encoder.add (keras.layers.LeakyReLU (self.relu))
self.encoder.add (keras.layers.BatchNormalization ())
self.encoder.add (keras.layers.Dense (int (self.units / 4)))
self.encoder.add (keras.layers.LeakyReLU (self.relu))
self.encoder.add (keras.layers.BatchNormalization ())
self.encoder.add (keras.layers.Dense (int (self.units / 8)))
self.encoder.add (keras.layers.LeakyReLU (self.relu))
self.encoder.add (keras.layers.BatchNormalization ())
self.encoder.compile (optimizer='adadelta', loss=self.loss)
if self.args.verbose: print ('\nEncoder'); self.encoder.summary ()
# decoder
self.decoder = keras.models.Sequential ()
self.decoder.add (keras.layers.Dense (int (self.units / 8), input_dim=int (self.units / 8),
kernel_initializer=keras.initializers.RandomNormal (stddev=self.stddev)))
self.decoder.add (keras.layers.LeakyReLU (self.relu))
self.decoder.add (keras.layers.BatchNormalization ())
self.decoder.add (keras.layers.Dense (int (self.units / 4)))
self.decoder.add (keras.layers.LeakyReLU (self.relu))
self.decoder.add (keras.layers.BatchNormalization ())
self.decoder.add (keras.layers.Dense (int (self.units / 2)))
self.decoder.add (keras.layers.LeakyReLU (self.relu))
self.decoder.add (keras.layers.Dense (self.length, activation=self.activation))
self.decoder.compile (optimizer='adadelta', loss=self.loss)
if self.args.verbose: print ('\nDecoder'); self.decoder.summary ()
# model
self.input = keras.layers.Input (shape=(self.length,))
self.midput = self.encoder (self.input)
self.output = self.decoder (self.midput)
self.autoencoder = keras.models.Model (self.input, self.output)
self.autoencoder.compile (optimizer='adadelta', loss=self.loss)
if self.args.verbose: print ('\nAutoencoder'); self.autoencoder.summary ()
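    # Anomaly scoring with this dense autoencoder is plain reconstruction error: each
    # height*width window is flattened to a vector of length self.length, squeezed through
    # the 512 -> 64 bottleneck, and evaluate() reports the per-sample mean-squared
    # reconstruction loss as the anomaly score (the reconstruction itself is returned as
    # the synthesis when requested).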
def fit (self, X, epochs=4, batchsize=512):
X = self.checkArray (X)
loss = self.autoencoder.fit (X, X, epochs=epochs, batch_size=batchsize, shuffle=True, verbose=self.args.verbose)
def evaluate (self, X, synthesis=False):
scores = []
syns = []
for i, x in enumerate (X):
x = self.checkArray ([x])
score = self.autoencoder.evaluate (x, x, verbose=False)
scores.append (score)
if synthesis: syns.append (self.autoencoder.predict (x, verbose=False) [0])
if self.args.verbose: print ('Test-' + str (len (X) - i - 1) + '=' + str (score))
if synthesis: syns = numpy.reshape (numpy.array (syns), numpy.array (X).shape)
return scores, syns
def predict (self, X):
return numpy.reshape (self.autoencoder.predict (self.checkArray (X), verbose=self.args.verbose), X.shape)
def train (self, X, epochs=4, batchsize=512):
return self.fit (X, epochs, batchsize)
def test (self, X, synthesis=False):
return self.evaluate (X, synthesis)
def checkArray (self, array):
array = numpy.array (array).reshape (len (array), self.length)
return array
@classmethod
def testModule (cls, args):
train = [Figure.getNormalFigure (12, 14) for i in range (0, 1024*4)]
test1 = [Figure.getNormalFigure (12, 14) for i in range (0, 8)]
test2 = [Figure.getAbnormalFigure (12, 14) for i in range (0, 8)]
autoencoder = cls (width=12, height=14, args=args)
autoencoder.fit (train)
scores1, syns1 = autoencoder.evaluate (test1, synthesis=True)
scores2, syns2 = autoencoder.evaluate (test2, synthesis=True)
print ('Normal: ' + str (numpy.mean (scores1)))
print ('Abnormal: ' + str (numpy.mean (scores2)))
print (syns1 [0])
class Autoencoder2 (object):
def __init__ (self, height, width, args=None, loss='mean_squared_error', channels=1):
gc.collect()
self.args = args
self.width = (int ((width - 1) / 4) + 1) * 4
self.height = (int ((height - 1) / 4) + 1) * 4
self.channels = channels
self.units = 1024
        self.col = int (self.width / 4)   # integer spatial size after the two 2x poolings
        self.row = int (self.height / 4)  # (int must wrap the division so Keras gets integer shapes)
self.kernels = (5, 5)
self.strides = (2, 2)
self.relu = 1.0
self.stddev = 0.02
self.loss = loss
# encoder: code-(h/4, w/4, unit/2)
self.encoder = keras.models.Sequential ()
self.encoder.add (keras.layers.Conv2D (self.units, self.kernels, padding='same',
input_shape=(self.height, self.width, self.channels), kernel_initializer=keras.initializers.RandomNormal (stddev=self.stddev)))
self.encoder.add (keras.layers.LeakyReLU (self.relu))
self.encoder.add (keras.layers.BatchNormalization ())
self.encoder.add (keras.layers.MaxPooling2D (self.strides, padding='same'))
self.encoder.add (keras.layers.Conv2D (int (self.units / 2), self.kernels, padding='same'))
self.encoder.add (keras.layers.LeakyReLU (self.relu))
self.encoder.add (keras.layers.BatchNormalization ())
self.encoder.add (keras.layers.MaxPooling2D (self.strides, padding='same'))
self.encoder.compile (optimizer='adadelta', loss=self.loss)
if self.args.verbose: print ('\nEncoder'); self.encoder.summary ()
# decoder
self.decoder = keras.models.Sequential ()
self.decoder.add (keras.layers.Conv2D (int (self.units / 2), self.kernels, padding='same',
input_shape=(self.row, self.col, int (self.units / 2)), kernel_initializer=keras.initializers.RandomNormal (stddev=self.stddev)))
self.decoder.add (keras.layers.LeakyReLU (self.relu))
self.decoder.add (keras.layers.BatchNormalization ())
self.decoder.add (keras.layers.UpSampling2D (self.strides))
self.decoder.add (keras.layers.Conv2D (int (self.units / 2), self.kernels, padding='same'))
self.decoder.add (keras.layers.LeakyReLU (self.relu))
self.decoder.add (keras.layers.BatchNormalization ())
self.decoder.add (keras.layers.UpSampling2D (self.strides))
self.decoder.add (keras.layers.Conv2D (self.units, self.kernels, padding='same'))
self.decoder.add (keras.layers.LeakyReLU (self.relu))
self.decoder.add (keras.layers.Conv2D (self.channels, self.kernels, activation='tanh', padding='same'))
self.decoder.compile (optimizer='adadelta', loss=self.loss)
if self.args.verbose: print ('\nDecoder'); self.decoder.summary ()
# model
self.input = keras.layers.Input (shape=(self.height, self.width, self.channels))
self.midput = self.encoder (self.input)
self.output = self.decoder (self.midput)
self.autoencoder = keras.models.Model (self.input, self.output)
self.autoencoder.compile (optimizer='adadelta', loss=self.loss)
if self.args.verbose: print ('\nAutoencoder'); self.autoencoder.summary ()
def fit (self, X, epochs=4, batchsize=512):
X = self.checkArray (X)
loss = self.autoencoder.fit (X, X, epochs=epochs, batch_size=batchsize, shuffle=True, verbose=self.args.verbose)
def evaluate (self, X, synthesis=False):
scores = []
syns = []
for i, x in enumerate (X):
x = self.checkArray ([x])
score = self.autoencoder.evaluate (x, x, verbose=False)
scores.append (score)
if synthesis: syns.append (self.autoencoder.predict (x, verbose=False) [0])
if self.args.verbose: print ('Test-' + str (len (X) - i - 1) + '=' + str (score))
shape = numpy.array (X).shape
if synthesis: syns = numpy.reshape (numpy.array (syns)[:, :shape [1], :shape [2]], shape)
return scores, syns
def predict (self, X):
shape = numpy.array (X).shape
syns = self.autoencoder.predict (self.checkArray (X), verbose=self.args.verbose)
syns = numpy.reshape (numpy.array (syns)[:, :shape [1], :shape [2]], shape)
return syns
def train (self, X, epochs=4, batchsize=512):
return self.fit (X, epochs, batchsize)
def test (self, X, synthesis=False):
return self.evaluate (X, synthesis)
def checkArray (self, array):
# pad to fit dimensions
padding = [0.0] if self.channels < 2 else [[0.0] * self.channels]
for i in range (0, len (array)):
height, width = array [i].shape [: 2]
array = list (array)
for h in range (height, self.height):
array [i] = numpy.append (array [i], [padding * width], axis=0)
for w in range (width, self.width):
array [i] = numpy.append (array [i], [padding] * (self.height), axis=1)
# reshape array
array = numpy.reshape (array, (len (array), self.height, self.width, self.channels))
return array
@classmethod
def testModule (cls, args):
train = [Figure.getNormalFigure (12, 14) for i in range (0, 1024*4)]
test1 = [Figure.getNormalFigure (12, 14) for i in range (0, 8)]
test2 = [Figure.getAbnormalFigure (12, 14) for i in range (0, 8)]
autoencoder = cls (12, 14, args=args)
autoencoder.fit (train)
scores1, syns1 = autoencoder.evaluate (test1, synthesis=True)
scores2, syns2 = autoencoder.evaluate (test2, synthesis=True)
print ('Normal: ' + str (numpy.mean (scores1)))
print ('Abnormal: ' + str (numpy.mean (scores2)))
print (syns1 [0])
class AutoencoderR (object):
def __init__ (self, height, width, args=None, model=Autoencoder2, loss='mean_squared_error'):
gc.collect()
self.hyperlambda = 0.0314159
self.hyperepsilon = 0.0314159 * 4
self.model = model
self.maxsize = 1024 * 1024
self.epochs = 2048
self.index = 0
self.trainX = []
self.args = args
self.loss = loss
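    # Robust-autoencoder style detection (an RPCA-like X = L + S split): evaluate()
    # alternates between (a) refitting the wrapped autoencoder on the noise-corrected data
    # X0 - N and (b) re-estimating the sparse noise N by soft-thresholding the
    # reconstruction residual X0 - X1, until N changes by no more than hyperepsilon
    # everywhere or the iteration budget runs out.  The anomaly score of a test image is
    # the mean squared magnitude of the noise assigned to it by scores().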
def fit (self, X, epochs=4, batchsize=512):
self.epochs = epochs * batchsize
for i in range (0, len (X)):
if len (self.trainX) < self.maxsize: self.trainX.append (None)
self.trainX [self.index % self.maxsize] = X [i]
self.index += 1
def evaluate (self, X, synthesis=False, epochs=None, batchsize=None):
if epochs is not None and batchsize is not None: self.epochs = epochs * batchsize
iteration = self.epochs
X0 = numpy.array (list (self.trainX) + list (X))
# initial noise must not be zero, otherwise the anomalous test set is trained
N1 = []
for i in range (0, len (self.trainX)): N1.append (numpy.zeros (X [0].shape))
for i in range (0, len (X)): N1.append (numpy.random.random_sample (X [0].shape) * 2 - 1)
N0, N1 = None, numpy.array (N1)
        # retrain the model as it could be affected by a previous anomalous test set
self.autoencoder = self.model (height=X0.shape [1], width=X0.shape [2], args=self.args, loss=self.loss)
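        # alternate between fitting the autoencoder on the noise-subtracted data (X0 - N)
        # and re-estimating the sparse noise N by soft-thresholding the reconstruction residual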
while not self.converged (N0, N1) and iteration > 0:
iteration -= 1
N0 = N1
self.autoencoder.fit (X0 - N0, epochs=1)
X1 = self.autoencoder.predict (X0)
N1 = self.threshold (X0, X1)
if self.args.verbose: print ('Iteration = ' + str (iteration))
        scores = self.scores ([N1 [-1 - i] for i in range (0, len (X))])
        syns = [X1 [-1 - i] for i in range (0, len (X))]
return scores, syns
def train (self, X, epochs=4, batchsize=512):
return self.fit (X, epochs, batchsize)
def test (self, X, synthesis=False, epochs=None, batchsize=None):
return self.evaluate (X, synthesis, epochs, batchsize)
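    # convergence: no element of |N1 - N0| may exceed hyperepsilon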
def converged (self, N0, N1):
if N0 is None or N1 is None: return False
N0 = numpy.reshape (N0, N0.size)
N1 = numpy.reshape (N1, N1.size)
return not any (diff > self.hyperepsilon for diff in numpy.absolute (N1 - N0))
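    # score each sample by the mean squared amplitude of its extracted noise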
def scores (self, N):
ret = []
for i in range (0, len (N)):
noise = numpy.array (N [i])
noise = numpy.reshape (noise, noise.size)
ret.append (numpy.sum (noise ** 2) / noise.size)
return ret
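    # element-wise soft-thresholding of the residual X0 - X1 with threshold hyperlambda / 2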
def threshold (self, X0, X1):
halflambda = self.hyperlambda / 2
diff = numpy.reshape (X0 - X1, ((len (X0), int (X0.size / len (X0)))))
if self.hyperlambda == 0: return diff
noise = numpy.zeros (diff.shape)
index = numpy.where (diff > halflambda)
noise [index] = diff [index] - halflambda
index = numpy.where (numpy.absolute (diff) <= halflambda)
noise [index] = 0
index = numpy.where (diff < 0 - halflambda)
noise [index] = diff [index] + halflambda
return numpy.reshape (noise, X0.shape)
@classmethod
def testModule (cls, args, model=None):
train = [Figure.getNormalFigure (12, 14) for i in range (0, 1024*4)]
test1 = [Figure.getNormalFigure (12, 14) for i in range (0, 8)]
test2 = [Figure.getAbnormalFigure (12, 14) for i in range (0, 8)]
autoencoder = cls (12, 14, args=args, model=model)
autoencoder.fit (train)
scores1, syns1 = autoencoder.evaluate (test1, synthesis=True)
scores2, syns2 = autoencoder.evaluate (test2, synthesis=True)
print ('Normal: ' + str (numpy.mean (scores1)))
print ('Abnormal: ' + str (numpy.mean (scores2)))
print (syns1 [0])
################################################
### Deviation ###
################################################
class Deviation (list):
def __init__ (self, obj, args=None):
filename = None
# read from file if obj is str
if isinstance (obj, str):
if os.path.isfile (obj):
filename, obj = obj, {}
if filename [-10: ] == '.deviation': obj = json.loads (open (filename, 'r').read ())
elif os.path.isdir (obj):
dirname, obj = obj, {}
for filename in os.listdir (dirname):
filename = os.path.join (dirname, filename)
if filename [-10: ] == '.deviation':
try:
deviation = json.loads (open (filename, 'r').read ())
for key in deviation: obj [key] = deviation [key]
except: continue # could be an empty file if the user is not presented on the day
# preprocess if obj is a dict
if isinstance (obj, dict):
# disable features if specified
for key in list (obj.keys ()):
if ( (key [-2: ] == '-0' and args.no_level_zero)
or ('all' not in args.features and not any (feature in key for feature in args.features))):
del (obj [key])
# flat to one level
for key in list (obj.keys ()):
if isinstance (obj [key], list) and isinstance (obj [key][0], list):
for level in range (0, len (obj [key])):
obj [key + '-' + str (level)] = obj [key][level]
del (obj [key])
# split by timeslices
arr = []
for i in range (0, len (obj [list (obj.keys ()) [0]])):
arr.append ({k: obj [k][i] for k in obj.keys ()})
# pre-built deviation
if isinstance (obj, list):
arr = obj
# instantiation
super (Deviation, self).__init__ (arr)
if filename is not None:
names = filename.split ('/')
self.date = names [-2]
self.user = names [-3]
def getUser (self):
return self.user
def getDate (self):
return self.date
def getKeys (self):
return sorted ([key for key in self [0]])
################################################
### User ###
################################################
class User (dict):
def __init__ (self, username, args, init=None):
if init is not None: super (User, self).__init__ (init)
else: super (User, self).__init__ ()
self.username = username
self.args = args
self.heatmaps = None
self.heatdates = None
self.risk = 'None'
def append (self, deviation):
if len (deviation) > 0:
self.__setitem__ (deviation.date, deviation)
self.heatmaps = None
self.heatdates = None
def getDateRange (self, start=None, end=None):
dates = sorted (self.keys ())
iStart, iEnd = 0, len (dates)
if start and not isinstance (start, str): start, end = start
        while start and iStart + 1 < len (dates) and dates [iStart + 1] <= start: iStart += 1
while end and iEnd > 0 and dates [iEnd - 1] > end: iEnd -= 1
return dates [iStart: iEnd]
def sample (self):
return self [list (self.keys ()) [0]]
def setRisk (self, risk):
if isinstance (risk, str):
self.risk = risk
else: self.risk = '_'.join ([self.args.model, str (risk)])
def heatmap (self, dates=None, plot=None):
# check dates: return None if any date is missed out
dates = self.getDateRange () if dates is None else dates
absolute_deviation = True
if any (d not in self for d in dates): return None
# make figures
if self.heatmaps is None or self.heatdates != dates:
self.heatmaps = []
self.heatdates = dates
dateLabels = dates
featureLabels = self.sample ().getKeys ()
timeslices = len (self.sample ())
for timeIndex in range (0, timeslices):
self.heatmaps.append ([])
for featureIndex, feature in enumerate (featureLabels):
self.heatmaps [timeIndex].append ([])
for dateIndex, date in enumerate (dateLabels):
if feature not in self [date][timeIndex]: self [date][timeIndex][feature] = 0.0 # default value of deviation = 0.0
observation = self [date][timeIndex][feature] / self.args.image_sigma # scale transformation from (-5, 5) to (-1, 1)
observation = max (-1.0, min (1.0, observation)) # check boundaries as raw sigma could be 5
if observation < 0.0: observation = observation * self.args.image_negweight # assign negative weight
if absolute_deviation: observation = (observation + 1) / 2 # scale observation from (-1, 1) to (0, 1)
self.heatmaps [timeIndex][featureIndex].append (observation)
# plot figures
if plot and self.args.image_dir is not None:
dateLabels = dates
featureLabels = self.sample ().getKeys ()
timeslices = len (self.sample ())
rows = [self.heatmaps] if plot is True else [self.heatmaps, plot.heatmap (dates=dates)]
title = '_'.join ([self.args.image_prefix, self.risk, self.username, dateLabels [0], dateLabels [-1]])
fig, axs = matplotlib.pyplot.subplots (nrows=len (rows), ncols=timeslices, sharex=True, sharey=True)
for row in range (0, len (rows)):
figures = rows [row]
for timeIndex in range (0, timeslices):
# color transformation
bitmap = []
for featureIndex in range (0, len (figures [timeIndex])):
bitmap.append ([])
for dateIndex in range (0, len (figures [timeIndex][featureIndex])):
observation = figures [timeIndex][featureIndex][dateIndex]
if absolute_deviation: observation = (observation * 2) - 1 # rescale observation from (0, 1) to (-1, 1)
if observation >= 0.0: observation = [observation, 0.0, 0.0] # red gradient
else: observation = [-observation, -observation, -observation] # gray gradient
bitmap [featureIndex].append (observation)
# plot
ax = axs [timeIndex] if len (rows) == 1 else axs [row][timeIndex]
im = ax.imshow (numpy.array (bitmap))
ax.set_xticks (numpy.arange (len (dateLabels)))
ax.set_yticks (numpy.arange (len (featureLabels)))
ax.set_xticklabels (dateLabels, fontsize=4)
ax.set_yticklabels (featureLabels, fontsize=4)
matplotlib.pyplot.setp (ax.get_xticklabels (), rotation=45, ha='right', rotation_mode='anchor')
matplotlib.pyplot.setp (ax.get_yticklabels (), rotation=45, ha='right', rotation_mode='anchor')
fig.suptitle (title)
fig.tight_layout ()
matplotlib.pyplot.savefig (os.path.join (self.args.image_dir, title + '.png'), dpi=600)
matplotlib.pyplot.close (fig)
return self.heatmaps
################################################
### User Manager ###
################################################
class UserManager (dict):
def __init__ (self, args):
super (UserManager, self).__init__ ()
self.common = None
self.args = args
def append (self, obj):
if isinstance (obj, Deviation):
user = obj.getUser ()
if user not in self: self [user] = User (user, self.args)
self [user].append (obj)
self.common = None
def sample (self):
return self [list (self.keys ()) [0]]
def mean (self):
if self.common is None:
timeslices = len (self.sample ().sample ())
fsum = {}
# append everything
for user in self:
for date in self [user]:
if date not in fsum: fsum [date] = [dict () for timeIndex in range (0, timeslices)]
for timeIndex in range (0, timeslices):
for feature in self [user][date][timeIndex]:
if feature not in fsum [date][timeIndex]: fsum [date][timeIndex][feature] = []
fsum [date][timeIndex][feature].append (self [user][date][timeIndex][feature])
# calculate mean
for date in fsum:
for timeIndex in range (0, timeslices):
for feature in fsum [date][timeIndex]:
fsum [date][timeIndex][feature] = numpy.mean (fsum [date][timeIndex][feature])
fsum [date] = Deviation (fsum [date], self.args)
self.common = User ('MEAN', self.args, fsum)
return self.common
################################################
### Test Figure ###
################################################
class Figure (numpy.ndarray):
def __new__ (cls, h, w=None, args=None, tags=None):
if w is not None and isinstance (h, int) and isinstance (w, int):
array = []
for i in range (0, h): array.append([0.0] * w)
ret = numpy.asarray (array).view (cls)
else: ret = numpy.asarray (h).view (cls)
ret.args = args
ret.tagl = tags
return ret
def tags (self):
try:
return list (self.tagl)
except:
return self.tagl
def flat (self):
return self.reshape (self.size)
def plot (self, filename, args=None):
if args: self.args = args
# color transformation
width = self.args.image_window * 2
height = int (self.size / width)
heatmap = self.reshape (height, width)
filename = self.args.image_prefix + filename
bitmap = []
for i in range (0, height):
bitmap.append ([])
for j in range (0, width):
observation = heatmap [i][j]
if observation >= 0.0: observation = [observation, 0.0, 0.0] # red gradient
else: observation = [-observation, -observation, -observation] # gray gradient
bitmap [i].append (observation)
# plot
fig, axs = matplotlib.pyplot.subplots (nrows=1, ncols=1)
im = axs.imshow (numpy.array (bitmap))
fig.suptitle (filename)
fig.tight_layout ()
matplotlib.pyplot.savefig (os.path.join (self.args.image_dir, filename + '.png'), dpi=600)
matplotlib.pyplot.close (fig)
@classmethod
def getNormalFigure (cls, h, w, args=None, tags=None):
ret = cls (h, w, args, tags)
for i in range (0, h, 2):
for j in range (0, w, 2):
if random.random () > 0.5: ret [i][j] = random.random () * 2 - 1
ret [0][0] = 0.88
ret [0][1] = 0.31415
return ret
@classmethod
def getAbnormalFigure (cls, h, w, args=None, tags=None):
ret = cls.getNormalFigure (h, w, args, tags)
for i in range (1, h, 2):
for j in range (1, w, 2):
if random.random () > 0.5: ret [i][j] = 1.0
return ret
@classmethod
def Heatmap (cls, heatmap, rows=None, timeslices=None, features=None, dates=None, args=None, tags=None):
if not rows or not timeslices or not features or not dates:
rows = len (heatmap)
timeslices = len (heatmap [0])
features = len (heatmap [0][0])
dates = len (heatmap [0][0][0])
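        # flatten the 4-D heatmap [row][timeslice][feature][date] into a 2-D (rows * features) x (timeslices * dates) bitmap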
bitmap = [[0.0] * (timeslices * dates) for _ in range (0, (rows * features))]
for row in range (0, rows):
for timeIndex in range (0, timeslices):
for feature in range (0, features):
for date in range (0, dates):
bitmap [row * features + feature][timeIndex * dates + date] = heatmap [row][timeIndex][feature][date]
return cls (bitmap, args=args, tags=tags)
################################################
### MISC ###
################################################
if __name__ == '__main__':
main ()
|
[
"numpy.absolute",
"matplotlib.pyplot.yscale",
"os.remove",
"tensorflow.keras.layers.MaxPooling2D",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"numpy.sum",
"tensorly.kruskal_tensor.kruskal_to_tensor",
"random.shuffle",
"numpy.random.random_sample",
"numpy.ones",
"json.dumps",
"gc.collect",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.LeakyReLU",
"os.path.isfile",
"tensorflow.keras.models.Sequential",
"numpy.mean",
"os.path.join",
"tensorflow.keras.layers.Flatten",
"tensorflow.abs",
"tensorflow.keras.layers.BatchNormalization",
"tensorly.tenalg.khatri_rao",
"matplotlib.pyplot.close",
"numpy.power",
"numpy.append",
"numpy.max",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"numpy.reshape",
"tensorflow.keras.layers.UpSampling2D",
"matplotlib.pyplot.subplots",
"tensorflow.config.experimental.set_visible_devices",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.backend.clear_session",
"numpy.asarray",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.models.Model",
"random.random",
"matplotlib.use",
"tensorflow.keras.initializers.RandomNormal",
"numpy.dot",
"tensorflow.config.experimental.list_physical_devices",
"os.listdir",
"matplotlib.pyplot.grid",
"numpy.concatenate",
"numpy.random.uniform",
"tensorflow.keras.layers.Conv2D",
"os.path.isdir",
"tensorly.base.unfold",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.eye"
] |
[((167, 253), 'tensorflow.config.experimental.set_visible_devices', 'tensorflow.config.experimental.set_visible_devices', ([], {'devices': '[]', 'device_type': '"""GPU"""'}), "(devices=[], device_type=\n 'GPU')\n", (217, 253), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((250, 271), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (264, 271), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((386, 463), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find anomalies from deviation matrices"""'}), "(description='Find anomalies from deviation matrices')\n", (409, 463), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((3109, 3168), 'tensorflow.config.experimental.list_physical_devices', 'tensorflow.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (3161, 3168), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((3178, 3265), 'tensorflow.config.experimental.set_visible_devices', 'tensorflow.config.experimental.set_visible_devices', ([], {'devices': 'gpus', 'device_type': '"""GPU"""'}), "(devices=gpus,\n device_type='GPU')\n", (3228, 3265), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((4088, 4102), 'os.listdir', 'os.listdir', (['_d'], {}), '(_d)\n', (4098, 4102), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((10631, 10643), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10641, 10643), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((10856, 10888), 'numpy.ones', 'numpy.ones', (['(X.shape[0], self.k)'], {}), '((X.shape[0], self.k))\n', (10866, 10888), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((10903, 10935), 'numpy.ones', 'numpy.ones', (['(X.shape[1], self.k)'], {}), '((X.shape[1], self.k))\n', (10913, 10935), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((10950, 10982), 'numpy.ones', 'numpy.ones', (['(X.shape[2], self.k)'], {}), '((X.shape[2], self.k))\n', (10960, 10982), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12107, 12139), 'numpy.ones', 'numpy.ones', (['(X.shape[0], self.k)'], {}), '((X.shape[0], self.k))\n', (12117, 12139), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12154, 12186), 'numpy.ones', 'numpy.ones', (['(X.shape[1], self.k)'], {}), '((X.shape[1], self.k))\n', (12164, 12186), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12201, 12233), 'numpy.ones', 'numpy.ones', (['(X.shape[2], self.k)'], {}), '((X.shape[2], self.k))\n', (12211, 12233), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((14451, 14496), 'tensorly.kruskal_tensor.kruskal_to_tensor', 'tensorly.kruskal_tensor.kruskal_to_tensor', (['F0'], {}), '(F0)\n', (14492, 14496), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((15420, 15438), 'numpy.array', 'numpy.array', (['array'], {}), '(array)\n', (15431, 15438), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((16568, 16580), 'gc.collect', 'gc.collect', ([], {}), '()\n', (16578, 16580), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((17131, 17156), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (17154, 17156), True, 'import tensorflow.keras as keras\n'), ((18295, 18320), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), 
'()\n', (18318, 18320), True, 'import tensorflow.keras as keras\n'), ((19513, 19551), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.zdim,)'}), '(shape=(self.zdim,))\n', (19531, 19551), True, 'import tensorflow.keras as keras\n'), ((19690, 19754), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'self.ganInput', 'outputs': 'self.ganOutput'}), '(inputs=self.ganInput, outputs=self.ganOutput)\n', (19708, 19754), True, 'import tensorflow.keras as keras\n'), ((19954, 20066), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'self.discriminator.layers[0].input', 'outputs': 'self.discriminator.layers[-5].output'}), '(inputs=self.discriminator.layers[0].input, outputs=self.\n discriminator.layers[-5].output)\n', (19972, 20066), True, 'import tensorflow.keras as keras\n'), ((20359, 20397), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.zdim,)'}), '(shape=(self.zdim,))\n', (20377, 20397), True, 'import tensorflow.keras as keras\n'), ((20619, 20706), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'self.anoInput', 'outputs': '[self.anoMidput, self.anoOutput]'}), '(inputs=self.anoInput, outputs=[self.anoMidput, self.\n anoOutput])\n', (20637, 20706), True, 'import tensorflow.keras as keras\n'), ((25239, 25251), 'gc.collect', 'gc.collect', ([], {}), '()\n', (25249, 25251), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((25549, 25574), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (25572, 25574), True, 'import tensorflow.keras as keras\n'), ((26645, 26670), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (26668, 26670), True, 'import tensorflow.keras as keras\n'), ((27590, 27630), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.length,)'}), '(shape=(self.length,))\n', (27608, 27630), True, 'import tensorflow.keras as keras\n'), ((27756, 27799), 'tensorflow.keras.models.Model', 'keras.models.Model', (['self.input', 'self.output'], {}), '(self.input, self.output)\n', (27774, 27799), True, 'import tensorflow.keras as keras\n'), ((29932, 29944), 'gc.collect', 'gc.collect', ([], {}), '()\n', (29942, 29944), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((30421, 30446), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (30444, 30446), True, 'import tensorflow.keras as keras\n'), ((31391, 31416), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (31414, 31416), True, 'import tensorflow.keras as keras\n'), ((32602, 32668), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.height, self.width, self.channels)'}), '(shape=(self.height, self.width, self.channels))\n', (32620, 32668), True, 'import tensorflow.keras as keras\n'), ((32794, 32837), 'tensorflow.keras.models.Model', 'keras.models.Model', (['self.input', 'self.output'], {}), '(self.input, self.output)\n', (32812, 32837), True, 'import tensorflow.keras as keras\n'), ((35716, 35728), 'gc.collect', 'gc.collect', ([], {}), '()\n', (35726, 35728), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((37874, 37900), 'numpy.reshape', 'numpy.reshape', (['N0', 'N0.size'], {}), '(N0, N0.size)\n', (37887, 37900), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((37915, 37941), 'numpy.reshape', 'numpy.reshape', (['N1', 'N1.size'], {}), '(N1, 
N1.size)\n', (37928, 37941), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38503, 38526), 'numpy.zeros', 'numpy.zeros', (['diff.shape'], {}), '(diff.shape)\n', (38514, 38526), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38544, 38574), 'numpy.where', 'numpy.where', (['(diff > halflambda)'], {}), '(diff > halflambda)\n', (38555, 38574), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38734, 38768), 'numpy.where', 'numpy.where', (['(diff < 0 - halflambda)'], {}), '(diff < 0 - halflambda)\n', (38745, 38768), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38835, 38865), 'numpy.reshape', 'numpy.reshape', (['noise', 'X0.shape'], {}), '(noise, X0.shape)\n', (38848, 38865), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((50370, 50414), 'matplotlib.pyplot.subplots', 'matplotlib.pyplot.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (50396, 50414), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((50630, 50658), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', (['fig'], {}), '(fig)\n', (50653, 50658), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((3303, 3366), 'tensorflow.config.experimental.set_memory_growth', 'tensorflow.config.experimental.set_memory_growth', (['gpus[i]', '(True)'], {}), '(gpus[i], True)\n', (3351, 3366), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((9428, 9455), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', (['(1)'], {}), '(1)\n', (9452, 9455), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((9469, 9503), 'matplotlib.pyplot.yscale', 'matplotlib.pyplot.yscale', (['"""linear"""'], {}), "('linear')\n", (9493, 9503), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((9517, 9545), 'matplotlib.pyplot.grid', 'matplotlib.pyplot.grid', (['(True)'], {}), '(True)\n', (9539, 9545), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((9838, 9869), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', (['figure'], {}), '(figure)\n', (9861, 9869), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((11151, 11185), 'tensorly.tenalg.khatri_rao', 'tensorly.tenalg.khatri_rao', (['[B, C]'], {}), '([B, C])\n', (11177, 11185), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((11364, 11398), 'tensorly.tenalg.khatri_rao', 'tensorly.tenalg.khatri_rao', (['[A, C]'], {}), '([A, C])\n', (11390, 11398), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((11577, 11611), 'tensorly.tenalg.khatri_rao', 'tensorly.tenalg.khatri_rao', (['[A, B]'], {}), '([A, B])\n', (11603, 11611), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((14769, 14814), 'tensorly.kruskal_tensor.kruskal_to_tensor', 'tensorly.kruskal_tensor.kruskal_to_tensor', (['F1'], {}), '(F1)\n', (14810, 14814), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((17392, 17425), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (17414, 17425), True, 'import tensorflow.keras as keras\n'), ((17456, 17489), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (17487, 17489), True, 'import tensorflow.keras as keras\n'), ((17520, 17574), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['(self.row, self.col, self.units)'], {}), 
'((self.row, self.col, self.units))\n', (17540, 17574), True, 'import tensorflow.keras as keras\n'), ((17605, 17649), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': 'self.strides'}), '(size=self.strides)\n', (17630, 17649), True, 'import tensorflow.keras as keras\n'), ((17794, 17827), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (17816, 17827), True, 'import tensorflow.keras as keras\n'), ((17858, 17891), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (17889, 17891), True, 'import tensorflow.keras as keras\n'), ((17922, 17966), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': 'self.strides'}), '(size=self.strides)\n', (17947, 17966), True, 'import tensorflow.keras as keras\n'), ((17997, 18096), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['self.channels'], {'kernel_size': 'self.kernels', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(self.channels, kernel_size=self.kernels, padding='same',\n activation='tanh')\n", (18016, 18096), True, 'import tensorflow.keras as keras\n'), ((18653, 18686), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (18675, 18686), True, 'import tensorflow.keras as keras\n'), ((18721, 18754), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (18752, 18754), True, 'import tensorflow.keras as keras\n'), ((18789, 18823), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (18809, 18823), True, 'import tensorflow.keras as keras\n'), ((18858, 18958), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['self.units'], {'kernel_size': 'self.kernels', 'strides': 'self.strides', 'padding': '"""same"""'}), "(self.units, kernel_size=self.kernels, strides=self.\n strides, padding='same')\n", (18877, 18958), True, 'import tensorflow.keras as keras\n'), ((19002, 19035), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (19024, 19035), True, 'import tensorflow.keras as keras\n'), ((19070, 19103), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (19101, 19103), True, 'import tensorflow.keras as keras\n'), ((19138, 19172), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (19158, 19172), True, 'import tensorflow.keras as keras\n'), ((19207, 19229), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (19227, 19229), True, 'import tensorflow.keras as keras\n'), ((19264, 19313), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': 'self.activation'}), '(1, activation=self.activation)\n', (19282, 19313), True, 'import tensorflow.keras as keras\n'), ((21102, 21125), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (21116, 21125), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((22639, 22681), 'numpy.random.uniform', 'numpy.random.uniform', (['(0)', '(1)', '(1, self.zdim)'], {}), '(0, 1, (1, self.zdim))\n', (22659, 22681), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((23206, 23220), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (23217, 23220), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((24289, 24320), 
'tensorflow.abs', 'tensorflow.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (24303, 24320), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((25784, 25817), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (25806, 25817), True, 'import tensorflow.keras as keras\n'), ((25846, 25879), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (25877, 25879), True, 'import tensorflow.keras as keras\n'), ((25977, 26010), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (25999, 26010), True, 'import tensorflow.keras as keras\n'), ((26039, 26072), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (26070, 26072), True, 'import tensorflow.keras as keras\n'), ((26170, 26203), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (26192, 26203), True, 'import tensorflow.keras as keras\n'), ((26232, 26265), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (26263, 26265), True, 'import tensorflow.keras as keras\n'), ((26363, 26396), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (26385, 26396), True, 'import tensorflow.keras as keras\n'), ((26425, 26458), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (26456, 26458), True, 'import tensorflow.keras as keras\n'), ((26900, 26933), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (26922, 26933), True, 'import tensorflow.keras as keras\n'), ((26962, 26995), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (26993, 26995), True, 'import tensorflow.keras as keras\n'), ((27093, 27126), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (27115, 27126), True, 'import tensorflow.keras as keras\n'), ((27155, 27188), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (27186, 27188), True, 'import tensorflow.keras as keras\n'), ((27286, 27319), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (27308, 27319), True, 'import tensorflow.keras as keras\n'), ((27348, 27407), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['self.length'], {'activation': 'self.activation'}), '(self.length, activation=self.activation)\n', (27366, 27407), True, 'import tensorflow.keras as keras\n'), ((30717, 30750), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (30739, 30750), True, 'import tensorflow.keras as keras\n'), ((30779, 30812), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (30810, 30812), True, 'import tensorflow.keras as keras\n'), ((30841, 30896), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['self.strides'], {'padding': '"""same"""'}), "(self.strides, padding='same')\n", (30866, 30896), True, 'import tensorflow.keras as keras\n'), ((31025, 31058), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (31047, 31058), True, 'import tensorflow.keras as keras\n'), ((31087, 31120), 'tensorflow.keras.layers.BatchNormalization', 
'keras.layers.BatchNormalization', ([], {}), '()\n', (31118, 31120), True, 'import tensorflow.keras as keras\n'), ((31149, 31204), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['self.strides'], {'padding': '"""same"""'}), "(self.strides, padding='same')\n", (31174, 31204), True, 'import tensorflow.keras as keras\n'), ((31700, 31733), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (31722, 31733), True, 'import tensorflow.keras as keras\n'), ((31762, 31795), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (31793, 31795), True, 'import tensorflow.keras as keras\n'), ((31824, 31863), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', (['self.strides'], {}), '(self.strides)\n', (31849, 31863), True, 'import tensorflow.keras as keras\n'), ((31992, 32025), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (32014, 32025), True, 'import tensorflow.keras as keras\n'), ((32054, 32087), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (32085, 32087), True, 'import tensorflow.keras as keras\n'), ((32116, 32155), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', (['self.strides'], {}), '(self.strides)\n', (32141, 32155), True, 'import tensorflow.keras as keras\n'), ((32184, 32245), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['self.units', 'self.kernels'], {'padding': '"""same"""'}), "(self.units, self.kernels, padding='same')\n", (32203, 32245), True, 'import tensorflow.keras as keras\n'), ((32274, 32307), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', (['self.relu'], {}), '(self.relu)\n', (32296, 32307), True, 'import tensorflow.keras as keras\n'), ((32336, 32424), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['self.channels', 'self.kernels'], {'activation': '"""tanh"""', 'padding': '"""same"""'}), "(self.channels, self.kernels, activation='tanh', padding\n ='same')\n", (32355, 32424), True, 'import tensorflow.keras as keras\n'), ((33651, 33665), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (33662, 33665), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((33860, 33874), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (33871, 33874), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((36846, 36861), 'numpy.array', 'numpy.array', (['N1'], {}), '(N1)\n', (36857, 36861), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38130, 38147), 'numpy.array', 'numpy.array', (['N[i]'], {}), '(N[i])\n', (38141, 38147), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38170, 38202), 'numpy.reshape', 'numpy.reshape', (['noise', 'noise.size'], {}), '(noise, noise.size)\n', (38183, 38202), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((39850, 39869), 'os.path.isfile', 'os.path.isfile', (['obj'], {}), '(obj)\n', (39864, 39869), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((47122, 47150), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', (['fig'], {}), '(fig)\n', (47145, 47150), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((50441, 50460), 'numpy.array', 'numpy.array', (['bitmap'], {}), '(bitmap)\n', (50452, 50460), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((50558, 50610), 
'os.path.join', 'os.path.join', (['self.args.image_dir', "(filename + '.png')"], {}), "(self.args.image_dir, filename + '.png')\n", (50570, 50610), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((4198, 4220), 'os.path.join', 'os.path.join', (['_d', 'user'], {}), '(_d, user)\n', (4210, 4220), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((4250, 4269), 'os.listdir', 'os.listdir', (['userdir'], {}), '(userdir)\n', (4260, 4269), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((7926, 7955), 'tensorflow.keras.backend.clear_session', 'keras.backend.clear_session', ([], {}), '()\n', (7953, 7955), True, 'import tensorflow.keras as keras\n'), ((7986, 7998), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7996, 7998), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8466, 8488), 'json.dumps', 'json.dumps', (['evaluation'], {}), '(evaluation)\n', (8476, 8488), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8766, 8801), 'os.path.join', 'os.path.join', (['args.output_dir', 'date'], {}), '(args.output_dir, date)\n', (8778, 8801), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8829, 8860), 'os.path.join', 'os.path.join', (['datedir', 'filename'], {}), '(datedir, filename)\n', (8841, 8860), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8963, 8986), 'os.path.isfile', 'os.path.isfile', (['logfile'], {}), '(logfile)\n', (8977, 8986), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((9770, 9814), 'os.path.join', 'os.path.join', (['args.image_dir', "(title + '.png')"], {}), "(args.image_dir, title + '.png')\n", (9782, 9814), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((11227, 11253), 'tensorly.base.unfold', 'tensorly.base.unfold', (['X', '(0)'], {}), '(X, 0)\n', (11247, 11253), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((11440, 11466), 'tensorly.base.unfold', 'tensorly.base.unfold', (['X', '(1)'], {}), '(X, 1)\n', (11460, 11466), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((11653, 11679), 'tensorly.base.unfold', 'tensorly.base.unfold', (['X', '(2)'], {}), '(X, 2)\n', (11673, 11679), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((11799, 11851), 'tensorly.kruskal_tensor.kruskal_to_tensor', 'tensorly.kruskal_tensor.kruskal_to_tensor', (['[A, B, C]'], {}), '([A, B, C])\n', (11840, 11851), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((12333, 12359), 'tensorly.base.unfold', 'tensorly.base.unfold', (['X', '(0)'], {}), '(X, 0)\n', (12353, 12359), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((12362, 12396), 'tensorly.tenalg.khatri_rao', 'tensorly.tenalg.khatri_rao', (['[B, C]'], {}), '([B, C])\n', (12388, 12396), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((12435, 12452), 'numpy.dot', 'numpy.dot', (['B.T', 'B'], {}), '(B.T, B)\n', (12444, 12452), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12455, 12472), 'numpy.dot', 'numpy.dot', (['C.T', 'C'], {}), '(C.T, C)\n', (12464, 12472), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12783, 12809), 'tensorly.base.unfold', 'tensorly.base.unfold', (['X', '(1)'], {}), '(X, 1)\n', (12803, 12809), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((12812, 12846), 'tensorly.tenalg.khatri_rao', 'tensorly.tenalg.khatri_rao', (['[A, C]'], 
{}), '([A, C])\n', (12838, 12846), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((12885, 12902), 'numpy.dot', 'numpy.dot', (['A.T', 'A'], {}), '(A.T, A)\n', (12894, 12902), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12905, 12922), 'numpy.dot', 'numpy.dot', (['C.T', 'C'], {}), '(C.T, C)\n', (12914, 12922), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13255, 13281), 'tensorly.base.unfold', 'tensorly.base.unfold', (['X', '(2)'], {}), '(X, 2)\n', (13275, 13281), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((13284, 13318), 'tensorly.tenalg.khatri_rao', 'tensorly.tenalg.khatri_rao', (['[A, B]'], {}), '([A, B])\n', (13310, 13318), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((13357, 13374), 'numpy.dot', 'numpy.dot', (['A.T', 'A'], {}), '(A.T, A)\n', (13366, 13374), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13377, 13394), 'numpy.dot', 'numpy.dot', (['B.T', 'B'], {}), '(B.T, B)\n', (13386, 13394), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13703, 13755), 'tensorly.kruskal_tensor.kruskal_to_tensor', 'tensorly.kruskal_tensor.kruskal_to_tensor', (['[A, B, C]'], {}), '([A, B, C])\n', (13744, 13755), False, 'import tensorflow, tensorly, sklearn.linear_model\n'), ((14998, 15039), 'numpy.max', 'numpy.max', (['[scores1, scores2, scores3]', '(0)'], {}), '([scores1, scores2, scores3], 0)\n', (15007, 15039), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((20441, 20481), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['self.activation'], {}), '(self.activation)\n', (20464, 20481), True, 'import tensorflow.keras as keras\n'), ((21517, 21568), 'numpy.concatenate', 'numpy.concatenate', (['(batch_images, generated_images)'], {}), '((batch_images, generated_images))\n', (21534, 21568), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((23801, 23848), 'numpy.append', 'numpy.append', (['array[i]', '[[0.0] * width]'], {'axis': '(0)'}), '(array[i], [[0.0] * width], axis=0)\n', (23813, 23848), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((23927, 23980), 'numpy.append', 'numpy.append', (['array[i]', '([[0.0]] * self.height)'], {'axis': '(1)'}), '(array[i], [[0.0]] * self.height, axis=1)\n', (23939, 23980), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((28641, 28658), 'numpy.array', 'numpy.array', (['syns'], {}), '(syns)\n', (28652, 28658), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((29085, 29103), 'numpy.array', 'numpy.array', (['array'], {}), '(array)\n', (29096, 29103), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((34019, 34036), 'numpy.array', 'numpy.array', (['syns'], {}), '(syns)\n', (34030, 34036), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((34617, 34666), 'numpy.append', 'numpy.append', (['array[i]', '[padding * width]'], {'axis': '(0)'}), '(array[i], [padding * width], axis=0)\n', (34629, 34666), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((34745, 34800), 'numpy.append', 'numpy.append', (['array[i]', '([padding] * self.height)'], {'axis': '(1)'}), '(array[i], [padding] * self.height, axis=1)\n', (34757, 34800), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((36698, 36721), 'numpy.zeros', 'numpy.zeros', (['X[0].shape'], {}), 
'(X[0].shape)\n', (36709, 36721), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38655, 38675), 'numpy.absolute', 'numpy.absolute', (['diff'], {}), '(diff)\n', (38669, 38675), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((40030, 40048), 'os.path.isdir', 'os.path.isdir', (['obj'], {}), '(obj)\n', (40043, 40048), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((47049, 47098), 'os.path.join', 'os.path.join', (['self.args.image_dir', "(title + '.png')"], {}), "(self.args.image_dir, title + '.png')\n", (47061, 47098), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8891, 8926), 'os.path.join', 'os.path.join', (['args.output_dir', 'date'], {}), '(args.output_dir, date)\n', (8903, 8926), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8989, 9007), 'os.remove', 'os.remove', (['logfile'], {}), '(logfile)\n', (8998, 9007), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((16201, 16220), 'numpy.mean', 'numpy.mean', (['scores1'], {}), '(scores1)\n', (16211, 16220), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((16259, 16278), 'numpy.mean', 'numpy.mean', (['scores2'], {}), '(scores2)\n', (16269, 16278), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((17309, 17360), 'tensorflow.keras.initializers.RandomNormal', 'keras.initializers.RandomNormal', ([], {'stddev': 'self.stddev'}), '(stddev=self.stddev)\n', (17340, 17360), True, 'import tensorflow.keras as keras\n'), ((18566, 18617), 'tensorflow.keras.initializers.RandomNormal', 'keras.initializers.RandomNormal', ([], {'stddev': 'self.stddev'}), '(stddev=self.stddev)\n', (18597, 18617), True, 'import tensorflow.keras as keras\n'), ((20484, 20513), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['self.zdim'], {}), '(self.zdim)\n', (20502, 20513), True, 'import tensorflow.keras as keras\n'), ((23290, 23307), 'numpy.array', 'numpy.array', (['syns'], {}), '(syns)\n', (23301, 23307), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((24843, 24862), 'numpy.mean', 'numpy.mean', (['scores1'], {}), '(scores1)\n', (24853, 24862), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((24901, 24920), 'numpy.mean', 'numpy.mean', (['scores2'], {}), '(scores2)\n', (24911, 24920), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((25703, 25754), 'tensorflow.keras.initializers.RandomNormal', 'keras.initializers.RandomNormal', ([], {'stddev': 'self.stddev'}), '(stddev=self.stddev)\n', (25734, 25754), True, 'import tensorflow.keras as keras\n'), ((26818, 26869), 'tensorflow.keras.initializers.RandomNormal', 'keras.initializers.RandomNormal', ([], {'stddev': 'self.stddev'}), '(stddev=self.stddev)\n', (26849, 26869), True, 'import tensorflow.keras as keras\n'), ((28661, 28675), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (28672, 28675), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((29697, 29716), 'numpy.mean', 'numpy.mean', (['scores1'], {}), '(scores1)\n', (29707, 29716), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((29755, 29774), 'numpy.mean', 'numpy.mean', (['scores2'], {}), '(scores2)\n', (29765, 29774), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((30636, 30687), 'tensorflow.keras.initializers.RandomNormal', 'keras.initializers.RandomNormal', ([], {'stddev': 
'self.stddev'}), '(stddev=self.stddev)\n', (30667, 30687), True, 'import tensorflow.keras as keras\n'), ((31619, 31670), 'tensorflow.keras.initializers.RandomNormal', 'keras.initializers.RandomNormal', ([], {'stddev': 'self.stddev'}), '(stddev=self.stddev)\n', (31650, 31670), True, 'import tensorflow.keras as keras\n'), ((33735, 33752), 'numpy.array', 'numpy.array', (['syns'], {}), '(syns)\n', (33746, 33752), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((35466, 35485), 'numpy.mean', 'numpy.mean', (['scores1'], {}), '(scores1)\n', (35476, 35485), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((35524, 35543), 'numpy.mean', 'numpy.mean', (['scores2'], {}), '(scores2)\n', (35534, 35543), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38228, 38249), 'numpy.sum', 'numpy.sum', (['(noise ** 2)'], {}), '(noise ** 2)\n', (38237, 38249), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((39415, 39434), 'numpy.mean', 'numpy.mean', (['scores1'], {}), '(scores1)\n', (39425, 39434), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((39473, 39492), 'numpy.mean', 'numpy.mean', (['scores2'], {}), '(scores2)\n', (39483, 39492), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((40122, 40141), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (40132, 40141), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((49314, 49334), 'numpy.asarray', 'numpy.asarray', (['array'], {}), '(array)\n', (49327, 49334), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((49367, 49383), 'numpy.asarray', 'numpy.asarray', (['h'], {}), '(h)\n', (49380, 49383), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((50864, 50879), 'random.random', 'random.random', ([], {}), '()\n', (50877, 50879), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((51222, 51237), 'random.random', 'random.random', ([], {}), '()\n', (51235, 51237), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((4381, 4408), 'os.path.join', 'os.path.join', (['userdir', 'date'], {}), '(userdir, date)\n', (4393, 4408), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((8589, 8611), 'json.dumps', 'json.dumps', (['evaluation'], {}), '(evaluation)\n', (8599, 8611), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((9085, 9130), 'os.path.join', 'os.path.join', (['args.output_dir', 'date', 'filename'], {}), '(args.output_dir, date, filename)\n', (9097, 9130), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((15121, 15136), 'numpy.array', 'numpy.array', (['Y1'], {}), '(Y1)\n', (15132, 15136), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((15608, 15635), 'numpy.power', 'numpy.power', (['(X[d] - Y[d])', '(2)'], {}), '(X[d] - Y[d], 2)\n', (15619, 15635), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((36773, 36811), 'numpy.random.random_sample', 'numpy.random.random_sample', (['X[0].shape'], {}), '(X[0].shape)\n', (36799, 36811), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((38004, 38027), 'numpy.absolute', 'numpy.absolute', (['(N1 - N0)'], {}), '(N1 - N0)\n', (38018, 38027), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((40175, 40206), 'os.path.join', 'os.path.join', (['dirname', 
'filename'], {}), '(dirname, filename)\n', (40187, 40206), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((46421, 46440), 'numpy.array', 'numpy.array', (['bitmap'], {}), '(bitmap)\n', (46432, 46440), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((48709, 48751), 'numpy.mean', 'numpy.mean', (['fsum[date][timeIndex][feature]'], {}), '(fsum[date][timeIndex][feature])\n', (48719, 48751), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((9179, 9222), 'json.dumps', 'json.dumps', (['[user, score, date, args.model]'], {}), '([user, score, date, args.model])\n', (9189, 9222), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12568, 12593), 'numpy.dot', 'numpy.dot', (['destA', 'destA.T'], {}), '(destA, destA.T)\n', (12577, 12593), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12695, 12721), 'numpy.dot', 'numpy.dot', (['mttrpA', 'destA.T'], {}), '(mttrpA, destA.T)\n', (12704, 12721), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13018, 13043), 'numpy.dot', 'numpy.dot', (['destB', 'destB.T'], {}), '(destB, destB.T)\n', (13027, 13043), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13145, 13171), 'numpy.dot', 'numpy.dot', (['mttrpB', 'destB.T'], {}), '(mttrpB, destB.T)\n', (13154, 13171), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13490, 13515), 'numpy.dot', 'numpy.dot', (['destC', 'destC.T'], {}), '(destC, destC.T)\n', (13499, 13515), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13617, 13643), 'numpy.dot', 'numpy.dot', (['mttrpC', 'destC.T'], {}), '(mttrpC, destC.T)\n', (13626, 13643), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((50901, 50916), 'random.random', 'random.random', ([], {}), '()\n', (50914, 50916), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((12613, 12630), 'numpy.eye', 'numpy.eye', (['self.k'], {}), '(self.k)\n', (12622, 12630), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13063, 13080), 'numpy.eye', 'numpy.eye', (['self.k'], {}), '(self.k)\n', (13072, 13080), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n'), ((13535, 13552), 'numpy.eye', 'numpy.eye', (['self.k'], {}), '(self.k)\n', (13544, 13552), False, 'import argparse, json, gc, matplotlib, numpy, os, random, sys\n')]
|
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS
# file. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
from toast_planck.reproc_modules.destriping import FancyDestriperPol
from toast.mpi import MPI
from toast.tests.mpi import MPITestCase
import healpy as hp
import numpy as np
class DestriperPolTest(MPITestCase):
def setUp(self):
self.disable = True
if self.disable:
return
self.nside = 16
self.npix = 12 * self.nside ** 2
self.nnz = 3
self.destriper = FancyDestriperPol(
self.npix, self.nnz, MPI.COMM_WORLD,
do_offset=True, do_gain=True, do_pol_eff=True, do_pol_angle=True,
ndegrade=4, fsample=1.0, lowpassfreq=.1, dir_out='test')
self.verbose = False
def test_destripe(self):
if self.disable:
return
ntask = MPI.COMM_WORLD.size
rank = MPI.COMM_WORLD.rank
nsamp_tot = 100000
nsamp_proc = nsamp_tot // ntask + 1
my_first = rank * nsamp_proc
my_last = my_first + nsamp_proc
if my_last > nsamp_tot:
my_last = nsamp_tot
my_nsamp = my_last - my_first
np.random.seed(12345)
sky = np.array(hp.synfast(
np.ones([4, 4 * self.nside]), self.nside, fwhm=10 * np.pi / 180,
pol=True, new=True))
sky[1:] *= 0.1 # Suppress polarization wrt temperature
# hp.write_map('input_sky.fits', sky)
# sky = np.zeros([self.npix, self.nnz], dtype=np.float64)
# sky[:, 0] = np.arange(self.npix)
# sky[:, 1] = np.cos(np.arange(self.npix))
# sky[:, 2] = np.sin(np.arange(self.npix))
# sky = np.array(hp.reorder(sky.T, r2n=True )).T
sky = np.array(hp.reorder(sky, r2n=True)).T
# import pylab
# hp.mollview(sky[:,0], nest=True)
# pylab.savefig('map.png')
template_sky = np.array(hp.synfast(
np.ones([4, 4 * self.nside]), self.nside, fwhm=20 * np.pi / 180,
pol=True, new=True))
template_sky[1:] *= 0.1
# hp.write_map('template_sky.fits', template_sky)
# template_sky = np.arange(self.npix, dtype=np.float) % 10
# template_sky = np.sin(np.arange(self.npix, dtype=np.float)
# / self.npix*np.pi)
# template_sky -= np.mean(template_sky)
# template_sky = np.array(hp.reorder(template_sky.T, r2n=True)).T
template_sky = np.array(hp.reorder(template_sky, r2n=True)).T
# if rank == 0:
# hp.mollview(template_sky[:,0], nest=True)
# pylab.savefig('templatemap.png')
# Four detectors
sigma1 = 1.
sigma2 = 1.
sigma3 = 1.
sigma4 = 1.
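        # all four detectors scan the same pixel stream at different polarization angles;
        # white noise is added to every detector and gain / offset / template errors are
        # injected into the first two so the destriper has parameters to recover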
clean_toi = []
dirty_toi = []
t = np.arange(my_first, my_last)
pixels = t % self.npix
pixels[np.logical_and(t < nsamp_tot // 2, pixels < 8)] = 8
pixels1 = pixels
psi1 = t / self.npix * np.pi / 10
weights1 = np.vstack(
[np.ones(my_nsamp), np.cos(2 * psi1), np.sin(2 * psi1)]).T
psi1scan = psi1 + 1 * np.pi / 180
weights1scan = np.vstack(
[np.ones(my_nsamp),
0.9 * np.cos(2 * psi1scan),
0.9 * np.sin(2 * psi1scan)]).T
signal1 = np.zeros(my_nsamp)
template1 = np.zeros(my_nsamp)
for i, p in enumerate(pixels1):
signal1[i] = np.sum(sky[p] * weights1scan[i])
# signal1[i] = np.sum(sky[p] * weights1[i])
template1[i] = np.sum(template_sky[p] * weights1[i])
templates1 = [template1]
flag1 = np.zeros(my_nsamp, dtype=np.bool)
flag1[np.logical_and(t >= nsamp_tot // 4,
t <= 3 * nsamp_tot // 4)] = True
signal1 += np.random.randn(nsamp_tot)[my_first:my_last] * sigma1
clean_toi.append(signal1.copy())
signal1 *= 1.01
signal1 += templates1[0] * .1
signal1 += 1
dirty_toi.append(signal1.copy())
pixels2 = pixels
psi2 = t / self.npix * np.pi / 10 + np.pi / 2
weights2 = np.vstack(
[np.ones(my_nsamp), np.cos(2 * psi2), np.sin(2 * psi2)]).T
signal2 = np.zeros(my_nsamp)
template2 = np.zeros(my_nsamp)
for i, p in enumerate(pixels2):
signal2[i] = np.sum(sky[p] * weights2[i])
template2[i] = np.sum(template_sky[p] * weights2[i])
templates2 = [template2]
flag2 = np.zeros(my_nsamp, dtype=np.bool)
signal2 += np.random.randn(nsamp_tot)[my_first:my_last] * sigma2
clean_toi.append(signal2.copy())
signal2 *= .99
signal2 -= templates2[0] * .1
signal2 -= 1
dirty_toi.append(signal2)
pixels3 = pixels
psi3 = t / self.npix * np.pi / 10 + np.pi / 4
weights3 = np.vstack(
[np.ones(my_nsamp), np.cos(2 * psi3), np.sin(2 * psi3)]).T
signal3 = np.zeros(my_nsamp)
template3 = np.zeros(my_nsamp)
for i, p in enumerate(pixels3):
signal3[i] = np.sum(sky[p] * weights3[i])
template3[i] = np.sum(template_sky[p] * weights3[i])
templates3 = [template3]
flag3 = np.zeros(my_nsamp, dtype=np.bool)
signal3 += np.random.randn(nsamp_tot)[my_first:my_last] * sigma3
clean_toi.append(signal3.copy())
# signal3 -= templates3[0]*2
# signal3 -= 2
dirty_toi.append(signal3)
pixels4 = pixels
psi4 = t / self.npix * np.pi / 10 + np.pi / 4 + np.pi / 2
weights4 = np.vstack(
[np.ones(my_nsamp), np.cos(2 * psi4), np.sin(2 * psi4)]).T
signal4 = np.zeros(my_nsamp)
template4 = np.zeros(my_nsamp)
for i, p in enumerate(pixels4):
signal4[i] = np.sum(sky[p] * weights4[i])
template4[i] = np.sum(template_sky[p] * weights4[i])
templates4 = [template4]
flag4 = np.zeros(my_nsamp, dtype=np.bool)
signal4 += np.random.randn(nsamp_tot)[my_first:my_last] * sigma4
clean_toi.append(signal4.copy())
dirty_toi.append(signal4)
clean_toi = np.array(clean_toi)
dirty_toi = np.array(dirty_toi)
flags = np.vstack([flag1, flag2, flag3, flag4])
pixels = np.vstack([pixels1, pixels2, pixels3, pixels4])
weights = [weights1, weights2, weights3, weights4]
templates = [templates1, templates2, templates3, templates4]
resid = MPI.COMM_WORLD.allgather(
(dirty_toi - clean_toi)[np.logical_not(flags)].ravel())
if rank == 0:
print('RMS before destriping = {}'.format(np.std(np.hstack(resid))))
dirty_toi[1][10] = 1e4 # Add outlier
self.destriper.flag_outliers(
dirty_toi, flags, pixels, weights,
verbose=self.verbose, save_maps=False)
destriped_toi, _, _ = self.destriper.destripe(
dirty_toi, flags, pixels, weights, templates,
verbose=self.verbose, in_place=True, return_baselines=True,
siter='_poltest')
resid = MPI.COMM_WORLD.allgather(
(destriped_toi - clean_toi)[np.logical_not(flags)].ravel())
if rank == 0:
            print('RMS after destriping 1/3 = {}'.format(
np.std(np.hstack(resid))))
destriped_toi, _, _ = self.destriper.destripe(
dirty_toi, flags, pixels, weights, templates,
verbose=self.verbose, in_place=True, return_baselines=True)
resid = MPI.COMM_WORLD.allgather(
(destriped_toi - clean_toi)[np.logical_not(flags)].ravel())
if rank == 0:
            print('RMS after destriping 2/3 = {}'.format(
np.std(np.hstack(resid))))
destriped_toi, _, _ = self.destriper.destripe(
dirty_toi, flags, pixels, weights, templates,
verbose=self.verbose, in_place=True, return_baselines=True)
resid = MPI.COMM_WORLD.allgather(
(destriped_toi - clean_toi)[np.logical_not(flags)].ravel())
if rank == 0:
            print('RMS after destriping 3/3 = {}'.format(
np.std(np.hstack(resid))))
return
def test_destripe_single(self):
if self.disable:
return
ntask = MPI.COMM_WORLD.size
rank = MPI.COMM_WORLD.rank
nsamp_tot = 100000
nsamp_proc = nsamp_tot // ntask + 1
my_first = rank * nsamp_proc
my_last = my_first + nsamp_proc
if my_last > nsamp_tot:
my_last = nsamp_tot
my_nsamp = my_last - my_first
sky = np.zeros([self.npix, self.nnz], dtype=np.float64)
sky[:, 0] = np.arange(self.npix)
sky[:, 1] = np.cos(np.arange(self.npix))
sky[:, 2] = np.sin(np.arange(self.npix))
# One detector
sigma1 = 10.
np.random.seed(12345)
clean_toi = []
dirty_toi = []
t = np.arange(my_first, my_last)
pixels = t % self.npix
# pixels[np.logical_and(t<nsamp_tot//2, pixels<8)] = 8
pixels1 = pixels
psi1 = t / self.npix * np.pi / 10
weights1 = np.vstack(
[np.ones(my_nsamp), np.cos(2 * psi1), np.sin(2 * psi1)]).T
signal1 = np.zeros(my_nsamp)
template1 = t + np.sin(t / 1000) * 100
for i, p in enumerate(pixels1):
signal1[i] = np.sum(sky[p] * weights1[i])
templates1 = [template1]
flag1 = np.zeros(my_nsamp, dtype=np.bool)
flag1[np.logical_and(t >= nsamp_tot // 4,
t <= 3 * nsamp_tot // 4)] = True
signal1 += np.random.randn(nsamp_tot)[my_first:my_last] * sigma1
clean_toi.append(signal1.copy())
signal1 *= 1.01
signal1 += 1
signal1 += templates1[0] * 1e4
dirty_toi.append(signal1.copy())
clean_toi = np.array(clean_toi)
dirty_toi = np.array(dirty_toi)
flags = np.vstack([flag1])
pixels = np.vstack([pixels1])
weights = [weights1]
templates = [templates1]
resid = MPI.COMM_WORLD.allgather(
(dirty_toi - clean_toi)[np.logical_not(flags)].ravel())
if rank == 0:
print('RMS before destriping = {}'.format(np.std(np.hstack(resid))))
destriper = FancyDestriperPol(self.npix, self.nnz, MPI.COMM_WORLD,
do_offset=True, do_gain=True, ndegrade=1,
dir_out='test')
_, amp, cov = destriper.destripe(
dirty_toi, flags, pixels, weights, templates,
verbose=self.verbose, in_place=False, return_baselines=True,
siter='_singlepoltest')
if rank == 0:
print('template amplitude = {} +- {}'.format(
                amp[0, 0], np.sqrt(cov[0, 0])), flush=True)
if np.abs((amp[0, 0] - 1e4) / np.sqrt(cov[0, 0])) > 3 and \
np.abs((amp[0, 0] - 1e4) / 1e4) > 1e-4:
raise Exception(
'Failed to fit the template: {} +- {} != {}'.format(
amp[0, 0], np.sqrt(cov[0, 0]), 1e4))
return
def test_destripe_single_t_only(self):
if self.disable:
return
ntask = MPI.COMM_WORLD.size
rank = MPI.COMM_WORLD.rank
nsamp_tot = 100000
nsamp_proc = nsamp_tot // ntask + 1
my_first = rank * nsamp_proc
my_last = my_first + nsamp_proc
if my_last > nsamp_tot:
my_last = nsamp_tot
my_nsamp = my_last - my_first
sky = np.arange(self.npix)
# One detector
sigma1 = 10.
np.random.seed(12345)
clean_toi = []
dirty_toi = []
t = np.arange(my_first, my_last)
pixels = t % self.npix
# pixels[np.logical_and(t<nsamp_tot//2, pixels<8)] = 8
pixels1 = pixels
# psi1 = t / self.npix * np.pi / 10
signal1 = np.zeros(my_nsamp)
template1 = t + np.sin(t / 1000) * 100
for i, p in enumerate(pixels1):
signal1[i] = sky[p]
templates1 = [template1]
flag1 = np.zeros(my_nsamp, dtype=np.bool)
flag1[np.logical_and(t >= nsamp_tot // 4,
t <= 3 * nsamp_tot // 4)] = True
signal1 += np.random.randn(nsamp_tot)[my_first:my_last] * sigma1
clean_toi.append(signal1.copy())
signal1 *= 1.01
signal1 += 1
signal1 += templates1[0] * 1e4
dirty_toi.append(signal1.copy())
clean_toi = np.array(clean_toi)
dirty_toi = np.array(dirty_toi)
flags = np.vstack([flag1])
pixels = np.vstack([pixels1])
weights = None
templates = [templates1]
resid = MPI.COMM_WORLD.allgather(
(dirty_toi - clean_toi)[np.logical_not(flags)].ravel())
if rank == 0:
print('RMS before destriping = {}'.format(
np.std(np.hstack(resid))))
destriper = FancyDestriperPol(self.npix, 1, MPI.COMM_WORLD,
do_offset=True, do_gain=True, ndegrade=1,
dir_out='test')
_, amp, cov = destriper.destripe(
dirty_toi, flags, pixels, weights, templates,
verbose=self.verbose, in_place=False, return_baselines=True,
siter='_temptest')
if rank == 0:
print('template amplitude = {} +- {}'.format(
                amp[0, 0], np.sqrt(cov[0, 0])), flush=True)
if np.abs((amp[0, 0] - 1e4) / np.sqrt(cov[0, 0])) > 3 and \
np.abs((amp[0, 0] - 1e4) / 1e4) > 1e-4:
raise Exception(
'Failed to fit the template: {} +- {} != {}'.format(
amp[0, 0], np.sqrt(cov[0, 0]), 1e4))
return
|
[
"numpy.random.seed",
"toast_planck.reproc_modules.destriping.FancyDestriperPol",
"numpy.logical_and",
"numpy.sum",
"numpy.random.randn",
"numpy.abs",
"healpy.reorder",
"numpy.logical_not",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.sin",
"numpy.arange",
"numpy.array",
"numpy.cos",
"numpy.vstack",
"numpy.sqrt"
] |
[((615, 802), 'toast_planck.reproc_modules.destriping.FancyDestriperPol', 'FancyDestriperPol', (['self.npix', 'self.nnz', 'MPI.COMM_WORLD'], {'do_offset': '(True)', 'do_gain': '(True)', 'do_pol_eff': '(True)', 'do_pol_angle': '(True)', 'ndegrade': '(4)', 'fsample': '(1.0)', 'lowpassfreq': '(0.1)', 'dir_out': '"""test"""'}), "(self.npix, self.nnz, MPI.COMM_WORLD, do_offset=True,\n do_gain=True, do_pol_eff=True, do_pol_angle=True, ndegrade=4, fsample=\n 1.0, lowpassfreq=0.1, dir_out='test')\n", (632, 802), False, 'from toast_planck.reproc_modules.destriping import FancyDestriperPol\n'), ((1265, 1286), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (1279, 1286), True, 'import numpy as np\n'), ((2879, 2907), 'numpy.arange', 'np.arange', (['my_first', 'my_last'], {}), '(my_first, my_last)\n', (2888, 2907), True, 'import numpy as np\n'), ((3385, 3403), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (3393, 3403), True, 'import numpy as np\n'), ((3424, 3442), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (3432, 3442), True, 'import numpy as np\n'), ((3711, 3744), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {'dtype': 'np.bool'}), '(my_nsamp, dtype=np.bool)\n', (3719, 3744), True, 'import numpy as np\n'), ((4297, 4315), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (4305, 4315), True, 'import numpy as np\n'), ((4336, 4354), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (4344, 4354), True, 'import numpy as np\n'), ((4563, 4596), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {'dtype': 'np.bool'}), '(my_nsamp, dtype=np.bool)\n', (4571, 4596), True, 'import numpy as np\n'), ((5029, 5047), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (5037, 5047), True, 'import numpy as np\n'), ((5068, 5086), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (5076, 5086), True, 'import numpy as np\n'), ((5295, 5328), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {'dtype': 'np.bool'}), '(my_nsamp, dtype=np.bool)\n', (5303, 5328), True, 'import numpy as np\n'), ((5751, 5769), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (5759, 5769), True, 'import numpy as np\n'), ((5790, 5808), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (5798, 5808), True, 'import numpy as np\n'), ((6017, 6050), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {'dtype': 'np.bool'}), '(my_nsamp, dtype=np.bool)\n', (6025, 6050), True, 'import numpy as np\n'), ((6221, 6240), 'numpy.array', 'np.array', (['clean_toi'], {}), '(clean_toi)\n', (6229, 6240), True, 'import numpy as np\n'), ((6261, 6280), 'numpy.array', 'np.array', (['dirty_toi'], {}), '(dirty_toi)\n', (6269, 6280), True, 'import numpy as np\n'), ((6297, 6336), 'numpy.vstack', 'np.vstack', (['[flag1, flag2, flag3, flag4]'], {}), '([flag1, flag2, flag3, flag4])\n', (6306, 6336), True, 'import numpy as np\n'), ((6354, 6401), 'numpy.vstack', 'np.vstack', (['[pixels1, pixels2, pixels3, pixels4]'], {}), '([pixels1, pixels2, pixels3, pixels4])\n', (6363, 6401), True, 'import numpy as np\n'), ((8665, 8714), 'numpy.zeros', 'np.zeros', (['[self.npix, self.nnz]'], {'dtype': 'np.float64'}), '([self.npix, self.nnz], dtype=np.float64)\n', (8673, 8714), True, 'import numpy as np\n'), ((8735, 8755), 'numpy.arange', 'np.arange', (['self.npix'], {}), '(self.npix)\n', (8744, 8755), True, 'import numpy as np\n'), ((8909, 8930), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (8923, 8930), True, 'import numpy as np\n'), ((8991, 9019), 'numpy.arange', 
'np.arange', (['my_first', 'my_last'], {}), '(my_first, my_last)\n', (9000, 9019), True, 'import numpy as np\n'), ((9300, 9318), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (9308, 9318), True, 'import numpy as np\n'), ((9509, 9542), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {'dtype': 'np.bool'}), '(my_nsamp, dtype=np.bool)\n', (9517, 9542), True, 'import numpy as np\n'), ((9918, 9937), 'numpy.array', 'np.array', (['clean_toi'], {}), '(clean_toi)\n', (9926, 9937), True, 'import numpy as np\n'), ((9958, 9977), 'numpy.array', 'np.array', (['dirty_toi'], {}), '(dirty_toi)\n', (9966, 9977), True, 'import numpy as np\n'), ((9994, 10012), 'numpy.vstack', 'np.vstack', (['[flag1]'], {}), '([flag1])\n', (10003, 10012), True, 'import numpy as np\n'), ((10030, 10050), 'numpy.vstack', 'np.vstack', (['[pixels1]'], {}), '([pixels1])\n', (10039, 10050), True, 'import numpy as np\n'), ((10348, 10464), 'toast_planck.reproc_modules.destriping.FancyDestriperPol', 'FancyDestriperPol', (['self.npix', 'self.nnz', 'MPI.COMM_WORLD'], {'do_offset': '(True)', 'do_gain': '(True)', 'ndegrade': '(1)', 'dir_out': '"""test"""'}), "(self.npix, self.nnz, MPI.COMM_WORLD, do_offset=True,\n do_gain=True, ndegrade=1, dir_out='test')\n", (10365, 10464), False, 'from toast_planck.reproc_modules.destriping import FancyDestriperPol\n'), ((11604, 11624), 'numpy.arange', 'np.arange', (['self.npix'], {}), '(self.npix)\n', (11613, 11624), True, 'import numpy as np\n'), ((11680, 11701), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (11694, 11701), True, 'import numpy as np\n'), ((11762, 11790), 'numpy.arange', 'np.arange', (['my_first', 'my_last'], {}), '(my_first, my_last)\n', (11771, 11790), True, 'import numpy as np\n'), ((11972, 11990), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {}), '(my_nsamp)\n', (11980, 11990), True, 'import numpy as np\n'), ((12159, 12192), 'numpy.zeros', 'np.zeros', (['my_nsamp'], {'dtype': 'np.bool'}), '(my_nsamp, dtype=np.bool)\n', (12167, 12192), True, 'import numpy as np\n'), ((12568, 12587), 'numpy.array', 'np.array', (['clean_toi'], {}), '(clean_toi)\n', (12576, 12587), True, 'import numpy as np\n'), ((12608, 12627), 'numpy.array', 'np.array', (['dirty_toi'], {}), '(dirty_toi)\n', (12616, 12627), True, 'import numpy as np\n'), ((12644, 12662), 'numpy.vstack', 'np.vstack', (['[flag1]'], {}), '([flag1])\n', (12653, 12662), True, 'import numpy as np\n'), ((12680, 12700), 'numpy.vstack', 'np.vstack', (['[pixels1]'], {}), '([pixels1])\n', (12689, 12700), True, 'import numpy as np\n'), ((13009, 13119), 'toast_planck.reproc_modules.destriping.FancyDestriperPol', 'FancyDestriperPol', (['self.npix', '(1)', 'MPI.COMM_WORLD'], {'do_offset': '(True)', 'do_gain': '(True)', 'ndegrade': '(1)', 'dir_out': '"""test"""'}), "(self.npix, 1, MPI.COMM_WORLD, do_offset=True, do_gain=\n True, ndegrade=1, dir_out='test')\n", (13026, 13119), False, 'from toast_planck.reproc_modules.destriping import FancyDestriperPol\n'), ((2954, 3000), 'numpy.logical_and', 'np.logical_and', (['(t < nsamp_tot // 2)', '(pixels < 8)'], {}), '(t < nsamp_tot // 2, pixels < 8)\n', (2968, 3000), True, 'import numpy as np\n'), ((3508, 3540), 'numpy.sum', 'np.sum', (['(sky[p] * weights1scan[i])'], {}), '(sky[p] * weights1scan[i])\n', (3514, 3540), True, 'import numpy as np\n'), ((3624, 3661), 'numpy.sum', 'np.sum', (['(template_sky[p] * weights1[i])'], {}), '(template_sky[p] * weights1[i])\n', (3630, 3661), True, 'import numpy as np\n'), ((3759, 3819), 'numpy.logical_and', 'np.logical_and', (['(t >= nsamp_tot 
// 4)', '(t <= 3 * nsamp_tot // 4)'], {}), '(t >= nsamp_tot // 4, t <= 3 * nsamp_tot // 4)\n', (3773, 3819), True, 'import numpy as np\n'), ((4420, 4448), 'numpy.sum', 'np.sum', (['(sky[p] * weights2[i])'], {}), '(sky[p] * weights2[i])\n', (4426, 4448), True, 'import numpy as np\n'), ((4476, 4513), 'numpy.sum', 'np.sum', (['(template_sky[p] * weights2[i])'], {}), '(template_sky[p] * weights2[i])\n', (4482, 4513), True, 'import numpy as np\n'), ((5152, 5180), 'numpy.sum', 'np.sum', (['(sky[p] * weights3[i])'], {}), '(sky[p] * weights3[i])\n', (5158, 5180), True, 'import numpy as np\n'), ((5208, 5245), 'numpy.sum', 'np.sum', (['(template_sky[p] * weights3[i])'], {}), '(template_sky[p] * weights3[i])\n', (5214, 5245), True, 'import numpy as np\n'), ((5874, 5902), 'numpy.sum', 'np.sum', (['(sky[p] * weights4[i])'], {}), '(sky[p] * weights4[i])\n', (5880, 5902), True, 'import numpy as np\n'), ((5930, 5967), 'numpy.sum', 'np.sum', (['(template_sky[p] * weights4[i])'], {}), '(template_sky[p] * weights4[i])\n', (5936, 5967), True, 'import numpy as np\n'), ((8783, 8803), 'numpy.arange', 'np.arange', (['self.npix'], {}), '(self.npix)\n', (8792, 8803), True, 'import numpy as np\n'), ((8832, 8852), 'numpy.arange', 'np.arange', (['self.npix'], {}), '(self.npix)\n', (8841, 8852), True, 'import numpy as np\n'), ((9431, 9459), 'numpy.sum', 'np.sum', (['(sky[p] * weights1[i])'], {}), '(sky[p] * weights1[i])\n', (9437, 9459), True, 'import numpy as np\n'), ((9557, 9617), 'numpy.logical_and', 'np.logical_and', (['(t >= nsamp_tot // 4)', '(t <= 3 * nsamp_tot // 4)'], {}), '(t >= nsamp_tot // 4, t <= 3 * nsamp_tot // 4)\n', (9571, 9617), True, 'import numpy as np\n'), ((12207, 12267), 'numpy.logical_and', 'np.logical_and', (['(t >= nsamp_tot // 4)', '(t <= 3 * nsamp_tot // 4)'], {}), '(t >= nsamp_tot // 4, t <= 3 * nsamp_tot // 4)\n', (12221, 12267), True, 'import numpy as np\n'), ((1335, 1363), 'numpy.ones', 'np.ones', (['[4, 4 * self.nside]'], {}), '([4, 4 * self.nside])\n', (1342, 1363), True, 'import numpy as np\n'), ((1834, 1859), 'healpy.reorder', 'hp.reorder', (['sky'], {'r2n': '(True)'}), '(sky, r2n=True)\n', (1844, 1859), True, 'import healpy as hp\n'), ((2021, 2049), 'numpy.ones', 'np.ones', (['[4, 4 * self.nside]'], {}), '([4, 4 * self.nside])\n', (2028, 2049), True, 'import numpy as np\n'), ((2549, 2583), 'healpy.reorder', 'hp.reorder', (['template_sky'], {'r2n': '(True)'}), '(template_sky, r2n=True)\n', (2559, 2583), True, 'import healpy as hp\n'), ((3876, 3902), 'numpy.random.randn', 'np.random.randn', (['nsamp_tot'], {}), '(nsamp_tot)\n', (3891, 3902), True, 'import numpy as np\n'), ((4616, 4642), 'numpy.random.randn', 'np.random.randn', (['nsamp_tot'], {}), '(nsamp_tot)\n', (4631, 4642), True, 'import numpy as np\n'), ((5348, 5374), 'numpy.random.randn', 'np.random.randn', (['nsamp_tot'], {}), '(nsamp_tot)\n', (5363, 5374), True, 'import numpy as np\n'), ((6070, 6096), 'numpy.random.randn', 'np.random.randn', (['nsamp_tot'], {}), '(nsamp_tot)\n', (6085, 6096), True, 'import numpy as np\n'), ((9343, 9359), 'numpy.sin', 'np.sin', (['(t / 1000)'], {}), '(t / 1000)\n', (9349, 9359), True, 'import numpy as np\n'), ((9674, 9700), 'numpy.random.randn', 'np.random.randn', (['nsamp_tot'], {}), '(nsamp_tot)\n', (9689, 9700), True, 'import numpy as np\n'), ((10967, 11006), 'numpy.abs', 'np.abs', (['((amp[0, 0] - 10000.0) / 10000.0)'], {}), '((amp[0, 0] - 10000.0) / 10000.0)\n', (10973, 11006), True, 'import numpy as np\n'), ((12015, 12031), 'numpy.sin', 'np.sin', (['(t / 1000)'], {}), '(t / 1000)\n', 
(12021, 12031), True, 'import numpy as np\n'), ((12324, 12350), 'numpy.random.randn', 'np.random.randn', (['nsamp_tot'], {}), '(nsamp_tot)\n', (12339, 12350), True, 'import numpy as np\n'), ((13616, 13655), 'numpy.abs', 'np.abs', (['((amp[0, 0] - 10000.0) / 10000.0)'], {}), '((amp[0, 0] - 10000.0) / 10000.0)\n', (13622, 13655), True, 'import numpy as np\n'), ((3116, 3133), 'numpy.ones', 'np.ones', (['my_nsamp'], {}), '(my_nsamp)\n', (3123, 3133), True, 'import numpy as np\n'), ((3135, 3151), 'numpy.cos', 'np.cos', (['(2 * psi1)'], {}), '(2 * psi1)\n', (3141, 3151), True, 'import numpy as np\n'), ((3153, 3169), 'numpy.sin', 'np.sin', (['(2 * psi1)'], {}), '(2 * psi1)\n', (3159, 3169), True, 'import numpy as np\n'), ((3263, 3280), 'numpy.ones', 'np.ones', (['my_nsamp'], {}), '(my_nsamp)\n', (3270, 3280), True, 'import numpy as np\n'), ((4221, 4238), 'numpy.ones', 'np.ones', (['my_nsamp'], {}), '(my_nsamp)\n', (4228, 4238), True, 'import numpy as np\n'), ((4240, 4256), 'numpy.cos', 'np.cos', (['(2 * psi2)'], {}), '(2 * psi2)\n', (4246, 4256), True, 'import numpy as np\n'), ((4258, 4274), 'numpy.sin', 'np.sin', (['(2 * psi2)'], {}), '(2 * psi2)\n', (4264, 4274), True, 'import numpy as np\n'), ((4953, 4970), 'numpy.ones', 'np.ones', (['my_nsamp'], {}), '(my_nsamp)\n', (4960, 4970), True, 'import numpy as np\n'), ((4972, 4988), 'numpy.cos', 'np.cos', (['(2 * psi3)'], {}), '(2 * psi3)\n', (4978, 4988), True, 'import numpy as np\n'), ((4990, 5006), 'numpy.sin', 'np.sin', (['(2 * psi3)'], {}), '(2 * psi3)\n', (4996, 5006), True, 'import numpy as np\n'), ((5675, 5692), 'numpy.ones', 'np.ones', (['my_nsamp'], {}), '(my_nsamp)\n', (5682, 5692), True, 'import numpy as np\n'), ((5694, 5710), 'numpy.cos', 'np.cos', (['(2 * psi4)'], {}), '(2 * psi4)\n', (5700, 5710), True, 'import numpy as np\n'), ((5712, 5728), 'numpy.sin', 'np.sin', (['(2 * psi4)'], {}), '(2 * psi4)\n', (5718, 5728), True, 'import numpy as np\n'), ((9224, 9241), 'numpy.ones', 'np.ones', (['my_nsamp'], {}), '(my_nsamp)\n', (9231, 9241), True, 'import numpy as np\n'), ((9243, 9259), 'numpy.cos', 'np.cos', (['(2 * psi1)'], {}), '(2 * psi1)\n', (9249, 9259), True, 'import numpy as np\n'), ((9261, 9277), 'numpy.sin', 'np.sin', (['(2 * psi1)'], {}), '(2 * psi1)\n', (9267, 9277), True, 'import numpy as np\n'), ((10854, 10872), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (10861, 10872), True, 'import numpy as np\n'), ((11136, 11154), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (11143, 11154), True, 'import numpy as np\n'), ((13503, 13521), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (13510, 13521), True, 'import numpy as np\n'), ((13785, 13803), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (13792, 13803), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.cos', 'np.cos', (['(2 * psi1scan)'], {}), '(2 * psi1scan)\n', (3307, 3321), True, 'import numpy as np\n'), ((3342, 3362), 'numpy.sin', 'np.sin', (['(2 * psi1scan)'], {}), '(2 * psi1scan)\n', (3348, 3362), True, 'import numpy as np\n'), ((6609, 6630), 'numpy.logical_not', 'np.logical_not', (['flags'], {}), '(flags)\n', (6623, 6630), True, 'import numpy as np\n'), ((6724, 6740), 'numpy.hstack', 'np.hstack', (['resid'], {}), '(resid)\n', (6733, 6740), True, 'import numpy as np\n'), ((7227, 7248), 'numpy.logical_not', 'np.logical_not', (['flags'], {}), '(flags)\n', (7241, 7248), True, 'import numpy as np\n'), ((7362, 7378), 'numpy.hstack', 'np.hstack', (['resid'], {}), '(resid)\n', (7371, 7378), True, 'import numpy as 
np\n'), ((7651, 7672), 'numpy.logical_not', 'np.logical_not', (['flags'], {}), '(flags)\n', (7665, 7672), True, 'import numpy as np\n'), ((7786, 7802), 'numpy.hstack', 'np.hstack', (['resid'], {}), '(resid)\n', (7795, 7802), True, 'import numpy as np\n'), ((8075, 8096), 'numpy.logical_not', 'np.logical_not', (['flags'], {}), '(flags)\n', (8089, 8096), True, 'import numpy as np\n'), ((8210, 8226), 'numpy.hstack', 'np.hstack', (['resid'], {}), '(resid)\n', (8219, 8226), True, 'import numpy as np\n'), ((10192, 10213), 'numpy.logical_not', 'np.logical_not', (['flags'], {}), '(flags)\n', (10206, 10213), True, 'import numpy as np\n'), ((10307, 10323), 'numpy.hstack', 'np.hstack', (['resid'], {}), '(resid)\n', (10316, 10323), True, 'import numpy as np\n'), ((10926, 10944), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (10933, 10944), True, 'import numpy as np\n'), ((12836, 12857), 'numpy.logical_not', 'np.logical_not', (['flags'], {}), '(flags)\n', (12850, 12857), True, 'import numpy as np\n'), ((12968, 12984), 'numpy.hstack', 'np.hstack', (['resid'], {}), '(resid)\n', (12977, 12984), True, 'import numpy as np\n'), ((13575, 13593), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (13582, 13593), True, 'import numpy as np\n')]
|
import cv2 # Import relevant libraries
import numpy as np
from pymouse import PyMouse
if __name__ == '__main__':
# img = cv2.imread('Landscape.jpg', 0) # Read in image
img1 = cv2.imread('LandscapeGrey.jpg',0) # Read in image
img2 = cv2.imread('LandscapeGrey2.jpg',0) # Read in image
dst2 = cv2.resize(img2, None, fx=2, fy=2)
img4 = cv2.imread('LandscapeGrey4.jpg',0) # Read in image
dst4 = cv2.resize(img4, None, fx=4, fy=4)
img8 = cv2.imread('LandscapeGrey8.jpg',0) # Read in image
dst8 = cv2.resize(img8, None, fx=8, fy=8)
img16 = cv2.imread('LandscapeGrey16.jpg',0) # Read in image
m = PyMouse()
x_dim, y_dim = m.screen_size()
# img16 = cv2.resize(img, None, fx=0.0625, fy=0.0625)
#
# cv2.imwrite("LandscapeGrey16.jpg",img16)
height = img1.shape[0] # Get the dimensions
width = img1.shape[1]
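    # The loop below approximates foveated rendering around the mouse cursor:
    # concentric circles with marker values 8/4/2/1 are drawn into the 16x
    # downsampled image (resized back to full resolution), and each marked
    # region is then overwritten with pixels from a progressively sharper
    # version of the image (8x, 4x, 2x and finally full resolution).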
# Define mask
while True:
mask = np.ones(img1.shape, dtype='uint8')
dst = cv2.resize(img16,None,fx=16,fy=16)
mouse_x,mouse_y = m.position()
# Draw circle at x = 100, y = 70 of radius 25 and fill this in with 0
cv2.circle(dst, (int(mouse_x*width/x_dim), int(mouse_y*height/y_dim)), 1000, 8, -1)
cv2.circle(dst, (int(mouse_x*width/x_dim), int(mouse_y*height/y_dim)), 500, 4, -1)
cv2.circle(dst, (int(mouse_x*width/x_dim), int(mouse_y*height/y_dim)), 200, 2, -1)
cv2.circle(dst, (int(mouse_x*width/x_dim), int(mouse_y*height/y_dim)), 50, 1, -1)
dst[dst == 8] = dst8[dst == 8]
dst[dst == 4] = dst4[dst == 4]
dst[dst == 2] = dst2[dst == 2]
dst[dst == 1] = img1[dst == 1]
cv2.imshow('image',dst)
cv2.waitKey(1)
# Apply distance transform to mask
# out = cv2.distanceTransform(mask, cv2.DIST_L2, 3)/np.sqrt(height*height+width*width)
# out = cv2.distanceTransform(mask, cv2.DIST_L1, 3)/ (width+height)
#
# for column in out:
# for ix, num in enumerate(column):
# if num == 0 :
# continue
# elif num < 0.2:
# column[ix] = 20
# elif num < 0.4:
# column[ix] = 40
# elif num < 0.6:
# column[ix] = 80
# else:
# column[ix] = 160
#
# Define scale factor
# scale_factor = 10
#
# # Create output image that is the same as the original
# filtered = img.copy()
#
# # Create floating point copy for precision
# img_float = img.copy().astype('float')
#
# # Number of channels
# if len(img_float.shape) == 3:
# num_chan = img_float.shape[2]
# else:
# # If there is a single channel, make the images 3D with a singleton
# # dimension to allow for loop to work properly
# num_chan = 1
# img_float = img_float[:,:,None]
# filtered = filtered[:,:,None]
#
# # For each pixel in the input...
# for y in range(height):
# for x in range(width):
#
# # If distance transform is 0, skip
# if out[y,x] == 0.0:
# continue
#
# # Calculate M = d / S
# mask_val = np.ceil(out[y,x] / scale_factor)
#
# # If M is too small, set the mask size to the smallest possible value
# if mask_val <= 3:
# mask_val = 3
#
# # Get beginning and ending x and y coordinates for neighbourhood
# # and ensure they are within bounds
# beginx = x-int(mask_val/2)
# if beginx < 0:
# beginx = 0
#
# beginy = y-int(mask_val/2)
# if beginy < 0:
# beginy = 0
#
# endx = x+int(mask_val/2)
# if endx >= width:
# endx = width-1
#
# endy = y+int(mask_val/2)
# if endy >= height:
# endy = height-1
#
# # Get the coordinates of where we need to grab pixels
# xvals = np.arange(beginx, endx+1)
# yvals = np.arange(beginy, endy+1)
# (col_neigh,row_neigh) = np.meshgrid(xvals, yvals)
# col_neigh = col_neigh.astype('int')
# row_neigh = row_neigh.astype('int')
#
# # Get the pixels now
# # For each channel, do the foveation
# for ii in range(num_chan):
# chan = img_float[:,:,ii]
# pix = chan[row_neigh, col_neigh].ravel()
#
# # Calculate the average and set it to be the output
# filtered[y,x,ii] = int(np.mean(pix))
#
# # Remove singleton dimension if required for display and saving
# if num_chan == 1:
# filtered = filtered[:,:,0]
# Show the image
# cv2.imshow('Output', filtered)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"numpy.ones",
"pymouse.PyMouse",
"cv2.imread",
"cv2.imshow",
"cv2.resize"
] |
[((185, 219), 'cv2.imread', 'cv2.imread', (['"""LandscapeGrey.jpg"""', '(0)'], {}), "('LandscapeGrey.jpg', 0)\n", (195, 219), False, 'import cv2\n'), ((246, 281), 'cv2.imread', 'cv2.imread', (['"""LandscapeGrey2.jpg"""', '(0)'], {}), "('LandscapeGrey2.jpg', 0)\n", (256, 281), False, 'import cv2\n'), ((308, 342), 'cv2.resize', 'cv2.resize', (['img2', 'None'], {'fx': '(2)', 'fy': '(2)'}), '(img2, None, fx=2, fy=2)\n', (318, 342), False, 'import cv2\n'), ((355, 390), 'cv2.imread', 'cv2.imread', (['"""LandscapeGrey4.jpg"""', '(0)'], {}), "('LandscapeGrey4.jpg', 0)\n", (365, 390), False, 'import cv2\n'), ((417, 451), 'cv2.resize', 'cv2.resize', (['img4', 'None'], {'fx': '(4)', 'fy': '(4)'}), '(img4, None, fx=4, fy=4)\n', (427, 451), False, 'import cv2\n'), ((464, 499), 'cv2.imread', 'cv2.imread', (['"""LandscapeGrey8.jpg"""', '(0)'], {}), "('LandscapeGrey8.jpg', 0)\n", (474, 499), False, 'import cv2\n'), ((526, 560), 'cv2.resize', 'cv2.resize', (['img8', 'None'], {'fx': '(8)', 'fy': '(8)'}), '(img8, None, fx=8, fy=8)\n', (536, 560), False, 'import cv2\n'), ((574, 610), 'cv2.imread', 'cv2.imread', (['"""LandscapeGrey16.jpg"""', '(0)'], {}), "('LandscapeGrey16.jpg', 0)\n", (584, 610), False, 'import cv2\n'), ((635, 644), 'pymouse.PyMouse', 'PyMouse', ([], {}), '()\n', (642, 644), False, 'from pymouse import PyMouse\n'), ((917, 951), 'numpy.ones', 'np.ones', (['img1.shape'], {'dtype': '"""uint8"""'}), "(img1.shape, dtype='uint8')\n", (924, 951), True, 'import numpy as np\n'), ((966, 1003), 'cv2.resize', 'cv2.resize', (['img16', 'None'], {'fx': '(16)', 'fy': '(16)'}), '(img16, None, fx=16, fy=16)\n', (976, 1003), False, 'import cv2\n'), ((1654, 1678), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'dst'], {}), "('image', dst)\n", (1664, 1678), False, 'import cv2\n'), ((1686, 1700), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1697, 1700), False, 'import cv2\n')]
|
import gc
from typing import Tuple
import numpy as np
import torch
def get_embed_dropout(config):
if hasattr(config, 'embd_pdrop'):
return config.embd_pdrop
if hasattr(config, 'embed_dropout'):
return config.embed_dropout
def get_embed_dim(config):
if hasattr(config, "hidden_size"):
return config.hidden_size
if hasattr(config, "n_embd"):
return config.n_embd
if hasattr(config, "d_model"):
return config.d_model
def get_num_layers(config):
if hasattr(config, "num_layers"):
return config.num_layers
if hasattr(config, "n_layer"):
return config.n_layer
if hasattr(config, "num_hidden_layers"):
return config.num_hidden_layers
def init_all(model, init_func, *params, **kwargs):
for p in model.parameters():
init_func(p, *params, **kwargs)
def format_inputs(args, ds):
if not ds: return args
return tuple([None if torch.sum(t) == 127873 else t for t in args])
def format_outputs(args, ds):
if not ds: return args
shape = args[0].shape
device = args[0].device
return tuple([torch.Tensor([127873]).to(device) if t is None else t for t in args])
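# Convention used by format_inputs/format_outputs above: when `ds` is set, a
# None entry is encoded as the 1-element tensor [127873] (format_outputs) and
# decoded back to None by checking the tensor's sum (format_inputs). args[0]
# is assumed to be a real tensor, since its device is reused for the
# placeholder. A round trip looks like (names are illustrative):
#   outs = format_outputs((hidden, None), ds=True)   # None -> tensor([127873.])
#   ins  = format_inputs(outs, ds=True)              # tensor([127873.]) -> None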
class PipeMethods:
def convert(self, device):
for idx in range(*self.exec_map):
self.layers[idx] = self.layers[idx].to(device)
# use placeholder to save more memory
for i in range(len(self.layers)):
if i < self.exec_map[0] or i >= self.exec_map[1]:
self.layers[i] = torch.nn.Module()
torch.cuda.empty_cache()
gc.collect()
self.device = device
def convert_layer_specs(self, device):
l, h = self.exec_map
for idx, layer_cls in enumerate(self.layer_specs):
if idx >= l and idx < h:
self.layers[idx] = layer_cls.build().to(device)
torch.cuda.empty_cache()
gc.collect()
self.device = device
def partition_by_parameter(self, stage, parts, synthetic=False):
l_params = self.total_params / parts * stage
h_params = self.total_params / parts * (stage + 1) if stage != parts - 1 else self.total_params
print("partition_by_parameter", self.total_params, l_params, h_params, flush=True)
# if synthetic:
# layer_params = np.cumsum([1] * self.total_params)
# else:
# layer_params = [sum([np.prod(p.size()) for p in self.layers[idx].parameters()]) for idx in range(len(self.layers))]
layer_params = np.cumsum(self.layer_param)
responsible_layers = np.argwhere((layer_params > l_params) & (layer_params <= h_params)).flatten()
print("responsible_layers", layer_params, responsible_layers, flush=True)
self.exec_map = (responsible_layers[0], responsible_layers[-1]+1)
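    # Example of partition_by_parameter (a sketch with made-up numbers,
    # assuming total_params is the sum of layer_param): with
    # layer_param = [4, 4, 4, 4] (total_params = 16), parts = 2 and stage = 0,
    # the cumulative sums are [4, 8, 12, 16], the window is (0, 8], the
    # responsible layers are [0, 1], and exec_map becomes (0, 2).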
# def get_decoder_start_token_id(
# decoder_start_token_id: int = None, bos_token_id: int = None
# ) -> int:
# decoder_start_token_id = (
# decoder_start_token_id
# if decoder_start_token_id is not None
# else self.config.decoder_start_token_id
# )
# bos_token_id = (
# bos_token_id # if bos_token_id is not None else self.config.bos_token_id
# )
# if decoder_start_token_id is not None:
# return decoder_start_token_id
# elif (
# hasattr(self.config, "decoder")
# and hasattr(self.config.decoder, "decoder_start_token_id")
# and self.config.decoder.decoder_start_token_id is not None
# ):
# return self.config.decoder.decoder_start_token_id
# elif bos_token_id is not None:
# return bos_token_id
# elif (
# hasattr(self.config, "decoder")
# and hasattr(self.config.decoder, "bos_token_id")
# and self.config.decoder.bos_token_id is not None
# ):
# return self.config.decoder.bos_token_id
# raise ValueError(
# "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
# )
def prepare_decoder_input_ids_for_generation(
input_ids: torch.LongTensor,
decoder_start_token_id: int,
bos_token_id: int = None,
) -> torch.LongTensor:
# decoder_start_token_id = get_decoder_start_token_id(
# decoder_start_token_id, bos_token_id
# )
decoder_input_ids = (
torch.ones(
(input_ids.shape[0], 1), dtype=torch.long, device=input_ids.device
)
* decoder_start_token_id
)
return decoder_input_ids
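# Example: for input_ids of shape (batch, seq_len) and decoder_start_token_id=0,
# prepare_decoder_input_ids_for_generation returns a (batch, 1) LongTensor of
# zeros on the same device, i.e. one start token per sequence in the batch.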
def invert_attention_mask(encoder_attention_mask: torch.Tensor) -> torch.Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
# encoder_extended_attention_mask = encoder_extended_attention_mask.to(
# dtype=self.dtype
# ) # fp16 compatibility
#
# if self.dtype == torch.float16:
# encoder_extended_attention_mask = (
# 1.0 - encoder_extended_attention_mask
# ) * -1e4
# elif self.dtype == torch.float32:
encoder_extended_attention_mask = (
1.0 - encoder_extended_attention_mask
) * -1e9
# else:
# raise ValueError(
# f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`"
# )
return encoder_extended_attention_mask
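# Example: a padding mask of shape (1, 3) such as [[1, 1, 0]] becomes an
# extended mask of shape (1, 1, 1, 3) with values [0.0, 0.0, -1e9], so the
# padded position is pushed to a large negative score before the softmax.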
def get_extended_attention_mask(
attention_mask: torch.Tensor,
input_shape: Tuple[int],
device: torch.device,
is_decoder=False,
) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
        :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(
# dtype=self.dtype
# ) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
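# Example: for an encoder (is_decoder=False) a (batch, seq_len) padding mask is
# broadcast to shape (batch, 1, 1, seq_len) with 0.0 at attended positions and
# -10000.0 at masked ones; with is_decoder=True a lower-triangular causal mask
# is additionally applied so position i can only attend to positions <= i.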
|
[
"torch.ones",
"gc.collect",
"numpy.cumsum",
"torch.Tensor",
"torch.arange",
"torch.cuda.empty_cache",
"numpy.argwhere",
"torch.sum",
"torch.nn.Module"
] |
[((1545, 1569), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1567, 1569), False, 'import torch\n'), ((1578, 1590), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1588, 1590), False, 'import gc\n'), ((1862, 1886), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1884, 1886), False, 'import torch\n'), ((1895, 1907), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1905, 1907), False, 'import gc\n'), ((2514, 2541), 'numpy.cumsum', 'np.cumsum', (['self.layer_param'], {}), '(self.layer_param)\n', (2523, 2541), True, 'import numpy as np\n'), ((4350, 4428), 'torch.ones', 'torch.ones', (['(input_ids.shape[0], 1)'], {'dtype': 'torch.long', 'device': 'input_ids.device'}), '((input_ids.shape[0], 1), dtype=torch.long, device=input_ids.device)\n', (4360, 4428), False, 'import torch\n'), ((1518, 1535), 'torch.nn.Module', 'torch.nn.Module', ([], {}), '()\n', (1533, 1535), False, 'import torch\n'), ((2571, 2638), 'numpy.argwhere', 'np.argwhere', (['((layer_params > l_params) & (layer_params <= h_params))'], {}), '((layer_params > l_params) & (layer_params <= h_params))\n', (2582, 2638), True, 'import numpy as np\n'), ((7705, 7744), 'torch.arange', 'torch.arange', (['seq_length'], {'device': 'device'}), '(seq_length, device=device)\n', (7717, 7744), False, 'import torch\n'), ((934, 946), 'torch.sum', 'torch.sum', (['t'], {}), '(t)\n', (943, 946), False, 'import torch\n'), ((1111, 1133), 'torch.Tensor', 'torch.Tensor', (['[127873]'], {}), '([127873])\n', (1123, 1133), False, 'import torch\n'), ((8392, 8489), 'torch.ones', 'torch.ones', (['(batch_size, seq_length, prefix_seq_len)'], {'device': 'device', 'dtype': 'causal_mask.dtype'}), '((batch_size, seq_length, prefix_seq_len), device=device, dtype=\n causal_mask.dtype)\n', (8402, 8489), False, 'import torch\n')]
|
import copy    # needed by the training loop below (copy.deepcopy of the state)
import random
import numpy as np
import pylab   # needed by the training loop below (score plot)
from environment import Env
from keras.layers.core import Dense
from keras.optimizers import Adam
from keras.models import Sequential
EPISODES = 1000
class DeepSARSAgent:
def __init__(self):
self.load_model = False
# actions which agent can do
self.action_space = [0, 1, 2, 3, 4]
# get size of state and action
self.action_size = len(self.action_space)
self.state_size = 15
self.discount_factor = 0.99
self.learning_rate = 0.001
self.epsilon = 1. # exploration
self.epsilon_decay = .9999
self.epsilon_min = 0.01
self.model = self.build_model()
if self.load_model:
self.epsilon = 0.05
self.model.load_weights('./save_model/deep_sarsa_trained.h5')
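    # NOTE: the snippet above references build_model(), get_action() and
    # train_model(), which are missing here. The methods below are a minimal,
    # hedged sketch of those pieces following a standard deep SARSA setup with
    # the Keras objects imported above; they are not the original implementation.
    def build_model(self):
        # small MLP mapping a state vector to one Q-value per action
        model = Sequential()
        model.add(Dense(30, input_dim=self.state_size, activation='relu'))
        model.add(Dense(30, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    def get_action(self, state):
        # epsilon-greedy exploration over the discrete action space
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        state = np.float32(state)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def train_model(self, state, action, reward, next_state, next_action, done):
        # on-policy SARSA target: r + gamma * Q(s', a') for the chosen next action
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
        state = np.float32(state)
        next_state = np.float32(next_state)
        target = self.model.predict(state)[0]
        if done:
            target[action] = reward
        else:
            target[action] = (reward + self.discount_factor *
                              self.model.predict(next_state)[0][next_action])
        target = np.reshape(target, [1, self.action_size])
        self.model.fit(state, target, epochs=1, verbose=0)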
if __name__ == "__main__":
env = Env()
agent = DeepSARSAgent()
global_step = 0
scores, episodes = [], []
for e in range(EPISODES):
done = False
score = 0
state = env.reset()
state = np.reshape(state, [1, 15])
while not done:
# fresh env
global_step += 1
# get action for the current state and go one step in environment
action = agent.get_action(state)
next_state, reward, done = env.step(action)
next_state = np.reshape(next_state, [1, 15])
next_action = agent.get_action(next_state)
agent.train_model(state, action, reward, next_state, next_action, done)
state = next_state
# every time step we do training
score += reward
state = copy.deepcopy(next_state)
if done:
scores.append(score)
episodes.append(e)
pylab.plot(episodes, scores, 'b')
pylab.savefig("./save_graph/deep_sarsa_.png")
print("episode:", e, " score:", score, "global_step", global_step, " epsilon:", agent.epsilon)
if e % 100 == 0:
agent.model.save_weights("./save_model/deep_sarsa.h5")
|
[
"environment.Env",
"numpy.reshape"
] |
[((855, 860), 'environment.Env', 'Env', ([], {}), '()\n', (858, 860), False, 'from environment import Env\n'), ((1054, 1080), 'numpy.reshape', 'np.reshape', (['state', '[1, 15]'], {}), '(state, [1, 15])\n', (1064, 1080), True, 'import numpy as np\n'), ((1364, 1395), 'numpy.reshape', 'np.reshape', (['next_state', '[1, 15]'], {}), '(next_state, [1, 15])\n', (1374, 1395), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
# System imports.
import arff
import argparse
import glob
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from typing import List, Tuple
# Third-party imports.
import numpy as np
# Local source tree imports.
from idiaptts.src.data_preparation.LabelGen import LabelGen
from idiaptts.misc.normalisation.MeanStdDevExtractor import MeanStdDevExtractor
class OpenSMILELabelGen(LabelGen):
"""Create OpenSMILE features from wav files."""
@staticmethod
def gen_data(dir_in: os.PathLike,
opensmile_config_file: os.PathLike,
feature_name: str,
num_frames: int,
dir_out: os.PathLike = None,
file_id_list: os.PathLike = None,
id_list: List[str] = None,
file_ext: str = "wav",
return_dict: bool = False) -> Tuple:
if file_id_list is None:
file_id_list_name = ""
else:
id_list, file_id_list_name = OpenSMILELabelGen._get_id_list(
dir_in, file_id_list, id_list, file_ext)
if file_id_list_name is not None and file_id_list_name != "":
file_id_list_name += "-"
if return_dict:
label_dict = {}
normaliser = MeanStdDevExtractor()
for file_name in id_list:
features = OpenSMILELabelGen.extract_features(
config_file=opensmile_config_file,
file_path=os.path.join(dir_in, file_name + "." + file_ext),
num_frames=num_frames
)
if return_dict:
label_dict[file_name] = features
normaliser.add_sample(features)
if dir_out is not None:
out_file_path = os.path.join(dir_out, file_name)
OpenSMILELabelGen._save_to_npz(
file_path=out_file_path,
features=features.astype(np.float32),
feature_name=feature_name)
if dir_out is not None:
norm_file_path = os.path.join(dir_out,
file_id_list_name + feature_name)
logging.info("Write norm_prams to {}".format(norm_file_path))
normaliser.save(norm_file_path)
mean, std_dev = normaliser.get_params()
if return_dict:
return label_dict, mean, std_dev
else:
return mean, std_dev
@staticmethod
def _get_id_list(dir_in: os.PathLike, file_id_list: os.PathLike,
id_list: List[str] = None, file_ext: str = ".wav"
) -> Tuple[List[str], str]:
"""
        Fill id_list with the files in dir_in that match file_ext when no
        id_list is given, and derive an appropriate file_id_list_name.
"""
if id_list is None:
id_list = list()
filenames = glob.glob(os.path.join(dir_in, "*" + file_ext))
for filename in filenames:
id_list.append(os.path.splitext(os.path.basename(filename))[0])
file_id_list_name = "all"
else:
file_id_list_name = os.path.splitext(os.path.basename(file_id_list))[0]
return id_list, file_id_list_name
@staticmethod
def extract_features(config_file: os.PathLike, file_path: os.PathLike,
num_frames: int = None) -> np.ndarray:
"""
        Extract features with SMILExtract.
Removes first and last generated feature.
"""
tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "test_output.arff")
try:
cmd = ["opensmile/bin/SMILExtract", "-C", config_file,
"-I", file_path, "-O", path, "-l", "1"]
logging.debug(cmd)
subprocess.check_output(cmd)
dataset = arff.load(open(path, 'r'))
data = dataset['data']
except subprocess.CalledProcessError as e:
print("SMILExtract stdout output:\n", e.output)
raise
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
if num_frames is None or num_frames == -1:
features = data
else:
len_diff = len(data) - num_frames
if len_diff > 0:
features = data[:num_frames]
else:
num_features = len(data[0])
padding = abs(len_diff) * [num_features * [0]]
features = data + padding
features = np.atleast_2d(np.asarray(features))[:, 1:-1].astype(float)
return features
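    # Usage sketch (assumes a working openSMILE install at opensmile/bin and a
    # valid config file; the paths below are illustrative):
    #   feats = OpenSMILELabelGen.extract_features(
    #       "opensmile/config/my_config.conf", "wav/utt0001.wav", num_frames=500)
    #   feats has shape (500, n_columns - 2): frames are cropped/zero-padded to
    #   num_frames and the first and last ARFF columns are dropped.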
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-a", "--dir_audio",
help="Directory containing the audio (wav) files.",
type=str, dest="dir_audio", required=True)
parser.add_argument('-c', '--config_file', default=None,
help='Path to the openSMILE config to use.',
required=True)
parser.add_argument("-f", "--num_frames", default=-1,
help="The features are cropped/padded to this length.",
type=int, dest="num_frames", required=False)
parser.add_argument("-i", "--file_id_list", default=None,
help="Path to text file with ids to process.",
type=str, dest="file_id_list", required=False)
parser.add_argument("--id_name", default=None,
help="Single id_name to process",
type=str, dest="id_name", required=False)
parser.add_argument("-n", "--feature_name",
help="Name of the feature used to store in npz file.",
type=str, required=True)
parser.add_argument("-o", "--dir_out",
help="Output directory to store the labels.",
type=str, dest="dir_out", required=True)
# Parse arguments
args = parser.parse_args()
dir_audio = os.path.abspath(args.dir_audio)
opensmile_config_file = os.path.abspath(args.config_file)
num_frames = int(args.num_frames)
feature_name = args.feature_name
dir_out = os.path.abspath(args.dir_out)
if args.file_id_list is not None:
file_id_list = os.path.abspath(args.file_id_list)
with open(file_id_list) as f:
id_list = f.readlines()
id_list[:] = [s.strip(' \t\n\r') for s in id_list] # Trim entries in-place.
elif args.id_name is not None:
file_id_list = None
id_list = [args.id_name]
else:
raise RuntimeError("Either file_id_list or id_name has to be given.")
assert num_frames == -1 or num_frames > 0, "num_frames has to be positive or -1."
OpenSMILELabelGen.gen_data(
dir_in=dir_audio,
dir_out=dir_out,
file_id_list=file_id_list,
id_list=id_list,
opensmile_config_file=opensmile_config_file,
feature_name=feature_name,
num_frames=num_frames,
return_dict=False
)
sys.exit(0)
if __name__ == "__main__":
main()
|
[
"os.path.abspath",
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"idiaptts.misc.normalisation.MeanStdDevExtractor.MeanStdDevExtractor",
"os.path.basename",
"subprocess.check_output",
"numpy.asarray",
"tempfile.mkdtemp",
"shutil.rmtree",
"os.path.join",
"sys.exit"
] |
[((4757, 4796), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (4776, 4796), False, 'import logging\n'), ((4811, 4914), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (4834, 4914), False, 'import argparse\n'), ((6295, 6326), 'os.path.abspath', 'os.path.abspath', (['args.dir_audio'], {}), '(args.dir_audio)\n', (6310, 6326), False, 'import os\n'), ((6355, 6388), 'os.path.abspath', 'os.path.abspath', (['args.config_file'], {}), '(args.config_file)\n', (6370, 6388), False, 'import os\n'), ((6478, 6507), 'os.path.abspath', 'os.path.abspath', (['args.dir_out'], {}), '(args.dir_out)\n', (6493, 6507), False, 'import os\n'), ((7334, 7345), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7342, 7345), False, 'import sys\n'), ((1444, 1465), 'idiaptts.misc.normalisation.MeanStdDevExtractor.MeanStdDevExtractor', 'MeanStdDevExtractor', ([], {}), '()\n', (1463, 1465), False, 'from idiaptts.misc.normalisation.MeanStdDevExtractor import MeanStdDevExtractor\n'), ((3681, 3699), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3697, 3699), False, 'import tempfile\n'), ((3715, 3756), 'os.path.join', 'os.path.join', (['tmp_dir', '"""test_output.arff"""'], {}), "(tmp_dir, 'test_output.arff')\n", (3727, 3756), False, 'import os\n'), ((6570, 6604), 'os.path.abspath', 'os.path.abspath', (['args.file_id_list'], {}), '(args.file_id_list)\n', (6585, 6604), False, 'import os\n'), ((2224, 2279), 'os.path.join', 'os.path.join', (['dir_out', '(file_id_list_name + feature_name)'], {}), '(dir_out, file_id_list_name + feature_name)\n', (2236, 2279), False, 'import os\n'), ((3908, 3926), 'logging.debug', 'logging.debug', (['cmd'], {}), '(cmd)\n', (3921, 3926), False, 'import logging\n'), ((3939, 3967), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (3962, 3967), False, 'import subprocess\n'), ((4211, 4253), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {'ignore_errors': '(True)'}), '(tmp_dir, ignore_errors=True)\n', (4224, 4253), False, 'import shutil\n'), ((1931, 1963), 'os.path.join', 'os.path.join', (['dir_out', 'file_name'], {}), '(dir_out, file_name)\n', (1943, 1963), False, 'import os\n'), ((3050, 3086), 'os.path.join', 'os.path.join', (['dir_in', "('*' + file_ext)"], {}), "(dir_in, '*' + file_ext)\n", (3062, 3086), False, 'import os\n'), ((1637, 1685), 'os.path.join', 'os.path.join', (['dir_in', "(file_name + '.' + file_ext)"], {}), "(dir_in, file_name + '.' + file_ext)\n", (1649, 1685), False, 'import os\n'), ((3308, 3338), 'os.path.basename', 'os.path.basename', (['file_id_list'], {}), '(file_id_list)\n', (3324, 3338), False, 'import os\n'), ((4669, 4689), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (4679, 4689), True, 'import numpy as np\n'), ((3175, 3201), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3191, 3201), False, 'import os\n')]
|
import numpy as np
def bit_get(val, idx):
return (val >> idx) & 1
def create_generic_colormap(n):
colormap = np.zeros((n, 3), dtype=int)
ind = np.arange(n, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= bit_get(ind, channel) << shift
ind >>= 3
return colormap
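# Minimal usage sketch: the bit pattern above reproduces a PASCAL-VOC-style
# label colormap, e.g. rows 0-3 of create_generic_colormap(256) are
# [0, 0, 0], [128, 0, 0], [0, 128, 0] and [128, 128, 0].
if __name__ == "__main__":
    cmap = create_generic_colormap(256)
    print(cmap.shape)  # (256, 3)
    print(cmap[:4])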
|
[
"numpy.zeros",
"numpy.arange"
] |
[((117, 144), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {'dtype': 'int'}), '((n, 3), dtype=int)\n', (125, 144), True, 'import numpy as np\n'), ((155, 178), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (164, 178), True, 'import numpy as np\n')]
|
# NumCosmo implementation of CLMModeling
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
import math
import numpy as np
from .parent_class import CLMMCosmology
__all__ = []
class NumCosmoCosmology(CLMMCosmology):
def __init__(self, dist=None, dist_zmax=15.0, **kwargs):
self.dist = None
super(NumCosmoCosmology, self).__init__(**kwargs)
# this tag will be used to check if the cosmology object is accepted by the modeling
self.backend = 'nc'
if dist:
self.set_dist(dist)
else:
self.set_dist(Nc.Distance.new(dist_zmax))
def _init_from_cosmo(self, be_cosmo):
assert isinstance(be_cosmo, Nc.HICosmo)
assert isinstance(be_cosmo, Nc.HICosmoDEXcdm)
        assert isinstance(be_cosmo.peek_reparam(), Nc.HICosmoDEReparamOk)
self.be_cosmo = be_cosmo
def _init_from_params(self, H0, Omega_b0, Omega_dm0, Omega_k0):
self.be_cosmo = Nc.HICosmo.new_from_name(Nc.HICosmo, "NcHICosmoDEXcdm")
self.be_cosmo.param_set_lower_bound(Nc.HICosmoDESParams.T_GAMMA0, 0.0)
self.be_cosmo.omega_x2omega_k()
self.be_cosmo.param_set_by_name("w", -1.0)
self.be_cosmo.param_set_by_name("Tgamma0", 0.0)
self.be_cosmo.param_set_by_name("H0", H0)
self.be_cosmo.param_set_by_name("Omegab", Omega_b0)
self.be_cosmo.param_set_by_name("Omegac", Omega_dm0)
self.be_cosmo.param_set_by_name("Omegak", Omega_k0)
def _set_param(self, key, value):
if key == "Omega_b0":
self.be_cosmo.param_set_by_name("Omegab", value)
elif key == "Omega_dm0":
self.be_cosmo.param_set_by_name("Omegac", value)
elif key == "Omega_k0":
self.be_cosmo.param_set_by_name("Omegak", value)
elif key == 'h':
self.be_cosmo.param_set_by_name("H0", value*100.0)
elif key == 'H0':
self.be_cosmo.param_set_by_name("H0", value)
else:
raise ValueError(f"Unsupported parameter {key}")
def _get_param(self, key):
if key == "Omega_m0":
return self.be_cosmo.Omega_m0()
elif key == "Omega_b0":
return self.be_cosmo.Omega_b0()
elif key == "Omega_dm0":
return self.be_cosmo.Omega_c0()
elif key == "Omega_k0":
return self.be_cosmo.Omega_k0()
elif key == 'h':
return self.be_cosmo.h()
elif key == 'H0':
return self.be_cosmo.H0()
else:
raise ValueError(f"Unsupported parameter {key}")
def set_dist(self, dist):
r"""Sets distance functions (NumCosmo internal use)
"""
assert isinstance(dist, Nc.Distance)
self.dist = dist
self.dist.prepare_if_needed(self.be_cosmo)
def get_Omega_m(self, z):
return self.be_cosmo.E2Omega_m(z)/self.be_cosmo.E2(z)
def get_E2Omega_m(self, z):
return self.be_cosmo.E2Omega_m(z)
def eval_da_z1z2(self, z1, z2):
return np.vectorize(self.dist.angular_diameter_z1_z2)(self.be_cosmo, z1, z2)*self.be_cosmo.RH_Mpc()
def eval_sigma_crit(self, z_len, z_src):
self.smd.prepare_if_needed(self.be_cosmo)
f = lambda z_len, z_src: self.smd.sigma_critical(self.be_cosmo, z_src, z_len, z_len)
return np.vectorize(f)(z_len, z_src)
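# Notes on the class above: `self.smd` (the NumCosmo surface-mass-density
# object used by eval_sigma_crit) is expected to be provided by the
# CLMMCosmology machinery, and eval_da_z1z2 returns distances in Mpc because
# the dimensionless NumCosmo result is multiplied by the Hubble radius RH_Mpc.
# A hedged usage sketch, assuming the base class forwards these kwargs to
# _init_from_params:
#   cosmo = NumCosmoCosmology(H0=70.0, Omega_b0=0.045, Omega_dm0=0.225, Omega_k0=0.0)
#   da = cosmo.eval_da_z1z2(0.0, 0.5)   # Mpc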
|
[
"gi.repository.NumCosmo.Distance.new",
"gi.repository.NumCosmo.HICosmo.new_from_name",
"gi.require_version",
"numpy.vectorize"
] |
[((52, 89), 'gi.require_version', 'gi.require_version', (['"""NumCosmo"""', '"""1.0"""'], {}), "('NumCosmo', '1.0')\n", (70, 89), False, 'import gi\n'), ((90, 131), 'gi.require_version', 'gi.require_version', (['"""NumCosmoMath"""', '"""1.0"""'], {}), "('NumCosmoMath', '1.0')\n", (108, 131), False, 'import gi\n'), ((1083, 1138), 'gi.repository.NumCosmo.HICosmo.new_from_name', 'Nc.HICosmo.new_from_name', (['Nc.HICosmo', '"""NcHICosmoDEXcdm"""'], {}), "(Nc.HICosmo, 'NcHICosmoDEXcdm')\n", (1107, 1138), True, 'from gi.repository import NumCosmo as Nc\n'), ((3441, 3456), 'numpy.vectorize', 'np.vectorize', (['f'], {}), '(f)\n', (3453, 3456), True, 'import numpy as np\n'), ((707, 733), 'gi.repository.NumCosmo.Distance.new', 'Nc.Distance.new', (['dist_zmax'], {}), '(dist_zmax)\n', (722, 733), True, 'from gi.repository import NumCosmo as Nc\n'), ((3143, 3189), 'numpy.vectorize', 'np.vectorize', (['self.dist.angular_diameter_z1_z2'], {}), '(self.dist.angular_diameter_z1_z2)\n', (3155, 3189), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from time import time
import os
import h5mapper as h5m
# last dim of data
D = 256
class RandnArray(h5m.Array):
def __init__(self, n, **ds_kwargs):
self.n = n
self.__ds_kwargs__.update(ds_kwargs)
def load(self, source):
return np.random.randn(self.n, D).astype(np.float32)
def get_loader(compression=None, slice_length=16, keep_open=True):
class Data(h5m.TypedFile):
x = RandnArray(slice_length, compression=compression, chunks=(slice_length, D))
y = RandnArray(slice_length, compression=compression, chunks=(slice_length, D))
z = RandnArray(slice_length, compression=compression, chunks=(slice_length, D))
ds = Data.create("bench-serve.h5", list(map(str, range(16*500))), keep_open=keep_open)
return ds, ds.serve(
# batch object
dict(x=h5m.Input(key="x", getter=h5m.GetId()),
y=h5m.Input(key="y", getter=h5m.GetId()),
z=h5m.Input(key="z", getter=h5m.GetId())
),
# loaders kwargs
shuffle=True,
num_workers=16,
batch_size=16,
pin_memory=True,
prefetch_factor=2
)
if __name__ == '__main__':
avg_time = {}
for comp in [None, 'lzf', 'gzip']:
for ko in [True, False]:
avg_time[(comp, ko)] = {}
for length in [16, 64, 128, 256]:
file, loader = get_loader(compression=comp, slice_length=length, keep_open=ko)
# get the 1st one out
_ = next(iter(loader))
times = []
before = time()
for _ in tqdm(loader, leave=False):
now = time()
times += [now - before]
before = now
avg_time[(comp, ko)][length] = sum(times) / len(times)
file.close()
fig, ax = plt.subplots()
for comp, ko in avg_time.keys():
avg = avg_time[(comp, ko)]
ax.scatter(list(avg.keys()), list(avg.values()), label=f"compression={comp}, keep_open={ko}",
marker="x", linewidths=1.)
ax.set_xlabel("slice's length")
ax.set_ylabel("avg load time / batch")
ax.legend(loc='best')
plt.tight_layout()
plt.show()
os.remove("bench-serve.h5")
|
[
"os.remove",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"time.time",
"h5mapper.GetId",
"matplotlib.pyplot.tight_layout"
] |
[((1930, 1944), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1942, 1944), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2292), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2290, 2292), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2307), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2305, 2307), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2339), 'os.remove', 'os.remove', (['"""bench-serve.h5"""'], {}), "('bench-serve.h5')\n", (2321, 2339), False, 'import os\n'), ((337, 363), 'numpy.random.randn', 'np.random.randn', (['self.n', 'D'], {}), '(self.n, D)\n', (352, 363), True, 'import numpy as np\n'), ((1647, 1653), 'time.time', 'time', ([], {}), '()\n', (1651, 1653), False, 'from time import time\n'), ((1679, 1704), 'tqdm.tqdm', 'tqdm', (['loader'], {'leave': '(False)'}), '(loader, leave=False)\n', (1683, 1704), False, 'from tqdm import tqdm\n'), ((1732, 1738), 'time.time', 'time', ([], {}), '()\n', (1736, 1738), False, 'from time import time\n'), ((930, 941), 'h5mapper.GetId', 'h5m.GetId', ([], {}), '()\n', (939, 941), True, 'import h5mapper as h5m\n'), ((985, 996), 'h5mapper.GetId', 'h5m.GetId', ([], {}), '()\n', (994, 996), True, 'import h5mapper as h5m\n'), ((1040, 1051), 'h5mapper.GetId', 'h5m.GetId', ([], {}), '()\n', (1049, 1051), True, 'import h5mapper as h5m\n')]
|
import io
import sys
import numpy as np
def read_input_npy(filepath):
if not filepath and sys.stdin.isatty():
raise RuntimeError("Please specify the npy_filepath or give npy file via stdin.")
data = filepath.read_bytes() if filepath else sys.stdin.buffer.read()
buf = io.BytesIO()
buf.write(data)
buf.seek(0)
return np.load(buf, allow_pickle=True)
def write_output_npy(data):
buf = io.BytesIO()
np.save(buf, data)
buf.seek(0)
sys.stdout.buffer.write(buf.read())
def detect_type_from_suffix(filepath):
KNOWN_SUFFIXES = {'.caffemodel': 'caffe',
'.mlmodel': 'coreml',
'.onnx': 'onnx',
'.pb': 'tensorflow',
'.prototxt': 'caffe',
'.pth': 'pytorch',
'.tflite': 'tensorflowlite',
'.xml': 'openvino'}
for suffix in KNOWN_SUFFIXES:
if suffix in filepath.suffixes:
return KNOWN_SUFFIXES[suffix]
return None
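# --- Usage sketch (editor's addition, hedged): a minimal round trip through the
# helpers above. The temporary file stands in for the real stdin/stdout plumbing,
# and the file names below are illustrative only.
if __name__ == "__main__":
    import pathlib
    import tempfile
    arr = np.arange(6).reshape(2, 3)
    with tempfile.TemporaryDirectory() as tmp:
        npy_path = pathlib.Path(tmp) / "example.npy"
        np.save(npy_path, arr)
        loaded = read_input_npy(npy_path)   # reads via Path.read_bytes()
        print(loaded.shape)                 # (2, 3)
    print(detect_type_from_suffix(pathlib.Path("model.onnx")))  # onnx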
|
[
"io.BytesIO",
"numpy.save",
"sys.stdin.isatty",
"numpy.load",
"sys.stdin.buffer.read"
] |
[((291, 303), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (301, 303), False, 'import io\n'), ((351, 382), 'numpy.load', 'np.load', (['buf'], {'allow_pickle': '(True)'}), '(buf, allow_pickle=True)\n', (358, 382), True, 'import numpy as np\n'), ((423, 435), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (433, 435), False, 'import io\n'), ((440, 458), 'numpy.save', 'np.save', (['buf', 'data'], {}), '(buf, data)\n', (447, 458), True, 'import numpy as np\n'), ((96, 114), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (112, 114), False, 'import sys\n'), ((257, 280), 'sys.stdin.buffer.read', 'sys.stdin.buffer.read', ([], {}), '()\n', (278, 280), False, 'import sys\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return -lim, lim
class ActorNetwork(nn.Module):
def __init__(self, observation_size, action_size, use_batch_norm, seed,
fc1_units=128, fc2_units=64, fc3_units=32):
"""
:param observation_size: observation size
:param action_size: action size
:param use_batch_norm: True to use batch norm
:param seed: random seed
:param fc1_units: number of nodes in 1st hidden layer
:param fc2_units: number of nodes in 2nd hidden layer
:param fc3_units: number of nodes in 3rd hidden layer
"""
super(ActorNetwork, self).__init__()
if seed is not None:
torch.manual_seed(seed)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(observation_size)
self.bn2 = nn.BatchNorm1d(fc1_units)
self.bn3 = nn.BatchNorm1d(fc2_units)
self.bn4 = nn.BatchNorm1d(fc3_units)
# batch norm has bias included, disable linear layer bias
use_bias = not use_batch_norm
self.use_batch_norm = use_batch_norm
self.fc1 = nn.Linear(observation_size, fc1_units, bias=use_bias)
self.fc2 = nn.Linear(fc1_units, fc2_units, bias=use_bias)
self.fc3 = nn.Linear(fc2_units, fc3_units, bias=use_bias)
self.fc4 = nn.Linear(fc3_units, action_size, bias=use_bias)
self.reset_parameters()
def forward(self, observation):
""" map a states to action values
:param observation: shape == (batch, observation_size)
:return: action values
"""
if self.use_batch_norm:
x = F.relu(self.fc1(self.bn1(observation)))
x = F.relu(self.fc2(self.bn2(x)))
x = F.relu(self.fc3(self.bn3(x)))
return torch.tanh(self.fc4(self.bn4(x)))
else:
x = F.relu(self.fc1(observation))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return torch.tanh(self.fc4(x))
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(-3e-3, 3e-3)
class CriticNetwork(nn.Module):
def __init__(self, observation_size, action_size, use_batch_norm, seed,
fc1_units=128, fc2_units=64, fc3_units=32):
"""
        :param observation_size: Dimension of each observation
        :param action_size: Dimension of each action
        :param use_batch_norm: True to use batch norm
        :param seed: random seed
:param fc1_units: number of nodes in 1st hidden layer
:param fc2_units: number of nodes in 2nd hidden layer
:param fc3_units: number of nodes in 3rd hidden layer
"""
super(CriticNetwork, self).__init__()
if seed is not None:
torch.manual_seed(seed)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(observation_size + action_size)
self.bn2 = nn.BatchNorm1d(fc1_units)
self.bn3 = nn.BatchNorm1d(fc2_units)
self.bn4 = nn.BatchNorm1d(fc3_units)
# batch norm has bias included, disable linear layer bias
use_bias = not use_batch_norm
self.use_batch_norm = use_batch_norm
self.fc1 = nn.Linear(observation_size + action_size, fc1_units, bias=use_bias)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 1)
self.reset_parameters()
def forward(self, observation, action):
""" map (observation, actions) pairs to Q-values
:param observation: shape == (batch, observation_size)
:param action: shape == (batch, action_size)
        :return: Q-values
"""
x = torch.cat([observation, action], dim=1)
if self.use_batch_norm:
x = F.relu(self.fc1(self.bn1(x)))
x = F.relu(self.fc2(self.bn2(x)))
x = F.relu(self.fc3(self.bn3(x)))
else:
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(-3e-3, 3e-3)
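# --- Usage sketch (editor's addition, hedged): instantiating the two networks
# above; the observation/action sizes below are illustrative, not from the original.
if __name__ == "__main__":
    actor = ActorNetwork(observation_size=24, action_size=2, use_batch_norm=False, seed=0)
    critic = CriticNetwork(observation_size=24, action_size=2, use_batch_norm=False, seed=0)
    obs = torch.randn(5, 24)       # batch of 5 observations
    act = actor(obs)                 # -> (5, 2), squashed to [-1, 1] by tanh
    q = critic(obs, act)             # -> (5, 1) Q-value estimates
    print(act.shape, q.shape)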
|
[
"torch.manual_seed",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.Linear",
"numpy.sqrt"
] |
[((168, 183), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (175, 183), True, 'import numpy as np\n'), ((1275, 1328), 'torch.nn.Linear', 'nn.Linear', (['observation_size', 'fc1_units'], {'bias': 'use_bias'}), '(observation_size, fc1_units, bias=use_bias)\n', (1284, 1328), True, 'import torch.nn as nn\n'), ((1348, 1394), 'torch.nn.Linear', 'nn.Linear', (['fc1_units', 'fc2_units'], {'bias': 'use_bias'}), '(fc1_units, fc2_units, bias=use_bias)\n', (1357, 1394), True, 'import torch.nn as nn\n'), ((1414, 1460), 'torch.nn.Linear', 'nn.Linear', (['fc2_units', 'fc3_units'], {'bias': 'use_bias'}), '(fc2_units, fc3_units, bias=use_bias)\n', (1423, 1460), True, 'import torch.nn as nn\n'), ((1480, 1528), 'torch.nn.Linear', 'nn.Linear', (['fc3_units', 'action_size'], {'bias': 'use_bias'}), '(fc3_units, action_size, bias=use_bias)\n', (1489, 1528), True, 'import torch.nn as nn\n'), ((3476, 3543), 'torch.nn.Linear', 'nn.Linear', (['(observation_size + action_size)', 'fc1_units'], {'bias': 'use_bias'}), '(observation_size + action_size, fc1_units, bias=use_bias)\n', (3485, 3543), True, 'import torch.nn as nn\n'), ((3563, 3594), 'torch.nn.Linear', 'nn.Linear', (['fc1_units', 'fc2_units'], {}), '(fc1_units, fc2_units)\n', (3572, 3594), True, 'import torch.nn as nn\n'), ((3614, 3645), 'torch.nn.Linear', 'nn.Linear', (['fc2_units', 'fc3_units'], {}), '(fc2_units, fc3_units)\n', (3623, 3645), True, 'import torch.nn as nn\n'), ((3665, 3688), 'torch.nn.Linear', 'nn.Linear', (['fc3_units', '(1)'], {}), '(fc3_units, 1)\n', (3674, 3688), True, 'import torch.nn as nn\n'), ((3997, 4036), 'torch.cat', 'torch.cat', (['[observation, action]'], {'dim': '(1)'}), '([observation, action], dim=1)\n', (4006, 4036), False, 'import torch\n'), ((850, 873), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (867, 873), False, 'import torch\n'), ((925, 957), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['observation_size'], {}), '(observation_size)\n', (939, 957), True, 'import torch.nn as nn\n'), ((981, 1006), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['fc1_units'], {}), '(fc1_units)\n', (995, 1006), True, 'import torch.nn as nn\n'), ((1030, 1055), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['fc2_units'], {}), '(fc2_units)\n', (1044, 1055), True, 'import torch.nn as nn\n'), ((1079, 1104), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['fc3_units'], {}), '(fc3_units)\n', (1093, 1104), True, 'import torch.nn as nn\n'), ((3037, 3060), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3054, 3060), False, 'import torch\n'), ((3112, 3158), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(observation_size + action_size)'], {}), '(observation_size + action_size)\n', (3126, 3158), True, 'import torch.nn as nn\n'), ((3182, 3207), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['fc1_units'], {}), '(fc1_units)\n', (3196, 3207), True, 'import torch.nn as nn\n'), ((3231, 3256), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['fc2_units'], {}), '(fc2_units)\n', (3245, 3256), True, 'import torch.nn as nn\n'), ((3280, 3305), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['fc3_units'], {}), '(fc3_units)\n', (3294, 3305), True, 'import torch.nn as nn\n')]
|
#' % HW19 - BIOE232
#' % <NAME>
#' % May 7, 2015
#' My First Python Script!
# For outputting nice HTML file/PDF
# import pweave
import pprint as pp
# All the essentials
import numpy as np # np.__version__
import matplotlib.pyplot as plt
import math
from decimal import *
from pylab import *
#' Question 1
# Declare variables
Ka = 5.1E-7
pKa = -math.log10(Ka)
pH = np.linspace(0, 14, num=140)
ratio = 1/(1 + pow(10, (pKa - pH)))
# Print out constants
pp.pprint(['Ka = ', Ka])
pp.pprint(['pKa = ', pKa])
# Plot function
plot(pH, ratio)
title('HW19: Q1 The ratio of C_b / C_total versus pH')
xlabel('pH')
ylabel('Ratio of C_b / C_total')
grid(True)
# show()
#' Question 2
# Declare variables
pKa1, pKa2 = 6.3, 10.8
pH = np.linspace(0, 14, num=140)
Dratio1 = 1/(1 + pow(10, (pKa1 - pH)))
Dratio2 = 1/(1 + pow(10, (pKa2 - pH)))
# Print out constants
# pp.pprint(['Dratio1 = ', Dratio1])
# pp.pprint(['Dratio2 = ', Dratio2])
# Plot function
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.7])
l1, l2 = ax1.plot(pH, Dratio1, '-*', pH, Dratio2)
fig.legend((l1, l2), ('Ajmalicine', 'Serpentine'), 'upper right')
title('HW19: Q2 The ratio of D_overall / D versus pH')
xlabel('pH')
ylabel('Ratio of D_overall / D')
grid(True)
# plt.show()
#' Question 3
pKa, pHv, pHc = 6.3, 3, 7
conc = (1+pow(10, pKa - pHv)) / (1+pow(10, pKa - pHc))
pp.pprint(['Concentration Ability = ', conc])
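#' Sanity check (editor's addition): at pH == pKa the ratio 1/(1 + 10**(pKa - pH))
#' evaluates to exactly one half.
assert 1/(1 + pow(10, pKa - pKa)) == 0.5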
|
[
"matplotlib.pyplot.figure",
"math.log10",
"pprint.pprint",
"numpy.linspace"
] |
[((369, 396), 'numpy.linspace', 'np.linspace', (['(0)', '(14)'], {'num': '(140)'}), '(0, 14, num=140)\n', (380, 396), True, 'import numpy as np\n'), ((456, 480), 'pprint.pprint', 'pp.pprint', (["['Ka = ', Ka]"], {}), "(['Ka = ', Ka])\n", (465, 480), True, 'import pprint as pp\n'), ((481, 507), 'pprint.pprint', 'pp.pprint', (["['pKa = ', pKa]"], {}), "(['pKa = ', pKa])\n", (490, 507), True, 'import pprint as pp\n'), ((726, 753), 'numpy.linspace', 'np.linspace', (['(0)', '(14)'], {'num': '(140)'}), '(0, 14, num=140)\n', (737, 753), True, 'import numpy as np\n'), ((952, 964), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (962, 964), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1390), 'pprint.pprint', 'pp.pprint', (["['Concentration Ability = ', conc]"], {}), "(['Concentration Ability = ', conc])\n", (1354, 1390), True, 'import pprint as pp\n'), ((349, 363), 'math.log10', 'math.log10', (['Ka'], {}), '(Ka)\n', (359, 363), False, 'import math\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import acl
import numpy as np
from ..common.const import *
from ..resource.context import create_stream
from ..data.ascendarray import AscendArray
from .util import TensorDesc, OpSet
class Cast():
""" define a Cast operator object to release dtype convert. support translate types:
float -> float16, float16 -> float, uint8 -> float16, float16 -> uint8
uint8 -> float32, float32 -> uint8, float16 -> int32, int32 -> float16
Args:
input : input tensor (AscendArray)
dtype : the converted data type of input.
context: input context, optional
stream : input stream, optional
    Methods:
        run : do the cast
        data : return output result
"""
def __init__(self, input, dtype=np.dtype('float16'), context=None, stream=None):
if not isinstance(input, AscendArray):
raise TypeError(f"Input tensor expects a AscendArray, but got {type(input)}.")
if context and not isinstance(context, int):
raise TypeError(f"Input context expects an int, but got {type(context)}.")
if stream and not isinstance(stream, int):
raise TypeError(f"Input stream expects an int, but got {type(stream)}.")
# assign self value
self.input = input
self.context = context
self.stream = stream if stream else create_stream(context)
self.created = stream is None
OpSet()
# create output array to save result
self.output = AscendArray(input.shape, dtype=dtype, format='ND')
self.tensor_in = TensorDesc(input)
self.tensor_out = TensorDesc(self.output)
# do cast operator
self.run()
def run(self):
""" run op.
Args:
None
Returns:
None
"""
# do op cast
ret = acl.op.cast(self.tensor_in.desc,
self.tensor_in.buff,
self.tensor_out.desc,
self.tensor_out.buff,
0,
self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to do op cast, return {ret}.")
# do synchronize stream
ret = acl.rt.synchronize_stream(self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to synchronize stream in running blas gemm_ex, return {ret}.")
@property
def data(self):
return self.output
def __del__(self):
if hasattr(self, 'output'):
del self.output
if hasattr(self, 'tensor_out'):
del self.tensor_out
if hasattr(self, 'tensor_in'):
del self.tensor_in
if self.created:
ret = acl.rt.destroy_stream(self.stream)
assert ret == ACL_SUCCESS, f"destroy stream failed, return {ret}."
class ArgMax():
""" define a ArgMax operator.
Args:
input : input tensor (AscendArray)
        axis   : axis along which the argmax is taken
        context: input context, optional
        stream : input stream, optional
    Methods:
        run : do argmax
        data : return output result
"""
def __init__(self, input, axis=0, context=None, stream=None):
if not isinstance(input, AscendArray):
raise TypeError(f"Input tensor expects a AscendArray, but got {type(input)}.")
if context and not isinstance(context, int):
raise TypeError(f"Input context expects an int, but got {type(context)}.")
if stream and not isinstance(stream, int):
raise TypeError(f"Input stream expects an int, but got {type(stream)}.")
if axis >= input.ndim:
raise ValueError(f"Input axis should in range [0, {input.ndim}).")
# assign self value
self.input = input
self.context = context
self.stream = stream if stream else create_stream(context)
self.created = stream is None
# set op model dir
OpSet()
self.__pre_set()
# create output array to save result
shape = input.shape[:axis] + input.shape[axis + 1:]
self._dim = AscendArray.clone(np.array(axis, dtype=np.int32))
self._out = AscendArray(shape, dtype=np.int32)
self.tensor_in = TensorDesc(input)
self.tensor_dim = TensorDesc(self._dim)
self.tensor_out = TensorDesc(self._out)
# do cast operator
self.run()
def __pre_set(self):
""" set op name and attribute.
Args:
None
Returns:
None
"""
self.op_name = "ArgMaxV2"
self.op_attr = acl.op.create_attr()
def run(self):
""" run op.
Args:
None
Returns:
None
"""
        # do op execute
ret = acl.op.execute(self.op_name,
[self.tensor_in.desc, self.tensor_dim.desc],
[self.tensor_in.buff, self.tensor_dim.buff],
[self.tensor_out.desc],
[self.tensor_out.buff],
self.op_attr,
self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to excute op {self.op_name}, return {ret}.")
# do synchronize stream
ret = acl.rt.synchronize_stream(self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to synchronize stream in excute op, return {ret}.")
@property
def data(self):
return self._out
def __del__(self):
if hasattr(self, '_out'):
del self._out
if hasattr(self, 'tensor_out'):
del self.tensor_out
if hasattr(self, 'tensor_in'):
del self.tensor_in
if self.created:
ret = acl.rt.destroy_stream(self.stream)
assert ret == ACL_SUCCESS, f"destroy stream failed, return {ret}."
#Transpose
class Transpose():
""" define a Transpose operator object to release Transpose. Permutes the dimensions according to perm.
The returned tensor's dimension i will correspond to the input dimension perm[i]
Args:
input : input tensor (AscendArray)
perm : Permutes the dimensions.
context: input context, optional
stream : input stream, optional
Methods:
run : do permute
        data : return output result
"""
def __init__(self, input, perm=[0, 1, 2, 3], context=None, stream=None):
if not isinstance(input, AscendArray):
raise TypeError(f"Input tensor expects a AscendArray, but got {type(input)}.")
if context and not isinstance(context, int):
raise TypeError(f"Input context expects an int, but got {type(context)}.")
if stream and not isinstance(stream, int):
raise TypeError(f"Input stream expects an int, but got {type(stream)}.")
# assign self value
self.context = context
self.stream = stream if stream else create_stream(context)
self.created = stream is None
OpSet()
self.__pre_set()
# create output array to save result
_perm = AscendArray.clone(np.array(perm, dtype='int32'))
self.output = AscendArray(input.shape, dtype=input.dtype, format='ND')
self.tensor_in1 = TensorDesc(input)
self.tensor_in2 = TensorDesc(_perm)
self.tensor_out = TensorDesc(self.output)
# do transpose
self.run()
def __pre_set(self):
""" set op name and attribute.
Args:
None
Returns:
None
"""
self.op_name = "Transpose"
self.op_attr = acl.op.create_attr()
def run(self):
""" run op.
Args:
None
Returns:
None
"""
        # do op execute
ret = acl.op.execute(self.op_name,
[self.tensor_in1.desc, self.tensor_in2.desc],
[self.tensor_in1.buff, self.tensor_in2.buff],
[self.tensor_out.desc],
[self.tensor_out.buff],
self.op_attr,
self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to excute op {self.op_name}, return {ret}.")
# do synchronize stream
ret = acl.rt.synchronize_stream(self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to synchronize stream in excute op, return {ret}.")
@property
def data(self):
return self.output
def __del__(self):
if hasattr(self, 'output'):
del self.output
if hasattr(self, 'tensor_out'):
del self.tensor_out
if hasattr(self, 'tensor_in'):
del self.tensor_in
if self.created:
ret = acl.rt.destroy_stream(self.stream)
assert ret == ACL_SUCCESS, f"destroy stream failed, return {ret}."
# Permute
class Permute():
""" define a Permute operator object to permute the dimensions.
    Args:
input : input tensor (AscendArray)
axes : Permutes the dimensions.
context: input context, optional
stream : input stream, optional
Methods:
run : do permute
        data : return output result
"""
def __init__(self, input, axes=(0, 2, 3, 1), context=None, stream=None):
if not isinstance(input, AscendArray):
raise TypeError(f"Input tensor expects a AscendArray, but got {type(input)}.")
if not isinstance(axes, (tuple, list)):
raise TypeError(f"Input axes expects a tuple or list, but got {type(axes)}.")
if context and not isinstance(context, int):
raise TypeError(f"Input context expects an int, but got {type(context)}.")
if stream and not isinstance(stream, int):
raise TypeError(f"Input stream expects an int, but got {type(stream)}.")
if tuple(axes) not in [(0, 2, 3, 1), (0, 3, 1, 2)]:
raise ValueError(f"Input axis only support (0, 2, 3, 1) or (0, 2, 3, 1).")
# assign self value
self._axes = axes
self.context = context
self.stream = stream if stream else create_stream(context)
self.created = stream is None
OpSet()
self.__pre_set()
# create output array to save result
out_shape = tuple([input.shape[i] for i in axes])
self.output = AscendArray(out_shape, dtype=input.dtype, format='ND')
self.tensor_in = TensorDesc(input)
self.tensor_out = TensorDesc(self.output)
# do transpose
self.run()
def __pre_set(self):
""" set op name and attribute.
Args:
None
Returns:
None
"""
self.op_name = "Permute"
self.op_attr = acl.op.create_attr()
ret = acl.op.set_attr_list_int(self.op_attr, 'order', np.array(self._axes, dtype=np.int64))
if ret != ACL_SUCCESS:
raise ValueError(f"Set attr 'order' failed, return {ret}.")
def run(self):
""" run op.
Args:
None
Returns:
None
"""
        # do op execute
ret = acl.op.execute(self.op_name,
[self.tensor_in.desc],
[self.tensor_in.buff],
[self.tensor_out.desc],
[self.tensor_out.buff],
self.op_attr,
self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to excute op {self.op_name}, return {ret}.")
# do synchronize stream
ret = acl.rt.synchronize_stream(self.stream)
if ret != ACL_SUCCESS:
raise ValueError(f"Failed to synchronize stream in excute op, return {ret}.")
@property
def data(self):
return self.output
def __del__(self):
if hasattr(self, 'output'):
del self.output
if hasattr(self, 'tensor_out'):
del self.tensor_out
if hasattr(self, 'tensor_in'):
del self.tensor_in
if hasattr(self, 'created') and self.created:
ret = acl.rt.destroy_stream(self.stream)
assert ret == ACL_SUCCESS, f"destroy stream failed, return {ret}."
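# --- Usage sketch (editor's addition, hedged): these ops need an initialised
# Ascend/ACL runtime plus the package's AscendArray type, so the lines below are a
# commented illustration rather than runnable code; the array values are made up.
# x    = AscendArray.clone(np.random.rand(2, 3).astype(np.float32))
# y16  = Cast(x, dtype=np.dtype('float16')).data        # float32 -> float16
# idx  = ArgMax(x, axis=1).data                          # per-row argmax indices
# nhwc = Permute(AscendArray.clone(np.zeros((1, 3, 8, 8), np.float32)),
#                axes=(0, 2, 3, 1)).data                 # NCHW -> NHWC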
|
[
"acl.op.cast",
"acl.op.create_attr",
"numpy.dtype",
"numpy.array",
"acl.rt.synchronize_stream",
"acl.rt.destroy_stream",
"acl.op.execute"
] |
[((1443, 1462), 'numpy.dtype', 'np.dtype', (['"""float16"""'], {}), "('float16')\n", (1451, 1462), True, 'import numpy as np\n'), ((2577, 2694), 'acl.op.cast', 'acl.op.cast', (['self.tensor_in.desc', 'self.tensor_in.buff', 'self.tensor_out.desc', 'self.tensor_out.buff', '(0)', 'self.stream'], {}), '(self.tensor_in.desc, self.tensor_in.buff, self.tensor_out.desc,\n self.tensor_out.buff, 0, self.stream)\n', (2588, 2694), False, 'import acl\n'), ((2979, 3017), 'acl.rt.synchronize_stream', 'acl.rt.synchronize_stream', (['self.stream'], {}), '(self.stream)\n', (3004, 3017), False, 'import acl\n'), ((5462, 5482), 'acl.op.create_attr', 'acl.op.create_attr', ([], {}), '()\n', (5480, 5482), False, 'import acl\n'), ((5649, 5852), 'acl.op.execute', 'acl.op.execute', (['self.op_name', '[self.tensor_in.desc, self.tensor_dim.desc]', '[self.tensor_in.buff, self.tensor_dim.buff]', '[self.tensor_out.desc]', '[self.tensor_out.buff]', 'self.op_attr', 'self.stream'], {}), '(self.op_name, [self.tensor_in.desc, self.tensor_dim.desc], [\n self.tensor_in.buff, self.tensor_dim.buff], [self.tensor_out.desc], [\n self.tensor_out.buff], self.op_attr, self.stream)\n', (5663, 5852), False, 'import acl\n'), ((6190, 6228), 'acl.rt.synchronize_stream', 'acl.rt.synchronize_stream', (['self.stream'], {}), '(self.stream)\n', (6215, 6228), False, 'import acl\n'), ((8650, 8670), 'acl.op.create_attr', 'acl.op.create_attr', ([], {}), '()\n', (8668, 8670), False, 'import acl\n'), ((8839, 9043), 'acl.op.execute', 'acl.op.execute', (['self.op_name', '[self.tensor_in1.desc, self.tensor_in2.desc]', '[self.tensor_in1.buff, self.tensor_in2.buff]', '[self.tensor_out.desc]', '[self.tensor_out.buff]', 'self.op_attr', 'self.stream'], {}), '(self.op_name, [self.tensor_in1.desc, self.tensor_in2.desc],\n [self.tensor_in1.buff, self.tensor_in2.buff], [self.tensor_out.desc], [\n self.tensor_out.buff], self.op_attr, self.stream)\n', (8853, 9043), False, 'import acl\n'), ((9382, 9420), 'acl.rt.synchronize_stream', 'acl.rt.synchronize_stream', (['self.stream'], {}), '(self.stream)\n', (9407, 9420), False, 'import acl\n'), ((11998, 12018), 'acl.op.create_attr', 'acl.op.create_attr', ([], {}), '()\n', (12016, 12018), False, 'import acl\n'), ((12393, 12546), 'acl.op.execute', 'acl.op.execute', (['self.op_name', '[self.tensor_in.desc]', '[self.tensor_in.buff]', '[self.tensor_out.desc]', '[self.tensor_out.buff]', 'self.op_attr', 'self.stream'], {}), '(self.op_name, [self.tensor_in.desc], [self.tensor_in.buff],\n [self.tensor_out.desc], [self.tensor_out.buff], self.op_attr, self.stream)\n', (12407, 12546), False, 'import acl\n'), ((12890, 12928), 'acl.rt.synchronize_stream', 'acl.rt.synchronize_stream', (['self.stream'], {}), '(self.stream)\n', (12915, 12928), False, 'import acl\n'), ((3507, 3541), 'acl.rt.destroy_stream', 'acl.rt.destroy_stream', (['self.stream'], {}), '(self.stream)\n', (3528, 3541), False, 'import acl\n'), ((4969, 4999), 'numpy.array', 'np.array', (['axis'], {'dtype': 'np.int32'}), '(axis, dtype=np.int32)\n', (4977, 4999), True, 'import numpy as np\n'), ((6701, 6735), 'acl.rt.destroy_stream', 'acl.rt.destroy_stream', (['self.stream'], {}), '(self.stream)\n', (6722, 6735), False, 'import acl\n'), ((8138, 8167), 'numpy.array', 'np.array', (['perm'], {'dtype': '"""int32"""'}), "(perm, dtype='int32')\n", (8146, 8167), True, 'import numpy as np\n'), ((9899, 9933), 'acl.rt.destroy_stream', 'acl.rt.destroy_stream', (['self.stream'], {}), '(self.stream)\n', (9920, 9933), False, 'import acl\n'), ((12082, 12118), 'numpy.array', 'np.array', 
'np.array', (['self._axes'], {'dtype': 'np.int64'}), '(self._axes, dtype=np.int64)\n', (12090, 12118), True, 'import numpy as np\n'), ((13436, 13470), 'acl.rt.destroy_stream', 'acl.rt.destroy_stream', (['self.stream'], {}), '(self.stream)\n', (13457, 13470), False, 'import acl\n')]
|
# all seam carving helper functions
# this file will be used as a custom module
import cv2 as cv
import numpy as np
def draw_vertical_seam(img, seam):
img_seam = img.copy()
# extracting the points from the seam
points = [(index, int(item)) for index, item in enumerate(seam)]
x_coords, y_coords = np.transpose(points)
# drawing the lines on img
img_seam[x_coords, y_coords] = (0, 255, 0)
return img_seam
def draw_horizontal_seam(img, seam):
img_seam = img.copy()
# extracting coordinates
coords = [(y, int(x)) for y, x in enumerate(seam)]
y_coords, x_coords = np.transpose(coords)
# drawing horizontal lines on img
img_seam[x_coords, y_coords] = (0, 255, 0)
return img_seam
def create_energy_matrix(img):
gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# computing x deriative of img
sobel_x = cv.Sobel(gray_img, cv.CV_64F, 1, 0, ksize=3)
# computing y deriative of img
sobel_y = cv.Sobel(gray_img, cv.CV_64F, 0, 1, ksize=3)
sobel_x = cv.convertScaleAbs(sobel_x)
sobel_y = cv.convertScaleAbs(sobel_y)
# energy matrix which is weighted sum of 2 imgs
energy_matrix = cv.addWeighted(sobel_x, .5, sobel_y, .5, 0)
return energy_matrix
def find_vertical_seam(img, energy_matrix):
rows, cols = img.shape[:2]
# filling the seam vector with zeros
    # we store indices of minimum values along the x-axis in seam
seam = np.zeros(rows)
# initializing distance and edge marices
distance = np.full((rows, cols), float("inf")) # filling the matrix with "infinity"
distance[0, :] = np.zeros(cols) # filling the first row of distance matrix with zeros
# keeps track of the index (index difference)
# of minimum parent out of last 3 parents at previous row
min_parent = np.zeros((rows, cols))
# using dynamic programming, we calculate distances and
# min parent of each pixel with the help of our energy matrix
for i in range(rows - 1): # loop through all rows except the last one to prevent overflow
for j in range(1, cols - 1): # loop through all columns except the first and last column due to underflow and overflow
# bottom left
if distance[i + 1, j - 1] > distance[i, j] + energy_matrix[i + 1, j - 1]:
distance[i + 1, j - 1] = distance[i, j] + energy_matrix[i + 1, j - 1]
                min_parent[i + 1, j - 1] = 1 # we came from a parent with less energy on the right side
# bottom center
if distance[i + 1, j] > distance[i, j] + energy_matrix[i + 1, j]:
distance[i + 1, j] = distance[i, j] + energy_matrix[i + 1, j]
min_parent[i + 1, j] = 0 # last parent we have come from is exactly above (same column, above row)
# bottom right
if distance[i + 1, j + 1] > distance[i, j] + energy_matrix[i + 1, j + 1]:
distance[i + 1, j + 1] = distance[i, j] + energy_matrix[i + 1, j + 1]
min_parent[i + 1, j + 1] = -1 # a parent with less energy on left side (left column, above row)
# tracking the path from the last row all the way up to first row
# we detect the minimum parent in each row as we come up and simply
# store its index in seam matrix, starting with minimum value of the last row
seam[rows - 1] = np.argmin(distance[rows - 1, :]) # last element of seam
# note that the first row is not included since we assign previous row each time
for i in range(rows - 1, 0, -1):
seam[i - 1] = seam[i] + min_parent[i, int(seam[i])]
return seam
# same as above method but from left to right
def find_horizontal_seam(img, energy_matrix):
rows, cols = img.shape[:2]
    # store indices of minimum values along the y-axis in seam
seam = np.zeros(cols)
distance = np.full((rows, cols), float("inf"))
distance[:, 0] = np.zeros(rows) # filling the first column of distance matrix with zeros
# stores index difference of minimum parent out of 3 in previous column
min_parent = np.zeros((rows, cols))
for j in range(cols - 1): # loop through all columns except the last column due to overflow
for i in range(1, rows - 1): # loop through all rows except the first and last one to prevent both underflow & overflow
# above right
if distance[i - 1, j + 1] > distance[i, j] + energy_matrix[i - 1, j + 1]:
distance[i - 1, j + 1] = distance[i, j] + energy_matrix[i - 1, j + 1]
                min_parent[i - 1, j + 1] = 1 # we came from a parent with less energy underneath
# center right
if distance[i, j + 1] > distance[i, j] + energy_matrix[i, j + 1]:
distance[i, j + 1] = distance[i, j] + energy_matrix[i, j + 1]
min_parent[i, j + 1] = 0 # last parent we have come from is in the same row
# bottom right
if distance[i + 1, j + 1] > distance[i, j] + energy_matrix[i + 1, j + 1]:
distance[i + 1, j + 1] = distance[i, j] + energy_matrix[i + 1, j + 1]
min_parent[i + 1, j + 1] = -1 # a parent with less energy which is in the above row
# tracking the path from the last column to the very first one
    # we detect the minimum parent in each column as we travel along the x-axis from right to left
# storing its index in seam, starting with the minimum value of the last column
seam[cols - 1] = np.argmin(distance[:, cols - 1]) # last element of seam
# note that the first column is not included since we assign previous column each time
for j in range(cols - 1, 0, -1):
seam[j - 1] = seam[j] + min_parent[int(seam[j]), j]
return seam
# deleting the vertical seam and reducing the width
def remove_vertical_seam(img, seam):
rows, cols = img.shape[:2]
# assign each pixel to its left column to remove the seam
for i in range(rows):
for j in range(int(seam[i]), cols - 1):
img[i, j] = img[i, j + 1]
# crop last column
img = img[:, :cols - 1]
return img
# deleting the horizontal seam and reducing the height
def remove_horizontal_seam(img, seam):
rows, cols = img.shape[:2]
# assign each pixel to its above row element to remove the seam
for j in range(cols):
for i in range(int(seam[j]), rows - 1):
img[i, j] = img[i + 1, j]
# crop last row
img = img[:rows - 1, :]
return img
# expanding the width of the image by increasing the columns
def add_vertical_seam(img, seam):
rows, cols = img.shape[:2]
    # appending an all-black column to img (widening)
    column = np.zeros((rows, 1, 3), dtype=np.uint8) # remember an image is a 3d matrix with 3 channels
expanded_img = np.hstack((img, column)) # adding the column to the end (a black pixel will be added to each row)
for i in range(rows):
for j in range(cols, int(seam[i]), -1):
expanded_img[i, j] = expanded_img[i, j - 1]
return expanded_img
# expand img horizontally by increasing the height
def add_horizontal_seam(img, seam):
rows, cols = img.shape[:2]
    # appending an all-black row to img (heightening)
row = np.zeros((1, cols, 3), dtype=np.uint8)
expanded_img = np.vstack((img, row)) # adding the row to the end (a black pixel will be added to each column)
for j in range(cols):
for i in range(rows, int(seam[j]), -1):
expanded_img[i, j] = expanded_img[i - 1, j]
return expanded_img
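# --- Usage sketch (editor's addition): shrinking an image's width by repeatedly
# carving out the lowest-energy vertical seam, built only from the helpers above.
def shrink_width(img, num_seams):
    out = img.copy()
    for _ in range(num_seams):
        energy_matrix = create_energy_matrix(out)
        seam = find_vertical_seam(out, energy_matrix)
        out = remove_vertical_seam(out, seam)
    return out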
if __name__ == "__main__":
img = cv.imread('../assets/test1.jpg')
img = cv.resize(img, None, fx=.5, fy=.5)
print(img.shape)
energy_matrix = create_energy_matrix(img)
seam = find_horizontal_seam(img, energy_matrix)
img = draw_horizontal_seam(img, seam)
img = add_horizontal_seam(img, seam)
print(img.shape)
cv.imshow("", img)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"cv2.resize",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.transpose",
"numpy.zeros",
"numpy.argmin",
"cv2.addWeighted",
"numpy.hstack",
"cv2.imread",
"cv2.convertScaleAbs",
"cv2.imshow",
"cv2.Sobel",
"numpy.vstack"
] |
[((317, 337), 'numpy.transpose', 'np.transpose', (['points'], {}), '(points)\n', (329, 337), True, 'import numpy as np\n'), ((612, 632), 'numpy.transpose', 'np.transpose', (['coords'], {}), '(coords)\n', (624, 632), True, 'import numpy as np\n'), ((787, 822), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (798, 822), True, 'import cv2 as cv\n'), ((873, 917), 'cv2.Sobel', 'cv.Sobel', (['gray_img', 'cv.CV_64F', '(1)', '(0)'], {'ksize': '(3)'}), '(gray_img, cv.CV_64F, 1, 0, ksize=3)\n', (881, 917), True, 'import cv2 as cv\n'), ((967, 1011), 'cv2.Sobel', 'cv.Sobel', (['gray_img', 'cv.CV_64F', '(0)', '(1)'], {'ksize': '(3)'}), '(gray_img, cv.CV_64F, 0, 1, ksize=3)\n', (975, 1011), True, 'import cv2 as cv\n'), ((1027, 1054), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['sobel_x'], {}), '(sobel_x)\n', (1045, 1054), True, 'import cv2 as cv\n'), ((1069, 1096), 'cv2.convertScaleAbs', 'cv.convertScaleAbs', (['sobel_y'], {}), '(sobel_y)\n', (1087, 1096), True, 'import cv2 as cv\n'), ((1170, 1215), 'cv2.addWeighted', 'cv.addWeighted', (['sobel_x', '(0.5)', 'sobel_y', '(0.5)', '(0)'], {}), '(sobel_x, 0.5, sobel_y, 0.5, 0)\n', (1184, 1215), True, 'import cv2 as cv\n'), ((1432, 1446), 'numpy.zeros', 'np.zeros', (['rows'], {}), '(rows)\n', (1440, 1446), True, 'import numpy as np\n'), ((1602, 1616), 'numpy.zeros', 'np.zeros', (['cols'], {}), '(cols)\n', (1610, 1616), True, 'import numpy as np\n'), ((1816, 1838), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (1824, 1838), True, 'import numpy as np\n'), ((3398, 3430), 'numpy.argmin', 'np.argmin', (['distance[rows - 1, :]'], {}), '(distance[rows - 1, :])\n', (3407, 3430), True, 'import numpy as np\n'), ((3861, 3875), 'numpy.zeros', 'np.zeros', (['cols'], {}), '(cols)\n', (3869, 3875), True, 'import numpy as np\n'), ((3949, 3963), 'numpy.zeros', 'np.zeros', (['rows'], {}), '(rows)\n', (3957, 3963), True, 'import numpy as np\n'), ((4119, 4141), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (4127, 4141), True, 'import numpy as np\n'), ((5585, 5617), 'numpy.argmin', 'np.argmin', (['distance[:, cols - 1]'], {}), '(distance[:, cols - 1])\n', (5594, 5617), True, 'import numpy as np\n'), ((6794, 6832), 'numpy.zeros', 'np.zeros', (['(rows, 1, 3)'], {'dtype': 'np.uint8'}), '((rows, 1, 3), dtype=np.uint8)\n', (6802, 6832), True, 'import numpy as np\n'), ((6902, 6926), 'numpy.hstack', 'np.hstack', (['(img, column)'], {}), '((img, column))\n', (6911, 6926), True, 'import numpy as np\n'), ((7336, 7374), 'numpy.zeros', 'np.zeros', (['(1, cols, 3)'], {'dtype': 'np.uint8'}), '((1, cols, 3), dtype=np.uint8)\n', (7344, 7374), True, 'import numpy as np\n'), ((7394, 7415), 'numpy.vstack', 'np.vstack', (['(img, row)'], {}), '((img, row))\n', (7403, 7415), True, 'import numpy as np\n'), ((7689, 7721), 'cv2.imread', 'cv.imread', (['"""../assets/test1.jpg"""'], {}), "('../assets/test1.jpg')\n", (7698, 7721), True, 'import cv2 as cv\n'), ((7732, 7768), 'cv2.resize', 'cv.resize', (['img', 'None'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(img, None, fx=0.5, fy=0.5)\n', (7741, 7768), True, 'import cv2 as cv\n'), ((8004, 8022), 'cv2.imshow', 'cv.imshow', (['""""""', 'img'], {}), "('', img)\n", (8013, 8022), True, 'import cv2 as cv\n'), ((8027, 8040), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (8037, 8040), True, 'import cv2 as cv\n'), ((8045, 8067), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (8065, 8067), True, 'import cv2 as cv\n')]
|
import os
import numpy as np
from skimage import io
dask_available = True
try:
from dask import array as da
except ImportError:
dask_available = False
def imread(filenames, *, use_dask=None, stack=True):
"""Read image files and return an array.
If multiple images are selected, they are stacked along the 0th axis.
Parameters
-------
filenames : list
List of filenames to be opened
use_dask : bool
Whether to use dask to create a lazy array, rather than NumPy.
Default is None, which is interpreted as "use if available". If set
        to True and dask is not installed, this function raises a ValueError.
stack : bool
Whether to stack the images in multiple files into a single array. If
False, a list of arrays will be returned.
Returns
-------
image : array
Array of images
"""
if dask_available and use_dask is None:
use_dask = True
if not dask_available and use_dask:
raise ValueError('Dask array requested but dask is not installed.')
images = [io.imread(filename) for filename in filenames]
if len(images) == 1:
image = images[0]
else:
if use_dask:
image = da.stack(images)
else:
image = np.stack(images)
return image
def magic_read(filenames, *, use_dask=None, stack=True):
"""Dispatch the appropriate reader given some files.
The files are assumed to all have the same type.
Parameters
-------
filenames : list
List of filenames to be opened
use_dask : bool
Whether to use dask to create a lazy array, rather than NumPy.
Default is None, which is interpreted as "use if available". If set
        to True and dask is not installed, this function raises a ValueError.
stack : bool
Whether to stack the images in multiple files into a single array. If
False, a list of arrays will be returned.
Returns
-------
image : array-like
Array or list of images
"""
if len(filenames) == 0:
return None
ext = os.path.splitext(filenames[0])[-1]
if ext == '.zarr':
if not dask_available:
raise ValueError('Dask is required to open zarr files.')
if len(filenames) == 1:
return da.from_zarr(filenames[0])
else:
loaded = [da.from_zarr(f) for f in filenames]
if stack:
return da.stack(loaded)
else:
return loaded
else: # assume proper image extension
return imread(filenames, use_dask=use_dask, stack=stack)
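# --- Usage sketch (editor's addition, hedged): a tiny self-contained check of the
# NumPy path; the frame file names are illustrative.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        paths = []
        for i in range(3):
            p = os.path.join(tmp, "frame{}.png".format(i))
            io.imsave(p, np.zeros((8, 8), dtype=np.uint8))
            paths.append(p)
        print(magic_read(paths, use_dask=False).shape)  # expected (3, 8, 8)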
|
[
"numpy.stack",
"dask.array.stack",
"os.path.splitext",
"skimage.io.imread",
"dask.array.from_zarr"
] |
[((1066, 1085), 'skimage.io.imread', 'io.imread', (['filename'], {}), '(filename)\n', (1075, 1085), False, 'from skimage import io\n'), ((2074, 2104), 'os.path.splitext', 'os.path.splitext', (['filenames[0]'], {}), '(filenames[0])\n', (2090, 2104), False, 'import os\n'), ((1215, 1231), 'dask.array.stack', 'da.stack', (['images'], {}), '(images)\n', (1223, 1231), True, 'from dask import array as da\n'), ((1266, 1282), 'numpy.stack', 'np.stack', (['images'], {}), '(images)\n', (1274, 1282), True, 'import numpy as np\n'), ((2283, 2309), 'dask.array.from_zarr', 'da.from_zarr', (['filenames[0]'], {}), '(filenames[0])\n', (2295, 2309), True, 'from dask import array as da\n'), ((2346, 2361), 'dask.array.from_zarr', 'da.from_zarr', (['f'], {}), '(f)\n', (2358, 2361), True, 'from dask import array as da\n'), ((2427, 2443), 'dask.array.stack', 'da.stack', (['loaded'], {}), '(loaded)\n', (2435, 2443), True, 'from dask import array as da\n')]
|
"""
This is a procedural interface to the yttalab library
<EMAIL>
The following commands are provided:
Design and plot commands
dlqr - Discrete linear quadratic regulator
d2c - discrete to continuous time conversion
full_obs - full order observer
red_obs - reduced order observer
comp_form - state feedback controller+observer in compact form
comp_form_i - state feedback controller+observer+integ in compact form
set_aw - introduce anti-windup into controller
bb_dcgain - return the steady state value of the step response
placep - Pole placement (replacement for place)
bb_c2d - Continuous to discrete conversion
Old functions now corrected in python control
bb_dare - Solve Riccati equation for discrete time systems
"""
from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt
from numpy.linalg import norm
from scipy.linalg import inv, eigvals, logm
import scipy as sp
from matplotlib.pyplot import *
from control import *
def d2c(sys,method='zoh'):
"""Continous to discrete conversion with ZOH method
Call:
sysc=c2d(sys,method='log')
Parameters
----------
sys : System in statespace or Tf form
method: 'zoh' or 'bi'
Returns
-------
sysc: continous system ss or tf
"""
flag = 0
if isinstance(sys, TransferFunction):
sys=tf2ss(sys)
flag=1
a=sys.A
b=sys.B
c=sys.C
d=sys.D
Ts=sys.dt
n=shape(a)[0]
nb=shape(b)[1]
nc=shape(c)[0]
tol=1e-12
if method=='zoh':
if n==1:
if b[0,0]==1:
A=0
B=b/sys.dt
C=c
D=d
else:
tmp1=hstack((a,b))
tmp2=hstack((zeros((nb,n)),eye(nb)))
tmp=vstack((tmp1,tmp2))
s=logm(tmp)
s=s/Ts
if norm(imag(s),ord='inf') > sqrt(sp.finfo(float).eps):
print("Warning: accuracy may be poor")
s=real(s)
A=s[0:n,0:n]
B=s[0:n,n:n+nb]
C=c
D=d
elif method=='foh':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
Id = mat(eye(n))
A = logm(a)/Ts
A = real(around(A,12))
Amat = mat(A)
B = (a-Id)**(-2)*Amat**2*b*Ts
B = real(around(B,12))
Bmat = mat(B)
C = c
D = d - C*(Amat**(-2)/Ts*(a-Id)-Amat**(-1))*Bmat
D = real(around(D,12))
elif method=='bi':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
poles=eigvals(a)
if any(abs(poles-1)<200*sp.finfo(float).eps):
print("d2c: some poles very close to one. May get bad results.")
I=mat(eye(n,n))
tk = 2 / sqrt (Ts)
A = (2/Ts)*(a-I)*inv(a+I)
iab = inv(I+a)*b
B = tk*iab
C = tk*(c*inv(I+a))
D = d- (c*iab)
else:
print("Method not supported")
return
sysc=StateSpace(A,B,C,D)
#print("Teste ", sysc)
if flag==1:
sysc=ss2tf(sysc)
return sysc
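# --- Usage sketch (editor's addition, hedged): converting a small discrete-time
# state-space model back to continuous time. The matrices are made up for
# illustration; StateSpace comes from python-control, imported above via *.
if __name__ == "__main__":
    sysd = StateSpace([[0.9, 0.1], [0.0, 0.8]], [[0.0], [1.0]],
                      [[1.0, 0.0]], [[0.0]], 0.1)   # Ts = 0.1 s
    print(d2c(sysd, method='zoh'))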
|
[
"scipy.linalg.logm",
"scipy.linalg.eigvals",
"numpy.zeros",
"numpy.hstack",
"numpy.shape",
"numpy.around",
"numpy.imag",
"scipy.linalg.inv",
"numpy.real",
"numpy.eye",
"scipy.finfo",
"numpy.mat",
"numpy.vstack",
"numpy.sqrt"
] |
[((1493, 1501), 'numpy.shape', 'shape', (['a'], {}), '(a)\n', (1498, 1501), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1512, 1520), 'numpy.shape', 'shape', (['b'], {}), '(b)\n', (1517, 1520), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1531, 1539), 'numpy.shape', 'shape', (['c'], {}), '(c)\n', (1536, 1539), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1745, 1759), 'numpy.hstack', 'hstack', (['(a, b)'], {}), '((a, b))\n', (1751, 1759), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1824, 1844), 'numpy.vstack', 'vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (1830, 1844), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1858, 1867), 'scipy.linalg.logm', 'logm', (['tmp'], {}), '(tmp)\n', (1862, 1867), False, 'from scipy.linalg import inv, eigvals, logm\n'), ((2024, 2031), 'numpy.real', 'real', (['s'], {}), '(s)\n', (2028, 2031), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2151, 2157), 'numpy.mat', 'mat', (['a'], {}), '(a)\n', (2154, 2157), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2168, 2174), 'numpy.mat', 'mat', (['b'], {}), '(b)\n', (2171, 2174), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2185, 2191), 'numpy.mat', 'mat', (['c'], {}), '(c)\n', (2188, 2191), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2202, 2208), 'numpy.mat', 'mat', (['d'], {}), '(d)\n', (2205, 2208), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2303, 2309), 'numpy.mat', 'mat', (['A'], {}), '(A)\n', (2306, 2309), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2394, 2400), 'numpy.mat', 'mat', (['B'], {}), '(B)\n', (2397, 2400), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2226, 2232), 'numpy.eye', 'eye', (['n'], {}), '(n)\n', (2229, 2232), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2246, 2253), 'scipy.linalg.logm', 'logm', (['a'], {}), '(a)\n', (2250, 2253), False, 'from scipy.linalg import inv, eigvals, logm\n'), ((2274, 2287), 'numpy.around', 'around', (['A', '(12)'], {}), '(A, 12)\n', (2280, 2287), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2365, 2378), 'numpy.around', 'around', (['B', '(12)'], {}), '(B, 12)\n', (2371, 2378), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2489, 2502), 'numpy.around', 'around', (['D', '(12)'], {}), '(D, 12)\n', (2495, 2502), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2536, 2542), 'numpy.mat', 'mat', (['a'], {}), '(a)\n', (2539, 2542), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2553, 2559), 'numpy.mat', 'mat', (['b'], {}), '(b)\n', (2556, 2559), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2570, 2576), 'numpy.mat', 'mat', (['c'], {}), '(c)\n', (2573, 2576), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, 
sqrt\n'), ((2587, 2593), 'numpy.mat', 'mat', (['d'], {}), '(d)\n', (2590, 2593), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2608, 2618), 'scipy.linalg.eigvals', 'eigvals', (['a'], {}), '(a)\n', (2615, 2618), False, 'from scipy.linalg import inv, eigvals, logm\n'), ((1784, 1798), 'numpy.zeros', 'zeros', (['(nb, n)'], {}), '((nb, n))\n', (1789, 1798), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1798, 1805), 'numpy.eye', 'eye', (['nb'], {}), '(nb)\n', (1801, 1805), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((1907, 1914), 'numpy.imag', 'imag', (['s'], {}), '(s)\n', (1911, 1914), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2773, 2782), 'numpy.eye', 'eye', (['n', 'n'], {}), '(n, n)\n', (2776, 2782), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2800, 2808), 'numpy.sqrt', 'sqrt', (['Ts'], {}), '(Ts)\n', (2804, 2808), False, 'from numpy import hstack, vstack, imag, zeros, eye, mat, shape, real, around, sqrt\n'), ((2835, 2845), 'scipy.linalg.inv', 'inv', (['(a + I)'], {}), '(a + I)\n', (2838, 2845), False, 'from scipy.linalg import inv, eigvals, logm\n'), ((2858, 2868), 'scipy.linalg.inv', 'inv', (['(I + a)'], {}), '(I + a)\n', (2861, 2868), False, 'from scipy.linalg import inv, eigvals, logm\n'), ((1933, 1948), 'scipy.finfo', 'sp.finfo', (['float'], {}), '(float)\n', (1941, 1948), True, 'import scipy as sp\n'), ((2906, 2916), 'scipy.linalg.inv', 'inv', (['(I + a)'], {}), '(I + a)\n', (2909, 2916), False, 'from scipy.linalg import inv, eigvals, logm\n'), ((2651, 2666), 'scipy.finfo', 'sp.finfo', (['float'], {}), '(float)\n', (2659, 2666), True, 'import scipy as sp\n')]
|
# This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "max_softmax_epigraph_0"
prob = None
opt_val = None
problemID = problemID + "_epigraph"
# Variable declarations
import scipy.sparse as sps
def normalized_data_matrix(m, n, mu):
if mu == 1:
# dense
A = np.random.randn(m, n)
A /= np.sqrt(np.sum(A**2, 0))
else:
# sparse
A = sps.rand(m, n, mu)
A.data = np.random.randn(A.nnz)
N = A.copy()
N.data = N.data**2
A = A*sps.diags([1 / np.sqrt(np.ravel(N.sum(axis=0)))], [0])
return A
np.random.seed(0)
k = 20 #class
m = 100 #instance
n = 50 #dim
p = 5 #p-largest
X = normalized_data_matrix(m,n,1)
Y = np.random.randint(0, k, m)
# Problem construction
def one_hot(y, k):
m = len(y)
return sps.coo_matrix((np.ones(m), (np.arange(m), y)), shape=(m, k)).todense()
Theta = cp.Variable(n,k)
beta = cp.Variable(1, k)
t = cp.Variable(m)
texp = cp.Variable(m)
f = cp.sum_largest(t+texp, p) + cp.sum_squares(Theta)
C = []
C.append(cp.log_sum_exp(X*Theta + np.ones((m, 1))*beta, axis=1) <= texp)
Yi = one_hot(Y, k)
C.append(t == cp.vstack([-(X[i]*Theta + beta)[Y[i]] for i in range(m)]))
prob = cp.Problem(cp.Minimize(f), C)
# Problem collection
# Single problem collection
problemDict = {
"problemID" : problemID,
"problem" : prob,
"opt_val" : opt_val
}
problems = [problemDict]
# For debugging individual problems:
if __name__ == "__main__":
def printResults(problemID = "", problem = None, opt_val = None):
print(problemID)
problem.solve()
print("\tstatus: {}".format(problem.status))
print("\toptimal value: {}".format(problem.value))
print("\ttrue optimal value: {}".format(opt_val))
printResults(**problems[0])
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.random.randn",
"numpy.ones",
"numpy.random.randint",
"scipy.sparse.rand",
"numpy.arange",
"cvxpy.Variable",
"cvxpy.sum_largest",
"cvxpy.sum_squares",
"cvxpy.Minimize"
] |
[((683, 700), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (697, 700), True, 'import numpy as np\n'), ((805, 831), 'numpy.random.randint', 'np.random.randint', (['(0)', 'k', 'm'], {}), '(0, k, m)\n', (822, 831), True, 'import numpy as np\n'), ((984, 1001), 'cvxpy.Variable', 'cp.Variable', (['n', 'k'], {}), '(n, k)\n', (995, 1001), True, 'import cvxpy as cp\n'), ((1008, 1025), 'cvxpy.Variable', 'cp.Variable', (['(1)', 'k'], {}), '(1, k)\n', (1019, 1025), True, 'import cvxpy as cp\n'), ((1030, 1044), 'cvxpy.Variable', 'cp.Variable', (['m'], {}), '(m)\n', (1041, 1044), True, 'import cvxpy as cp\n'), ((1052, 1066), 'cvxpy.Variable', 'cp.Variable', (['m'], {}), '(m)\n', (1063, 1066), True, 'import cvxpy as cp\n'), ((1072, 1099), 'cvxpy.sum_largest', 'cp.sum_largest', (['(t + texp)', 'p'], {}), '(t + texp, p)\n', (1086, 1099), True, 'import cvxpy as cp\n'), ((1100, 1121), 'cvxpy.sum_squares', 'cp.sum_squares', (['Theta'], {}), '(Theta)\n', (1114, 1121), True, 'import cvxpy as cp\n'), ((1312, 1326), 'cvxpy.Minimize', 'cp.Minimize', (['f'], {}), '(f)\n', (1323, 1326), True, 'import cvxpy as cp\n'), ((393, 414), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (408, 414), True, 'import numpy as np\n'), ((492, 510), 'scipy.sparse.rand', 'sps.rand', (['m', 'n', 'mu'], {}), '(m, n, mu)\n', (500, 510), True, 'import scipy.sparse as sps\n'), ((528, 550), 'numpy.random.randn', 'np.random.randn', (['A.nnz'], {}), '(A.nnz)\n', (543, 550), True, 'import numpy as np\n'), ((436, 453), 'numpy.sum', 'np.sum', (['(A ** 2)', '(0)'], {}), '(A ** 2, 0)\n', (442, 453), True, 'import numpy as np\n'), ((919, 929), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (926, 929), True, 'import numpy as np\n'), ((1163, 1178), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (1170, 1178), True, 'import numpy as np\n'), ((932, 944), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (941, 944), True, 'import numpy as np\n')]
|
#!/bin/env python
"""
@author: <NAME>
Co-authors: <NAME>, <NAME>
Creates a customizable image sequence for the spectrum of an audio file.
"""
from arguments import args, initArgs, processArgs # Handles arguments
from styles import renderFrame # Handles styles
from audio2numpy import open_audio # Works with several audio formats, including .mp3 (Uses ffmpeg as subroutine)
from time import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
from os import mkdir, path, remove, rmdir
from sys import exit, stdout, stderr
from joblib import Parallel, delayed
from multiprocessing import Manager
import subprocess
VID_CODEC = "mp4v"
VID_EXT = ".mp4"
"""
Loads audio file.
"""
def loadAudio():
if args.test:
fileData = np.load("testData.npy")
samplerate = 44100
return fileData, samplerate
else:
if not path.isfile(args.filename):
exit("Path to file does not exist.")
else:
fileData, samplerate = open_audio(args.filename)
return fileData, samplerate
"""
Processes data from <FILENAME> and assigns the data to its respective channel's frames.
"""
def calculateFrameData(fileData, samplerate):
# Chooses what channels to be calculated
channels = []
if len(fileData.shape) > 1: # Converts multiple channels to single channel
if args.channel == "average":
channels.append(np.mean(fileData, axis=1))
elif args.channel == "left":
channels.append(fileData[:,0])
elif args.channel == "right":
channels.append(fileData[:,1])
else: # Adds all channels (Stereo, Surround)
for i in range(fileData.shape[1]):
channels.append(fileData[:,i])
else: # Adds mono channel
channels.append(fileData)
frameData = []
for channel in channels:
# Slices channelData to start and end point
channelData = channel[int(args.start*samplerate):int(args.end*samplerate)]
# Splits data into frames
channelFrameData = []
stepSize = samplerate/args.framerate
for i in range(int(np.ceil(len(channelData)/stepSize))):
frameDataMidpoint = stepSize * i + (stepSize/2)
frameDataStart = int(frameDataMidpoint - (args.duration/1000/2)*samplerate)
frameDataEnd = int(frameDataMidpoint + (args.duration/1000/2)*samplerate)
if frameDataStart < 0: # Leftbound data
emptyFrame = np.zeros(int(args.duration/1000 * samplerate))
currentFrameData = channelData[0:frameDataEnd]
emptyFrame[0:len(currentFrameData)] = currentFrameData
currentFrameData = emptyFrame
elif frameDataEnd > len(channelData): # Rightbound data
emptyFrame = np.zeros(int(args.duration/1000 * samplerate))
currentFrameData = channelData[frameDataStart:]
emptyFrame[0:len(currentFrameData)] = currentFrameData
currentFrameData = emptyFrame
else: # Inbound data
currentFrameData = channelData[int(frameDataStart):int(frameDataEnd)]
# Fourier Transformation (Amplitudes)
frameDataAmplitudes = abs(np.fft.rfft(currentFrameData))
# Slices frameDataAmplitudes to only contain the amplitudes between startFrequency and endFrequency
frameDataAmplitudes = frameDataAmplitudes[int(args.frequencyStart/(samplerate/2)*len(frameDataAmplitudes)):int(args.frequencyEnd/(samplerate/2)*len(frameDataAmplitudes))]
channelFrameData.append(frameDataAmplitudes)
#frameData.append(channelFrameData)
frameData.append(channelFrameData)
return frameData
"""
Creates the bins for every channels frame. A bin contains an amplitude that will later be represented as the height of a bar, point, line, etc. on the frame.
"""
def createBins(frameData):
bins = []
for channel in frameData:
channelBins = []
for data in channel:
frameBins = []
for i in range(args.bins):
if args.xlog == 0:
dataStart = int(i*len(data)/args.bins)
dataEnd = int((i+1)*len(data)/args.bins)
else:
dataStart = int((i/args.bins)**args.xlog * len(data))
dataEnd = int(((i+1)/args.bins)**args.xlog * len(data))
if dataEnd == dataStart:
dataEnd += 1 # Ensures [dataStart:dataEnd] does not result NaN
frameBins.append(np.mean(data[dataStart:dataEnd]))
channelBins.append(frameBins)
bins.append(channelBins)
return bins
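# --- Worked example (editor's addition): with xlog > 1 the bin edges follow
# (i/bins)**xlog, so low frequencies get narrow bins and highs get wide ones.
# For 4 bins over 100 amplitude samples and xlog = 2 the slices come out as:
#   [(int((i/4)**2 * 100), int(((i+1)/4)**2 * 100)) for i in range(4)]
#   -> [(0, 6), (6, 25), (25, 56), (56, 100)]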
"""
Smoothes the bins in a frame (over the previous/next n bins within the frame).
"""
def smoothBinData(bins):
binsSmoothed = []
for channel in bins:
channelBinsSmoothed = []
for frameBinData in channel:
smoothedBinData = []
for i in range(len(frameBinData)):
if i < args.smoothY: # First n bins
smoothedBinData.append(np.mean(frameBinData[:i+args.smoothY+1]))
elif i >= len(frameBinData)-args.smoothY: # Last n bins
smoothedBinData.append(np.mean(frameBinData[i-args.smoothY:]))
else: # Normal Case
smoothedBinData.append(np.mean(frameBinData[i-args.smoothY:i+args.smoothY+1]))
channelBinsSmoothed.append(smoothedBinData)
binsSmoothed.append(channelBinsSmoothed)
return binsSmoothed
"""
Creates directory named <args.destination>
Renders frames from bin data and exports them directly to <args.processes> partial videos
If args.imageSequence is set, instead exports frames as images into the directory
Starts at "0.png" for first frame.
"""
def renderSaveFrames(bins):
    bins = bins/np.max(bins) # Normalize amplitudes to [0,1]
if args.ylog != 0:
div = np.log2(args.ylog + 1) # Constant for y-scaling
bins = np.log2(args.ylog * np.array(bins) + 1)/div # Y-scaling
numChunks = int(np.ceil(bins.shape[1]/(args.processes * args.chunkSize))) * args.processes # Total number of chunks (expanded to be a multiple of args.processes)
shMem = Manager().dict()
shMem['framecount'] = 0
Parallel(n_jobs=args.processes)(delayed(renderSavePartial)(j, numChunks, bins, shMem) for j in range(args.processes))
printProgressBar(bins.shape[1], bins.shape[1])
print() # New line after progress bar
"""
Renders and saves one process' share of frames in chunks
"""
def renderSavePartial(partialCounter, numChunks, bins, shMem):
if args.imageSequence:
vid = None
else:
fourcc = cv2.VideoWriter_fourcc(*VID_CODEC)
dest = args.destination+"/part"+str(partialCounter)+VID_EXT
vid = cv2.VideoWriter(dest, fourcc, args.framerate, (args.width, args.height))
chunksPerProcess = int(numChunks/args.processes)
for i in range(chunksPerProcess):
chunkCounter = partialCounter*chunksPerProcess + i
renderSaveChunk(chunkCounter, numChunks, bins, vid, shMem)
if not args.imageSequence:
vid.release()
"""
Renders and exports one chunk worth of frames
"""
def renderSaveChunk(chunkCounter, numChunks, bins, vid, shMem):
chunksPerProcess = int(numChunks/args.processes)
finishedChunkSets = int(chunkCounter/chunksPerProcess)
framesPerProcess = int(bins.shape[1]/args.processes)
currentChunkNumInNewSet = chunkCounter - finishedChunkSets * chunksPerProcess
start = finishedChunkSets * framesPerProcess + currentChunkNumInNewSet * args.chunkSize
end = start + args.chunkSize
if chunkCounter % chunksPerProcess == chunksPerProcess - 1:
completeChunkSets = int(numChunks/args.processes) - 1
fullSetChunks = completeChunkSets * args.processes
fullSetFrames = fullSetChunks * args.chunkSize
remainingFrames = bins.shape[1] - fullSetFrames
remainderChunkSize = int(remainingFrames/args.processes)
end = start + remainderChunkSize
frames = renderChunkFrames(bins, start, end)
if args.test:
plt.imsave("testFrame.png", frames[0], vmin=0, vmax=255, cmap='gray')
else:
for i in range(len(frames)):
if args.imageSequence:
plt.imsave(str(args.destination) + "/" + str(start + i) + ".png", frames[i], vmin=0, vmax=255, cmap='gray')
else:
vid.write(frames[i])
shMem['framecount'] += 1
printProgressBar(shMem['framecount'], bins.shape[1])
"""
Renders one chunk of frames
"""
def renderChunkFrames(bins, start, end):
frames = []
for j in range(start, end):
frame = renderFrame(args, bins, j)
frames.append(frame)
return frames
"""
Progress Bar (Modified from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console)
"""
def printProgressBar (iteration, total, prefix = "Progress:", suffix = "Complete", decimals = 2, length = 50, fill = '█', printEnd = "\r"):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% ({iteration}/{total}) {suffix}', end = printEnd)
"""
Concatenates partial videos to full video and overlays audio.
Returns ffmpeg's exit status (0 on success).
"""
def createVideo():
with open(args.destination+"/vidList", "x") as vidList:
for i in range(args.processes):
vidList.write("file 'part"+ str(i) + VID_EXT +"'\n")
arguments = [
'ffmpeg',
'-hide_banner',
'-loglevel', 'error',
'-stats',
'-f', 'concat',
'-safe',
'0',
'-i',
args.destination+"/vidList",
]
if args.start != 0:
arguments += ['-ss', str(args.start)]
arguments += ['-i', args.filename]
if args.end != -1:
arguments += ['-t', str(args.end - args.start)]
arguments += [
'-c:v', 'libx264',
'-preset', 'ultrafast',
'-crf', '16',
'-pix_fmt', 'yuv420p',
'-c', 'copy',
'-y', args.destination+VID_EXT
]
proc = subprocess.Popen(
arguments,
stdout=stdout,
stderr=stderr,
)
return proc.wait()
def cleanupFiles(directoryExisted):
remove(args.destination+"/vidList")
for i in range(args.processes):
remove(args.destination+"/part"+str(i)+VID_EXT)
if not directoryExisted:
try:
rmdir(args.destination)
except OSError as error:
print(error)
print("Directory '{}' can not be removed".format(args.destination))
"""
Main method. Initializes the complete process from start to finish.
"""
if __name__ == '__main__':
args = initArgs() # Arguments as global variables
startTime = time()
maxSteps = 5
if args.imageSequence:
maxSteps = 4
# Create destination folder
directoryExisted = False
if not path.exists(args.destination) and not args.test:
mkdir(args.destination)
else:
directoryExisted = True
print("Loading audio. (1/{})".format(maxSteps))
fileData, samplerate = loadAudio()
processArgs(args, fileData, samplerate)
print("Creating frame data. (2/{})".format(maxSteps))
frameData = calculateFrameData(fileData, samplerate)
del fileData, samplerate
print("Creating bins. (3/{})".format(maxSteps))
bins = createBins(frameData)
if args.smoothY > 0:
bins = smoothBinData(bins)
del frameData
if args.imageSequence:
print("Creating and saving image sequence. (4/{})".format(maxSteps))
else:
print("Creating and saving partial videos. (4/{})".format(maxSteps))
renderSaveFrames(bins)
del bins
if not args.imageSequence:
print("Concatenating to full video and overlaying audio. (5/{})".format(maxSteps))
if createVideo() != 0:
exit("ffmpeg exited with a failure.")
processTime = time() - startTime
print("Completed successfully in " + str(format(processTime, ".3f")) + " seconds.")
if not args.imageSequence:
print("Cleaning up files.")
cleanupFiles(directoryExisted)
print("Finished!")
|
[
"os.mkdir",
"os.remove",
"numpy.load",
"numpy.fft.rfft",
"cv2.VideoWriter_fourcc",
"os.path.isfile",
"numpy.mean",
"matplotlib.pyplot.imsave",
"cv2.VideoWriter",
"arguments.initArgs",
"styles.renderFrame",
"os.path.exists",
"numpy.max",
"subprocess.Popen",
"numpy.ceil",
"numpy.log2",
"arguments.processArgs",
"os.rmdir",
"audio2numpy.open_audio",
"sys.exit",
"multiprocessing.Manager",
"time.time",
"numpy.array",
"joblib.Parallel",
"joblib.delayed"
] |
[((9213, 9270), 'subprocess.Popen', 'subprocess.Popen', (['arguments'], {'stdout': 'stdout', 'stderr': 'stderr'}), '(arguments, stdout=stdout, stderr=stderr)\n', (9229, 9270), False, 'import subprocess\n'), ((9340, 9377), 'os.remove', 'remove', (["(args.destination + '/vidList')"], {}), "(args.destination + '/vidList')\n", (9346, 9377), False, 'from os import mkdir, path, remove, rmdir\n'), ((9747, 9757), 'arguments.initArgs', 'initArgs', ([], {}), '()\n', (9755, 9757), False, 'from arguments import args, initArgs, processArgs\n'), ((9812, 9818), 'time.time', 'time', ([], {}), '()\n', (9816, 9818), False, 'from time import time\n'), ((10133, 10172), 'arguments.processArgs', 'processArgs', (['args', 'fileData', 'samplerate'], {}), '(args, fileData, samplerate)\n', (10144, 10172), False, 'from arguments import args, initArgs, processArgs\n'), ((750, 773), 'numpy.load', 'np.load', (['"""testData.npy"""'], {}), "('testData.npy')\n", (757, 773), True, 'import numpy as np\n'), ((5193, 5205), 'numpy.max', 'np.max', (['bins'], {}), '(bins)\n', (5199, 5205), True, 'import numpy as np\n'), ((5276, 5298), 'numpy.log2', 'np.log2', (['(args.ylog + 1)'], {}), '(args.ylog + 1)\n', (5283, 5298), True, 'import numpy as np\n'), ((5612, 5643), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'args.processes'}), '(n_jobs=args.processes)\n', (5620, 5643), False, 'from joblib import Parallel, delayed\n'), ((6013, 6047), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*VID_CODEC'], {}), '(*VID_CODEC)\n', (6035, 6047), False, 'import cv2\n'), ((6118, 6190), 'cv2.VideoWriter', 'cv2.VideoWriter', (['dest', 'fourcc', 'args.framerate', '(args.width, args.height)'], {}), '(dest, fourcc, args.framerate, (args.width, args.height))\n', (6133, 6190), False, 'import cv2\n'), ((7341, 7410), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""testFrame.png"""', 'frames[0]'], {'vmin': '(0)', 'vmax': '(255)', 'cmap': '"""gray"""'}), "('testFrame.png', frames[0], vmin=0, vmax=255, cmap='gray')\n", (7351, 7410), True, 'import matplotlib.pyplot as plt\n'), ((7835, 7861), 'styles.renderFrame', 'renderFrame', (['args', 'bins', 'j'], {}), '(args, bins, j)\n', (7846, 7861), False, 'from styles import renderFrame\n'), ((9988, 10011), 'os.mkdir', 'mkdir', (['args.destination'], {}), '(args.destination)\n', (9993, 10011), False, 'from os import mkdir, path, remove, rmdir\n'), ((10860, 10866), 'time.time', 'time', ([], {}), '()\n', (10864, 10866), False, 'from time import time\n'), ((841, 867), 'os.path.isfile', 'path.isfile', (['args.filename'], {}), '(args.filename)\n', (852, 867), False, 'from os import mkdir, path, remove, rmdir\n'), ((872, 908), 'sys.exit', 'exit', (['"""Path to file does not exist."""'], {}), "('Path to file does not exist.')\n", (876, 908), False, 'from sys import exit, stdout, stderr\n'), ((943, 968), 'audio2numpy.open_audio', 'open_audio', (['args.filename'], {}), '(args.filename)\n', (953, 968), False, 'from audio2numpy import open_audio\n'), ((5412, 5470), 'numpy.ceil', 'np.ceil', (['(bins.shape[1] / (args.processes * args.chunkSize))'], {}), '(bins.shape[1] / (args.processes * args.chunkSize))\n', (5419, 5470), True, 'import numpy as np\n'), ((5569, 5578), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (5576, 5578), False, 'from multiprocessing import Manager\n'), ((9496, 9519), 'os.rmdir', 'rmdir', (['args.destination'], {}), '(args.destination)\n', (9501, 9519), False, 'from os import mkdir, path, remove, rmdir\n'), ((9937, 9966), 'os.path.exists', 'path.exists', (['args.destination'], {}), 
'(args.destination)\n', (9948, 9966), False, 'from os import mkdir, path, remove, rmdir\n'), ((10805, 10842), 'sys.exit', 'exit', (['"""ffmpeg exited with a failure."""'], {}), "('ffmpeg exited with a failure.')\n", (10809, 10842), False, 'from sys import exit, stdout, stderr\n'), ((1328, 1353), 'numpy.mean', 'np.mean', (['fileData'], {'axis': '(1)'}), '(fileData, axis=1)\n', (1335, 1353), True, 'import numpy as np\n'), ((2911, 2940), 'numpy.fft.rfft', 'np.fft.rfft', (['currentFrameData'], {}), '(currentFrameData)\n', (2922, 2940), True, 'import numpy as np\n'), ((5644, 5670), 'joblib.delayed', 'delayed', (['renderSavePartial'], {}), '(renderSavePartial)\n', (5651, 5670), False, 'from joblib import Parallel, delayed\n'), ((4053, 4085), 'numpy.mean', 'np.mean', (['data[dataStart:dataEnd]'], {}), '(data[dataStart:dataEnd])\n', (4060, 4085), True, 'import numpy as np\n'), ((4492, 4536), 'numpy.mean', 'np.mean', (['frameBinData[:i + args.smoothY + 1]'], {}), '(frameBinData[:i + args.smoothY + 1])\n', (4499, 4536), True, 'import numpy as np\n'), ((5358, 5372), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (5366, 5372), True, 'import numpy as np\n'), ((4622, 4662), 'numpy.mean', 'np.mean', (['frameBinData[i - args.smoothY:]'], {}), '(frameBinData[i - args.smoothY:])\n', (4629, 4662), True, 'import numpy as np\n'), ((4723, 4783), 'numpy.mean', 'np.mean', (['frameBinData[i - args.smoothY:i + args.smoothY + 1]'], {}), '(frameBinData[i - args.smoothY:i + args.smoothY + 1])\n', (4730, 4783), True, 'import numpy as np\n')]
|
from __future__ import print_function
from decimal import Decimal
from numpy import round, subtract
import matplotlib.pyplot as plt
class AStarGraph(object):
    # Define a board-like grid class with barrier walls
def __init__(self, walls):
self.barriers = []
for wall in walls:
self.barriers.append(wall)
def heuristic(self, start, goal):
        # Manhattan distance heuristic (D = 1); appropriate because moves are
        # restricted to the four cardinal directions in get_vertex_neighbours
D = 1
dx = abs(start[0] - goal[0])
dy = abs(start[1] - goal[1])
return D * (dx + dy)
def get_vertex_neighbours(self, pos):
n = []
        # Moves allowed; the commented-out variant below also permits diagonals, like a chess king
# for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, 1), (1, -1), (-1, -1)]:
for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
x2 = round(pos[0] + dx, 2)
y2 = round(pos[1] + dy, 2)
n.append((x2, y2))
return n
def move_cost(self, a, b):
b = (round(b[0], 2), round(b[1], 2))
for barrier in self.barriers:
if b in barrier:
return 100000000000000 # Extremely high cost to enter barrier squares
return 1 # Normal movement cost
def setPath(self, path):
self.path = path
def setJumpValues(self,jumpValues):
self.jumpValues = jumpValues
def setCost(self, cost):
self.cost = cost
def getCost(self):
return self.cost
def getJumpValues(self):
return self.jumpValues
def getPath(self):
return self.path
def AStarSearch(start, end, graph, display=False):
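    """
    A* search from start to end on the given AStarGraph.
    On success, stores the total cost, the jump values and the full path on the graph object.
    """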
startIsIntegers = all(isinstance(n, int) for n in start)
endIsIntegers = all(isinstance(n, int) for n in end)
if not startIsIntegers or not endIsIntegers:
raise Exception('The start position AND goal must be integer values')
G = {} # Actual movement cost to each position from the start position
F = {} # Estimated movement cost of start to end going via this position
# Initialize starting values
G[start] = 0
F[start] = graph.heuristic(start, end)
closedVertices = set()
openVertices = set([start])
cameFrom = {}
while len(openVertices) > 0:
# Get the vertex in the open list with the lowest F score
current = None
currentFscore = None
for pos in openVertices:
if current is None or F[pos] < currentFscore:
currentFscore = F[pos]
current = pos
# Check if we have reached the goal
if round(current[0],2) == round(end[0],2) and round(current[1],2) == round(end[1],2):
# Retrace our route backward
path = [current]
while current in cameFrom:
current = cameFrom[current]
path.append(current)
path.reverse()
prevTarget = path[0]
iterResult = iter(path)
next(iterResult)
state = 0
combinedResults = list()
delta = 0
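            # Run-length encode the path into jump segments:
            # vertical runs are stored as (0, dy) and horizontal runs as (dx, 0)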
for target in iterResult:
                if target[0] - prevTarget[0] == 0 and state == 0:
state = 0
delta += target[1] - prevTarget[1]
                elif target[0] - prevTarget[0] == 0 and state == 1:
state = 0
combinedResults.append((delta, 0))
delta = 0
delta += target[1] - prevTarget[1]
                elif target[1] - prevTarget[1] == 0 and state == 1:
state = 1
delta += target[0] - prevTarget[0]
                elif target[1] - prevTarget[1] == 0 and state == 0:
state = 1
combinedResults.append((0, delta))
delta = 0
delta += target[0] - prevTarget[0]
prevTarget = target
            if state == 0:
combinedResults.append((0, delta))
            elif state == 1:
combinedResults.append((delta, 0))
if display:
plt.plot([v[0] for v in path], [v[1] for v in path])
for barrier in graph.barriers:
plt.plot([v[0] for v in barrier], [v[1] for v in barrier])
plt.show()
graph.setCost(F[end])
graph.setJumpValues(combinedResults)
graph.setPath(path)
return # Done!
# Mark the current vertex as closed
openVertices.remove(current)
closedVertices.add(current)
# Update scores for vertices near the current position
for neighbour in graph.get_vertex_neighbours(current):
if neighbour in closedVertices:
continue # We have already processed this node exhaustively
candidateG = G[current] + graph.move_cost(current, neighbour)
if neighbour not in openVertices:
openVertices.add(neighbour) # Discovered a new vertex
elif candidateG >= G[neighbour]:
continue # This G score is worse than previously found
# Adopt this G score
cameFrom[neighbour] = current
G[neighbour] = candidateG
H = graph.heuristic(neighbour, end)
F[neighbour] = G[neighbour] + H
raise RuntimeError("A* failed to find a solution")
if __name__ == "__main__":
graph = AStarGraph([ [(2, 5), (1, 5), (0, 5)],
[(4, 3), (4, 4), (4, 5)],
[(2, 4), (2, 5), (2, 6),
(3, 6), (4, 6), (5, 6),
(5, 5), (5, 4), (5, 3), (5, 2),
(4, 2), (3, 2)]
])
start = (2,2)
end = (2,7)
AStarSearch(start, end, graph, True)
print("route", graph.getJumpValues())
print("coordiantes ", graph.getPath())
print("cost", graph.getCost())
|
[
"numpy.round",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] |
[((873, 894), 'numpy.round', 'round', (['(pos[0] + dx)', '(2)'], {}), '(pos[0] + dx, 2)\n', (878, 894), False, 'from numpy import round, subtract\n'), ((912, 933), 'numpy.round', 'round', (['(pos[1] + dy)', '(2)'], {}), '(pos[1] + dy, 2)\n', (917, 933), False, 'from numpy import round, subtract\n'), ((1027, 1041), 'numpy.round', 'round', (['b[0]', '(2)'], {}), '(b[0], 2)\n', (1032, 1041), False, 'from numpy import round, subtract\n'), ((1043, 1057), 'numpy.round', 'round', (['b[1]', '(2)'], {}), '(b[1], 2)\n', (1048, 1057), False, 'from numpy import round, subtract\n'), ((2594, 2614), 'numpy.round', 'round', (['current[0]', '(2)'], {}), '(current[0], 2)\n', (2599, 2614), False, 'from numpy import round, subtract\n'), ((2617, 2633), 'numpy.round', 'round', (['end[0]', '(2)'], {}), '(end[0], 2)\n', (2622, 2633), False, 'from numpy import round, subtract\n'), ((2637, 2657), 'numpy.round', 'round', (['current[1]', '(2)'], {}), '(current[1], 2)\n', (2642, 2657), False, 'from numpy import round, subtract\n'), ((2660, 2676), 'numpy.round', 'round', (['end[1]', '(2)'], {}), '(end[1], 2)\n', (2665, 2676), False, 'from numpy import round, subtract\n'), ((4129, 4181), 'matplotlib.pyplot.plot', 'plt.plot', (['[v[0] for v in path]', '[v[1] for v in path]'], {}), '([v[0] for v in path], [v[1] for v in path])\n', (4137, 4181), True, 'import matplotlib.pyplot as plt\n'), ((4324, 4334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4332, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4249, 4307), 'matplotlib.pyplot.plot', 'plt.plot', (['[v[0] for v in barrier]', '[v[1] for v in barrier]'], {}), '([v[0] for v in barrier], [v[1] for v in barrier])\n', (4257, 4307), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 16:29:33 2019
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
import g0_init_cfg as ic
# ----- Line number (to be revised!) ----- #
pk_line = 1400
'''
Line for finding peaks (gfreduce)
Line/column for finding apertures (gfextract)
'''
# ---------------------------------------- #
# ----- Importing IRAF from the root directory ----- #
current_dir = os.getcwd()
os.chdir(ic.dir_iraf)
from pyraf import iraf
from pyraf.iraf import gemini, gmos
os.chdir(current_dir)
iraf.chdir(current_dir)
iraf.unlearn('gfscatsub')
iraf.unlearn('gfreduce')
iraf.unlearn('gfdisplay')
iraf.unlearn('gfresponse')
# ---------- Reduce the lamp flat ---------- #
flat = np.loadtxt(ic.lst_flat, dtype=str)
if (flat.size > 1):
raise ValueError("Please check if there is only one flat image for the standard star.")
flat0 = flat.item(0)
arc = np.loadtxt(ic.lst_arc, dtype=str)
if (arc.size > 1):
raise ValueError("Please check if there is only one arc image for the standard star.")
arc0 = arc.item(0)
# Model and remove the scattered light
iraf.imdelete('brg@'+ic.lst_flat)
# os.system('ds9 &')
# iraf.sleep(5.0)
blkmsk = np.loadtxt("blkmask_name.txt", dtype=str).item(0)
blkmsk0 = blkmsk
iraf.gfscatsub('rg'+flat0, blkmsk0, outimage='', prefix='b',
xorder='3,3,3,3,3,3,3,3,3,3,3,3',
yorder='3,3,3,3,3,3,3,3,3,3,3,3',
cross='yes', fl_inter='no')
# os.system('ds9 &')
# iraf.sleep(5.0)
# for flat in iraf.type(ic.lst_flat, Stdout=1):
# flat = flat.strip()
# for i in np.arange(12):
# iraf.imexamine('brg'+flat+'[sci,'+str(i+1)+']', 1)
# QE correction and extract
iraf.imdelete('qbrg@'+ic.lst_flat)
iraf.imdelete('eqbrg@'+ic.lst_flat)
iraf.gfreduce('brg@'+ic.lst_flat, recenter='no', reference='erg'+flat0,
fl_extract='yes', fl_qecorr='yes', qe_refim='erg'+arc0,
fl_addmdf='no', fl_bias='no', fl_over='no', fl_trim='no',
mdffile=ic.nmdf, mdfdir='./',
slits=ic.cslit, line=pk_line, fl_fluxcal='no', fl_gscrrej='no',
fl_wavtran='no', fl_skysub='no', fl_inter='no', fl_vardq='yes')
if (ic.nslit == 1):
vkw = '1'
if (ic.nslit == 2):
vkw = '*'
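# vkw : extension version passed to gfdisplay ('1' when a single slit is used, '*' when both are)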
os.system('ds9 &')
iraf.sleep(5.0)
iraf.gfdisplay('eqbrg'+flat0, 1, version=vkw)
# ---------- Response function ---------- #
iraf.imdelete(flat0+'_resp')
iraf.gfresponse('eqbrg'+flat0, outimage=flat0+'_resp', sky='',
order=45, func='spline3', sample='*',
fl_fit='yes', fl_inter='no')
# os.system('ds9 &')
# iraf.sleep(5.0)
iraf.gfdisplay(flat0+'_resp', 1, version=vkw)
# Printing the running time
print('--- %.4f seconds ---' %(time.time()-start_time))
|
[
"pyraf.iraf.gfdisplay",
"pyraf.iraf.sleep",
"pyraf.iraf.gfresponse",
"os.getcwd",
"os.system",
"time.time",
"numpy.loadtxt",
"pyraf.iraf.gfreduce",
"pyraf.iraf.unlearn",
"pyraf.iraf.gfscatsub",
"pyraf.iraf.chdir",
"os.chdir",
"pyraf.iraf.imdelete"
] |
[((133, 144), 'time.time', 'time.time', ([], {}), '()\n', (142, 144), False, 'import time\n'), ((472, 483), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (481, 483), False, 'import glob, os\n'), ((484, 505), 'os.chdir', 'os.chdir', (['ic.dir_iraf'], {}), '(ic.dir_iraf)\n', (492, 505), False, 'import glob, os\n'), ((567, 588), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (575, 588), False, 'import glob, os\n'), ((589, 612), 'pyraf.iraf.chdir', 'iraf.chdir', (['current_dir'], {}), '(current_dir)\n', (599, 612), False, 'from pyraf import iraf\n'), ((614, 639), 'pyraf.iraf.unlearn', 'iraf.unlearn', (['"""gfscatsub"""'], {}), "('gfscatsub')\n", (626, 639), False, 'from pyraf import iraf\n'), ((640, 664), 'pyraf.iraf.unlearn', 'iraf.unlearn', (['"""gfreduce"""'], {}), "('gfreduce')\n", (652, 664), False, 'from pyraf import iraf\n'), ((665, 690), 'pyraf.iraf.unlearn', 'iraf.unlearn', (['"""gfdisplay"""'], {}), "('gfdisplay')\n", (677, 690), False, 'from pyraf import iraf\n'), ((691, 717), 'pyraf.iraf.unlearn', 'iraf.unlearn', (['"""gfresponse"""'], {}), "('gfresponse')\n", (703, 717), False, 'from pyraf import iraf\n'), ((774, 808), 'numpy.loadtxt', 'np.loadtxt', (['ic.lst_flat'], {'dtype': 'str'}), '(ic.lst_flat, dtype=str)\n', (784, 808), True, 'import numpy as np\n'), ((949, 982), 'numpy.loadtxt', 'np.loadtxt', (['ic.lst_arc'], {'dtype': 'str'}), '(ic.lst_arc, dtype=str)\n', (959, 982), True, 'import numpy as np\n'), ((1142, 1177), 'pyraf.iraf.imdelete', 'iraf.imdelete', (["('brg@' + ic.lst_flat)"], {}), "('brg@' + ic.lst_flat)\n", (1155, 1177), False, 'from pyraf import iraf\n'), ((1292, 1460), 'pyraf.iraf.gfscatsub', 'iraf.gfscatsub', (["('rg' + flat0)", 'blkmsk0'], {'outimage': '""""""', 'prefix': '"""b"""', 'xorder': '"""3,3,3,3,3,3,3,3,3,3,3,3"""', 'yorder': '"""3,3,3,3,3,3,3,3,3,3,3,3"""', 'cross': '"""yes"""', 'fl_inter': '"""no"""'}), "('rg' + flat0, blkmsk0, outimage='', prefix='b', xorder=\n '3,3,3,3,3,3,3,3,3,3,3,3', yorder='3,3,3,3,3,3,3,3,3,3,3,3', cross=\n 'yes', fl_inter='no')\n", (1306, 1460), False, 'from pyraf import iraf\n'), ((1730, 1766), 'pyraf.iraf.imdelete', 'iraf.imdelete', (["('qbrg@' + ic.lst_flat)"], {}), "('qbrg@' + ic.lst_flat)\n", (1743, 1766), False, 'from pyraf import iraf\n'), ((1765, 1802), 'pyraf.iraf.imdelete', 'iraf.imdelete', (["('eqbrg@' + ic.lst_flat)"], {}), "('eqbrg@' + ic.lst_flat)\n", (1778, 1802), False, 'from pyraf import iraf\n'), ((1801, 2168), 'pyraf.iraf.gfreduce', 'iraf.gfreduce', (["('brg@' + ic.lst_flat)"], {'recenter': '"""no"""', 'reference': "('erg' + flat0)", 'fl_extract': '"""yes"""', 'fl_qecorr': '"""yes"""', 'qe_refim': "('erg' + arc0)", 'fl_addmdf': '"""no"""', 'fl_bias': '"""no"""', 'fl_over': '"""no"""', 'fl_trim': '"""no"""', 'mdffile': 'ic.nmdf', 'mdfdir': '"""./"""', 'slits': 'ic.cslit', 'line': 'pk_line', 'fl_fluxcal': '"""no"""', 'fl_gscrrej': '"""no"""', 'fl_wavtran': '"""no"""', 'fl_skysub': '"""no"""', 'fl_inter': '"""no"""', 'fl_vardq': '"""yes"""'}), "('brg@' + ic.lst_flat, recenter='no', reference='erg' + flat0,\n fl_extract='yes', fl_qecorr='yes', qe_refim='erg' + arc0, fl_addmdf=\n 'no', fl_bias='no', fl_over='no', fl_trim='no', mdffile=ic.nmdf, mdfdir\n ='./', slits=ic.cslit, line=pk_line, fl_fluxcal='no', fl_gscrrej='no',\n fl_wavtran='no', fl_skysub='no', fl_inter='no', fl_vardq='yes')\n", (1814, 2168), False, 'from pyraf import iraf\n'), ((2284, 2302), 'os.system', 'os.system', (['"""ds9 &"""'], {}), "('ds9 &')\n", (2293, 2302), False, 'import glob, os\n'), ((2303, 2318), 'pyraf.iraf.sleep', 
'iraf.sleep', (['(5.0)'], {}), '(5.0)\n', (2313, 2318), False, 'from pyraf import iraf\n'), ((2319, 2366), 'pyraf.iraf.gfdisplay', 'iraf.gfdisplay', (["('eqbrg' + flat0)", '(1)'], {'version': 'vkw'}), "('eqbrg' + flat0, 1, version=vkw)\n", (2333, 2366), False, 'from pyraf import iraf\n'), ((2411, 2441), 'pyraf.iraf.imdelete', 'iraf.imdelete', (["(flat0 + '_resp')"], {}), "(flat0 + '_resp')\n", (2424, 2441), False, 'from pyraf import iraf\n'), ((2440, 2577), 'pyraf.iraf.gfresponse', 'iraf.gfresponse', (["('eqbrg' + flat0)"], {'outimage': "(flat0 + '_resp')", 'sky': '""""""', 'order': '(45)', 'func': '"""spline3"""', 'sample': '"""*"""', 'fl_fit': '"""yes"""', 'fl_inter': '"""no"""'}), "('eqbrg' + flat0, outimage=flat0 + '_resp', sky='', order=45,\n func='spline3', sample='*', fl_fit='yes', fl_inter='no')\n", (2455, 2577), False, 'from pyraf import iraf\n'), ((2639, 2686), 'pyraf.iraf.gfdisplay', 'iraf.gfdisplay', (["(flat0 + '_resp')", '(1)'], {'version': 'vkw'}), "(flat0 + '_resp', 1, version=vkw)\n", (2653, 2686), False, 'from pyraf import iraf\n'), ((1225, 1266), 'numpy.loadtxt', 'np.loadtxt', (['"""blkmask_name.txt"""'], {'dtype': 'str'}), "('blkmask_name.txt', dtype=str)\n", (1235, 1266), True, 'import numpy as np\n'), ((2746, 2757), 'time.time', 'time.time', ([], {}), '()\n', (2755, 2757), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
The main purpose of this script is to encapsulate all the functionality for
Exploratory Data Analysis (EDA) problems, letting the user run simple analyses
on input data (CSV format).
Credits:
-------
Author: <NAME> (@LukeMaxMusic)
License: MIT License 2020
Reference:
----------
[1] https://www.streamlit.io/
[2] https://emojipedia.org
"""
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
import nbformat as nbf
from pptx import Presentation
from pptx.util import Inches, Pt
from pptx.enum.text import PP_ALIGN
import io
st.set_option('deprecation.showfileUploaderEncoding', False)
@st.cache
def get_data(filename):
"""
    Reads a CSV file and converts it to a pandas DataFrame.
    :param filename: Path or file-like object pointing to the CSV file.
    :return: {pd.DataFrame}: Pandas DataFrame.
"""
return pd.read_csv(filename)
def main():
"""
Main function for a Universal EDA App. Run this to run the app.
"""
st.title('📊WEBEDA')
st.subheader('Web based tool for general purpose Exploratory Data Analysis')
uploaded_file = st.file_uploader('Upload CSV file to begin (Max file size allowed: 200MB)', type='csv')
if uploaded_file is not None:
df = get_data(uploaded_file)
st.sidebar.title('Tools 🔧')
if st.checkbox('Show raw data', value=False):
st.write(df)
target_column = st.selectbox('Select Target Column', list(df.columns), key='target_column')
if target_column is not None:
if st.sidebar.checkbox('✍ Describe', value=False):
st.markdown('## Data Description')
st.write(df.describe())
st.markdown('### Columns that are potential binary features')
bin_cols = []
for col in df.columns:
if len(df[col].value_counts()) == 2:
bin_cols.append(col)
st.write(bin_cols)
st.markdown('### Columns Types')
st.write(df.dtypes)
if st.sidebar.checkbox('👁 Missing Data', value=False):
st.markdown('## Missing Data')
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum() / df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
st.write(missing_data)
if st.sidebar.checkbox('🔢 Value Counts', value=False):
st.markdown('## Value Counts')
col = st.selectbox('Select Column', list(df.columns), key='val_col')
st.write(df[col].value_counts())
if st.sidebar.checkbox('🧬 Unique Elements', value=False):
st.markdown('## Unique elements')
if st.checkbox('Show all unique elements', value=False):
st.write(df.nunique())
col = st.selectbox('Show columnwise unique elements', list(df.columns), key='unique_col')
st.write(df[col].unique())
if st.sidebar.checkbox('〽 Show Distribution', False):
st.subheader(f'Distribution of {target_column}')
try:
sns.distplot(df[target_column])
st.write("Skewness: %.3f" % df[target_column].skew())
st.write("Kurtosis: %.3f" % df[target_column].kurt())
st.pyplot()
except:
st.error('Invalid Column')
if st.sidebar.checkbox('📈 Scatter Plot', value=False):
st.markdown('## Scatter Plots')
scatter_cols = st.multiselect('Select Column', list(df.columns), key='scatter_cols')
for col in scatter_cols:
try:
data = pd.concat([df[target_column], df[col]], axis=1)
data.plot.scatter(x=col, y=target_column)
st.pyplot()
except:
st.error('Invalid column')
if st.sidebar.checkbox('🈁 Box Plot', value=False):
st.markdown('## Box Plots')
box_cols = st.multiselect('Select Column', list(df.columns), key='box_cols')
for col in box_cols:
try:
data = pd.concat([df[target_column], df[col]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=col, y=target_column, data=data)
fig.axis(ymin=np.min(df[target_column]), ymax=np.max(df[target_column]))
st.pyplot()
except:
st.error('Invalid column')
if st.sidebar.checkbox('➿ Pair Plot', value=False):
st.markdown('## Pair Plots')
pair_cols = st.multiselect('Select Column', list(df.columns), key='pair_plot')
plot_size = st.number_input('Select Plot size', 1.0, 5.0, step=0.5, key='plot_size', value=2.5)
cols = [target_column]
for col in pair_cols:
cols.append(col)
try:
sns.set()
sns.pairplot(df[cols], size=plot_size)
st.pyplot()
except:
st.error('Invalid column')
if st.sidebar.checkbox('🧮 Correlation Matrix', value=False):
st.markdown('## Correlation matrix (heatmap style)')
eda_cols = list(df.columns)
if len(eda_cols) > 5:
eda_cols = eda_cols[:4]
eda_cols = st.multiselect("Select features:", df.columns.tolist(), default=eda_cols)
plt.figure(figsize=(12, 8))
ax = sns.heatmap(df[eda_cols].corr(), cmap="OrRd", linecolor='white', linewidths=1)
st.pyplot()
if st.sidebar.checkbox('🖼️ Create PPT', value=False):
st.markdown('### Create a Power Point Presentation\n'
'Which EDA Tools you want to include in the PPTX?')
prs = Presentation()
title_slide_layout = prs.slide_layouts[0]
slide = prs.slides.add_slide(title_slide_layout)
title = slide.shapes.title
subtitle = slide.placeholders[1]
title.text = "EDA Presentation"
subtitle.text = "Subtitle text here!"
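                # Footer text box (the same footer is added to every slide)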
top = Inches(7)
left = Inches(4.5)
width = height = Inches(1)
txBox = slide.shapes.add_textbox(left, top, width, height)
tf = txBox.text_frame
tf.text = "<Project Number> <Project name>\n<Reference number> <Issue> <Last updated>"
tf.paragraphs[0].font.size = Pt(8)
tf.paragraphs[1].font.size = Pt(8)
tf.paragraphs[0].alignment = PP_ALIGN.CENTER
tf.paragraphs[1].alignment = PP_ALIGN.CENTER
if st.checkbox('📈: Scatter Plot', value=False):
slide = prs.slides.add_slide(prs.slide_layouts[5])
title = slide.shapes.title
title.text = "Scatter Plot"
ax = sns.distplot(df[target_column])
fig = ax.get_figure()
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
pic = slide.shapes.add_picture(buf, Inches(2), Inches(2), height=Inches(5))
top = Inches(7)
left = Inches(4.5)
width = height = Inches(1)
txBox = slide.shapes.add_textbox(left, top, width, height)
tf = txBox.text_frame
tf.text = "<Project Number> <Project name>\n<Reference number> <Issue> <Last updated>"
tf.paragraphs[0].font.size = Pt(8)
tf.paragraphs[1].font.size = Pt(8)
tf.paragraphs[0].alignment = PP_ALIGN.CENTER
tf.paragraphs[1].alignment = PP_ALIGN.CENTER
if st.checkbox('🈁: Box Plot', value=False):
slide = prs.slides.add_slide(prs.slide_layouts[5])
title = slide.shapes.title
title.text = "Box Plot"
top = Inches(7)
left = Inches(4.5)
width = height = Inches(1)
txBox = slide.shapes.add_textbox(left, top, width, height)
tf = txBox.text_frame
tf.text = "<Project Number> <Project name>\n<Reference number> <Issue> <Last updated>"
tf.paragraphs[0].font.size = Pt(8)
tf.paragraphs[1].font.size = Pt(8)
tf.paragraphs[0].alignment = PP_ALIGN.CENTER
tf.paragraphs[1].alignment = PP_ALIGN.CENTER
try:
if len(box_cols) != 0:
for col in box_cols:
try:
data = pd.concat([df[target_column], df[col]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=col, y=target_column, data=data)
fig.axis(ymin=np.min(df[target_column]), ymax=np.max(df[target_column]))
except:
st.error('Invalid column')
fig = ax.get_figure()
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
pic = slide.shapes.add_picture(buf, Inches(2), Inches(2), height=Inches(5))
else:
st.error('Box Plot columns not selected')
#box_cols = list(df.columns)
#if len(box_cols) > 3:
# box_cols = box_cols[:2]
except NameError:
st.error('Box Plot columns not selected')
#box_cols = list(df.columns)
#if len(box_cols) > 3:
# box_cols = box_cols[:2]
if st.checkbox('🧮: Correlation Matrix', value=False):
slide = prs.slides.add_slide(prs.slide_layouts[5])
title = slide.shapes.title
title.text = "Correlation Matrix"
top = Inches(7)
left = Inches(4.5)
width = height = Inches(1)
txBox = slide.shapes.add_textbox(left, top, width, height)
tf = txBox.text_frame
tf.text = "<Project Number> <Project name>\n<Reference number> <Issue> <Last updated>"
tf.paragraphs[0].font.size = Pt(8)
tf.paragraphs[1].font.size = Pt(8)
tf.paragraphs[0].alignment = PP_ALIGN.CENTER
tf.paragraphs[1].alignment = PP_ALIGN.CENTER
try:
if len(eda_cols) != 0:
pass
else:
eda_cols = list(df.columns)
if len(eda_cols) > 5:
eda_cols = eda_cols[:4]
except NameError:
eda_cols = list(df.columns)
if len(eda_cols) > 5:
eda_cols = eda_cols[:4]
ax = sns.heatmap(df[eda_cols].corr(), cmap="OrRd", linecolor='white', linewidths=1)
fig = ax.get_figure()
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
pic = slide.shapes.add_picture(buf, Inches(2), Inches(2), height=Inches(5))
if st.checkbox('➿: Pair Plot', value=False):
slide = prs.slides.add_slide(prs.slide_layouts[5])
title = slide.shapes.title
title.text = "Pair Plot"
top = Inches(7)
left = Inches(4.5)
width = height = Inches(1)
txBox = slide.shapes.add_textbox(left, top, width, height)
tf = txBox.text_frame
tf.text = "<Project Number> <Project name>\n<Reference number> <Issue> <Last updated>"
tf.paragraphs[0].font.size = Pt(8)
tf.paragraphs[1].font.size = Pt(8)
tf.paragraphs[0].alignment = PP_ALIGN.CENTER
tf.paragraphs[1].alignment = PP_ALIGN.CENTER
try:
if len(pair_cols) != 0:
pass
else:
pair_cols = []
except NameError:
pair_cols = []
cols = [target_column]
for col in pair_cols:
cols.append(col)
try:
fig = sns.pairplot(df[cols])
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
pic = slide.shapes.add_picture(buf, Inches(2), Inches(2), height=Inches(5))
except:
st.error('Invalid column')
pptx_path = st.text_input(label='How should we call the Presentation?',
value='default_eda.pptx')
if st.button("Create PPTX"):
prs.save(pptx_path)
st.write("Finished! Power Point PPTX was created.")
if st.sidebar.checkbox('📓 Create Notebook', value=False):
st.markdown('### Create a Jupyter Notebook\n'
'Which EDA Tools you want to include in the notebook?')
nb = nbf.v4.new_notebook()
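                # Notebook cells are assembled below from the EDA tools ticked by the user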
title_text = "# Auto-generated EDA notebook.\n" \
"## Welcome to EDA Notebook created from Streamlit´s TEDA application." \
"\n*Due to Streamlit´s file_uploader current limitations, you must manually insert the " \
"path of the CSV file.*"
import_libraries_code = """# Import all necessary libraries:
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt"""
load_dataframe_code = "# Load the dataframe:\n" \
"# Insert path to CSV:\n" \
"""df = pd.read_csv("filename.csv")\ntarget_column = "%s" """ % target_column
nb['cells'] = [nbf.v4.new_markdown_cell(title_text),
nbf.v4.new_code_cell(import_libraries_code),
nbf.v4.new_code_cell(load_dataframe_code)]
if st.checkbox('🥩 - Raw Data and Info', value=False):
raw_data_code = """# Raw Data and Info:
print(df.info())
df.head()"""
nb['cells'].append(nbf.v4.new_code_cell(raw_data_code))
if st.checkbox('👁 - Missing Data', value=False):
missing_data_code = """# Missing Data:
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum() / df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data"""
nb['cells'].append(nbf.v4.new_code_cell(missing_data_code))
if st.checkbox('🔢 - Value Counts', value=False):
value_counts_code = """# Value Counts:
col = df.columns
# select which column you want to see:
df[target_column].value_counts()"""
nb['cells'].append(nbf.v4.new_code_cell(value_counts_code))
if st.checkbox('🧬 - Unique Elements', value=False):
unique_elements_code = """# Unique Elements:
# Unique Elements
# select which column you want to see:
df[target_column].unique()"""
nb['cells'].append(nbf.v4.new_code_cell(unique_elements_code))
if st.checkbox('〽 - Distributions', value=False):
distributions_code = """# Distributions:
print("Skewness: %.3f" % df[target_column].skew())
print("Kurtosis: %.3f" % df[target_column].kurt())
sns.distplot(df[target_column])"""
nb['cells'].append(nbf.v4.new_code_cell(distributions_code))
if st.checkbox('🈁 - Box Plot', value=False):
box_plot_code = """# Box Plot:
# select your variable to make the box plot:
box_column = df.columns[1]
data = pd.concat([df[target_column], df[box_column]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=box_column, y=target_column, data=data)
fig.axis(ymin=np.min(df[target_column]), ymax=np.max(df[target_column]))"""
nb['cells'].append(nbf.v4.new_code_cell(box_plot_code))
if st.checkbox('➿ - Pair Plot', value=False):
pair_plot_code = """# Pair Plot:
# select your variable/s to do the pair plot:
pair_cols = df.columns[1:2]
pair_cols.append(target_column)
sns.set()
sns.pairplot(df[pair_cols], size=2.0)"""
nb['cells'].append(nbf.v4.new_code_cell(pair_plot_code))
if st.checkbox('🧮 - Correlation Matrix', value=False):
                    correlation_matrix_plot_code = """# Correlation Matrix:
# select the columns for the matrix:
cm_cols = list(df.columns)
plt.figure(figsize=(12, 8))
ax = sns.heatmap(df[cm_cols].corr(), cmap="OrRd", linecolor='white', linewidths=1)"""
nb['cells'].append(nbf.v4.new_code_cell(correlation_matrix_plot_code))
notebook_path = st.text_input(label='How should we call the Jupyter notebook?',
value='default_eda.ipynb')
if st.button("Create Notebook"):
with open(notebook_path, 'w') as f:
nbf.write(nb, f)
st.write("Finished! Jupyter Notebook was created.")
if st.sidebar.button('Credits'):
st.sidebar.markdown('''
MIT License 2020 (c) **<NAME>**
🙋♂
Get in touch: [Twitter](https://twitter.com/@lukemaxmusic)
ℹ️
Source Code: [Github](https://github.com/lucasmengual92/webeda)
''')
if __name__ == '__main__':
main()
|
[
"streamlit.text_input",
"pptx.util.Pt",
"pandas.read_csv",
"nbformat.write",
"streamlit.title",
"streamlit.sidebar.title",
"matplotlib.pyplot.figure",
"seaborn.pairplot",
"streamlit.sidebar.button",
"streamlit.subheader",
"streamlit.sidebar.checkbox",
"streamlit.button",
"streamlit.sidebar.markdown",
"numpy.max",
"seaborn.set",
"pandas.concat",
"matplotlib.pyplot.subplots",
"io.BytesIO",
"streamlit.error",
"streamlit.set_option",
"streamlit.checkbox",
"pptx.Presentation",
"streamlit.file_uploader",
"pptx.util.Inches",
"seaborn.boxplot",
"numpy.min",
"streamlit.pyplot",
"nbformat.v4.new_code_cell",
"streamlit.markdown",
"nbformat.v4.new_notebook",
"streamlit.write",
"seaborn.distplot",
"nbformat.v4.new_markdown_cell",
"streamlit.number_input"
] |
[((686, 746), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showfileUploaderEncoding"""', '(False)'], {}), "('deprecation.showfileUploaderEncoding', False)\n", (699, 746), True, 'import streamlit as st\n'), ((986, 1007), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (997, 1007), True, 'import pandas as pd\n'), ((1117, 1136), 'streamlit.title', 'st.title', (['"""📊WEBEDA"""'], {}), "('📊WEBEDA')\n", (1125, 1136), True, 'import streamlit as st\n'), ((1142, 1218), 'streamlit.subheader', 'st.subheader', (['"""Web based tool for general purpose Exploratory Data Analysis"""'], {}), "('Web based tool for general purpose Exploratory Data Analysis')\n", (1154, 1218), True, 'import streamlit as st\n'), ((1242, 1333), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload CSV file to begin (Max file size allowed: 200MB)"""'], {'type': '"""csv"""'}), "('Upload CSV file to begin (Max file size allowed: 200MB)',\n type='csv')\n", (1258, 1333), True, 'import streamlit as st\n'), ((1416, 1443), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Tools 🔧"""'], {}), "('Tools 🔧')\n", (1432, 1443), True, 'import streamlit as st\n'), ((1456, 1497), 'streamlit.checkbox', 'st.checkbox', (['"""Show raw data"""'], {'value': '(False)'}), "('Show raw data', value=False)\n", (1467, 1497), True, 'import streamlit as st\n'), ((1512, 1524), 'streamlit.write', 'st.write', (['df'], {}), '(df)\n', (1520, 1524), True, 'import streamlit as st\n'), ((1683, 1729), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""✍ Describe"""'], {'value': '(False)'}), "('✍ Describe', value=False)\n", (1702, 1729), True, 'import streamlit as st\n'), ((2219, 2269), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""👁 Missing Data"""'], {'value': '(False)'}), "('👁 Missing Data', value=False)\n", (2238, 2269), True, 'import streamlit as st\n'), ((2642, 2692), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""🔢 Value Counts"""'], {'value': '(False)'}), "('🔢 Value Counts', value=False)\n", (2661, 2692), True, 'import streamlit as st\n'), ((2896, 2949), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""🧬 Unique Elements"""'], {'value': '(False)'}), "('🧬 Unique Elements', value=False)\n", (2915, 2949), True, 'import streamlit as st\n'), ((3289, 3338), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""〽 Show Distribution"""', '(False)'], {}), "('〽 Show Distribution', False)\n", (3308, 3338), True, 'import streamlit as st\n'), ((3755, 3805), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""📈 Scatter Plot"""'], {'value': '(False)'}), "('📈 Scatter Plot', value=False)\n", (3774, 3805), True, 'import streamlit as st\n'), ((4309, 4355), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""🈁 Box Plot"""'], {'value': '(False)'}), "('🈁 Box Plot', value=False)\n", (4328, 4355), True, 'import streamlit as st\n'), ((5014, 5061), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""➿ Pair Plot"""'], {'value': '(False)'}), "('➿ Pair Plot', value=False)\n", (5033, 5061), True, 'import streamlit as st\n'), ((5672, 5728), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""🧮 Correlation Matrix"""'], {'value': '(False)'}), "('🧮 Correlation Matrix', value=False)\n", (5691, 5728), True, 'import streamlit as st\n'), ((6224, 6273), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""🖼️ Create PPT"""'], {'value': '(False)'}), "('🖼️ Create PPT', value=False)\n", (6243, 6273), True, 'import streamlit as st\n'), ((14482, 14535), 'streamlit.sidebar.checkbox', 
'st.sidebar.checkbox', (['"""📓 Create Notebook"""'], {'value': '(False)'}), "('📓 Create Notebook', value=False)\n", (14501, 14535), True, 'import streamlit as st\n'), ((19089, 19117), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Credits"""'], {}), "('Credits')\n", (19106, 19117), True, 'import streamlit as st\n'), ((1748, 1782), 'streamlit.markdown', 'st.markdown', (['"""## Data Description"""'], {}), "('## Data Description')\n", (1759, 1782), True, 'import streamlit as st\n'), ((1841, 1902), 'streamlit.markdown', 'st.markdown', (['"""### Columns that are potential binary features"""'], {}), "('### Columns that are potential binary features')\n", (1852, 1902), True, 'import streamlit as st\n'), ((2095, 2113), 'streamlit.write', 'st.write', (['bin_cols'], {}), '(bin_cols)\n', (2103, 2113), True, 'import streamlit as st\n'), ((2131, 2163), 'streamlit.markdown', 'st.markdown', (['"""### Columns Types"""'], {}), "('### Columns Types')\n", (2142, 2163), True, 'import streamlit as st\n'), ((2181, 2200), 'streamlit.write', 'st.write', (['df.dtypes'], {}), '(df.dtypes)\n', (2189, 2200), True, 'import streamlit as st\n'), ((2288, 2318), 'streamlit.markdown', 'st.markdown', (['"""## Missing Data"""'], {}), "('## Missing Data')\n", (2299, 2318), True, 'import streamlit as st\n'), ((2521, 2583), 'pandas.concat', 'pd.concat', (['[total, percent]'], {'axis': '(1)', 'keys': "['Total', 'Percent']"}), "([total, percent], axis=1, keys=['Total', 'Percent'])\n", (2530, 2583), True, 'import pandas as pd\n'), ((2601, 2623), 'streamlit.write', 'st.write', (['missing_data'], {}), '(missing_data)\n', (2609, 2623), True, 'import streamlit as st\n'), ((2711, 2741), 'streamlit.markdown', 'st.markdown', (['"""## Value Counts"""'], {}), "('## Value Counts')\n", (2722, 2741), True, 'import streamlit as st\n'), ((2968, 3001), 'streamlit.markdown', 'st.markdown', (['"""## Unique elements"""'], {}), "('## Unique elements')\n", (2979, 3001), True, 'import streamlit as st\n'), ((3022, 3074), 'streamlit.checkbox', 'st.checkbox', (['"""Show all unique elements"""'], {'value': '(False)'}), "('Show all unique elements', value=False)\n", (3033, 3074), True, 'import streamlit as st\n'), ((3357, 3405), 'streamlit.subheader', 'st.subheader', (['f"""Distribution of {target_column}"""'], {}), "(f'Distribution of {target_column}')\n", (3369, 3405), True, 'import streamlit as st\n'), ((3824, 3855), 'streamlit.markdown', 'st.markdown', (['"""## Scatter Plots"""'], {}), "('## Scatter Plots')\n", (3835, 3855), True, 'import streamlit as st\n'), ((4374, 4401), 'streamlit.markdown', 'st.markdown', (['"""## Box Plots"""'], {}), "('## Box Plots')\n", (4385, 4401), True, 'import streamlit as st\n'), ((5080, 5108), 'streamlit.markdown', 'st.markdown', (['"""## Pair Plots"""'], {}), "('## Pair Plots')\n", (5091, 5108), True, 'import streamlit as st\n'), ((5234, 5321), 'streamlit.number_input', 'st.number_input', (['"""Select Plot size"""', '(1.0)', '(5.0)'], {'step': '(0.5)', 'key': '"""plot_size"""', 'value': '(2.5)'}), "('Select Plot size', 1.0, 5.0, step=0.5, key='plot_size',\n value=2.5)\n", (5249, 5321), True, 'import streamlit as st\n'), ((5747, 5799), 'streamlit.markdown', 'st.markdown', (['"""## Correlation matrix (heatmap style)"""'], {}), "('## Correlation matrix (heatmap style)')\n", (5758, 5799), True, 'import streamlit as st\n'), ((6048, 6075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6058, 6075), True, 'from matplotlib import pyplot as plt\n'), ((6194, 6205), 
'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (6203, 6205), True, 'import streamlit as st\n'), ((6292, 6407), 'streamlit.markdown', 'st.markdown', (['"""### Create a Power Point Presentation\nWhich EDA Tools you want to include in the PPTX?"""'], {}), '(\n """### Create a Power Point Presentation\nWhich EDA Tools you want to include in the PPTX?"""\n )\n', (6303, 6407), True, 'import streamlit as st\n'), ((6452, 6466), 'pptx.Presentation', 'Presentation', ([], {}), '()\n', (6464, 6466), False, 'from pptx import Presentation\n'), ((6813, 6822), 'pptx.util.Inches', 'Inches', (['(7)'], {}), '(7)\n', (6819, 6822), False, 'from pptx.util import Inches, Pt\n'), ((6847, 6858), 'pptx.util.Inches', 'Inches', (['(4.5)'], {}), '(4.5)\n', (6853, 6858), False, 'from pptx.util import Inches, Pt\n'), ((6893, 6902), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (6899, 6902), False, 'from pptx.util import Inches, Pt\n'), ((7168, 7173), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (7170, 7173), False, 'from pptx.util import Inches, Pt\n'), ((7220, 7225), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (7222, 7225), False, 'from pptx.util import Inches, Pt\n'), ((7372, 7415), 'streamlit.checkbox', 'st.checkbox', (['"""📈: Scatter Plot"""'], {'value': '(False)'}), "('📈: Scatter Plot', value=False)\n", (7383, 7415), True, 'import streamlit as st\n'), ((8533, 8572), 'streamlit.checkbox', 'st.checkbox', (['"""🈁: Box Plot"""'], {'value': '(False)'}), "('🈁: Box Plot', value=False)\n", (8544, 8572), True, 'import streamlit as st\n'), ((10831, 10880), 'streamlit.checkbox', 'st.checkbox', (['"""🧮: Correlation Matrix"""'], {'value': '(False)'}), "('🧮: Correlation Matrix', value=False)\n", (10842, 10880), True, 'import streamlit as st\n'), ((12545, 12585), 'streamlit.checkbox', 'st.checkbox', (['"""➿: Pair Plot"""'], {'value': '(False)'}), "('➿: Pair Plot', value=False)\n", (12556, 12585), True, 'import streamlit as st\n'), ((14175, 14265), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""How should we call the Presentation?"""', 'value': '"""default_eda.pptx"""'}), "(label='How should we call the Presentation?', value=\n 'default_eda.pptx')\n", (14188, 14265), True, 'import streamlit as st\n'), ((14324, 14348), 'streamlit.button', 'st.button', (['"""Create PPTX"""'], {}), "('Create PPTX')\n", (14333, 14348), True, 'import streamlit as st\n'), ((14554, 14665), 'streamlit.markdown', 'st.markdown', (['"""### Create a Jupyter Notebook\nWhich EDA Tools you want to include in the notebook?"""'], {}), '(\n """### Create a Jupyter Notebook\nWhich EDA Tools you want to include in the notebook?"""\n )\n', (14565, 14665), True, 'import streamlit as st\n'), ((14709, 14730), 'nbformat.v4.new_notebook', 'nbf.v4.new_notebook', ([], {}), '()\n', (14728, 14730), True, 'import nbformat as nbf\n'), ((15760, 15809), 'streamlit.checkbox', 'st.checkbox', (['"""🥩 - Raw Data and Info"""'], {'value': '(False)'}), "('🥩 - Raw Data and Info', value=False)\n", (15771, 15809), True, 'import streamlit as st\n'), ((16003, 16047), 'streamlit.checkbox', 'st.checkbox', (['"""👁 - Missing Data"""'], {'value': '(False)'}), "('👁 - Missing Data', value=False)\n", (16014, 16047), True, 'import streamlit as st\n'), ((16446, 16490), 'streamlit.checkbox', 'st.checkbox', (['"""🔢 - Value Counts"""'], {'value': '(False)'}), "('🔢 - Value Counts', value=False)\n", (16457, 16490), True, 'import streamlit as st\n'), ((16750, 16797), 'streamlit.checkbox', 'st.checkbox', (['"""🧬 - Unique Elements"""'], {'value': '(False)'}), "('🧬 - Unique 
Elements', value=False)\n", (16761, 16797), True, 'import streamlit as st\n'), ((17063, 17108), 'streamlit.checkbox', 'st.checkbox', (['"""〽 - Distributions"""'], {'value': '(False)'}), "('〽 - Distributions', value=False)\n", (17074, 17108), True, 'import streamlit as st\n'), ((17416, 17456), 'streamlit.checkbox', 'st.checkbox', (['"""🈁 - Box Plot"""'], {'value': '(False)'}), "('🈁 - Box Plot', value=False)\n", (17427, 17456), True, 'import streamlit as st\n'), ((17922, 17963), 'streamlit.checkbox', 'st.checkbox', (['"""➿ - Pair Plot"""'], {'value': '(False)'}), "('➿ - Pair Plot', value=False)\n", (17933, 17963), True, 'import streamlit as st\n'), ((18281, 18331), 'streamlit.checkbox', 'st.checkbox', (['"""🧮 - Correlation Matrix"""'], {'value': '(False)'}), "('🧮 - Correlation Matrix', value=False)\n", (18292, 18331), True, 'import streamlit as st\n'), ((18711, 18806), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""How should we call the Jupyter notebook?"""', 'value': '"""default_eda.ipynb"""'}), "(label='How should we call the Jupyter notebook?', value=\n 'default_eda.ipynb')\n", (18724, 18806), True, 'import streamlit as st\n'), ((18869, 18897), 'streamlit.button', 'st.button', (['"""Create Notebook"""'], {}), "('Create Notebook')\n", (18878, 18897), True, 'import streamlit as st\n'), ((19136, 19466), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""\n MIT License 2020 (c) **<NAME>**\n \n 🙋\u200d♂\n Get in touch: [Twitter](https://twitter.com/@lukemaxmusic)\n \n ℹ️\n Source Code: [Github](https://github.com/lucasmengual92/webeda)\n """'], {}), '(\n """\n MIT License 2020 (c) **<NAME>**\n \n 🙋\u200d♂\n Get in touch: [Twitter](https://twitter.com/@lukemaxmusic)\n \n ℹ️\n Source Code: [Github](https://github.com/lucasmengual92/webeda)\n """\n )\n', (19155, 19466), True, 'import streamlit as st\n'), ((3449, 3480), 'seaborn.distplot', 'sns.distplot', (['df[target_column]'], {}), '(df[target_column])\n', (3461, 3480), True, 'import seaborn as sns\n'), ((3652, 3663), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (3661, 3663), True, 'import streamlit as st\n'), ((5478, 5487), 'seaborn.set', 'sns.set', ([], {}), '()\n', (5485, 5487), True, 'import seaborn as sns\n'), ((5509, 5547), 'seaborn.pairplot', 'sns.pairplot', (['df[cols]'], {'size': 'plot_size'}), '(df[cols], size=plot_size)\n', (5521, 5547), True, 'import seaborn as sns\n'), ((5569, 5580), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (5578, 5580), True, 'import streamlit as st\n'), ((7614, 7645), 'seaborn.distplot', 'sns.distplot', (['df[target_column]'], {}), '(df[target_column])\n', (7626, 7645), True, 'import seaborn as sns\n'), ((7716, 7728), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (7726, 7728), False, 'import io\n'), ((7938, 7947), 'pptx.util.Inches', 'Inches', (['(7)'], {}), '(7)\n', (7944, 7947), False, 'from pptx.util import Inches, Pt\n'), ((7976, 7987), 'pptx.util.Inches', 'Inches', (['(4.5)'], {}), '(4.5)\n', (7982, 7987), False, 'from pptx.util import Inches, Pt\n'), ((8026, 8035), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (8032, 8035), False, 'from pptx.util import Inches, Pt\n'), ((8317, 8322), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (8319, 8322), False, 'from pptx.util import Inches, Pt\n'), ((8373, 8378), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (8375, 8378), False, 'from pptx.util import Inches, Pt\n'), ((8766, 8775), 'pptx.util.Inches', 'Inches', (['(7)'], {}), '(7)\n', (8772, 8775), False, 'from pptx.util import Inches, Pt\n'), ((8804, 8815), 
'pptx.util.Inches', 'Inches', (['(4.5)'], {}), '(4.5)\n', (8810, 8815), False, 'from pptx.util import Inches, Pt\n'), ((8854, 8863), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (8860, 8863), False, 'from pptx.util import Inches, Pt\n'), ((9145, 9150), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (9147, 9150), False, 'from pptx.util import Inches, Pt\n'), ((9201, 9206), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (9203, 9206), False, 'from pptx.util import Inches, Pt\n'), ((11084, 11093), 'pptx.util.Inches', 'Inches', (['(7)'], {}), '(7)\n', (11090, 11093), False, 'from pptx.util import Inches, Pt\n'), ((11122, 11133), 'pptx.util.Inches', 'Inches', (['(4.5)'], {}), '(4.5)\n', (11128, 11133), False, 'from pptx.util import Inches, Pt\n'), ((11172, 11181), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (11178, 11181), False, 'from pptx.util import Inches, Pt\n'), ((11463, 11468), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (11465, 11468), False, 'from pptx.util import Inches, Pt\n'), ((11519, 11524), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (11521, 11524), False, 'from pptx.util import Inches, Pt\n'), ((12328, 12340), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (12338, 12340), False, 'import io\n'), ((12780, 12789), 'pptx.util.Inches', 'Inches', (['(7)'], {}), '(7)\n', (12786, 12789), False, 'from pptx.util import Inches, Pt\n'), ((12818, 12829), 'pptx.util.Inches', 'Inches', (['(4.5)'], {}), '(4.5)\n', (12824, 12829), False, 'from pptx.util import Inches, Pt\n'), ((12868, 12877), 'pptx.util.Inches', 'Inches', (['(1)'], {}), '(1)\n', (12874, 12877), False, 'from pptx.util import Inches, Pt\n'), ((13159, 13164), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (13161, 13164), False, 'from pptx.util import Inches, Pt\n'), ((13215, 13220), 'pptx.util.Pt', 'Pt', (['(8)'], {}), '(8)\n', (13217, 13220), False, 'from pptx.util import Inches, Pt\n'), ((14412, 14463), 'streamlit.write', 'st.write', (['"""Finished! Power Point PPTX was created."""'], {}), "('Finished! Power Point PPTX was created.')\n", (14420, 14463), True, 'import streamlit as st\n'), ((15548, 15584), 'nbformat.v4.new_markdown_cell', 'nbf.v4.new_markdown_cell', (['title_text'], {}), '(title_text)\n', (15572, 15584), True, 'import nbformat as nbf\n'), ((15618, 15661), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['import_libraries_code'], {}), '(import_libraries_code)\n', (15638, 15661), True, 'import nbformat as nbf\n'), ((15695, 15736), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['load_dataframe_code'], {}), '(load_dataframe_code)\n', (15715, 15736), True, 'import nbformat as nbf\n'), ((19019, 19070), 'streamlit.write', 'st.write', (['"""Finished! Jupyter Notebook was created."""'], {}), "('Finished! 
Jupyter Notebook was created.')\n", (19027, 19070), True, 'import streamlit as st\n'), ((3710, 3736), 'streamlit.error', 'st.error', (['"""Invalid Column"""'], {}), "('Invalid Column')\n", (3718, 3736), True, 'import streamlit as st\n'), ((4058, 4105), 'pandas.concat', 'pd.concat', (['[df[target_column], df[col]]'], {'axis': '(1)'}), '([df[target_column], df[col]], axis=1)\n', (4067, 4105), True, 'import pandas as pd\n'), ((4198, 4209), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (4207, 4209), True, 'import streamlit as st\n'), ((4592, 4639), 'pandas.concat', 'pd.concat', (['[df[target_column], df[col]]'], {'axis': '(1)'}), '([df[target_column], df[col]], axis=1)\n', (4601, 4639), True, 'import pandas as pd\n'), ((4673, 4701), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (4685, 4701), True, 'from matplotlib import pyplot as plt\n'), ((4733, 4779), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'col', 'y': 'target_column', 'data': 'data'}), '(x=col, y=target_column, data=data)\n', (4744, 4779), True, 'import seaborn as sns\n'), ((4903, 4914), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (4912, 4914), True, 'import streamlit as st\n'), ((5627, 5653), 'streamlit.error', 'st.error', (['"""Invalid column"""'], {}), "('Invalid column')\n", (5635, 5653), True, 'import streamlit as st\n'), ((7871, 7880), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (7877, 7880), False, 'from pptx.util import Inches, Pt\n'), ((7882, 7891), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (7888, 7891), False, 'from pptx.util import Inches, Pt\n'), ((12483, 12492), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (12489, 12492), False, 'from pptx.util import Inches, Pt\n'), ((12494, 12503), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (12500, 12503), False, 'from pptx.util import Inches, Pt\n'), ((13802, 13824), 'seaborn.pairplot', 'sns.pairplot', (['df[cols]'], {}), '(df[cols])\n', (13814, 13824), True, 'import seaborn as sns\n'), ((13856, 13868), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (13866, 13868), False, 'import io\n'), ((15944, 15979), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['raw_data_code'], {}), '(raw_data_code)\n', (15964, 15979), True, 'import nbformat as nbf\n'), ((16383, 16422), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['missing_data_code'], {}), '(missing_data_code)\n', (16403, 16422), True, 'import nbformat as nbf\n'), ((16687, 16726), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['value_counts_code'], {}), '(value_counts_code)\n', (16707, 16726), True, 'import nbformat as nbf\n'), ((16997, 17039), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['unique_elements_code'], {}), '(unique_elements_code)\n', (17017, 17039), True, 'import nbformat as nbf\n'), ((17352, 17392), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['distributions_code'], {}), '(distributions_code)\n', (17372, 17392), True, 'import nbformat as nbf\n'), ((17863, 17898), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['box_plot_code'], {}), '(box_plot_code)\n', (17883, 17898), True, 'import nbformat as nbf\n'), ((18221, 18257), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['pair_plot_code'], {}), '(pair_plot_code)\n', (18241, 18257), True, 'import nbformat as nbf\n'), ((18624, 18674), 'nbformat.v4.new_code_cell', 'nbf.v4.new_code_cell', (['correlation_matrix_plot_code'], {}), '(correlation_matrix_plot_code)\n', (18644, 18674), True, 'import nbformat as nbf\n'), ((18981, 
18997), 'nbformat.write', 'nbf.write', (['nb', 'f'], {}), '(nb, f)\n', (18990, 18997), True, 'import nbformat as nbf\n'), ((4264, 4290), 'streamlit.error', 'st.error', (['"""Invalid column"""'], {}), "('Invalid column')\n", (4272, 4290), True, 'import streamlit as st\n'), ((4969, 4995), 'streamlit.error', 'st.error', (['"""Invalid column"""'], {}), "('Invalid column')\n", (4977, 4995), True, 'import streamlit as st\n'), ((7900, 7909), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (7906, 7909), False, 'from pptx.util import Inches, Pt\n'), ((10058, 10070), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10068, 10070), False, 'import io\n'), ((10337, 10378), 'streamlit.error', 'st.error', (['"""Box Plot columns not selected"""'], {}), "('Box Plot columns not selected')\n", (10345, 10378), True, 'import streamlit as st\n'), ((10611, 10652), 'streamlit.error', 'st.error', (['"""Box Plot columns not selected"""'], {}), "('Box Plot columns not selected')\n", (10619, 10652), True, 'import streamlit as st\n'), ((12512, 12521), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (12518, 12521), False, 'from pptx.util import Inches, Pt\n'), ((14023, 14032), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (14029, 14032), False, 'from pptx.util import Inches, Pt\n'), ((14034, 14043), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (14040, 14043), False, 'from pptx.util import Inches, Pt\n'), ((14117, 14143), 'streamlit.error', 'st.error', (['"""Invalid column"""'], {}), "('Invalid column')\n", (14125, 14143), True, 'import streamlit as st\n'), ((4819, 4844), 'numpy.min', 'np.min', (['df[target_column]'], {}), '(df[target_column])\n', (4825, 4844), True, 'import numpy as np\n'), ((4851, 4876), 'numpy.max', 'np.max', (['df[target_column]'], {}), '(df[target_column])\n', (4857, 4876), True, 'import numpy as np\n'), ((10237, 10246), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (10243, 10246), False, 'from pptx.util import Inches, Pt\n'), ((10248, 10257), 'pptx.util.Inches', 'Inches', (['(2)'], {}), '(2)\n', (10254, 10257), False, 'from pptx.util import Inches, Pt\n'), ((14052, 14061), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (14058, 14061), False, 'from pptx.util import Inches, Pt\n'), ((9545, 9592), 'pandas.concat', 'pd.concat', (['[df[target_column], df[col]]'], {'axis': '(1)'}), '([df[target_column], df[col]], axis=1)\n', (9554, 9592), True, 'import pandas as pd\n'), ((9638, 9666), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (9650, 9666), True, 'from matplotlib import pyplot as plt\n'), ((9710, 9756), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'col', 'y': 'target_column', 'data': 'data'}), '(x=col, y=target_column, data=data)\n', (9721, 9756), True, 'import seaborn as sns\n'), ((10266, 10275), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (10272, 10275), False, 'from pptx.util import Inches, Pt\n'), ((9945, 9971), 'streamlit.error', 'st.error', (['"""Invalid column"""'], {}), "('Invalid column')\n", (9953, 9971), True, 'import streamlit as st\n'), ((9808, 9833), 'numpy.min', 'np.min', (['df[target_column]'], {}), '(df[target_column])\n', (9814, 9833), True, 'import numpy as np\n'), ((9840, 9865), 'numpy.max', 'np.max', (['df[target_column]'], {}), '(df[target_column])\n', (9846, 9865), True, 'import numpy as np\n')]
|
import numpy as np
import time
import matplotlib.pyplot as plt
from scipy import interpolate
from mpl_toolkits.mplot3d import Axes3D
import math
class InterpolationLine():
def __init__(self, x, y, z=0):
self.number_of_point = 25
if not z == 0:
"To use 3D plot"
points = PointsList()
(self.x, self.y, self.z) = points.add_sort_fill(x, y, z)
else:
"Then only use 2D methods"
self.x = x
self.y = y
    def sort_data_in_terms_of_time(self):
points = PointsList()
points.add_points_list(self.x, self.y, self.z)
points.sort()
(self.x, self.y, self.z) = points.fill_vectors()
def spline_approximation_2D(self):
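        # Fit a parametric smoothing spline through the stored (x, y) samples:
        # splprep returns the spline representation `tck` and the parameter values `u`
        # (`s` is the smoothing factor, `k` the spline degree); splev then evaluates the
        # spline both at the knot parameters and at `number_of_point` evenly spaced
        # values of the curve parameter in [0, 1].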
tck, u = interpolate.splprep([self.x, self.y], s=100, k=2)
x_knots, y_knots = interpolate.splev(tck[0], tck)
u_fine = np.linspace(0, 1, self.number_of_point)
        x_fine, y_fine = interpolate.splev(u_fine, tck)
return [(x_knots, y_knots), (x_fine, y_fine)]
def spline_approximation_3D(self):
tck, u = interpolate.splprep([self.x, self.y, self.z], s=2, k=3)
x_knots, y_knots, z_knots = interpolate.splev(tck[0], tck)
u_fine = np.linspace(0, 1, self.number_of_point)
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
return [(x_knots, y_knots, z_knots), (x_fine, y_fine, z_fine)]
def plot_2D(self):
res = self.spline_approximation_2D()
(x_knots, y_knots) = res[0]
(x_fine, y_fine) = res[1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.x, self.y, "x")
ax.plot(x_knots, y_knots, 'go')
ax.plot(x_fine, y_fine, 'g')
fig.show()
def plot_2D_interpolation_3D(self):
res = self.spline_approximation_3D()
(x_knots, y_knots, z_knots) = res[0]
(x_fine, y_fine, z_fine) = res[1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.x, self.y, "x")
ax.plot(x_knots, y_knots, 'go')
ax.plot(x_fine, y_fine, 'g')
fig.show()
def plot_3D(self):
res = self.spline_approximation_3D()
(x_knots, y_knots, z_knots) = res[0]
(x_fine, y_fine, z_fine) = res[1]
fig = plt.figure()
ax3d = fig.add_subplot(111, projection='3d')
ax3d.plot(x_knots, y_knots, z_knots, 'go')
ax3d.plot(x_fine, y_fine, z_fine, 'g')
fig.show()
def interp_test(self):
pass
class Point():
def __init__(self, x, y, time):
self.x = x
self.y = y
self.time = time
def is_x_y_equal(self, other):
cdt1 = self.x == other.x
cdt2 = self.y == other.y
return cdt1 and cdt2
def __eq__(self, other):
return math.fabs(self.time) == other.time
def __lt__(self, other):
return self.time < other.time
def __gt__(self, other):
return self.time > other.time
class PointsList:
def __init__(self):
self.points = []
def add_sort_fill(self, x, y, z):
self.add_points_list(x, y, z)
self.sort()
return self.fill_vectors()
def add_create_point(self, x, y, z):
point = Point(x, y, z)
self.points.append(point)
def add_point(self, point):
self.points.append(point)
def add_points_list(self, x, y, z):
for n in range(len(x)):
self.add_create_point(x[n], y[n], z[n])
def fill_vectors(self):
x = []
y = []
z = []
for point in self.points:
x.append(point.x)
y.append(point.y)
z.append(point.time)
return (x, y, z)
def sort(self):
self.points.sort()
def del_point(self, point):
self.points.remove(point)
def del_same_time(self):
new_points = []
for point in self.points:
            if point not in new_points:
new_points.append(point)
self.points = new_points
    def remove_point_with_same_x_y(self):
        # Keep the first point seen for each (x, y) and drop later duplicates.
        # Collecting into a new list avoids mutating self.points while iterating over it.
        points_without_same = []
        for point in self.points:
            if not any(point.is_x_y_equal(kept) for kept in points_without_same):
                points_without_same.append(point)
        self.points = points_without_same
        return points_without_same
|
[
"math.fabs",
"scipy.interpolate.splprep",
"matplotlib.pyplot.figure",
"numpy.linspace",
"scipy.interpolate.splev"
] |
[((760, 809), 'scipy.interpolate.splprep', 'interpolate.splprep', (['[self.x, self.y]'], {'s': '(100)', 'k': '(2)'}), '([self.x, self.y], s=100, k=2)\n', (779, 809), False, 'from scipy import interpolate\n'), ((837, 867), 'scipy.interpolate.splev', 'interpolate.splev', (['tck[0]', 'tck'], {}), '(tck[0], tck)\n', (854, 867), False, 'from scipy import interpolate\n'), ((885, 924), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.number_of_point'], {}), '(0, 1, self.number_of_point)\n', (896, 924), True, 'import numpy as np\n'), ((951, 981), 'scipy.interpolate.splev', 'interpolate.splev', (['u_fine', 'tck'], {}), '(u_fine, tck)\n', (968, 981), False, 'from scipy import interpolate\n'), ((1094, 1149), 'scipy.interpolate.splprep', 'interpolate.splprep', (['[self.x, self.y, self.z]'], {'s': '(2)', 'k': '(3)'}), '([self.x, self.y, self.z], s=2, k=3)\n', (1113, 1149), False, 'from scipy import interpolate\n'), ((1187, 1217), 'scipy.interpolate.splev', 'interpolate.splev', (['tck[0]', 'tck'], {}), '(tck[0], tck)\n', (1204, 1217), False, 'from scipy import interpolate\n'), ((1235, 1274), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.number_of_point'], {}), '(0, 1, self.number_of_point)\n', (1246, 1274), True, 'import numpy as np\n'), ((1308, 1338), 'scipy.interpolate.splev', 'interpolate.splev', (['u_fine', 'tck'], {}), '(u_fine, tck)\n', (1325, 1338), False, 'from scipy import interpolate\n'), ((1565, 1577), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1575, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1945), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1943, 1945), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2296), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2294, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2800, 2820), 'math.fabs', 'math.fabs', (['self.time'], {}), '(self.time)\n', (2809, 2820), False, 'import math\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import argparse
import numpy as np
import itertools
import torch.utils.data
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
import egg.core as core
#from scipy.stats import entropy
from egg.core import EarlyStopperAccuracy
from egg.zoo.channel.features import OneHotLoader, UniformLoader, OneHotLoaderCompositionality, TestLoaderCompositionality
from egg.zoo.channel.archs import Sender, Receiver
from egg.core.reinforce_wrappers import RnnReceiverImpatient, RnnReceiverImpatientCompositionality, RnnReceiverCompositionality
from egg.core.reinforce_wrappers import SenderImpatientReceiverRnnReinforce, CompositionalitySenderImpatientReceiverRnnReinforce, CompositionalitySenderReceiverRnnReinforce
from egg.core.util import dump_sender_receiver_impatient, dump_sender_receiver_impatient_compositionality, dump_sender_receiver_compositionality
from egg.core.trainers import CompoTrainer
def get_params(params):
parser = argparse.ArgumentParser()
parser.add_argument('--n_features', type=int, default=10,
help='Dimensionality of the "concept" space (default: 10)')
parser.add_argument('--batches_per_epoch', type=int, default=1000,
help='Number of batches per epoch (default: 1000)')
parser.add_argument('--dim_dataset', type=int, default=10240,
help='Dim of constructing the data (default: 10240)')
parser.add_argument('--force_eos', type=int, default=0,
help='Force EOS at the end of the messages (default: 0)')
parser.add_argument('--sender_hidden', type=int, default=10,
help='Size of the hidden layer of Sender (default: 10)')
parser.add_argument('--receiver_hidden', type=int, default=10,
help='Size of the hidden layer of Receiver (default: 10)')
    parser.add_argument('--receiver_num_layers', type=int, default=1,
                        help='Number of hidden layers of receiver. Only in reinforce (default: 1)')
    parser.add_argument('--sender_num_layers', type=int, default=1,
                        help='Number of hidden layers of sender. Only in reinforce (default: 1)')
parser.add_argument('--receiver_num_heads', type=int, default=8,
help='Number of attention heads for Transformer Receiver (default: 8)')
parser.add_argument('--sender_num_heads', type=int, default=8,
help='Number of self-attention heads for Transformer Sender (default: 8)')
parser.add_argument('--sender_embedding', type=int, default=10,
help='Dimensionality of the embedding hidden layer for Sender (default: 10)')
parser.add_argument('--receiver_embedding', type=int, default=10,
help='Dimensionality of the embedding hidden layer for Receiver (default: 10)')
parser.add_argument('--causal_sender', default=False, action='store_true')
parser.add_argument('--causal_receiver', default=False, action='store_true')
parser.add_argument('--sender_generate_style', type=str, default='in-place', choices=['standard', 'in-place'],
help='How the next symbol is generated within the TransformerDecoder (default: in-place)')
parser.add_argument('--sender_cell', type=str, default='rnn',
help='Type of the cell used for Sender {rnn, gru, lstm, transformer} (default: rnn)')
parser.add_argument('--receiver_cell', type=str, default='rnn',
help='Type of the model used for Receiver {rnn, gru, lstm, transformer} (default: rnn)')
parser.add_argument('--sender_entropy_coeff', type=float, default=1e-1,
help='The entropy regularisation coefficient for Sender (default: 1e-1)')
parser.add_argument('--receiver_entropy_coeff', type=float, default=1e-1,
help='The entropy regularisation coefficient for Receiver (default: 1e-1)')
parser.add_argument('--probs', type=str, default='uniform',
help="Prior distribution over the concepts (default: uniform)")
parser.add_argument('--length_cost', type=float, default=0.0,
help="Penalty for the message length, each symbol would before <EOS> would be "
"penalized by this cost (default: 0.0)")
parser.add_argument('--name', type=str, default='model',
help="Name for your checkpoint (default: model)")
parser.add_argument('--early_stopping_thr', type=float, default=0.9999,
help="Early stopping threshold on accuracy (default: 0.9999)")
    # Additional options
parser.add_argument('--dir_save', type=str, default="expe_1",
help="Directory in which we will save the information")
parser.add_argument('--unigram_pen', type=float, default=0.0,
help="Add a penalty for redundancy")
parser.add_argument('--impatient', type=bool, default=False,
help="Impatient listener")
parser.add_argument('--print_message', type=bool, default=False,
help='Print message ?')
parser.add_argument('--reg', type=bool, default=False,
help='Add regularization ?')
# Compositionality
parser.add_argument('--n_attributes', type=int, default=3,
                        help='Number of attributes (default: 3)')
parser.add_argument('--n_values', type=int, default=3,
help='Number of values by attribute')
parser.add_argument('--att_weights', type=list, default=[1,1,1],
help='Weights of each attribute')
parser.add_argument('--probs_attributes', type=str, default="uniform",
help='Sampling prob for each att')
args = core.init(parser, params)
return args
def loss(sender_input, _message, _receiver_input, receiver_output, _labels):
acc = (receiver_output.argmax(dim=1) == sender_input.argmax(dim=1)).detach().float()
loss = F.cross_entropy(receiver_output, sender_input.argmax(dim=1), reduction="none")
return loss, {'acc': acc}
def loss_impatient(sender_input, _message, message_length, _receiver_input, receiver_output, _labels):
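    # Impatient-listener loss (description of the tensor manipulation below): the receiver
    # produces a prediction after every symbol, so we build a 0/1 mask over the first
    # message_length positions (one-hot of message_length, cumulative sum, then inverted),
    # weight earlier positions more heavily, renormalise each row to sum to 1, and return
    # the mask-weighted average of the per-position accuracies and cross-entropies.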
to_onehot=torch.eye(_message.size(1)).to("cuda")
to_onehot=torch.cat((to_onehot,torch.zeros((1,_message.size(1))).to("cuda")),0)
len_mask=[]
for i in range(message_length.size(0)):
len_mask.append(to_onehot[message_length[i]])
len_mask=torch.stack(len_mask,dim=0)
coef=(1/message_length.to(float)).repeat(_message.size(1),1).transpose(1,0)
coef2=coef*torch.arange(_message.size(1),0,-1).repeat(_message.size(0),1).to("cuda")
len_mask=torch.cumsum(len_mask,dim=1)
len_mask=torch.ones(len_mask.size()).to("cuda").add_(-len_mask)
len_mask.mul_((coef2))
len_mask.mul_((1/len_mask.sum(1)).repeat((_message.size(1),1)).transpose(1,0))
crible_acc=torch.zeros(size=_message.size()).to("cuda")
crible_loss=torch.zeros(size=_message.size()).to("cuda")
for i in range(receiver_output.size(1)):
crible_acc[:,i].add_((receiver_output[:,i,:].argmax(dim=1) == sender_input.argmax(dim=1)).detach().float())
crible_loss[:,i].add_(F.cross_entropy(receiver_output[:,i,:], sender_input.argmax(dim=1), reduction="none"))
acc=crible_acc*len_mask
loss=crible_loss*len_mask
acc = acc.sum(1)
loss= loss.sum(1)
return loss, {'acc': acc}, crible_acc
def loss_compositionality(sender_input, _message, message_length, _receiver_input, receiver_output, _labels,n_attributes,n_values):
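    # Compositional loss: the input is viewed as n_attributes one-hot blocks of size
    # n_values; accuracy is the fraction of attributes the receiver decodes correctly,
    # and the loss sums one cross-entropy term per attribute, weighted by K (the maximum
    # of that attribute's block, i.e. 1 when the attribute is present).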
loss=0.
sender_input=sender_input.reshape(sender_input.size(0),n_attributes,n_values)
crible_acc=(receiver_output.argmax(dim=2)==sender_input.argmax(2)).detach().float().mean(1)
for j in range(receiver_output.size(1)):
#K=10*(1/(j+1))
K=sender_input[:,j,:].max(dim=1).values
loss+=K*F.cross_entropy(receiver_output[:,j,:], sender_input[:,j,:].argmax(dim=1), reduction="none")
return loss, {'acc': crible_acc}, crible_acc
def loss_impatient_compositionality(sender_input, _message, message_length, _receiver_input, receiver_output, _labels,n_attributes,n_values,att_weights):
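    # Combines the two losses above: a mask over the first message_length positions as in
    # loss_impatient (here uniform over the prefix -- the position-dependent weights are
    # commented out) and a per-attribute cross-entropy as in loss_compositionality.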
to_onehot=torch.eye(_message.size(1)).to("cuda")
to_onehot=torch.cat((to_onehot,torch.zeros((1,_message.size(1))).to("cuda")),0)
len_mask=[]
for i in range(message_length.size(0)):
len_mask.append(to_onehot[message_length[i]])
len_mask=torch.stack(len_mask,dim=0)
coef=(1/message_length.to(float)).repeat(_message.size(1),1).transpose(1,0)
#coef2=coef*torch.arange(_message.size(1),0,-1).repeat(_message.size(0),1).to("cuda")
coef2=coef
# NEW LOSS
#Mlen=n_attributes*torch.ones(message_length.size()).to("cuda")
#coef=(1/Mlen).repeat(_message.size(1),1).transpose(1,0)
#coef2=coef
len_mask=torch.cumsum(len_mask,dim=1)
len_mask=torch.ones(len_mask.size()).to("cuda").add_(-len_mask)
    #len_mask.mul_((coef2)) # with the normalisation just below, this step is a priori redundant
len_mask.mul_((1/len_mask.sum(1)).repeat((_message.size(1),1)).transpose(1,0))
crible_acc=torch.zeros(size=_message.size()).to("cuda")
crible_loss=torch.zeros(size=_message.size()).to("cuda")
for i in range(receiver_output.size(1)):
ro=receiver_output[:,i,:].reshape(receiver_output.size(0),n_attributes,n_values)
si=sender_input.reshape(sender_input.size(0),n_attributes,n_values)
crible_acc[:,i].add_((ro.argmax(dim=2)==si.argmax(2)).detach().float().sum(1)/n_attributes)
#crible_loss[:,i].add_(F.cross_entropy(receiver_output[:,i,:], sender_input.argmax(dim=1), reduction="none"))
for j in range(ro.size(1)):
#K=att_weights[j]
K=si[:,j,:].max(dim=1).values
crible_loss[:,i].add_(K*F.cross_entropy(ro[:,j,:], si[:,j,:].argmax(dim=1), reduction="none")/n_attributes)
acc=crible_acc*len_mask
loss=crible_loss*len_mask
acc = acc.sum(1)
loss= loss.sum(1)
return loss, {'acc': acc}, crible_acc
def dump(game, n_features, device, gs_mode, epoch):
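    # Evaluate the game on all n_features one-hot inputs and report accuracy under both a
    # uniform prior and a power-law (Zipfian, p_i proportional to 1/i) prior over inputs.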
# tiny "dataset"
dataset = [[torch.eye(n_features).to(device), None]]
sender_inputs, messages, receiver_inputs, receiver_outputs, _ = \
core.dump_sender_receiver(game, dataset, gs=gs_mode, device=device, variable_length=True)
unif_acc = 0.
powerlaw_acc = 0.
powerlaw_probs = 1 / np.arange(1, n_features+1, dtype=np.float32)
powerlaw_probs /= powerlaw_probs.sum()
acc_vec=np.zeros(n_features)
for sender_input, message, receiver_output in zip(sender_inputs, messages, receiver_outputs):
input_symbol = sender_input.argmax()
output_symbol = receiver_output.argmax()
acc = (input_symbol == output_symbol).float().item()
acc_vec[int(input_symbol)]=acc
unif_acc += acc
powerlaw_acc += powerlaw_probs[input_symbol] * acc
if epoch%50==0:
print(f'input: {input_symbol.item()} -> message: {",".join([str(x.item()) for x in message])} -> output: {output_symbol.item()}', flush=True)
unif_acc /= n_features
#print(f'Mean accuracy wrt uniform distribution is {unif_acc}')
#print(f'Mean accuracy wrt powerlaw distribution is {powerlaw_acc}')
print(json.dumps({'powerlaw': powerlaw_acc, 'unif': unif_acc}))
return acc_vec, messages
def dump_impatient(game, n_features, device, gs_mode,epoch):
# tiny "dataset"
dataset = [[torch.eye(n_features).to(device), None]]
sender_inputs, messages, receiver_inputs, receiver_outputs, _ = \
dump_sender_receiver_impatient(game, dataset, gs=gs_mode, device=device, variable_length=True)
unif_acc = 0.
powerlaw_acc = 0.
powerlaw_probs = 1 / np.arange(1, n_features+1, dtype=np.float32)
powerlaw_probs /= powerlaw_probs.sum()
acc_vec=np.zeros(n_features)
for sender_input, message, receiver_output in zip(sender_inputs, messages, receiver_outputs):
input_symbol = sender_input.argmax()
output_symbol = receiver_output.argmax()
acc = (input_symbol == output_symbol).float().item()
acc_vec[int(input_symbol)]=acc
unif_acc += acc
powerlaw_acc += powerlaw_probs[input_symbol] * acc
if epoch%25==0 or epoch>300:
print(f'input: {input_symbol.item()} -> message: {",".join([str(x.item()) for x in message])} -> output: {output_symbol.item()}', flush=True)
unif_acc /= n_features
#print(f'Mean accuracy wrt uniform distribution is {unif_acc}')
#print(f'Mean accuracy wrt powerlaw distribution is {powerlaw_acc}')
print(json.dumps({'powerlaw': powerlaw_acc, 'unif': unif_acc}))
return acc_vec, messages
def dump_compositionality(game, n_attributes, n_values, device, gs_mode,epoch):
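    # Exhaustive evaluation: enumerate every attribute/value combination
    # (n_values ** n_attributes inputs, built with itertools.product), run the game on
    # them, and report per-attribute accuracy together with the emitted messages.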
# tiny "dataset"
one_hots = torch.eye(n_values)
val=np.arange(n_values)
combination=list(itertools.product(val,repeat=n_attributes))
dataset=[]
for i in range(len(combination)):
new_input=torch.zeros(0)
for j in combination[i]:
new_input=torch.cat((new_input,one_hots[j]))
dataset.append(new_input)
    # NOTE: the commented-out block below only adds the extra combinations for two attributes
#dataset.append(torch.cat((torch.zeros(n_values),torch.zeros(n_values))))
#combination.append((-1,-1))
#for j in range(n_values):
# new_input=torch.cat((torch.zeros(n_values),one_hots[j]))
# dataset.append(new_input)
# combination.append((-1,j))
#for j in range(n_values):
# new_input=torch.cat((one_hots[j],torch.zeros(n_values)))
# dataset.append(new_input)
# combination.append((j,-1))
dataset=torch.stack(dataset)
dataset=[[dataset,None]]
sender_inputs, messages, receiver_inputs, receiver_outputs, _ = \
dump_sender_receiver_compositionality(game, dataset, gs=gs_mode, device=device, variable_length=True)
unif_acc = 0.
acc_vec=np.zeros(((n_values**n_attributes), n_attributes))
for i in range(len(receiver_outputs)):
message=messages[i]
correct=True
if i<n_values**n_attributes:
for j in range(len(list(combination[i]))):
if receiver_outputs[i][j]==list(combination[i])[j]:
unif_acc+=1
acc_vec[i,j]=1
print(f'input: {",".join([str(x) for x in combination[i]])} -> message: {",".join([str(x.item()) for x in message])} -> output: {",".join([str(x) for x in receiver_outputs[i]])}', flush=True)
unif_acc /= (n_values**n_attributes) * n_attributes
print(json.dumps({'unif': unif_acc}))
return acc_vec, messages
def dump_impatient_compositionality(game, n_attributes, n_values, device, gs_mode,epoch):
# tiny "dataset"
one_hots = torch.eye(n_values)
val=np.arange(n_values)
combination=list(itertools.product(val,repeat=n_attributes))
dataset=[]
for i in range(len(combination)):
new_input=torch.zeros(0)
for j in combination[i]:
new_input=torch.cat((new_input,one_hots[j]))
dataset.append(new_input)
dataset=torch.stack(dataset)
dataset=[[dataset,None]]
sender_inputs, messages, receiver_inputs, receiver_outputs, _ = \
dump_sender_receiver_impatient_compositionality(game, dataset, gs=gs_mode, device=device, variable_length=True)
unif_acc = 0.
acc_vec=np.zeros(((n_values**n_attributes), n_attributes))
for i in range(len(receiver_outputs)):
message=messages[i]
correct=True
for j in range(len(list(combination[i]))):
if receiver_outputs[i][j]==list(combination[i])[j]:
unif_acc+=1
acc_vec[i,j]=1
if epoch%5==0:
print(f'input: {",".join([str(x) for x in combination[i]])} -> message: {",".join([str(x.item()) for x in message])} -> output: {",".join([str(x) for x in receiver_outputs[i]])}', flush=True)
unif_acc /= (n_values**n_attributes) * n_attributes
print(json.dumps({'unif': unif_acc}))
return acc_vec, messages
def main(params):
print(torch.cuda.is_available())
opts = get_params(params)
print(opts, flush=True)
device = opts.device
force_eos = opts.force_eos == 1
# Distribution of the inputs
if opts.probs=="uniform":
probs=[]
probs_by_att = np.ones(opts.n_values)
probs_by_att /= probs_by_att.sum()
for i in range(opts.n_attributes):
probs.append(probs_by_att)
if opts.probs=="entropy_test":
probs=[]
for i in range(opts.n_attributes):
probs_by_att = np.ones(opts.n_values)
probs_by_att[0]=1+(1*i)
probs_by_att /= probs_by_att.sum()
probs.append(probs_by_att)
if opts.probs_attributes=="uniform":
probs_attributes=[1]*opts.n_attributes
if opts.probs_attributes=="uniform_indep":
probs_attributes=[]
probs_attributes=[0.2]*opts.n_attributes
if opts.probs_attributes=="echelon":
probs_attributes=[]
for i in range(opts.n_attributes):
#probs_attributes.append(1.-(0.2)*i)
#probs_attributes.append(0.7+0.3/(i+1))
probs_attributes=[1.,0.95,0.9,0.85]
print("Probability by attribute is:",probs_attributes)
train_loader = OneHotLoaderCompositionality(n_values=opts.n_values, n_attributes=opts.n_attributes, batch_size=opts.batch_size*opts.n_attributes,
batches_per_epoch=opts.batches_per_epoch, probs=probs, probs_attributes=probs_attributes)
# single batches with 1s on the diag
test_loader = TestLoaderCompositionality(n_values=opts.n_values,n_attributes=opts.n_attributes)
### SENDER ###
sender = Sender(n_features=opts.n_attributes*opts.n_values, n_hidden=opts.sender_hidden)
sender = core.RnnSenderReinforce(sender,opts.vocab_size, opts.sender_embedding, opts.sender_hidden,
cell=opts.sender_cell, max_len=opts.max_len, num_layers=opts.sender_num_layers,
force_eos=force_eos)
### RECEIVER ###
receiver = Receiver(n_features=opts.n_values, n_hidden=opts.receiver_hidden)
if not opts.impatient:
receiver = Receiver(n_features=opts.n_features, n_hidden=opts.receiver_hidden)
receiver = RnnReceiverCompositionality(receiver, opts.vocab_size, opts.receiver_embedding,
opts.receiver_hidden, cell=opts.receiver_cell,
num_layers=opts.receiver_num_layers, max_len=opts.max_len, n_attributes=opts.n_attributes, n_values=opts.n_values)
else:
receiver = Receiver(n_features=opts.receiver_hidden, n_hidden=opts.vocab_size)
# If impatient 1
receiver = RnnReceiverImpatientCompositionality(receiver, opts.vocab_size, opts.receiver_embedding,
opts.receiver_hidden, cell=opts.receiver_cell,
num_layers=opts.receiver_num_layers, max_len=opts.max_len, n_attributes=opts.n_attributes, n_values=opts.n_values)
if not opts.impatient:
game = CompositionalitySenderReceiverRnnReinforce(sender, receiver, loss_compositionality, sender_entropy_coeff=opts.sender_entropy_coeff,
n_attributes=opts.n_attributes,n_values=opts.n_values,receiver_entropy_coeff=opts.receiver_entropy_coeff,
length_cost=opts.length_cost,unigram_penalty=opts.unigram_pen,reg=opts.reg)
else:
game = CompositionalitySenderImpatientReceiverRnnReinforce(sender, receiver, loss_impatient_compositionality, sender_entropy_coeff=opts.sender_entropy_coeff,
n_attributes=opts.n_attributes,n_values=opts.n_values,att_weights=opts.att_weights,receiver_entropy_coeff=opts.receiver_entropy_coeff,
length_cost=opts.length_cost,unigram_penalty=opts.unigram_pen,reg=opts.reg)
optimizer = core.build_optimizer(game.parameters())
trainer = CompoTrainer(n_attributes=opts.n_attributes,n_values=opts.n_values,game=game, optimizer=optimizer, train_data=train_loader,
validation_data=test_loader, callbacks=[EarlyStopperAccuracy(opts.early_stopping_thr)])
curr_accs=[0]*7
game.att_weights=[1]*(game.n_attributes)
for epoch in range(int(opts.n_epochs)):
print("Epoch: "+str(epoch))
#if epoch%100==0:
# trainer.optimizer.defaults["lr"]/=2
trainer.train(n_epochs=1)
if opts.checkpoint_dir:
trainer.save_checkpoint(name=f'{opts.name}_vocab{opts.vocab_size}_rs{opts.random_seed}_lr{opts.lr}_shid{opts.sender_hidden}_rhid{opts.receiver_hidden}_sentr{opts.sender_entropy_coeff}_reg{opts.length_cost}_max_len{opts.max_len}')
if not opts.impatient:
acc_vec,messages=dump_compositionality(trainer.game, opts.n_attributes, opts.n_values, device, False,epoch)
else:
acc_vec,messages=dump_impatient_compositionality(trainer.game, opts.n_attributes, opts.n_values, device, False,epoch)
print(acc_vec.mean(0))
#print(trainer.optimizer.defaults["lr"])
# ADDITION TO SAVE MESSAGES
all_messages=[]
for x in messages:
x = x.cpu().numpy()
all_messages.append(x)
all_messages = np.asarray(all_messages)
if epoch%50==0:
torch.save(sender.state_dict(), opts.dir_save+"/sender/sender_weights"+str(epoch)+".pth")
torch.save(receiver.state_dict(), opts.dir_save+"/receiver/receiver_weights"+str(epoch)+".pth")
np.save(opts.dir_save+'/messages/messages_'+str((epoch))+'.npy', all_messages)
np.save(opts.dir_save+'/accuracy/accuracy_'+str((epoch))+'.npy', acc_vec)
print(acc_vec.T)
core.close()
if __name__ == "__main__":
import sys
main(sys.argv[1:])
|
[
"argparse.ArgumentParser",
"numpy.ones",
"json.dumps",
"numpy.arange",
"egg.zoo.channel.features.OneHotLoaderCompositionality",
"egg.core.reinforce_wrappers.RnnReceiverCompositionality",
"egg.core.reinforce_wrappers.CompositionalitySenderImpatientReceiverRnnReinforce",
"egg.core.util.dump_sender_receiver_impatient",
"egg.core.reinforce_wrappers.CompositionalitySenderReceiverRnnReinforce",
"egg.core.RnnSenderReinforce",
"egg.core.reinforce_wrappers.RnnReceiverImpatientCompositionality",
"itertools.product",
"egg.core.init",
"numpy.asarray",
"egg.core.util.dump_sender_receiver_impatient_compositionality",
"egg.core.close",
"egg.core.EarlyStopperAccuracy",
"egg.zoo.channel.archs.Receiver",
"egg.core.util.dump_sender_receiver_compositionality",
"egg.zoo.channel.features.TestLoaderCompositionality",
"numpy.zeros",
"egg.zoo.channel.archs.Sender",
"egg.core.dump_sender_receiver"
] |
[((1154, 1179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1177, 1179), False, 'import argparse\n'), ((6035, 6060), 'egg.core.init', 'core.init', (['parser', 'params'], {}), '(parser, params)\n', (6044, 6060), True, 'import egg.core as core\n'), ((10483, 10576), 'egg.core.dump_sender_receiver', 'core.dump_sender_receiver', (['game', 'dataset'], {'gs': 'gs_mode', 'device': 'device', 'variable_length': '(True)'}), '(game, dataset, gs=gs_mode, device=device,\n variable_length=True)\n', (10508, 10576), True, 'import egg.core as core\n'), ((10740, 10760), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (10748, 10760), True, 'import numpy as np\n'), ((11804, 11902), 'egg.core.util.dump_sender_receiver_impatient', 'dump_sender_receiver_impatient', (['game', 'dataset'], {'gs': 'gs_mode', 'device': 'device', 'variable_length': '(True)'}), '(game, dataset, gs=gs_mode, device=device,\n variable_length=True)\n', (11834, 11902), False, 'from egg.core.util import dump_sender_receiver_impatient, dump_sender_receiver_impatient_compositionality, dump_sender_receiver_compositionality\n'), ((12066, 12086), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (12074, 12086), True, 'import numpy as np\n'), ((13070, 13089), 'numpy.arange', 'np.arange', (['n_values'], {}), '(n_values)\n', (13079, 13089), True, 'import numpy as np\n'), ((14009, 14115), 'egg.core.util.dump_sender_receiver_compositionality', 'dump_sender_receiver_compositionality', (['game', 'dataset'], {'gs': 'gs_mode', 'device': 'device', 'variable_length': '(True)'}), '(game, dataset, gs=gs_mode, device=\n device, variable_length=True)\n', (14046, 14115), False, 'from egg.core.util import dump_sender_receiver_impatient, dump_sender_receiver_impatient_compositionality, dump_sender_receiver_compositionality\n'), ((14142, 14192), 'numpy.zeros', 'np.zeros', (['(n_values ** n_attributes, n_attributes)'], {}), '((n_values ** n_attributes, n_attributes))\n', (14150, 14192), True, 'import numpy as np\n'), ((14973, 14992), 'numpy.arange', 'np.arange', (['n_values'], {}), '(n_values)\n', (14982, 14992), True, 'import numpy as np\n'), ((15403, 15518), 'egg.core.util.dump_sender_receiver_impatient_compositionality', 'dump_sender_receiver_impatient_compositionality', (['game', 'dataset'], {'gs': 'gs_mode', 'device': 'device', 'variable_length': '(True)'}), '(game, dataset, gs=gs_mode,\n device=device, variable_length=True)\n', (15450, 15518), False, 'from egg.core.util import dump_sender_receiver_impatient, dump_sender_receiver_impatient_compositionality, dump_sender_receiver_compositionality\n'), ((15546, 15596), 'numpy.zeros', 'np.zeros', (['(n_values ** n_attributes, n_attributes)'], {}), '((n_values ** n_attributes, n_attributes))\n', (15554, 15596), True, 'import numpy as np\n'), ((17447, 17683), 'egg.zoo.channel.features.OneHotLoaderCompositionality', 'OneHotLoaderCompositionality', ([], {'n_values': 'opts.n_values', 'n_attributes': 'opts.n_attributes', 'batch_size': '(opts.batch_size * opts.n_attributes)', 'batches_per_epoch': 'opts.batches_per_epoch', 'probs': 'probs', 'probs_attributes': 'probs_attributes'}), '(n_values=opts.n_values, n_attributes=opts.\n n_attributes, batch_size=opts.batch_size * opts.n_attributes,\n batches_per_epoch=opts.batches_per_epoch, probs=probs, probs_attributes\n =probs_attributes)\n', (17475, 17683), False, 'from egg.zoo.channel.features import OneHotLoader, UniformLoader, OneHotLoaderCompositionality, TestLoaderCompositionality\n'), ((17776, 17863), 
'egg.zoo.channel.features.TestLoaderCompositionality', 'TestLoaderCompositionality', ([], {'n_values': 'opts.n_values', 'n_attributes': 'opts.n_attributes'}), '(n_values=opts.n_values, n_attributes=opts.\n n_attributes)\n', (17802, 17863), False, 'from egg.zoo.channel.features import OneHotLoader, UniformLoader, OneHotLoaderCompositionality, TestLoaderCompositionality\n'), ((17892, 17978), 'egg.zoo.channel.archs.Sender', 'Sender', ([], {'n_features': '(opts.n_attributes * opts.n_values)', 'n_hidden': 'opts.sender_hidden'}), '(n_features=opts.n_attributes * opts.n_values, n_hidden=opts.\n sender_hidden)\n', (17898, 17978), False, 'from egg.zoo.channel.archs import Sender, Receiver\n'), ((17986, 18186), 'egg.core.RnnSenderReinforce', 'core.RnnSenderReinforce', (['sender', 'opts.vocab_size', 'opts.sender_embedding', 'opts.sender_hidden'], {'cell': 'opts.sender_cell', 'max_len': 'opts.max_len', 'num_layers': 'opts.sender_num_layers', 'force_eos': 'force_eos'}), '(sender, opts.vocab_size, opts.sender_embedding,\n opts.sender_hidden, cell=opts.sender_cell, max_len=opts.max_len,\n num_layers=opts.sender_num_layers, force_eos=force_eos)\n', (18009, 18186), True, 'import egg.core as core\n'), ((18287, 18352), 'egg.zoo.channel.archs.Receiver', 'Receiver', ([], {'n_features': 'opts.n_values', 'n_hidden': 'opts.receiver_hidden'}), '(n_features=opts.n_values, n_hidden=opts.receiver_hidden)\n', (18295, 18352), False, 'from egg.zoo.channel.archs import Sender, Receiver\n'), ((22074, 22086), 'egg.core.close', 'core.close', ([], {}), '()\n', (22084, 22086), True, 'import egg.core as core\n'), ((10639, 10685), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)'], {'dtype': 'np.float32'}), '(1, n_features + 1, dtype=np.float32)\n', (10648, 10685), True, 'import numpy as np\n'), ((11497, 11553), 'json.dumps', 'json.dumps', (["{'powerlaw': powerlaw_acc, 'unif': unif_acc}"], {}), "({'powerlaw': powerlaw_acc, 'unif': unif_acc})\n", (11507, 11553), False, 'import json\n'), ((11965, 12011), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)'], {'dtype': 'np.float32'}), '(1, n_features + 1, dtype=np.float32)\n', (11974, 12011), True, 'import numpy as np\n'), ((12836, 12892), 'json.dumps', 'json.dumps', (["{'powerlaw': powerlaw_acc, 'unif': unif_acc}"], {}), "({'powerlaw': powerlaw_acc, 'unif': unif_acc})\n", (12846, 12892), False, 'import json\n'), ((13111, 13154), 'itertools.product', 'itertools.product', (['val'], {'repeat': 'n_attributes'}), '(val, repeat=n_attributes)\n', (13128, 13154), False, 'import itertools\n'), ((14755, 14785), 'json.dumps', 'json.dumps', (["{'unif': unif_acc}"], {}), "({'unif': unif_acc})\n", (14765, 14785), False, 'import json\n'), ((15014, 15057), 'itertools.product', 'itertools.product', (['val'], {'repeat': 'n_attributes'}), '(val, repeat=n_attributes)\n', (15031, 15057), False, 'import itertools\n'), ((16133, 16163), 'json.dumps', 'json.dumps', (["{'unif': unif_acc}"], {}), "({'unif': unif_acc})\n", (16143, 16163), False, 'import json\n'), ((16475, 16497), 'numpy.ones', 'np.ones', (['opts.n_values'], {}), '(opts.n_values)\n', (16482, 16497), True, 'import numpy as np\n'), ((18400, 18467), 'egg.zoo.channel.archs.Receiver', 'Receiver', ([], {'n_features': 'opts.n_features', 'n_hidden': 'opts.receiver_hidden'}), '(n_features=opts.n_features, n_hidden=opts.receiver_hidden)\n', (18408, 18467), False, 'from egg.zoo.channel.archs import Sender, Receiver\n'), ((18487, 18742), 'egg.core.reinforce_wrappers.RnnReceiverCompositionality', 'RnnReceiverCompositionality', (['receiver', 
'opts.vocab_size', 'opts.receiver_embedding', 'opts.receiver_hidden'], {'cell': 'opts.receiver_cell', 'num_layers': 'opts.receiver_num_layers', 'max_len': 'opts.max_len', 'n_attributes': 'opts.n_attributes', 'n_values': 'opts.n_values'}), '(receiver, opts.vocab_size, opts.\n receiver_embedding, opts.receiver_hidden, cell=opts.receiver_cell,\n num_layers=opts.receiver_num_layers, max_len=opts.max_len, n_attributes\n =opts.n_attributes, n_values=opts.n_values)\n', (18514, 18742), False, 'from egg.core.reinforce_wrappers import RnnReceiverImpatient, RnnReceiverImpatientCompositionality, RnnReceiverCompositionality\n'), ((18846, 18913), 'egg.zoo.channel.archs.Receiver', 'Receiver', ([], {'n_features': 'opts.receiver_hidden', 'n_hidden': 'opts.vocab_size'}), '(n_features=opts.receiver_hidden, n_hidden=opts.vocab_size)\n', (18854, 18913), False, 'from egg.zoo.channel.archs import Sender, Receiver\n'), ((18958, 19222), 'egg.core.reinforce_wrappers.RnnReceiverImpatientCompositionality', 'RnnReceiverImpatientCompositionality', (['receiver', 'opts.vocab_size', 'opts.receiver_embedding', 'opts.receiver_hidden'], {'cell': 'opts.receiver_cell', 'num_layers': 'opts.receiver_num_layers', 'max_len': 'opts.max_len', 'n_attributes': 'opts.n_attributes', 'n_values': 'opts.n_values'}), '(receiver, opts.vocab_size, opts.\n receiver_embedding, opts.receiver_hidden, cell=opts.receiver_cell,\n num_layers=opts.receiver_num_layers, max_len=opts.max_len, n_attributes\n =opts.n_attributes, n_values=opts.n_values)\n', (18994, 19222), False, 'from egg.core.reinforce_wrappers import RnnReceiverImpatient, RnnReceiverImpatientCompositionality, RnnReceiverCompositionality\n'), ((19341, 19675), 'egg.core.reinforce_wrappers.CompositionalitySenderReceiverRnnReinforce', 'CompositionalitySenderReceiverRnnReinforce', (['sender', 'receiver', 'loss_compositionality'], {'sender_entropy_coeff': 'opts.sender_entropy_coeff', 'n_attributes': 'opts.n_attributes', 'n_values': 'opts.n_values', 'receiver_entropy_coeff': 'opts.receiver_entropy_coeff', 'length_cost': 'opts.length_cost', 'unigram_penalty': 'opts.unigram_pen', 'reg': 'opts.reg'}), '(sender, receiver,\n loss_compositionality, sender_entropy_coeff=opts.sender_entropy_coeff,\n n_attributes=opts.n_attributes, n_values=opts.n_values,\n receiver_entropy_coeff=opts.receiver_entropy_coeff, length_cost=opts.\n length_cost, unigram_penalty=opts.unigram_pen, reg=opts.reg)\n', (19383, 19675), False, 'from egg.core.reinforce_wrappers import SenderImpatientReceiverRnnReinforce, CompositionalitySenderImpatientReceiverRnnReinforce, CompositionalitySenderReceiverRnnReinforce\n'), ((19766, 20156), 'egg.core.reinforce_wrappers.CompositionalitySenderImpatientReceiverRnnReinforce', 'CompositionalitySenderImpatientReceiverRnnReinforce', (['sender', 'receiver', 'loss_impatient_compositionality'], {'sender_entropy_coeff': 'opts.sender_entropy_coeff', 'n_attributes': 'opts.n_attributes', 'n_values': 'opts.n_values', 'att_weights': 'opts.att_weights', 'receiver_entropy_coeff': 'opts.receiver_entropy_coeff', 'length_cost': 'opts.length_cost', 'unigram_penalty': 'opts.unigram_pen', 'reg': 'opts.reg'}), '(sender, receiver,\n loss_impatient_compositionality, sender_entropy_coeff=opts.\n sender_entropy_coeff, n_attributes=opts.n_attributes, n_values=opts.\n n_values, att_weights=opts.att_weights, receiver_entropy_coeff=opts.\n receiver_entropy_coeff, length_cost=opts.length_cost, unigram_penalty=\n opts.unigram_pen, reg=opts.reg)\n', (19817, 20156), False, 'from egg.core.reinforce_wrappers import 
SenderImpatientReceiverRnnReinforce, CompositionalitySenderImpatientReceiverRnnReinforce, CompositionalitySenderReceiverRnnReinforce\n'), ((21614, 21638), 'numpy.asarray', 'np.asarray', (['all_messages'], {}), '(all_messages)\n', (21624, 21638), True, 'import numpy as np\n'), ((16746, 16768), 'numpy.ones', 'np.ones', (['opts.n_values'], {}), '(opts.n_values)\n', (16753, 16768), True, 'import numpy as np\n'), ((20477, 20522), 'egg.core.EarlyStopperAccuracy', 'EarlyStopperAccuracy', (['opts.early_stopping_thr'], {}), '(opts.early_stopping_thr)\n', (20497, 20522), False, 'from egg.core import EarlyStopperAccuracy\n')]
|
#!/usr/bin/env python
"""
determine whether bitboard, piece-list coordinate or mailbox is best
bitboard (12, 8, 8): third order tensor
bitboard_vector (768): flattened third order tensor
small_bitboard_vector (384): with -1.0 values for black
piece_list (384): normalised rank, file and their complements (8-rank, 8-file) for pieces in list
small_piece_list (192): normalised rank and file for pieces in list
mailbox (8, 8): normalised type of piece in its position in an 8 * 8 board
"""
import chess
import numpy as np
def fen_to_3D_bitboard(fen):
'convert fen to a 12 by 8 * 8 bitboard'
pos = chess.Board(fen)
bitboard = np.zeros((12, 8, 8))
# over all squares, get piece type and number
for r in range(8):
for f in range(8):
square = 8*r + f
index = pos.piece_type_at(square)
            # if piece exists in square, offset the plane index by 6 for black pieces
if index is not None:
piece = pos.piece_at(square).symbol()
offset = 0 if piece.istitle() else 6
bitboard[index + offset - 1, r, f] = 1.0
return bitboard
def fen_to_bitboard_vector(fen):
'convert fen to a 12 * 8 * 8 = 768 bitboard'
pos = chess.Board(fen)
bitboard = np.zeros((12, 8, 8))
# over all squares, get piece type and number
for r in range(8):
for f in range(8):
square = 8*r + f
index = pos.piece_type_at(square)
            # if piece exists in square, offset the plane index by 6 for black pieces
if index is not None:
piece = pos.piece_at(square).symbol()
offset = 0 if piece.istitle() else 6
bitboard[index + offset - 1, r, f] = 1.0
return bitboard.flatten()
def fen_to_small_bitboard_vector(fen):
'convert fen to a 6 * 8 * 8 = 384 bitboard'
pos = chess.Board(fen)
bitboard = np.zeros((6, 8, 8))
# over all squares, get piece type and number
for r in range(8):
for f in range(8):
square = 8*r + f
index = pos.piece_type_at(square)
# if piece exists in square, if white, increment else decrement
if index is not None:
piece = pos.piece_at(square).symbol()
                player = 1.0 if piece.istitle() else -1.0
bitboard[index-1, r, f] = player
return bitboard.flatten()
def fen_to_piece_list(fen):
'convert fen to 384 piece list of coordinates'
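    # Each piece type and colour owns a fixed block of slots (8 pawns, 10 knights, ...,
    # 1 king per side); an occupied slot stores four normalised board coordinates,
    # (r+1)/8, (8-r)/8, (f+1)/8 and (8-f)/8, for the piece's square.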
pos = chess.Board(fen)
pieceList = np.zeros(384)
# {white, black} 8*Pawn, 10*Knight, 10*Bishop, 10*Rook, 9*Queen, 1*King
pieces = ['p', 'n', 'b', 'r', 'q', 'k', 'P', 'N', 'B', 'R', 'Q', 'K']
count = [8, 10, 10, 10, 9, 1, 8, 10, 10, 10, 9, 1]
# create {piece -> vectorposition} dictionary
vectorPosition = [4 * sum(count[:i]) for i in range(len(count))]
pieceIndex = dict(zip(pieces, vectorPosition))
# create pieceList and type
for f in range(8):
for r in range(8):
square = pos.piece_at(8*f + r)
# if piece exists in square, find first available index by adding 4
if square is not None:
index = pieceIndex[square.symbol()]
while pieceList[index] != 0.0:
index += 4
# set pieceList index to coordinate
pieceList[index] = (r+1.0) / 8.0
pieceList[index+1] = (8.0-r) / 8.0
pieceList[index+2] = (f+1.0) / 8.0
pieceList[index+3] = (8.0-f) / 8.0
return pieceList
def fen_to_small_piece_list(fen):
'convert fen to 192 piece list of coordinates'
pos = chess.Board(fen)
pieceList = np.zeros(192)
# {white, black} 8*Pawn, 10*Knight, 10*Bishop, 10*Rook, 9*Queen, 1*King
pieces = ['p', 'n', 'b', 'r', 'q', 'k', 'P', 'N', 'B', 'R', 'Q', 'K']
count = [8, 10, 10, 10, 9, 1, 8, 10, 10, 10, 9, 1]
# create {piece -> vectorposition} dictionary
vectorPosition = [2 * sum(count[:i]) for i in range(len(count))]
pieceIndex = dict(zip(pieces, vectorPosition))
# create pieceList and type
for f in range(8):
for r in range(8):
square = pos.piece_at(8*f + r)
            # if piece exists in square, find first available index by adding 2
if square is not None:
index = pieceIndex[square.symbol()]
while pieceList[index] != 0.0:
index += 2
# set pieceList index to coordinate
pieceList[index] = (r+1.0) / 8.0
pieceList[index+1] = (f+1.0) / 8.0
return pieceList
def fen_to_mailbox(fen):
    'convert fen to a 2 * 8 * 8 mailbox encoding (piece code and its complement)'
pos = chess.Board(fen)
bitboard = np.zeros((2, 8, 8))
# over all squares, get piece type and number
for r in range(8):
for f in range(8):
square = 8*r + f
index = pos.piece_type_at(square)
            # if piece exists in square, add 6 to the piece code for black pieces
if index is not None:
piece = pos.piece_at(square).symbol()
offset = 0.0 if piece.istitle() else 6.0
encoding = (offset + index) / 12.0
bitboard[0, r, f] = encoding
bitboard[1, r, f] = 1.0 - encoding
return bitboard
def fen_to_mailbox_flat(fen):
    'convert fen to a flattened 2 * 8 * 8 mailbox encoding'
pos = chess.Board(fen)
bitboard = np.zeros(2 * 8 * 8)
# over all squares, get piece type and number
for r in range(8):
for f in range(8):
square = 8*r + f
index = pos.piece_type_at(square)
            # if piece exists in square, add 6 to the piece code for black pieces
if index is not None:
piece = pos.piece_at(square).symbol()
offset = 0.0 if piece.istitle() else 6.0
encoding = (offset + index) / 12.0
bitboard[2*square] = encoding
bitboard[2*square+1] = 1.0 - encoding
return bitboard
if __name__ == "__main__":
print(fen_to_bitboard_vector(chess.Board().fen()))
# print(fen_to_small_bitboard_vector(chess.Board().fen()))
# print(fen_to_piece_list(chess.Board().fen()))
# print(fen_to_small_piece_list(chess.Board().fen()))
# print(fen_to_mailbox(chess.Board().fen()))
# print(fen_to_mailbox_flat(chess.Board().fen()))
|
[
"chess.Board",
"numpy.zeros"
] |
[((587, 603), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (598, 603), False, 'import chess\n'), ((619, 639), 'numpy.zeros', 'np.zeros', (['(12, 8, 8)'], {}), '((12, 8, 8))\n', (627, 639), True, 'import numpy as np\n'), ((1201, 1217), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (1212, 1217), False, 'import chess\n'), ((1233, 1253), 'numpy.zeros', 'np.zeros', (['(12, 8, 8)'], {}), '((12, 8, 8))\n', (1241, 1253), True, 'import numpy as np\n'), ((1830, 1846), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (1841, 1846), False, 'import chess\n'), ((1862, 1881), 'numpy.zeros', 'np.zeros', (['(6, 8, 8)'], {}), '((6, 8, 8))\n', (1870, 1881), True, 'import numpy as np\n'), ((2452, 2468), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (2463, 2468), False, 'import chess\n'), ((2485, 2498), 'numpy.zeros', 'np.zeros', (['(384)'], {}), '(384)\n', (2493, 2498), True, 'import numpy as np\n'), ((3621, 3637), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (3632, 3637), False, 'import chess\n'), ((3654, 3667), 'numpy.zeros', 'np.zeros', (['(192)'], {}), '(192)\n', (3662, 3667), True, 'import numpy as np\n'), ((4662, 4678), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (4673, 4678), False, 'import chess\n'), ((4694, 4713), 'numpy.zeros', 'np.zeros', (['(2, 8, 8)'], {}), '((2, 8, 8))\n', (4702, 4713), True, 'import numpy as np\n'), ((5351, 5367), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (5362, 5367), False, 'import chess\n'), ((5383, 5402), 'numpy.zeros', 'np.zeros', (['(2 * 8 * 8)'], {}), '(2 * 8 * 8)\n', (5391, 5402), True, 'import numpy as np\n'), ((6030, 6043), 'chess.Board', 'chess.Board', ([], {}), '()\n', (6041, 6043), False, 'import chess\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
import numpy as np
def _times_considered_same(t1, t2):
"""docstring for _times_considered_same"""
return abs(t1 - t2) <= 0.000001 * (t1 + t2)
class DTSeries(object):
"""Base class for series support.
In general, you shouldn't need to use this class; it's only provided for
symmetry with DTSource, and to be used by DTSeriesGroup. However, it
may also be useful for non-group objects in future.
"""
def __init__(self, datafile, series_name, series_type):
"""
:param datafile: an empty :class:`datatank_py.DTDataFile.DTDataFile` instance
:param series_name: the name of the series variable
:param series_type: the type of the series variable
The name will typically be "Var", and the type will be whatever is the
base type stored, such as "Group" for a group object.
"""
super(DTSeries, self).__init__()
self._name = series_name
self._time_values = []
self._datafile = datafile
# appending and reading is too much work at this point
assert len(datafile.variable_names()) == 0, "an empty data file is required"
# add series type descriptor
datafile.write_anonymous(series_type, "Seq_" + series_name)
def datafile(self):
""":returns: the :class:`datatank_py.DTDataFile.DTDataFile` instance used for storage"""
return self._datafile
def savecount(self):
""":returns: the number of time values stored"""
return len(self.time_values())
def basename(self):
""":returns: name of the form 'name_N' where N is the result of :meth:savecount"""
return "%s_%d" % (self._name, self.savecount() - 1)
def time_values(self):
""":returns: vector of time values stored"""
return self._time_values
def last_time(self):
""":returns: last time value stored or ``None`` if no values are stored"""
return self.time_values()[-1] if self.savecount() else None
def shared_save(self, time):
"""
:param time: time value to store to disk
Saves the current time value and an appropriate variable name to
disk.
"""
# DTSource logs error and returns false here; assert since these are really
# programmer errors in our case.
assert time >= 0, "time must not be negative"
if len(self.time_values()):
assert time > self.last_time(), "time must be strictly increasing"
        if self.last_time() is not None and self.last_time() >= 0:
assert _times_considered_same(time, self.last_time()) == False, "time values too close together"
self._time_values.append(time)
self._datafile.write_anonymous(time, self.basename() + "_time")
class DTSeriesGroup(DTSeries):
"""Base series group class."""
def __init__(self, datafile, name, name_to_type):
"""
:param datafile: an empty :class:`datatank_py.DTDataFile.DTDataFile` instance
:param name: the name of the group
:param name_to_type: a dictionary mapping variable names to DataTank types
This ``name_to_type`` dictionary defines the structure of the group::
{ "My Output Array":"Array", "My Scalar Value":"Real Number" }
You can look up the DataTank type names in its PDF help manual, or for
compound objects supported in :mod:`datatank_py`, you can use something
like::
from datatank_py.DTMesh2D import DTMesh2D
from datatank_py.DTPointCollection2D import DTPointCollection2D
{ "My 2D Mesh":DTMesh2D.dt_type[0], "My Points":DTPointCollection2D.dt_type[0] }
"""
super(DTSeriesGroup, self).__init__(datafile, name, "Group")
# save for sanity checking
self._names = set(name_to_type.keys())
basename = "SeqInfo_" + name
# WriteStructure equivalent; unordered in this case
idx = 1
for varname in name_to_type:
datafile.write_anonymous(varname, "%s_%dN" % (basename, idx))
datafile.write_anonymous(name_to_type[varname], "%s_%dT" % (basename, idx))
idx += 1
datafile.write_anonymous(len(name_to_type), basename + "_N")
datafile.write_anonymous("Group", basename)
def add(self, time, values):
"""Add a dictionary of values.
:param time: the time value represented by these values
:param values: dictionary mapping variable name to value
When adding to the group, all variables must be present, or an exception
will be raised. The caller is responsible for ensuring that value types
must be consistent with the expected data. Compound types (e.g., 2D Mesh)
are supported via wrapper objects that implement the dt_write protocol.
See DTDataFile documentation for more details.
Example::
group.add(idx / 10., { "Output Mesh":DTMesh2D(mesh, grid=grid), "Output Index":idx })
"""
assert self._names == set(values.keys()), "inconsistent variable names"
# DTSeries::SharedSave
self.shared_save(time)
# DTRetGroup::Write
for name in values:
self.datafile().write_anonymous(values[name], "%s_%s" % (self.basename(), name))
# expose the variable for DT
self.datafile().write_anonymous(np.array([], dtype=np.float64), self.basename())
|
[
"numpy.array"
] |
[((5816, 5846), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (5824, 5846), True, 'import numpy as np\n')]
|
import numpy as np
import signal_tl as stl
a = stl.Predicate("a") > 0
b = stl.Predicate("b") <= 0.5
phi = stl.Until(a, b)
t = np.linspace(0, 50, 201)
x = np.cos(t)
y = np.sin(t)
trace = {
"a": stl.Signal(x, t),
"b": stl.Signal(x, t),
}
rob = stl.compute_robustness(phi, trace)
print(rob.at(0))
|
[
"signal_tl.Predicate",
"signal_tl.Signal",
"numpy.sin",
"signal_tl.Until",
"numpy.cos",
"numpy.linspace",
"signal_tl.compute_robustness"
] |
[((109, 124), 'signal_tl.Until', 'stl.Until', (['a', 'b'], {}), '(a, b)\n', (118, 124), True, 'import signal_tl as stl\n'), ((130, 153), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(201)'], {}), '(0, 50, 201)\n', (141, 153), True, 'import numpy as np\n'), ((158, 167), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (164, 167), True, 'import numpy as np\n'), ((172, 181), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (178, 181), True, 'import numpy as np\n'), ((256, 290), 'signal_tl.compute_robustness', 'stl.compute_robustness', (['phi', 'trace'], {}), '(phi, trace)\n', (278, 290), True, 'import signal_tl as stl\n'), ((49, 67), 'signal_tl.Predicate', 'stl.Predicate', (['"""a"""'], {}), "('a')\n", (62, 67), True, 'import signal_tl as stl\n'), ((76, 94), 'signal_tl.Predicate', 'stl.Predicate', (['"""b"""'], {}), "('b')\n", (89, 94), True, 'import signal_tl as stl\n'), ((202, 218), 'signal_tl.Signal', 'stl.Signal', (['x', 't'], {}), '(x, t)\n', (212, 218), True, 'import signal_tl as stl\n'), ((229, 245), 'signal_tl.Signal', 'stl.Signal', (['x', 't'], {}), '(x, t)\n', (239, 245), True, 'import signal_tl as stl\n')]
|
"""
File: cartpole.py
Created: 2017-03-06
By <NAME>, <EMAIL>
Description:
-- Python 3.6 --
Solve the CartPole-v0 problem:
- observed features are expanded with a random transform to ensure linear separability.
- action selection is by dot product of an expanded observation with a weight vector.
- a queued history of recent observations is shuffled and replayed to update the output weights.
- output weights are updated at the end of each incomplete episode by Widrow-Hoff LMS update.
- the target outputs for the LMS algorithm are the means of the past outputs.
- output weights are maintained at a fixed norm for regularization.
"""
import gym
from gym import wrappers
from numpy import *
from numpy.random import uniform,normal
from numpy.linalg import norm
from random import shuffle
from collections import deque
from statistics import mean
env = gym.make('CartPole-v0')
# env = wrappers.Monitor(env, '../experiments/cartpole-experiment-1')
#------------------------------------------------------------------
# Hyperparameters
alpha = 1.0e-1 # the 'learning rate'
maxEpisodes = 1000 # run the agent for 'maxEpisodes'
maxTimeSteps = 500 # maximum number of steps per episode
fixedNorm = 0.5 # output weights are scaled to have norm == 'fixedNorm'
maxHistory = 2500 # maximum number of recent observations for replay
solvedEpisodes = 100 # cartpole is solved when average reward > 195 for 'solvedEpisodes'
#------------------------------------------------------------------
# Observations Transform
inputLength = 4 # length of an observation vector
expansionFactor = 30 # expand observation dimensions by 'expansionFactor'
expandedLength = expansionFactor*inputLength # length of transformed observations
# Feature transform with fixed random weights.
V = normal(scale=1.0, size=(expandedLength, inputLength))
# Output weights, randomly initialized.
W = uniform(low=-1.0, high=1.0, size=expandedLength)
# Fix the norm of the output weights to 'fixedNorm'.
W *= fixedNorm/norm(W)
#------------------------------------------------------------------
def CartPoleAgent(alpha, W, V):
"""
CartPoleAgent solves 'CartPole-v0'.
"""
#--------------------------------------------------
# observation history
H = deque([], maxHistory)
# episode total reward history
R = deque([], solvedEpisodes)
# histories of positive and negative outputs
PO = deque([0], maxHistory)
NO = deque([0], maxHistory)
#--------------------------------------------------
for episode in range(maxEpisodes):
observation = env.reset()
H.append(observation)
totalReward = 0
for t in range(1,maxTimeSteps+1):
env.render()
#--------------------------------------------------
out = dot(tanh(dot(V,observation)), W)
if out < 0:
NO.append(out)
action = 0
else:
PO.append(out)
action = 1
#--------------------------------------------------
observation, reward, done, info = env.step(action)
H.append(observation)
totalReward += reward
#--------------------------------------------------
if done:
R.append(totalReward)
if t < 200:
#------------------------------------------
# Replay shuffled past observations using the
# latest weights.
# Use the means of past outputs as
# LMS algorithm target outputs.
#------------------------------------------
mn = mean(NO)
mp = mean(PO)
shuffle(H)
for obs in H:
h = tanh(dot(V,obs)) # transform the observation
out = dot(h, W)
if out < 0:
e = mn - out
else:
e = mp - out
W += alpha * e * h # Widrow-Hoff LMS update
W *= fixedNorm/norm(W) # keep the weights at fixed norm
#------------------------------------------
#--------------------------------------------------
avgReward = sum(R)/solvedEpisodes
print(f"[{episode:3d}:{totalReward:3.0f}] R:{avgReward:6.2f} mp:{mean(PO):7.3f} mn:{mean(NO):7.3f} len(H):{len(H):4d} W:{W[:2]}", flush=True)
#--------------------------------------------------
if avgReward == 200:
print("Solved.")
return
#--------------------------------------------------
break
#------------------------------------------------------------------
#------------------------------------------------------------------
CartPoleAgent(alpha, W, V)
env.close()
|
[
"numpy.random.uniform",
"gym.make",
"random.shuffle",
"numpy.linalg.norm",
"statistics.mean",
"numpy.random.normal",
"collections.deque"
] |
[((862, 885), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (870, 885), False, 'import gym\n'), ((1855, 1908), 'numpy.random.normal', 'normal', ([], {'scale': '(1.0)', 'size': '(expandedLength, inputLength)'}), '(scale=1.0, size=(expandedLength, inputLength))\n', (1861, 1908), False, 'from numpy.random import uniform, normal\n'), ((1954, 2002), 'numpy.random.uniform', 'uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': 'expandedLength'}), '(low=-1.0, high=1.0, size=expandedLength)\n', (1961, 2002), False, 'from numpy.random import uniform, normal\n'), ((2071, 2078), 'numpy.linalg.norm', 'norm', (['W'], {}), '(W)\n', (2075, 2078), False, 'from numpy.linalg import norm\n'), ((2327, 2348), 'collections.deque', 'deque', (['[]', 'maxHistory'], {}), '([], maxHistory)\n', (2332, 2348), False, 'from collections import deque\n'), ((2392, 2417), 'collections.deque', 'deque', (['[]', 'solvedEpisodes'], {}), '([], solvedEpisodes)\n', (2397, 2417), False, 'from collections import deque\n'), ((2476, 2498), 'collections.deque', 'deque', (['[0]', 'maxHistory'], {}), '([0], maxHistory)\n', (2481, 2498), False, 'from collections import deque\n'), ((2508, 2530), 'collections.deque', 'deque', (['[0]', 'maxHistory'], {}), '([0], maxHistory)\n', (2513, 2530), False, 'from collections import deque\n'), ((3766, 3774), 'statistics.mean', 'mean', (['NO'], {}), '(NO)\n', (3770, 3774), False, 'from statistics import mean\n'), ((3811, 3819), 'statistics.mean', 'mean', (['PO'], {}), '(PO)\n', (3815, 3819), False, 'from statistics import mean\n'), ((3840, 3850), 'random.shuffle', 'shuffle', (['H'], {}), '(H)\n', (3847, 3850), False, 'from random import shuffle\n'), ((4268, 4275), 'numpy.linalg.norm', 'norm', (['W'], {}), '(W)\n', (4272, 4275), False, 'from numpy.linalg import norm\n'), ((4577, 4585), 'statistics.mean', 'mean', (['PO'], {}), '(PO)\n', (4581, 4585), False, 'from statistics import mean\n'), ((4596, 4604), 'statistics.mean', 'mean', (['NO'], {}), '(NO)\n', (4600, 4604), False, 'from statistics import mean\n')]
|
import numpy as np
from lumi_language_id import LanguageIdentifier, data_file
class MultiLayerPerceptron:
"""
A simple implementation of an MLP classifier. This implementation has no training code, but
can be used to run a 'frozen' classifier that was trained by scikit-learn.
"""
def __init__(self, coefs, intercepts):
self.coefs = coefs
self.intercepts = intercepts
def forward(self, row):
"""
Propagate a set of input features (`row`) through the MLP classifier, and get the output
features.
We assume here that all layers but the last one are ReLU layers, and the last one is
a sigmoid layer.
"""
for layer_num, (coefs_layer, intercepts_layer) in enumerate(
zip(self.coefs, self.intercepts)
):
row = row @ coefs_layer + intercepts_layer
if layer_num < len(self.coefs) - 1:
# Apply ReLU activation
row = np.maximum(row, 0.)
else:
# Apply sigmoid activation
row = 1. / (np.exp(-row) + 1.)
return row
def probability(self, row):
"""
Return the probability that this row of input belongs to the 'True' class, in a
binary classifier.
"""
# Binary classifiers are represented with one output in the final layer. Extract this
# output from its array
return self.forward(row)[0]
@staticmethod
def save(filename, coefs, intercepts):
"""
Save the coefficients and intercepts of a trained classifier in a .npz
file that can be loaded without scikit-learn.
"""
version = 1
n_layers = len(coefs)
arrays = {'meta': np.array([version, n_layers])}
for layer_num, (coefs_layer, intercepts_layer) in enumerate(
zip(coefs, intercepts)
):
arrays[f'coefs_{layer_num}'] = coefs_layer
arrays[f'intercepts_{layer_num}'] = intercepts_layer
np.savez(filename, **arrays)
@classmethod
def load(cls, filename):
"""
Load a MultiLayerPerceptron classifier from a .npz file.
"""
arrays = np.load(filename)
version, n_layers = arrays['meta']
if version != 1:
raise NotImplementedError(
"This code only understands MultiLayerPerceptron version 1"
)
coefs = []
intercepts = []
for layer_num in range(n_layers):
coefs_layer = arrays[f'coefs_{layer_num}']
intercepts_layer = arrays[f'intercepts_{layer_num}']
coefs.append(coefs_layer)
intercepts.append(intercepts_layer)
return cls(coefs, intercepts)
class TunedLanguageIdentifier:
"""
A FastText language ID classifier with another classifier on top of it, so
it can produce reliable probability estimates. It will refrain from
detecting a language if the probability of the detection being correct is
less than 0.5.
"""
def __init__(
self,
language_identifier: LanguageIdentifier,
tuned_classifier: MultiLayerPerceptron,
):
self.language_identifier = language_identifier
self.tuned_classifier = tuned_classifier
@classmethod
def load(cls, fasttext_filename='lid.176.ftz', tuned_filename='tuned.npz'):
"""
Load a TunedLanguageIdentifier from its fastText file and an .npz file
of the retuned classifier.
The filenames refer to files in the `lumi_language_id/data` directory,
and default to a classifier that's included with the repository, so
`TunedLanguageIdentifier.load()` should get you a classifier.
"""
lid = LanguageIdentifier(fasttext_filename)
tuned = MultiLayerPerceptron.load(data_file(tuned_filename))
return cls(lid, tuned)
def detect_language(self, text):
"""
Predict the language of a text using fastText.
Returns a pair of the detected language code and its probability (from
0.5 to 1). If the probability we detect is less than 0.5, the detected
language becomes 'und', so that we're not returning an answer that's
probably wrong.
"""
row, language = self.language_identifier.make_data_point(text)
probability = self.tuned_classifier.probability(row)
if probability < 0.5:
language = 'und'
return language, probability
|
[
"lumi_language_id.data_file",
"numpy.load",
"lumi_language_id.LanguageIdentifier",
"numpy.maximum",
"numpy.array",
"numpy.exp",
"numpy.savez"
] |
[((2035, 2063), 'numpy.savez', 'np.savez', (['filename'], {}), '(filename, **arrays)\n', (2043, 2063), True, 'import numpy as np\n'), ((2217, 2234), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2224, 2234), True, 'import numpy as np\n'), ((3785, 3822), 'lumi_language_id.LanguageIdentifier', 'LanguageIdentifier', (['fasttext_filename'], {}), '(fasttext_filename)\n', (3803, 3822), False, 'from lumi_language_id import LanguageIdentifier, data_file\n'), ((1761, 1790), 'numpy.array', 'np.array', (['[version, n_layers]'], {}), '([version, n_layers])\n', (1769, 1790), True, 'import numpy as np\n'), ((3865, 3890), 'lumi_language_id.data_file', 'data_file', (['tuned_filename'], {}), '(tuned_filename)\n', (3874, 3890), False, 'from lumi_language_id import LanguageIdentifier, data_file\n'), ((984, 1004), 'numpy.maximum', 'np.maximum', (['row', '(0.0)'], {}), '(row, 0.0)\n', (994, 1004), True, 'import numpy as np\n'), ((1093, 1105), 'numpy.exp', 'np.exp', (['(-row)'], {}), '(-row)\n', (1099, 1105), True, 'import numpy as np\n')]
|
import numpy as np
from metod_alg import metod_analysis as mt_ays
def evaluate_quantities_with_points(beta, x_tr, y_tr, d,
g, func_args):
"""
For trajectories x^(k_x) and y^(k_y), where k_x = (0,...,K_x) and k_y =
(0,...,K_y), evaluate quantites.
Parameters
----------
beta : float or integer
Small constant step size to compute the partner points.
x_tr : 2-D array with shape (iterations + 1, d)
Array containing steepest descent iterations from the first
starting point.
y_tr : 2-D array with shape (iterations + 1, d)
Array containing steepest descent iterations from the
second starting point.
min_x : integer
Region of attraction index of x_tr.
min_y : integer
Region of attraction index of y_tr.
d : integer
Size of dimension.
g : gradient of objective function.
``g(x, *func_args) -> 1-D array with shape (d, )``
where ``x`` is a 1-D array with shape(d, ) and func_args is a
tuple of arguments needed to compute the gradient.
func_args : tuple
Arguments passed to g.
Returns
-------
store_quantites : 2-D array with shape (iterations, 5)
Computation of c1 and c2 at each iteration, where
c_1 = ||b||^2, c_2 = b^T (x - y) and
b = beta * (g(y, *func_args) - g(x, *func_args)).
sum_quantities : 1-D array of shape iterations
Compute c_1 + c_2 at each iteration.
"""
store_beta = np.zeros((4, 2))
sum_beta = np.zeros((4))
index = 0
for j in range(1, 3):
for k in range(1, 3):
x = x_tr[j, :].reshape(d, )
y = y_tr[k, :].reshape(d, )
store_beta[index, 0] = (beta ** 2 *
(np.linalg.norm(g(y, *func_args) -
g(x, *func_args)) ** 2))
store_beta[index, 1] = (2 * beta * (g(y, *func_args) -
g(x, *func_args)).T @ (x - y))
calc = mt_ays.check_quantities(beta, x, y, g, func_args)
assert(np.round(calc, 5) == np.round(np.sum(store_beta[index]), 5))
sum_beta[index] = calc
index += 1
return store_beta, sum_beta
|
[
"numpy.round",
"numpy.zeros",
"numpy.sum",
"metod_alg.metod_analysis.check_quantities"
] |
[((1627, 1643), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (1635, 1643), True, 'import numpy as np\n'), ((1659, 1670), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1667, 1670), True, 'import numpy as np\n'), ((2169, 2218), 'metod_alg.metod_analysis.check_quantities', 'mt_ays.check_quantities', (['beta', 'x', 'y', 'g', 'func_args'], {}), '(beta, x, y, g, func_args)\n', (2192, 2218), True, 'from metod_alg import metod_analysis as mt_ays\n'), ((2238, 2255), 'numpy.round', 'np.round', (['calc', '(5)'], {}), '(calc, 5)\n', (2246, 2255), True, 'import numpy as np\n'), ((2268, 2293), 'numpy.sum', 'np.sum', (['store_beta[index]'], {}), '(store_beta[index])\n', (2274, 2293), True, 'import numpy as np\n')]
|
# pylint: skip-file
import numpy as np
from nasbench_asr.quiet_tensorflow import tensorflow as tf
def preprocess(
*,
ds,
encoder,
featurizer,
norm_stats=None,
epsilon=0.001,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=True,
max_feature_size=0
):
"""
Args:
- ds: yields (audio, sentence) tuples where
- audio has shape [None], and is of type tf.float32
- sentence has shape [], and is of type tf.string
Returns:
- ds: yields (feature, feature_size, encoded, encoded_size) tuples where
- feature has shape [time, channels], and is of type tf.float32
- feature_size has shape [], and is of type tf.int32, and represents
the number of time frames
- encoded has shape [None], and is of type tf.int32, and represents a
text encoded version of the original sentence; it contains values in
the range [1, encoder.vocab_size)
- encoded_size has shape [], and is of type tf.int32, and represents
the number of tokens in each text encoded version of the original
sentence
- featurizer
- encoder
"""
if norm_stats:
norm_stats = np.load(norm_stats)
mean = norm_stats['moving_mean']
variance = norm_stats['moving_variance']
norm_stats = True
def preprocess_map_func(audio, sentence):
feature = featurizer(audio)
feature_size = tf.shape(feature)[0]
encoded = encoder.get_encoded_from_sentence(sentence)
encoded_size = tf.shape(encoded)[0]
if norm_stats:
feature = (feature - mean) / tf.math.sqrt(variance + epsilon)
return feature, feature_size, encoded, encoded_size
ds = ds.map(preprocess_map_func,
num_parallel_calls=num_parallel_calls,
deterministic=deterministic)
if max_feature_size > 0:
def filter_fn(feature, feature_size, encoded, encoded_size):
return feature_size < tf.saturate_cast(max_feature_size, tf.int32)
ds = ds.filter(filter_fn)
return ds
|
[
"numpy.load",
"nasbench_asr.quiet_tensorflow.tensorflow.shape",
"nasbench_asr.quiet_tensorflow.tensorflow.saturate_cast",
"nasbench_asr.quiet_tensorflow.tensorflow.math.sqrt"
] |
[((1223, 1242), 'numpy.load', 'np.load', (['norm_stats'], {}), '(norm_stats)\n', (1230, 1242), True, 'import numpy as np\n'), ((1467, 1484), 'nasbench_asr.quiet_tensorflow.tensorflow.shape', 'tf.shape', (['feature'], {}), '(feature)\n', (1475, 1484), True, 'from nasbench_asr.quiet_tensorflow import tensorflow as tf\n'), ((1573, 1590), 'nasbench_asr.quiet_tensorflow.tensorflow.shape', 'tf.shape', (['encoded'], {}), '(encoded)\n', (1581, 1590), True, 'from nasbench_asr.quiet_tensorflow import tensorflow as tf\n'), ((1659, 1691), 'nasbench_asr.quiet_tensorflow.tensorflow.math.sqrt', 'tf.math.sqrt', (['(variance + epsilon)'], {}), '(variance + epsilon)\n', (1671, 1691), True, 'from nasbench_asr.quiet_tensorflow import tensorflow as tf\n'), ((2024, 2068), 'nasbench_asr.quiet_tensorflow.tensorflow.saturate_cast', 'tf.saturate_cast', (['max_feature_size', 'tf.int32'], {}), '(max_feature_size, tf.int32)\n', (2040, 2068), True, 'from nasbench_asr.quiet_tensorflow import tensorflow as tf\n')]
|
#! python3
# -*- encoding: utf-8 -*-
'''
@Time : 2021/07/13
@Author : jincheng.lyu
1. Get all categories
2. Generate label for each image
3. All images by using multithreading
'''
import cv2
import datetime
import glob
import imagesize
import json
import mmcv
import numpy as np
import os
import os.path as osp
from pycococreatortools import pycococreatortools
from tqdm import tqdm
from typing import List
from mmdet.core import get_classes
INFO = {
"description": "Leaf Dataset",
"url": "https://github.com/waspinator/pycococreator",
"version": "0.1.0",
"year": 2021,
"contributor": "Jincheng_Lyu",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Attribution-NonCommercial-ShareAlike License",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}
]
def getSize(imgpath):
width, height = imagesize.get(imgpath)
# print(width, height)
return width, height
def genCats(dataroot, classwise=True) -> List:
cat_list = os.listdir(dataroot)
cat_list.sort()
categories = []
# cat id starts from 1
if classwise:
# check number of categories
if len(cat_list) != len(get_classes("pinctada")):
print(f"*** Warning: length of categories in {dataroot} is not the same \
with dataset, we will use the dataset category names")
cat_list = get_classes("pinctada")
for idx, cat in enumerate(cat_list):
categories.append({
"supercategory": cat,
"id": idx+1, # noqa
"name": cat
})
else:
categories = [{
"supercategory": 'object',
"id": 0, # noqa
"name": 'object'
}]
return categories
def genImgs(dataroot):
imgpaths = glob.glob(dataroot + '/*.png')
imgpaths.sort()
images = []
# img id starts from 0
for id, imgpath in enumerate(imgpaths):
w, h = getSize(imgpath)
filename = osp.basename(imgpath)
images.append({
"height": h,
"width": w,
"id": id,
"file_name": filename
})
return images
def proc_one_img(image_id, imgpath, coco_output, CATEGORIES):
global segmentation_id
# image
filename = osp.basename(imgpath)
img = cv2.imread(imgpath, cv2.IMREAD_UNCHANGED)
image_info = pycococreatortools.create_image_info(
image_id,
filename,
getSize(imgpath)
)
coco_output["images"].append(image_info)
# annotation
mask = img[:, :, -1]
bin_mask = np.where(mask > 128, 1, 0)
if args.classwise:
class_id = [x['id'] for x in CATEGORIES if x['name'] in imgpath][0]
else:
class_id = [x['id'] for x in CATEGORIES][0] # use for class-agnostic mask
# class_id = 0 # use for class-agnostic mask
category_info = {'id': class_id, 'is_crowd': 0}
# We use RLE encode in pycococreatortools.create_annotation_info
# neverthless what is_crowd is to enable hole encoded in mask
annotation_info = pycococreatortools.create_annotation_info(
segmentation_id,
image_id,
category_info,
bin_mask,
getSize(imgpath),
tolerance=2
)
if annotation_info is not None:
coco_output["annotations"].append(annotation_info)
segmentation_id += 1
def genAnnsMp(dataroot):
import concurrent.futures
imgpaths = glob.glob(dataroot + '/**/*.png')
imgpaths.sort()
# imgpaths_rope = [x for x in imgpaths if 'Rope' in x]
# import random
# random.seed(0)
# imgpaths = random.sample(imgpaths, 100)
# imgpaths.extend(imgpaths_rope[:100])
# 2.
# imgpaths = imgpaths[:100]
CATEGORIES = genCats(dataroot, classwise=args.classwise)
coco_output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []
}
# img id starts from 0
# segmentation_id = 1
global segmentation_id
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
futures = {
executor.submit(proc_one_img, image_id, imgpath, coco_output, CATEGORIES):
(image_id, imgpath) for (image_id, imgpath) in enumerate(imgpaths)
}
prog_bar = mmcv.ProgressBar(len(imgpaths))
for future in concurrent.futures.as_completed(futures):
image_id, imgpath = futures[future]
try:
result = future.result()
except Exception as exc:
print('%r generated an exception: %s' % (imgpath, exc))
prog_bar.update()
return coco_output
def genAnns(dataroot):
imgpaths = glob.glob(dataroot + '/**/*.png')
imgpaths.sort()
# imgpaths_rope = [x for x in imgpaths if 'Rope' in x]
# import random
# random.seed(0)
# imgpaths = random.sample(imgpaths, 100)
# imgpaths.extend(imgpaths_rope[:100])
# 2.
# imgpaths = imgpaths[:100]
CATEGORIES = genCats(dataroot, classwise=args.classwise)
coco_output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []
}
# img id starts from 0
segmentation_id = 1
for image_id, imgpath in enumerate(tqdm(imgpaths)):
# image
filename = osp.basename(imgpath)
img = cv2.imread(imgpath, cv2.IMREAD_UNCHANGED)
image_info = pycococreatortools.create_image_info(
image_id,
filename,
getSize(imgpath)
)
coco_output["images"].append(image_info)
# annotation
mask = img[:, :, -1]
bin_mask = np.where(mask > 128, 1, 0)
if args.classwise:
class_id = [x['id'] for x in CATEGORIES if x['name'] in imgpath][0]
else:
class_id = [x['id'] for x in CATEGORIES][0] # use for class-agnostic mask
category_info = {'id': class_id, 'is_crowd': 0}
# We use RLE encode in pycococreatortools.create_annotation_info
# neverthless what is_crowd is to enable hole encoded in mask
annotation_info = pycococreatortools.create_annotation_info(
segmentation_id,
image_id,
category_info,
bin_mask,
getSize(imgpath),
tolerance=2
)
if annotation_info is not None:
coco_output["annotations"].append(annotation_info)
segmentation_id += 1
return coco_output
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Pinctada dataset convert to COCO format')
parser.add_argument(
'--dataroot', '-d', type=str, help='dataset root directory'
)
parser.add_argument(
'--split', type=str, help='choose from train/val/test'
)
parser.add_argument(
'--ann_dir', type=str, help='annotation directory'
)
parser.add_argument(
'--classwise', action='store_true', help='set to class-specified dataset'
)
args = parser.parse_args()
# dataroot = "/ldap_home/jincheng.lyu/data/product_segmentation/synthetics/train"
dataroot = osp.join(args.dataroot, args.split)
segmentation_id = 1
coco_output = genAnnsMp(dataroot)
ann_path = osp.join(args.ann_dir, f"instances_{args.split}.json")
with open(ann_path, 'w') as f:
json.dump(coco_output, f, indent=4)
|
[
"json.dump",
"tqdm.tqdm",
"imagesize.get",
"argparse.ArgumentParser",
"os.path.basename",
"cv2.imread",
"datetime.datetime.utcnow",
"numpy.where",
"glob.glob",
"os.path.join",
"os.listdir",
"mmdet.core.get_classes"
] |
[((917, 939), 'imagesize.get', 'imagesize.get', (['imgpath'], {}), '(imgpath)\n', (930, 939), False, 'import imagesize\n'), ((1055, 1075), 'os.listdir', 'os.listdir', (['dataroot'], {}), '(dataroot)\n', (1065, 1075), False, 'import os\n'), ((1863, 1893), 'glob.glob', 'glob.glob', (["(dataroot + '/*.png')"], {}), "(dataroot + '/*.png')\n", (1872, 1893), False, 'import glob\n'), ((2349, 2370), 'os.path.basename', 'osp.basename', (['imgpath'], {}), '(imgpath)\n', (2361, 2370), True, 'import os.path as osp\n'), ((2381, 2422), 'cv2.imread', 'cv2.imread', (['imgpath', 'cv2.IMREAD_UNCHANGED'], {}), '(imgpath, cv2.IMREAD_UNCHANGED)\n', (2391, 2422), False, 'import cv2\n'), ((2648, 2674), 'numpy.where', 'np.where', (['(mask > 128)', '(1)', '(0)'], {}), '(mask > 128, 1, 0)\n', (2656, 2674), True, 'import numpy as np\n'), ((3500, 3533), 'glob.glob', 'glob.glob', (["(dataroot + '/**/*.png')"], {}), "(dataroot + '/**/*.png')\n", (3509, 3533), False, 'import glob\n'), ((4788, 4821), 'glob.glob', 'glob.glob', (["(dataroot + '/**/*.png')"], {}), "(dataroot + '/**/*.png')\n", (4797, 4821), False, 'import glob\n'), ((6662, 6740), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pinctada dataset convert to COCO format"""'}), "(description='Pinctada dataset convert to COCO format')\n", (6685, 6740), False, 'import argparse\n'), ((7274, 7309), 'os.path.join', 'osp.join', (['args.dataroot', 'args.split'], {}), '(args.dataroot, args.split)\n', (7282, 7309), True, 'import os.path as osp\n'), ((7392, 7446), 'os.path.join', 'osp.join', (['args.ann_dir', 'f"""instances_{args.split}.json"""'], {}), "(args.ann_dir, f'instances_{args.split}.json')\n", (7400, 7446), True, 'import os.path as osp\n'), ((2052, 2073), 'os.path.basename', 'osp.basename', (['imgpath'], {}), '(imgpath)\n', (2064, 2073), True, 'import os.path as osp\n'), ((5385, 5399), 'tqdm.tqdm', 'tqdm', (['imgpaths'], {}), '(imgpaths)\n', (5389, 5399), False, 'from tqdm import tqdm\n'), ((5437, 5458), 'os.path.basename', 'osp.basename', (['imgpath'], {}), '(imgpath)\n', (5449, 5458), True, 'import os.path as osp\n'), ((5473, 5514), 'cv2.imread', 'cv2.imread', (['imgpath', 'cv2.IMREAD_UNCHANGED'], {}), '(imgpath, cv2.IMREAD_UNCHANGED)\n', (5483, 5514), False, 'import cv2\n'), ((5776, 5802), 'numpy.where', 'np.where', (['(mask > 128)', '(1)', '(0)'], {}), '(mask > 128, 1, 0)\n', (5784, 5802), True, 'import numpy as np\n'), ((7490, 7525), 'json.dump', 'json.dump', (['coco_output', 'f'], {'indent': '(4)'}), '(coco_output, f, indent=4)\n', (7499, 7525), False, 'import json\n'), ((653, 679), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (677, 679), False, 'import datetime\n'), ((1441, 1464), 'mmdet.core.get_classes', 'get_classes', (['"""pinctada"""'], {}), "('pinctada')\n", (1452, 1464), False, 'from mmdet.core import get_classes\n'), ((1235, 1258), 'mmdet.core.get_classes', 'get_classes', (['"""pinctada"""'], {}), "('pinctada')\n", (1246, 1258), False, 'from mmdet.core import get_classes\n')]
|
from os import remove
from unittest import TestCase
from PIL import Image
import numpy as np
from requests import get, post
from screamshot import bytes_to_file
def _rmsd(img1, img2):
img1 = (img1 - np.mean(img1)) / (np.std(img1))
img2 = (img2 - np.mean(img2)) / (np.std(img2))
return np.sqrt(np.mean((img1 - img2) ** 2))
# Usefull for basic tests
def _is_same_image(img1, img2):
return (img1.size == img2.size) and (abs(_rmsd(img1, img2)) < 0.05)
class TestTakeScreenshot(TestCase):
def test_simple_get_take_screenshot_request(self):
request = get('http://127.0.0.1:8000/api/take-screenshot',
params={'url': 'http://127.0.0.1:5000/index'})
self.assertEqual(request.status_code, 200)
self.assertIsInstance(request.content, bytes)
bytes_to_file(request.content,
'test_simple_get_take_screenshot_request.png')
screenshot_img = Image.open(
'test_simple_get_take_screenshot_request.png')
img = Image.open('tests/server/static/images/600_800_index_page.png')
self.assertTrue(_is_same_image(screenshot_img, img))
remove('test_simple_get_take_screenshot_request.png')
def test_simple_post_take_screenshot_request(self):
request = post('http://127.0.0.1:8000/api/take-screenshot',
params={'url': 'http://127.0.0.1:5000/index'},
data={'selector': '#godot'})
self.assertEqual(request.status_code, 200)
self.assertIsInstance(request.content, bytes)
bytes_to_file(request.content,
'test_simple_post_take_screenshot_request.png')
screenshot_img = Image.open(
'test_simple_post_take_screenshot_request.png')
img = Image.open(
'tests/server/static/images/aww_dog.jpg').convert('RGBA')
self.assertTrue(_is_same_image(screenshot_img, img))
remove('test_simple_post_take_screenshot_request.png')
|
[
"os.remove",
"numpy.std",
"PIL.Image.open",
"numpy.mean",
"screamshot.bytes_to_file",
"requests.get",
"requests.post"
] |
[((227, 239), 'numpy.std', 'np.std', (['img1'], {}), '(img1)\n', (233, 239), True, 'import numpy as np\n'), ((278, 290), 'numpy.std', 'np.std', (['img2'], {}), '(img2)\n', (284, 290), True, 'import numpy as np\n'), ((311, 338), 'numpy.mean', 'np.mean', (['((img1 - img2) ** 2)'], {}), '((img1 - img2) ** 2)\n', (318, 338), True, 'import numpy as np\n'), ((583, 682), 'requests.get', 'get', (['"""http://127.0.0.1:8000/api/take-screenshot"""'], {'params': "{'url': 'http://127.0.0.1:5000/index'}"}), "('http://127.0.0.1:8000/api/take-screenshot', params={'url':\n 'http://127.0.0.1:5000/index'})\n", (586, 682), False, 'from requests import get, post\n'), ((815, 892), 'screamshot.bytes_to_file', 'bytes_to_file', (['request.content', '"""test_simple_get_take_screenshot_request.png"""'], {}), "(request.content, 'test_simple_get_take_screenshot_request.png')\n", (828, 892), False, 'from screamshot import bytes_to_file\n'), ((940, 997), 'PIL.Image.open', 'Image.open', (['"""test_simple_get_take_screenshot_request.png"""'], {}), "('test_simple_get_take_screenshot_request.png')\n", (950, 997), False, 'from PIL import Image\n'), ((1025, 1088), 'PIL.Image.open', 'Image.open', (['"""tests/server/static/images/600_800_index_page.png"""'], {}), "('tests/server/static/images/600_800_index_page.png')\n", (1035, 1088), False, 'from PIL import Image\n'), ((1158, 1211), 'os.remove', 'remove', (['"""test_simple_get_take_screenshot_request.png"""'], {}), "('test_simple_get_take_screenshot_request.png')\n", (1164, 1211), False, 'from os import remove\n'), ((1287, 1416), 'requests.post', 'post', (['"""http://127.0.0.1:8000/api/take-screenshot"""'], {'params': "{'url': 'http://127.0.0.1:5000/index'}", 'data': "{'selector': '#godot'}"}), "('http://127.0.0.1:8000/api/take-screenshot', params={'url':\n 'http://127.0.0.1:5000/index'}, data={'selector': '#godot'})\n", (1291, 1416), False, 'from requests import get, post\n'), ((1573, 1651), 'screamshot.bytes_to_file', 'bytes_to_file', (['request.content', '"""test_simple_post_take_screenshot_request.png"""'], {}), "(request.content, 'test_simple_post_take_screenshot_request.png')\n", (1586, 1651), False, 'from screamshot import bytes_to_file\n'), ((1699, 1757), 'PIL.Image.open', 'Image.open', (['"""test_simple_post_take_screenshot_request.png"""'], {}), "('test_simple_post_take_screenshot_request.png')\n", (1709, 1757), False, 'from PIL import Image\n'), ((1936, 1990), 'os.remove', 'remove', (['"""test_simple_post_take_screenshot_request.png"""'], {}), "('test_simple_post_take_screenshot_request.png')\n", (1942, 1990), False, 'from os import remove\n'), ((209, 222), 'numpy.mean', 'np.mean', (['img1'], {}), '(img1)\n', (216, 222), True, 'import numpy as np\n'), ((260, 273), 'numpy.mean', 'np.mean', (['img2'], {}), '(img2)\n', (267, 273), True, 'import numpy as np\n'), ((1785, 1837), 'PIL.Image.open', 'Image.open', (['"""tests/server/static/images/aww_dog.jpg"""'], {}), "('tests/server/static/images/aww_dog.jpg')\n", (1795, 1837), False, 'from PIL import Image\n')]
|
import threading
import anki_vector
from anki_vector import events
import numpy as np
import argparse
import cv2
import torch
import anki_vector
from anki_vector.events import Events
from anki_vector.util import degrees
import threading
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.keypoints import extract_keypoints, group_keypoints
from modules.load_state import load_state
from modules.pose import Pose, track_poses
from val import normalize, pad_width
from utils import detect_hand,detect_face, detect_touch
import time
from demo import infer_fast
previous_poses = []
# taken from demo.py
def run_on_image(net, height_size, cpu, track, smooth,img, stride, upsample_ratio, num_keypoints,threshold):
global previous_poses
orig_img = img.copy()
heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)
score = 0
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
pose = Pose(pose_keypoints, pose_entries[n][18])
current_poses.append(pose)
if track:
track_poses(previous_poses, current_poses, smooth=smooth)
previous_poses = current_poses
for pose in current_poses:
pose.draw(img)
img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
for pose in current_poses:
# cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
r_hand_center, r_hand_width, l_hand_center, l_hand_width, = detect_hand(pose)
if -1 not in r_hand_center:
cv2.circle(img, (r_hand_center[0], r_hand_center[1]), 5, (255, 0, 0), 5)
cv2.rectangle(img, (r_hand_center[0]-r_hand_width, r_hand_center[1]-r_hand_width),
(r_hand_center[0] + r_hand_width, r_hand_center[1] + r_hand_width), (0, 255, 255))
if -1 not in l_hand_center:
cv2.circle(img, (l_hand_center[0], l_hand_center[1]), 5, (255, 0, 0), 5)
cv2.rectangle(img, (l_hand_center[0]-l_hand_width, l_hand_center[1]-l_hand_width),
(l_hand_center[0] + l_hand_width, l_hand_center[1] + l_hand_width), (0, 255, 255))
face_center, face_width = detect_face(pose)
if -1 not in face_center:
cv2.rectangle(img, (face_center[0] - face_width, face_center[1] - face_width),
(face_center[0] + face_width, face_center[1] + face_width), (0, 0, 255))
# (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
if track:
cv2.putText(img, 'id: {}'.format(pose.id), (face_center[0] - face_width, face_center[1] - face_width - 16),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
if -1 not in r_hand_center:
x,y,h,w, score= detect_touch(face_center,face_width,r_hand_center,r_hand_width)
if h!=0:
cv2.rectangle(img, (x,y),
(x+h,y+w), (255, 0, 255))
cv2.putText(img, f'Score: {score:0.2f}', (x, y - 16),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))
if -1 not in l_hand_center:
x, y, h, w, score = detect_touch(face_center, face_width, l_hand_center, l_hand_width)
if h != 0:
cv2.rectangle(img, (x, y),
(x +h, y + w), (255, 0, 255))
cv2.putText(img, f'Score: {score:0.2f}', (x, y - 16),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))
cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
delay = 1
detect = False
key = cv2.waitKey(delay)
if key == 27: # esc
return
elif key == 112: # 'p'
if delay == 33:
delay = 0
else:
delay = 33
return score>threshold
said_text = False
touched = False
last_touched = 0
def on_robot_observed_touch(robot, event_type, event):
print("Vector sees a touch")
global said_text
global last_touched
if not said_text:
last_touched = time.time()
said_text = True
robot.behavior.say_text("Don't touch your face")
anim = robot.anim.play_animation('anim_rtpickup_loop_09', ignore_head_track=True)
said_text = False
robot.behavior.set_head_angle(degrees(25.0))
robot.behavior.set_lift_height(0.0)
def on_new_raw_camera_image(robot, event_type, event,net):
print("Display new camera image " , time.time())
global previous_poses
global last_touched
# opencvImage = cv2.cvtColor(np.array(event.image), cv2.COLOR_RGB2BGR) #This has lower latency but when a touch is detected this will lag behind
opencvImage = cv2.cvtColor(np.array(robot.camera.latest_image.raw_image), cv2.COLOR_RGB2BGR)
# print(opencvImage.shape)
# cv2.imshow('hello', opencvImage)
# cv2.waitKey(1)
stride = 8
upsample_ratio = 4
num_keypoints = Pose.num_kpts
threshold = 0.15 #score for detecting face touch
touched = run_on_image(net, 256, cpu=False, track=True, smooth=True, img=opencvImage, stride=stride, upsample_ratio=upsample_ratio, num_keypoints=num_keypoints, threshold= threshold)
if touched and 2 < time.time()-last_touched:
last_touched = time.time()
robot.conn.run_coroutine(robot.events.dispatch_event_by_name('face touch detected', event_name='touched'))
def main():
net = PoseEstimationWithMobileNet()
checkpoint = torch.load("models/checkpoint_iter_370000.pth", map_location='cpu')
load_state(net, checkpoint)
net = net.cuda()
done = threading.Event()
with anki_vector.AsyncRobot() as robot:
robot.camera.init_camera_feed()
robot.camera.image_streaming_enabled()
# preparing robot pose ready
robot.behavior.set_head_angle(degrees(25.0))
robot.behavior.set_lift_height(0.0)
#events for detection and new camera feed
robot.events.subscribe(on_new_raw_camera_image, events.Events.new_raw_camera_image, net)
robot.events.subscribe_by_name(on_robot_observed_touch, event_name='touched')
print("------ waiting for camera events, press ctrl+c to exit early ------")
try:
if not done.wait(timeout=600):
print("------ Did not receive a new camera image! ------")
except KeyboardInterrupt:
pass
if __name__=="__main__":
main()
|
[
"numpy.ones",
"utils.detect_touch",
"cv2.rectangle",
"cv2.imshow",
"utils.detect_hand",
"torch.load",
"modules.pose.Pose",
"threading.Event",
"models.with_mobilenet.PoseEstimationWithMobileNet",
"cv2.circle",
"cv2.waitKey",
"utils.detect_face",
"cv2.addWeighted",
"modules.keypoints.group_keypoints",
"anki_vector.util.degrees",
"anki_vector.AsyncRobot",
"cv2.putText",
"modules.pose.track_poses",
"modules.load_state.load_state",
"modules.keypoints.extract_keypoints",
"time.time",
"demo.infer_fast",
"numpy.array"
] |
[((838, 900), 'demo.infer_fast', 'infer_fast', (['net', 'img', 'height_size', 'stride', 'upsample_ratio', 'cpu'], {}), '(net, img, height_size, stride, upsample_ratio, cpu)\n', (848, 900), False, 'from demo import infer_fast\n'), ((1260, 1315), 'modules.keypoints.group_keypoints', 'group_keypoints', (['all_keypoints_by_type', 'pafs'], {'demo': '(True)'}), '(all_keypoints_by_type, pafs, demo=True)\n', (1275, 1315), False, 'from modules.keypoints import extract_keypoints, group_keypoints\n'), ((2432, 2475), 'cv2.addWeighted', 'cv2.addWeighted', (['orig_img', '(0.6)', 'img', '(0.4)', '(0)'], {}), '(orig_img, 0.6, img, 0.4, 0)\n', (2447, 2475), False, 'import cv2\n'), ((4831, 4895), 'cv2.imshow', 'cv2.imshow', (['"""Lightweight Human Pose Estimation Python Demo"""', 'img'], {}), "('Lightweight Human Pose Estimation Python Demo', img)\n", (4841, 4895), False, 'import cv2\n'), ((4952, 4970), 'cv2.waitKey', 'cv2.waitKey', (['delay'], {}), '(delay)\n', (4963, 4970), False, 'import cv2\n'), ((6769, 6798), 'models.with_mobilenet.PoseEstimationWithMobileNet', 'PoseEstimationWithMobileNet', ([], {}), '()\n', (6796, 6798), False, 'from models.with_mobilenet import PoseEstimationWithMobileNet\n'), ((6816, 6883), 'torch.load', 'torch.load', (['"""models/checkpoint_iter_370000.pth"""'], {'map_location': '"""cpu"""'}), "('models/checkpoint_iter_370000.pth', map_location='cpu')\n", (6826, 6883), False, 'import torch\n'), ((6888, 6915), 'modules.load_state.load_state', 'load_state', (['net', 'checkpoint'], {}), '(net, checkpoint)\n', (6898, 6915), False, 'from modules.load_state import load_state\n'), ((6948, 6965), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6963, 6965), False, 'import threading\n'), ((1081, 1171), 'modules.keypoints.extract_keypoints', 'extract_keypoints', (['heatmaps[:, :, kpt_idx]', 'all_keypoints_by_type', 'total_keypoints_num'], {}), '(heatmaps[:, :, kpt_idx], all_keypoints_by_type,\n total_keypoints_num)\n', (1098, 1171), False, 'from modules.keypoints import extract_keypoints, group_keypoints\n'), ((2143, 2184), 'modules.pose.Pose', 'Pose', (['pose_keypoints', 'pose_entries[n][18]'], {}), '(pose_keypoints, pose_entries[n][18])\n', (2147, 2184), False, 'from modules.pose import Pose, track_poses\n'), ((2255, 2312), 'modules.pose.track_poses', 'track_poses', (['previous_poses', 'current_poses'], {'smooth': 'smooth'}), '(previous_poses, current_poses, smooth=smooth)\n', (2266, 2312), False, 'from modules.pose import Pose, track_poses\n'), ((2648, 2665), 'utils.detect_hand', 'detect_hand', (['pose'], {}), '(pose)\n', (2659, 2665), False, 'from utils import detect_hand, detect_face, detect_touch\n'), ((3388, 3405), 'utils.detect_face', 'detect_face', (['pose'], {}), '(pose)\n', (3399, 3405), False, 'from utils import detect_hand, detect_face, detect_touch\n'), ((5414, 5425), 'time.time', 'time.time', ([], {}), '()\n', (5423, 5425), False, 'import time\n'), ((5824, 5835), 'time.time', 'time.time', ([], {}), '()\n', (5833, 5835), False, 'import time\n'), ((6073, 6118), 'numpy.array', 'np.array', (['robot.camera.latest_image.raw_image'], {}), '(robot.camera.latest_image.raw_image)\n', (6081, 6118), True, 'import numpy as np\n'), ((6617, 6628), 'time.time', 'time.time', ([], {}), '()\n', (6626, 6628), False, 'import time\n'), ((6976, 7000), 'anki_vector.AsyncRobot', 'anki_vector.AsyncRobot', ([], {}), '()\n', (6998, 7000), False, 'import anki_vector\n'), ((1753, 1796), 'numpy.ones', 'np.ones', (['(num_keypoints, 2)'], {'dtype': 'np.int32'}), '((num_keypoints, 2), dtype=np.int32)\n', (1760, 1796), True, 'import numpy as np\n'), ((2723, 2795), 'cv2.circle', 'cv2.circle', (['img', '(r_hand_center[0], r_hand_center[1])', '(5)', '(255, 0, 0)', '(5)'], {}), '(img, (r_hand_center[0], r_hand_center[1]), 5, (255, 0, 0), 5)\n', (2733, 2795), False, 'import cv2\n'), ((2812, 2989), 'cv2.rectangle', 'cv2.rectangle', (['img', '(r_hand_center[0] - r_hand_width, r_hand_center[1] - r_hand_width)', '(r_hand_center[0] + r_hand_width, r_hand_center[1] + r_hand_width)', '(0, 255, 255)'], {}), '(img, (r_hand_center[0] - r_hand_width, r_hand_center[1] -\n r_hand_width), (r_hand_center[0] + r_hand_width, r_hand_center[1] +\n r_hand_width), (0, 255, 255))\n', (2825, 2989), False, 'import cv2\n'), ((3064, 3136), 'cv2.circle', 'cv2.circle', (['img', '(l_hand_center[0], l_hand_center[1])', '(5)', '(255, 0, 0)', '(5)'], {}), '(img, (l_hand_center[0], l_hand_center[1]), 5, (255, 0, 0), 5)\n', (3074, 3136), False, 'import cv2\n'), ((3153, 3330), 'cv2.rectangle', 'cv2.rectangle', (['img', '(l_hand_center[0] - l_hand_width, l_hand_center[1] - l_hand_width)', '(l_hand_center[0] + l_hand_width, l_hand_center[1] + l_hand_width)', '(0, 255, 255)'], {}), '(img, (l_hand_center[0] - l_hand_width, l_hand_center[1] -\n l_hand_width), (l_hand_center[0] + l_hand_width, l_hand_center[1] +\n l_hand_width), (0, 255, 255))\n', (3166, 3330), False, 'import cv2\n'), ((3460, 3619), 'cv2.rectangle', 'cv2.rectangle', (['img', '(face_center[0] - face_width, face_center[1] - face_width)', '(face_center[0] + face_width, face_center[1] + face_width)', '(0, 0, 255)'], {}), '(img, (face_center[0] - face_width, face_center[1] -\n face_width), (face_center[0] + face_width, face_center[1] + face_width),\n (0, 0, 255))\n', (3473, 3619), False, 'import cv2\n'), ((4047, 4113), 'utils.detect_touch', 'detect_touch', (['face_center', 'face_width', 'r_hand_center', 'r_hand_width'], {}), '(face_center, face_width, r_hand_center, r_hand_width)\n', (4059, 4113), False, 'from utils import detect_hand, detect_face, detect_touch\n'), ((4466, 4532), 'utils.detect_touch', 'detect_touch', (['face_center', 'face_width', 'l_hand_center', 'l_hand_width'], {}), '(face_center, face_width, l_hand_center, l_hand_width)\n', (4478, 4532), False, 'from utils import detect_hand, detect_face, detect_touch\n'), ((5663, 5676), 'anki_vector.util.degrees', 'degrees', (['(25.0)'], {}), '(25.0)\n', (5670, 5676), False, 'from anki_vector.util import degrees\n'), ((7174, 7187), 'anki_vector.util.degrees', 'degrees', (['(25.0)'], {}), '(25.0)\n', (7181, 7187), False, 'from anki_vector.util import degrees\n'), ((4156, 4213), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + h, y + w)', '(255, 0, 255)'], {}), '(img, (x, y), (x + h, y + w), (255, 0, 255))\n', (4169, 4213), False, 'import cv2\n'), ((4258, 4362), 'cv2.putText', 'cv2.putText', (['img', 'f"""Score: {score:0.2f}"""', '(x, y - 16)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(255, 255, 0)'], {}), "(img, f'Score: {score:0.2f}', (x, y - 16), cv2.\n FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))", (4269, 4362), False, 'import cv2\n'), ((4580, 4637), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + h, y + w)', '(255, 0, 255)'], {}), '(img, (x, y), (x + h, y + w), (255, 0, 255))\n', (4593, 4637), False, 'import cv2\n'), ((4691, 4795), 'cv2.putText', 'cv2.putText', (['img', 'f"""Score: {score:0.2f}"""', '(x, y - 16)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(255, 255, 0)'], {}), "(img, f'Score: {score:0.2f}', (x, y - 16), cv2.\n FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0))", (4702, 4795), False, 'import cv2\n'), ((6568, 6579), 'time.time', 'time.time', ([], {}), '()\n', (6577, 6579), False, 'import time\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.