code (stringlengths 31 to 1.05M) | apis (list) | extract_api (stringlengths 97 to 1.91M)
---|---|---|
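Each row below pairs a code snippet (the code column) with the list of fully qualified APIs it calls (apis) and a string of per-call records (extract_api). Judging from the rows themselves, each record appears to be a tuple holding the (start, end) character span of the call inside code, the resolved API name, the call expression as written, a repr of its positional and keyword arguments, the argument text with its own span, a flag that seems to mark aliased imports, and the originating import statement. The sketch below shows how such a record could be checked against its code column; the miniature code and extract_api values are made up for illustration and are not taken from the dataset.
import ast
# Hypothetical miniature row (illustration only) mimicking the column layout above.
code = "import numpy as np\nx = np.sqrt(2)\n"
extract_api = "[((23, 33), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\\n', (30, 33), True, 'import numpy as np\\n')]"
for record in ast.literal_eval(extract_api):
    (start, end), full_name = record[0], record[1]
    # The first span appears to cover the whole call expression inside the code column.
    print(full_name, "->", code[start:end])  # numpy.sqrt -> np.sqrt(2)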
import numpy as np
from torch import nn
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
"""
Simple function to init layers
"""
nn.init.orthogonal_(layer.weight, std)
nn.init.constant_(layer.bias, bias_const)
return layer
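# Added usage sketch (not from the original snippet): the helper is meant to be applied to a
# freshly constructed layer before training; the layer sizes below are illustrative only.
if __name__ == "__main__":
    head = layer_init(nn.Linear(64, 4), std=0.01)
    print(head.weight.shape, float(head.bias.abs().sum()))  # bias should be all zeros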
|
[
"torch.nn.init.orthogonal_",
"numpy.sqrt",
"torch.nn.init.constant_"
] |
[((68, 78), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (75, 78), True, 'import numpy as np\n'), ((152, 190), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['layer.weight', 'std'], {}), '(layer.weight, std)\n', (171, 190), False, 'from torch import nn\n'), ((195, 236), 'torch.nn.init.constant_', 'nn.init.constant_', (['layer.bias', 'bias_const'], {}), '(layer.bias, bias_const)\n', (212, 236), False, 'from torch import nn\n')]
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA Procrustean mapper"""
import unittest
import numpy as np
import itertools
from numpy.linalg import norm
from mvpa2.base import externals
from mvpa2.datasets.base import dataset_wizard
from mvpa2.testing import *
from mvpa2.testing.datasets import *
from mvpa2.mappers.procrustean import ProcrusteanMapper
svds = ["numpy"]
if externals.exists("liblapack.so"):
svds += ["dgesvd"]
if externals.exists("scipy"):
svds += ["scipy"]
class ProcrusteanMapperTests(unittest.TestCase):
@sweepargs(oblique=(False, True))
@sweepargs(svd=svds)
@reseed_rng()
def test_simple(self, svd, oblique):
d_orig = datasets["uni2large"].samples
d_orig2 = datasets["uni4large"].samples
for sdim, nf_s, nf_t, full_test in (
("Same 2D", 2, 2, True),
("Same 10D", 10, 10, True),
("2D -> 3D", 2, 3, True),
("3D -> 2D", 3, 2, False),
):
# figure out some "random" rotation
d = max(nf_s, nf_t)
R = get_random_rotation(nf_s, nf_t, d_orig)
if nf_s == nf_t:
adR = np.abs(1.0 - np.linalg.det(R))
self.assertTrue(
adR < 1e-10,
"Determinant of rotation matrix should " "be 1. Got it 1+%g" % adR,
)
self.assertTrue(norm(np.dot(R, R.T) - np.eye(R.shape[0])) < 1e-10)
for (s, scaling), demean in itertools.product(
((0.3, True), (1.0, False)), (False, True)
):
pm = ProcrusteanMapper(
scaling=scaling, oblique=oblique, svd=svd, demean=demean
)
# pm2 = ProcrusteanMapper(scaling=scaling, oblique=oblique)
if demean:
t1, t2 = d_orig[23, 1], d_orig[22, 1]
else:
t1, t2 = 0, 0
full_test = False # although runs, not intended to perform properly
# Create source/target data
d = d_orig[:, :nf_s]
d_s = d + t1
d_t = np.dot(s * d, R) + t2
# train bloody mapper(s)
ds = dataset_wizard(samples=d_s, targets=d_t)
pm.train(ds)
## not possible with new interface
# pm2.train(d_s, d_t)
## verify that both created the same transformation
# npm2proj = norm(pm.proj - pm2.proj)
# self.assertTrue(npm2proj <= 1e-10,
# msg="Got transformation different by norm %g."
# " Had to be less than 1e-10" % npm2proj)
# self.assertTrue(norm(pm._offset_in - pm2._offset_in) <= 1e-10)
# self.assertTrue(norm(pm._offset_out - pm2._offset_out) <= 1e-10)
# do forward transformation on the same source data
d_s_f = pm.forward(d_s)
self.assertEqual(
d_s_f.shape,
d_t.shape,
msg="Mapped shape should be identical to the d_t",
)
dsf = d_s_f - d_t
ndsf = norm(dsf) / norm(d_t)
if full_test:
dsR = norm(s * R - pm.proj)
if not oblique:
self.assertTrue(
dsR <= 1e-12,
msg="We should have got reconstructed rotation+scaling "
"perfectly. Now got d scale*R=%g" % dsR,
)
self.assertTrue(
np.abs(s - pm._scale) < 1e-12,
msg="We should have got reconstructed scale "
"perfectly. Now got %g for %g" % (pm._scale, s),
)
self.assertTrue(
ndsf <= 1e-12,
msg="%s: Failed to get to the target space correctly."
" normed error=%g" % (sdim, ndsf),
)
# Test if we get back
d_s_f_r = pm.reverse(d_s_f)
# Test if recon proj is true inverse except for high->low projection
if nf_s <= nf_t:
assert_almost_equal(
np.dot(pm._proj, pm._recon),
np.eye(pm._proj.shape[0]),
err_msg="Deviation from identity matrix is too large",
)
dsfr = d_s_f_r - d_s
ndsfr = norm(dsfr) / norm(d_s)
if full_test:
self.assertTrue(
ndsfr <= 1e-12,
msg="%s: Failed to reconstruct into source space correctly."
" normed error=%g" % (sdim, ndsfr),
)
@reseed_rng()
def test_reflection(self, rep=10):
for i in range(rep):
from mvpa2.testing.datasets import get_random_rotation
d = np.random.random((100, 2))
T = get_random_rotation(d.shape[1])
d2 = np.dot(d, T)
# scale it up a bit
d2 *= 1.2
# add a reflection by flipping the first dimension
d2[:, 0] *= -1
ds = dataset_wizard(samples=d, targets=d2)
norm0 = np.linalg.norm(d - d2)
mapper = ProcrusteanMapper(scaling=False, reflection=False)
mapper.train(ds)
norm1 = np.linalg.norm(d2 - mapper.forward(ds).samples)
eps = 1e-7
self.assertLess(
norm1,
norm0 + eps,
msg="Procrustes should reduce difference, "
"but %f > %f" % (norm1, norm0),
)
mapper = ProcrusteanMapper(scaling=True, reflection=False)
mapper.train(ds)
norm2 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm2,
norm1 + eps,
msg="Procrustes with scaling should work better, "
"but %f > %f" % (norm2, norm1),
)
mapper = ProcrusteanMapper(scaling=False, reflection=True)
mapper.train(ds)
norm3 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm3,
norm1 + eps,
msg="Procrustes with reflection should work better, "
"but %f > %f" % (norm3, norm1),
)
mapper = ProcrusteanMapper(scaling=True, reflection=True)
mapper.train(ds)
norm4 = np.linalg.norm(d2 - mapper.forward(ds).samples)
self.assertLess(
norm4,
norm3 + eps,
msg="Procrustes with scaling should work better, "
"but %f > %f" % (norm4, norm3),
)
self.assertLess(
norm4,
norm2 + eps,
msg="Procrustes with reflection should work better, "
"but %f > %f" % (norm4, norm2),
)
def suite(): # pragma: no cover
return unittest.makeSuite(ProcrusteanMapperTests)
if __name__ == "__main__": # pragma: no cover
from . import runner
runner.run()
|
[
"numpy.abs",
"numpy.eye",
"numpy.random.random",
"mvpa2.base.externals.exists",
"unittest.makeSuite",
"itertools.product",
"mvpa2.datasets.base.dataset_wizard",
"numpy.linalg.det",
"numpy.dot",
"numpy.linalg.norm",
"mvpa2.testing.datasets.get_random_rotation",
"mvpa2.mappers.procrustean.ProcrusteanMapper"
] |
[((733, 765), 'mvpa2.base.externals.exists', 'externals.exists', (['"""liblapack.so"""'], {}), "('liblapack.so')\n", (749, 765), False, 'from mvpa2.base import externals\n'), ((793, 818), 'mvpa2.base.externals.exists', 'externals.exists', (['"""scipy"""'], {}), "('scipy')\n", (809, 818), False, 'from mvpa2.base import externals\n'), ((7626, 7668), 'unittest.makeSuite', 'unittest.makeSuite', (['ProcrusteanMapperTests'], {}), '(ProcrusteanMapperTests)\n', (7644, 7668), False, 'import unittest\n'), ((1416, 1455), 'mvpa2.testing.datasets.get_random_rotation', 'get_random_rotation', (['nf_s', 'nf_t', 'd_orig'], {}), '(nf_s, nf_t, d_orig)\n', (1435, 1455), False, 'from mvpa2.testing.datasets import get_random_rotation\n'), ((1833, 1894), 'itertools.product', 'itertools.product', (['((0.3, True), (1.0, False))', '(False, True)'], {}), '(((0.3, True), (1.0, False)), (False, True))\n', (1850, 1894), False, 'import itertools\n'), ((5484, 5510), 'numpy.random.random', 'np.random.random', (['(100, 2)'], {}), '((100, 2))\n', (5500, 5510), True, 'import numpy as np\n'), ((5527, 5558), 'mvpa2.testing.datasets.get_random_rotation', 'get_random_rotation', (['d.shape[1]'], {}), '(d.shape[1])\n', (5546, 5558), False, 'from mvpa2.testing.datasets import get_random_rotation\n'), ((5576, 5588), 'numpy.dot', 'np.dot', (['d', 'T'], {}), '(d, T)\n', (5582, 5588), True, 'import numpy as np\n'), ((5750, 5787), 'mvpa2.datasets.base.dataset_wizard', 'dataset_wizard', ([], {'samples': 'd', 'targets': 'd2'}), '(samples=d, targets=d2)\n', (5764, 5787), False, 'from mvpa2.datasets.base import dataset_wizard\n'), ((5809, 5831), 'numpy.linalg.norm', 'np.linalg.norm', (['(d - d2)'], {}), '(d - d2)\n', (5823, 5831), True, 'import numpy as np\n'), ((5854, 5904), 'mvpa2.mappers.procrustean.ProcrusteanMapper', 'ProcrusteanMapper', ([], {'scaling': '(False)', 'reflection': '(False)'}), '(scaling=False, reflection=False)\n', (5871, 5904), False, 'from mvpa2.mappers.procrustean import ProcrusteanMapper\n'), ((6250, 6299), 'mvpa2.mappers.procrustean.ProcrusteanMapper', 'ProcrusteanMapper', ([], {'scaling': '(True)', 'reflection': '(False)'}), '(scaling=True, reflection=False)\n', (6267, 6299), False, 'from mvpa2.mappers.procrustean import ProcrusteanMapper\n'), ((6629, 6678), 'mvpa2.mappers.procrustean.ProcrusteanMapper', 'ProcrusteanMapper', ([], {'scaling': '(False)', 'reflection': '(True)'}), '(scaling=False, reflection=True)\n', (6646, 6678), False, 'from mvpa2.mappers.procrustean import ProcrusteanMapper\n'), ((7011, 7059), 'mvpa2.mappers.procrustean.ProcrusteanMapper', 'ProcrusteanMapper', ([], {'scaling': '(True)', 'reflection': '(True)'}), '(scaling=True, reflection=True)\n', (7028, 7059), False, 'from mvpa2.mappers.procrustean import ProcrusteanMapper\n'), ((1947, 2022), 'mvpa2.mappers.procrustean.ProcrusteanMapper', 'ProcrusteanMapper', ([], {'scaling': 'scaling', 'oblique': 'oblique', 'svd': 'svd', 'demean': 'demean'}), '(scaling=scaling, oblique=oblique, svd=svd, demean=demean)\n', (1964, 2022), False, 'from mvpa2.mappers.procrustean import ProcrusteanMapper\n'), ((2585, 2625), 'mvpa2.datasets.base.dataset_wizard', 'dataset_wizard', ([], {'samples': 'd_s', 'targets': 'd_t'}), '(samples=d_s, targets=d_t)\n', (2599, 2625), False, 'from mvpa2.datasets.base import dataset_wizard\n'), ((2500, 2516), 'numpy.dot', 'np.dot', (['(s * d)', 'R'], {}), '(s * d, R)\n', (2506, 2516), True, 'import numpy as np\n'), ((3593, 3602), 'numpy.linalg.norm', 'norm', (['dsf'], {}), '(dsf)\n', (3597, 3602), False, 'from numpy.linalg import norm\n'), ((3605, 3614), 'numpy.linalg.norm', 'norm', (['d_t'], {}), '(d_t)\n', (3609, 3614), False, 'from numpy.linalg import norm\n'), ((3671, 3692), 'numpy.linalg.norm', 'norm', (['(s * R - pm.proj)'], {}), '(s * R - pm.proj)\n', (3675, 3692), False, 'from numpy.linalg import norm\n'), ((5016, 5026), 'numpy.linalg.norm', 'norm', (['dsfr'], {}), '(dsfr)\n', (5020, 5026), False, 'from numpy.linalg import norm\n'), ((5029, 5038), 'numpy.linalg.norm', 'norm', (['d_s'], {}), '(d_s)\n', (5033, 5038), False, 'from numpy.linalg import norm\n'), ((1520, 1536), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (1533, 1536), True, 'import numpy as np\n'), ((4774, 4801), 'numpy.dot', 'np.dot', (['pm._proj', 'pm._recon'], {}), '(pm._proj, pm._recon)\n', (4780, 4801), True, 'import numpy as np\n'), ((4827, 4852), 'numpy.eye', 'np.eye', (['pm._proj.shape[0]'], {}), '(pm._proj.shape[0])\n', (4833, 4852), True, 'import numpy as np\n'), ((1747, 1761), 'numpy.dot', 'np.dot', (['R', 'R.T'], {}), '(R, R.T)\n', (1753, 1761), True, 'import numpy as np\n'), ((1764, 1782), 'numpy.eye', 'np.eye', (['R.shape[0]'], {}), '(R.shape[0])\n', (1770, 1782), True, 'import numpy as np\n'), ((4063, 4084), 'numpy.abs', 'np.abs', (['(s - pm._scale)'], {}), '(s - pm._scale)\n', (4069, 4084), True, 'import numpy as np\n')]
|
import bpy
import numpy as np
import math
import mathutils
import time
import os
class Prism:
""" ^"""
""" / \\"""
""" / ^ \\"""
""" / | \\"""
""" /'alpha'\\ <-- lenght of this side is calculated based on 'width' and 'alpha'"""
"""/ \\"""
"""----------- """
""" ^"""
""" |"""
"""This side is defined via 'width',"""
"""parallel to z-axis of Sigray defined"""
"""The angle opposite to this side is 'alpha'"""
"""'height' defines the distance between the two triangular sides of the prism"""
def __init__(self, width, height, alpha):
self.width = width
self.height = height
self.alpha = math.radians(alpha)
def clear_scene(self):
"""This function clears the whole scene and all objects contained in it"""
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
def define_prism(self, loc = (0, 0, 0), angle = None, base_width = None):
"""The default location assigned is (0, 0, 0). Using the 'update_coordinates'-function allows for reassignment of coordinates"""
x, y, z = loc
name = "prism"
meshes = bpy.data.meshes
if angle == None:
angle = self.alpha
else:
angle = math.radians(angle)
if base_width == None:
base_width = self.width
else:
base_width = base_width
points = [ [x, y, z], [x + base_width, y, z], [x + (base_width / 2), y + (base_width / (2 * np.tan(angle / 2))), z],
[x, y, z + self.height], [x + base_width, y, z + self.height], [x + (base_width / 2), y + (base_width / (2 * np.tan(angle / 2))), z + self.height] ]
faces = [ [4,5,2],[1,0,3],[2,5,3],[4,3,5],[1,2,0],[1,4,2],[4,1,3],[0,2,3] ]
shape_vertices = []
for p in points:
print(p)
shape_vertices.append ( mathutils.Vector((p[0],p[1],p[2])) )
new_mesh = bpy.data.meshes.new ( name + "_mesh" )
new_mesh.from_pydata ( shape_vertices, [], faces )
new_mesh.update()
new_obj = bpy.data.objects.new ( name, new_mesh )
return new_obj
def link_prism(self, object):
"""Any created object in Blender needs to be linked to the scene, in order to be displayed"""
bpy.context.collection.objects.link(object)
def update_coordinates(self, new_location):
"""This function allows for reassignment of coordinates"""
return self.define_prism(loc = new_location)
def update_alpha(self, new_alpha):
"""This function allows for reassignment of the angle alpha"""
return self.define_prism(angle = new_alpha)
def update_width(self, new_width):
"""This function allows for reassignment of the width of the prism"""
return self.define_prism(base_width = new_width)
def make_array(self, x, y, no_of_prisms, separation):
for p in range(no_of_prisms):
if p == 0:
self.link_prism(self.update_coordinates((x, y, 0)))
else:
self.link_prism(self.update_coordinates( (p * (self.width + separation) + x, y, 0)))
|
[
"bpy.ops.object.delete",
"mathutils.Vector",
"numpy.tan",
"bpy.ops.object.select_all",
"bpy.data.objects.new",
"bpy.data.meshes.new",
"math.radians",
"bpy.context.collection.objects.link"
] |
[((732, 751), 'math.radians', 'math.radians', (['alpha'], {}), '(alpha)\n', (744, 751), False, 'import math\n'), ((877, 919), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (902, 919), False, 'import bpy\n'), ((929, 968), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {'use_global': '(False)'}), '(use_global=False)\n', (950, 968), False, 'import bpy\n'), ((2093, 2128), 'bpy.data.meshes.new', 'bpy.data.meshes.new', (["(name + '_mesh')"], {}), "(name + '_mesh')\n", (2112, 2128), False, 'import bpy\n'), ((2240, 2276), 'bpy.data.objects.new', 'bpy.data.objects.new', (['name', 'new_mesh'], {}), '(name, new_mesh)\n', (2260, 2276), False, 'import bpy\n'), ((2453, 2496), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['object'], {}), '(object)\n', (2488, 2496), False, 'import bpy\n'), ((1370, 1389), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (1382, 1389), False, 'import math\n'), ((2034, 2070), 'mathutils.Vector', 'mathutils.Vector', (['(p[0], p[1], p[2])'], {}), '((p[0], p[1], p[2]))\n', (2050, 2070), False, 'import mathutils\n'), ((1635, 1652), 'numpy.tan', 'np.tan', (['(angle / 2)'], {}), '(angle / 2)\n', (1641, 1652), True, 'import numpy as np\n'), ((1791, 1808), 'numpy.tan', 'np.tan', (['(angle / 2)'], {}), '(angle / 2)\n', (1797, 1808), True, 'import numpy as np\n')]
|
"""
The container to store indexes in active learning.
It serves as the basic type for 'set' operations.
"""
# Authors: <NAME>
# License: BSD 3 clause
from __future__ import division
import collections
import copy
import numpy as np
from .multi_label_tools import check_index_multilabel, infer_label_size_multilabel, flattern_multilabel_index, \
integrate_multilabel_index
from ..utils.ace_warnings import *
from ..utils.interface import BaseCollection
from ..utils.misc import randperm
class IndexCollection(BaseCollection):
"""Index Collection.
Index Collection class is a basic data type for set operations.
Multiple different types of elements are supported for active learning.
It also checks the validity of the given operations.
Note that:
1. The types of elements should be the same.
2. If there are multiple elements to update, they should be given as a list, numpy.ndarray or IndexCollection
object; otherwise, the input will be treated as one single element. (If a single element
contains multiple values, take tuple as the type of element.)
Parameters
----------
data : list or np.ndarray or object, optional (default=None)
shape [n_element]. Element should be int or tuple.
The meaning of elements can be defined by users.
Some examples of elements:
(example_index, label_index) for instance-label pair query.
(example_index, feature_index) for feature query,
(example_index, example_index) for active clustering;
If int, it may be the index of an instance, for example.
Attributes
----------
index: list, shape (1, n_elements)
A list contains all elements in this container.
Examples
--------
>>> a = IndexCollection([1, 2, 3])
>>> a.update([4,5])
[1, 2, 3, 4, 5]
>>> a.difference_update([1,2])
[3, 4, 5]
"""
def __init__(self, data=None):
if data is None or len(data) == 0:
self._innercontainer = []
else:
if isinstance(data, IndexCollection):
self._innercontainer = copy.deepcopy(data.index)
self._element_type = data.get_elementType()
return
if not isinstance(data, (list, np.ndarray)):
data = [data]
self._innercontainer = list(np.unique([i for i in data], axis=0))
if len(self._innercontainer) != len(data):
warnings.warn("There are %d same elements in the given data" % (len(data) - len(self._innercontainer)),
category=RepeatElementWarning,
stacklevel=3)
datatype = collections.Counter([type(i) for i in self._innercontainer])
if len(datatype) != 1:
raise TypeError("Different types found in the given _indexes.")
tmp_data = self._innercontainer[0]
if isinstance(tmp_data, np.generic):
# self._element_type = type(np.asscalar(tmp_data)) # deprecated in numpy v1.16
self._element_type = type(tmp_data.item())
else:
self._element_type = type(tmp_data)
@property
def index(self):
"""
Get the index of data.
"""
return copy.deepcopy(self._innercontainer)
def __getitem__(self, item):
return self._innercontainer.__getitem__(item)
def get_elementType(self):
"""
Return the type of data.
"""
return self._element_type
def pop(self):
"""
Return the popped value. Raise KeyError if empty.
"""
return self._innercontainer.pop()
def add(self, value):
"""
Add element.
It will warn if the value to add is existent.
Parameters
----------
value: object
same type of the element already in the set.
Raise if unknown type is given.
Returns
-------
self: object
return self.
"""
if self._element_type is None:
self._element_type = type(value)
# check validation
if isinstance(value, np.generic):
# value = np.asscalar(value) # deprecated in numpy v1.16
value = value.item()
if not isinstance(value, self._element_type):
raise TypeError(
"A %s parameter is expected, but received: %s" % (str(self._element_type), str(type(value))))
if value in self._innercontainer:
warnings.warn("Adding element %s has already in the collection, skip." % (value.__str__()),
category=RepeatElementWarning,
stacklevel=3)
else:
self._innercontainer.append(value)
return self
def discard(self, value):
"""Remove an element.
It will warn if the value to discard is inexistent.
Parameters
----------
value: object
Value to discard.
Returns
-------
self: object
Return self.
"""
if value not in self._innercontainer:
warnings.warn("Element %s to discard is not in the collection, skip." % (value.__str__()),
category=InexistentElementWarning,
stacklevel=3)
else:
self._innercontainer.remove(value)
return self
def difference_update(self, other):
"""Remove all elements of another array from this container.
Parameters
----------
other: object
Elements to discard. Note that, if multiple indexes are contained,
a list, numpy.ndarray or IndexCollection should be given. Otherwise,
it will be treated as a single object.
Returns
-------
self: object
Return self.
"""
if not isinstance(other, (list, np.ndarray, IndexCollection)):
other = [other]
for item in other:
self.discard(item)
return self
def update(self, other):
"""Update self with the union of itself and others.
Parameters
----------
other: object
Elements to add. Note that, if multiple indexes are contained,
a list, numpy.ndarray or IndexCollection should be given. Otherwise,
it will be treated as a single object.
Returns
-------
self: object
Return self.
"""
if not isinstance(other, (list, np.ndarray, IndexCollection)):
other = [other]
for item in other:
self.add(item)
return self
def random_sampling(self, rate=0.3):
"""Return a random sampled subset of this collection.
Parameters
----------
rate: float, optional (default=None)
The rate of sampling. Must be a number in [0,1].
Returns
-------
array: IndexCollection
The sampled index collection.
"""
assert (0 < rate < 1)
perm = randperm(len(self) - 1, round(rate * len(self)))
return IndexCollection([self.index[i] for i in perm])
class MultiLabelIndexCollection(IndexCollection):
"""Class for managing multi-label indexes.
This class stores indexes in multi-label. Each element should be a tuple.
A single index should only have 1 element (example_index, ) to query all labels or
2 elements (example_index, [label_indexes]) to query specific labels.
Some examples of valid multi-label indexes include:
queried_index = (1, [3,4])
queried_index = (1, [3])
queried_index = (1, 3)
queried_index = (1, (3))
queried_index = (1, (3,4))
queried_index = (1, ) # query all labels
Several validity checks are implemented in this class,
such as repeated elements and indexes out of bound.
Parameters
----------
data : list or np.ndarray of a single tuple, optional (default=None)
shape [n_element]. All elements should be tuples.
label_size: int, optional (default=None)
The number of classes. If not provided, it will be inferred from the data; an error is raised if the inference fails.
Attributes
----------
index: list, shape (1, n_elements)
A list contains all elements in this container.
Examples
--------
>>> multi_lab_ind1 = MultiLabelIndexCollection([(0, 1), (0, 2), (0, (3, 4)), (1, (0, 1))], label_size=5)
{(0, 1), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.update((0, 0))
{(0, 1), (0, 0), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.update([(1, 2), (1, (3, 4))])
{(0, 1), (1, 2), (0, 0), (1, 3), (1, 4), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.update([(2,)])
{(0, 1), (1, 2), (0, 0), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> multi_lab_ind1.difference_update([(0,)])
{(1, 2), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (1, 0)}
"""
def __init__(self, data=None, label_size=None):
if data is None or len(data) == 0:
self._innercontainer = set()
if label_size is None:
warnings.warn("This collection does not have a label_size value, set it manually or "
"it will raise when decomposing indexes.",
category=ValidityWarning)
self._label_size = label_size
else:
if isinstance(data, MultiLabelIndexCollection):
self._innercontainer = copy.deepcopy(data.index)
self._label_size = data._label_size
return
# check given indexes
data = check_index_multilabel(data)
if label_size is None:
self._label_size = infer_label_size_multilabel(data, check_arr=False)
else:
self._label_size = label_size
# decompose all label queries.
decomposed_data = flattern_multilabel_index(data, self._label_size, check_arr=False)
self._innercontainer = set(decomposed_data)
if len(self._innercontainer) != len(decomposed_data):
warnings.warn(
"There are %d same elements in the given data" % (len(data) - len(self._innercontainer)),
category=RepeatElementWarning,
stacklevel=3)
@property
def index(self):
"""
Get the index of data.
"""
return list(self._innercontainer)
def add(self, value):
"""Add element.
It will warn if the value to add is existent. Raise if
invalid type of value is given.
Parameters
----------
value: tuple
Index for adding. Raise if index is out of bound.
Returns
-------
self: object
return self.
"""
# check validation
assert (isinstance(value, tuple))
if len(value) == 1:
value = [(value[0], i) for i in range(self._label_size)]
return self.update(value)
elif len(value) == 2:
if isinstance(value[1], collections.Iterable):
for item in value[1]:
if item >= self._label_size:
raise ValueError("Index %s is out of bound %s" % (str(item), str(self._label_size)))
else:
if value[1] >= self._label_size:
raise ValueError("Index %s is out of bound %s" % (str(value[1]), str(self._label_size)))
else:
raise ValueError("A tuple with 1 or 2 elements is expected, but received: %s" % str(value))
if value in self._innercontainer:
warnings.warn("Adding element %s has already in the collection, skip." % (value.__str__()),
category=RepeatElementWarning,
stacklevel=3)
else:
self._innercontainer.add(value)
return self
def discard(self, value):
"""Remove an element.
It will warn if the value to discard is inexistent. Raise if
invalid type of value is given.
Parameters
----------
value: tuple
Index for adding. Raise if index is out of bound.
Returns
-------
self: object
return self.
"""
assert (isinstance(value, tuple))
if len(value) == 1:
value = [(value[0], i) for i in range(self._label_size)]
return self.difference_update(value)
if value not in self._innercontainer:
warnings.warn("Element %s to discard is not in the collection, skip." % (value.__str__()),
category=InexistentElementWarning,
stacklevel=3)
else:
self._innercontainer.discard(value)
return self
def difference_update(self, other):
"""Remove all elements of another array from this container.
Parameters
----------
other: object
Elements to discard. Note that, if multiple indexes are contained,
a list, numpy.ndarray or MultiLabelIndexCollection should be given. Otherwise,
a tuple should be given.
Returns
-------
self: object
Return self.
"""
if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):
label_ind = flattern_multilabel_index(other, self._label_size)
for j in label_ind:
self.discard(j)
elif isinstance(other, tuple):
self.discard(other)
else:
raise TypeError(
"A list or np.ndarray is expected if multiple indexes are "
"contained. Otherwise, a tuple should be provided")
return self
def update(self, other):
"""Update self with the union of itself and others.
Parameters
----------
other: object
Elements to add. Note that, if multiple indexes are contained,
a list, numpy.ndarray or MultiLabelIndexCollection should be given. Otherwise,
a tuple should be given.
Returns
-------
self: object
Return self.
"""
if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):
label_ind = flattern_multilabel_index(other, self._label_size)
for j in label_ind:
self.add(j)
elif isinstance(other, tuple):
self.add(other)
else:
raise TypeError(
"A list or np.ndarray is expected if multiple indexes are "
"contained. Otherwise, a tuple should be provided")
return self
def get_onedim_index(self, order='C', ins_num=None):
"""Get the 1d index.
Parameters
----------
order : {'C', 'F'}, optional (default='C')
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Matlab-style) order.
ins_num: int, optional
The total number of instance. Must be provided if the order is 'F'.
Examples
--------
>>> b = [1, 4, 11]
>>> mi = MultiLabelIndexCollection.construct_by_1d_array(array=b, label_mat_shape=(3, 4))
>>> print(mi)
{(1, 0), (2, 3), (1, 1)}
>>> print('col major:', mi.get_onedim_index(order='F', ins_num=3))
col major: [1, 11, 4]
>>> print('row major:', mi.get_onedim_index(order='C'))
row major: [4, 11, 5]
"""
if order == 'F':
if ins_num is None:
raise ValueError("The ins_num must be provided if the order is 'F'.")
return [tup[0] + tup[1] * ins_num for tup in self._innercontainer]
elif order == 'C':
return [tup[0] * self._label_size + tup[1] for tup in self._innercontainer]
else:
raise ValueError("The value of order must be one of {'C', 'F'}")
def get_instance_index(self):
"""Get the index of instances contained in this object.
If it is a labeled set, it is equivalent to the indexes of fully and partially labeled instances.
Returns
-------
partlab: list
The indexes of partially labeled instances.
"""
return np.unique([tp[0] for tp in self._innercontainer])
def _get_cond_instance(self, cond):
"""Return the indexes of instances according to the cond.
cond = 0: return the instances which are unbroken.
cond = 1: return the instances which have missing entries.
"""
tmp = integrate_multilabel_index(self.index, label_size=self._label_size, check_arr=False)
if cond == 0:
return [tp[0] for tp in tmp if len(tp) == 1]
else:
return [tp[0] for tp in tmp if len(tp) > 1]
def get_unbroken_instances(self):
"""Return the indexes of unbroken instances whose entries are all known."""
return self._get_cond_instance(cond=0)
def get_break_instances(self):
"""Return the indexes of break instances which have missing entries."""
return self._get_cond_instance(cond=1)
def get_matrix_mask(self, mat_shape, fill_value=1, sparse=True, sparse_format='lil_matrix'):
"""Return an array which has the same shape with the label matrix.
If an entry is known, then, the corresponding value in the mask is 1, otherwise, 0.
Parameters
----------
mat_shape: tuple
The shape of label matrix. [n_samples, n_classes]
fill_value: int
The value filled in the mask when the entry is in the container.
sparse: bool
Whether to return a sparse matrix or a dense matrix (numpy.ndarray).
sparse_format: str
The format of the returned sparse matrix. Only available if sparse==True
should be one onf [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix, dok_matrix, lil_matrix].
Please refer to https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
for the definition of each sparse format.
Returns
-------
mask: {scipy.sparse.csr_matrix, scipy.sparse.csc_matrix}
The mask of the label matrix.
"""
assert isinstance(mat_shape, tuple)
if sparse:
try:
exec("from scipy.sparse import " + sparse_format)
except:
raise ValueError(
"sparse format " + sparse_format + "is not defined. Valid format should be one of "
"[bsr_matrix, coo_matrix, csc_matrix, csr_matrix, "
"dia_matrix, dok_matrix, lil_matrix].")
mask = eval(sparse_format + '(mat_shape)')
else:
if fill_value == 1:
mask = np.zeros(mat_shape, dtype=bool)
for item in self._innercontainer:
mask[item] = True
else:
mask = np.zeros(mat_shape)
for item in self._innercontainer:
mask[item] = fill_value
return mask
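# Example added for illustration (not in the original docstring):
# MultiLabelIndexCollection([(0, 1), (1, 0)], label_size=2).get_matrix_mask((2, 2), sparse=False)
# returns a 2x2 boolean array that is True at (0, 1) and (1, 0) and False elsewhere.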
@classmethod
def construct_by_1d_array(cls, array, label_mat_shape, order='F'):
"""Construct a MultiLabelIndexCollection object by providing a
1d array, and the number of classes.
Parameters
----------
array: {list, np.ndarray}
An 1d array of indexes.
label_mat_shape: tuple of ints
The shape of label matrix. The 1st element is the number of instances,
and the 2nd element is the total classes.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Matlab-style) order.
Returns
-------
multi_ind: MultiLabelIndexCollection
The MultiLabelIndexCollection object.
Examples
--------
>>> b = [1, 4, 11]
>>> mi = MultiLabelIndexCollection.construct_by_1d_array(array=b, label_mat_shape=(3, 4))
>>> print(mi)
{(1, 0), (2, 3), (1, 1)}
>>> print('col major:', mi.get_onedim_index(order='F', ins_num=3))
col major: [1, 11, 4]
>>> print('row major:', mi.get_onedim_index(order='C'))
row major: [4, 11, 5]
"""
assert len(label_mat_shape) == 2
row, col = np.unravel_index(array, dims=label_mat_shape, order=order)
return cls(data=[(row[i], col[i]) for i in range(len(row))], label_size=label_mat_shape[1])
@classmethod
def construct_by_element_mask(cls, mask):
"""Construct a MultiLabelIndexCollection object by providing a
2d array whose shape should be the same as the matrix shape.
Parameters
----------
mask: {list, np.ndarray}
The 2d mask matrix of elements.
There must be only 1 and 0 in the matrix, in which,
1 means the corresponding element is known, and will be
added to the MultiLabelIndexCollection container.
Otherwise, it will be treated as an unknown element.
Examples
--------
>>> import numpy as np
>>> mask = np.asarray([
[0, 1],
[1, 0],
[1, 0]
]) # 3 rows, 2 lines
>>> mi = MultiLabelIndexCollection.construct_by_element_mask(mask=mask)
>>> print(mi)
{(0, 1), (2, 0), (1, 0)}
"""
mask = np.asarray(mask)
ue = np.unique(mask)
if not (len(mask.shape) == 2 and len(ue) == 2 and 0 in ue and 1 in ue):
raise ValueError("The mask matrix should be a 2d array, and there must be only "
"1 and 0 in the matrix, in which, 1 means the corresponding "
"element is known, and will be added to the MultiLabelIndexCollection container.")
nz_row, nz_col = np.nonzero(mask)
return cls(data=[(nz_row[i], nz_col[i]) for i in range(len(nz_row))], label_size=mask.shape[1])
class FeatureIndexCollection(MultiLabelIndexCollection):
"""Container to store the indexes in feature querying scenario.
This class stores indexes in incomplete feature matrix setting. Each element should be a tuple.
A single index should only have 1 element (example_index, ) to query all features or
2 elements (example_index, [feature_indexes]) to query specific features.
Some examples of valid indexes include:
queried_index = (1, [3,4])
queried_index = (1, [3])
queried_index = (1, 3)
queried_index = (1, (3))
queried_index = (1, (3,4))
queried_index = (1, ) # query all _labels
Several validity checks are implemented in this class,
such as repeated elements and indexes out of bound.
Parameters
----------
data : list or np.ndarray of a single tuple, optional (default=None)
shape [n_element]. All elements should be tuples.
feature_size: int, optional (default=None)
The number of features. If not provided, it will be inferred from the data; an error is raised if the inference fails.
Attributes
----------
index: list, shape (1, n_elements)
A list contains all elements in this container.
Examples
--------
>>> fea_ind1 = FeatureIndexCollection([(0, 1), (0, 2), (0, (3, 4)), (1, (0, 1))], feature_size=5)
{(0, 1), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.update((0, 0))
{(0, 1), (0, 0), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.update([(1, 2), (1, (3, 4))])
{(0, 1), (1, 2), (0, 0), (1, 3), (1, 4), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.update([(2,)])
{(0, 1), (1, 2), (0, 0), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (0, 4), (1, 0), (0, 2), (0, 3)}
>>> fea_ind1.difference_update([(0,)])
{(1, 2), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (1, 0)}
"""
def __init__(self, data, feature_size=None):
try:
super(FeatureIndexCollection, self).__init__(data=data, label_size=feature_size)
except(Exception, ValueError):
raise Exception("The inference of feature_size is failed, please set a specific value.")
def map_whole_index_to_train(train_idx, index_in_whole):
"""Map the indexes from whole dataset to training set.
Parameters
----------
train_idx: {list, numpy.ndarray}
The training indexes.
index_in_whole: {IndexCollection, MultiLabelIndexCollection}
The indexes need to be mapped of the whole data.
Returns
-------
index_in_train: {IndexCollection, MultiLabelIndexCollection}
The mapped indexes.
Examples
--------
>>> train_idx = [231, 333, 423]
>>> index_in_whole = IndexCollection([333, 423])
>>> print(map_whole_index_to_train(train_idx, index_in_whole))
[1, 2]
"""
if isinstance(index_in_whole, MultiLabelIndexCollection):
ind_type = 2
elif isinstance(index_in_whole, IndexCollection):
ind_type = 1
else:
raise TypeError("index_in_whole must be one of {IndexCollection, MultiLabelIndexCollection} type.")
tr_ob = []
for entry in index_in_whole:
if ind_type == 2:
assert entry[0] in train_idx
ind_in_train = np.argwhere(train_idx == entry[0])[0][0]
tr_ob.append((ind_in_train, entry[1]))
else:
assert entry in train_idx
tr_ob.append(np.argwhere(train_idx == entry)[0][0])
if ind_type == 2:
return MultiLabelIndexCollection(tr_ob)
else:
return IndexCollection(tr_ob)
|
[
"numpy.unique",
"numpy.asarray",
"numpy.zeros",
"numpy.argwhere",
"numpy.unravel_index",
"numpy.nonzero",
"copy.deepcopy"
] |
[((3255, 3290), 'copy.deepcopy', 'copy.deepcopy', (['self._innercontainer'], {}), '(self._innercontainer)\n', (3268, 3290), False, 'import copy\n'), ((16509, 16558), 'numpy.unique', 'np.unique', (['[tp[0] for tp in self._innercontainer]'], {}), '([tp[0] for tp in self._innercontainer])\n', (16518, 16558), True, 'import numpy as np\n'), ((20720, 20778), 'numpy.unravel_index', 'np.unravel_index', (['array'], {'dims': 'label_mat_shape', 'order': 'order'}), '(array, dims=label_mat_shape, order=order)\n', (20736, 20778), True, 'import numpy as np\n'), ((21807, 21823), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (21817, 21823), True, 'import numpy as np\n'), ((21837, 21852), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (21846, 21852), True, 'import numpy as np\n'), ((22255, 22271), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (22265, 22271), True, 'import numpy as np\n'), ((2073, 2098), 'copy.deepcopy', 'copy.deepcopy', (['data.index'], {}), '(data.index)\n', (2086, 2098), False, 'import copy\n'), ((2309, 2345), 'numpy.unique', 'np.unique', (['[i for i in data]'], {'axis': '(0)'}), '([i for i in data], axis=0)\n', (2318, 2345), True, 'import numpy as np\n'), ((9617, 9642), 'copy.deepcopy', 'copy.deepcopy', (['data.index'], {}), '(data.index)\n', (9630, 9642), False, 'import copy\n'), ((19137, 19168), 'numpy.zeros', 'np.zeros', (['mat_shape'], {'dtype': 'bool'}), '(mat_shape, dtype=bool)\n', (19145, 19168), True, 'import numpy as np\n'), ((19298, 19317), 'numpy.zeros', 'np.zeros', (['mat_shape'], {}), '(mat_shape)\n', (19306, 19317), True, 'import numpy as np\n'), ((25621, 25655), 'numpy.argwhere', 'np.argwhere', (['(train_idx == entry[0])'], {}), '(train_idx == entry[0])\n', (25632, 25655), True, 'import numpy as np\n'), ((25790, 25821), 'numpy.argwhere', 'np.argwhere', (['(train_idx == entry)'], {}), '(train_idx == entry)\n', (25801, 25821), True, 'import numpy as np\n')]
|
import pandas as pd
import rapidfuzz
import math
import numpy as np
# ------------------------- #
# --------- DATA ---------- #
# ------------------------- #
# Read in mock census and PES data
CEN = pd.read_csv('Data/Mock_Rwanda_Data_Census.csv')
PES = pd.read_csv('Data/Mock_Rwanda_Data_Pes.csv')
# select needed columns
CEN = CEN[['id_indi_cen', 'firstnm_cen', 'lastnm_cen', 'age_cen', 'month_cen', 'year_cen', 'sex_cen', 'province_cen']]
PES = PES[['id_indi_pes', 'firstnm_pes', 'lastnm_pes', 'age_pes', 'month_pes', 'year_pes', 'sex_pes', 'province_pes']]
# ----------------------------- #
# --------- BLOCKING ---------- #
# ----------------------------- #
# Block on province geographic variable
BP1 = 'province'
# Combine
for i, BP in enumerate([BP1], 1):
if i == 1:
combined_blocks = PES.merge(CEN, left_on = BP + '_pes', right_on = BP + '_cen', how = 'inner').drop_duplicates(['id_indi_cen', 'id_indi_pes'])
print("1" + str(combined_blocks.count()))
# Count
len(combined_blocks) # 50042
# -------------------------------------------------- #
# --------------- AGREEMENT VECTORS ---------------- #
# -------------------------------------------------- #
# Agreement vector is created which is then inputted into the EM Algorithm.
# Set v1, v2,... vn as the agreement variables
# Select agreement variables
v1 = 'firstnm'
v2 = 'lastnm'
v3 = 'month'
v4 = 'year'
v5 = 'sex'
# All agreement variables used to calculate match weights & probabilities
all_variables = [v1, v2, v3, v4, v5]
# Variables using partial agreement (string similarity)
edit_distance_variables = [v1, v2]
dob_variables = [v3, v4]
remaining_variables = [v5]
# Cut off values for edit distance variables
cutoff_values = [0.45, 0.45]
# Replace NaN with blank spaces to assure the right data types for string similarity metrics
for variable in edit_distance_variables:
cen_var = variable+ '_cen'
pes_var = variable + '_pes'
combined_blocks[cen_var] = combined_blocks[cen_var].fillna("")
combined_blocks[pes_var] = combined_blocks[pes_var].fillna("")
def SLD(s,t):
# Computing the standardised levenshtein edit distance between two strings
# using the rapidfuzz string matching library for its fast string comparisons
# Dividing result by 100 to return a score between 0 and 1
standardised = (rapidfuzz.string_metric.normalized_levenshtein(s, t)/100)
return standardised;
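# Illustration (added, not part of the original script): because the score is normalised to [0, 1],
# identical strings give SLD == 1.0 while very different strings give a value near 0, so the 0.45
# cut-off defined above discards weak partial agreements on names, e.g.:
# print(SLD("uwimana", "uwimana"), SLD("uwimana", "habimana"))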
# Create forename/ last name Edit Distance score columns for all pairs
combined_blocks['firstnm_agreement'] = combined_blocks.apply(lambda x: SLD(x['firstnm_pes'], x['firstnm_cen']), axis=1)
combined_blocks['lastnm_agreement'] = combined_blocks.apply(lambda x: SLD(x['lastnm_pes'], x['lastnm_cen']), axis=1)
# --------------------------------------------------------- #
# ---------------- INITIAL M & U VALUES ------------------- #
# --------------------------------------------------------- #
# Read in M and U values
m_values = pd.read_csv('Data/m_values.csv')
u_values = pd.read_csv('Data/u_values.csv')
# Save individual M values from file
FN_M = m_values[m_values.variable == 'firstnm'].iloc[0][1]
SN_M = m_values[m_values.variable == 'lastnm'].iloc[0][1]
SEX_M = m_values[m_values.variable == 'sex'].iloc[0][1]
MONTH_M = m_values[m_values.variable == 'month'].iloc[0][1]
YEAR_M = m_values[m_values.variable == 'year'].iloc[0][1]
# Save individual U values from file
FN_U = u_values[u_values.variable == 'firstnm'].iloc[0][1]
SN_U = u_values[u_values.variable == 'lastnm'].iloc[0][1]
SEX_U = u_values[u_values.variable == 'sex'].iloc[0][1]
MONTH_U = u_values[u_values.variable == 'month'].iloc[0][1]
YEAR_U = u_values[u_values.variable == 'year'].iloc[0][1]
# Add M values to unlinked data
combined_blocks['firstnm_m'] = FN_M
combined_blocks['lastnm_m'] = SN_M
combined_blocks['sex_m'] = SEX_M
combined_blocks['month_m'] = MONTH_M
combined_blocks['year_m'] = YEAR_M
# Add U values to unlinked data
combined_blocks['firstnm_u'] = FN_U
combined_blocks['lastnm_u'] = SN_U
combined_blocks['sex_u'] = SEX_U
combined_blocks['month_u'] = MONTH_U
combined_blocks['year_u'] = YEAR_U
# Add Agreement / Disagreement Weights
for var in all_variables:
# apply calculations: agreement weight = log base 2 (m/u)
combined_blocks[var + "_agreement_weight"] = combined_blocks.apply(lambda x: (math.log2(x[var + "_m"] / x[var + "_u"])), axis = 1)
# disagreement weight = log base 2 ((1-m)/(1-u))
combined_blocks[var + "_disagreement_weight"] = combined_blocks.apply(lambda x: (math.log2((1 - x[var + "_m"]) / (1 - x[var + "_u"]))), axis = 1)
# show sample of agreement/disagreement weights calculated
print(combined_blocks[[var + "_m", var + "_u", var + "_agreement_weight", var + "_disagreement_weight"]].head(1))
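# Worked example (added for clarity): with m = 0.9 and u = 0.1, a variable gets an agreement
# weight of log2(0.9 / 0.1) ~= 3.17 and a disagreement weight of log2(0.1 / 0.9) ~= -3.17,
# so a match on that variable raises the total score by about as much as a mismatch lowers it.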
'''
Alter the M and U values above (i.e. FN_M, FN_U etc. currently lines 100 - 112) to see the effect on variable agreement/disagreement weights
'''
# --------------------------------------------------- #
# ------------------ MATCH SCORES ------------------ #
# --------------------------------------------------- #
''' An agreement value between 0 and 1 is calculated for each agreement variable '''
''' This is done for every candidate record pair '''
# --------------------------------------- #
# ------------- DOB SCORE -------------- #
# --------------------------------------- #
# Partial scores
combined_blocks['month_agreement'] = np.where(combined_blocks['month_pes'] == combined_blocks['month_cen'], 1/3, 0)
combined_blocks['year_agreement'] = np.where(combined_blocks['year_pes'] == combined_blocks['year_cen'], 1/2, 0)
# Compute final Score and drop extra score columns
dob_score_columns = ['month_agreement', 'year_agreement']
combined_blocks['DOB_agreement'] = combined_blocks[dob_score_columns].sum(axis=1)
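# Worked example (added for clarity): a pair agreeing on month but not year gets
# DOB_agreement = 1/3 + 0 ~= 0.33, while agreement on both month and year gives 1/3 + 1/2 ~= 0.83.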
# combined_blocks = combined_blocks.drop(dob_score_columns, axis = 1)
# ---------------------------------------- #
# ---------- PARTIAL CUT OFFS ------------ #
# ---------------------------------------- #
# All partial variables except DOB
for variable, cutoff in zip(edit_distance_variables, cutoff_values):
# If agreement is below a certain level, set agreement to 0. Else, leave agreement as it is
combined_blocks[variable + '_agreement'] = np.where(combined_blocks[variable + "_agreement"] <= cutoff, 0, combined_blocks[variable + "_agreement"])
# Remaining variables (no partial scores)
for variable in remaining_variables:
# Calculate 1/0 Agreement Score (no partial scoring)
combined_blocks[variable + '_agreement'] = np.where(combined_blocks[variable + "_cen"] == combined_blocks[variable + "_pes"], 1, 0)
# ------------------------------------------------------------------ #
# ------------------------- WEIGHTS ------------------------------- #
# ------------------------------------------------------------------ #
# Start by giving all records agreement weights
for variable in all_variables:
combined_blocks[variable + "_weight"] = combined_blocks[variable + "_agreement_weight"]
# Update for partial agreement / disagreement (only when agreement < 1)
# source: https://www.census.gov/content/dam/Census/library/working-papers/1991/adrm/rr91-9.pdf
# weight = Agreement_Weight if Agreement = 1, and
# MAX{(Agreement_Weight - (Agreement_Weight - Disagreement_Weight)*(1-Agreement)*(9/2)), Disagreement_Weight} if 0 <= Agreement < 1.
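# Worked example (added for clarity): with Agreement_Weight = 4, Disagreement_Weight = -3 and a
# partial agreement of 0.9, the interpolated weight is
# max(4 - (4 - (-3)) * (1 - 0.9) * (9/2), -3) = max(4 - 3.15, -3) = 0.85.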
for variable in all_variables:
combined_blocks[variable + "_weight"] = np.where(combined_blocks[variable + "_agreement"] < 1,
np.maximum(((combined_blocks[variable + "_agreement_weight"]) -
((combined_blocks[variable + "_agreement_weight"] - combined_blocks[variable + "_disagreement_weight"]) *
(1 - combined_blocks[variable + "_agreement"]) * (9/2))),
combined_blocks[variable + "_disagreement_weight"]),
combined_blocks[variable + "_weight"])
# Set weights to 0 (instead of disagreement_weight) if there is missingess in PES or CEN variable (agreement == 0 condition needed for DOB)
for variable in all_variables:
combined_blocks[variable + "_weight"] = np.where(combined_blocks[variable + '_pes'].isnull() | combined_blocks[variable + '_cen'].isnull() &
(combined_blocks[variable + '_agreement'] == 0), 0,
combined_blocks[variable + '_weight'])
# Sum column wise across the above columns - create match score
combined_blocks["match_score"] = combined_blocks[['firstnm_weight', 'lastnm_weight', 'month_weight', 'year_weight', 'sex_weight']].sum(axis=1)
# ------------------------------------------------------------------ #
# ----------------------- ADJUSTMENTS ----------------------------- #
# ------------------------------------------------------------------ #
# To reduce false matches going to clerical, if ages are dissimilar set score to 0
combined_blocks['match_score'] = np.where(combined_blocks['age_pes'].notnull() &
combined_blocks['age_cen'].notnull() &
(combined_blocks['age_pes'] - combined_blocks['age_cen'] > 5),
0, combined_blocks['match_score'])
''' let's view some example clusters produced to check if the scores assigned are sensible'''
# high-scoring candidate record pairs
cen_vars = [s + '_cen' for s in all_variables]
pes_vars = [s + '_pes' for s in all_variables]
display(combined_blocks[cen_vars + pes_vars + ['match_score']].sort_values(by=['match_score'], ascending=False).head(50))
# and low-scoring candidate pairs
display(combined_blocks[cen_vars + pes_vars + ['match_score']].sort_values(by=['match_score']).head(50))
# -------------------------------------- #
# -------------- SAVE ----------------- #
# -------------------------------------- #
combined_blocks.to_csv('Data/Probabilistic_Scores.csv')
|
[
"rapidfuzz.string_metric.normalized_levenshtein",
"pandas.read_csv",
"numpy.where",
"math.log2",
"numpy.maximum"
] |
[((209, 256), 'pandas.read_csv', 'pd.read_csv', (['"""Data/Mock_Rwanda_Data_Census.csv"""'], {}), "('Data/Mock_Rwanda_Data_Census.csv')\n", (220, 256), True, 'import pandas as pd\n'), ((263, 307), 'pandas.read_csv', 'pd.read_csv', (['"""Data/Mock_Rwanda_Data_Pes.csv"""'], {}), "('Data/Mock_Rwanda_Data_Pes.csv')\n", (274, 307), True, 'import pandas as pd\n'), ((2967, 2999), 'pandas.read_csv', 'pd.read_csv', (['"""Data/m_values.csv"""'], {}), "('Data/m_values.csv')\n", (2978, 2999), True, 'import pandas as pd\n'), ((3011, 3043), 'pandas.read_csv', 'pd.read_csv', (['"""Data/u_values.csv"""'], {}), "('Data/u_values.csv')\n", (3022, 3043), True, 'import pandas as pd\n'), ((5443, 5528), 'numpy.where', 'np.where', (["(combined_blocks['month_pes'] == combined_blocks['month_cen'])", '(1 / 3)', '(0)'], {}), "(combined_blocks['month_pes'] == combined_blocks['month_cen'], 1 / 3, 0\n )\n", (5451, 5528), True, 'import numpy as np\n'), ((5560, 5638), 'numpy.where', 'np.where', (["(combined_blocks['year_pes'] == combined_blocks['year_cen'])", '(1 / 2)', '(0)'], {}), "(combined_blocks['year_pes'] == combined_blocks['year_cen'], 1 / 2, 0)\n", (5568, 5638), True, 'import numpy as np\n'), ((6287, 6396), 'numpy.where', 'np.where', (["(combined_blocks[variable + '_agreement'] <= cutoff)", '(0)', "combined_blocks[variable + '_agreement']"], {}), "(combined_blocks[variable + '_agreement'] <= cutoff, 0,\n combined_blocks[variable + '_agreement'])\n", (6295, 6396), True, 'import numpy as np\n'), ((6581, 6673), 'numpy.where', 'np.where', (["(combined_blocks[variable + '_cen'] == combined_blocks[variable + '_pes'])", '(1)', '(0)'], {}), "(combined_blocks[variable + '_cen'] == combined_blocks[variable +\n '_pes'], 1, 0)\n", (6589, 6673), True, 'import numpy as np\n'), ((2347, 2399), 'rapidfuzz.string_metric.normalized_levenshtein', 'rapidfuzz.string_metric.normalized_levenshtein', (['s', 't'], {}), '(s, t)\n', (2393, 2399), False, 'import rapidfuzz\n'), ((7600, 7893), 'numpy.maximum', 'np.maximum', (["(combined_blocks[variable + '_agreement_weight'] - (combined_blocks[\n variable + '_agreement_weight'] - combined_blocks[variable +\n '_disagreement_weight']) * (1 - combined_blocks[variable + '_agreement'\n ]) * (9 / 2))", "combined_blocks[variable + '_disagreement_weight']"], {}), "(combined_blocks[variable + '_agreement_weight'] - (\n combined_blocks[variable + '_agreement_weight'] - combined_blocks[\n variable + '_disagreement_weight']) * (1 - combined_blocks[variable +\n '_agreement']) * (9 / 2), combined_blocks[variable +\n '_disagreement_weight'])\n", (7610, 7893), True, 'import numpy as np\n'), ((4339, 4379), 'math.log2', 'math.log2', (["(x[var + '_m'] / x[var + '_u'])"], {}), "(x[var + '_m'] / x[var + '_u'])\n", (4348, 4379), False, 'import math\n'), ((4535, 4587), 'math.log2', 'math.log2', (["((1 - x[var + '_m']) / (1 - x[var + '_u']))"], {}), "((1 - x[var + '_m']) / (1 - x[var + '_u']))\n", (4544, 4587), False, 'import math\n')]
|
# coding=utf-8
# date: 2019/1/1, 19:38
# name: smz
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from LinearModel.modules.model3 import ModelThreeClasses
from LinearModel.configuration.options import opts
from LinearModel.scripts.gen_data import generate_data
def gen_train_data():
np.random.seed(10)
fields_num = 2
num_classes = 3
sample_size = 2000
mean = np.random.randn(fields_num)
cov = np.eye(fields_num)
diffs = [[3.0], [3.0, 0.0]] # the centre of class 3 differs from the centre of class 2 only in the y direction, while class 2 is offset from class 1 by 3.0 in both x and y
train_X, train_Y = generate_data(num_classes=num_classes, sample_size=sample_size, mean=mean, cov=cov, diffs=diffs)
np.save("../data/train_data_X3.npy", train_X)
np.save("../data/train_data_Y3.npy", train_Y)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
colors = ['r' if np.argmax(label) == 0 else 'b' if np.argmax(label) == 1 else 'y' for label in train_Y]
ax.scatter(train_X[:, 0], train_X[:, 1], c=colors)
ax.set_xlabel("Scaled age(in years)")
ax.set_ylabel("Tumor size(in cm)")
plt.show()
def train_3_classes():
"""这个有问题,因为使用softmax表示的结果和使用sigmoid的那个模型是不同的,需要重写模型"""
model3 = ModelThreeClasses(opts)
model3.build()
train_x3 = np.load("../data/train_data_X3.npy")
train_y3 = np.load("../data/train_data_Y3.npy")
model_name = "model3s.ckpt"
num_samples = len(train_x3)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(opts["epochs"]):
start_pointer = 0
train_x, train_y = shuffle(train_x3, train_y3)
while start_pointer < num_samples:
end_pointer = start_pointer + opts["batch_size"]
batch_x = train_x[start_pointer:end_pointer]
batch_y = train_y[start_pointer:end_pointer]
start_pointer = end_pointer
feed_dict = {model3.inputs: batch_x, model3.labels: batch_y}
loss_value, glob_step_value, merge_str, _ = sess.run(
fetches=[model3.loss, model3.global_step, model3.merge_op,model3.train_step],
feed_dict=feed_dict)
model3.writer.add_summary(merge_str, global_step=glob_step_value)
print("epoch:%d, step:%d, loss:%.6f"%(epoch, glob_step_value, loss_value))
if (epoch + 1) % 10 == 0:
model3.saver.save(sess, opts["checkpoints_dir"] + model_name, global_step=model3.global_step)
if __name__ == "__main__":
# gen_train_data()
train_3_classes()
|
[
"LinearModel.modules.model3.ModelThreeClasses",
"numpy.eye",
"sklearn.utils.shuffle",
"tensorflow.Session",
"numpy.argmax",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.figure",
"LinearModel.scripts.gen_data.generate_data",
"numpy.random.seed",
"numpy.load",
"numpy.random.randn",
"numpy.save",
"matplotlib.pyplot.show"
] |
[((352, 370), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (366, 370), True, 'import numpy as np\n'), ((444, 471), 'numpy.random.randn', 'np.random.randn', (['fields_num'], {}), '(fields_num)\n', (459, 471), True, 'import numpy as np\n'), ((482, 500), 'numpy.eye', 'np.eye', (['fields_num'], {}), '(fields_num)\n', (488, 500), True, 'import numpy as np\n'), ((612, 712), 'LinearModel.scripts.gen_data.generate_data', 'generate_data', ([], {'num_classes': 'num_classes', 'sample_size': 'sample_size', 'mean': 'mean', 'cov': 'cov', 'diffs': 'diffs'}), '(num_classes=num_classes, sample_size=sample_size, mean=mean,\n cov=cov, diffs=diffs)\n', (625, 712), False, 'from LinearModel.scripts.gen_data import generate_data\n'), ((713, 758), 'numpy.save', 'np.save', (['"""../data/train_data_X3.npy"""', 'train_X'], {}), "('../data/train_data_X3.npy', train_X)\n", (720, 758), True, 'import numpy as np\n'), ((763, 808), 'numpy.save', 'np.save', (['"""../data/train_data_Y3.npy"""', 'train_Y'], {}), "('../data/train_data_Y3.npy', train_Y)\n", (770, 808), True, 'import numpy as np\n'), ((819, 831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (829, 831), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1123, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1246), 'LinearModel.modules.model3.ModelThreeClasses', 'ModelThreeClasses', (['opts'], {}), '(opts)\n', (1240, 1246), False, 'from LinearModel.modules.model3 import ModelThreeClasses\n'), ((1282, 1318), 'numpy.load', 'np.load', (['"""../data/train_data_X3.npy"""'], {}), "('../data/train_data_X3.npy')\n", (1289, 1318), True, 'import numpy as np\n'), ((1334, 1370), 'numpy.load', 'np.load', (['"""../data/train_data_Y3.npy"""'], {}), "('../data/train_data_Y3.npy')\n", (1341, 1370), True, 'import numpy as np\n'), ((1446, 1458), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1456, 1458), True, 'import tensorflow as tf\n'), ((1483, 1516), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1514, 1516), True, 'import tensorflow as tf\n'), ((1646, 1673), 'sklearn.utils.shuffle', 'shuffle', (['train_x3', 'train_y3'], {}), '(train_x3, train_y3)\n', (1653, 1673), False, 'from sklearn.utils import shuffle\n'), ((887, 903), 'numpy.argmax', 'np.argmax', (['label'], {}), '(label)\n', (896, 903), True, 'import numpy as np\n'), ((921, 937), 'numpy.argmax', 'np.argmax', (['label'], {}), '(label)\n', (930, 937), True, 'import numpy as np\n')]
|
import argparse
import os
import sys
import numpy as np
from scipy import misc
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg16, vgg19
from torchvision.utils import save_image
from lib.gradients import GradCam, GuidedBackpropGrad
from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image
from lib.labels import IMAGENET_LABELS
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true', default=False,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--img', type=str, default='',
help='Input image path')
parser.add_argument('--out_dir', type=str, default='./result/cam/',
help='Result directory path')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if args.cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
if args.img:
print('Input image: {}'.format(args.img))
else:
print('Input image: raccoon face (scipy.misc.face())')
print('Output directory: {}'.format(args.out_dir))
print()
return args
def main():
args = parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
target_layer_names = ['35']
target_index = None
# Prepare input image
if args.img:
img = cv2.imread(args.img, 1)
else:
img = misc.face()
img = np.float32(cv2.resize(img, (224, 224))) / 255
preprocessed_img = preprocess_image(img, args.cuda)
model = vgg19(pretrained=True)
if args.cuda:
model.cuda()
# Prediction
output = model(preprocessed_img)
pred_index = np.argmax(output.data.cpu().numpy())
print('Prediction: {}'.format(IMAGENET_LABELS[pred_index]))
# Prepare grad cam
grad_cam = GradCam(
pretrained_model=model,
target_layer_names=target_layer_names,
cuda=args.cuda)
# Compute grad cam
mask = grad_cam(preprocessed_img, target_index)
save_cam_image(img, mask, os.path.join(args.out_dir, 'grad_cam.jpg'))
print('Saved Grad-CAM image')
# Reload preprocessed image
preprocessed_img = preprocess_image(img)
# Compute guided backpropagation
guided_backprop = GuidedBackpropGrad(
pretrained_model=model, cuda=args.cuda)
guided_backprop_saliency = guided_backprop(preprocessed_img, index=target_index)
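    # Guided Grad-CAM: tile the Grad-CAM mask over every saliency channel and gate the guided-backprop gradients with it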
cam_mask = np.zeros(guided_backprop_saliency.shape)
for i in range(guided_backprop_saliency.shape[0]):
cam_mask[i, :, :] = mask
cam_guided_backprop = np.multiply(cam_mask, guided_backprop_saliency)
save_as_gray_image(
cam_guided_backprop,
os.path.join(args.out_dir, 'guided_grad_cam.jpg'))
print('Saved Guided Grad-CAM image')
if __name__ == '__main__':
main()
|
[
"os.path.exists",
"numpy.multiply",
"lib.gradients.GradCam",
"argparse.ArgumentParser",
"torchvision.models.vgg19",
"lib.gradients.GuidedBackpropGrad",
"os.makedirs",
"lib.image_utils.preprocess_image",
"os.path.join",
"numpy.zeros",
"torch.cuda.is_available",
"cv2.resize",
"cv2.imread",
"scipy.misc.face"
] |
[((455, 480), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (478, 480), False, 'import argparse\n'), ((1634, 1666), 'lib.image_utils.preprocess_image', 'preprocess_image', (['img', 'args.cuda'], {}), '(img, args.cuda)\n', (1650, 1666), False, 'from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image\n'), ((1680, 1702), 'torchvision.models.vgg19', 'vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1685, 1702), False, 'from torchvision.models import vgg16, vgg19\n'), ((1954, 2045), 'lib.gradients.GradCam', 'GradCam', ([], {'pretrained_model': 'model', 'target_layer_names': 'target_layer_names', 'cuda': 'args.cuda'}), '(pretrained_model=model, target_layer_names=target_layer_names, cuda\n =args.cuda)\n', (1961, 2045), False, 'from lib.gradients import GradCam, GuidedBackpropGrad\n'), ((2311, 2332), 'lib.image_utils.preprocess_image', 'preprocess_image', (['img'], {}), '(img)\n', (2327, 2332), False, 'from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image\n'), ((2393, 2451), 'lib.gradients.GuidedBackpropGrad', 'GuidedBackpropGrad', ([], {'pretrained_model': 'model', 'cuda': 'args.cuda'}), '(pretrained_model=model, cuda=args.cuda)\n', (2411, 2451), False, 'from lib.gradients import GradCam, GuidedBackpropGrad\n'), ((2562, 2602), 'numpy.zeros', 'np.zeros', (['guided_backprop_saliency.shape'], {}), '(guided_backprop_saliency.shape)\n', (2570, 2602), True, 'import numpy as np\n'), ((2718, 2765), 'numpy.multiply', 'np.multiply', (['cam_mask', 'guided_backprop_saliency'], {}), '(cam_mask, guided_backprop_saliency)\n', (2729, 2765), True, 'import numpy as np\n'), ((902, 927), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (925, 927), False, 'import torch\n'), ((1316, 1344), 'os.path.exists', 'os.path.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (1330, 1344), False, 'import os\n'), ((1354, 1379), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (1365, 1379), False, 'import os\n'), ((1495, 1518), 'cv2.imread', 'cv2.imread', (['args.img', '(1)'], {}), '(args.img, 1)\n', (1505, 1518), False, 'import cv2\n'), ((1543, 1554), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (1552, 1554), False, 'from scipy import misc\n'), ((2177, 2219), 'os.path.join', 'os.path.join', (['args.out_dir', '"""grad_cam.jpg"""'], {}), "(args.out_dir, 'grad_cam.jpg')\n", (2189, 2219), False, 'import os\n'), ((2827, 2876), 'os.path.join', 'os.path.join', (['args.out_dir', '"""guided_grad_cam.jpg"""'], {}), "(args.out_dir, 'guided_grad_cam.jpg')\n", (2839, 2876), False, 'import os\n'), ((1576, 1603), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1586, 1603), False, 'import cv2\n')]
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import os
import unittest
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from lava.lib.dl.slayer.neuron import alif
verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
seed = np.random.randint(1000)
# seed = 590
np.random.seed(seed)
if verbose:
print(f'{seed=}')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
if verbose:
print(
'CUDA is not available in the system. '
'Testing for CPU version only.'
)
device = torch.device('cpu')
# neuron parameters
threshold = 1
current_decay = np.random.random()
voltage_decay = np.random.random()
threshold_decay = np.random.random()
refractory_decay = np.random.random()
# create input
time = torch.FloatTensor(np.arange(200)).to(device)
# expand to (batch, neuron, time) tensor
spike_input = torch.autograd.Variable(
torch.zeros([5, 4, len(time)]), requires_grad=True
).to(device)
spike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1
weight = torch.FloatTensor(
5 * np.random.random(size=spike_input.shape[-1]) - 0.5
).reshape(
[1, 1, spike_input.shape[-1]]
).to(device)
# initialize neuron
neuron = alif.Neuron(
threshold,
threshold_step=0.5 * threshold,
current_decay=current_decay,
voltage_decay=voltage_decay,
threshold_decay=threshold_decay,
refractory_decay=refractory_decay,
persistent_state=True,
).to(device)
quantized_weight = neuron.quantize_8bit(weight)
neuron.debug = True
# get the neuron response for full input
current, voltage, th, ref = neuron.dynamics(quantized_weight * spike_input)
spike = neuron.spike(voltage, th, ref)
class TestALIF(unittest.TestCase):
def test_input_output_range(self):
if verbose:
print(spike_input.sum(), spike_input.flatten())
if verbose:
print(spike.sum(), spike.flatten())
self.assertTrue(
spike_input.sum().item() > 0,
'There was zero input spike. Check the test setting.'
)
self.assertTrue(
spike.sum().item() > 0,
            'There was zero output spike. Check the test setting.'
)
def test_properties(self):
_ = neuron.weight_exponent
_ = neuron.v_th_mant
_ = neuron.cx_current_decay
_ = neuron.cx_voltage_decay
_ = neuron.cx_threshold_decay
_ = neuron.cx_refractory_decay
_ = neuron.scale
_ = neuron.shape
_ = neuron.device
# just looking for errors
self.assertTrue(True, 'Encountered errors.')
def test_batch_consistency(self):
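        # every batch element receives the same input spikes, so the dynamics must be identical across the batch (zero variance)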
spike_var = torch.norm(torch.var(spike, dim=0)).item()
voltage_var = torch.norm(torch.var(voltage, dim=0)).item()
current_var = torch.norm(torch.var(current, dim=0)).item()
th_var = torch.norm(torch.var(th, dim=0)).item()
ref_var = torch.norm(torch.var(ref, dim=0)).item()
self.assertTrue(
spike_var < 1e-5,
f'Spike variation across batch dimension is inconsistent. '
f'Variance was {spike_var}. Expected 0.'
)
self.assertTrue(
current_var < 1e-5,
f'Current variation across batch dimension is inconsistent. '
f'Variance was {current_var}. Expected 0.'
)
self.assertTrue(
voltage_var < 1e-5,
f'Voltage variation across batch dimension is inconsistent. '
f'Variance was {voltage_var}. Expected 0.'
)
self.assertTrue(
th_var < 1e-5,
f'Threshold variation across batch dimension is inconsistent. '
f'Variance was {th_var}. Expected 0.'
)
self.assertTrue(
ref_var < 1e-5,
f'Refractory variation across batch dimension is inconsistent. '
f'Variance was {ref_var}. Expected 0.'
)
def test_integer_states(self):
# there should be no quantization error when
# states are scaled with s_scale
voltage_error = torch.norm(
torch.floor(voltage * neuron.s_scale)
- voltage * neuron.s_scale
)
current_error = torch.norm(
torch.floor(current * neuron.s_scale)
- current * neuron.s_scale
)
th_error = torch.norm(
torch.floor(th * neuron.s_scale)
- th * neuron.s_scale
)
ref_error = torch.norm(
torch.floor(ref * neuron.s_scale)
- ref * neuron.s_scale
)
self.assertTrue(
voltage_error < 1e-5,
f'Voltage calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {voltage_error}'
)
self.assertTrue(
current_error < 1e-5,
f'Current calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {current_error}'
)
self.assertTrue(
th_error < 1e-5,
f'Threshold calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {th_error}'
)
self.assertTrue(
ref_error < 1e-5,
f'Refractory calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {ref_error}'
)
def test_persistent_state(self):
# clear previous persistent state
neuron.current_state *= 0
neuron.voltage_state *= 0
neuron.threshold_state *= 0
neuron.threshold_state += neuron.threshold # stable at th0
neuron.refractory_state *= 0
# break the calculation into two parts: before ind and after ind
ind = int(np.random.random() * (spike_input.shape[-1] - 1)) + 1
current0, voltage0, th0, ref0 = neuron.dynamics(
quantized_weight[..., :ind] * spike_input[..., :ind]
)
spike0 = neuron.spike(voltage0, th0, ref0)
current1, voltage1, th1, ref1 = neuron.dynamics(
quantized_weight[..., ind:] * spike_input[..., ind:]
)
spike1 = neuron.spike(voltage1, th1, ref1)
spike_error = (
torch.norm(spike[..., :ind] - spike0)
+ torch.norm(spike[..., ind:] - spike1)
).item()
voltage_error = (
torch.norm(voltage[..., :ind] - voltage0)
+ torch.norm(voltage[..., ind:] - voltage1)
).item()
current_error = (
torch.norm(current[..., :ind] - current0)
+ torch.norm(current[..., ind:] - current1)
).item()
th_error = (
torch.norm(th[..., :ind] - th0)
+ torch.norm(th[..., ind:] - th1)
).item()
ref_error = (
torch.norm(ref[..., :ind] - ref0)
+ torch.norm(ref[..., ind:] - ref1)
).item()
if verbose:
print(ind)
if spike_error >= 1e-5:
print('Persistent spike states')
print(
spike[0, 0, ind - 10:ind + 10].cpu().data.numpy().tolist()
)
print(spike0[0, 0, -10:].cpu().data.numpy().tolist())
print(spike1[0, 0, :10].cpu().data.numpy().tolist())
if voltage_error >= 1e-5:
print('Persistent voltage states')
print((
neuron.s_scale * voltage[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * voltage0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * voltage1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if current_error >= 1e-5:
print('Persistent current states')
print((
neuron.s_scale * current[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * current0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * current1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if th_error >= 1e-5:
print('Persistent threshold states')
print((
neuron.s_scale * th[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * th0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * th1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if ref_error >= 1e-5:
print('Persistent refractory states')
print((
neuron.s_scale * ref[0, 0, ind - 10:ind + 10]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * ref0[0, 0, -10:]
).cpu().data.numpy().astype(int).tolist())
print((
neuron.s_scale * ref1[0, 0, :10]
).cpu().data.numpy().astype(int).tolist())
if verbose:
if bool(os.environ.get('DISPLAY', None)):
plt.figure()
plt.plot(
time.cpu().data.numpy(),
current[0, 0].cpu().data.numpy(),
label='current'
)
plt.plot(
time[:ind].cpu().data.numpy(),
current0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
current1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
voltage[0, 0].cpu().data.numpy(),
label='voltage'
)
plt.plot(
time[:ind].cpu().data.numpy(),
voltage0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
voltage1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.plot(
time[spike[0, 0] > 0].cpu().data.numpy(),
0 * spike[0, 0][spike[0, 0] > 0].cpu().data.numpy(),
'.', markersize=12, label='spike'
)
plt.plot(
time[:ind][spike0[0, 0] > 0].cpu().data.numpy(),
0 * spike0[0, 0][spike0[0, 0] > 0].cpu().data.numpy(),
'.', label=':ind'
)
plt.plot(
time[ind:][spike1[0, 0] > 0].cpu().data.numpy(),
0 * spike1[0, 0][spike1[0, 0] > 0].cpu().data.numpy(),
'.', label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
th[0, 0].cpu().data.numpy(),
label='threshold'
)
plt.plot(
time[:ind].cpu().data.numpy(),
th0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
th1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.figure()
plt.plot(
time.cpu().data.numpy(),
ref[0, 0].cpu().data.numpy(),
label='refractory'
)
plt.plot(
time[:ind].cpu().data.numpy(),
ref0[0, 0].cpu().data.numpy(),
label=':ind'
)
plt.plot(
time[ind:].cpu().data.numpy(),
ref1[0, 0].cpu().data.numpy(),
label='ind:'
)
plt.xlabel('time')
plt.legend()
plt.show()
self.assertTrue(
spike_error < 1e-5,
f'Persistent state has errors in spike calculation. '
f'Error was {spike_error}.'
f'{seed=}'
)
self.assertTrue(
voltage_error < 1e-5,
f'Persistent state has errors in voltage calculation. '
f'Error was {voltage_error}.'
f'{seed=}'
)
self.assertTrue(
current_error < 1e-5,
f'Persistent state has errors in current calculation. '
f'Error was {current_error}.'
f'{seed=}'
)
self.assertTrue(
th_error < 1e-5,
f'Persistent state has errors in threshold calculation. '
f'Error was {th_error}.'
f'{seed=}'
)
self.assertTrue(
ref_error < 1e-5,
f'Persistent state has errors in refractory calculation. '
f'Error was {ref_error}.'
f'{seed=}'
)
def test_backward(self):
spike_target = spike.clone().detach()
current_target = current.clone().detach()
voltage_target = voltage.clone().detach()
spike_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] = 1
current_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] -= 1
voltage_target[
...,
np.random.randint(spike_input.shape[-1], size=5)
] -= -1
loss = F.mse_loss(spike, spike_target) \
+ F.mse_loss(current, current_target) \
+ F.mse_loss(voltage, voltage_target)
loss.backward()
# just looking for errors
self.assertTrue(True, 'Encountered errors.')
def test_graded_spikes(self):
# TODO: after further study of network behavior with graded spikes.
pass
|
[
"lava.lib.dl.slayer.neuron.alif.Neuron",
"torch.nn.functional.mse_loss",
"matplotlib.pyplot.show",
"numpy.random.random",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"torch.floor",
"os.environ.get",
"numpy.random.randint",
"torch.cuda.is_available",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"torch.norm",
"torch.var",
"numpy.arange",
"torch.device"
] |
[((346, 369), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (363, 369), True, 'import numpy as np\n'), ((383, 403), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (397, 403), True, 'import numpy as np\n'), ((442, 467), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (465, 467), False, 'import torch\n'), ((730, 748), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (746, 748), True, 'import numpy as np\n'), ((765, 783), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (781, 783), True, 'import numpy as np\n'), ((802, 820), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (818, 820), True, 'import numpy as np\n'), ((840, 858), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (856, 858), True, 'import numpy as np\n'), ((482, 502), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (494, 502), False, 'import torch\n'), ((659, 678), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (671, 678), False, 'import torch\n'), ((1097, 1145), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (1114, 1145), True, 'import numpy as np\n'), ((1327, 1540), 'lava.lib.dl.slayer.neuron.alif.Neuron', 'alif.Neuron', (['threshold'], {'threshold_step': '(0.5 * threshold)', 'current_decay': 'current_decay', 'voltage_decay': 'voltage_decay', 'threshold_decay': 'threshold_decay', 'refractory_decay': 'refractory_decay', 'persistent_state': '(True)'}), '(threshold, threshold_step=0.5 * threshold, current_decay=\n current_decay, voltage_decay=voltage_decay, threshold_decay=\n threshold_decay, refractory_decay=refractory_decay, persistent_state=True)\n', (1338, 1540), False, 'from lava.lib.dl.slayer.neuron import alif\n'), ((900, 914), 'numpy.arange', 'np.arange', (['(200)'], {}), '(200)\n', (909, 914), True, 'import numpy as np\n'), ((14489, 14524), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['voltage', 'voltage_target'], {}), '(voltage, voltage_target)\n', (14499, 14524), True, 'import torch.nn.functional as F\n'), ((4204, 4241), 'torch.floor', 'torch.floor', (['(voltage * neuron.s_scale)'], {}), '(voltage * neuron.s_scale)\n', (4215, 4241), False, 'import torch\n'), ((4339, 4376), 'torch.floor', 'torch.floor', (['(current * neuron.s_scale)'], {}), '(current * neuron.s_scale)\n', (4350, 4376), False, 'import torch\n'), ((4469, 4501), 'torch.floor', 'torch.floor', (['(th * neuron.s_scale)'], {}), '(th * neuron.s_scale)\n', (4480, 4501), False, 'import torch\n'), ((4590, 4623), 'torch.floor', 'torch.floor', (['(ref * neuron.s_scale)'], {}), '(ref * neuron.s_scale)\n', (4601, 4623), False, 'import torch\n'), ((9575, 9606), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', 'None'], {}), "('DISPLAY', None)\n", (9589, 9606), False, 'import os\n'), ((9625, 9637), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9635, 9637), True, 'import matplotlib.pyplot as plt\n'), ((10199, 10217), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (10209, 10217), True, 'import matplotlib.pyplot as plt\n'), ((10234, 10246), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10244, 10246), True, 'import matplotlib.pyplot as plt\n'), ((10264, 10276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10274, 10276), True, 'import matplotlib.pyplot as plt\n'), ((11524, 11542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), 
"('time')\n", (11534, 11542), True, 'import matplotlib.pyplot as plt\n'), ((11559, 11571), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11569, 11571), True, 'import matplotlib.pyplot as plt\n'), ((11589, 11601), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11599, 11601), True, 'import matplotlib.pyplot as plt\n'), ((12150, 12168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (12160, 12168), True, 'import matplotlib.pyplot as plt\n'), ((12185, 12197), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12195, 12197), True, 'import matplotlib.pyplot as plt\n'), ((12215, 12227), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12225, 12227), True, 'import matplotlib.pyplot as plt\n'), ((12780, 12798), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (12790, 12798), True, 'import matplotlib.pyplot as plt\n'), ((12815, 12827), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12825, 12827), True, 'import matplotlib.pyplot as plt\n'), ((12844, 12854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12852, 12854), True, 'import matplotlib.pyplot as plt\n'), ((14075, 14123), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (14092, 14123), True, 'import numpy as np\n'), ((14191, 14239), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (14208, 14239), True, 'import numpy as np\n'), ((14308, 14356), 'numpy.random.randint', 'np.random.randint', (['spike_input.shape[-1]'], {'size': '(5)'}), '(spike_input.shape[-1], size=5)\n', (14325, 14356), True, 'import numpy as np\n'), ((14389, 14420), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['spike', 'spike_target'], {}), '(spike, spike_target)\n', (14399, 14420), True, 'import torch.nn.functional as F\n'), ((14437, 14472), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['current', 'current_target'], {}), '(current, current_target)\n', (14447, 14472), True, 'import torch.nn.functional as F\n'), ((2783, 2806), 'torch.var', 'torch.var', (['spike'], {'dim': '(0)'}), '(spike, dim=0)\n', (2792, 2806), False, 'import torch\n'), ((2848, 2873), 'torch.var', 'torch.var', (['voltage'], {'dim': '(0)'}), '(voltage, dim=0)\n', (2857, 2873), False, 'import torch\n'), ((2915, 2940), 'torch.var', 'torch.var', (['current'], {'dim': '(0)'}), '(current, dim=0)\n', (2924, 2940), False, 'import torch\n'), ((2977, 2997), 'torch.var', 'torch.var', (['th'], {'dim': '(0)'}), '(th, dim=0)\n', (2986, 2997), False, 'import torch\n'), ((3035, 3056), 'torch.var', 'torch.var', (['ref'], {'dim': '(0)'}), '(ref, dim=0)\n', (3044, 3056), False, 'import torch\n'), ((5954, 5972), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5970, 5972), True, 'import numpy as np\n'), ((6411, 6448), 'torch.norm', 'torch.norm', (['(spike[..., :ind] - spike0)'], {}), '(spike[..., :ind] - spike0)\n', (6421, 6448), False, 'import torch\n'), ((6463, 6500), 'torch.norm', 'torch.norm', (['(spike[..., ind:] - spike1)'], {}), '(spike[..., ind:] - spike1)\n', (6473, 6500), False, 'import torch\n'), ((6556, 6597), 'torch.norm', 'torch.norm', (['(voltage[..., :ind] - voltage0)'], {}), '(voltage[..., :ind] - voltage0)\n', (6566, 6597), False, 'import torch\n'), ((6612, 6653), 'torch.norm', 'torch.norm', (['(voltage[..., ind:] - voltage1)'], {}), '(voltage[..., ind:] - voltage1)\n', (6622, 6653), False, 'import 
torch\n'), ((6709, 6750), 'torch.norm', 'torch.norm', (['(current[..., :ind] - current0)'], {}), '(current[..., :ind] - current0)\n', (6719, 6750), False, 'import torch\n'), ((6765, 6806), 'torch.norm', 'torch.norm', (['(current[..., ind:] - current1)'], {}), '(current[..., ind:] - current1)\n', (6775, 6806), False, 'import torch\n'), ((6857, 6888), 'torch.norm', 'torch.norm', (['(th[..., :ind] - th0)'], {}), '(th[..., :ind] - th0)\n', (6867, 6888), False, 'import torch\n'), ((6903, 6934), 'torch.norm', 'torch.norm', (['(th[..., ind:] - th1)'], {}), '(th[..., ind:] - th1)\n', (6913, 6934), False, 'import torch\n'), ((6986, 7019), 'torch.norm', 'torch.norm', (['(ref[..., :ind] - ref0)'], {}), '(ref[..., :ind] - ref0)\n', (6996, 7019), False, 'import torch\n'), ((7034, 7067), 'torch.norm', 'torch.norm', (['(ref[..., ind:] - ref1)'], {}), '(ref[..., ind:] - ref1)\n', (7044, 7067), False, 'import torch\n'), ((1187, 1231), 'numpy.random.random', 'np.random.random', ([], {'size': 'spike_input.shape[-1]'}), '(size=spike_input.shape[-1])\n', (1203, 1231), True, 'import numpy as np\n')]
|
import numpy as np
EXPERIMENT_NAME = 'EXP_12'
CORPUS_PATH = '/home/dddhiraj/Documents/stuff/data/wiki_en.txt'
TRAINING_WINDOW = 5
CONTEXT_DIMENSION = 64
LEANING_RATE = 1
DROPOUT = 0.05
CONTEXT_DECAY = 1 - TRAINING_WINDOW ** -0.5
CONTRASTIVE_WEIGHT = 1  # 0.1
NEGATIVE_SAMPLE_SIZE = TRAINING_WINDOW ** 2
CONEXT_INERTIA = np.sqrt(TRAINING_WINDOW)
THREADS = 6
CHUNK_SIZE = 5000
DB = 'REDIS'
if DB == 'MONGO':
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
    collection = mydb.train_1  # neighbour_aware_context_initilization_train_window_8
if DB == 'REDIS':
import redis
    collection = redis.Redis(db=1)  # 11
    key_collection = redis.Redis(db=2)  # 12
#import redisai
# collection = redisai.Client(db=14)
# key_collection = redisai.Client(db=15)
'''
Experiment details:
Trained on wiki data with 51 million words.
'''
|
[
"pymongo.MongoClient",
"numpy.sqrt",
"redis.Redis"
] |
[((367, 391), 'numpy.sqrt', 'np.sqrt', (['TRAINING_WINDOW'], {}), '(TRAINING_WINDOW)\n', (374, 391), True, 'import numpy as np\n'), ((496, 544), 'pymongo.MongoClient', 'pymongo.MongoClient', (['"""mongodb://localhost:27017"""'], {}), "('mongodb://localhost:27017')\n", (515, 544), False, 'import pymongo\n'), ((721, 738), 'redis.Redis', 'redis.Redis', ([], {'db': '(1)'}), '(db=1)\n', (732, 738), False, 'import redis\n'), ((763, 780), 'redis.Redis', 'redis.Redis', ([], {'db': '(2)'}), '(db=2)\n', (774, 780), False, 'import redis\n')]
|
import argparse, pdb
import gym
import numpy as np
import os
import pickle
import random
import torch
import scipy.misc
from gym.envs.registration import register
parser = argparse.ArgumentParser()
parser.add_argument('-display', type=int, default=0)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-lanes', type=int, default=3)
parser.add_argument('-traffic_rate', type=int, default=15)
parser.add_argument('-state_image', type=int, default=1)
parser.add_argument('-save_images', type=int, default=0)
parser.add_argument('-store', type=int, default=1)
parser.add_argument('-data_dir', type=str, default='traffic-data/state-action-cost/')
parser.add_argument('-fps', type=int, default=30)
parser.add_argument('-time_slot', type=int, default=0)
parser.add_argument('-map', type=str, default='i80', choices={'ai', 'i80', 'us101', 'lanker', 'peach'})
parser.add_argument('-delta_t', type=float, default=0.1)
opt = parser.parse_args()
opt.state_image = (opt.state_image == 1)
opt.store = (opt.store == 1)
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
os.system("mkdir -p " + opt.data_dir)
kwargs = dict(
display=opt.display,
state_image=opt.state_image,
store=opt.store,
fps=opt.fps,
nb_lanes=opt.lanes,
traffic_rate=opt.traffic_rate,
data_dir=opt.data_dir,
delta_t=opt.delta_t,
)
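# register each map as a separate gym environment so it can be instantiated by name via gym.make()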
register(
id='Traffic-v0',
entry_point='traffic_gym:Simulator',
kwargs=kwargs
)
register(
id='I-80-v0',
entry_point='map_i80:I80',
kwargs=kwargs
)
gym.envs.registration.register(
id='US-101-v0',
entry_point='map_us101:US101',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Lankershim-v0',
entry_point='map_lanker:Lankershim',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Peachtree-v0',
entry_point='map_peach:Peachtree',
kwargs=kwargs,
)
env_names = {
'ai': 'Traffic-v0',
'i80': 'I-80-v0',
'us101': 'US-101-v0',
'lanker': 'Lankershim-v0',
'peach': 'Peachtree-v0',
}
print('Building the environment (loading data, if any)')
env = gym.make(env_names[opt.map])
env.reset(frame=0, time_slot=opt.time_slot)
done = False
while not done:
observation, reward, done, info = env.step()
env.render()
print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')
|
[
"torch.manual_seed",
"argparse.ArgumentParser",
"random.seed",
"gym.envs.registration.register",
"numpy.random.seed",
"os.system",
"gym.make"
] |
[((173, 198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (196, 198), False, 'import argparse, pdb\n'), ((1027, 1048), 'random.seed', 'random.seed', (['opt.seed'], {}), '(opt.seed)\n', (1038, 1048), False, 'import random\n'), ((1049, 1073), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (1063, 1073), True, 'import numpy as np\n'), ((1074, 1101), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (1091, 1101), False, 'import torch\n'), ((1103, 1140), 'os.system', 'os.system', (["('mkdir -p ' + opt.data_dir)"], {}), "('mkdir -p ' + opt.data_dir)\n", (1112, 1140), False, 'import os\n'), ((1367, 1444), 'gym.envs.registration.register', 'register', ([], {'id': '"""Traffic-v0"""', 'entry_point': '"""traffic_gym:Simulator"""', 'kwargs': 'kwargs'}), "(id='Traffic-v0', entry_point='traffic_gym:Simulator', kwargs=kwargs)\n", (1375, 1444), False, 'from gym.envs.registration import register\n'), ((1460, 1524), 'gym.envs.registration.register', 'register', ([], {'id': '"""I-80-v0"""', 'entry_point': '"""map_i80:I80"""', 'kwargs': 'kwargs'}), "(id='I-80-v0', entry_point='map_i80:I80', kwargs=kwargs)\n", (1468, 1524), False, 'from gym.envs.registration import register\n'), ((1540, 1637), 'gym.envs.registration.register', 'gym.envs.registration.register', ([], {'id': '"""US-101-v0"""', 'entry_point': '"""map_us101:US101"""', 'kwargs': 'kwargs'}), "(id='US-101-v0', entry_point=\n 'map_us101:US101', kwargs=kwargs)\n", (1570, 1637), False, 'import gym\n'), ((1649, 1756), 'gym.envs.registration.register', 'gym.envs.registration.register', ([], {'id': '"""Lankershim-v0"""', 'entry_point': '"""map_lanker:Lankershim"""', 'kwargs': 'kwargs'}), "(id='Lankershim-v0', entry_point=\n 'map_lanker:Lankershim', kwargs=kwargs)\n", (1679, 1756), False, 'import gym\n'), ((1768, 1872), 'gym.envs.registration.register', 'gym.envs.registration.register', ([], {'id': '"""Peachtree-v0"""', 'entry_point': '"""map_peach:Peachtree"""', 'kwargs': 'kwargs'}), "(id='Peachtree-v0', entry_point=\n 'map_peach:Peachtree', kwargs=kwargs)\n", (1798, 1872), False, 'import gym\n'), ((2096, 2124), 'gym.make', 'gym.make', (['env_names[opt.map]'], {}), '(env_names[opt.map])\n', (2104, 2124), False, 'import gym\n')]
|
import matplotlib.pylab as plt
import numpy as np
import random
from scipy.ndimage import gaussian_filter
mu = 9
N = 50
k = 10
eta = 10
sigma = 2
p0 = 0.5
inverse_random = False
L = range(N*N)
Q = np.zeros((N*mu,N*mu))
for o in range(mu*mu):
print(o)
F = 1000*k
a = np.ones((N,N))
for k_ in range(1000):
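        # sample k growth sites, weighted by the current intensity field a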
linear_idx = random.choices(L, weights=a.ravel()/float(a.sum()), k = k)
x, y = np.unravel_index(linear_idx, a.shape)
x += np.random.randint(-eta,eta,k)
y += np.random.randint(-eta,eta,k)
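        # drop walkers that jumped outside the N x N grid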
cond = (x<0) | (x>=N) | (y<0) | (y>=N)
x_ = np.delete(x, np.where(cond))
y_ = np.delete(y, np.where(cond))
a[x_,y_]+=F
    a = gaussian_filter(a, sigma=sigma)
if np.random.random()>p0 and inverse_random:
a = a.max()-a
Mx,My = np.unravel_index(o,(mu,mu))
Q[Mx*N:(Mx+1)*N,My*N:(My+1)*N] = a
fig,ax = plt.subplots(1,1,figsize = (20,20))
plt.imshow(Q, interpolation='nearest')
plt.axis('off')
|
[
"matplotlib.pylab.axis",
"matplotlib.pylab.subplots",
"numpy.ones",
"numpy.where",
"numpy.random.random",
"numpy.zeros",
"matplotlib.pylab.imshow",
"numpy.random.randint",
"numpy.unravel_index",
"scipy.ndimage.gaussian_filter"
] |
[((197, 223), 'numpy.zeros', 'np.zeros', (['(N * mu, N * mu)'], {}), '((N * mu, N * mu))\n', (205, 223), True, 'import numpy as np\n'), ((908, 944), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (920, 944), True, 'import matplotlib.pylab as plt\n'), ((944, 982), 'matplotlib.pylab.imshow', 'plt.imshow', (['Q'], {'interpolation': '"""nearest"""'}), "(Q, interpolation='nearest')\n", (954, 982), True, 'import matplotlib.pylab as plt\n'), ((983, 998), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (991, 998), True, 'import matplotlib.pylab as plt\n'), ((278, 293), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (285, 293), True, 'import numpy as np\n'), ((831, 860), 'numpy.unravel_index', 'np.unravel_index', (['o', '(mu, mu)'], {}), '(o, (mu, mu))\n', (847, 860), True, 'import numpy as np\n'), ((420, 457), 'numpy.unravel_index', 'np.unravel_index', (['linear_idx', 'a.shape'], {}), '(linear_idx, a.shape)\n', (436, 457), True, 'import numpy as np\n'), ((472, 503), 'numpy.random.randint', 'np.random.randint', (['(-eta)', 'eta', 'k'], {}), '(-eta, eta, k)\n', (489, 503), True, 'import numpy as np\n'), ((515, 546), 'numpy.random.randint', 'np.random.randint', (['(-eta)', 'eta', 'k'], {}), '(-eta, eta, k)\n', (532, 546), True, 'import numpy as np\n'), ((708, 739), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['a'], {'sigma': 'sigma'}), '(a, sigma=sigma)\n', (723, 739), False, 'from scipy.ndimage import gaussian_filter\n'), ((618, 632), 'numpy.where', 'np.where', (['cond'], {}), '(cond)\n', (626, 632), True, 'import numpy as np\n'), ((660, 674), 'numpy.where', 'np.where', (['cond'], {}), '(cond)\n', (668, 674), True, 'import numpy as np\n'), ((751, 769), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (767, 769), True, 'import numpy as np\n')]
|
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
import tensorflow as tf
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').disabled = True
class SimpleDqnNpcV3:
"Klasa implementująca agenta DQN opartego o prostą sieć neuronową"
def __init__(self, num_of_inputs, num_of_outputs):
"""
        num_of_inputs - length of the vector fed to the neural network
        num_of_outputs - number of outputs of the neural network
"""
self._num_of_inputs = num_of_inputs
self._num_of_outputs = num_of_outputs
self._exploration_rate = 1.0
self._exploration_rate_min = 0.1
self._exploration_rate_decay = 0.997
self._discout_rate = 0.95
self.memory = deque(maxlen=4096)
self._init_model()
def _init_model(self):
"""
        Initializes the neural network model.
        We chose (in our opinion) the simplest parameters and shape.
"""
self._model = Sequential()
self._model.add(Dense(5 * self._num_of_inputs, input_dim=self._num_of_inputs, activation='relu'))
self._model.add(Dropout(0.15))
self._model.add(Dense(4 * self._num_of_inputs, activation='sigmoid'))
self._model.add(Dropout(0.15))
self._model.add(Dense(self._num_of_outputs, activation='linear'))
self._model.compile(optimizer=Adam(), loss='mean_squared_error')
def act(self, state):
"""Przewiduje i zwraca akcję, którą należy wykonać"""
if np.random.rand() <= self._exploration_rate:
return random.randrange(self._num_of_outputs)
act_values = self._model.predict(state)
return np.argmax(act_values[0])
def retain(self, current_state, taken_action, gained_reward, next_state, is_done):
"""Zapisuje dyn przypadku w pamięci agenta"""
self.memory.append((current_state, taken_action, gained_reward, next_state, is_done))
def replay(self, batch_size):
"""
        Retrains the neural network on a random sample drawn from its memory
        batch_size - size of the memory sample
"""
batch = random.sample(self.memory, batch_size)
for current_state, taken_action, gained_reward, next_state, is_done in batch:
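            # Bellman target: the observed reward plus the discounted best predicted value of the next state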
next_act_best_profit = gained_reward
if not is_done:
future_act_profits = self._model.predict(next_state)
next_act_best_profit = gained_reward + self._discout_rate * np.amax(future_act_profits[0])
current_act_profits = self._model.predict(current_state)
            current_act_profits[0][taken_action] = next_act_best_profit
with tf.device('/device:GPU:0'):
self._model.fit(x=current_state, y=current_act_profits, epochs=1, verbose=0)
if self._exploration_rate > self._exploration_rate_min:
self._exploration_rate *= self._exploration_rate_decay
def load(self, model_path):
"""Wczytuje model z pamięci"""
self._model.load_weights(model_path)
def save(self, model_path):
"""Zapisuje modele do pamięci"""
self._model.save_weights(model_path)
NUM_OF_AGENTS = 4
NUM_OF_EPISODES = 75
FRAMES_PER_EPISODE = 1000
BATCH_SIZE = 16
GAME_ID = "LunarLander-v2"
if __name__ == "__main__":
with tf.device('/device:CPU:0'):
game = gym.make(GAME_ID)
num_of_actions = game.action_space.n
observation_size = game.observation_space.shape[0]
npc = SimpleDqnNpcV3(observation_size, num_of_actions)
is_done = False
avgs = []
for model in range(NUM_OF_AGENTS):
scores = []
for episode in range(NUM_OF_EPISODES):
score = 0
current_state = np.reshape(game.reset(), [1, observation_size])
for frame in range(FRAMES_PER_EPISODE):
# game.render()
action = npc.act(current_state)
new_state, gained_reward, is_done, info = game.step(action)
new_state = np.reshape(new_state, [1, observation_size])
npc.retain(current_state, action, gained_reward, new_state, is_done)
score += gained_reward
current_state = new_state
if len(npc.memory) > BATCH_SIZE:
npc.replay(BATCH_SIZE)
if is_done:
print("episode: {0}/{1}; result: {2}; e: {3} used memory: {4}/{5}; time: {5}"
.format(episode, NUM_OF_EPISODES, score, npc._exploration_rate, len(npc.memory), npc.memory.maxlen, frame))
break
scores.append(score)
if not is_done:
print("episode: {0}/{1}; result: {2}; used memory: {3}/{4}; time: {5}"
.format(episode, NUM_OF_EPISODES, score, len(npc.memory), npc.memory.maxlen, frame))
npc.save("evo_dqn_" + str(model) + ".h5")
avgs.append(sum(scores) / len(scores))
for i, avg in enumerate(avgs):
print("Model {} has avarage: {}".format(i, avg))
print("Overall avg: {}".format(sum(avgs) / len(avgs)))
|
[
"logging.getLogger",
"tensorflow.device",
"random.sample",
"keras.optimizers.Adam",
"collections.deque",
"numpy.random.rand",
"numpy.reshape",
"random.randrange",
"numpy.amax",
"numpy.argmax",
"keras.models.Sequential",
"keras.layers.Dense",
"keras.layers.Dropout",
"gym.make"
] |
[((275, 306), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (292, 306), False, 'import logging\n'), ((903, 921), 'collections.deque', 'deque', ([], {'maxlen': '(4096)'}), '(maxlen=4096)\n', (908, 921), False, 'from collections import deque\n'), ((1141, 1153), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1151, 1153), False, 'from keras.models import Sequential\n'), ((1829, 1853), 'numpy.argmax', 'np.argmax', (['act_values[0]'], {}), '(act_values[0])\n', (1838, 1853), True, 'import numpy as np\n'), ((2283, 2321), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (2296, 2321), False, 'import random\n'), ((3493, 3519), 'tensorflow.device', 'tf.device', (['"""/device:CPU:0"""'], {}), "('/device:CPU:0')\n", (3502, 3519), True, 'import tensorflow as tf\n'), ((3536, 3553), 'gym.make', 'gym.make', (['GAME_ID'], {}), '(GAME_ID)\n', (3544, 3553), False, 'import gym\n'), ((1178, 1263), 'keras.layers.Dense', 'Dense', (['(5 * self._num_of_inputs)'], {'input_dim': 'self._num_of_inputs', 'activation': '"""relu"""'}), "(5 * self._num_of_inputs, input_dim=self._num_of_inputs, activation='relu'\n )\n", (1183, 1263), False, 'from keras.layers import Dense, Dropout\n'), ((1284, 1297), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (1291, 1297), False, 'from keras.layers import Dense, Dropout\n'), ((1323, 1375), 'keras.layers.Dense', 'Dense', (['(4 * self._num_of_inputs)'], {'activation': '"""sigmoid"""'}), "(4 * self._num_of_inputs, activation='sigmoid')\n", (1328, 1375), False, 'from keras.layers import Dense, Dropout\n'), ((1401, 1414), 'keras.layers.Dropout', 'Dropout', (['(0.15)'], {}), '(0.15)\n', (1408, 1414), False, 'from keras.layers import Dense, Dropout\n'), ((1440, 1488), 'keras.layers.Dense', 'Dense', (['self._num_of_outputs'], {'activation': '"""linear"""'}), "(self._num_of_outputs, activation='linear')\n", (1445, 1488), False, 'from keras.layers import Dense, Dropout\n'), ((1664, 1680), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1678, 1680), True, 'import numpy as np\n'), ((1727, 1765), 'random.randrange', 'random.randrange', (['self._num_of_outputs'], {}), '(self._num_of_outputs)\n', (1743, 1765), False, 'import random\n'), ((1528, 1534), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (1532, 1534), False, 'from keras.optimizers import Adam\n'), ((2856, 2882), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (2865, 2882), True, 'import tensorflow as tf\n'), ((4243, 4287), 'numpy.reshape', 'np.reshape', (['new_state', '[1, observation_size]'], {}), '(new_state, [1, observation_size])\n', (4253, 4287), True, 'import numpy as np\n'), ((2630, 2660), 'numpy.amax', 'np.amax', (['future_act_profits[0]'], {}), '(future_act_profits[0])\n', (2637, 2660), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _thread
import sys
import time
from math import exp
from random import random
from typing import List, Tuple, Set
from scipy import spatial
import numpy as np
import torch
from torch import nn
from torch.optim import optimizer
from torch.utils import tensorboard
from torch.utils.data import DataLoader
import torch.nn.functional as F
from dataloader import BidirectionalOneShotIterator
from dataloader import TrainDataset
from dataloader import TestDataset
import tensorflow as tf
import tensorboard as tb
import logging
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
torch.random.manual_seed(123456)
# region model
class KGEModel(nn.Module):
def __init__(self, train_seeds, nentity, nrelation, nvalue, hidden_dim, gamma, double_entity_embedding=False,
double_relation_embedding=False):
super(KGEModel, self).__init__()
# self.model_name = model_name
self.nentity = nentity
self.nrelation = nrelation
self.nvalue = nvalue
self.hidden_dim = hidden_dim
self.epsilon = 2.0
self.gamma = nn.Parameter(
torch.Tensor([gamma]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
requires_grad=False
)
self.entity_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
self.relation_dim = hidden_dim * 2 if double_relation_embedding else hidden_dim
self.value_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
entity_weight = torch.zeros(nentity, self.entity_dim)
nn.init.uniform_(
tensor=entity_weight,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
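        # give each pair of aligned seed entities identical initial embeddings so the two KGs start from shared points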
for left_entity, right_entity in train_seeds:
entity_weight[left_entity] = entity_weight[right_entity]
self.entity_embedding = nn.Parameter(entity_weight)
# nn.init.normal_(self.entity_embedding)
self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))
# nn.init.normal_(self.relation_embedding)
nn.init.uniform_(
tensor=self.relation_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
self.value_embedding = nn.Parameter(torch.zeros(nvalue, self.value_dim))
# nn.init.normal_(self.value_embedding)
nn.init.uniform_(
tensor=self.value_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
def forward(self, sample, mode='single'):
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
head = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=sample[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.value_embedding,
dim=0,
index=sample[:, 2]
).unsqueeze(1)
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part.view(-1)
).view(batch_size, negative_sample_size, -1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=tail_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.value_embedding,
dim=0,
index=tail_part[:, 2]
).unsqueeze(1)
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=head_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.value_embedding,
dim=0,
index=tail_part.view(-1)
).view(batch_size, negative_sample_size, -1)
else:
raise ValueError('mode %s not supported' % mode)
score = self.TransE(head, relation, tail, mode)
return score
def TransE(self, head, relation, tail, mode):
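        # TransE score: gamma - ||h + r - t||_1, so a smaller translation error gives a higher score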
if mode == 'head-batch':
score = head + (relation - tail)
else:
score = (head + relation) - tail
score = self.gamma.item() - torch.norm(score, p=1, dim=2)
return score
def RotatE(self, head, relation, tail, mode):
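        # RotatE: embeddings are split into real/imaginary halves and the relation acts as a rotation in the complex plane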
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation / (self.embedding_range.item() / pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
score = score.norm(dim=0)
score = self.gamma.item() - score.sum(dim=2)
return score
@staticmethod
def train_step(model, optimizer, positive_sample, negative_sample, subsampling_weight, mode, device="cuda"):
model.train()
optimizer.zero_grad()
positive_sample = positive_sample.to(device)
negative_sample = negative_sample.to(device)
subsampling_weight = subsampling_weight.to(device)
negative_score = model((positive_sample, negative_sample), mode=mode)
negative_score = F.logsigmoid(-negative_score).mean(dim=1)
positive_score = model(positive_sample)
positive_score = F.logsigmoid(positive_score).squeeze(dim=1)
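        # the per-sample log-sigmoid scores are combined as a weighted average using the subsampling weights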
positive_sample_loss = - (subsampling_weight * positive_score).sum() / subsampling_weight.sum()
negative_sample_loss = - (subsampling_weight * negative_score).sum() / subsampling_weight.sum()
loss = (positive_sample_loss + negative_sample_loss) / 2
loss.backward()
optimizer.step()
return loss.item()
# endregion
# region logging
def get_logger(filename):
"""
Return instance of logger
    Unified log formatting
"""
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
logging.basicConfig(format='%(message)s', level=logging.INFO)
handler = logging.FileHandler(filename)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger
logger = get_logger("./train.log")
# endregion
# region progress bar
class Progbar(object):
"""
Progbar class inspired by keras
    Progress bar
```
progbar = Progbar(max_step=100)
for i in range(100):
progbar.update(i, [("step", i), ("next", i+1)])
```
"""
def __init__(self, max_step, width=30):
self.max_step = max_step
self.width = width
self.last_width = 0
self.sum_values = {}
self.start = time.time()
self.last_step = 0
self.info = ""
self.bar = ""
def _update_values(self, curr_step, values):
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (curr_step - self.last_step), curr_step - self.last_step]
else:
self.sum_values[k][0] += v * (curr_step - self.last_step)
self.sum_values[k][1] += (curr_step - self.last_step)
def _write_bar(self, curr_step):
last_width = self.last_width
sys.stdout.write("\b" * last_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.max_step))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (curr_step, self.max_step)
prog = float(curr_step) / self.max_step
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if curr_step < self.max_step:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
return bar
def _get_eta(self, curr_step):
now = time.time()
if curr_step:
time_per_unit = (now - self.start) / curr_step
else:
time_per_unit = 0
eta = time_per_unit * (self.max_step - curr_step)
if curr_step < self.max_step:
info = ' - ETA: %ds' % eta
else:
info = ' - %ds' % (now - self.start)
return info
def _get_values_sum(self):
info = ""
for name, value in self.sum_values.items():
info += ' - %s: %.6f' % (name, value[0] / max(1, value[1]))
return info
def _write_info(self, curr_step):
info = ""
info += self._get_eta(curr_step)
info += self._get_values_sum()
sys.stdout.write(info)
return info
def _update_width(self, curr_step):
curr_width = len(self.bar) + len(self.info)
if curr_width < self.last_width:
sys.stdout.write(" " * (self.last_width - curr_width))
if curr_step >= self.max_step:
sys.stdout.write("\n")
sys.stdout.flush()
self.last_width = curr_width
def update(self, curr_step, values):
"""Updates the progress bar.
Args:
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
"""
self._update_values(curr_step, values)
self.bar = self._write_bar(curr_step)
self.info = self._write_info(curr_step)
self._update_width(curr_step)
self.last_step = curr_step
# endregion
# region evaluate aligned entities
class Tester:
    left_ids: List[int] = []  # left entity ids of the aligned pairs in test_seeds
    right_ids: List[int] = []  # right entity ids of the aligned pairs in test_seeds
    seeds: List[Tuple[int, int]] = []  # (m, 2) aligned entity pairs (a, b): a is the left entity, b the right entity
train_seeds: List[Tuple[int, int]] = [] # (0.8m, 2)
test_seeds: List[Tuple[int, int]] = [] # (0.2m, 2)
linkEmbedding = []
kg1E = []
kg2E = []
EA_results = {}
def read_entity_align_list(self, entity_align_file_path):
ret = []
with open(entity_align_file_path, encoding='utf-8') as f:
for line in f:
th = line[:-1].split('\t')
ret.append((int(th[0]), int(th[1])))
self.seeds = ret
        # 80% training set, 20% test set (note: both splits below currently keep all the seeds)
train_percent = 0.3
train_max_idx = int(train_percent * len(self.seeds))
self.train_seeds = self.seeds[:]
self.test_seeds = self.seeds[:]
self.left_ids = []
self.right_ids = []
for left_entity, right_entity in self.test_seeds:
            self.left_ids.append(left_entity)  # left entity of an aligned pair
            self.right_ids.append(right_entity)  # right entity of an aligned pair
def XRA(self, entity_embedding_file_path):
self.linkEmbedding = []
with open(entity_embedding_file_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
for i in range(len(lines)):
aline = lines[i].strip()
aline_list = aline.split()
self.linkEmbedding.append(aline_list)
@staticmethod
def get_vec(entities_embedding, id_list: List[int], device="cuda"):
tensor = torch.LongTensor(id_list).view(-1, 1).to(device)
return entities_embedding(tensor).view(-1, 200).cpu().detach().numpy()
@staticmethod
def get_vec2(entities_embedding, id_list: List[int], device="cuda"):
all_entity_ids = torch.LongTensor(id_list).view(-1).to(device)
all_entity_vec = torch.index_select(
entities_embedding,
dim=0,
index=all_entity_ids
).view(-1, 200).cpu().detach().numpy()
return all_entity_vec
def calculate(self, top_k=(1, 10, 50, 100)):
Lvec = np.array([self.linkEmbedding[e1] for e1, e2 in self.test_seeds])
Rvec = np.array([self.linkEmbedding[e2] for e1, e2 in self.test_seeds])
        return self.get_hits2(Lvec, Rvec, top_k)
def get_hits2(self, Lvec, Rvec, top_k=(1, 10, 50, 100)):
sim = spatial.distance.cdist(Lvec, Rvec, metric='cityblock')
return self.get_hits(Lvec, Rvec, sim, top_k)
def get_hits(self, Lvec, Rvec, sim, top_k=(1, 10, 50, 100)):
# Lvec (m, d), Rvec (m, d)
        # Lvec and Rvec are the embeddings of the aligned left/right entities; d is the embedding dimension, m the number of entities
        # sim=distance(Lvec, Rvec) (m, m)
        # sim[i, j] is the distance from entity i in Lvec to entity j in Rvec
top_lr = [0] * len(top_k)
        for i in range(Lvec.shape[0]):  # for every KG1 entity
            rank = sim[i, :].argsort()
            # sim[i, :] is a row vector holding the distances from entity i in Lvec to every entity in Rvec
            # argsort sorts the distances and returns the sorted indices, e.g. [6, 3, 5] at indices [0, 1, 2] sorts to [3, 5, 6], so it returns [1, 2, 0]
            rank_index = np.where(rank == i)[0][0]
            # for a 1-D vector, np.where(rank == i) is equivalent to list(rank).index(i), i.e. the position of element i in rank
            # here i does not index entity i in Lvec; it is the index of the entity in Rvec that is aligned with i
for j in range(len(top_k)):
                if rank_index < top_k[j]:  # index starts from 0, hence the '<' comparison
top_lr[j] += 1
top_rl = [0] * len(top_k)
for i in range(Rvec.shape[0]):
rank = sim[:, i].argsort()
rank_index = np.where(rank == i)[0][0]
for j in range(len(top_k)):
if rank_index < top_k[j]:
top_rl[j] += 1
logger.info('For each left:')
left = []
for i in range(len(top_lr)):
hits = top_k[i]
hits_value = top_lr[i] / len(self.test_seeds) * 100
left.append((hits, hits_value))
logger.info('Hits@%d: %.2f%%' % (hits, hits_value))
logger.info('For each right:')
right = []
for i in range(len(top_rl)):
hits = top_k[i]
hits_value = top_rl[i] / len(self.test_seeds) * 100
right.append((hits, hits_value))
logger.info('Hits@%d: %.2f%%' % (hits, hits_value))
return {
"left": left,
"right": right,
}
# endregion
# region save/load model and resume training state
_MODEL_STATE_DICT = "model_state_dict"
_OPTIMIZER_STATE_DICT = "optimizer_state_dict"
_EPOCH = "epoch"
_STEP = "step"
_BEST_SCORE = "best_score"
_LOSS = "loss"
def load_checkpoint(model: nn.Module, optim: optimizer.Optimizer,
checkpoint_path="./result/fr_en/checkpoint.tar") -> Tuple[int, int, float, float]:
"""Loads training checkpoint.
:param checkpoint_path: path to checkpoint
:param model: model to update state
:param optim: optimizer to update state
:return tuple of starting epoch id, starting step id, best checkpoint score
"""
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint[_MODEL_STATE_DICT])
optim.load_state_dict(checkpoint[_OPTIMIZER_STATE_DICT])
start_epoch_id = checkpoint[_EPOCH] + 1
step = checkpoint[_STEP] + 1
best_score = checkpoint[_BEST_SCORE]
loss = checkpoint[_LOSS]
return start_epoch_id, step, best_score, loss
def save_checkpoint(model: nn.Module, optim: optimizer.Optimizer,
epoch_id: int, step: int, best_score: float, loss: float,
save_path="./result/fr_en/checkpoint.tar"):
torch.save({
_MODEL_STATE_DICT: model.state_dict(),
_OPTIMIZER_STATE_DICT: optim.state_dict(),
_EPOCH: epoch_id,
_STEP: step,
_BEST_SCORE: best_score,
_LOSS: loss,
}, save_path)
def save_entity_embedding_list(model, embedding_path="./result/fr_en/ATentsembed.txt"):
with open(embedding_path, 'w') as f:
d = model.entity_embedding.data.detach().cpu().numpy()
for i in range(len(d)):
f.write(" ".join([str(j) for j in d[i].tolist()]))
f.write("\n")
# endregion
# region dataset
def read_ids_and_names(dir_path, sp="\t"):
ids = []
names = []
with open(dir_path, encoding="utf-8") as file:
lines = file.readlines()
for line in lines:
id_to_name = line.strip().split(sp)
ids.append(int(id_to_name[0]))
names.append(id_to_name[1])
return ids, names
def read_triple(triple_path):
with open(triple_path, 'r') as fr:
triple = set()
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[1])
rel = int(line_split[2])
triple.add((head, rel, tail))
return list(triple)
def append_align_triple(triple: List[Tuple[int, int, int]], entity_align_list: List[Tuple[int, int]]):
    # replace head entities with their aligned counterparts to build extra attribute triples, so the alignment seeds are exploited
align_set = {}
for i in entity_align_list:
align_set[i[0]] = i[1]
align_set[i[1]] = i[0]
triple_replace_with_align = []
bar = Progbar(max_step=len(triple))
count = 0
for entity, attr, value in triple:
if entity in align_set:
triple_replace_with_align.append((align_set[entity], attr, value))
count += 1
bar.update(count, [("step", count)])
return triple + triple_replace_with_align
# endregion
class TransE:
def __init__(self,
# input paths
entity_align_file="data/fr_en/ref_ent_ids",
all_entity_file="data/fr_en/ent_ids_all",
all_attr_file="data/fr_en/att2id_all",
all_value_file="data/fr_en/att_value2id_all",
all_triple_file="data/fr_en/att_triple_all",
# output paths
checkpoint_path="./result/TransE/fr_en/checkpoint.tar",
embedding_path="./result/TransE/fr_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE/fr_en/log/",
device="cuda",
learning_rate=0.001,
visualize=False
):
self.entity_align_file = entity_align_file
self.all_entity_file = all_entity_file
self.all_attr_file = all_attr_file
self.all_value_file = all_value_file
self.all_triple_file = all_triple_file
self.device = device
self.visualize = visualize
self.tensorboard_log_dir = tensorboard_log_dir
self.checkpoint_path = checkpoint_path
self.embedding_path = embedding_path
self.learning_rate = learning_rate
def init_data(self):
self.t = Tester()
        self.t.read_entity_align_list(self.entity_align_file)  # load the known aligned entity pairs
self.entity_list, self.entity_name_list = read_ids_and_names(self.all_entity_file)
self.attr_list, _ = read_ids_and_names(self.all_attr_file)
self.value_list, _ = read_ids_and_names(self.all_value_file)
self.train_triples = read_triple(self.all_triple_file)
self.entity_count = len(self.entity_list)
self.attr_count = len(self.attr_list)
self.value_count = len(self.value_list)
logger.info("entity: " + str(self.entity_count)
+ " attr: " + str(self.attr_count)
+ " value: " + str(self.value_count))
def append_align_triple(self):
self.train_triples = append_align_triple(self.train_triples, self.t.train_seeds)
def init_dataset(self):
train_dataloader_head = DataLoader(
TrainDataset(self.train_triples, self.entity_count, self.attr_count, self.value_count, 512, 'head-batch'),
batch_size=1024,
shuffle=False,
num_workers=4,
collate_fn=TrainDataset.collate_fn
)
train_dataloader_tail = DataLoader(
TrainDataset(self.train_triples, self.entity_count, self.attr_count, self.value_count, 512, 'tail-batch'),
batch_size=1024,
shuffle=False,
num_workers=4,
collate_fn=TrainDataset.collate_fn
)
self.train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
def init_model(self):
self.model = KGEModel(
            self.t.seeds,  # all seeds
nentity=self.entity_count,
nrelation=self.attr_count,
nvalue=self.value_count,
hidden_dim=200,
gamma=24.0,
).to(self.device)
def init_optimizer(self):
self.optim = torch.optim.Adam(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.learning_rate
)
def init_soft_align(self):
        self.combination_threshold = 3  # pairs closer than this distance are treated as aligned by the model
        self.combination_restriction = 5000  # maximum number of entity pairs the model may treat as aligned
self.distance2entitiesPair: List[Tuple[int, Tuple[int, int]]] = []
self.combinationProbability: List[float] = [0] * self.entity_count # [0, 1)
self.correspondingEntity = {}
self.model_think_align_entities = []
self.model_is_able_to_predict_align_entities = False
def soft_align(self, positive_sample, mode='single'):
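        """Return a copy of positive_sample in which heads the model confidently considers aligned are swapped for their counterparts."""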
batch_size = positive_sample.size()[0]
# positive_sample (batch_size, 3)
        # batch_size triples of the form (entity, attr, value)
        # negative_sample (batch_size, negative_sample_size)
        # batch_size rows of candidate replacement ids (neg_id1, neg_id2, ...), each of length negative_sample_size
        # Let e be the positive entity, e' a negative entity, and e* the entity the model believes e aligns with
# 1. head-batch
# (e, a, v) + (e'1, e'2, ..., e'n) ->
# ((e, a, v), (e'1, a, v))
# ((e, a, v), (e'2, a, v))
# ...
# ((e, a, v), (e'n, a, v))
# 2. tail-batch
# (e, a, v) + (v'1, v'2, ..., v'n) ->
# ((e, a, v), (e, a, v'1))
# ((e, a, v), (e, a, v'2))
# ...
# ((e, a, v), (e, a, v'n))
soft_positive_sample = positive_sample.clone()
if mode == "head-batch":
# 负例是随机替换头部
# (neg_id1, neg_id2, ...) 是实体id
# ((e, a, v), (e'1, a, v))
# 已有 (e, a, v) + (e'1, e'2, ..., e'n)
for i in range(batch_size):
# 1. 模型认为头部是对齐的
h1 = soft_positive_sample[i][0].item()
if self.combinationProbability[h1] >= 0.5 and h1 in self.correspondingEntity: # 如果可信
# 希望 (e, a, v) (e', a, v) -> (e*, a, v) (e', a, v)
h1_cor = self.correspondingEntity[h1] # 获取模型认为的对齐实体
soft_positive_sample[i][0] = h1_cor # 替换为模型认为的对齐实体
elif mode == "tail-batch":
# 负例是随机替换尾部
# (neg_id1, neg_id2, ...) 是属性值id
# ((e, a, v), (e, a, v'2))
# 已有 (e, a, v) + (v'1, v'2, ..., v'n)
for i in range(batch_size):
# 1. 模型认为头部是对齐的
h1 = soft_positive_sample[i][0].item()
if self.combinationProbability[h1] >= 0.5 and h1 in self.correspondingEntity: # 如果可信
# 希望 (e, a, v) (e', a, v) -> (e*, a, v) (e', a, v)
h1_cor = self.correspondingEntity[h1] # 获取模型认为的对齐实体
soft_positive_sample[i][0] = h1_cor # 替换为模型认为的对齐实体
return soft_positive_sample
def do_combine(self, thread_name, sim):
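        """Scan the cross-lingual distance matrix `sim` and cache the entity pairs the model currently treats as aligned."""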
        # sim[i, j] is the distance from entity i in Lvec to entity j in Rvec
        logger.info(thread_name + " " + "running model-side alignment")
computing_time = time.time()
        # 1. sort by distance
self.distance2entitiesPair: List[Tuple[int, Tuple[int, int]]] = []
filtered = np.where(sim <= self.combination_threshold)
for i, j in zip(filtered[0], filtered[1]):
self.distance2entitiesPair.append((sim[i, j], (self.t.left_ids[i], self.t.right_ids[j])))
filter_time = time.time()
        logger.info(thread_name + " " + "pairs with distance below "
                    + str(self.combination_threshold) + ": "
                    + str(len(self.distance2entitiesPair)))
        logger.info(thread_name + " " + "flattening took " + str(int(filter_time - computing_time)) + " seconds")
        # 2. initialise the confidence that a pair the model proposes is truly aligned
        combinationProbability: List[float] = [0] * self.entity_count  # [0, 1)
        # 3. aligned entities according to the model
correspondingEntity = {}
self.model_think_align_entities = []
occupied: Set[int] = set()
combination_counter = 0
sigmoid = lambda x: 1.0 / (1.0 + exp(-x))
for dis, (ent1, ent2) in self.distance2entitiesPair:
if dis > self.combination_threshold:
                # beyond the trusted range, not reliable
                continue
            # distance is within the trusted range
if ent1 in occupied or ent2 in occupied:
continue
if combination_counter >= self.combination_restriction:
break
combination_counter += 1
            # write into the local mapping that replaces self.correspondingEntity once the scan finishes
            correspondingEntity[ent1] = ent2
            correspondingEntity[ent2] = ent1
            self.model_think_align_entities.append((ent1, ent2))
            occupied.add(ent1)
            occupied.add(ent2)
            combinationProbability[ent1] = sigmoid(self.combination_threshold - dis)  # always p > 0.5
            combinationProbability[ent2] = sigmoid(self.combination_threshold - dis)
        logger.info(thread_name + " " + "aligned " + str(len(self.model_think_align_entities)) + " entity pairs")
        self.combination_restriction += 1000
        self.model_is_able_to_predict_align_entities = False  # lock
        self.combinationProbability = combinationProbability
        self.correspondingEntity = correspondingEntity
        self.model_is_able_to_predict_align_entities = True  # unlock
        align_time = time.time()
        logger.info(thread_name + " " + "model alignment finished in " + str(int(align_time - filter_time)) + " seconds")
def run_train(self, need_to_load_checkpoint=True):
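        """Main training loop: TransE batches plus optional soft-alignment batches, periodic alignment evaluation, and checkpointing when the score improves."""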
logger.info("start training")
init_step = 1
total_steps = 20001
test_steps = 5000
last_loss = 100
score = 0
last_score = score
if need_to_load_checkpoint:
_, init_step, score, last_loss = load_checkpoint(self.model, self.optim, self.checkpoint_path)
last_score = score
summary_writer = tensorboard.SummaryWriter(log_dir=self.tensorboard_log_dir)
progbar = Progbar(max_step=total_steps - init_step)
start_time = time.time()
for step in range(init_step, total_steps):
positive_sample, negative_sample, subsampling_weight, mode = next(self.train_iterator)
loss = self.model.train_step(self.model, self.optim,
positive_sample, negative_sample,
subsampling_weight, mode, self.device)
            # soft alignment:
            # rewrite positive_sample according to the model's predicted aligned entities and train one extra round
if self.model_is_able_to_predict_align_entities:
soft_positive_sample = self.soft_align(positive_sample, mode)
loss2 = self.model.train_step(self.model, self.optim,
soft_positive_sample, negative_sample,
subsampling_weight, mode, self.device)
loss = (loss + loss2) / 2
progbar.update(step - init_step + 1, [
("loss", loss),
("cost", round((time.time() - start_time))),
("aligned", len(self.model_think_align_entities))
])
if self.visualize:
summary_writer.add_scalar(tag='Loss/train', scalar_value=loss, global_step=step)
if step == 12000 or step == 13000 or step == 14000:
logger.info("\n计算距离中")
computing_time = time.time()
left_vec = self.t.get_vec2(self.model.entity_embedding, self.t.left_ids)
right_vec = self.t.get_vec2(self.model.entity_embedding, self.t.right_ids)
sim = spatial.distance.cdist(left_vec, right_vec, metric='euclidean')
logger.info("计算距离完成,用时 " + str(int(time.time() - computing_time)) + " 秒")
# self.do_combine("Thread-" + str(step), sim)
# try:
# logger.info("启动线程,获取模型认为的对齐实体")
# _thread.start_new_thread(self.do_combine, ("Thread of step-" + str(step), sim,))
# except SystemExit:
# logger.error("Error: 无法启动线程")
logger.info("属性消融实验")
hits = self.t.get_hits(left_vec, right_vec, sim)
hits_left = hits["left"]
hits_right = hits["right"]
left_hits_10 = hits_left[2][1]
right_hits_10 = hits_right[2][1]
score = (left_hits_10 + right_hits_10) / 2
logger.info("score = " + str(score))
if self.visualize:
summary_writer.add_embedding(tag='Embedding',
mat=self.model.entity_embedding,
metadata=self.entity_name_list,
global_step=step)
summary_writer.add_scalar(tag='Hits@1/left', scalar_value=hits_left[0][1], global_step=step)
summary_writer.add_scalar(tag='Hits@10/left', scalar_value=hits_left[1][1], global_step=step)
summary_writer.add_scalar(tag='Hits@50/left', scalar_value=hits_left[2][1], global_step=step)
summary_writer.add_scalar(tag='Hits@100/left', scalar_value=hits_left[3][1], global_step=step)
summary_writer.add_scalar(tag='Hits@1/right', scalar_value=hits_right[0][1], global_step=step)
summary_writer.add_scalar(tag='Hits@10/right', scalar_value=hits_right[1][1], global_step=step)
summary_writer.add_scalar(tag='Hits@50/right', scalar_value=hits_right[2][1], global_step=step)
summary_writer.add_scalar(tag='Hits@100/right', scalar_value=hits_right[3][1], global_step=step)
if score > last_score:
last_score = score
save_checkpoint(self.model, self.optim, 1, step, score, loss, self.checkpoint_path)
save_entity_embedding_list(self.model, self.embedding_path)
def run_test(self):
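        """Load the saved checkpoint and report alignment hits on the test seeds."""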
load_checkpoint(self.model, self.optim, self.checkpoint_path)
logger.info("\n属性消融实验")
left_vec = self.t.get_vec2(self.model.entity_embedding, self.t.left_ids)
right_vec = self.t.get_vec2(self.model.entity_embedding, self.t.right_ids)
hits = self.t.get_hits(left_vec, right_vec)
hits_left = hits["left"]
hits_right = hits["right"]
left_hits_10 = hits_left[2][1]
right_hits_10 = hits_right[2][1]
score = (left_hits_10 + right_hits_10) / 2
logger.info("score = " + str(score))
def train_model_for_fr_en():
m = TransE(
checkpoint_path="./result/TransE2/fr_en/checkpoint.tar",
embedding_path="./result/TransE2/fr_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE2/fr_en/log/"
)
m.init_data()
# m.append_align_triple()
m.init_soft_align()
m.init_dataset()
m.init_model()
m.init_optimizer()
m.run_train(need_to_load_checkpoint=False)
def train_model_for_ja_en():
m = TransE(entity_align_file="data/ja_en/ref_ent_ids",
all_entity_file="data/ja_en/ent_ids_all",
all_attr_file="data/ja_en/att2id_all",
all_value_file="data/ja_en/att_value2id_all",
all_triple_file="data/ja_en/att_triple_all",
checkpoint_path="./result/TransE2/ja_en/checkpoint.tar",
embedding_path="./result/TransE2/ja_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE2/ja_en/log/")
m.init_data()
# m.append_align_triple()
m.init_soft_align()
m.init_dataset()
m.init_model()
m.init_optimizer()
m.run_train(need_to_load_checkpoint=False)
def train_model_for_zh_en():
m = TransE(entity_align_file="data/zh_en/ref_ent_ids",
all_entity_file="data/zh_en/ent_ids_all",
all_attr_file="data/zh_en/att2id_all",
all_value_file="data/zh_en/att_value2id_all",
all_triple_file="data/zh_en/att_triple_all",
checkpoint_path="./result/TransE2/zh_en/checkpoint.tar",
embedding_path="./result/TransE2/zh_en/ATentsembed.txt",
tensorboard_log_dir="./result/TransE2/zh_en/log/")
m.init_data()
# m.append_align_triple()
m.init_soft_align()
m.init_dataset()
m.init_model()
m.init_optimizer()
m.run_train(need_to_load_checkpoint=False)
def test_model():
m = TransE()
m.init_data()
m.init_model()
m.init_optimizer()
m.run_test()
# train_model_for_fr_en()
# train_model_for_ja_en()
train_model_for_zh_en()
|
[
"logging.getLogger",
"numpy.log10",
"torch.LongTensor",
"torch.optim.optimizer.step",
"torch.sin",
"numpy.array",
"torch.cos",
"math.exp",
"dataloader.TrainDataset",
"torch.random.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"numpy.where",
"torch.optim.optimizer.zero_grad",
"logging.FileHandler",
"sys.stdout.flush",
"torch.Tensor",
"torch.norm",
"torch.nn.functional.logsigmoid",
"time.time",
"logging.basicConfig",
"torch.index_select",
"scipy.spatial.distance.cdist",
"logging.Formatter",
"torch.load",
"torch.stack",
"dataloader.BidirectionalOneShotIterator",
"torch.nn.Parameter",
"torch.chunk",
"torch.zeros",
"sys.stdout.write"
] |
[((691, 723), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(123456)'], {}), '(123456)\n', (715, 723), False, 'import torch\n'), ((7415, 7442), 'logging.getLogger', 'logging.getLogger', (['"""logger"""'], {}), "('logger')\n", (7432, 7442), False, 'import logging\n'), ((7481, 7542), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (7500, 7542), False, 'import logging\n'), ((7558, 7587), 'logging.FileHandler', 'logging.FileHandler', (['filename'], {}), '(filename)\n', (7577, 7587), False, 'import logging\n'), ((16004, 16031), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (16014, 16031), False, 'import torch\n'), ((1730, 1767), 'torch.zeros', 'torch.zeros', (['nentity', 'self.entity_dim'], {}), '(nentity, self.entity_dim)\n', (1741, 1767), False, 'import torch\n'), ((2079, 2106), 'torch.nn.Parameter', 'nn.Parameter', (['entity_weight'], {}), '(entity_weight)\n', (2091, 2106), False, 'from torch import nn\n'), ((5344, 5371), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(2)'}), '(head, 2, dim=2)\n', (5355, 5371), False, 'import torch\n'), ((5399, 5426), 'torch.chunk', 'torch.chunk', (['tail', '(2)'], {'dim': '(2)'}), '(tail, 2, dim=2)\n', (5410, 5426), False, 'import torch\n'), ((5592, 5617), 'torch.cos', 'torch.cos', (['phase_relation'], {}), '(phase_relation)\n', (5601, 5617), False, 'import torch\n'), ((5640, 5665), 'torch.sin', 'torch.sin', (['phase_relation'], {}), '(phase_relation)\n', (5649, 5665), False, 'import torch\n'), ((6175, 6215), 'torch.stack', 'torch.stack', (['[re_score, im_score]'], {'dim': '(0)'}), '([re_score, im_score], dim=0)\n', (6186, 6215), False, 'import torch\n'), ((6488, 6509), 'torch.optim.optimizer.zero_grad', 'optimizer.zero_grad', ([], {}), '()\n', (6507, 6509), False, 'from torch.optim import optimizer\n'), ((7247, 7263), 'torch.optim.optimizer.step', 'optimizer.step', ([], {}), '()\n', (7261, 7263), False, 'from torch.optim import optimizer\n'), ((7648, 7707), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s: %(message)s"""'], {}), "('%(asctime)s:%(levelname)s: %(message)s')\n", (7665, 7707), False, 'import logging\n'), ((8241, 8252), 'time.time', 'time.time', ([], {}), '()\n', (8250, 8252), False, 'import time\n'), ((8790, 8827), 'sys.stdout.write', 'sys.stdout.write', (["('\\x08' * last_width)"], {}), "('\\x08' * last_width)\n", (8806, 8827), False, 'import sys\n'), ((8834, 8856), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (8850, 8856), False, 'import sys\n'), ((9382, 9403), 'sys.stdout.write', 'sys.stdout.write', (['bar'], {}), '(bar)\n', (9398, 9403), False, 'import sys\n'), ((9474, 9485), 'time.time', 'time.time', ([], {}), '()\n', (9483, 9485), False, 'import time\n'), ((10171, 10193), 'sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (10187, 10193), False, 'import sys\n'), ((10500, 10518), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10516, 10518), False, 'import sys\n'), ((13171, 13235), 'numpy.array', 'np.array', (['[self.linkEmbedding[e1] for e1, e2 in self.test_seeds]'], {}), '([self.linkEmbedding[e1] for e1, e2 in self.test_seeds])\n', (13179, 13235), True, 'import numpy as np\n'), ((13251, 13315), 'numpy.array', 'np.array', (['[self.linkEmbedding[e2] for e1, e2 in self.test_seeds]'], {}), '([self.linkEmbedding[e2] for e1, e2 in self.test_seeds])\n', (13259, 13315), True, 'import numpy as np\n'), 
((13440, 13494), 'scipy.spatial.distance.cdist', 'spatial.distance.cdist', (['Lvec', 'Rvec'], {'metric': '"""cityblock"""'}), "(Lvec, Rvec, metric='cityblock')\n", (13462, 13494), False, 'from scipy import spatial\n'), ((21177, 21251), 'dataloader.BidirectionalOneShotIterator', 'BidirectionalOneShotIterator', (['train_dataloader_head', 'train_dataloader_tail'], {}), '(train_dataloader_head, train_dataloader_tail)\n', (21205, 21251), False, 'from dataloader import BidirectionalOneShotIterator\n'), ((24503, 24514), 'time.time', 'time.time', ([], {}), '()\n', (24512, 24514), False, 'import time\n'), ((24628, 24671), 'numpy.where', 'np.where', (['(sim <= self.combination_threshold)'], {}), '(sim <= self.combination_threshold)\n', (24636, 24671), True, 'import numpy as np\n'), ((24847, 24858), 'time.time', 'time.time', ([], {}), '()\n', (24856, 24858), False, 'import time\n'), ((26694, 26705), 'time.time', 'time.time', ([], {}), '()\n', (26703, 26705), False, 'import time\n'), ((27244, 27303), 'torch.utils.tensorboard.SummaryWriter', 'tensorboard.SummaryWriter', ([], {'log_dir': 'self.tensorboard_log_dir'}), '(log_dir=self.tensorboard_log_dir)\n', (27269, 27303), False, 'from torch.utils import tensorboard\n'), ((27385, 27396), 'time.time', 'time.time', ([], {}), '()\n', (27394, 27396), False, 'import time\n'), ((1220, 1241), 'torch.Tensor', 'torch.Tensor', (['[gamma]'], {}), '([gamma])\n', (1232, 1241), False, 'import torch\n'), ((2204, 2245), 'torch.zeros', 'torch.zeros', (['nrelation', 'self.relation_dim'], {}), '(nrelation, self.relation_dim)\n', (2215, 2245), False, 'import torch\n'), ((2509, 2544), 'torch.zeros', 'torch.zeros', (['nvalue', 'self.value_dim'], {}), '(nvalue, self.value_dim)\n', (2520, 2544), False, 'import torch\n'), ((5177, 5206), 'torch.norm', 'torch.norm', (['score'], {'p': '(1)', 'dim': '(2)'}), '(score, p=1, dim=2)\n', (5187, 5206), False, 'import torch\n'), ((7714, 7733), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7731, 7733), False, 'import logging\n'), ((10361, 10415), 'sys.stdout.write', 'sys.stdout.write', (["(' ' * (self.last_width - curr_width))"], {}), "(' ' * (self.last_width - curr_width))\n", (10377, 10415), False, 'import sys\n'), ((10468, 10490), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (10484, 10490), False, 'import sys\n'), ((20597, 20707), 'dataloader.TrainDataset', 'TrainDataset', (['self.train_triples', 'self.entity_count', 'self.attr_count', 'self.value_count', '(512)', '"""head-batch"""'], {}), "(self.train_triples, self.entity_count, self.attr_count, self.\n value_count, 512, 'head-batch')\n", (20609, 20707), False, 'from dataloader import TrainDataset\n'), ((20900, 21010), 'dataloader.TrainDataset', 'TrainDataset', (['self.train_triples', 'self.entity_count', 'self.attr_count', 'self.value_count', '(512)', '"""tail-batch"""'], {}), "(self.train_triples, self.entity_count, self.attr_count, self.\n value_count, 512, 'tail-batch')\n", (20912, 21010), False, 'from dataloader import TrainDataset\n'), ((6780, 6809), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['(-negative_score)'], {}), '(-negative_score)\n', (6792, 6809), True, 'import torch.nn.functional as F\n'), ((6896, 6924), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['positive_score'], {}), '(positive_score)\n', (6908, 6924), True, 'import torch.nn.functional as F\n'), ((28764, 28775), 'time.time', 'time.time', ([], {}), '()\n', (28773, 28775), False, 'import time\n'), ((28978, 29041), 'scipy.spatial.distance.cdist', 
'spatial.distance.cdist', (['left_vec', 'right_vec'], {'metric': '"""euclidean"""'}), "(left_vec, right_vec, metric='euclidean')\n", (29000, 29041), False, 'from scipy import spatial\n'), ((2918, 2986), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'sample[:, 0]'}), '(self.entity_embedding, dim=0, index=sample[:, 0])\n', (2936, 2986), False, 'import torch\n'), ((3086, 3156), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'sample[:, 1]'}), '(self.relation_embedding, dim=0, index=sample[:, 1])\n', (3104, 3156), False, 'import torch\n'), ((3252, 3319), 'torch.index_select', 'torch.index_select', (['self.value_embedding'], {'dim': '(0)', 'index': 'sample[:, 2]'}), '(self.value_embedding, dim=0, index=sample[:, 2])\n', (3270, 3319), False, 'import torch\n'), ((8891, 8914), 'numpy.log10', 'np.log10', (['self.max_step'], {}), '(self.max_step)\n', (8899, 8914), True, 'import numpy as np\n'), ((14091, 14110), 'numpy.where', 'np.where', (['(rank == i)'], {}), '(rank == i)\n', (14099, 14110), True, 'import numpy as np\n'), ((14548, 14567), 'numpy.where', 'np.where', (['(rank == i)'], {}), '(rank == i)\n', (14556, 14567), True, 'import numpy as np\n'), ((25465, 25472), 'math.exp', 'exp', (['(-x)'], {}), '(-x)\n', (25468, 25472), False, 'from math import exp\n'), ((3781, 3854), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'tail_part[:, 1]'}), '(self.relation_embedding, dim=0, index=tail_part[:, 1])\n', (3799, 3854), False, 'import torch\n'), ((3950, 4020), 'torch.index_select', 'torch.index_select', (['self.value_embedding'], {'dim': '(0)', 'index': 'tail_part[:, 2]'}), '(self.value_embedding, dim=0, index=tail_part[:, 2])\n', (3968, 4020), False, 'import torch\n'), ((12609, 12634), 'torch.LongTensor', 'torch.LongTensor', (['id_list'], {}), '(id_list)\n', (12625, 12634), False, 'import torch\n'), ((12854, 12879), 'torch.LongTensor', 'torch.LongTensor', (['id_list'], {}), '(id_list)\n', (12870, 12879), False, 'import torch\n'), ((4278, 4349), 'torch.index_select', 'torch.index_select', (['self.entity_embedding'], {'dim': '(0)', 'index': 'head_part[:, 0]'}), '(self.entity_embedding, dim=0, index=head_part[:, 0])\n', (4296, 4349), False, 'import torch\n'), ((4449, 4522), 'torch.index_select', 'torch.index_select', (['self.relation_embedding'], {'dim': '(0)', 'index': 'head_part[:, 1]'}), '(self.relation_embedding, dim=0, index=head_part[:, 1])\n', (4467, 4522), False, 'import torch\n'), ((28389, 28400), 'time.time', 'time.time', ([], {}), '()\n', (28398, 28400), False, 'import time\n'), ((12925, 12992), 'torch.index_select', 'torch.index_select', (['entities_embedding'], {'dim': '(0)', 'index': 'all_entity_ids'}), '(entities_embedding, dim=0, index=all_entity_ids)\n', (12943, 12992), False, 'import torch\n'), ((29111, 29122), 'time.time', 'time.time', ([], {}), '()\n', (29120, 29122), False, 'import time\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
from src.options.general import opts
from src.models.ADNet import adnet
from mindspore import Tensor, export, context
parser = argparse.ArgumentParser(
description='ADNet test')
parser.add_argument('--weight_file', default='', type=str, help='The pretrained weight file')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'])
parser.add_argument('--target_device', type=int, default=0)
args = parser.parse_args()
context.set_context(device_target=args.device_target, mode=context.PYNATIVE_MODE, device_id=args.target_device)
opts['num_videos'] = 1
net, domain_specific_nets = adnet(opts, trained_file=args.weight_file)
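# Dummy input of shape [128, 3, 112, 112] used only to trace the network graph for MINDIR export.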
input_ = np.random.uniform(0.0, 1.0, size=[128, 3, 112, 112]).astype(np.float32)
export(net, Tensor(input_), file_name='ADNet', file_format='MINDIR')
print('export finished')
|
[
"argparse.ArgumentParser",
"mindspore.context.set_context",
"mindspore.Tensor",
"numpy.random.uniform",
"src.models.ADNet.adnet"
] |
[((832, 881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ADNet test"""'}), "(description='ADNet test')\n", (855, 881), False, 'import argparse\n'), ((1169, 1285), 'mindspore.context.set_context', 'context.set_context', ([], {'device_target': 'args.device_target', 'mode': 'context.PYNATIVE_MODE', 'device_id': 'args.target_device'}), '(device_target=args.device_target, mode=context.\n PYNATIVE_MODE, device_id=args.target_device)\n', (1188, 1285), False, 'from mindspore import Tensor, export, context\n'), ((1332, 1374), 'src.models.ADNet.adnet', 'adnet', (['opts'], {'trained_file': 'args.weight_file'}), '(opts, trained_file=args.weight_file)\n', (1337, 1374), False, 'from src.models.ADNet import adnet\n'), ((1469, 1483), 'mindspore.Tensor', 'Tensor', (['input_'], {}), '(input_)\n', (1475, 1483), False, 'from mindspore import Tensor, export, context\n'), ((1385, 1437), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '[128, 3, 112, 112]'}), '(0.0, 1.0, size=[128, 3, 112, 112])\n', (1402, 1437), True, 'import numpy as np\n')]
|
from .pressureprofile import PressureProfile
import numpy as np
class ArrayPressureProfile(PressureProfile):
def __init__(self, array, reverse=False):
super().__init__(self.__class__.__name__, array.shape[-1])
if reverse:
self.pressure_profile = array[::-1]
else:
self.pressure_profile = array
def compute_pressure_profile(self):
"""
Sets up the pressure profile for the atmosphere model
"""
logp = np.log10(self.pressure_profile)
gradp = np.gradient(logp)
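        # Level boundaries: shift each grid point by half a log-pressure step and add one extra edge at the end.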
self.pressure_profile_levels = \
10**np.append(logp-gradp/2, logp[-1]+gradp[-1]/2)
@property
def profile(self):
return self.pressure_profile
def write(self, output):
pressure = super().write(output)
return pressure
@classmethod
    def input_keywords(cls):
        return ['array', 'fromarray']
|
[
"numpy.append",
"numpy.log10",
"numpy.gradient"
] |
[((503, 534), 'numpy.log10', 'np.log10', (['self.pressure_profile'], {}), '(self.pressure_profile)\n', (511, 534), True, 'import numpy as np\n'), ((551, 568), 'numpy.gradient', 'np.gradient', (['logp'], {}), '(logp)\n', (562, 568), True, 'import numpy as np\n'), ((627, 680), 'numpy.append', 'np.append', (['(logp - gradp / 2)', '(logp[-1] + gradp[-1] / 2)'], {}), '(logp - gradp / 2, logp[-1] + gradp[-1] / 2)\n', (636, 680), True, 'import numpy as np\n')]
|
"""EESG.py
Created by <NAME>, <NAME>.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
from openmdao.api import Group, Problem, Component,ExecComp,IndepVarComp,ScipyOptimizer,pyOptSparseDriver
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
from openmdao.drivers import *
import numpy as np
from numpy import array,float,min,sign
from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan
import pandas
class EESG(Component):
""" Estimates overall mass dimensions and Efficiency of Electrically Excited Synchronous generator. """
def __init__(self):
super(EESG, self).__init__()
# EESG generator design inputs
self.add_param('r_s', val=0.0, units ='m', desc='airgap radius r_s')
self.add_param('l_s', val=0.0, units ='m', desc='Stator core length l_s')
self.add_param('h_s', val=0.0, units ='m', desc='Yoke height h_s')
self.add_param('tau_p',val=0.0, units ='m', desc='Pole pitch self.tau_p')
self.add_param('machine_rating',val=0.0, units ='W', desc='Machine rating')
self.add_param('n_nom',val=0.0, units ='rpm', desc='rated speed')
self.add_param('Torque',val=0.0, units ='Nm', desc='Rated torque ')
self.add_param('I_f',val=0.0000,units='A',desc='Excitation current')
self.add_param('N_f',val=0.0,units='A',desc='field turns')
self.add_param('h_ys',val=0.0, units ='m', desc='Yoke height')
self.add_param('h_yr',val=0.0, units ='m', desc='rotor yoke height')
# structural design variables
self.add_param('n_s' ,val=0.0, desc='number of stator arms n_s')
self.add_param('b_st' , val=0.0, units ='m', desc='arm width b_st')
self.add_param('d_s',val=0.0,units ='m', desc='arm depth d_s')
self.add_param('t_ws' ,val=0.0,units ='m', desc='arm depth thickness self.t_wr')
self.add_param('n_r' ,val=0.0, desc='number of arms n')
self.add_param('b_r' ,val=0.0,units ='m', desc='arm width b_r')
self.add_param('d_r' ,val=0.0, units ='m', desc='arm depth d_r')
self.add_param('t_wr' ,val=0.0, units ='m', desc='arm depth thickness self.t_wr')
self.add_param('R_o',val=0.0, units ='m',desc='Shaft radius')
# EESG generator design outputs
# Magnetic loading
self.add_output('B_symax' ,val=0.0, desc='Peak Stator Yoke flux density B_ymax')
self.add_output('B_tmax',val=0.0, desc='Peak Teeth flux density')
self.add_output('B_rymax',val=0.0, desc='Peak Rotor yoke flux density')
self.add_output('B_gfm',val=0.0, desc='Average air gap flux density B_g')
self.add_output('B_g' ,val=0.0, desc='Peak air gap flux density B_g')
self.add_output('B_pc',val=0.0, desc='Pole core flux density')
# Stator design
self.add_output('N_s' ,val=0.0, desc='Number of turns in the stator winding')
self.add_output('b_s',val=0.0, desc='slot width')
self.add_output('b_t',val=0.0, desc='tooth width')
self.add_output('A_Cuscalc',val=0.0, desc='Conductor cross-section mm^2')
self.add_output('S',val=0.0, desc='Stator slots')
# # Output parameters : Rotor design
self.add_output('h_p',val=0.0, desc='Pole height')
self.add_output('b_p',val=0.0, desc='Pole width')
self.add_output('p',val=0.0, desc='No of pole pairs')
self.add_output('n_brushes',val=0.0, desc='number of brushes')
self.add_output('A_Curcalc',val=0.0, desc='Rotor Conductor cross-section')
# Output parameters : Electrical performance
self.add_output('E_s',val=0.0, desc='Stator phase voltage')
self.add_output('f',val=0.0, desc='Generator output frequency')
self.add_output('I_s',val=0.0, desc='Generator output phase current')
self.add_output('R_s',val=0.0, desc='Stator resistance')
self.add_output('R_r',val=0.0, desc='Rotor resistance')
self.add_output('L_m',val=0.0, desc='Stator synchronising inductance')
self.add_output('J_s',val=0.0, desc='Stator Current density')
self.add_output('J_f',val=0.0, desc='rotor Current density')
self.add_output('A_1',val=0.0, desc='Specific current loading')
self.add_output('Load_mmf_ratio',val=0.0, desc='mmf_ratio')
# Objective functions and output
self.add_output('Mass',val=0.0, desc='Actual mass')
self.add_output('K_rad',val=0.0, desc='K_rad')
self.add_output('Losses',val=0.0, desc='Total loss')
self.add_output('gen_eff',val=0.0, desc='Generator efficiency')
# Structural performance
self.add_output('u_Ar',val=0.0, desc='Rotor radial deflection')
self.add_output('y_Ar',val=0.0, desc='Rotor axial deflection')
self.add_output('z_A_r',val=0.0, desc='Rotor circumferential deflection')
self.add_output('u_As',val=0.0, desc='Stator radial deflection')
self.add_output('y_As',val=0.0, desc='Stator axial deflection')
self.add_output('z_A_s',val=0.0, desc='Stator circumferential deflection')
self.add_output('u_all_r',val=0.0, desc='Allowable radial rotor')
self.add_output('u_all_s',val=0.0, desc='Allowable radial stator')
self.add_output('y_all',val=0.0, desc='Allowable axial')
self.add_output('z_all_s',val=0.0, desc='Allowable circum stator')
self.add_output('z_all_r',val=0.0, desc='Allowable circum rotor')
self.add_output('b_all_s',val=0.0, desc='Allowable arm')
self.add_output('b_all_r',val=0.0, desc='Allowable arm dimensions')
self.add_output('TC1',val=0.0, desc='Torque constraint')
self.add_output('TC2',val=0.0, desc='Torque constraint-rotor')
self.add_output('TC3',val=0.0, desc='Torque constraint-stator')
#Material properties
self.add_param('rho_Fes',val=0.0,units='kg*m**-3', desc='Structural Steel density ')
self.add_param('rho_Fe',val=0.0,units='kg*m**-3', desc='Magnetic Steel density ')
self.add_param('rho_Copper',val=0.0,units='kg*m**-3', desc='Copper density ')
# Mass Outputs
self.add_output('Copper',val=0.0, desc='Copper Mass')
self.add_output('Iron',val=0.0, desc='Electrical Steel Mass')
self.add_output('Structural_mass' ,val=0.0, desc='Structural Mass')
# Other parameters
self.add_output('Power_ratio',val=0.0, desc='Power_ratio')
self.add_output('Slot_aspect_ratio',val=0.0,desc='Stator slot aspect ratio')
self.add_output('R_out',val=0.0, desc='Outer radius')
#inputs/outputs for interface with drivese
self.add_param('main_shaft_cm',val= np.array([0.0, 0.0, 0.0]),desc='Main Shaft CM')
self.add_param('main_shaft_length',val=0.0, desc='main shaft length')
self.add_output('I',val=np.array([0.0, 0.0, 0.0]),desc='Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('cm', val=np.array([0.0, 0.0, 0.0]),desc='COM [x,y,z]')
self.gen_sizing = generator_sizing()
def solve_nonlinear(self, inputs, outputs, resid):
(outputs['B_symax'], outputs['B_tmax'], outputs['B_rymax'], outputs['B_gfm'], outputs['B_g'],outputs['B_pc'], outputs['N_s'], outputs['b_s'], \
outputs['b_t'], outputs['A_Cuscalc'],outputs['A_Curcalc'], outputs['b_p'], outputs['h_p'], outputs['p'], outputs['E_s'], outputs['f'], \
outputs['I_s'], outputs['R_s'], outputs['L_m'], outputs['A_1'], outputs['J_s'], outputs['R_r'],outputs['Losses'], \
outputs['Load_mmf_ratio'],outputs['Power_ratio'],outputs['n_brushes'],outputs['J_f'],outputs['K_rad'], outputs['gen_eff'], outputs['S'],
outputs['Slot_aspect_ratio'], outputs['Copper'],outputs['Iron'],outputs['u_Ar'], outputs['y_Ar'], \
outputs['z_A_r'], outputs['u_As'], outputs['y_As'], outputs['z_A_s'], outputs['u_all_r'], outputs['u_all_s'], \
outputs['y_all'], outputs['z_all_s'], outputs['z_all_r'], outputs['b_all_s'], outputs['b_all_r'], outputs['TC1'], \
outputs['TC2'], outputs['TC3'], outputs['R_out'], outputs['Structural_mass'],outputs['Mass'],outputs['cm'], outputs['I']) \
= self.gen_sizing.compute(inputs['r_s'], inputs['l_s'], inputs['h_s'], inputs['tau_p'], inputs['machine_rating'],
inputs['n_nom'], inputs['Torque'], inputs['I_f'],inputs['N_f'],inputs['h_ys'], inputs['h_yr'],inputs['rho_Fe'], inputs['rho_Copper'],inputs['b_st'], inputs['d_s'], \
inputs['t_ws'], inputs['n_r'],inputs['n_s'], inputs['b_r'],inputs['d_r'], inputs['t_wr'], \
inputs['R_o'], inputs['rho_Fes'],inputs['main_shaft_cm'],inputs['main_shaft_length'])
return outputs
class generator_sizing(object):
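    """Plain-Python helper that performs the electromagnetic and structural sizing calculations behind the EESG component."""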
def __init__(self):
pass
def compute(self,r_s, l_s,h_s,tau_p,machine_rating,n_nom,Torque,I_f,N_f,h_ys,h_yr, \
rho_Fe,rho_Copper,b_st, d_s,t_ws, n_r,n_s, b_r,d_r, t_wr, \
R_o, rho_Fes,main_shaft_cm,main_shaft_length):
self.r_s=r_s
self.l_s=l_s
self.h_s=h_s
self.tau_p=tau_p
self.N_f=N_f
self.I_f=I_f
self.h_ys=h_ys
self.h_yr=h_yr
self.machine_rating=machine_rating
self.n_nom=n_nom
self.Torque=Torque
self.b_st=b_st
self.d_s=d_s
self.t_ws=t_ws
self.n_r=n_r
self.n_s=n_s
self.b_r=b_r
self.d_r=d_r
self.t_wr=t_wr
self.R_o=R_o
self.rho_Fe=rho_Fe
self.rho_Copper=rho_Copper
self.rho_Fes=rho_Fes
self.main_shaft_cm=main_shaft_cm
self.main_shaft_length=main_shaft_length
#Assign values to universal constants
g1 =9.81 # m/s^2 acceleration due to gravity
E =2e11 # N/m^2 young's modulus
sigma =48.373e3 # shear stress
mu_0 =pi*4e-7 # permeability of free space
phi =90*2*pi/360
#Assign values to design constants
h_w =0.005
b_so = 0.004 # Stator slot opening
m =3 # number of phases
q1 =2 # no of stator slots per pole per phase
b_s_tau_s=0.45 # ratio of slot width to slot pitch
k_sfil =0.65 # Slot fill factor
P_Fe0h =4 #specific hysteresis losses W/kg @ 1.5 T @50 Hz
        P_Fe0e =1 #specific eddy current losses W/kg @ 1.5 T @50 Hz
        rho_Cu=1.8*10**(-8)*1.4 # resistivity of copper
k_fes =0.9 # iron fill factor
y_tau_p=1 # coil span/pole pitch fullpitch
k_fillr = 0.7 # rotor slot fill factor
k_s=0.2 #magnetic saturation factor for iron
T = self.Torque
cos_phi=0.85 #power factor
# back iron thickness for rotor and stator
self.t_s =self.h_ys
self.t =self.h_yr
# Aspect ratio
self.K_rad=self.l_s/(2*self.r_s)
###################################################### Electromagnetic design#############################################
alpha_p=pi/2*.7
dia=2*self.r_s # air gap diameter
# air gap length and minimum values
g=0.001*dia
if(g<0.005):
g=0.005
r_r=self.r_s-g #rotor radius
d_se=dia+2*self.h_s+2*self.h_ys # stator outer diameter
self.p=round(pi*dia/(2*self.tau_p)) # number of pole pairs
self.S=2*self.p*q1*m # number of slots of stator phase winding
N_conductors=self.S*2
self.N_s=N_conductors/2/3 # Stator turns per phase
alpha =180/self.S/self.p #electrical angle
tau_s=pi*dia/self.S # slot pitch
h_ps=0.1*self.tau_p # height of pole shoe
b_pc=0.4*self.tau_p # width of pole core
h_pc=0.6*self.tau_p # height of pole core
self.h_p=0.7*tau_p # pole height
self.b_p=self.h_p
self.b_s=tau_s * b_s_tau_s #slot width
self.Slot_aspect_ratio=self.h_s/self.b_s
self.b_t=tau_s-self.b_s #tooth width
# Calculating carter factor and effective air gap
g_a=g
K_C1=(tau_s+10*g_a)/(tau_s-self.b_s+10*g_a) # salient pole rotor
g_1=K_C1*g
# calculating angular frequency
om_m=2*pi*self.n_nom/60
om_e=60
self.f = self.n_nom*self.p/60
# Slot fill factor according to air gap radius
if (2*self.r_s>2):
K_fills=0.65
else:
K_fills=0.4
# Calculating Stator winding factor
k_y1=sin(y_tau_p*pi/2) # chording factor
k_q1=sin(pi/6)/q1/sin(pi/6/q1) # winding zone factor
k_wd=k_y1*k_q1
# Calculating stator winding conductor length, cross-section and resistance
shortpitch=0
l_Cus = 2*self.N_s*(2*(self.tau_p-shortpitch/m/q1)+self.l_s) #length of winding
A_s = self.b_s*(self.h_s-h_w)
A_scalc=self.b_s*1000*(self.h_s*1000-h_w*1000) # cross section in mm^2
A_Cus = A_s*q1*self.p*K_fills/self.N_s
self.A_Cuscalc = A_scalc*q1*self.p*K_fills/self.N_s
self.R_s=l_Cus*rho_Cu/A_Cus
#field winding design, conductor lenght, cross-section and resistance
self.N_f=round(self.N_f) # rounding the field winding turns to the nearest integer
I_srated=self.machine_rating/(sqrt(3)*5000*cos_phi)
l_pole=self.l_s-0.05+0.120 # 50mm smaller than stator and 120mm longer to accommodate end stack
K_fe=0.95
l_pfe=l_pole*K_fe
l_Cur=4*self.p*self.N_f*(l_pfe+b_pc+pi/4*(pi*(r_r-h_pc-h_ps)/self.p-b_pc))
A_Cur=k_fillr*h_pc*0.5/self.N_f*(pi*(r_r-h_pc-h_ps)/self.p-b_pc)
self.A_Curcalc=k_fillr*h_pc*1000*0.5/self.N_f*(pi*(r_r-h_pc-h_ps)*1000/self.p-b_pc*1000)
Slot_Area=A_Cur*2*self.N_f/k_fillr
self.R_r=rho_Cu*l_Cur/A_Cur
#field winding current density
self.J_f=self.I_f/self.A_Curcalc
# calculating air flux density
self.B_gfm=mu_0*self.N_f*self.I_f/(g_1*(1+k_s)) #No load air gap flux density
self.B_g=self.B_gfm*4*sin(0.5*self.b_p*pi/self.tau_p)/pi # fundamental component
self.B_symax=self.tau_p*self.B_g/pi/self.h_ys #stator yoke flux density
L_fg=2*mu_0*self.p*self.l_s*4*self.N_f**2*((h_ps/(self.tau_p-self.b_p))+(h_pc/(3*pi*(r_r-h_pc-h_ps)/self.p-b_pc)))
# calculating no load voltage and stator current
self.E_s=2*self.N_s*self.l_s*self.r_s*k_wd*om_m*self.B_g/sqrt(2) #no load voltage
self.I_s=(self.E_s-(self.E_s**2-4*self.R_s*self.machine_rating/m)**0.5)/(2*self.R_s)
# Calculating stator winding current density and specific current loading
self.A_1 = 6*self.N_s*self.I_s/(pi*dia)
self.J_s=self.I_s/self.A_Cuscalc
# Calculating magnetic loading in other parts of the machine
delta_m=0 # Initialising load angle
# peak flux density in pole core, rotor yoke and stator teeth
self.B_pc=(1/b_pc)*((2*self.tau_p/pi)*self.B_g*cos(delta_m)+(2*mu_0*self.I_f*self.N_f*((2*h_ps/(self.tau_p-self.b_p))+(h_pc/(self.tau_p-b_pc)))))
self.B_rymax= 0.5*b_pc*self.B_pc/self.h_yr
self.B_tmax=(self.B_gfm+self.B_g)*tau_s*0.5/self.b_t
# Calculating leakage inductances in the stator
L_ssigmas=2*mu_0*self.l_s*self.N_s**2/self.p/q1*((self.h_s-h_w)/(3*self.b_s)+h_w/b_so) #slot leakage inductance
L_ssigmaew=mu_0*1.2*self.N_s**2/self.p*1.2*(2/3*self.tau_p+0.01) #end winding leakage inductance
L_ssigmag=2*mu_0*self.l_s*self.N_s**2/self.p/q1*(5*(g/b_so)/(5+4*(g/b_so))) # tooth tip leakage inductance
L_ssigma=(L_ssigmas+L_ssigmaew+L_ssigmag) # stator leakage inductance
# Calculating effective air gap
At_g=g_1*self.B_gfm/mu_0
At_t=self.h_s*(400*self.B_tmax+7*(self.B_tmax)**13)
At_sy=self.tau_p*0.5*(400*self.B_symax+7*(self.B_symax)**13)
At_pc=(h_pc+h_ps)*(400*self.B_pc+7*(self.B_pc)**13)
At_ry=self.tau_p*0.5*(400*self.B_rymax+7*(self.B_rymax)**13)
g_eff = (At_g+At_t+At_sy+At_pc+At_ry)*g_1/At_g
self.L_m = 6*k_wd**2*self.N_s**2*mu_0*self.r_s*self.l_s/pi/g_eff/self.p**2
B_r1=(mu_0*self.I_f*self.N_f*4*sin(0.5*(self.b_p/self.tau_p)*pi))/g_eff/pi
# Calculating direct axis and quadrature axes inductances
L_dm= (self.b_p/self.tau_p +(1/pi)*sin(pi*self.b_p/self.tau_p))*self.L_m
L_qm=(self.b_p/self.tau_p -(1/pi)*sin(pi*self.b_p/self.tau_p)+2/(3*pi)*cos(self.b_p*pi/2*self.tau_p))*self.L_m
# Calculating actual load angle
delta_m=(atan(om_e*L_qm*self.I_s/self.E_s))
L_d=L_dm+L_ssigma
L_q=L_qm+L_ssigma
I_sd=self.I_s*sin(delta_m)
I_sq=self.I_s*cos(delta_m)
# induced voltage
E_p=om_e*L_dm*I_sd+sqrt(self.E_s**2-(om_e*L_qm*I_sq)**2)
#M_sf =mu_0*8*self.r_s*self.l_s*k_wd*self.N_s*self.N_f*sin(0.5*self.b_p/self.tau_p*pi)/(self.p*g_eff*pi)
#I_f1=sqrt(2)*(E_p)/(om_e*M_sf)
#I_f2=(E_p/self.E_s)*self.B_g*g_eff*pi/(4*self.N_f*mu_0*sin(pi*self.b_p/2/self.tau_p))
#phi_max_stator=k_wd*self.N_s*pi*self.r_s*self.l_s*2*mu_0*self.N_f*self.I_f*4*sin(0.5*self.b_p/self.tau_p/pi)/(self.p*pi*g_eff*pi)
#M_sf=mu_0*8*self.r_s*self.l_s*k_wd*self.N_s*self.N_f*sin(0.5*b_p/self.tau_p/pi)/(self.p*g_eff*pi)
L_tot=self.l_s+2*self.tau_p
# Excitation power
V_fn=500
Power_excitation=V_fn*2*self.I_f #total rated power in excitation winding
self.Power_ratio =Power_excitation*100/self.machine_rating
# Calculating Electromagnetically Active mass
L_tot=self.l_s+2*self.tau_p
V_Cuss=m*l_Cus*A_Cus # volume of copper in stator
V_Cusr=l_Cur*A_Cur # volume of copper in rotor
V_Fest=(self.l_s*pi*((self.r_s+self.h_s)**2-self.r_s**2)-2*m*q1*self.p*self.b_s*self.h_s*self.l_s) # volume of iron in stator tooth
V_Fesy=self.l_s*pi*((self.r_s+self.h_s+self.h_ys)**2-(self.r_s+self.h_s)**2) # volume of iron in stator yoke
V_Fert=2*self.p*l_pfe*(h_pc*b_pc+self.b_p*h_ps) # volume of iron in rotor pole
V_Fery=l_pfe*pi*((r_r-h_ps-h_pc)**2-(r_r-h_ps-h_pc-self.h_yr)**2) # # volume of iron in rotor yoke
self.Copper=(V_Cuss+V_Cusr)*self.rho_Copper
M_Fest=V_Fest*self.rho_Fe
M_Fesy=V_Fesy*self.rho_Fe
M_Fert=V_Fert*self.rho_Fe
M_Fery=V_Fery*self.rho_Fe
self.Iron=M_Fest+M_Fesy+M_Fert+M_Fery
I_snom=self.machine_rating/(3*self.E_s*cos_phi)
## Optional## Calculating mmf ratio
F_1no_load=3*2**0.5*self.N_s*k_wd*self.I_s/(pi*self.p)
Nf_If_no_load=self.N_f*self.I_f
F_1_rated=(3*2**0.5*self.N_s*k_wd*I_srated)/(pi*self.p)
Nf_If_rated=2*Nf_If_no_load
self.Load_mmf_ratio=Nf_If_rated/F_1_rated
## Calculating losses
#1. Copper losses
K_R=1.2
P_Cuss=m*I_snom**2*self.R_s*K_R
P_Cusr=self.I_f**2*self.R_r
P_Cusnom_total=P_Cuss+P_Cusr
#2. Iron losses ( Hysteresis and Eddy currents)
P_Hyys=M_Fesy*(self.B_symax/1.5)**2*(P_Fe0h*om_e/(2*pi*60)) # Hysteresis losses in stator yoke
P_Ftys=M_Fesy*(self.B_symax/1.5)**2*(P_Fe0e*(om_e/(2*pi*60))**2) # Eddy losses in stator yoke
P_Fesynom=P_Hyys+P_Ftys
P_Hyd=M_Fest*(self.B_tmax/1.5)**2*(P_Fe0h*om_e/(2*pi*60)) # Hysteresis losses in stator teeth
P_Ftd=M_Fest*(self.B_tmax/1.5)**2*(P_Fe0e*(om_e/(2*pi*60))**2) # Eddy losses in stator teeth
P_Festnom=P_Hyd+P_Ftd
# brushes
delta_v=1
self.n_brushes=(self.I_f*2/120)
if (self.n_brushes<0.5):
self.n_brushes=1
else:
self.n_brushes=round(self.n_brushes)
#3. brush losses
p_b=2*delta_v*(self.I_f)
self.Losses=P_Cusnom_total+P_Festnom+P_Fesynom+p_b
self.gen_eff=self.machine_rating*100/(self.Losses+self.machine_rating)
################################################## Structural Design ########################################################
## Structural deflection calculations
#rotor structure
q3 = self.B_g**2/2/mu_0 # normal component of Maxwell's stress
l = self.l_s #l-stator core length
l_b = 2*self.tau_p #end winding length
l_e =self.l_s+2*0.001*self.r_s # equivalent core length
        a_r = (self.b_r*self.d_r)-((self.b_r-2*self.t_wr)*(self.d_r-2*self.t_wr)) # cross-sectional area of rotor arms
A_r = l*self.t # cross-sectional area of rotor cylinder
N_r = round(self.n_r)
theta_r =pi/N_r # half angle between spokes
I_r =l*self.t**3/12 # second moment of area of rotor cylinder
I_arm_axi_r =((self.b_r*self.d_r**3)-((self.b_r-2*self.t_wr)*(self.d_r-2*self.t_wr)**3))/12 # second moment of area of rotor arm
        I_arm_tor_r = ((self.d_r*self.b_r**3)-((self.d_r-2*self.t_wr)*(self.b_r-2*self.t_wr)**3))/12 # second moment of area of rotor arm w.r.t torsion
R = r_r-h_ps-h_pc-0.5*self.h_yr
R_1 = R-self.h_yr*0.5 # inner radius of rotor cylinder
k_1 = sqrt(I_r/A_r) # radius of gyration
m1 =(k_1/R)**2
c =R/500
self.u_all_r =R/10000 # allowable radial deflection
self.b_all_r =2*pi*self.R_o/N_r # allowable circumferential arm dimension
        # Calculating radial deflection of rotor structure according to McDonald's
Numer=R**3*((0.25*(sin(theta_r)-(theta_r*cos(theta_r)))/(sin(theta_r))**2)-(0.5/sin(theta_r))+(0.5/theta_r))
Pov=((theta_r/(sin(theta_r))**2)+1/tan(theta_r))*((0.25*R/A_r)+(0.25*R**3/I_r))
Qov=R**3/(2*I_r*theta_r*(m1+1))
Lov=(R_1-R_o)/a_r
Denom=I_r*(Pov-Qov+Lov) # radial deflection % rotor
self.u_Ar =(q3*R**2/E/self.h_yr)*(1+Numer/Denom)
# Calculating axial deflection of rotor structure
w_r =self.rho_Fes*g1*sin(phi)*a_r*N_r
mass_st_lam=self.rho_Fe*2*pi*(R+0.5*self.h_yr)*l*self.h_yr # mass of rotor yoke steel
W =g1*sin(phi)*(mass_st_lam+(V_Cusr*self.rho_Copper)+M_Fert)/N_r # weight of rotor cylinder
l_ir =R # length of rotor arm beam at which rotor cylinder acts
l_iir =R_1
self.y_Ar =(W*l_ir**3/12/E/I_arm_axi_r)+(w_r*l_iir**4/24/E/I_arm_axi_r) # axial deflection
#Calculating torsional deflection of rotor structure
self.z_all_r =0.05*2*pi*R/360 # allowable torsional deflection
self.z_A_r =(2*pi*(R-0.5*self.h_yr)*l/N_r)*sigma*(l_ir-0.5*self.h_yr)**3/3/E/I_arm_tor_r # circumferential deflection
#STATOR structure
A_st =l*self.t_s
a_s = (self.b_st*self.d_s)-((self.b_st-2*self.t_ws)*(self.d_s-2*self.t_ws))
N_st = round(self.n_s)
theta_s =pi/N_st
I_st =l*self.t_s**3/12
I_arm_axi_s =((self.b_st*self.d_s**3)-((self.b_st-2*self.t_ws)*(self.d_s-2*self.t_ws)**3))/12 # second moment of area of stator arm
        I_arm_tor_s = ((self.d_s*self.b_st**3)-((self.d_s-2*self.t_ws)*(self.b_st-2*self.t_ws)**3))/12 # second moment of area of rotor arm w.r.t torsion
R_st =(self.r_s+self.h_s+self.h_ys*0.5)
R_1s = R_st-self.h_ys*0.5
k_2 = sqrt(I_st/A_st)
m2 =(k_2/R_st)**2
# allowable deflections
self.b_all_s =2*pi*self.R_o/N_st
self.u_all_s = R_st/10000
self.y_all =2*l/100 # allowable axial deflection
self.z_all_s =0.05*2*pi*R_st/360 # allowable torsional deflection
# Calculating radial deflection according to McDonald's
Numers=R_st**3*((0.25*(sin(theta_s)-(theta_s*cos(theta_s)))/(sin(theta_s))**2)-(0.5/sin(theta_s))+(0.5/theta_s))
Povs=((theta_s/(sin(theta_s))**2)+1/tan(theta_s))*((0.25*R_st/A_st)+(0.25*R_st**3/I_st))
Qovs=R_st**3/(2*I_st*theta_s*(m2+1))
Lovs=(R_1s-R_o)*0.5/a_s
Denoms=I_st*(Povs-Qovs+Lovs)
self.R_out=(R/0.995+self.h_s+self.h_ys)
self.u_As =(q3*R_st**2/E/self.t_s)*(1+Numers/Denoms)
# Calculating axial deflection according to McDonald
l_is =R_st-self.R_o
l_iis =l_is
l_iiis =l_is
mass_st_lam_s= M_Fest+pi*l*self.rho_Fe*((R_st+0.5*self.h_ys)**2-(R_st-0.5*self.h_ys)**2)
W_is =g1*sin(phi)*(self.rho_Fes*l*self.d_s**2*0.5) # weight of rotor cylinder # length of rotor arm beam at which self-weight acts
W_iis =g1*sin(phi)*(V_Cuss*self.rho_Copper+mass_st_lam_s)/2/N_st
w_s =self.rho_Fes*g1*sin(phi)*a_s*N_st
X_comp1 = (W_is*l_is**3/12/E/I_arm_axi_s)
X_comp2 =(W_iis*l_iis**4/24/E/I_arm_axi_s)
X_comp3 =w_s*l_iiis**4/24/E/I_arm_axi_s
self.y_As =X_comp1+X_comp2+X_comp3 # axial deflection
# Calculating torsional deflection
self.z_A_s =2*pi*(R_st+0.5*self.t_s)*l/(2*N_st)*sigma*(l_is+0.5*self.t_s)**3/3/E/I_arm_tor_s
# tangential stress constraints
self.TC1=T/(2*pi*sigma)
self.TC2=R**2*l
self.TC3=R_st**2*l
mass_stru_steel =2*(N_st*(R_1s-self.R_o)*a_s*self.rho_Fes)
# Calculating inactive mass and total mass
self.Structural_mass=mass_stru_steel+(N_r*(R_1-self.R_o)*a_r*self.rho_Fes)
self.Mass=self.Copper+self.Iron+self.Structural_mass
self.I = np.array([0.0, 0.0, 0.0])
# Calculating mass moments of inertia and center of mass
self.I[0] = (0.5*self.Mass*self.R_out**2)
self.I[1] = (0.25*self.Mass*self.R_out**2+(1/12)*self.Mass*self.l_s**2)
self.I[2] = self.I[1]
self.cm = np.array([0.0, 0.0, 0.0])
self.cm[0] = self.main_shaft_cm[0] + self.main_shaft_length/2. + self.l_s/2
self.cm[1] = self.main_shaft_cm[1]
self.cm[2] = self.main_shaft_cm[2]
return(self.B_symax, self.B_tmax, self.B_rymax,self.B_gfm, self.B_g,self.B_pc, self.N_s, self.b_s, \
self.b_t, self.A_Cuscalc, self.A_Curcalc,self.b_p,self.h_p, self.p, self.E_s, self.f,self.I_s, self.R_s, self.L_m, self.A_1,\
self.J_s,self.R_r, self.Losses,self.Load_mmf_ratio,self.Power_ratio,self.n_brushes,self.J_f,self.K_rad, self.gen_eff,\
self.S, self.Slot_aspect_ratio, self.Copper,self.Iron,self.u_Ar,self.y_Ar,self.z_A_r,\
self.u_As,self.y_As,self.z_A_s,self.u_all_r,self.u_all_s,self.y_all,self.z_all_s,self.z_all_r,self.b_all_s, \
self.b_all_r,self.TC1,self.TC2,self.TC3,self.R_out,self.Structural_mass,self.Mass,self.cm,self.I)
####################################################Cost Analysis#######################################################################
class EESG_Cost(Component):
""" Provides a material cost estimate for EESG. Manufacturing costs are excluded"""
def __init__(self):
super(EESG_Cost, self).__init__()
# Inputs
# Specific cost of material by type
self.add_param('C_Cu',val=0.0, desc='Specific cost of copper')
self.add_param('C_Fe',val=0.0,desc='Specific cost of magnetic steel/iron')
self.add_param('C_Fes',val=0.0,desc='Specific cost of structural steel')
# Mass of each material type
self.add_param('Copper',val=0.0, desc='Copper mass')
self.add_param('Iron',val=0.0, desc='Iron mass')
self.add_param('Structural_mass',val=0.0, desc='Structural mass')
# Outputs
self.add_output('Costs',val=0.0,desc='Total cost')
self.gen_costs=generator_costing()
def solve_nonlinear(self,inputs,outputs,resid):
(outputs['Costs'])=self.gen_costs.compute(inputs['Copper'],inputs['C_Cu'], \
inputs['Iron'],inputs['C_Fe'],inputs['C_Fes'],inputs['Structural_mass'])
return outputs
class generator_costing(object):
def __init__(self):
pass
def compute(self,Copper,C_Cu,Iron,C_Fe,C_Fes,Structural_mass):
self.Copper=Copper
self.Iron=Iron
self.Structural_mass=Structural_mass
# Material cost as a function of material mass and specific cost of material
K_gen=self.Copper*C_Cu+self.Iron*C_Fe
Cost_str=C_Fes*self.Structural_mass
Costs=K_gen+Cost_str
return(Costs)
####################################################OPTIMISATION SET_UP ###############################################################
class EESG_Opt(Group):
""" Creates a new Group containing EESG and EESG_Cost"""
def __init__(self):
super(EESG_Opt, self).__init__()
self.add('machine_rating', IndepVarComp('machine_rating',0.0),promotes=['*'])
self.add('Torque',IndepVarComp('Torque', val=0.0),promotes=['*'])
self.add('n_nom', IndepVarComp('n_nom', val=0.0),promotes=['*'])
self.add('main_shaft_cm', IndepVarComp('main_shaft_cm',val=np.array([0.0, 0.0, 0.0])),promotes=['*'])
self.add('main_shaft_length',IndepVarComp('main_shaft_length',val=0.0),promotes=['*'])
self.add('r_s',IndepVarComp('r_s',0.0),promotes=['*'])
self.add('l_s',IndepVarComp('l_s',0.0),promotes=['*'])
self.add('h_s',IndepVarComp('h_s',0.0),promotes=['*'])
self.add('tau_p',IndepVarComp('tau_p',0.0),promotes=['*'])
self.add('I_f',IndepVarComp('I_f',0.0),promotes=['*'])
self.add('N_f',IndepVarComp('N_f',0.0),promotes=['*'])
self.add('h_ys',IndepVarComp('h_ys',0.0),promotes=['*'])
self.add('h_yr',IndepVarComp('h_yr',0.0),promotes=['*'])
self.add('n_s',IndepVarComp('n_s',0.0),promotes=['*'])
self.add('b_st',IndepVarComp('b_st',0.0),promotes=['*'])
self.add('n_r',IndepVarComp('n_r',0.0),promotes=['*'])
self.add('b_r',IndepVarComp('b_r',0.0),promotes=['*'])
self.add('d_r',IndepVarComp('d_r',0.0),promotes=['*'])
self.add('d_s',IndepVarComp('d_s',0.0),promotes=['*'])
self.add('t_wr',IndepVarComp('t_wr',0.0),promotes=['*'])
self.add('t_ws',IndepVarComp('t_ws',0.0),promotes=['*'])
self.add('R_o',IndepVarComp('R_o',0.0),promotes=['*'])
self.add('rho_Fes',IndepVarComp('rho_Fes',0.0),promotes=['*'])
self.add('rho_Fe',IndepVarComp('rho_Fe',0.0),promotes=['*'])
self.add('rho_Copper',IndepVarComp('rho_Copper',0.0),promotes=['*'])
# add EESG component, create constraint equations
self.add('EESG',EESG(),promotes=['*'])
self.add('con_uAs', ExecComp('con_uAs =u_all_s-u_As'),promotes=['*'])
self.add('con_zAs', ExecComp('con_zAs =z_all_s-z_A_s'),promotes=['*'])
self.add('con_yAs', ExecComp('con_yAs =y_all-y_As'),promotes=['*'])
self.add('con_bst', ExecComp('con_bst =b_all_s-b_st'),promotes=['*'])
self.add('con_uAr', ExecComp('con_uAr =u_all_r-u_Ar'),promotes=['*'])
self.add('con_zAr', ExecComp('con_zAr =z_all_r-z_A_r'),promotes=['*'])
self.add('con_yAr', ExecComp('con_yAr =y_all-y_Ar'),promotes=['*'])
self.add('con_br', ExecComp('con_br =b_all_r-b_r'),promotes=['*'])
self.add('con_TC2', ExecComp('con_TC2 =TC2-TC1'),promotes=['*'])
self.add('con_TC3', ExecComp('con_TC3 =TC3-TC1'),promotes=['*'])
# add EESG_Cost component
self.add('EESG_Cost',EESG_Cost(),promotes=['*'])
self.add('C_Cu',IndepVarComp('C_Cu',val=0.0),promotes=['*'])
self.add('C_Fe',IndepVarComp('C_Fe',val=0.0),promotes=['*'])
self.add('C_Fes',IndepVarComp('C_Fes',val=0.0),promotes=['*'])
def EESG_Opt_example():
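    """Example cost minimisation of an EESG sized for a 5 MW reference turbine using the CONMIN optimizer via pyOptSparse."""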
opt_problem=Problem(root=EESG_Opt())
#Example optimization of an EESG for costs on a 5 MW reference turbine
# add optimizer and set-up problem (using user defined input on objective function)
#
opt_problem.driver=pyOptSparseDriver()
opt_problem.driver.options['optimizer'] = 'CONMIN'
opt_problem.driver.add_objective('Costs') # Define Objective
opt_problem.driver.opt_settings['IPRINT'] = 4
opt_problem.driver.opt_settings['ITRM'] = 3
opt_problem.driver.opt_settings['ITMAX'] = 10
opt_problem.driver.opt_settings['DELFUN'] = 1e-3
opt_problem.driver.opt_settings['DABFUN'] = 1e-3
opt_problem.driver.opt_settings['IFILE'] = 'CONMIN_EESG.out'
opt_problem.root.deriv_options['type']='fd'
    # Specified target efficiency (%)
Eta_Target = 93.0
# Set bounds for design variables for an EESG designed for a 5MW turbine
opt_problem.driver.add_desvar('r_s',lower=0.5,upper=9.0)
opt_problem.driver.add_desvar('l_s', lower=0.5, upper=2.5)
opt_problem.driver.add_desvar('h_s', lower=0.06, upper=0.15)
opt_problem.driver.add_desvar('tau_p', lower=0.04, upper=0.2)
opt_problem.driver.add_desvar('N_f', lower=10, upper=300)
opt_problem.driver.add_desvar('I_f', lower=1, upper=500)
opt_problem.driver.add_desvar('n_r', lower=5.0, upper=15.0)
opt_problem.driver.add_desvar('h_yr', lower=0.01, upper=0.25)
opt_problem.driver.add_desvar('h_ys', lower=0.01, upper=0.25)
opt_problem.driver.add_desvar('b_r', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('d_r', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('t_wr', lower=0.001, upper=0.2)
opt_problem.driver.add_desvar('n_s', lower=5.0, upper=15.0)
opt_problem.driver.add_desvar('b_st', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('d_s', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('t_ws', lower=0.001, upper=0.2)
    # set up constraints for the EESG generator
opt_problem.driver.add_constraint('B_symax',upper=2.0-1.0e-6) #1
opt_problem.driver.add_constraint('B_rymax',upper=2.0-1.0e-6) #2
opt_problem.driver.add_constraint('B_tmax',upper=2.0-1.0e-6) #3
opt_problem.driver.add_constraint('B_gfm',lower=0.617031,upper=1.057768) #4
opt_problem.driver.add_constraint('B_g',lower=0.7,upper=1.2) #5
opt_problem.driver.add_constraint('B_pc',upper=2.0) #6
opt_problem.driver.add_constraint('E_s',lower=500.0,upper=5000.0) #7
opt_problem.driver.add_constraint('con_uAs',lower=0.0+1.0e-6) #8
opt_problem.driver.add_constraint('con_zAs',lower=0.0+1.0e-6) #9
opt_problem.driver.add_constraint('con_yAs',lower=0.0+1.0e-6) #10
opt_problem.driver.add_constraint('con_uAr',lower=0.0+1.0e-6) #11
opt_problem.driver.add_constraint('con_zAr',lower=0.0+1.0e-6) #12
opt_problem.driver.add_constraint('con_yAr',lower=0.0+1.0e-6) #13
opt_problem.driver.add_constraint('con_TC2',lower=0.0+1.0e-6) #14
opt_problem.driver.add_constraint('con_TC3',lower=0.0+1e-6) #15
opt_problem.driver.add_constraint('con_br',lower=0.0+1e-6) #16
opt_problem.driver.add_constraint('con_bst',lower=0.0-1e-6) #17
opt_problem.driver.add_constraint('A_1',upper=60000.0-1e-6) #18
opt_problem.driver.add_constraint('J_s',upper=6.0) #19
opt_problem.driver.add_constraint('J_f',upper=6.0) #20
opt_problem.driver.add_constraint('A_Cuscalc',lower=5.0,upper=300) #22
opt_problem.driver.add_constraint('A_Curcalc',lower=10,upper=300) #23
opt_problem.driver.add_constraint('K_rad',lower=0.2+1e-6,upper=0.27) #24
opt_problem.driver.add_constraint('Slot_aspect_ratio',lower=4.0,upper=10.0)#25
opt_problem.driver.add_constraint('gen_eff',lower=Eta_Target) #26
opt_problem.driver.add_constraint('n_brushes',upper=6) #27
opt_problem.driver.add_constraint('Power_ratio',upper=2-1.0e-6) #28
opt_problem.setup()
# Specify Target machine parameters
opt_problem['machine_rating']=5000000.0
opt_problem['Torque']=4.143289e6
opt_problem['n_nom']=12.1
# Initial design variables
opt_problem['r_s']=3.2
opt_problem['l_s']=1.4
opt_problem['h_s']= 0.060
opt_problem['tau_p']= 0.170
opt_problem['I_f']= 69
opt_problem['N_f']= 100
opt_problem['h_ys']= 0.130
opt_problem['h_yr']= 0.120
opt_problem['n_s']= 5
opt_problem['b_st']= 0.470
opt_problem['n_r']=5
opt_problem['b_r']= 0.480
opt_problem['d_r']= 0.510
opt_problem['d_s']= 0.400
opt_problem['t_wr']=0.140
opt_problem['t_ws']=0.070
opt_problem['R_o']=0.43 #10MW: 0.523950817,#5MW: 0.43, #3MW:0.363882632 #1.5MW: 0.2775 0.75MW: 0.17625
# Costs
opt_problem['C_Cu']=4.786
opt_problem['C_Fe']= 0.556
opt_problem['C_Fes']=0.50139
#Material properties
opt_problem['rho_Fe']= 7700 #Magnetic Steel/iron density
opt_problem['rho_Fes']= 7850 #structural Steel density
opt_problem['rho_Copper']=8900 # Kg/m3 copper density
opt_problem['main_shaft_cm']=np.array([0.0, 0.0, 0.0])
opt_problem['main_shaft_length'] =2.0
#Run optimization
opt_problem.run()
"""Uncomment to print solution to screen/an excel file
raw_data = {'Parameters': ['Rating','Stator Arms', 'Stator Axial arm dimension','Stator Circumferential arm dimension',' Stator arm Thickness' ,'Rotor Arms', 'Rotor Axial arm dimension','Rotor Circumferential arm dimension',\
'Rotor Arm thickness', ' Rotor Radial deflection', 'Rotor Axial deflection','Rotor circum deflection', 'Stator Radial deflection',' Stator Axial deflection',' Stator Circumferential deflection','Air gap diameter', 'Stator length',\
'l/D ratio', 'Pole pitch', 'Stator slot height','Stator slot width','Slot aspect ratio','Stator tooth width', 'Stator yoke height', 'Rotor yoke height', 'Rotor pole height', 'Rotor pole width', 'Average no load flux density', \
'Peak air gap flux density','Peak stator yoke flux density','Peak rotor yoke flux density','Stator tooth flux density','Rotor pole core flux density','Pole pairs', 'Generator output frequency', 'Generator output phase voltage(rms value)', \
'Generator Output phase current', 'Stator resistance', 'Synchronous inductance','Stator slots','Stator turns','Stator conductor cross-section','Stator Current density ','Specific current loading','Field turns','Conductor cross-section',\
'Field Current','D.C Field resistance','MMF ratio at rated load(Rotor/Stator)','Excitation Power (% of Rated Power)','Number of brushes/polarity','Field Current density','Generator Efficiency', 'Iron mass', 'Copper mass','Mass of Arms','Total Mass','Total Cost'],\
'Values': [opt_problem['machine_rating']/1e6,opt_problem['n_s'],opt_problem['d_s']*1000,opt_problem['b_st']*1000,opt_problem['t_ws']*1000,opt_problem['n_r'],opt_problem['d_r']*1000,opt_problem['b_r']*1000,opt_problem['t_wr']*1000,opt_problem['u_Ar']*1000,\
opt_problem['y_Ar']*1000,opt_problem['z_A_r']*1000,opt_problem['u_As']*1000,opt_problem['y_As']*1000,opt_problem['z_A_s']*1000,2*opt_problem['r_s'],opt_problem['l_s'],opt_problem['K_rad'],opt_problem['tau_p']*1000,opt_problem['h_s']*1000,opt_problem['b_s']*1000,\
opt_problem['Slot_aspect_ratio'],opt_problem['b_t']*1000,opt_problem['h_ys']*1000,opt_problem['h_yr']*1000,opt_problem['h_p']*1000,opt_problem['b_p']*1000,opt_problem['B_gfm'],opt_problem['B_g'],opt_problem['B_symax'],opt_problem['B_rymax'],opt_problem['B_tmax'],\
opt_problem['B_pc'],opt_problem['p'],opt_problem['f'],opt_problem['E_s'],opt_problem['I_s'],opt_problem['R_s'],opt_problem['L_m'],opt_problem['S'],opt_problem['N_s'],opt_problem['A_Cuscalc'],opt_problem['J_s'],opt_problem['A_1']/1000,opt_problem['N_f'],opt_problem['A_Curcalc'],\
opt_problem['I_f'],opt_problem['R_r'],opt_problem['Load_mmf_ratio'],opt_problem['Power_ratio'],opt_problem['n_brushes'],opt_problem['J_f'],opt_problem['gen_eff'],opt_problem['Iron']/1000,opt_problem['Copper']/1000,opt_problem['Structural_mass']/1000,\
opt_problem['Mass']/1000,opt_problem['Costs']/1000],
'Limit': ['','','',opt_problem['b_all_s']*1000,'','','',opt_problem['b_all_r']*1000,'',opt_problem['u_all_r']*1000,opt_problem['y_all']*1000,opt_problem['z_all_r']*1000,opt_problem['u_all_s']*1000,opt_problem['y_all']*1000,opt_problem['z_all_s']*1000,\
'','','(0.2-0.27)','','','','(4-10)','','','','','','(0.62-1.05)','1.2','2','2','2','2','','(10-60)','','','','','','','','(3-6)','<60','','','','','','<2%','','(3-6)',Eta_Target,'','','','',''],
'Units':['MW','unit','mm','mm','mm','unit','mm','mm','mm','mm','mm','mm','mm','mm','mm','m','m','','','mm','mm','mm','mm','mm','mm','mm','mm','T','T','T','T','T','T','-','Hz','V','A','om/phase',\
'p.u','slots','turns','mm^2','A/mm^2','kA/m','turns','mm^2','A','ohm','%','%','brushes','A/mm^2','turns','%','tons','tons','tons','1000$']}
df=pandas.DataFrame(raw_data, columns=['Parameters','Values','Limit','Units'])
	print(df)
df.to_excel('EESG_'+str(opt_problem['machine_rating']/1e6)+'MW_1.7.x.xlsx')
"""
if __name__ == "__main__":
# Run an example optimization of EESG generator on cost
EESG_Opt_example()
|
[
"openmdao.api.ExecComp",
"math.tan",
"openmdao.api.IndepVarComp",
"math.sqrt",
"math.cos",
"numpy.array",
"openmdao.drivers.pyoptsparse_driver.pyOptSparseDriver",
"math.sin",
"math.atan"
] |
[((30236, 30255), 'openmdao.drivers.pyoptsparse_driver.pyOptSparseDriver', 'pyOptSparseDriver', ([], {}), '()\n', (30253, 30255), False, 'from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver\n'), ((35050, 35075), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (35058, 35075), True, 'import numpy as np\n'), ((11885, 11906), 'math.sin', 'sin', (['(y_tau_p * pi / 2)'], {}), '(y_tau_p * pi / 2)\n', (11888, 11906), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15751, 15790), 'math.atan', 'atan', (['(om_e * L_qm * self.I_s / self.E_s)'], {}), '(om_e * L_qm * self.I_s / self.E_s)\n', (15755, 15790), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20151, 20166), 'math.sqrt', 'sqrt', (['(I_r / A_r)'], {}), '(I_r / A_r)\n', (20155, 20166), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22265, 22282), 'math.sqrt', 'sqrt', (['(I_st / A_st)'], {}), '(I_st / A_st)\n', (22269, 22282), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((24269, 24294), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (24277, 24294), True, 'import numpy as np\n'), ((24520, 24545), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (24528, 24545), True, 'import numpy as np\n'), ((11948, 11964), 'math.sin', 'sin', (['(pi / 6 / q1)'], {}), '(pi / 6 / q1)\n', (11951, 11964), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((13731, 13738), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (13735, 13738), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15845, 15857), 'math.sin', 'sin', (['delta_m'], {}), '(delta_m)\n', (15848, 15857), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15875, 15887), 'math.cos', 'cos', (['delta_m'], {}), '(delta_m)\n', (15878, 15887), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15939, 15986), 'math.sqrt', 'sqrt', (['(self.E_s ** 2 - (om_e * L_qm * I_sq) ** 2)'], {}), '(self.E_s ** 2 - (om_e * L_qm * I_sq) ** 2)\n', (15943, 15986), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((27278, 27313), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""machine_rating"""', '(0.0)'], {}), "('machine_rating', 0.0)\n", (27290, 27313), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27350, 27381), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""Torque"""'], {'val': '(0.0)'}), "('Torque', val=0.0)\n", (27362, 27381), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27419, 27449), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_nom"""'], {'val': '(0.0)'}), "('n_nom', val=0.0)\n", (27431, 27449), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27607, 27649), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""main_shaft_length"""'], {'val': '(0.0)'}), "('main_shaft_length', val=0.0)\n", (27619, 27649), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27687, 27711), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""r_s"""', '(0.0)'], {}), "('r_s', 0.0)\n", (27699, 
27711), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27745, 27769), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""l_s"""', '(0.0)'], {}), "('l_s', 0.0)\n", (27757, 27769), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27803, 27827), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_s"""', '(0.0)'], {}), "('h_s', 0.0)\n", (27815, 27827), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27863, 27889), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tau_p"""', '(0.0)'], {}), "('tau_p', 0.0)\n", (27875, 27889), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27923, 27947), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""I_f"""', '(0.0)'], {}), "('I_f', 0.0)\n", (27935, 27947), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27981, 28005), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""N_f"""', '(0.0)'], {}), "('N_f', 0.0)\n", (27993, 28005), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28044, 28069), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_ys"""', '(0.0)'], {}), "('h_ys', 0.0)\n", (28056, 28069), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28104, 28129), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_yr"""', '(0.0)'], {}), "('h_yr', 0.0)\n", (28116, 28129), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28163, 28187), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_s"""', '(0.0)'], {}), "('n_s', 0.0)\n", (28175, 28187), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28222, 28247), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""b_st"""', '(0.0)'], {}), "('b_st', 0.0)\n", (28234, 28247), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28281, 28305), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_r"""', '(0.0)'], {}), "('n_r', 0.0)\n", (28293, 28305), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28339, 28363), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""b_r"""', '(0.0)'], {}), "('b_r', 0.0)\n", (28351, 28363), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28397, 28421), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""d_r"""', '(0.0)'], {}), "('d_r', 0.0)\n", (28409, 28421), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28455, 28479), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""d_s"""', '(0.0)'], {}), "('d_s', 0.0)\n", (28467, 28479), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28514, 28539), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""t_wr"""', '(0.0)'], {}), "('t_wr', 0.0)\n", (28526, 28539), False, 'from openmdao.api import Group, Problem, 
Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28574, 28599), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""t_ws"""', '(0.0)'], {}), "('t_ws', 0.0)\n", (28586, 28599), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28633, 28657), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""R_o"""', '(0.0)'], {}), "('R_o', 0.0)\n", (28645, 28657), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28699, 28727), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Fes"""', '(0.0)'], {}), "('rho_Fes', 0.0)\n", (28711, 28727), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28764, 28791), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Fe"""', '(0.0)'], {}), "('rho_Fe', 0.0)\n", (28776, 28791), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28832, 28863), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Copper"""', '(0.0)'], {}), "('rho_Copper', 0.0)\n", (28844, 28863), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29005, 29038), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_uAs =u_all_s-u_As"""'], {}), "('con_uAs =u_all_s-u_As')\n", (29013, 29038), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29078, 29112), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_zAs =z_all_s-z_A_s"""'], {}), "('con_zAs =z_all_s-z_A_s')\n", (29086, 29112), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29152, 29183), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_yAs =y_all-y_As"""'], {}), "('con_yAs =y_all-y_As')\n", (29160, 29183), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29223, 29256), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_bst =b_all_s-b_st"""'], {}), "('con_bst =b_all_s-b_st')\n", (29231, 29256), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29296, 29329), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_uAr =u_all_r-u_Ar"""'], {}), "('con_uAr =u_all_r-u_Ar')\n", (29304, 29329), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29369, 29403), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_zAr =z_all_r-z_A_r"""'], {}), "('con_zAr =z_all_r-z_A_r')\n", (29377, 29403), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29443, 29474), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_yAr =y_all-y_Ar"""'], {}), "('con_yAr =y_all-y_Ar')\n", (29451, 29474), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29513, 29544), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_br =b_all_r-b_r"""'], {}), "('con_br =b_all_r-b_r')\n", (29521, 29544), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29584, 29612), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_TC2 =TC2-TC1"""'], {}), "('con_TC2 
=TC2-TC1')\n", (29592, 29612), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29652, 29680), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_TC3 =TC3-TC1"""'], {}), "('con_TC3 =TC3-TC1')\n", (29660, 29680), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29801, 29830), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Cu"""'], {'val': '(0.0)'}), "('C_Cu', val=0.0)\n", (29813, 29830), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29865, 29894), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Fe"""'], {'val': '(0.0)'}), "('C_Fe', val=0.0)\n", (29877, 29894), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((29930, 29960), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Fes"""'], {'val': '(0.0)'}), "('C_Fes', val=0.0)\n", (29942, 29960), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((6362, 6387), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6370, 6387), True, 'import numpy as np\n'), ((6510, 6535), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6518, 6535), True, 'import numpy as np\n'), ((6652, 6677), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6660, 6677), True, 'import numpy as np\n'), ((11935, 11946), 'math.sin', 'sin', (['(pi / 6)'], {}), '(pi / 6)\n', (11938, 11946), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((13358, 13395), 'math.sin', 'sin', (['(0.5 * self.b_p * pi / self.tau_p)'], {}), '(0.5 * self.b_p * pi / self.tau_p)\n', (13361, 13395), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((23259, 23267), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (23262, 23267), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((12644, 12651), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (12648, 12651), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((14243, 14255), 'math.cos', 'cos', (['delta_m'], {}), '(delta_m)\n', (14246, 14255), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15397, 15436), 'math.sin', 'sin', (['(0.5 * (self.b_p / self.tau_p) * pi)'], {}), '(0.5 * (self.b_p / self.tau_p) * pi)\n', (15400, 15436), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15544, 15575), 'math.sin', 'sin', (['(pi * self.b_p / self.tau_p)'], {}), '(pi * self.b_p / self.tau_p)\n', (15547, 15575), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((15656, 15691), 'math.cos', 'cos', (['(self.b_p * pi / 2 * self.tau_p)'], {}), '(self.b_p * pi / 2 * self.tau_p)\n', (15659, 15691), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20629, 20641), 'math.tan', 'tan', (['theta_r'], {}), '(theta_r)\n', (20632, 20641), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20929, 20937), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (20932, 20937), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((21082, 21090), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', 
(21085, 21090), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22769, 22781), 'math.tan', 'tan', (['theta_s'], {}), '(theta_s)\n', (22772, 22781), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((23515, 23523), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (23518, 23523), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((27532, 27557), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (27540, 27557), True, 'import numpy as np\n'), ((15619, 15650), 'math.sin', 'sin', (['(pi * self.b_p / self.tau_p)'], {}), '(pi * self.b_p / self.tau_p)\n', (15622, 15650), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20562, 20574), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20565, 20574), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20609, 20621), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20612, 20621), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22701, 22713), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22704, 22713), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22749, 22761), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22752, 22761), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((23428, 23436), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (23431, 23436), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20539, 20551), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20542, 20551), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22678, 22690), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22681, 22690), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20501, 20513), 'math.sin', 'sin', (['theta_r'], {}), '(theta_r)\n', (20504, 20513), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22640, 22652), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (22643, 22652), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((20523, 20535), 'math.cos', 'cos', (['theta_r'], {}), '(theta_r)\n', (20526, 20535), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n'), ((22662, 22674), 'math.cos', 'cos', (['theta_s'], {}), '(theta_s)\n', (22665, 22674), False, 'from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan\n')]
|
import copy
import logging
import os
from typing import Dict, List, Tuple
import checksumdir
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from ..adapter import download_object
logger = logging.getLogger("fastface.dataset")
class _IdentitiyTransforms:
"""Dummy tranforms"""
def __call__(self, img: np.ndarray, targets: Dict) -> Tuple:
return img, targets
def default_collate_fn(batch):
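    # stack images into one NCHW float tensor and convert any ndarray targets to torch tensors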
batch, targets = zip(*batch)
batch = np.stack(batch, axis=0).astype(np.float32)
batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()
for i, target in enumerate(targets):
for k, v in target.items():
if isinstance(v, np.ndarray):
targets[i][k] = torch.from_numpy(v)
return batch, targets
class BaseDataset(Dataset):
def __init__(self, ids: List[str], targets: List[Dict], transforms=None, **kwargs):
super().__init__()
assert isinstance(ids, list), "given `ids` must be list"
        assert isinstance(targets, list), "given `targets` must be list"
        assert len(ids) == len(targets), "length of both lists must be equal"
self.ids = ids
self.targets = targets
self.transforms = _IdentitiyTransforms() if transforms is None else transforms
# set given kwargs to the dataset
for key, value in kwargs.items():
            if hasattr(self, key):
                # warn and skip keys that would overwrite existing dataset attributes
                logger.warning("attribute `%s` already exists on the dataset, skipping", key)
                continue
            setattr(self, key, value)
def __getitem__(self, idx: int) -> Tuple:
img = self._load_image(self.ids[idx])
targets = copy.deepcopy(self.targets[idx])
# apply transforms
img, targets = self.transforms(img, targets)
# clip boxes
targets["target_boxes"] = self._clip_boxes(
targets["target_boxes"], img.shape[:2]
)
# discard zero sized boxes
targets["target_boxes"] = self._discard_zero_size_boxes(targets["target_boxes"])
return (img, targets)
def __len__(self) -> int:
return len(self.ids)
@staticmethod
def _clip_boxes(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
        """Clips box coordinates to the image boundaries given by shape (height, width)."""
height, width = shape
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(min=0, max=width - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(min=0, max=height - 1)
return boxes
@staticmethod
def _discard_zero_size_boxes(boxes: np.ndarray) -> np.ndarray:
        """Discards boxes whose width or height is not strictly positive."""
scale = (boxes[:, [2, 3]] - boxes[:, [0, 1]]).min(axis=1)
return boxes[scale > 0]
@staticmethod
def _load_image(img_file_path: str):
"""loads rgb image using given file path
Args:
            img_file_path (str): image file path to load
Returns:
np.ndarray: rgb image as np.ndarray
"""
img = imageio.imread(img_file_path)
        if not img.flags["C_CONTIGUOUS"]:
            # if img is not contiguous then fix it
            img = np.ascontiguousarray(img, dtype=img.dtype)
        if len(img.shape) == 2:
            # found GRAYSCALE, converting to => RGB
            img = np.stack([img, img, img], axis=-1)
        elif img.shape[2] == 4:
            # found RGBA, converting to => RGB (drop the alpha channel)
            img = img[:, :, :3]
return np.array(img, dtype=np.uint8)
def get_dataloader(
self,
batch_size: int = 1,
shuffle: bool = False,
num_workers: int = 0,
collate_fn=default_collate_fn,
pin_memory: bool = False,
**kwargs
):
return DataLoader(
self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
**kwargs
)
def get_mean_std(self) -> Dict:
        """Computes per-channel mean and std over the dataset, with pixel values scaled to [0, 1]."""
mean_sum, mean_sq_sum = np.zeros(3), np.zeros(3)
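        # accumulate per-image channel means of x and x**2; std is later recovered as sqrt(E[x^2] - E[x]^2)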
for img, _ in tqdm(
self, total=len(self), desc="calculating mean and std for the dataset"
):
d = img.astype(np.float32) / 255
mean_sum[0] += np.mean(d[:, :, 0])
mean_sum[1] += np.mean(d[:, :, 1])
mean_sum[2] += np.mean(d[:, :, 2])
mean_sq_sum[0] += np.mean(d[:, :, 0] ** 2)
mean_sq_sum[1] += np.mean(d[:, :, 1] ** 2)
mean_sq_sum[2] += np.mean(d[:, :, 2] ** 2)
mean = mean_sum / len(self)
std = (mean_sq_sum / len(self) - mean ** 2) ** 0.5
return {"mean": mean.tolist(), "std": std.tolist()}
def get_normalized_boxes(self) -> np.ndarray:
        """Collects all target boxes normalized by the longest side of their image into a single array."""
normalized_boxes = []
for img, targets in tqdm(
self, total=len(self), desc="computing normalized target boxes"
):
if targets["target_boxes"].shape[0] == 0:
continue
max_size = max(img.shape)
normalized_boxes.append(targets["target_boxes"] / max_size)
return np.concatenate(normalized_boxes, axis=0)
def get_box_scale_histogram(self) -> Tuple[np.ndarray, np.ndarray]:
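        # histogram of sqrt(box areas) over power-of-two bin edges (1, 2, 4, ..., 512)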
bins = map(lambda x: 2 ** x, range(10))
total_boxes = []
for _, targets in tqdm(self, total=len(self), desc="getting box sizes"):
if targets["target_boxes"].shape[0] == 0:
continue
total_boxes.append(targets["target_boxes"])
total_boxes = np.concatenate(total_boxes, axis=0)
areas = (total_boxes[:, 2] - total_boxes[:, 0]) * (
total_boxes[:, 3] - total_boxes[:, 1]
)
return np.histogram(np.sqrt(areas), bins=list(bins))
def download(self, urls: List, target_dir: str):
for k, v in urls.items():
keys = list(v["check"].items())
checked_keys = []
for key, md5hash in keys:
target_sub_dir = os.path.join(target_dir, key)
if not os.path.exists(target_sub_dir):
checked_keys.append(False)
else:
checked_keys.append(
checksumdir.dirhash(target_sub_dir, hashfunc="md5") == md5hash
)
if sum(checked_keys) == len(keys):
logger.debug("found {} at {}".format(k, target_dir))
continue
# download
adapter = v.get("adapter")
kwargs = v.get("kwargs", {})
logger.warning(
"{} not found in the {}, downloading...".format(k, target_dir)
)
download_object(adapter, dest_path=target_dir, **kwargs)
|
[
"logging.getLogger",
"numpy.mean",
"os.path.exists",
"copy.deepcopy",
"numpy.sqrt",
"os.path.join",
"torch.from_numpy",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.stack",
"numpy.zeros",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"imageio.imread",
"checksumdir.dirhash"
] |
[((261, 298), 'logging.getLogger', 'logging.getLogger', (['"""fastface.dataset"""'], {}), "('fastface.dataset')\n", (278, 298), False, 'import logging\n'), ((1663, 1695), 'copy.deepcopy', 'copy.deepcopy', (['self.targets[idx]'], {}), '(self.targets[idx])\n', (1676, 1695), False, 'import copy\n'), ((2914, 2943), 'imageio.imread', 'imageio.imread', (['img_file_path'], {}), '(img_file_path)\n', (2928, 2943), False, 'import imageio\n'), ((3365, 3394), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (3373, 3394), True, 'import numpy as np\n'), ((3637, 3779), 'torch.utils.data.DataLoader', 'DataLoader', (['self'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'pin_memory': 'pin_memory'}), '(self, batch_size=batch_size, shuffle=shuffle, num_workers=\n num_workers, collate_fn=collate_fn, pin_memory=pin_memory, **kwargs)\n', (3647, 3779), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5044, 5084), 'numpy.concatenate', 'np.concatenate', (['normalized_boxes'], {'axis': '(0)'}), '(normalized_boxes, axis=0)\n', (5058, 5084), True, 'import numpy as np\n'), ((5470, 5505), 'numpy.concatenate', 'np.concatenate', (['total_boxes'], {'axis': '(0)'}), '(total_boxes, axis=0)\n', (5484, 5505), True, 'import numpy as np\n'), ((527, 550), 'numpy.stack', 'np.stack', (['batch'], {'axis': '(0)'}), '(batch, axis=0)\n', (535, 550), True, 'import numpy as np\n'), ((3055, 3097), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'img.dtype'}), '(img, dtype=img.dtype)\n', (3075, 3097), True, 'import numpy as np\n'), ((3959, 3970), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3967, 3970), True, 'import numpy as np\n'), ((3972, 3983), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3980, 3983), True, 'import numpy as np\n'), ((4179, 4198), 'numpy.mean', 'np.mean', (['d[:, :, 0]'], {}), '(d[:, :, 0])\n', (4186, 4198), True, 'import numpy as np\n'), ((4226, 4245), 'numpy.mean', 'np.mean', (['d[:, :, 1]'], {}), '(d[:, :, 1])\n', (4233, 4245), True, 'import numpy as np\n'), ((4273, 4292), 'numpy.mean', 'np.mean', (['d[:, :, 2]'], {}), '(d[:, :, 2])\n', (4280, 4292), True, 'import numpy as np\n'), ((4324, 4348), 'numpy.mean', 'np.mean', (['(d[:, :, 0] ** 2)'], {}), '(d[:, :, 0] ** 2)\n', (4331, 4348), True, 'import numpy as np\n'), ((4379, 4403), 'numpy.mean', 'np.mean', (['(d[:, :, 1] ** 2)'], {}), '(d[:, :, 1] ** 2)\n', (4386, 4403), True, 'import numpy as np\n'), ((4434, 4458), 'numpy.mean', 'np.mean', (['(d[:, :, 2] ** 2)'], {}), '(d[:, :, 2] ** 2)\n', (4441, 4458), True, 'import numpy as np\n'), ((5655, 5669), 'numpy.sqrt', 'np.sqrt', (['areas'], {}), '(areas)\n', (5662, 5669), True, 'import numpy as np\n'), ((790, 809), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (806, 809), False, 'import torch\n'), ((3314, 3348), 'numpy.stack', 'np.stack', (['[img, img, img]'], {'axis': '(-1)'}), '([img, img, img], axis=-1)\n', (3322, 3348), True, 'import numpy as np\n'), ((5923, 5952), 'os.path.join', 'os.path.join', (['target_dir', 'key'], {}), '(target_dir, key)\n', (5935, 5952), False, 'import os\n'), ((582, 605), 'torch.from_numpy', 'torch.from_numpy', (['batch'], {}), '(batch)\n', (598, 605), False, 'import torch\n'), ((5976, 6006), 'os.path.exists', 'os.path.exists', (['target_sub_dir'], {}), '(target_sub_dir)\n', (5990, 6006), False, 'import os\n'), ((6142, 6193), 'checksumdir.dirhash', 'checksumdir.dirhash', (['target_sub_dir'], {'hashfunc': '"""md5"""'}), 
"(target_sub_dir, hashfunc='md5')\n", (6161, 6193), False, 'import checksumdir\n')]
|
import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
import random
from os import listdir
from os.path import join, splitext
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image, ImageFilter, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
init(autoreset=True)
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file, num_dataload,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.t7', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.num_dataload = num_dataload
if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list() # read all the video files
def _load_feature(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
feat_path = os.path.join(directory, self.image_tmpl.format(idx))
            try:
                feat = [torch.load(feat_path)]
            except Exception:
                # surface the offending path before re-raising; otherwise `feat` would be unbound below
                print(Back.RED + feat_path)
                raise
            return feat
elif self.modality == 'Flow':
x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
return [x_feat, y_feat]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
# repeat the list if the length is less than num_dataload (especially for target data)
n_repeat = self.num_dataload//len(self.video_list)
n_left = self.num_dataload%len(self.video_list)
self.video_list = self.video_list*n_repeat + self.video_list[:n_left]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
#np.random.seed(1)
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
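        # pick one random frame offset inside each of the evenly spaced segments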
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1]
offsets = np.append(id_select, id_expand)
return offsets + 1
def __getitem__(self, index):
record = self.video_list[index]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
frames = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_feats = self._load_feature(record.path, p)
frames.extend(seg_feats)
if p < record.num_frames:
p += 1
# process_data = self.transform(frames)
process_data = torch.stack(frames)
return process_data, record.label
def __len__(self):
return len(self.video_list)
class VideoDataset(data.Dataset):
def __init__(
self,
folder,
n_frames,
frame_size=224,
separator="_"
):
self.folder = folder
self.num_segments = n_frames
self.frame_size = frame_size
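        # standard ImageNet-style preprocessing: resize, center-crop, tensorize and normalize each frame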
self.data_transform = transforms.Compose(
[
transforms.Resize(self.frame_size),
transforms.CenterCrop(self.frame_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
self.separator = separator
self.classes = [c for c in sorted(listdir(folder))]
self.videos_with_classes = []
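        # index (video_path, class_index) pairs, keeping only clips with at least `n_frames` frames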
for c_index, c in enumerate(self.classes):
c_path = join(self.folder, c)
videos = listdir(c_path)
for v in videos:
v_path = join(c_path, v)
num_frames = len(listdir(v_path))
if num_frames >= self.num_segments:
pair = (v_path, c_index)
self.videos_with_classes.append(pair)
def _get_test_indices(self, num_frames):
num_min = self.num_segments
num_select = num_frames
if num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]
) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = (
np.ones(self.num_segments - num_select, dtype=int)
* id_select[id_select[0] - 1]
)
offsets = np.append(id_select, id_expand)
return offsets
def __getitem__(self, index):
video, label = self.videos_with_classes[index]
frames_temp = sorted(
listdir(video),
key=lambda path: int(path.split(self.separator)[-1].split(".")[0]),
)
frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')]
num_frames = len(frames)
data = []
segment_indices = self._get_test_indices(num_frames)
for index in segment_indices:
frame = frames[index]
frame_path = join(video, frame)
frame_img = Image.open(frame_path)
frame_feat = self.data_transform(frame_img)
data.append(frame_feat)
tensor = torch.stack(data)
return tensor, label
def __len__(self):
return len(self.videos_with_classes)
|
[
"torchvision.transforms.CenterCrop",
"torch.utils.data.append",
"os.listdir",
"PIL.Image.open",
"numpy.ones",
"torch.load",
"torch.stack",
"os.path.join",
"numpy.append",
"numpy.zeros",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"colorama.init"
] |
[((505, 525), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (509, 525), False, 'from colorama import init\n'), ((5481, 5500), 'torch.stack', 'torch.stack', (['frames'], {}), '(frames)\n', (5492, 5500), False, 'import torch\n'), ((8333, 8350), 'torch.stack', 'torch.stack', (['data'], {}), '(data)\n', (8344, 8350), False, 'import torch\n'), ((3855, 3885), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3863, 3885), True, 'import numpy as np\n'), ((4659, 4690), 'numpy.append', 'np.append', (['id_select', 'id_expand'], {}), '(id_select, id_expand)\n', (4668, 4690), True, 'import numpy as np\n'), ((6438, 6458), 'os.path.join', 'join', (['self.folder', 'c'], {}), '(self.folder, c)\n', (6442, 6458), False, 'from os.path import join, splitext\n'), ((6480, 6495), 'os.listdir', 'listdir', (['c_path'], {}), '(c_path)\n', (6487, 6495), False, 'from os import listdir\n'), ((7567, 7598), 'numpy.append', 'np.append', (['id_select', 'id_expand'], {}), '(id_select, id_expand)\n', (7576, 7598), True, 'import numpy as np\n'), ((7756, 7770), 'os.listdir', 'listdir', (['video'], {}), '(video)\n', (7763, 7770), False, 'from os import listdir\n'), ((8158, 8176), 'os.path.join', 'join', (['video', 'frame'], {}), '(video, frame)\n', (8162, 8176), False, 'from os.path import join, splitext\n'), ((8201, 8223), 'PIL.Image.open', 'Image.open', (['frame_path'], {}), '(frame_path)\n', (8211, 8223), False, 'from PIL import Image, ImageFilter, ImageFile\n'), ((8292, 8315), 'torch.utils.data.append', 'data.append', (['frame_feat'], {}), '(frame_feat)\n', (8303, 8315), True, 'import torch.utils.data as data\n'), ((3151, 3200), 'numpy.random.randint', 'randint', (['average_duration'], {'size': 'self.num_segments'}), '(average_duration, size=self.num_segments)\n', (3158, 3200), False, 'from numpy.random import randint\n'), ((3393, 3423), 'numpy.zeros', 'np.zeros', (['(self.num_segments,)'], {}), '((self.num_segments,))\n', (3401, 3423), True, 'import numpy as np\n'), ((4563, 4613), 'numpy.ones', 'np.ones', (['(self.num_segments - num_select)'], {'dtype': 'int'}), '(self.num_segments - num_select, dtype=int)\n', (4570, 4613), True, 'import numpy as np\n'), ((5945, 5979), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.frame_size'], {}), '(self.frame_size)\n', (5962, 5979), False, 'from torchvision import transforms\n'), ((5997, 6035), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['self.frame_size'], {}), '(self.frame_size)\n', (6018, 6035), False, 'from torchvision import transforms\n'), ((6053, 6074), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6072, 6074), False, 'from torchvision import transforms\n'), ((6092, 6167), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (6112, 6167), False, 'from torchvision import transforms\n'), ((6550, 6565), 'os.path.join', 'join', (['c_path', 'v'], {}), '(c_path, v)\n', (6554, 6565), False, 'from os.path import join, splitext\n'), ((7434, 7484), 'numpy.ones', 'np.ones', (['(self.num_segments - num_select)'], {'dtype': 'int'}), '(self.num_segments - num_select, dtype=int)\n', (7441, 7484), True, 'import numpy as np\n'), ((2015, 2036), 'torch.load', 'torch.load', (['feat_path'], {}), '(feat_path)\n', (2025, 2036), False, 'import torch\n'), ((3283, 3355), 'numpy.random.randint', 'randint', (['(record.num_frames - 
self.new_length + 1)'], {'size': 'self.num_segments'}), '(record.num_frames - self.new_length + 1, size=self.num_segments)\n', (3290, 3355), False, 'from numpy.random import randint\n'), ((6309, 6324), 'os.listdir', 'listdir', (['folder'], {}), '(folder)\n', (6316, 6324), False, 'from os import listdir\n'), ((6599, 6614), 'os.listdir', 'listdir', (['v_path'], {}), '(v_path)\n', (6606, 6614), False, 'from os import listdir\n')]
|
"""
author: @nimrobotics
description: calculates the effective connectivity between regions and plots them
"""
import numpy as np
import scipy.io
import glob
import sys
sys.path.append('../utils')
from plots import plotData
dir = "./process3/" #directory of the data
outdir = 'process3/' #directory to save the plots
regions = 3 #number of regions
files = glob.glob(dir+'/*_.mat') # get all the files in the directory
for file in files:
print('Processing condition: ', file)
data = scipy.io.loadmat(file) #load data from the directory
fval = data['fval'] #fval
pval = data['pval'] #pval
sig = data['sig'] #sig
cd = data['cd'] #cd
print('fval shape: ',fval.shape)
print('\nfval \n',fval)
print('pval shape: ',pval.shape)
print('sig shape: ',sig.shape)
print('\nsig \n',sig)
print(cd.shape)
# elementwise multiplication of fval and sig(0/1)
fval_sig = np.multiply(fval, sig)
print(fval_sig.shape)
print('\nfval_sig \n',fval_sig)
# fval_sig = np.mean(fval_sig, axis=2) # average over files
# print(fval_sig.shape)
# fval = np.mean(fval, axis=2)
labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions
condition = file.split('/')[-1].split('.')[0] #get the condition name
plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png')
plot.matrixPlot()
plot.circularPlot()
|
[
"numpy.multiply",
"plots.plotData",
"sys.path.append",
"glob.glob"
] |
[((170, 197), 'sys.path.append', 'sys.path.append', (['"""../utils"""'], {}), "('../utils')\n", (185, 197), False, 'import sys\n'), ((358, 384), 'glob.glob', 'glob.glob', (["(dir + '/*_.mat')"], {}), "(dir + '/*_.mat')\n", (367, 384), False, 'import glob\n'), ((909, 931), 'numpy.multiply', 'np.multiply', (['fval', 'sig'], {}), '(fval, sig)\n', (920, 931), True, 'import numpy as np\n'), ((1268, 1399), 'plots.plotData', 'plotData', (['fval_sig', 'labels', 'outdir'], {'colormap': '"""viridis"""', 'dpi': '(300)', 'title': "('EC: ' + condition)", 'filename': "('EC_' + condition + '.png')"}), "(fval_sig, labels, outdir, colormap='viridis', dpi=300, title=\n 'EC: ' + condition, filename='EC_' + condition + '.png')\n", (1276, 1399), False, 'from plots import plotData\n')]
|
from __future__ import with_statement
from nose.tools import assert_true
from os.path import exists
import numpy as np
from nibabel import Nifti1Image
from numpy.testing import assert_equal
from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset
from ..bsa_io import make_bsa_image
from nibabel.tmpdirs import InTemporaryDirectory
def test_parcel_intra_from_3d_images_list():
"""Test that a parcellation is generated, starting from a list of 3D images
"""
# Generate an image
shape = (5, 5, 5)
contrast_id = 'plop'
mask_image = Nifti1Image(np.ones(shape), np.eye(4))
#mask_images = [mask_image for _ in range(5)]
with InTemporaryDirectory() as dir_context:
data_image = ['image_%d.nii' % i for i in range(5)]
for datim in data_image:
surrogate_3d_dataset(mask=mask_image, out_image_file=datim)
#run the algo
landmark, hrois = make_bsa_image(
mask_image, data_image, threshold=10., smin=0, sigma=1.,
prevalence_threshold=0, prevalence_pval=0.5, write_dir=dir_context,
algorithm='density', contrast_id=contrast_id)
assert_equal(landmark, None)
assert_equal(len(hrois), 5)
assert_true(exists('density_%s.nii' % contrast_id))
assert_true(exists('prevalence_%s.nii' % contrast_id))
assert_true(exists('AR_%s.nii' % contrast_id))
assert_true(exists('CR_%s.nii' % contrast_id))
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
[
"os.path.exists",
"numpy.eye",
"numpy.ones",
"numpy.testing.assert_equal",
"nibabel.tmpdirs.InTemporaryDirectory",
"nose.run"
] |
[((1505, 1534), 'nose.run', 'nose.run', ([], {'argv': "['', __file__]"}), "(argv=['', __file__])\n", (1513, 1534), False, 'import nose\n'), ((584, 598), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (591, 598), True, 'import numpy as np\n'), ((600, 609), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (606, 609), True, 'import numpy as np\n'), ((671, 693), 'nibabel.tmpdirs.InTemporaryDirectory', 'InTemporaryDirectory', ([], {}), '()\n', (691, 693), False, 'from nibabel.tmpdirs import InTemporaryDirectory\n'), ((1156, 1184), 'numpy.testing.assert_equal', 'assert_equal', (['landmark', 'None'], {}), '(landmark, None)\n', (1168, 1184), False, 'from numpy.testing import assert_equal\n'), ((1241, 1279), 'os.path.exists', 'exists', (["('density_%s.nii' % contrast_id)"], {}), "('density_%s.nii' % contrast_id)\n", (1247, 1279), False, 'from os.path import exists\n'), ((1301, 1342), 'os.path.exists', 'exists', (["('prevalence_%s.nii' % contrast_id)"], {}), "('prevalence_%s.nii' % contrast_id)\n", (1307, 1342), False, 'from os.path import exists\n'), ((1364, 1397), 'os.path.exists', 'exists', (["('AR_%s.nii' % contrast_id)"], {}), "('AR_%s.nii' % contrast_id)\n", (1370, 1397), False, 'from os.path import exists\n'), ((1419, 1452), 'os.path.exists', 'exists', (["('CR_%s.nii' % contrast_id)"], {}), "('CR_%s.nii' % contrast_id)\n", (1425, 1452), False, 'from os.path import exists\n')]
|
import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np
class VisualBertClassifier(nn.Module):
def __init__(self,
visual_bert_model,
num_classes: int = 8,
initial_visual_embedding_dim: int = 96,
final_dropout_rate: float = 0.1):
"""
pooler_output (torch.FloatTensor of shape (batch_size, hidden_size))
— Last layer hidden-state of the first token of the sequence (classification token)
after further processing through the layers used for the auxiliary pretraining task.
E.g. for BERT-family of models, this returns the classification token after processing through
a linear layer and a tanh activation function.
The linear layer weights are trained from the next sentence prediction (classification) objective
during pretraining.
@param initial_visual_embedding_dim:
"""
super().__init__()
self.visual_embedding_projection = nn.Linear(initial_visual_embedding_dim, 2048)
self.visual_bert = visual_bert_model
self.final_dropout = nn.Dropout(final_dropout_rate)
self.out = nn.Linear(768, num_classes)
def forward(self,
text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask
):
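        # project the low-dimensional face/body embeddings (default 96-d) up to 2048-d before feeding them to VisualBERT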
visual_embeds = self.visual_embedding_projection(visual_embeds)
output = self.visual_bert(input_ids=text_input_ids,
token_type_ids=text_token_type_ids,
attention_mask=text_attention_mask,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
visual_attention_mask=visual_attention_mask)
output = self.final_dropout(output.pooler_output)
output = self.out(output)
return output
if __name__ == '__main__':
bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
text_input_ids = inputs.data['input_ids'].to('cuda')
text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
text_attention_mask = inputs.data['attention_mask'].to('cuda')
sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
sample_face_body_embedding = np.load(sample_face_body_embedding_path)
visual_embeds = torch.from_numpy(sample_face_body_embedding)
visual_embeds = visual_embeds.to('cuda')
visual_embeds = torch.unsqueeze(visual_embeds, 0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # build the VisualBERT backbone required by the classifier; this checkpoint name is one example from the HF hub
    visual_bert_model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
    classifier = VisualBertClassifier(visual_bert_model)
classifier.to('cuda')
classifier.forward(text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask)
|
[
"torch.nn.Dropout",
"torch.unsqueeze",
"transformers.BertTokenizer.from_pretrained",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.load",
"torch.ones"
] |
[((2161, 2211), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (2190, 2211), False, 'from transformers import BertTokenizer, VisualBertModel, VisualBertConfig\n'), ((2689, 2729), 'numpy.load', 'np.load', (['sample_face_body_embedding_path'], {}), '(sample_face_body_embedding_path)\n', (2696, 2729), True, 'import numpy as np\n'), ((2750, 2794), 'torch.from_numpy', 'torch.from_numpy', (['sample_face_body_embedding'], {}), '(sample_face_body_embedding)\n', (2766, 2794), False, 'import torch\n'), ((2860, 2893), 'torch.unsqueeze', 'torch.unsqueeze', (['visual_embeds', '(0)'], {}), '(visual_embeds, 0)\n', (2875, 2893), False, 'import torch\n'), ((1070, 1115), 'torch.nn.Linear', 'nn.Linear', (['initial_visual_embedding_dim', '(2048)'], {}), '(initial_visual_embedding_dim, 2048)\n', (1079, 1115), False, 'from torch import nn\n'), ((1190, 1220), 'torch.nn.Dropout', 'nn.Dropout', (['final_dropout_rate'], {}), '(final_dropout_rate)\n', (1200, 1220), False, 'from torch import nn\n'), ((1240, 1267), 'torch.nn.Linear', 'nn.Linear', (['(768)', 'num_classes'], {}), '(768, num_classes)\n', (1249, 1267), False, 'from torch import nn\n'), ((2922, 2976), 'torch.ones', 'torch.ones', (['visual_embeds.shape[:-1]'], {'dtype': 'torch.long'}), '(visual_embeds.shape[:-1], dtype=torch.long)\n', (2932, 2976), False, 'import torch\n'), ((3016, 3071), 'torch.ones', 'torch.ones', (['visual_embeds.shape[:-1]'], {'dtype': 'torch.float'}), '(visual_embeds.shape[:-1], dtype=torch.float)\n', (3026, 3071), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME>. Created on Mon Jul 10 20:12:27 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
# -*- coding: utf-8 -*-
"""
(SHORT NAME EXPLANATION)
>>>DOCTEST COMMANDS
(THE TEST ANSWER)
@author: <NAME> (张仪). Created on Thu Jul 6 16:00:33 2017
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
#SUMMARY----------------
#INPUTS-----------------
#ESSENTIAL:
#OPTIONAL:
#OUTPUTS----------------
#EXAMPLES---------------
#NOTES------------------
"""
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from assemble import assemble
from _assembling import assemble_, integral1d_
import matplotlib.pyplot as plt
from quadrature import extended_gauss_quad
from scipy.integrate import quad
from sympy import Matrix
import scipy.io
from scipy import sparse
import scipy as sp
from inner_product import inner
# %% exact solution define
# u^{(1)} = { u, v }^T
def u(x,y):
return +np.cos(np.pi*x) * np.sin(np.pi*y)
def v(x,y):
return -np.sin(np.pi*x) * np.cos(np.pi*y)
def r_u(x,y):
return -2* np.pi**2 * np.cos(np.pi*x) * np.sin(np.pi*y)
def r_v(x,y):
return 2* np.pi**2 * np.sin(np.pi*x) * np.cos(np.pi*y)
# %% define the mesh
mesh = CrazyMesh( 2, (2, 2), ((-1, 1), (-1, 1)), 0.05 )
func_space_gauss1 = FunctionSpace(mesh, '1-gauss', (5, 5), is_inner=False)
func_space_lobatto1 = FunctionSpace(mesh, '1-lobatto', (5, 5), is_inner=False)
form_1_gauss = Form(func_space_gauss1)
form_1_lobatto = Form(func_space_lobatto1)
M = inner(form_1_lobatto.basis,form_1_gauss.basis)
|
[
"inner_product.inner",
"forms.Form",
"function_space.FunctionSpace",
"numpy.cos",
"numpy.sin",
"mesh.CrazyMesh"
] |
[((1661, 1707), 'mesh.CrazyMesh', 'CrazyMesh', (['(2)', '(2, 2)', '((-1, 1), (-1, 1))', '(0.05)'], {}), '(2, (2, 2), ((-1, 1), (-1, 1)), 0.05)\n', (1670, 1707), False, 'from mesh import CrazyMesh\n'), ((1732, 1786), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""1-gauss"""', '(5, 5)'], {'is_inner': '(False)'}), "(mesh, '1-gauss', (5, 5), is_inner=False)\n", (1745, 1786), False, 'from function_space import FunctionSpace\n'), ((1809, 1865), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""1-lobatto"""', '(5, 5)'], {'is_inner': '(False)'}), "(mesh, '1-lobatto', (5, 5), is_inner=False)\n", (1822, 1865), False, 'from function_space import FunctionSpace\n'), ((1884, 1907), 'forms.Form', 'Form', (['func_space_gauss1'], {}), '(func_space_gauss1)\n', (1888, 1907), False, 'from forms import Form\n'), ((1925, 1950), 'forms.Form', 'Form', (['func_space_lobatto1'], {}), '(func_space_lobatto1)\n', (1929, 1950), False, 'from forms import Form\n'), ((1956, 2003), 'inner_product.inner', 'inner', (['form_1_lobatto.basis', 'form_1_gauss.basis'], {}), '(form_1_lobatto.basis, form_1_gauss.basis)\n', (1961, 2003), False, 'from inner_product import inner\n'), ((1404, 1421), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1410, 1421), True, 'import numpy as np\n'), ((1462, 1479), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1468, 1479), True, 'import numpy as np\n'), ((1539, 1556), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1545, 1556), True, 'import numpy as np\n'), ((1616, 1633), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (1622, 1633), True, 'import numpy as np\n'), ((1386, 1403), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1392, 1403), True, 'import numpy as np\n'), ((1444, 1461), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1450, 1461), True, 'import numpy as np\n'), ((1521, 1538), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1527, 1538), True, 'import numpy as np\n'), ((1598, 1615), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1604, 1615), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from kalman_filter import KalmanFilter
raw_data = np.loadtxt("barometer_data.txt")
# Truncate raw data (it's super long)
raw_data = raw_data[:raw_data.size//4]
raw_data_step = np.loadtxt("barometer_data_step.txt")
t1 = np.arange(0, raw_data.size/12.5, 1/12.5)
t2 = np.arange(0, raw_data_step.size/12.5, 1/12.5)
fig1 = plt.figure("Data")
ax1 = fig1.add_subplot(121)
ax2 = fig1.add_subplot(122)
fig1.subplots_adjust(bottom=0.25)
[unfiltered_raw_line] = ax1.plot(t1, raw_data)
[unfiltered__step_line] = ax2.plot(t2, raw_data_step)
def filter_data(data, x0, P, Q, R):
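    # run a simple scalar Kalman filter over the samples; returns per-step state estimates and covariances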
filter1 = KalmanFilter(x0, P, 1, 0, 1, Q, R)
x_out = np.zeros(data.size)
P_out = np.zeros(data.size)
for k in np.arange(1, data.size):
x_out[k], P_out[k] = filter1.update(0, data[k])
return x_out, P_out
P0 = 2
Q0 = 1e-4
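# initial estimate covariance and process noise for the sliders; measurement noise R is taken from the sample variance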
[filtered_raw_line] = ax1.plot(t1, filter_data(raw_data, 0, P0, Q0, R=raw_data.var())[0])
[filtered_step_line] = ax2.plot(t2, filter_data(raw_data_step, 0, P0, Q0, R=raw_data.var())[0])
P_slider_ax = fig1.add_axes([0.25, 0.15, 0.65, 0.03])
Q_slider_ax = fig1.add_axes([0.25, 0.1, 0.65, 0.03])
P_slider = Slider(P_slider_ax, 'P', 0.5, 5, valinit=P0)
Q_slider = Slider(Q_slider_ax, 'Q', 1e-4, 1e-3, valinit=Q0)
def sliders_on_changed(val):
P = P_slider.val
Q = Q_slider.val
x_raw_new, P_raw_new = filter_data(raw_data, 0, P, Q, R=raw_data.var())
filtered_raw_line.set_ydata(x_raw_new)
x_step_new, P_step_new = filter_data(raw_data_step, 0, P, Q, R=raw_data.var())
filtered_step_line.set_ydata(x_step_new)
P_slider.on_changed(sliders_on_changed)
Q_slider.on_changed(sliders_on_changed)
plt.show()
|
[
"kalman_filter.KalmanFilter",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.widgets.Slider",
"numpy.loadtxt",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((140, 172), 'numpy.loadtxt', 'np.loadtxt', (['"""barometer_data.txt"""'], {}), "('barometer_data.txt')\n", (150, 172), True, 'import numpy as np\n'), ((266, 303), 'numpy.loadtxt', 'np.loadtxt', (['"""barometer_data_step.txt"""'], {}), "('barometer_data_step.txt')\n", (276, 303), True, 'import numpy as np\n'), ((309, 353), 'numpy.arange', 'np.arange', (['(0)', '(raw_data.size / 12.5)', '(1 / 12.5)'], {}), '(0, raw_data.size / 12.5, 1 / 12.5)\n', (318, 353), True, 'import numpy as np\n'), ((355, 404), 'numpy.arange', 'np.arange', (['(0)', '(raw_data_step.size / 12.5)', '(1 / 12.5)'], {}), '(0, raw_data_step.size / 12.5, 1 / 12.5)\n', (364, 404), True, 'import numpy as np\n'), ((409, 427), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Data"""'], {}), "('Data')\n", (419, 427), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1276), 'matplotlib.widgets.Slider', 'Slider', (['P_slider_ax', '"""P"""', '(0.5)', '(5)'], {'valinit': 'P0'}), "(P_slider_ax, 'P', 0.5, 5, valinit=P0)\n", (1238, 1276), False, 'from matplotlib.widgets import Slider\n'), ((1288, 1339), 'matplotlib.widgets.Slider', 'Slider', (['Q_slider_ax', '"""Q"""', '(0.0001)', '(0.001)'], {'valinit': 'Q0'}), "(Q_slider_ax, 'Q', 0.0001, 0.001, valinit=Q0)\n", (1294, 1339), False, 'from matplotlib.widgets import Slider\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'import matplotlib.pyplot as plt\n'), ((671, 705), 'kalman_filter.KalmanFilter', 'KalmanFilter', (['x0', 'P', '(1)', '(0)', '(1)', 'Q', 'R'], {}), '(x0, P, 1, 0, 1, Q, R)\n', (683, 705), False, 'from kalman_filter import KalmanFilter\n'), ((723, 742), 'numpy.zeros', 'np.zeros', (['data.size'], {}), '(data.size)\n', (731, 742), True, 'import numpy as np\n'), ((755, 774), 'numpy.zeros', 'np.zeros', (['data.size'], {}), '(data.size)\n', (763, 774), True, 'import numpy as np\n'), ((797, 820), 'numpy.arange', 'np.arange', (['(1)', 'data.size'], {}), '(1, data.size)\n', (806, 820), True, 'import numpy as np\n')]
|
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior.
"""
import itertools
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy as np
import torch
from causalnex.structure.structuremodel import StructureModel
class DistTypeBase(metaclass=ABCMeta):
"""Base class defining the distribution default behavior and interface"""
def __init__(self, idx: int):
"""
Default constructor for the DistTypeBase class.
Unless overridden, provides default behavior to all subclasses.
Args:
idx: Positional index in data passed to the NOTEARS algorithm
                which corresponds to this datatype.
"""
self.idx = idx
def get_columns(
self,
X: np.ndarray,
) -> np.ndarray:
"""
Gets the column(s) associated with the instantiated DistType.
Args:
X: Full dataset to be selected from.
Returns:
1d or 2d np.ndarray of columns.
"""
return X[:, self.idx]
# pylint: disable=no-self-use
# pylint: disable=unused-argument
def preprocess_X(self, X: np.ndarray, fit_transform: bool = True) -> np.ndarray:
"""
Overload this method to perform any required preprocessing of the data
matrix. This can include data conversion, column expansion etc.
Changes to the tabu parameters should also be done here.
**WARN** This preprocessing CANNOT reorder the columns of X.
Args:
X: The original passed-in data.
fit_transform: Whether the class first fits
then transforms the data, or just transforms.
Just transforming is used to preprocess new data after the
initial NOTEARS fit.
Returns:
Preprocessed X
"""
return X
# pylint: disable=no-self-use
def preprocess_tabu_edges(
self, tabu_edges: List[Tuple[int, int]]
) -> List[Tuple[int, int]]:
"""
Overload this method to perform any required preprocessing of the tabu_edges.
Args:
tabu_edges: The original tabu_edges.
Returns:
Preprocessed tabu_edges.
"""
return tabu_edges
# pylint: disable=no-self-use
def preprocess_tabu_nodes(self, tabu_nodes: List[int]) -> List[int]:
"""
Overload this method to perform any required preprocessing of the tabu_nodes.
Args:
tabu_nodes: The original tabu_nodes.
Returns:
Preprocessed tabu_nodes.
"""
return tabu_nodes
# pylint: disable=no-self-use
def update_idx_col(self, idx_col: Dict[int, str]) -> Dict[int, str]:
"""
Overload this method to update the idx_col dict with expanded colnames.
Args:
idx_col: The original index to column mapping.
Returns:
Updated index to column mapping.
"""
return idx_col
def add_to_node(self, sm: StructureModel) -> StructureModel:
"""
Adds self to a node of a structure model corresponding to self.idx.
Args:
sm: The input StructureModel
Returns:
Updated StructureModel
"""
sm.nodes[self.idx]["dist_type"] = self
return sm
# pylint: disable=no-self-use
def modify_h(self, square_weight_mat: torch.Tensor) -> torch.Tensor:
"""
Overload this method to apply updates to the W matrix in h(W).
        Typically used to prevent spurious cycles when using expanded columns.
Args:
square_weight_mat: The weight matrix used in h(W).
Returns:
Updated weight matrix used in h(W).
"""
return square_weight_mat
# pylint: disable=no-self-use
def collapse_adj(self, adj: np.ndarray) -> np.ndarray:
"""
Overload this method to apply updates to collapse the W matrix
of a multi-parameter distribution
Likely has the same impact as modify_h.
Args:
adj: The adjacency matrix.
Returns:
Updated adjacency matrix.
"""
return adj
@abstractmethod
def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor:
"""
Args:
X: The original data passed into NOTEARS (i.e. the reconstruction target).
X_hat: The reconstructed data.
Returns:
Scalar pytorch tensor of the reconstruction loss between X and X_hat.
"""
raise NotImplementedError("Must implement the loss() method")
@abstractmethod
def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor:
"""
Convert the transformed data from the latent space to the original dtype
using the inverse link function.
Args:
X_hat: Reconstructed data in the latent space.
Returns:
Modified X_hat.
MUST be same shape as passed in data.
Projects the self.idx column from the latent space to the dist_type space.
"""
raise NotImplementedError("Must implement the inverse_link_function() method")
class ExpandColumnsMixin:
"""
Mixin class providing convenience methods for column expansion.
"""
@staticmethod
def _expand_columns(X: np.ndarray, new_columns: np.ndarray) -> np.ndarray:
"""
Expands the data matrix columns without reordering the indices.
Args:
X: Base dataset to expand.
new_columns: The columns to expand the dataset by.
Returns:
Expanded dataset.
"""
return np.hstack([X, new_columns])
@staticmethod
def update_tabu_edges(
idx_group: List[int],
tabu_edges: List[Tuple[int, int]],
tabu_idx_group: bool,
) -> List[Tuple[int, int]]:
"""
Tabu edges are:
1. all user defined connections to original feature column
2. all inter-feature connections (optional)
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_edges: The list of tabu_edges to be updated.
tabu_idx_group: Whether inter-group edges should also be considered tabu.
                I.e. when a column expansion has taken place, we often want to prevent edges being learned
between parameters.
Returns:
Updated tabu_edges
"""
if tabu_edges is None:
tabu_edges = []
# copy to prevent mutations
tabu_edges = deepcopy(tabu_edges)
# handle 1.
new_tabu_edges = []
# for each original tabu pair
for (i, j) in tabu_edges:
# idx_group[0] is the original column index
if i == idx_group[0]:
new_tabu_edges += [(idx, j) for idx in idx_group[1:]]
elif j == idx_group[0]:
new_tabu_edges += [(i, idx) for idx in idx_group[1:]]
# all new edges added to tabu_edges
tabu_edges += new_tabu_edges
# handle 2.
if tabu_idx_group:
# add on all pairwise permutations of particular feature group
# NOTE: permutations are needed for edge directionality
tabu_edges += list(itertools.permutations(idx_group, 2))
return tabu_edges
@staticmethod
def update_tabu_nodes(
idx_group: List[int], tabu_nodes: List[int]
) -> List[Tuple[int, int]]:
"""
Tabu nodes are:
1. all user defined connections to original feature column
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_nodes: The list of tabu_nodes to be updated.
Returns:
Updated tabu_nodes
"""
if tabu_nodes is None:
return tabu_nodes
# copy to prevent mutations
tabu_nodes = deepcopy(tabu_nodes)
new_tabu_nodes = []
for i in tabu_nodes:
# NOTE: the first element in the idx_group is guaranteed as self.idx
if i == idx_group[0]:
new_tabu_nodes += idx_group[1:]
# add on the new tabu nodes
tabu_nodes += new_tabu_nodes
return tabu_nodes
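# Illustrative sketch (not part of causalnex): expected behaviour of the tabu-edge expansion
# above for a hypothetical feature at index 2 that has been expanded into columns [2, 5, 6].
def _demo_update_tabu_edges() -> List[Tuple[int, int]]:
    """Hypothetical helper, shown only for documentation purposes; not used anywhere."""
    expanded = ExpandColumnsMixin.update_tabu_edges(
        idx_group=[2, 5, 6], tabu_edges=[(0, 2)], tabu_idx_group=True
    )
    # The user-defined edge (0, 2) is copied onto the new columns -> (0, 5) and (0, 6),
    # and every ordered pair within [2, 5, 6] becomes tabu as well.
    return expanded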
|
[
"itertools.permutations",
"copy.deepcopy",
"numpy.hstack"
] |
[((7215, 7242), 'numpy.hstack', 'np.hstack', (['[X, new_columns]'], {}), '([X, new_columns])\n', (7224, 7242), True, 'import numpy as np\n'), ((8158, 8178), 'copy.deepcopy', 'deepcopy', (['tabu_edges'], {}), '(tabu_edges)\n', (8166, 8178), False, 'from copy import deepcopy\n'), ((9531, 9551), 'copy.deepcopy', 'deepcopy', (['tabu_nodes'], {}), '(tabu_nodes)\n', (9539, 9551), False, 'from copy import deepcopy\n'), ((8869, 8905), 'itertools.permutations', 'itertools.permutations', (['idx_group', '(2)'], {}), '(idx_group, 2)\n', (8891, 8905), False, 'import itertools\n')]
|
from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
class HeadGazeEstimator:
""" A class for estimating gaze ray from facial landmarks """
def __init__(self, write_video=False):
# 3D model points.
self.model_points = np.array(
[
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
                (225.0, 170.0, -135.0),  # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0), # Right mouth corner
]
)
self.dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
"""
Parameters
----------
write_video : bool, optional
Write the resulting OpenCV video
"""
self.write_video = write_video
self.landmark_detector = LandmarkDetector(write_video=False)
def get_gaze_rays(self, filename, bbox_history=None, show=True):
"""
Get the gaze rays for the given video file
"""
# Get the landmarks for the entire video
landmark_map = self.landmark_detector.detect(filename, show=False)
# Capture the video
cap = cv2.VideoCapture(filename)
frame_no = 0
gaze_angles = {}
# Loop over the frames from the video stream
while True:
success, frame = cap.read()
if not success:
if frame_no == 0:
print("Failed to read video")
sys.exit(1)
else:
break
if frame_no == 0:
# Camera internals
size = frame.shape
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[
[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1],
],
dtype="double",
)
if self.write_video:
# Initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
par_path = os.path.abspath(os.path.join(filename, os.pardir))
dir_path = par_path + "_pnp"
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
video_path = os.path.join(dir_path, os.path.basename(filename))
writer = cv2.VideoWriter(
video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True
)
if frame_no in landmark_map:
# 2D image points.
image_points = np.array(
[
landmark_map[frame_no][33], # Nose tip
landmark_map[frame_no][8], # Chin
landmark_map[frame_no][36], # Left eye left corner
                        landmark_map[frame_no][45],  # Right eye right corner
landmark_map[frame_no][48], # Left Mouth corner
landmark_map[frame_no][54], # Right mouth corner
],
dtype="double",
)
# We use this to draw a line sticking out of the nose
success, rotation_vector, translation_vector = cv2.solvePnP(
self.model_points,
image_points,
camera_matrix,
self.dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE,
)
nose_end_point2D, jacobian = cv2.projectPoints(
np.array([(0.0, 0.0, 1000.0)]),
rotation_vector,
translation_vector,
camera_matrix,
self.dist_coeffs,
)
for p in image_points:
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1)
for p in landmark_map[frame_no]:
if p in image_points:
continue
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
length = lenAB * 3
C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length)
C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length)
cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2)
if bbox_history is not None and (self.write_video or show):
bboxes = bbox_history[frame_no]
for i, bbox in enumerate(bboxes):
x, y = int(bbox[0]), int(bbox[1])
w, h = int(bbox[2]), int(bbox[3])
cv2.circle(
frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1
)
# Store in the return dictionary
gaze_angles[frame_no] = (p1, p2)
# Show the frame if the flag is on
if show:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# Write the video if the flag is on
if self.write_video:
writer.write(frame)
frame_no += 1
# Cleanup
cv2.destroyAllWindows()
if self.write_video:
writer.release()
return gaze_angles
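if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); the video path below is a
    # placeholder. get_gaze_rays returns a dict mapping frame number -> (nose tip, nose
    # direction end point) in image coordinates.
    estimator = HeadGazeEstimator(write_video=False)
    gaze_rays = estimator.get_gaze_rays("example_head_video.mp4", show=False)
    print("Estimated gaze rays for {} frames".format(len(gaze_rays)))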
|
[
"os.makedirs",
"cv2.line",
"math.sqrt",
"os.path.join",
"cv2.imshow",
"cv2.VideoWriter",
"numpy.array",
"numpy.zeros",
"cv2.solvePnP",
"cv2.destroyAllWindows",
"adam_visual_perception.LandmarkDetector",
"cv2.VideoCapture",
"sys.exit",
"cv2.VideoWriter_fourcc",
"os.path.isdir",
"os.path.basename",
"cv2.waitKey"
] |
[((352, 506), 'numpy.array', 'np.array', (['[(0.0, 0.0, 0.0), (0.0, -330.0, -65.0), (-225.0, 170.0, -135.0), (225.0, \n 170.0, -135.0), (-150.0, -150.0, -125.0), (150.0, -150.0, -125.0)]'], {}), '([(0.0, 0.0, 0.0), (0.0, -330.0, -65.0), (-225.0, 170.0, -135.0), (\n 225.0, 170.0, -135.0), (-150.0, -150.0, -125.0), (150.0, -150.0, -125.0)])\n', (360, 506), True, 'import numpy as np\n'), ((774, 790), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (782, 790), True, 'import numpy as np\n'), ((1039, 1074), 'adam_visual_perception.LandmarkDetector', 'LandmarkDetector', ([], {'write_video': '(False)'}), '(write_video=False)\n', (1055, 1074), False, 'from adam_visual_perception import LandmarkDetector\n'), ((1387, 1413), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (1403, 1413), False, 'import cv2\n'), ((5829, 5852), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5850, 5852), False, 'import cv2\n'), ((1992, 2093), 'numpy.array', 'np.array', (['[[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]]'], {'dtype': '"""double"""'}), "([[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0,\n 1]], dtype='double')\n", (2000, 2093), True, 'import numpy as np\n'), ((2969, 3171), 'numpy.array', 'np.array', (['[landmark_map[frame_no][33], landmark_map[frame_no][8], landmark_map[\n frame_no][36], landmark_map[frame_no][45], landmark_map[frame_no][48],\n landmark_map[frame_no][54]]'], {'dtype': '"""double"""'}), "([landmark_map[frame_no][33], landmark_map[frame_no][8],\n landmark_map[frame_no][36], landmark_map[frame_no][45], landmark_map[\n frame_no][48], landmark_map[frame_no][54]], dtype='double')\n", (2977, 3171), True, 'import numpy as np\n'), ((3635, 3748), 'cv2.solvePnP', 'cv2.solvePnP', (['self.model_points', 'image_points', 'camera_matrix', 'self.dist_coeffs'], {'flags': 'cv2.SOLVEPNP_ITERATIVE'}), '(self.model_points, image_points, camera_matrix, self.\n dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)\n', (3647, 3748), False, 'import cv2\n'), ((4661, 4715), 'math.sqrt', 'math.sqrt', (['((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)'], {}), '((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n', (4670, 4715), False, 'import math\n'), ((4904, 4951), 'cv2.line', 'cv2.line', (['frame', 'p1', '(C_x, C_y)', '(0, 255, 0)', '(2)'], {}), '(frame, p1, (C_x, C_y), (0, 255, 0), 2)\n', (4912, 4951), False, 'import cv2\n'), ((5586, 5612), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (5596, 5612), False, 'import cv2\n'), ((1708, 1719), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1716, 1719), False, 'import sys\n'), ((2361, 2392), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (2383, 2392), False, 'import cv2\n'), ((2735, 2814), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_path', 'fourcc', '(30)', '(frame.shape[1], frame.shape[0])', '(True)'], {}), '(video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True)\n', (2750, 2814), False, 'import cv2\n'), ((3948, 3978), 'numpy.array', 'np.array', (['[(0.0, 0.0, 1000.0)]'], {}), '([(0.0, 0.0, 1000.0)])\n', (3956, 3978), True, 'import numpy as np\n'), ((5635, 5649), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5646, 5649), False, 'import cv2\n'), ((2440, 2473), 'os.path.join', 'os.path.join', (['filename', 'os.pardir'], {}), '(filename, os.pardir)\n', (2452, 2473), False, 'import os\n'), ((2551, 2574), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (2564, 2574), False, 
'import os\n'), ((2600, 2621), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (2611, 2621), False, 'import os\n'), ((2678, 2704), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2694, 2704), False, 'import os\n')]
|
import time
from collections import deque
import gym
import numpy as np
from stable_baselines import logger, PPO2
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.common import explained_variance, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten
class PPO2WithVAE(PPO2):
"""
Custom PPO2 version.
Notable changes:
- optimization is done after each episode and not after n steps
"""
def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2"):
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
self.cliprange = get_schedule_fn(self.cliprange)
with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
self._setup_learn()
runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
self.episode_reward = np.zeros((self.n_envs,))
ep_info_buf = deque(maxlen=100)
t_first_start = time.time()
n_timesteps = 0
# nupdates = total_timesteps // self.n_batch
for timestep in range(1, total_timesteps + 1):
assert self.n_batch % self.nminibatches == 0
batch_size = self.n_batch // self.nminibatches
t_start = time.time()
frac = 1.0 - timestep / total_timesteps
lr_now = self.learning_rate(frac)
cliprangenow = self.cliprange(frac)
# true_reward is the reward without discount
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
n_timesteps += len(obs)
ep_info_buf.extend(ep_infos)
mb_loss_vals = []
if states is None: # nonrecurrent version
inds = np.arange(self.n_batch)
for epoch_num in range(self.noptepochs):
np.random.shuffle(inds)
for start in range(0, self.n_batch, batch_size):
# timestep = ((update * self.noptepochs * self.n_batch + epoch_num * self.n_batch + start) //
# batch_size)
end = start + batch_size
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
update=n_timesteps))
else: # recurrent version
assert self.n_envs % self.nminibatches == 0
env_indices = np.arange(self.n_envs)
flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
envs_per_batch = batch_size // self.n_steps
for epoch_num in range(self.noptepochs):
np.random.shuffle(env_indices)
                        for start in range(0, self.n_envs, envs_per_batch):
# timestep = ((update * self.noptepochs * self.n_envs + epoch_num * self.n_envs + start) //
# envs_per_batch)
end = start + envs_per_batch
mb_env_inds = env_indices[start:end]
mb_flat_inds = flat_indices[mb_env_inds].ravel()
slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_states = states[mb_env_inds]
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=n_timesteps,
writer=writer, states=mb_states))
loss_vals = np.mean(mb_loss_vals, axis=0)
t_now = time.time()
fps = int(self.n_batch / (t_now - t_start))
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, n_timesteps)
if self.verbose >= 1 and (timestep % log_interval == 0 or timestep == 1):
explained_var = explained_variance(values, returns)
logger.logkv("total_timesteps", n_timesteps)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(explained_var))
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.logkv('time_elapsed', t_start - t_first_start)
for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
logger.logkv(loss_name, loss_val)
logger.dumpkvs()
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if n_timesteps > total_timesteps:
break
return self
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
def run(self):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
while True:
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
mb_rewards.append(rewards)
if self.dones:
print("Episode finished. Reward: {:.2f} {} Steps".format(np.sum(mb_rewards), len(mb_rewards)))
if len(mb_rewards) >= self.n_steps:
break
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
last_gae_lam = 0
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
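if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); the environment id and
    # hyperparameters are placeholders. PPO2WithVAE is constructed exactly like PPO2.
    from stable_baselines.common.policies import MlpPolicy

    model = PPO2WithVAE(MlpPolicy, "CartPole-v1", n_steps=128, nminibatches=4, verbose=1)
    model.learn(total_timesteps=10000)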
|
[
"numpy.clip",
"numpy.copy",
"numpy.mean",
"stable_baselines.logger.logkv",
"collections.deque",
"stable_baselines.common.explained_variance",
"time.time",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"stable_baselines.common.TensorboardWriter",
"stable_baselines.ppo2.ppo2.safe_mean",
"stable_baselines.ppo2.ppo2.get_schedule_fn",
"stable_baselines.logger.dumpkvs",
"numpy.zeros_like",
"numpy.arange",
"numpy.random.shuffle"
] |
[((724, 759), 'stable_baselines.ppo2.ppo2.get_schedule_fn', 'get_schedule_fn', (['self.learning_rate'], {}), '(self.learning_rate)\n', (739, 759), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((785, 816), 'stable_baselines.ppo2.ppo2.get_schedule_fn', 'get_schedule_fn', (['self.cliprange'], {}), '(self.cliprange)\n', (800, 816), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((8585, 8625), 'numpy.asarray', 'np.asarray', (['mb_obs'], {'dtype': 'self.obs.dtype'}), '(mb_obs, dtype=self.obs.dtype)\n', (8595, 8625), True, 'import numpy as np\n'), ((8647, 8687), 'numpy.asarray', 'np.asarray', (['mb_rewards'], {'dtype': 'np.float32'}), '(mb_rewards, dtype=np.float32)\n', (8657, 8687), True, 'import numpy as np\n'), ((8709, 8731), 'numpy.asarray', 'np.asarray', (['mb_actions'], {}), '(mb_actions)\n', (8719, 8731), True, 'import numpy as np\n'), ((8752, 8791), 'numpy.asarray', 'np.asarray', (['mb_values'], {'dtype': 'np.float32'}), '(mb_values, dtype=np.float32)\n', (8762, 8791), True, 'import numpy as np\n'), ((8816, 8859), 'numpy.asarray', 'np.asarray', (['mb_neglogpacs'], {'dtype': 'np.float32'}), '(mb_neglogpacs, dtype=np.float32)\n', (8826, 8859), True, 'import numpy as np\n'), ((8879, 8914), 'numpy.asarray', 'np.asarray', (['mb_dones'], {'dtype': 'np.bool'}), '(mb_dones, dtype=np.bool)\n', (8889, 8914), True, 'import numpy as np\n'), ((9049, 9074), 'numpy.zeros_like', 'np.zeros_like', (['mb_rewards'], {}), '(mb_rewards)\n', (9062, 9074), True, 'import numpy as np\n'), ((9097, 9116), 'numpy.copy', 'np.copy', (['mb_rewards'], {}), '(mb_rewards)\n', (9104, 9116), True, 'import numpy as np\n'), ((831, 895), 'stable_baselines.common.TensorboardWriter', 'TensorboardWriter', (['self.graph', 'self.tensorboard_log', 'tb_log_name'], {}), '(self.graph, self.tensorboard_log, tb_log_name)\n', (848, 895), False, 'from stable_baselines.common import explained_variance, TensorboardWriter\n'), ((1082, 1106), 'numpy.zeros', 'np.zeros', (['(self.n_envs,)'], {}), '((self.n_envs,))\n', (1090, 1106), True, 'import numpy as np\n'), ((1134, 1151), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (1139, 1151), False, 'from collections import deque\n'), ((1180, 1191), 'time.time', 'time.time', ([], {}), '()\n', (1189, 1191), False, 'import time\n'), ((1486, 1497), 'time.time', 'time.time', ([], {}), '()\n', (1495, 1497), False, 'import time\n'), ((4170, 4199), 'numpy.mean', 'np.mean', (['mb_loss_vals'], {'axis': '(0)'}), '(mb_loss_vals, axis=0)\n', (4177, 4199), True, 'import numpy as np\n'), ((4224, 4235), 'time.time', 'time.time', ([], {}), '()\n', (4233, 4235), False, 'import time\n'), ((7929, 8000), 'numpy.clip', 'np.clip', (['actions', 'self.env.action_space.low', 'self.env.action_space.high'], {}), '(actions, self.env.action_space.low, self.env.action_space.high)\n', (7936, 8000), True, 'import numpy as np\n'), ((2033, 2056), 'numpy.arange', 'np.arange', (['self.n_batch'], {}), '(self.n_batch)\n', (2042, 2056), True, 'import numpy as np\n'), ((2972, 2994), 'numpy.arange', 'np.arange', (['self.n_envs'], {}), '(self.n_envs)\n', (2981, 2994), True, 'import numpy as np\n'), ((4879, 4914), 'stable_baselines.common.explained_variance', 'explained_variance', (['values', 'returns'], {}), '(values, returns)\n', (4897, 4914), False, 'from stable_baselines.common import explained_variance, TensorboardWriter\n'), ((4935, 4979), 'stable_baselines.logger.logkv', 'logger.logkv', 
(['"""total_timesteps"""', 'n_timesteps'], {}), "('total_timesteps', n_timesteps)\n", (4947, 4979), False, 'from stable_baselines import logger, PPO2\n'), ((5000, 5024), 'stable_baselines.logger.logkv', 'logger.logkv', (['"""fps"""', 'fps'], {}), "('fps', fps)\n", (5012, 5024), False, 'from stable_baselines import logger, PPO2\n'), ((5323, 5376), 'stable_baselines.logger.logkv', 'logger.logkv', (['"""time_elapsed"""', '(t_start - t_first_start)'], {}), "('time_elapsed', t_start - t_first_start)\n", (5335, 5376), False, 'from stable_baselines import logger, PPO2\n'), ((5537, 5553), 'stable_baselines.logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (5551, 5553), False, 'from stable_baselines import logger, PPO2\n'), ((2142, 2165), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (2159, 2165), True, 'import numpy as np\n'), ((3252, 3282), 'numpy.random.shuffle', 'np.random.shuffle', (['env_indices'], {}), '(env_indices)\n', (3269, 3282), True, 'import numpy as np\n'), ((5149, 5201), 'stable_baselines.ppo2.ppo2.safe_mean', 'safe_mean', (["[ep_info['r'] for ep_info in ep_info_buf]"], {}), "([ep_info['r'] for ep_info in ep_info_buf])\n", (5158, 5201), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((5249, 5301), 'stable_baselines.ppo2.ppo2.safe_mean', 'safe_mean', (["[ep_info['l'] for ep_info in ep_info_buf]"], {}), "([ep_info['l'] for ep_info in ep_info_buf])\n", (5258, 5301), False, 'from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten\n'), ((5483, 5516), 'stable_baselines.logger.logkv', 'logger.logkv', (['loss_name', 'loss_val'], {}), '(loss_name, loss_val)\n', (5495, 5516), False, 'from stable_baselines import logger, PPO2\n'), ((8405, 8423), 'numpy.sum', 'np.sum', (['mb_rewards'], {}), '(mb_rewards)\n', (8411, 8423), True, 'import numpy as np\n'), ((3030, 3067), 'numpy.arange', 'np.arange', (['(self.n_envs * self.n_steps)'], {}), '(self.n_envs * self.n_steps)\n', (3039, 3067), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform, and how a defined moving
platform can be exploited as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to set up and configure a simulation environment in order to provide a
# multi-sensor moving platform; as such, the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them, a review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6 dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
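# %%
# A quick check (added sketch, not part of the original example): with the noise coefficients
# set to zero the platform motion is deterministic, so its state evolves exactly as
# :math:`F_{k}x_{k-1}`. The matrix printed below should match the block form given above for
# :math:`\triangle k = 1` second.
print(transition_model.matrix(time_interval=timedelta(seconds=1)))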
# create our fixed platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non - linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
# \arctan(\mathcal{y},\mathcal{x}) \ \
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * :math:`\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
# \mathbf{z}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation
np.deg2rad(0.05)]))) # Bearing
# imager mounting offset
imager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])
# Mount the imager onto the platform
imager = PassiveElevationBearing(ndim_state=6,
mapping=(0, 2, 4),
noise_covar=imager_noise_covar,
mounting_offset=imager_mounting_offsets,
rotation_offset=imager_rotation_offsets,
)
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platforms local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors
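# %%
# (Added sketch, not part of the original example.) Each mounted sensor keeps its own offset,
# expressed in the platform's body frame:
for sensor in sensor_platform.sensors:
    print(type(sensor).__name__, sensor.mounting_offset.ravel())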
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, which we demonstrated in the previous example, to create a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependant).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn
straight_level = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# Configure the aircraft turn behaviour
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32  # specified in radians per second
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)
# Configure turn model to maintain current altitude
turning = CombinedLinearGaussianTransitionModel(
[turn_model, ConstantVelocity(0.)])
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
timedelta(seconds=8)]
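# %%
# A quick arithmetic check (added sketch, not in the original example): each turn segment
# lasts 8 seconds at ``turn_rate`` radians per second, so the platform sweeps
# :math:`\pi/32 \times 8 = \pi/4` radians, i.e. the 45 degrees stated above.
assert np.isclose(np.rad2deg(turn_rate * manoeuvre_times[1].total_seconds()), 45.0)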
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
transition_times=manoeuvre_times,
states=initial_target_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have built our sensor platform and a target platform we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# fed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for
# 25 seconds.
times = np.arange(0, 24, 1) # 25 seconds
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have set up our sensor platform, target and simulation we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater, this is because each sensor adds their measurement model to
# each detection they generate. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
target_transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])
# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater
resampler = SystematicResampler()
updater = ParticleUpdater(measurement_model=None,
resampler=resampler)
# Create a particle initiator
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
None)
initiator = GaussianParticleInitiator(number_particles=500,
initiator=single_point_initiator)
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)
# Create a Kalman single-target tracker
tracker = SingleTargetTracker(
initiator=initiator,
deleter=deleter,
detector=sim,
data_associator=data_associator,
updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
for time, ctracks in tracker:
artists = []
ax.set_xlabel("$East$")
ax.set_ylabel("$North$")
ax.set_ylim(0, 2250)
ax.set_xlim(-1000, 1000)
X = [state.state_vector[0] for state in sensor_platform]
Y = [state.state_vector[2] for state in sensor_platform]
artists.extend(ax.plot(X, Y, color='b'))
for detection in sim.detections:
if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
color = 'y'
else:
r = 10000000
# extract the platform rotation offsets
_, el_offset, az_offset = sensor_platform.orientation
# obtain measurement angles and map to cartesian
e, a = detection.state_vector
x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
color = 'g'
X = [sensor_platform.state_vector[0], x]
Y = [sensor_platform.state_vector[2], y]
artists.extend(ax.plot(X, Y, color=color))
X = [state.state_vector[0] for state in target]
Y = [state.state_vector[2] for state in target]
artists.extend(ax.plot(X, Y, color='r'))
for track in ctracks:
X = [state.state_vector[0] for state in track]
Y = [state.state_vector[2] for state in track]
artists.extend(ax.plot(X, Y, color='k'))
frames.append(artists)
animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
|
[
"stonesoup.updater.particle.ParticleUpdater",
"stonesoup.platform.base.MovingPlatform",
"stonesoup.simulator.simple.DummyGroundTruthSimulator",
"stonesoup.measures.Mahalanobis",
"stonesoup.functions.sphere2cart",
"numpy.array",
"datetime.timedelta",
"stonesoup.simulator.platform.PlatformDetectionSimulator",
"numpy.arange",
"stonesoup.sensor.radar.radar.RadarElevationBearingRangeRate",
"stonesoup.models.transition.linear.ConstantVelocity",
"stonesoup.types.array.StateVector",
"stonesoup.models.transition.linear.ConstantTurn",
"stonesoup.resampler.particle.SystematicResampler",
"stonesoup.dataassociator.neighbour.GNNWith2DAssignment",
"stonesoup.types.state.State",
"numpy.deg2rad",
"stonesoup.sensor.passive.PassiveElevationBearing",
"stonesoup.deleter.time.UpdateTimeStepsDeleter",
"numpy.diag",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"matplotlib.animation.ArtistAnimation",
"stonesoup.tracker.simple.SingleTargetTracker",
"stonesoup.predictor.particle.ParticlePredictor",
"stonesoup.initiator.simple.GaussianParticleInitiator",
"stonesoup.platform.base.MultiTransitionMovingPlatform"
] |
[((2101, 2115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2113, 2115), False, 'from datetime import datetime\n'), ((3612, 3659), 'stonesoup.types.array.StateVector', 'StateVector', (['[[0], [0], [0], [50], [8000], [0]]'], {}), '([[0], [0], [0], [50], [8000], [0]])\n', (3623, 3659), False, 'from stonesoup.types.array import StateVector\n'), ((3676, 3706), 'stonesoup.types.state.State', 'State', (['initial_loc', 'start_time'], {}), '(initial_loc, start_time)\n', (3681, 3706), False, 'from stonesoup.types.state import State, GaussianState\n'), ((3940, 4071), 'stonesoup.platform.base.MovingPlatform', 'MovingPlatform', ([], {'states': 'initial_state', 'position_mapping': '(0, 2, 4)', 'velocity_mapping': '(1, 3, 5)', 'transition_model': 'transition_model'}), '(states=initial_state, position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5), transition_model=transition_model)\n', (3954, 4071), False, 'from stonesoup.platform.base import MovingPlatform\n'), ((5065, 5088), 'stonesoup.types.array.StateVector', 'StateVector', (['[10, 0, 0]'], {}), '([10, 0, 0])\n', (5076, 5088), False, 'from stonesoup.types.array import StateVector\n'), ((5132, 5154), 'stonesoup.types.array.StateVector', 'StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5143, 5154), False, 'from stonesoup.types.array import StateVector\n'), ((5201, 5425), 'stonesoup.sensor.radar.radar.RadarElevationBearingRangeRate', 'RadarElevationBearingRangeRate', ([], {'ndim_state': '(6)', 'position_mapping': '(0, 2, 4)', 'velocity_mapping': '(1, 3, 5)', 'noise_covar': 'radar_noise_covar', 'mounting_offset': 'radar_mounting_offsets', 'rotation_offset': 'radar_rotation_offsets'}), '(ndim_state=6, position_mapping=(0, 2, 4),\n velocity_mapping=(1, 3, 5), noise_covar=radar_noise_covar,\n mounting_offset=radar_mounting_offsets, rotation_offset=\n radar_rotation_offsets)\n', (5231, 5425), False, 'from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate\n'), ((7195, 7218), 'stonesoup.types.array.StateVector', 'StateVector', (['[0, 8, -1]'], {}), '([0, 8, -1])\n', (7206, 7218), False, 'from stonesoup.types.array import StateVector\n'), ((7278, 7300), 'stonesoup.types.array.StateVector', 'StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (7289, 7300), False, 'from stonesoup.types.array import StateVector\n'), ((7348, 7527), 'stonesoup.sensor.passive.PassiveElevationBearing', 'PassiveElevationBearing', ([], {'ndim_state': '(6)', 'mapping': '(0, 2, 4)', 'noise_covar': 'imager_noise_covar', 'mounting_offset': 'imager_mounting_offsets', 'rotation_offset': 'imager_rotation_offsets'}), '(ndim_state=6, mapping=(0, 2, 4), noise_covar=\n imager_noise_covar, mounting_offset=imager_mounting_offsets,\n rotation_offset=imager_rotation_offsets)\n', (7371, 7527), False, 'from stonesoup.sensor.passive import PassiveElevationBearing\n'), ((10056, 10076), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (10064, 10076), True, 'import numpy as np\n'), ((10150, 10235), 'stonesoup.models.transition.linear.ConstantTurn', 'ConstantTurn', ([], {'turn_noise_diff_coeffs': 'turn_noise_diff_coeffs', 'turn_rate': 'turn_rate'}), '(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate\n )\n', (10162, 10235), False, 'from stonesoup.models.transition.linear import ConstantTurn\n'), ((10863, 10914), 'stonesoup.types.array.StateVector', 'StateVector', (['[[0], [-40], [1800], [0], [8000], [0]]'], {}), '([[0], [-40], [1800], [0], [8000], [0]])\n', (10874, 10914), False, 'from stonesoup.types.array import StateVector\n'), 
((10938, 10980), 'stonesoup.types.state.State', 'State', (['initial_target_location', 'start_time'], {}), '(initial_target_location, start_time)\n', (10943, 10980), False, 'from stonesoup.types.state import State, GaussianState\n'), ((10990, 11194), 'stonesoup.platform.base.MultiTransitionMovingPlatform', 'MultiTransitionMovingPlatform', ([], {'transition_models': 'manoeuvre_list', 'transition_times': 'manoeuvre_times', 'states': 'initial_target_state', 'position_mapping': '(0, 2, 4)', 'velocity_mapping': '(1, 3, 5)', 'sensors': 'None'}), '(transition_models=manoeuvre_list,\n transition_times=manoeuvre_times, states=initial_target_state,\n position_mapping=(0, 2, 4), velocity_mapping=(1, 3, 5), sensors=None)\n', (11019, 11194), False, 'from stonesoup.platform.base import MultiTransitionMovingPlatform\n'), ((12201, 12220), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(1)'], {}), '(0, 24, 1)\n', (12210, 12220), True, 'import numpy as np\n'), ((12339, 12382), 'stonesoup.simulator.simple.DummyGroundTruthSimulator', 'DummyGroundTruthSimulator', ([], {'times': 'timestamps'}), '(times=timestamps)\n', (12364, 12382), False, 'from stonesoup.simulator.simple import DummyGroundTruthSimulator\n'), ((12389, 12476), 'stonesoup.simulator.platform.PlatformDetectionSimulator', 'PlatformDetectionSimulator', ([], {'groundtruth': 'truths', 'platforms': '[sensor_platform, target]'}), '(groundtruth=truths, platforms=[sensor_platform,\n target])\n', (12415, 12476), False, 'from stonesoup.simulator.platform import PlatformDetectionSimulator\n'), ((13354, 13396), 'stonesoup.predictor.particle.ParticlePredictor', 'ParticlePredictor', (['target_transition_model'], {}), '(target_transition_model)\n', (13371, 13396), False, 'from stonesoup.predictor.particle import ParticlePredictor\n'), ((13456, 13477), 'stonesoup.resampler.particle.SystematicResampler', 'SystematicResampler', ([], {}), '()\n', (13475, 13477), False, 'from stonesoup.resampler.particle import SystematicResampler\n'), ((13488, 13548), 'stonesoup.updater.particle.ParticleUpdater', 'ParticleUpdater', ([], {'measurement_model': 'None', 'resampler': 'resampler'}), '(measurement_model=None, resampler=resampler)\n', (13503, 13548), False, 'from stonesoup.updater.particle import ParticleUpdater\n'), ((13872, 13958), 'stonesoup.initiator.simple.GaussianParticleInitiator', 'GaussianParticleInitiator', ([], {'number_particles': '(500)', 'initiator': 'single_point_initiator'}), '(number_particles=500, initiator=\n single_point_initiator)\n', (13897, 13958), False, 'from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator\n'), ((14114, 14147), 'stonesoup.dataassociator.neighbour.GNNWith2DAssignment', 'GNNWith2DAssignment', (['hypothesiser'], {}), '(hypothesiser)\n', (14133, 14147), False, 'from stonesoup.dataassociator.neighbour import GNNWith2DAssignment\n'), ((14217, 14267), 'stonesoup.deleter.time.UpdateTimeStepsDeleter', 'UpdateTimeStepsDeleter', ([], {'time_steps_since_update': '(10)'}), '(time_steps_since_update=10)\n', (14239, 14267), False, 'from stonesoup.deleter.time import UpdateTimeStepsDeleter\n'), ((14319, 14444), 'stonesoup.tracker.simple.SingleTargetTracker', 'SingleTargetTracker', ([], {'initiator': 'initiator', 'deleter': 'deleter', 'detector': 'sim', 'data_associator': 'data_associator', 'updater': 'updater'}), '(initiator=initiator, deleter=deleter, detector=sim,\n data_associator=data_associator, updater=updater)\n', (14338, 14444), False, 'from stonesoup.tracker.simple import SingleTargetTracker\n'), ((15175, 
15202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (15185, 15202), True, 'from matplotlib import pyplot as plt\n'), ((16673, 16711), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'frames'], {}), '(fig, frames)\n', (16698, 16711), False, 'from matplotlib import animation\n'), ((10436, 10456), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(8)'}), '(seconds=8)\n', (10445, 10456), False, 'from datetime import timedelta\n'), ((10477, 10497), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(8)'}), '(seconds=8)\n', (10486, 10497), False, 'from datetime import timedelta\n'), ((3826, 3847), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (3842, 3847), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((3848, 3869), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (3864, 3869), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((3870, 3891), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (3886, 3891), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((9923, 9944), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (9939, 9944), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((9945, 9966), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (9961, 9966), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((9967, 9988), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (9983, 9988), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((10350, 10371), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.0)'], {}), '(0.0)\n', (10366, 10371), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13244, 13263), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(5)'], {}), '(5)\n', (13260, 13263), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13265, 13284), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(5)'], {}), '(5)\n', (13281, 13284), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13286, 13305), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(1)'], {}), '(1)\n', (13302, 13305), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((13798, 13846), 'numpy.diag', 'np.diag', (['[10000, 1000, 10000, 1000, 10000, 1000]'], {}), '([10000, 1000, 10000, 1000, 10000, 1000])\n', (13805, 13846), True, 'import numpy as np\n'), ((14057, 14070), 'stonesoup.measures.Mahalanobis', 'Mahalanobis', ([], {}), '()\n', (14068, 14070), False, 'from stonesoup.measures import Mahalanobis\n'), ((16090, 16134), 'stonesoup.functions.sphere2cart', 
'sphere2cart', (['r', '(a + az_offset)', '(e + el_offset)'], {}), '(r, a + az_offset, e + el_offset)\n', (16101, 16134), False, 'from stonesoup.functions import sphere2cart\n'), ((4888, 4901), 'numpy.deg2rad', 'np.deg2rad', (['(3)'], {}), '(3)\n', (4898, 4901), True, 'import numpy as np\n'), ((4930, 4943), 'numpy.deg2rad', 'np.deg2rad', (['(3)'], {}), '(3)\n', (4940, 4943), True, 'import numpy as np\n'), ((7024, 7040), 'numpy.deg2rad', 'np.deg2rad', (['(0.05)'], {}), '(0.05)\n', (7034, 7040), True, 'import numpy as np\n'), ((7111, 7127), 'numpy.deg2rad', 'np.deg2rad', (['(0.05)'], {}), '(0.05)\n', (7121, 7127), True, 'import numpy as np\n')]
|
""" render_fmo.py renders obj file to rgb image with fmo model
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj: wrapper function for render() render
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
modified by rozumden
"""
import sys
import os
import random
import pickle
import bpy
import glob
import numpy as np
from mathutils import Vector
from mathutils import Euler
import cv2
from PIL import Image
from skimage.draw import line_aa
from scipy import signal
from skimage.measure import regionprops
# import moviepy.editor as mpy
from array2gif import write_gif
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
import settings
import pdb
def renderTraj(pars, H):
## Input: pars is either 2x2 (line) or 2x3 (parabola)
if pars.shape[1] == 2:
pars = np.concatenate( (pars, np.zeros((2,1))),1)
ns = 2
else:
ns = 5
ns = np.max([2, ns])
rangeint = np.linspace(0,1,ns)
for timeinst in range(rangeint.shape[0]-1):
ti0 = rangeint[timeinst]
ti1 = rangeint[timeinst+1]
start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)
end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)
start = np.round(start).astype(np.int32)
end = np.round(end).astype(np.int32)
rr, cc, val = line_aa(start[0], start[1], end[0], end[1])
valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))
rr = rr[valid]
cc = cc[valid]
val = val[valid]
if len(H.shape) > 2:
H[rr, cc, 0] = 0
H[rr, cc, 1] = 0
H[rr, cc, 2] = val
else:
H[rr, cc] = val
return H
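# Illustrative call (values are made up): draw a single straight segment into an
# empty 64x64 accumulator. Columns of `pars` are [start_point, direction].
# H = renderTraj(np.array([[32.0, 1.0], [32.0, 2.0]]), np.zeros((64, 64)))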
def open_log(temp_folder = g_temp): # redirect output to log file
logfile = os.path.join(temp_folder,'blender_render.log')
try:
os.remove(logfile)
except OSError:
pass
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
return old
def close_log(old): # disable output redirection
os.close(1)
os.dup(old)
os.close(old)
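# Typical usage of the pair above (as done around the render calls below):
# silence Blender's verbose console output, then restore stdout.
# old = open_log(temp_folder)
# bpy.ops.render.render(write_still=True)
# close_log(old)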
def clear_mesh():
""" clear all meshes in the secene
"""
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite
bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension
if g_ambient_light:
world = bpy.data.worlds['World']
world.use_nodes = True
bg = world.node_tree.nodes['Background']
bg.inputs[0].default_value[:3] = g_bg_color
bg.inputs[1].default_value = 1.0
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False
bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)
print('Number of devices {}'.format(ndev))
for ki in range(2,ndev):
bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
# bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
image_output_node = tree.nodes.new('CompositorNodeOutputFile')
image_output_node.base_path = g_syn_rgb_folder
links.new(render_layer_node.outputs[0], image_output_node.inputs[0])
# image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = g_temp
image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder #
def render(obj_path, viewpoint, temp_folder):
"""render rbg image
render a object rgb image by a given camera viewpoint and
choose random image as background, only render one image
at a time.
Args:
obj_path: a string variable indicate the obj file path
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if not os.path.exists(g_syn_rgb_folder):
os.mkdir(g_syn_rgb_folder)
obj = bpy.data.objects['model_normalized']
ni = g_fmo_steps
maxlen = 0.5
maxrot = 1.57/6
tri = 0
# rot_base = np.array([math.pi/2,0,0])
while tri <= g_max_trials:
do_repeat = False
tri += 1
if not g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
for tempi in range(len(bpy.data.objects[oi].data.materials)):
if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:
return True, True ## transparent object
los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))
loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni
rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))
rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni
old = open_log(temp_folder)
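        # Render the first and the last frame before the intermediate ones, so that
        # an object touching or leaving the image border is detected early and the
        # trial can be aborted without rendering the remaining frames.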
for ki in [0, ni-1]+list(range(1,ni-1)):
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.data.objects[oi].location = los_start + loc_step*ki
bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))
bpy.context.scene.frame_set(ki + 1)
bpy.ops.render.render(write_still=True) #start rendering
if ki == 0 or ki == (ni-1):
Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0
is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0
if is_border:
if ki == 0:
close_log(old)
return False, True ## sample different starting viewpoint
else:
do_repeat = True ## just sample another motion direction
if do_repeat:
break
close_log(old)
if do_repeat == False:
break
if do_repeat: ## sample different starting viewpoint
return False, True
return False, False
def make_fmo(path, gt_path, video_path):
n_im = 5
background_images = os.listdir(g_background_image_path)
seq_name = random.choice(background_images)
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg"))
if len(seq_images) <= n_im:
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png"))
seq_images.sort()
bgri = random.randint(n_im,len(seq_images)-1)
bgr_path = seq_images[bgri]
B0 = cv2.imread(bgr_path)/255
B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
B[B > 1] = 1
B[B < 0] = 0
FH = np.zeros(B.shape)
MH = np.zeros(B.shape[:2])
pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T
FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))
centroids = np.zeros((2,g_fmo_steps))
for ki in range(g_fmo_steps):
FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max
props = regionprops((FM[:,:,-1,ki]>0).astype(int))
if len(props) != 1:
return False
centroids[:,ki] = props[0].centroid
for ki in range(g_fmo_steps):
F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]
M = FM[:,:,-1,ki]
if ki < g_fmo_steps-1:
pars[:,1] = centroids[:,ki+1] - centroids[:,ki]
H = renderTraj(pars, np.zeros(B.shape[:2]))
H /= H.sum()*g_fmo_steps
for kk in range(3):
FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')
MH += signal.fftconvolve(H, M, mode='same')
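    # FMO image formation model: composite the accumulated motion-blurred
    # appearance over the background, I = H*F + (1 - H*M) * B, where FH and MH
    # hold the per-step blurred appearance and blurred mask sums.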
Im = FH + (1 - MH)[:,:,np.newaxis]*B
Im[Im > 1] = 1
Im[Im < 0] = 0
if g_skip_low_contrast:
Diff = np.sum(np.abs(Im - B),2)
meanval = np.mean(Diff[MH > 0.05])
print("Contrast {}".format(meanval))
if meanval < 0.2:
return False
if g_skip_small:
sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1])
print("Size percentage {}".format(sizeper))
if sizeper < 0.05:
return False
Im = Im[:,:,[2,1,0]]
Ims = Image.fromarray((Im * 255).astype(np.uint8))
Ims.save(path)
Ball = np.zeros(B.shape+(n_im,))
Ball[:,:,:,0] = B
for ki in range(1,n_im):
bgrki_path = seq_images[bgri-ki]
Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
Ball[Ball > 1] = 1
Ball[Ball < 0] = 0
Bmed = np.median(Ball,3)
Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png'))
Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png'))
# Ims.save(os.path.join(g_temp,"I.png"))
# Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png"))
# Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png"))
# Image.fromarray((M * 255).astype(np.uint8)).save(os.path.join(g_temp,"M.png"))
# Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png"))
# Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png"))
if False:
Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:])
Fwr = (Fwr * 255).astype(np.uint8)
# Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255
out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True)
for ki in range(g_fmo_steps):
out.write(Fwr[:,:,:,ki])
out.release()
return True
def render_obj(obj_path, path, objid, obj_name, temp_folder):
""" render one obj file by a given viewpoint list
a wrapper function for render()
Args:
        obj_path: a string variable indicating the obj file path
"""
vps_path = random.sample(g_view_point_file, 1)[0]
vps = list(load_viewpoint(vps_path))
random.shuffle(vps)
save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid))
gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid))
video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid))
if not os.path.exists(gt_path):
os.mkdir(gt_path)
image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = gt_path
for imt in bpy.data.images:
bpy.data.images.remove(imt)
if g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.context.scene.objects.active = bpy.data.objects[oi]
# pdb.set_trace()
# for m in bpy.data.materials:
# bpy.data.materials.remove(m)
# bpy.ops.object.material_slot_remove()
bpy.ops.object.editmode_toggle()
bpy.ops.uv.cube_project()
bpy.ops.object.editmode_toggle()
texture_images = os.listdir(g_texture_path)
texture = random.choice(texture_images)
tex_path = os.path.join(g_texture_path,texture)
# mat = bpy.data.materials.new(texture)
# mat.use_nodes = True
# nt = mat.node_tree
# nodes = nt.nodes
# links = nt.links
# # Image Texture
# textureNode = nodes.new("ShaderNodeTexImage")
# textureNode.image = bpy.data.images.load(tex_path)
# links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# mat.specular_intensity = 0
# bpy.data.objects[oi].active_material = mat
# print(bpy.data.objects[oi].active_material)
for mat in bpy.data.materials:
nodes = mat.node_tree.nodes
links = mat.node_tree.links
textureNode = nodes.new("ShaderNodeTexImage")
textureNode.image = bpy.data.images.load(tex_path)
links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# print(bpy.data.objects[oi].active_material)
tri = 0
while tri <= g_max_trials:
tri += 1
vp = random.sample(vps, 1)[0]
sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder)
if sample_different_vp:
if sample_different_object:
print('Transparent object!')
return False
print('Rendering failed, repeating')
continue
success = make_fmo(save_path, gt_path, video_path)
if success:
return True
print('Making FMO failed, repeating')
return False
def init_all():
"""init everything we need for rendering
an image
"""
scene_setting_init(g_gpu_render_enable)
node_setting_init()
cam_obj = bpy.data.objects['Camera']
cam_obj.rotation_mode = g_rotation_mode
if g_render_light:
bpy.data.objects['Lamp'].data.energy = 50
bpy.ops.object.lamp_add(type='SUN')
bpy.data.objects['Sun'].data.energy = 5
### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA
init_all()
argv = sys.argv
argv = argv[argv.index("--") + 1:]
start_index = int(argv[0])
step_index = int(argv[1])
print('Start index {}, step index {}'.format(start_index, step_index))
temp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/'
for obj_name in g_render_objs[start_index:(start_index+step_index)]:
print("Processing object {}".format(obj_name))
obj_folder = os.path.join(g_syn_rgb_folder, obj_name)
if not os.path.exists(obj_folder):
os.makedirs(obj_folder)
if not os.path.exists(os.path.join(obj_folder,"GT")):
os.mkdir(os.path.join(obj_folder,"GT"))
num = g_shapenet_categlory_pair[obj_name]
search_path = os.path.join(g_shapenet_path, num, '**','*.obj')
pathes = glob.glob(search_path, recursive=True)
random.shuffle(pathes)
objid = 1
tri = 0
while objid <= g_number_per_category:
print(" instance {}".format(objid))
clear_mesh()
path = random.sample(pathes, 1)[0]
old = open_log(temp_folder)
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_split_groups=False, use_split_objects=True)
# bpy.ops.import_scene.obj(filepath=path)
close_log(old)
#combine_objects()
#scale_objects(0.5)
result = render_obj(path, obj_folder, objid, obj_name, temp_folder)
if result:
objid += 1
tri = 0
else:
print('Error! Rendering another object from the category!')
tri += 1
if tri > g_max_trials:
                print('No object found in the category!')
break
|
[
"os.open",
"numpy.array",
"bpy.data.images.load",
"bpy.data.images.remove",
"os.remove",
"bpy.ops.object.delete",
"os.path.exists",
"numpy.mean",
"os.listdir",
"scipy.signal.fftconvolve",
"bpy.data.textures.remove",
"numpy.max",
"os.dup",
"numpy.linspace",
"bpy.ops.object.lamp_add",
"os.mkdir",
"bpy.data.materials.remove",
"cv2.VideoWriter_fourcc",
"sys.stdout.flush",
"glob.glob",
"numpy.round",
"numpy.abs",
"random.sample",
"random.choice",
"bpy.data.meshes.remove",
"random.shuffle",
"random.uniform",
"os.close",
"bpy.ops.object.select_all",
"os.path.dirname",
"cv2.imread",
"bpy.ops.object.editmode_toggle",
"mathutils.Euler",
"numpy.median",
"os.makedirs",
"numpy.logical_and",
"os.path.join",
"skimage.draw.line_aa",
"bpy.context.scene.frame_set",
"numpy.sum",
"numpy.zeros",
"bpy.ops.import_scene.obj",
"bpy.ops.render.render",
"os.path.abspath",
"bpy.ops.uv.cube_project"
] |
[((869, 894), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (884, 894), False, 'import os\n'), ((911, 936), 'os.path.dirname', 'os.path.dirname', (['abs_path'], {}), '(abs_path)\n', (926, 936), False, 'import os\n'), ((1235, 1250), 'numpy.max', 'np.max', (['[2, ns]'], {}), '([2, ns])\n', (1241, 1250), True, 'import numpy as np\n'), ((1266, 1287), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ns'], {}), '(0, 1, ns)\n', (1277, 1287), True, 'import numpy as np\n'), ((2127, 2174), 'os.path.join', 'os.path.join', (['temp_folder', '"""blender_render.log"""'], {}), "(temp_folder, 'blender_render.log')\n", (2139, 2174), False, 'import os\n'), ((2284, 2293), 'os.dup', 'os.dup', (['(1)'], {}), '(1)\n', (2290, 2293), False, 'import os\n'), ((2298, 2316), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2314, 2316), False, 'import sys\n'), ((2321, 2332), 'os.close', 'os.close', (['(1)'], {}), '(1)\n', (2329, 2332), False, 'import os\n'), ((2337, 2366), 'os.open', 'os.open', (['logfile', 'os.O_WRONLY'], {}), '(logfile, os.O_WRONLY)\n', (2344, 2366), False, 'import os\n'), ((2436, 2447), 'os.close', 'os.close', (['(1)'], {}), '(1)\n', (2444, 2447), False, 'import os\n'), ((2452, 2463), 'os.dup', 'os.dup', (['old'], {}), '(old)\n', (2458, 2463), False, 'import os\n'), ((2468, 2481), 'os.close', 'os.close', (['old'], {}), '(old)\n', (2476, 2481), False, 'import os\n'), ((2553, 2597), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (2578, 2597), False, 'import bpy\n'), ((2696, 2719), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {}), '()\n', (2717, 2719), False, 'import bpy\n'), ((9367, 9402), 'os.listdir', 'os.listdir', (['g_background_image_path'], {}), '(g_background_image_path)\n', (9377, 9402), False, 'import os\n'), ((9418, 9450), 'random.choice', 'random.choice', (['background_images'], {}), '(background_images)\n', (9431, 9450), False, 'import random\n'), ((9995, 10012), 'numpy.zeros', 'np.zeros', (['B.shape'], {}), '(B.shape)\n', (10003, 10012), True, 'import numpy as np\n'), ((10022, 10043), 'numpy.zeros', 'np.zeros', (['B.shape[:2]'], {}), '(B.shape[:2])\n', (10030, 10043), True, 'import numpy as np\n'), ((10131, 10171), 'numpy.zeros', 'np.zeros', (['(B.shape[:2] + (4, g_fmo_steps))'], {}), '(B.shape[:2] + (4, g_fmo_steps))\n', (10139, 10171), True, 'import numpy as np\n'), ((10186, 10212), 'numpy.zeros', 'np.zeros', (['(2, g_fmo_steps)'], {}), '((2, g_fmo_steps))\n', (10194, 10212), True, 'import numpy as np\n'), ((11548, 11575), 'numpy.zeros', 'np.zeros', (['(B.shape + (n_im,))'], {}), '(B.shape + (n_im,))\n', (11556, 11575), True, 'import numpy as np\n'), ((11924, 11942), 'numpy.median', 'np.median', (['Ball', '(3)'], {}), '(Ball, 3)\n', (11933, 11942), True, 'import numpy as np\n'), ((13405, 13424), 'random.shuffle', 'random.shuffle', (['vps'], {}), '(vps)\n', (13419, 13424), False, 'import random\n'), ((17094, 17134), 'os.path.join', 'os.path.join', (['g_syn_rgb_folder', 'obj_name'], {}), '(g_syn_rgb_folder, obj_name)\n', (17106, 17134), False, 'import os\n'), ((17377, 17426), 'os.path.join', 'os.path.join', (['g_shapenet_path', 'num', '"""**"""', '"""*.obj"""'], {}), "(g_shapenet_path, num, '**', '*.obj')\n", (17389, 17426), False, 'import os\n'), ((17439, 17477), 'glob.glob', 'glob.glob', (['search_path'], {'recursive': '(True)'}), '(search_path, recursive=True)\n', (17448, 17477), False, 'import glob\n'), ((17482, 17504), 'random.shuffle', 'random.shuffle', 
(['pathes'], {}), '(pathes)\n', (17496, 17504), False, 'import random\n'), ((1644, 1687), 'skimage.draw.line_aa', 'line_aa', (['start[0]', 'start[1]', 'end[0]', 'end[1]'], {}), '(start[0], start[1], end[0], end[1])\n', (1651, 1687), False, 'from skimage.draw import line_aa\n'), ((2191, 2209), 'os.remove', 'os.remove', (['logfile'], {}), '(logfile)\n', (2200, 2209), False, 'import os\n'), ((6634, 6666), 'os.path.exists', 'os.path.exists', (['g_syn_rgb_folder'], {}), '(g_syn_rgb_folder)\n', (6648, 6666), False, 'import os\n'), ((6676, 6702), 'os.mkdir', 'os.mkdir', (['g_syn_rgb_folder'], {}), '(g_syn_rgb_folder)\n', (6684, 6702), False, 'import os\n'), ((9478, 9534), 'os.path.join', 'os.path.join', (['g_background_image_path', 'seq_name', '"""*.jpg"""'], {}), "(g_background_image_path, seq_name, '*.jpg')\n", (9490, 9534), False, 'import os\n'), ((9767, 9787), 'cv2.imread', 'cv2.imread', (['bgr_path'], {}), '(bgr_path)\n', (9777, 9787), False, 'import cv2\n'), ((10055, 10131), 'numpy.array', 'np.array', (['[[(B.shape[0] - 1) / 2 - 1, (B.shape[1] - 1) / 2 - 1], [1.0, 1.0]]'], {}), '([[(B.shape[0] - 1) / 2 - 1, (B.shape[1] - 1) / 2 - 1], [1.0, 1.0]])\n', (10063, 10131), True, 'import numpy as np\n'), ((10921, 10958), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['H', 'M'], {'mode': '"""same"""'}), "(H, M, mode='same')\n", (10939, 10958), False, 'from scipy import signal\n'), ((11124, 11148), 'numpy.mean', 'np.mean', (['Diff[MH > 0.05]'], {}), '(Diff[MH > 0.05])\n', (11131, 11148), True, 'import numpy as np\n'), ((12008, 12040), 'os.path.join', 'os.path.join', (['gt_path', '"""bgr.png"""'], {}), "(gt_path, 'bgr.png')\n", (12020, 12040), False, 'import os\n'), ((12110, 12146), 'os.path.join', 'os.path.join', (['gt_path', '"""bgr_med.png"""'], {}), "(gt_path, 'bgr_med.png')\n", (12122, 12146), False, 'import os\n'), ((13321, 13356), 'random.sample', 'random.sample', (['g_view_point_file', '(1)'], {}), '(g_view_point_file, 1)\n', (13334, 13356), False, 'import random\n'), ((13658, 13681), 'os.path.exists', 'os.path.exists', (['gt_path'], {}), '(gt_path)\n', (13672, 13681), False, 'import os\n'), ((13691, 13708), 'os.mkdir', 'os.mkdir', (['gt_path'], {}), '(gt_path)\n', (13699, 13708), False, 'import os\n'), ((13853, 13880), 'bpy.data.images.remove', 'bpy.data.images.remove', (['imt'], {}), '(imt)\n', (13875, 13880), False, 'import bpy\n'), ((16555, 16590), 'bpy.ops.object.lamp_add', 'bpy.ops.object.lamp_add', ([], {'type': '"""SUN"""'}), "(type='SUN')\n", (16578, 16590), False, 'import bpy\n'), ((17146, 17172), 'os.path.exists', 'os.path.exists', (['obj_folder'], {}), '(obj_folder)\n', (17160, 17172), False, 'import os\n'), ((17182, 17205), 'os.makedirs', 'os.makedirs', (['obj_folder'], {}), '(obj_folder)\n', (17193, 17205), False, 'import os\n'), ((17735, 17885), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'path', 'axis_forward': '"""-Z"""', 'axis_up': '"""Y"""', 'filter_glob': '"""*.obj;*.mtl"""', 'use_split_groups': '(False)', 'use_split_objects': '(True)'}), "(filepath=path, axis_forward='-Z', axis_up='Y',\n filter_glob='*.obj;*.mtl', use_split_groups=False, use_split_objects=True)\n", (17759, 17885), False, 'import bpy\n'), ((1719, 1767), 'numpy.logical_and', 'np.logical_and', (['(rr < H.shape[0])', '(cc < H.shape[1])'], {}), '(rr < H.shape[0], cc < H.shape[1])\n', (1733, 1767), True, 'import numpy as np\n'), ((1769, 1799), 'numpy.logical_and', 'np.logical_and', (['(rr > 0)', '(cc > 0)'], {}), '(rr > 0, cc > 0)\n', (1783, 1799), True, 'import numpy as 
np\n'), ((2795, 2824), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['block'], {}), '(block)\n', (2817, 2824), False, 'import bpy\n'), ((2904, 2936), 'bpy.data.materials.remove', 'bpy.data.materials.remove', (['block'], {}), '(block)\n', (2929, 2936), False, 'import bpy\n'), ((3015, 3046), 'bpy.data.textures.remove', 'bpy.data.textures.remove', (['block'], {}), '(block)\n', (3039, 3046), False, 'import bpy\n'), ((3123, 3152), 'bpy.data.images.remove', 'bpy.data.images.remove', (['block'], {}), '(block)\n', (3145, 3152), False, 'import bpy\n'), ((8349, 8384), 'bpy.context.scene.frame_set', 'bpy.context.scene.frame_set', (['(ki + 1)'], {}), '(ki + 1)\n', (8376, 8384), False, 'import bpy\n'), ((8397, 8436), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)'}), '(write_still=True)\n', (8418, 8436), False, 'import bpy\n'), ((9597, 9653), 'os.path.join', 'os.path.join', (['g_background_image_path', 'seq_name', '"""*.png"""'], {}), "(g_background_image_path, seq_name, '*.png')\n", (9609, 9653), False, 'import os\n'), ((10750, 10771), 'numpy.zeros', 'np.zeros', (['B.shape[:2]'], {}), '(B.shape[:2])\n', (10758, 10771), True, 'import numpy as np\n'), ((10861, 10908), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['H', 'F[:, :, kk]'], {'mode': '"""same"""'}), "(H, F[:, :, kk], mode='same')\n", (10879, 10908), False, 'from scipy import signal\n'), ((11088, 11102), 'numpy.abs', 'np.abs', (['(Im - B)'], {}), '(Im - B)\n', (11094, 11102), True, 'import numpy as np\n'), ((11284, 11301), 'numpy.sum', 'np.sum', (['(MH > 0.01)'], {}), '(MH > 0.01)\n', (11290, 11301), True, 'import numpy as np\n'), ((12883, 12914), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (12905, 12914), False, 'import cv2\n'), ((14338, 14370), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (14368, 14370), False, 'import bpy\n'), ((14383, 14408), 'bpy.ops.uv.cube_project', 'bpy.ops.uv.cube_project', ([], {}), '()\n', (14406, 14408), False, 'import bpy\n'), ((14421, 14453), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (14451, 14453), False, 'import bpy\n'), ((14484, 14510), 'os.listdir', 'os.listdir', (['g_texture_path'], {}), '(g_texture_path)\n', (14494, 14510), False, 'import os\n'), ((14533, 14562), 'random.choice', 'random.choice', (['texture_images'], {}), '(texture_images)\n', (14546, 14562), False, 'import random\n'), ((14586, 14623), 'os.path.join', 'os.path.join', (['g_texture_path', 'texture'], {}), '(g_texture_path, texture)\n', (14598, 14623), False, 'import os\n'), ((15731, 15752), 'random.sample', 'random.sample', (['vps', '(1)'], {}), '(vps, 1)\n', (15744, 15752), False, 'import random\n'), ((17232, 17262), 'os.path.join', 'os.path.join', (['obj_folder', '"""GT"""'], {}), "(obj_folder, 'GT')\n", (17244, 17262), False, 'import os\n'), ((17281, 17311), 'os.path.join', 'os.path.join', (['obj_folder', '"""GT"""'], {}), "(obj_folder, 'GT')\n", (17293, 17311), False, 'import os\n'), ((17663, 17687), 'random.sample', 'random.sample', (['pathes', '(1)'], {}), '(pathes, 1)\n', (17676, 17687), False, 'import random\n'), ((1166, 1182), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1174, 1182), True, 'import numpy as np\n'), ((1544, 1559), 'numpy.round', 'np.round', (['start'], {}), '(start)\n', (1552, 1559), True, 'import numpy as np\n'), ((1591, 1604), 'numpy.round', 'np.round', (['end'], {}), '(end)\n', (1599, 1604), True, 'import numpy as np\n'), ((7405, 
7446), 'random.uniform', 'random.uniform', (['(-maxlen / 10)', '(maxlen / 10)'], {}), '(-maxlen / 10, maxlen / 10)\n', (7419, 7446), False, 'import random\n'), ((7444, 7475), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7458, 7475), False, 'import random\n'), ((7477, 7508), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7491, 7508), False, 'import random\n'), ((7685, 7715), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (7699, 7715), False, 'import random\n'), ((7715, 7745), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (7729, 7745), False, 'import random\n'), ((7745, 7775), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (7759, 7775), False, 'import random\n'), ((8305, 8336), 'mathutils.Euler', 'Euler', (['(rot_base + rot_step * ki)'], {}), '(rot_base + rot_step * ki)\n', (8310, 8336), False, 'from mathutils import Euler\n'), ((11702, 11724), 'cv2.imread', 'cv2.imread', (['bgrki_path'], {}), '(bgrki_path)\n', (11712, 11724), False, 'import cv2\n'), ((15459, 15489), 'bpy.data.images.load', 'bpy.data.images.load', (['tex_path'], {}), '(tex_path)\n', (15479, 15489), False, 'import bpy\n'), ((7538, 7579), 'random.uniform', 'random.uniform', (['(-maxlen / 10)', '(maxlen / 10)'], {}), '(-maxlen / 10, maxlen / 10)\n', (7552, 7579), False, 'import random\n'), ((7577, 7608), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7591, 7608), False, 'import random\n'), ((7610, 7641), 'random.uniform', 'random.uniform', (['(-maxlen)', 'maxlen'], {}), '(-maxlen, maxlen)\n', (7624, 7641), False, 'import random\n'), ((7805, 7836), 'random.uniform', 'random.uniform', (['(-maxrot)', 'maxrot'], {}), '(-maxrot, maxrot)\n', (7819, 7836), False, 'import random\n'), ((7838, 7869), 'random.uniform', 'random.uniform', (['(-maxrot)', 'maxrot'], {}), '(-maxrot, maxrot)\n', (7852, 7869), False, 'import random\n'), ((7871, 7902), 'random.uniform', 'random.uniform', (['(-maxrot)', 'maxrot'], {}), '(-maxrot, maxrot)\n', (7885, 7902), False, 'import random\n')]
|
#!/usr/bin/env python3
# Copyright 2019-2022 <NAME>, <NAME>, <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script tests the reduced particle diagnostics.
# The setup is a uniform plasma with electrons, protons and photons.
# Various particle and field quantities are written to file using the reduced diagnostics
# and compared with the corresponding quantities computed from the data in the plotfiles.
import os
import sys
import numpy as np
import openpmd_api as io
from scipy.constants import c
from scipy.constants import epsilon_0 as eps0
from scipy.constants import m_e, m_p
from scipy.constants import mu_0 as mu0
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
def do_analysis(single_precision = False):
fn = sys.argv[1]
ds = yt.load(fn)
ad = ds.all_data()
ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only)
opmd_i = opmd.iterations[200]
#--------------------------------------------------------------------------------------------------
# Part 1: get results from plotfiles (label '_yt')
#--------------------------------------------------------------------------------------------------
# Quantities computed from plotfiles
values_yt = dict()
domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value
dx = domain_size / ds.domain_dimensions
# Electrons
x = ad['electrons', 'particle_position_x'].to_ndarray()
y = ad['electrons', 'particle_position_y'].to_ndarray()
z = ad['electrons', 'particle_position_z'].to_ndarray()
uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c
w = ad['electrons', 'particle_weight'].to_ndarray()
filt = uz < 0
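    # The '*_filt' reduced diagnostics average only over particles with uz < 0,
    # so reproduce the same selection here.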
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
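    # Replace zero weights by 1 so the divisions below stay well defined in cells
    # without particles (the corresponding numerators are zero there anyway).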
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['electrons: zavg'] = zavg / wavg_adj
values_yt['electrons: uzavg'] = uzavg / wavg_adj
values_yt['electrons: zuzavg'] = zuzavg / wavg_adj
values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
# protons
x = ad['protons', 'particle_position_x'].to_ndarray()
y = ad['protons', 'particle_position_y'].to_ndarray()
z = ad['protons', 'particle_position_z'].to_ndarray()
uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c
w = ad['protons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['protons: zavg'] = zavg / wavg_adj
values_yt['protons: uzavg'] = uzavg / wavg_adj
values_yt['protons: zuzavg'] = zuzavg / wavg_adj
values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
# Photons (momentum in units of m_e c)
x = ad['photons', 'particle_position_x'].to_ndarray()
y = ad['photons', 'particle_position_y'].to_ndarray()
z = ad['photons', 'particle_position_z'].to_ndarray()
uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c
w = ad['photons', 'particle_weight'].to_ndarray()
filt = uz < 0
x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)
zavg = np.zeros(ds.domain_dimensions)
uzavg = np.zeros(ds.domain_dimensions)
zuzavg = np.zeros(ds.domain_dimensions)
wavg = np.zeros(ds.domain_dimensions)
uzavg_filt = np.zeros(ds.domain_dimensions)
wavg_filt = np.zeros(ds.domain_dimensions)
for i_p in range(len(x)):
zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]
uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]
zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]
uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]
wavg_adj = np.where(wavg == 0, 1, wavg)
wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
values_yt['photons: zavg'] = zavg / wavg_adj
values_yt['photons: uzavg'] = uzavg / wavg_adj
values_yt['photons: zuzavg'] = zuzavg / wavg_adj
values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj
values_rd = dict()
# Load reduced particle diagnostic data from plotfiles
values_rd['electrons: zavg'] = ad0[('boxlib','z_electrons')]
values_rd['protons: zavg'] = ad0[('boxlib','z_protons')]
values_rd['photons: zavg'] = ad0[('boxlib','z_photons')]
values_rd['electrons: uzavg'] = ad0[('boxlib','uz_electrons')]
values_rd['protons: uzavg'] = ad0[('boxlib','uz_protons')]
values_rd['photons: uzavg'] = ad0[('boxlib','uz_photons')]
values_rd['electrons: zuzavg'] = ad0[('boxlib','zuz_electrons')]
values_rd['protons: zuzavg'] = ad0[('boxlib','zuz_protons')]
values_rd['photons: zuzavg'] = ad0[('boxlib','zuz_photons')]
values_rd['electrons: uzavg_filt'] = ad0[('boxlib','uz_filt_electrons')]
values_rd['protons: uzavg_filt'] = ad0[('boxlib','uz_filt_protons')]
values_rd['photons: uzavg_filt'] = ad0[('boxlib','uz_filt_photons')]
values_opmd = dict()
# Load reduced particle diagnostic data from OPMD output
values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: zavg'] = opmd_i.meshes['z_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()
values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()
opmd.flush()
del opmd
#--------------------------------------------------------------------------------------------------
# Part 3: compare values from plotfiles and diagnostics and print output
#--------------------------------------------------------------------------------------------------
error_plt = dict()
error_opmd = dict()
tolerance = 5e-3 if single_precision else 1e-12
# if single precision, increase tolerance from default value
check_tolerance = 5e-3 if single_precision else 1e-9
for k in values_yt.keys():
# check that the zeros line up, since we'll be ignoring them in the error calculation
assert(np.all((values_yt[k] == 0) == (values_rd[k] == 0)))
error_plt[k] = np.max(abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])
print(k, 'relative error plotfile = ', error_plt[k])
assert(error_plt[k] < tolerance)
assert(np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)))
error_opmd[k] = np.max(abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])
assert(error_opmd[k] < tolerance)
print(k, 'relative error openPMD = ', error_opmd[k])
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance)
|
[
"sys.path.insert",
"numpy.where",
"openpmd_api.Series",
"os.getcwd",
"checksumAPI.evaluate_checksum",
"numpy.zeros",
"yt.load",
"numpy.all"
] |
[((667, 727), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (682, 727), False, 'import sys\n'), ((823, 834), 'yt.load', 'yt.load', (['fn'], {}), '(fn)\n', (830, 834), False, 'import yt\n'), ((964, 1025), 'openpmd_api.Series', 'io.Series', (['"""diags/openpmd/openpmd_%T.h5"""', 'io.Access.read_only'], {}), "('diags/openpmd/openpmd_%T.h5', io.Access.read_only)\n", (973, 1025), True, 'import openpmd_api as io\n'), ((2070, 2100), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2078, 2100), True, 'import numpy as np\n'), ((2113, 2143), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2121, 2143), True, 'import numpy as np\n'), ((2157, 2187), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2165, 2187), True, 'import numpy as np\n'), ((2199, 2229), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2207, 2229), True, 'import numpy as np\n'), ((2247, 2277), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2255, 2277), True, 'import numpy as np\n'), ((2294, 2324), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (2302, 2324), True, 'import numpy as np\n'), ((2800, 2828), 'numpy.where', 'np.where', (['(wavg == 0)', '(1)', 'wavg'], {}), '(wavg == 0, 1, wavg)\n', (2808, 2828), True, 'import numpy as np\n'), ((2849, 2887), 'numpy.where', 'np.where', (['(wavg_filt == 0)', '(1)', 'wavg_filt'], {}), '(wavg_filt == 0, 1, wavg_filt)\n', (2857, 2887), True, 'import numpy as np\n'), ((3666, 3696), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3674, 3696), True, 'import numpy as np\n'), ((3709, 3739), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3717, 3739), True, 'import numpy as np\n'), ((3753, 3783), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3761, 3783), True, 'import numpy as np\n'), ((3795, 3825), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3803, 3825), True, 'import numpy as np\n'), ((3843, 3873), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3851, 3873), True, 'import numpy as np\n'), ((3890, 3920), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (3898, 3920), True, 'import numpy as np\n'), ((4396, 4424), 'numpy.where', 'np.where', (['(wavg == 0)', '(1)', 'wavg'], {}), '(wavg == 0, 1, wavg)\n', (4404, 4424), True, 'import numpy as np\n'), ((4445, 4483), 'numpy.where', 'np.where', (['(wavg_filt == 0)', '(1)', 'wavg_filt'], {}), '(wavg_filt == 0, 1, wavg_filt)\n', (4453, 4483), True, 'import numpy as np\n'), ((5283, 5313), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5291, 5313), True, 'import numpy as np\n'), ((5326, 5356), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5334, 5356), True, 'import numpy as np\n'), ((5370, 5400), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5378, 5400), True, 'import numpy as np\n'), ((5412, 5442), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5420, 5442), True, 'import numpy as np\n'), ((5460, 5490), 'numpy.zeros', 
'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5468, 5490), True, 'import numpy as np\n'), ((5507, 5537), 'numpy.zeros', 'np.zeros', (['ds.domain_dimensions'], {}), '(ds.domain_dimensions)\n', (5515, 5537), True, 'import numpy as np\n'), ((6013, 6041), 'numpy.where', 'np.where', (['(wavg == 0)', '(1)', 'wavg'], {}), '(wavg == 0, 1, wavg)\n', (6021, 6041), True, 'import numpy as np\n'), ((6062, 6100), 'numpy.where', 'np.where', (['(wavg_filt == 0)', '(1)', 'wavg_filt'], {}), '(wavg_filt == 0, 1, wavg_filt)\n', (6070, 6100), True, 'import numpy as np\n'), ((9973, 10039), 'checksumAPI.evaluate_checksum', 'checksumAPI.evaluate_checksum', (['test_name', 'fn'], {'rtol': 'check_tolerance'}), '(test_name, fn, rtol=check_tolerance)\n', (10002, 10039), False, 'import checksumAPI\n'), ((9344, 9394), 'numpy.all', 'np.all', (['((values_yt[k] == 0) == (values_rd[k] == 0))'], {}), '((values_yt[k] == 0) == (values_rd[k] == 0))\n', (9350, 9394), True, 'import numpy as np\n'), ((9635, 9689), 'numpy.all', 'np.all', (['((values_yt[k] == 0) == (values_opmd[k].T == 0))'], {}), '((values_yt[k] == 0) == (values_opmd[k].T == 0))\n', (9641, 9689), True, 'import numpy as np\n'), ((9953, 9964), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9962, 9964), False, 'import os\n')]
|
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from pmaf.biome.essentials._metakit import EssentialFeatureMetabase
from pmaf.biome.essentials._base import EssentialBackboneBase
from pmaf.internal._constants import (
AVAIL_TAXONOMY_NOTATIONS,
jRegexGG,
jRegexQIIME,
BIOM_TAXONOMY_NAMES,
VALID_RANKS,
)
from pmaf.internal._shared import (
generate_lineages_from_taxa,
get_rank_upto,
indentify_taxon_notation,
validate_ranks,
extract_valid_ranks,
cols2ranks,
)
from collections import defaultdict
from os import path
import pandas as pd
import numpy as np
import biom
from typing import Union, Sequence, Tuple, Any, Optional
from pmaf.internal._typing import AnyGenericIdentifier, Mapper
class RepTaxonomy(EssentialBackboneBase, EssentialFeatureMetabase):
"""An `essential` class for handling taxonomy data."""
def __init__(
self,
taxonomy: Union[pd.DataFrame, pd.Series, str],
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> None:
"""Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
            Passed to :func:`~pandas.read_csv` or :mod:`biom` loader.
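        Examples
        --------
        Illustrative only; `lineages` is an assumed :class:`~pandas.Series` of
        lineage strings indexed by feature identifiers:
        >>> tax = RepTaxonomy(lineages)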
"""
tmp_metadata = kwargs.pop("metadata", {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if taxonomy.shape[0] > 0:
if taxonomy.shape[1] > 1:
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError(
"Provided `taxonomy` Datafame has invalid ranks."
)
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError("Provided `taxonomy` Datafame is invalid.")
elif isinstance(taxonomy, pd.Series):
if taxonomy.shape[0] > 0:
tmp_taxonomy = taxonomy
else:
raise ValueError("Provided `taxonomy` Series is invalid.")
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[-1].lower()
if file_extension in [".csv", ".tsv"]:
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(
taxonomy,
sep=kwargs.pop("sep", ","),
header=kwargs.pop("header", "infer"),
index_col=kwargs.pop("index_col", None),
).loc[:, taxonomy_columns]
elif file_extension in [".biom", ".biome"]:
tmp_taxonomy, new_metadata = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({"biom": new_metadata})
else:
raise NotImplementedError("File type is not supported.")
else:
raise FileNotFoundError("Provided `taxonomy` file path is invalid.")
else:
raise TypeError("Provided `taxonomy` has invalid type.")
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs)
@classmethod
def from_csv(
cls,
filepath: str,
taxonomy_columns: Union[str, int, Sequence[Union[int, str]]] = None,
**kwargs: Any
) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
        Returns
        -------
        Instance of
        :class:`.RepTaxonomy`
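        Examples
        --------
        Illustrative only; the file name and column label are assumptions:
        >>> tax = RepTaxonomy.from_csv("taxonomy.csv", taxonomy_columns="lineage")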
"""
if taxonomy_columns is None:
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
else:
if isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"filepath": path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs)
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> "RepTaxonomy":
"""Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
        :class:`.RepTaxonomy`
"""
taxonomy_frame, new_metadata = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop("metadata", {})
tmp_metadata.update({"biom": new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs)
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[pd.DataFrame, dict]:
"""Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility
"""
biom_file = biom.load_table(filepath)
if biom_file.metadata(axis="observation") is not None:
obs_data = biom_file.metadata_to_dataframe("observation")
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [
colname
for tax_name in BIOM_TAXONOMY_NAMES
for colname in col_names_low
if colname[::-1].find(tax_name[::-1]) < 3
and colname[::-1].find(tax_name[::-1]) > -1
]
metadata_cols = [
col for col in col_names if col.lower() not in avail_col_names
]
if len(avail_col_names) == 1:
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return taxonomy_frame, tmp_metadata
else:
raise ValueError("Biom file does not contain observation metadata.")
def _remove_features_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if len(tmp_ids) > 0:
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action("_remove_features_by_id", ids, **kwargs)
def _merge_features_by_map(
self, map_dict: Mapper, done: bool = False, **kwargs: Any
) -> Optional[Mapper]:
"""Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility
"""
if not done:
raise NotImplementedError
if map_dict:
return self._ratify_action(
"_merge_features_by_map",
map_dict,
_annotations=self.__internal_taxonomy.loc[:, "lineage"].to_dict(),
**kwargs
)
def drop_feature_by_id(
self, ids: AnyGenericIdentifier, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility
"""
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() == len(target_ids):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError("Invalid feature ids are provided.")
def get_taxonomy_by_id(
self, ids: Optional[AnyGenericIdentifier] = None
) -> pd.DataFrame:
"""Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
        :class:`pandas.DataFrame` with taxonomy data
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if self.xrid.isin(target_ids).sum() <= len(target_ids):
return self.__internal_taxonomy.loc[target_ids, self.__avail_ranks]
else:
raise ValueError("Invalid feature ids are provided.")
def get_lineage_by_id(
self,
ids: Optional[AnyGenericIdentifier] = None,
missing_rank: bool = False,
desired_ranks: Union[bool, Sequence[str]] = False,
drop_ranks: Union[bool, Sequence[str]] = False,
**kwargs: Any
) -> pd.Series:
"""Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
            If True, will generate prefixes like `s__` or `d__` for missing ranks
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
        :class:`pandas.Series` with consensus lineages and corresponding IDs
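        Examples
        --------
        Illustrative only:
        >>> lineages = taxonomy.get_lineage_by_id(missing_rank=True)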
"""
if ids is None:
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = VALID_RANKS if desired_ranks is False else desired_ranks
total_valid_rids = self.xrid.isin(target_ids).sum()
if total_valid_rids == len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[target_ids],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
elif total_valid_rids < len(target_ids):
return generate_lineages_from_taxa(
self.__internal_taxonomy.loc[np.unique(target_ids)],
missing_rank,
tmp_desired_ranks,
drop_ranks,
)
else:
raise ValueError("Invalid feature ids are provided.")
def find_features_by_pattern(
self, pattern_str: str, case_sensitive: bool = False, regex: bool = False
) -> np.ndarray:
"""Searches for features with taxa that matches `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
        :class:`~numpy.ndarray` with indices
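        Examples
        --------
        Illustrative only; the search pattern is an assumption:
        >>> hits = taxonomy.find_features_by_pattern("bacteroides")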
"""
return self.__internal_taxonomy[
self.__internal_taxonomy.loc[:, "lineage"].str.contains(
pattern_str, case=case_sensitive, regex=regex
)
].index.values
def drop_features_without_taxa(
self, **kwargs: Any
) -> Optional[AnyGenericIdentifier]:
"""Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs)
def drop_features_without_ranks(
self, ranks: Sequence[str], any: bool = False, **kwargs: Any
) -> Optional[AnyGenericIdentifier]: # Done
"""Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
            If True, removes features where any of the `ranks` is missing.
            If False, all `ranks` must be missing.
kwargs
Compatibility
"""
target_ranks = np.asarray(ranks)
if self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(
target_ranks
):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (
no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1)
)
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError("Invalid ranks are provided.")
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
"""Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility
"""
ret = {}
groupby = self.__internal_taxonomy.groupby("lineage")
if any([len(group) > 1 for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for lineage, feature_ids in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs)
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
"""Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility
"""
ret = {}
if not isinstance(level, str):
raise TypeError("`rank` must have str type.")
if level in self.__avail_ranks:
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(
self.__internal_taxonomy, False, target_ranks, False
)
groups = tmp_lineages.groupby(tmp_lineages)
if len(groups.groups) > 1:
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for lineage, feature_ids in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(
pd.Series(data=tmp_feature_lineage, index=group_indices)
)
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError("Invalid rank are provided.")
return self._merge_features_by_map(ret, True, **kwargs)
def find_features_without_taxa(self) -> np.ndarray:
"""Find features without taxa.
Returns
-------
        :class:`~numpy.ndarray` with feature indices.
"""
return self.__internal_taxonomy.loc[
self.__internal_taxonomy.loc[:, VALID_RANKS].agg(
lambda rank: len("".join(map(lambda x: (str(x or "")), rank))), axis=1
)
< 1
].index.values
def get_subset(
self, rids: Optional[AnyGenericIdentifier] = None, *args, **kwargs: Any
) -> "RepTaxonomy":
"""Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
        :class:`.RepTaxonomy`
"""
if rids is None:
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if not self.xrid.isin(target_rids).sum() == len(target_rids):
raise ValueError("Invalid feature ids are provided.")
return type(self)(
taxonomy=self.__internal_taxonomy.loc[target_rids, "lineage"],
metadata=self.metadata,
name=self.name,
)
def _export(
self, taxlike: str = "lineage", ascending: bool = True, **kwargs: Any
) -> Tuple[pd.Series, dict]:
"""Creates taxonomy for export.
Parameters
----------
        taxlike
            Format of the generated taxonomy (currently only `lineage` is supported).
        ascending
            Sort order.
kwargs
Compatibility
"""
if taxlike == "lineage":
return (
self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending),
kwargs,
)
else:
            raise NotImplementedError
def export(
self,
output_fp: str,
*args,
_add_ext: bool = False,
sep: str = ",",
**kwargs: Any
) -> None:
"""Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility
"""
tmp_export, rkwarg = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv("{}.csv".format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep)
def copy(self) -> "RepTaxonomy":
"""Copy of the instance."""
return type(self)(
taxonomy=self.__internal_taxonomy.loc[:, "lineage"],
metadata=self.metadata,
name=self.name,
)
def __fix_taxon_names(self) -> None:
"""Fix invalid taxon names."""
def taxon_fixer(taxon):
if taxon is not None and pd.notna(taxon):
tmp_taxon_trimmed = taxon.lower().strip()
if len(tmp_taxon_trimmed) > 0:
if tmp_taxon_trimmed[0] == "[":
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if tmp_taxon_trimmed[-1] == "]":
tmp_taxon_trimmed = tmp_taxon_trimmed[:-1]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[
:, VALID_RANKS
].applymap(taxon_fixer)
def __reconstruct_internal_lineages(self) -> None:
"""Reconstruct the internal lineages."""
self.__internal_taxonomy.loc[:, "lineage"] = generate_lineages_from_taxa(
self.__internal_taxonomy, True, self.__avail_ranks, False
)
def __init_internal_taxonomy(
self,
taxonomy_data: Union[pd.Series, pd.DataFrame],
taxonomy_notation: Optional[str] = "greengenes",
order_ranks: Optional[Sequence[str]] = None,
**kwargs: Any
) -> None:
"""Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
            The 'silva' notation requires `order_ranks`.
kwargs
Compatibility
"""
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data, taxonomy_notation, order_ranks
)
elif isinstance(taxonomy_data, pd.DataFrame):
if taxonomy_data.shape[1] == 1:
taxonomy_data_series = pd.Series(
data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index
)
new_taxonomy = self.__init_taxonomy_from_lineages(
taxonomy_data_series, taxonomy_notation, order_ranks
)
else:
new_taxonomy = self.__init_taxonomy_from_frame(
taxonomy_data, taxonomy_notation, order_ranks
)
else:
raise RuntimeError(
"`taxonomy_data` must be either pd.Series or pd.Dataframe"
)
if new_taxonomy is None:
raise ValueError("Provided taxonomy is invalid.")
# Assign newly constructed taxonomy to the self.__internal_taxonomy
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names() # Fix incorrect taxa
tmp_avail_ranks = [rank for rank in VALID_RANKS if rank in new_taxonomy.columns]
self.__avail_ranks = [
rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()
]
# Reconstruct internal lineages for default greengenes notation
self.__reconstruct_internal_lineages()
self._init_state = True
def __init_taxonomy_from_lineages(
self,
taxonomy_series: pd.Series,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done
"""Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
"""
        # Check if the taxonomy notation is known and available for parsing.
        # Otherwise indentify_taxon_notation() will try to identify the notation.
if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:
notation = taxonomy_notation
else:
            # Get the first lineage sample for notation testing, assuming the rest use the same notation
sample_taxon = taxonomy_series.iloc[0]
# Identify notation of the lineage string
notation = indentify_taxon_notation(sample_taxon)
if order_ranks is not None:
if all([rank in VALID_RANKS for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if notation == "greengenes":
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {
elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS
}
for rank in VALID_RANKS:
if rank not in tmp_taxa_dict.keys():
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append([None] + tmp_taxa_ordered)
taxonomy = pd.DataFrame(
index=ordered_indices_list,
data=ordered_taxa_list,
columns=["lineage"] + VALID_RANKS,
)
return taxonomy
elif notation == "qiime":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=lambda x: x[0])
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank, taxon in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif notation == "silva":
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(";")
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for rank_i, taxon in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [
rank for rank in target_order_ranks if rank in VALID_RANKS
]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [
rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]
][::-1]
for rank in VALID_RANKS:
if rank not in tmp_taxonomy_df.columns:
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError
def __init_taxonomy_from_frame(
self,
taxonomy_dataframe: pd.DataFrame,
taxonomy_notation: Optional[str],
order_ranks: Optional[Sequence[str]],
) -> pd.DataFrame: # Done # For now only pass to _init_taxonomy_from_series
"""Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
            List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame`
"""
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if valid_ranks is not None:
if len(valid_ranks) > 0:
return pd.concat(
[
taxonomy_dataframe,
pd.DataFrame(
data="",
index=taxonomy_dataframe.index,
columns=[
rank for rank in VALID_RANKS if rank not in valid_ranks
],
),
],
axis=1,
)
else:
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join(taxa.values.tolist()), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply(
lambda taxa: ";".join([(t if isinstance(t,str) else '') for t in taxa.values]), axis=1
)
return self.__init_taxonomy_from_lineages(
taxonomy_series, taxonomy_notation, order_ranks
)
@property
def avail_ranks(self) -> Sequence[str]:
"""List of available taxonomic ranks."""
return self.__avail_ranks
@property
def duplicated(self) -> pd.Index:
"""List of duplicated feature indices."""
return self.__internal_taxonomy.index[
self.__internal_taxonomy["lineage"].duplicated(keep=False)
]
@property
def data(self) -> pd.DataFrame:
"""Actual data representation as pd.DataFrame."""
return self.__internal_taxonomy
@property
def xrid(self) -> pd.Index:
"""Feature indices as pd.Index."""
return self.__internal_taxonomy.index
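# Illustrative usage sketch for the class above, kept as comments because it
# requires real lineage data; the feature ids, lineage strings, file name, and
# the Greengenes-style rank labels ('p', 'c') are hypothetical, and the
# constructor call mirrors the one used in `get_subset`/`copy`:
#
#   import pandas as pd
#   lineages = pd.Series(
#       {"f1": "k__Bacteria; p__Firmicutes; c__Bacilli",
#        "f2": "k__Bacteria; p__Firmicutes"})
#   tax = RepTaxonomy(taxonomy=lineages)
#   tax.drop_features_without_ranks(ranks=["c"])  # drops features lacking a class-level taxon
#   tax.merge_features_by_rank("p")               # collapse features to phylum level
#   tax.export("taxonomy.csv")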
|
[
"pmaf.internal._shared.generate_lineages_from_taxa",
"pandas.read_csv",
"pmaf.internal._shared.indentify_taxon_notation",
"pmaf.internal._shared.extract_valid_ranks",
"pmaf.internal._shared.get_rank_upto",
"biom.load_table",
"numpy.asarray",
"pandas.DataFrame",
"warnings.simplefilter",
"pandas.notna",
"os.path.splitext",
"pmaf.internal._constants.jRegexGG.findall",
"os.path.isfile",
"pmaf.internal._constants.jRegexQIIME.findall",
"pandas.Series",
"pandas.DataFrame.from_records",
"pmaf.internal._shared.cols2ranks",
"numpy.unique",
"collections.defaultdict",
"os.path.abspath"
] |
[((17, 72), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (38, 72), False, 'import warnings\n'), ((6508, 6533), 'biom.load_table', 'biom.load_table', (['filepath'], {}), '(filepath)\n', (6523, 6533), False, 'import biom\n'), ((7990, 8049), 'numpy.asarray', 'np.asarray', (['ids'], {'dtype': 'self.__internal_taxonomy.index.dtype'}), '(ids, dtype=self.__internal_taxonomy.index.dtype)\n', (8000, 8049), True, 'import numpy as np\n'), ((9250, 9265), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (9260, 9265), True, 'import numpy as np\n'), ((13618, 13635), 'numpy.asarray', 'np.asarray', (['ranks'], {}), '(ranks)\n', (13628, 13635), True, 'import numpy as np\n'), ((20462, 20553), 'pmaf.internal._shared.generate_lineages_from_taxa', 'generate_lineages_from_taxa', (['self.__internal_taxonomy', '(True)', 'self.__avail_ranks', '(False)'], {}), '(self.__internal_taxonomy, True, self.\n __avail_ranks, False)\n', (20489, 20553), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((28635, 28695), 'pmaf.internal._shared.extract_valid_ranks', 'extract_valid_ranks', (['taxonomy_dataframe.columns', 'VALID_RANKS'], {}), '(taxonomy_dataframe.columns, VALID_RANKS)\n', (28654, 28695), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((5017, 5048), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (5028, 5048), True, 'import pandas as pd\n'), ((9959, 9974), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (9969, 9974), True, 'import numpy as np\n'), ((11249, 11264), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (11259, 11264), True, 'import numpy as np\n'), ((11477, 11595), 'pmaf.internal._shared.generate_lineages_from_taxa', 'generate_lineages_from_taxa', (['self.__internal_taxonomy.loc[target_ids]', 'missing_rank', 'tmp_desired_ranks', 'drop_ranks'], {}), '(self.__internal_taxonomy.loc[target_ids],\n missing_rank, tmp_desired_ranks, drop_ranks)\n', (11504, 11595), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((15572, 15616), 'pmaf.internal._shared.get_rank_upto', 'get_rank_upto', (['self.avail_ranks', 'level', '(True)'], {}), '(self.avail_ranks, level, True)\n', (15585, 15616), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((24044, 24082), 'pmaf.internal._shared.indentify_taxon_notation', 'indentify_taxon_notation', (['sample_taxon'], {}), '(sample_taxon)\n', (24068, 24082), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((25125, 25229), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ordered_indices_list', 'data': 'ordered_taxa_list', 'columns': "(['lineage'] + VALID_RANKS)"}), "(index=ordered_indices_list, data=ordered_taxa_list, columns=[\n 'lineage'] + VALID_RANKS)\n", (25137, 25229), True, 'import pandas as pd\n'), ((29610, 29648), 'pmaf.internal._shared.cols2ranks', 'cols2ranks', (['taxonomy_dataframe.columns'], {}), '(taxonomy_dataframe.columns)\n', (29620, 29648), False, 
'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((5399, 5421), 'os.path.abspath', 'path.abspath', (['filepath'], {}), '(filepath)\n', (5411, 5421), False, 'from os import path\n'), ((14905, 14961), 'pandas.Series', 'pd.Series', ([], {'data': 'tmp_feature_lineage', 'index': 'group_indices'}), '(data=tmp_feature_lineage, index=group_indices)\n', (14914, 14961), True, 'import pandas as pd\n'), ((15677, 15763), 'pmaf.internal._shared.generate_lineages_from_taxa', 'generate_lineages_from_taxa', (['self.__internal_taxonomy', '(False)', 'target_ranks', '(False)'], {}), '(self.__internal_taxonomy, False, target_ranks, \n False)\n', (15704, 15763), False, 'from pmaf.internal._shared import generate_lineages_from_taxa, get_rank_upto, indentify_taxon_notation, validate_ranks, extract_valid_ranks, cols2ranks\n'), ((19641, 19656), 'pandas.notna', 'pd.notna', (['taxon'], {}), '(taxon)\n', (19649, 19656), True, 'import pandas as pd\n'), ((24628, 24656), 'pmaf.internal._constants.jRegexGG.findall', 'jRegexGG.findall', (['lineage[1]'], {}), '(lineage[1])\n', (24644, 24656), False, 'from pmaf.internal._constants import AVAIL_TAXONOMY_NOTATIONS, jRegexGG, jRegexQIIME, BIOM_TAXONOMY_NAMES, VALID_RANKS\n'), ((25965, 26010), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['tmp_taxa_dict_list'], {}), '(tmp_taxa_dict_list)\n', (25990, 26010), True, 'import pandas as pd\n'), ((2403, 2424), 'os.path.isfile', 'path.isfile', (['taxonomy'], {}), '(taxonomy)\n', (2414, 2424), False, 'from os import path\n'), ((17561, 17577), 'numpy.asarray', 'np.asarray', (['rids'], {}), '(rids)\n', (17571, 17577), True, 'import numpy as np\n'), ((21661, 21728), 'pandas.Series', 'pd.Series', ([], {'data': 'taxonomy_data.iloc[:, 0]', 'index': 'taxonomy_data.index'}), '(data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index)\n', (21670, 21728), True, 'import pandas as pd\n'), ((25552, 25583), 'pmaf.internal._constants.jRegexQIIME.findall', 'jRegexQIIME.findall', (['lineage[1]'], {}), '(lineage[1])\n', (25571, 25583), False, 'from pmaf.internal._constants import AVAIL_TAXONOMY_NOTATIONS, jRegexGG, jRegexQIIME, BIOM_TAXONOMY_NAMES, VALID_RANKS\n'), ((25669, 25686), 'collections.defaultdict', 'defaultdict', (['None'], {}), '(None)\n', (25680, 25686), False, 'from collections import defaultdict\n'), ((27120, 27165), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['tmp_taxa_dict_list'], {}), '(tmp_taxa_dict_list)\n', (27145, 27165), True, 'import pandas as pd\n'), ((5144, 5175), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (5155, 5175), True, 'import pandas as pd\n'), ((5251, 5282), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath, **kwargs)\n', (5262, 5282), True, 'import pandas as pd\n'), ((11813, 11834), 'numpy.unique', 'np.unique', (['target_ids'], {}), '(target_ids)\n', (11822, 11834), True, 'import numpy as np\n'), ((16316, 16372), 'pandas.Series', 'pd.Series', ([], {'data': 'tmp_feature_lineage', 'index': 'group_indices'}), '(data=tmp_feature_lineage, index=group_indices)\n', (16325, 16372), True, 'import pandas as pd\n'), ((26757, 26774), 'collections.defaultdict', 'defaultdict', (['None'], {}), '(None)\n', (26768, 26774), False, 'from collections import defaultdict\n'), ((28893, 29017), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '""""""', 'index': 'taxonomy_dataframe.index', 'columns': '[rank for rank in VALID_RANKS if rank not in 
valid_ranks]'}), "(data='', index=taxonomy_dataframe.index, columns=[rank for\n rank in VALID_RANKS if rank not in valid_ranks])\n", (28905, 29017), True, 'import pandas as pd\n'), ((2459, 2482), 'os.path.splitext', 'path.splitext', (['taxonomy'], {}), '(taxonomy)\n', (2472, 2482), False, 'from os import path\n')]
|
from typing import List
import numpy as np
def mask_nan(arrays: List[np.ndarray]) -> List[np.ndarray]:
"""
Drop indices from equal-sized arrays if the element at that index is NaN in
any of the input arrays.
Parameters
----------
arrays : List[np.ndarray]
list of ndarrays containing NaNs, to be masked
Returns
-------
List[np.ndarray]
masked arrays (free of NaNs)
Notes
-----
    This function finds the indices at which the element is NaN in one or
    more of the input arrays, then drops those indices from all arrays.
For example:
>> a = np.array([0, 1, np.nan, 3])
>> b = np.array([np.nan, 5, np.nan, 7])
>> c = np.array([8, 9, 10, 11])
>> mask_nan([a, b, c])
[array([ 1., 3.]), array([ 5., 7.]), array([ 9, 11])]
"""
n = arrays[0].size
assert all(a.size == n for a in arrays[1:])
mask = np.array([False] * n)
for arr in arrays:
mask = np.logical_or(mask, np.isnan(arr))
return [arr[np.where(~mask)[0]] for arr in arrays]
|
[
"numpy.where",
"numpy.array",
"numpy.isnan"
] |
[((908, 929), 'numpy.array', 'np.array', (['([False] * n)'], {}), '([False] * n)\n', (916, 929), True, 'import numpy as np\n'), ((988, 1001), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (996, 1001), True, 'import numpy as np\n'), ((1019, 1034), 'numpy.where', 'np.where', (['(~mask)'], {}), '(~mask)\n', (1027, 1034), True, 'import numpy as np\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
# continuously differentiable
fn_dict_cdiff = {'2dpoly': 1, 'sigmoid': 2,
'sin': 3, 'frequent_sin': 4,
'3dpoly': 7, 'linear': 8}
# continuous but not differentiable
fn_dict_cont = {'abs': 0, 'abs_sqrt': 5, 'rand_pw': 9,
'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}
# discontinuous
fn_dict_disc = {'step': 6, 'band': 12, 'invband': 13,
'steplinear': 14}
# monotone
fn_dict_monotone = {'sigmoid': 2,
'step': 6, 'linear': 8,
'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}
# convex
fn_dict_convex = {'abs': 0, '2dpoly': 1, 'linear': 8,
'abspos': 10, 'sqrpos': 11}
# all functions
fn_dict = {'abs': 0, '2dpoly': 1, 'sigmoid': 2,
'sin': 3, 'frequent_sin': 4, 'abs_sqrt': 5,
'step': 6, '3dpoly': 7, 'linear': 8, 'rand_pw': 9,
'abspos': 10, 'sqrpos': 11, 'band': 12, 'invband': 13,
'steplinear': 14, 'pwlinear': 15}
def generate_random_pw_linear(lb=-2, ub=2, n_pieces=5):
splits = np.random.choice(np.arange(lb, ub, 0.1),
n_pieces - 1, replace=False)
splits.sort()
slopes = np.random.uniform(-4, 4, size=n_pieces)
start = []
start.append(np.random.uniform(-1, 1))
for t in range(n_pieces - 1):
start.append(start[t] + slopes[t] * (splits[t] -
(lb if t == 0 else splits[t - 1])))
return lambda x: [start[ind] + slopes[ind] * (x - (lb if ind == 0 else splits[ind - 1])) for ind in [np.searchsorted(splits, x)]][0]
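# Note: the returned callable evaluates the random piecewise-linear curve at a
# point, e.g. f = generate_random_pw_linear(); f(0.3) gives a scalar value.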
def get_tau_fn(func):
def first(x):
return x[:, [0]] if len(x.shape) == 2 else x
# func describes the relation between response and treatment
if func == fn_dict['abs']:
def tau_fn(x): return np.abs(first(x))
elif func == fn_dict['2dpoly']:
def tau_fn(x): return -1.5 * first(x) + .9 * (first(x)**2)
elif func == fn_dict['sigmoid']:
def tau_fn(x): return 2 / (1 + np.exp(-2 * first(x)))
elif func == fn_dict['sin']:
def tau_fn(x): return np.sin(first(x))
elif func == fn_dict['frequent_sin']:
def tau_fn(x): return np.sin(3 * first(x))
elif func == fn_dict['abs_sqrt']:
def tau_fn(x): return np.sqrt(np.abs(first(x)))
elif func == fn_dict['step']:
def tau_fn(x): return 1. * (first(x) < 0) + 2.5 * (first(x) >= 0)
elif func == fn_dict['3dpoly']:
def tau_fn(x): return -1.5 * first(x) + .9 * \
(first(x)**2) + first(x)**3
elif func == fn_dict['linear']:
def tau_fn(x): return first(x)
elif func == fn_dict['rand_pw']:
pw_linear = generate_random_pw_linear()
def tau_fn(x):
return np.array([pw_linear(x_i) for x_i in first(x).flatten()]).reshape(-1, 1)
elif func == fn_dict['abspos']:
def tau_fn(x): return np.abs(first(x)) * (first(x) >= 0)
elif func == fn_dict['sqrpos']:
def tau_fn(x): return (first(x)**2) * (first(x) >= 0)
elif func == fn_dict['band']:
def tau_fn(x): return 1.0 * (first(x) >= -.75) * (first(x) <= .75)
elif func == fn_dict['invband']:
def tau_fn(x): return 1. - 1. * (first(x) >= -.75) * (first(x) <= .75)
elif func == fn_dict['steplinear']:
def tau_fn(x): return 2. * (first(x) >= 0) - first(x)
elif func == fn_dict['pwlinear']:
def tau_fn(x):
q = first(x)
return (q + 1) * (q <= -1) + (q - 1) * (q >= 1)
else:
raise NotImplementedError()
return tau_fn
def standardize(z, p, y, fn):
ym = y.mean()
ystd = y.std()
y = (y - ym) / ystd
def newfn(x): return (fn(x) - ym) / ystd
return z, p, y, newfn
def get_data(n_samples, n_instruments, iv_strength, tau_fn, dgp_num):
# Construct dataset
# z:- instruments (features included here, can be high-dimensional)
# p :- treatments (features included here as well, can be high-dimensional)
# y :- response (is a scalar always)
confounder = np.random.normal(0, 1, size=(n_samples, 1))
z = np.random.normal(0, 1, size=(n_samples, n_instruments))
fn = tau_fn
if dgp_num == 1:
# DGP 1 in the paper
p = 2 * z[:, [0]] * (z[:, [0]] > 0) * iv_strength \
+ 2 * z[:, [1]] * (z[:, [1]] < 0) * iv_strength \
+ 2 * confounder * (1 - iv_strength) + \
np.random.normal(0, .1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, .1, size=(n_samples, 1))
elif dgp_num == 2:
# DGP 2 in the paper
p = 2 * z[:, [0]] * iv_strength \
+ 2 * confounder * (1 - iv_strength) + \
np.random.normal(0, .1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, .1, size=(n_samples, 1))
elif dgp_num == 3:
# DeepIV's DGP - has feature variables as well
# z is 3-dimensional: composed of (1) 1D z, (2) t - time unif~(0,10), and (3) s - customer type {1,...,7}
# y is related to p and z in a complex non-linear, non separable manner
# p is related to z again in a non-separable manner, rho is endogeneity parameter
rho = 0.8
psd = 3.7
pmu = 17.779
ysd = 158.
ymu = -292.1
z_1 = np.random.normal(0, 1, size=(n_samples, 1))
v = np.random.normal(0, 1, size=(n_samples, 1))
t = np.random.uniform(0, 10, size=(n_samples, 1))
s = np.random.randint(1, 8, size=(n_samples, 1))
e = rho * v + \
np.random.normal(0, np.sqrt(1 - rho**2), size=(n_samples, 1))
def psi(t): return 2 * (np.power(t - 5, 4) / 600 +
np.exp(-4 * np.power(t - 5, 2)) + t / 10 - 2)
p = 25 + (z_1 + 3) * psi(t) + v
p = (p - pmu) / psd
g = (10 + p) * s * psi(t) - 2 * p + e
y = (g - ymu) / ysd
z = np.hstack((z_1, s, t))
p = np.hstack((p, s, t))
def fn(p): return ((10 + p[:, 0]) * p[:, 1]
* psi(p[:, 2]) - 2 * p[:, 0] - ymu) / ysd
elif dgp_num == 4:
# Many weak Instruments DGP - n_instruments can be very large
z = np.random.normal(0.5, 1, size=(n_samples, n_instruments))
p = np.amin(z, axis=1).reshape(-1, 1) * iv_strength + confounder * \
(1 - iv_strength) + np.random.normal(0, 0.1, size=(n_samples, 1))
y = fn(p) + 2 * confounder + \
np.random.normal(0, 0.1, size=(n_samples, 1))
else:
# Here we have equal number of treatments and instruments and each
# instrument affects a separate treatment. Only the first treatment
# matters for the outcome.
z = np.random.normal(0, 2, size=(n_samples, n_instruments))
U = np.random.normal(0, 2, size=(n_samples, 1))
delta = np.random.normal(0, .1, size=(n_samples, 1))
zeta = np.random.normal(0, .1, size=(n_samples, 1))
p = iv_strength * z + (1 - iv_strength) * U + delta
y = fn(p) + U + zeta
return standardize(z, p, y, fn)
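# Small illustrative smoke test; the parameter values below are arbitrary
# choices for demonstration, not defaults taken from the paper.
if __name__ == '__main__':
    tau = get_tau_fn(fn_dict['sigmoid'])
    z, p, y, tau_std = get_data(n_samples=1000, n_instruments=2,
                                iv_strength=0.5, tau_fn=tau, dgp_num=1)
    print('z:', z.shape, 'p:', p.shape, 'y:', y.shape)
    print('standardized tau at first treatments:', tau_std(p[:3]).flatten())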
|
[
"numpy.random.normal",
"numpy.sqrt",
"numpy.amin",
"numpy.hstack",
"numpy.searchsorted",
"numpy.power",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.arange"
] |
[((1272, 1311), 'numpy.random.uniform', 'np.random.uniform', (['(-4)', '(4)'], {'size': 'n_pieces'}), '(-4, 4, size=n_pieces)\n', (1289, 1311), True, 'import numpy as np\n'), ((4108, 4151), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, 1)'}), '(0, 1, size=(n_samples, 1))\n', (4124, 4151), True, 'import numpy as np\n'), ((4160, 4215), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, n_instruments)'}), '(0, 1, size=(n_samples, n_instruments))\n', (4176, 4215), True, 'import numpy as np\n'), ((1158, 1180), 'numpy.arange', 'np.arange', (['lb', 'ub', '(0.1)'], {}), '(lb, ub, 0.1)\n', (1167, 1180), True, 'import numpy as np\n'), ((1344, 1368), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1361, 1368), True, 'import numpy as np\n'), ((4470, 4515), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4486, 4515), True, 'import numpy as np\n'), ((4566, 4611), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4582, 4611), True, 'import numpy as np\n'), ((4770, 4815), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4786, 4815), True, 'import numpy as np\n'), ((4866, 4911), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (4882, 4911), True, 'import numpy as np\n'), ((5384, 5427), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, 1)'}), '(0, 1, size=(n_samples, 1))\n', (5400, 5427), True, 'import numpy as np\n'), ((5440, 5483), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_samples, 1)'}), '(0, 1, size=(n_samples, 1))\n', (5456, 5483), True, 'import numpy as np\n'), ((5496, 5541), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(n_samples, 1)'}), '(0, 10, size=(n_samples, 1))\n', (5513, 5541), True, 'import numpy as np\n'), ((5554, 5598), 'numpy.random.randint', 'np.random.randint', (['(1)', '(8)'], {'size': '(n_samples, 1)'}), '(1, 8, size=(n_samples, 1))\n', (5571, 5598), True, 'import numpy as np\n'), ((5989, 6011), 'numpy.hstack', 'np.hstack', (['(z_1, s, t)'], {}), '((z_1, s, t))\n', (5998, 6011), True, 'import numpy as np\n'), ((6024, 6044), 'numpy.hstack', 'np.hstack', (['(p, s, t)'], {}), '((p, s, t))\n', (6033, 6044), True, 'import numpy as np\n'), ((6272, 6329), 'numpy.random.normal', 'np.random.normal', (['(0.5)', '(1)'], {'size': '(n_samples, n_instruments)'}), '(0.5, 1, size=(n_samples, n_instruments))\n', (6288, 6329), True, 'import numpy as np\n'), ((6790, 6845), 'numpy.random.normal', 'np.random.normal', (['(0)', '(2)'], {'size': '(n_samples, n_instruments)'}), '(0, 2, size=(n_samples, n_instruments))\n', (6806, 6845), True, 'import numpy as np\n'), ((6858, 6901), 'numpy.random.normal', 'np.random.normal', (['(0)', '(2)'], {'size': '(n_samples, 1)'}), '(0, 2, size=(n_samples, 1))\n', (6874, 6901), True, 'import numpy as np\n'), ((6918, 6963), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6934, 6963), True, 'import numpy as np\n'), ((6978, 7023), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6994, 7023), True, 'import numpy as np\n'), ((1647, 
1673), 'numpy.searchsorted', 'np.searchsorted', (['splits', 'x'], {}), '(splits, x)\n', (1662, 1673), True, 'import numpy as np\n'), ((5655, 5676), 'numpy.sqrt', 'np.sqrt', (['(1 - rho ** 2)'], {}), '(1 - rho ** 2)\n', (5662, 5676), True, 'import numpy as np\n'), ((6439, 6484), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6455, 6484), True, 'import numpy as np\n'), ((6536, 6581), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(n_samples, 1)'}), '(0, 0.1, size=(n_samples, 1))\n', (6552, 6581), True, 'import numpy as np\n'), ((5730, 5748), 'numpy.power', 'np.power', (['(t - 5)', '(4)'], {}), '(t - 5, 4)\n', (5738, 5748), True, 'import numpy as np\n'), ((6342, 6360), 'numpy.amin', 'np.amin', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (6349, 6360), True, 'import numpy as np\n'), ((5801, 5819), 'numpy.power', 'np.power', (['(t - 5)', '(2)'], {}), '(t - 5, 2)\n', (5809, 5819), True, 'import numpy as np\n')]
|
import argparse
import os
import pathlib
import cv2
import pickle
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from numpy import genfromtxt
def parse_command_line_options(print_options=False):
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, default=-1)
parser.add_argument("-d", type=pathlib.Path)
parser.add_argument("-s", type=int, choices=[0], default=0)
parser.add_argument("-e", type=int, default=0)
parser.add_argument("-a", type=str, default="ars")
parser.add_argument("-i", type=int, default=100)
parser.add_argument("-g", action="store_true")
parser.add_argument("-r", action="store_true")
args = parser.parse_args()
flags = {
"itno": args.n,
"folder": str(args.d),
"spec_num": args.s,
"env_num": args.e,
"alg": args.a,
"num_iter": args.i,
"gpu_flag": args.g,
"render": args.r
}
if print_options:
print('**** Command Line Options ****')
for key in flags:
print('{}: {}'.format(key, flags[key]))
return flags
def open_log_file(itno, folder):
'''
Open a log file to periodically flush data.
Parameters:
itno: int
folder: str
'''
fname = _get_prefix(folder) + 'log' + _get_suffix(itno) + '.txt'
open(fname, 'w').close()
file = open(fname, 'a')
return file
def save_object(name, object, itno, folder):
'''
Save any pickle-able object.
Parameters:
name: str
object: Object
itno: int
folder: str
'''
file = open(_get_prefix(folder) + name + _get_suffix(itno) + '.pkl', 'wb')
pickle.dump(object, file)
file.close()
def load_object(name, itno, folder):
'''
Load pickled object.
Parameters:
name: str
itno: int
folder: str
'''
file = open(_get_prefix(folder) + name + _get_suffix(itno) + '.pkl', 'rb')
object = pickle.load(file)
file.close()
return object
def save_log_info(log_info, itno, folder):
np.save(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.npy', log_info)
def load_log_info(itno, folder, csv=False):
if csv:
return genfromtxt(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.csv', delimiter=',')
else:
return np.load(_get_prefix(folder) + 'log' + _get_suffix(itno) + '.npy')
def log_to_file(file, iter, num_transitions, reward, prob, additional_data={}):
'''
Log data to file.
Parameters:
file: file_handle
iter: int
num_transitions: int (number of simulation steps in each iter)
reward: float
prob: float (satisfaction probability)
additional_data: dict
'''
file.write('**** Iteration Number {} ****\n'.format(iter))
file.write('Environment Steps Taken: {}\n'.format(num_transitions))
file.write('Reward: {}\n'.format(reward))
file.write('Satisfaction Probability: {}\n'.format(prob))
for key in additional_data:
file.write('{}: {}\n'.format(key, additional_data[key]))
file.write('\n')
file.flush()
def get_image_dir(itno, folder):
image_dir = '{}img{}'.format(_get_prefix(folder), _get_suffix(itno))
    if not os.path.exists(image_dir):
os.mkdir(image_dir)
return image_dir
def generate_video(env, policy, itno, folder, max_step=10000):
image_dir = get_image_dir(itno, folder)
done = False
state = env.reset()
step = 0
while not done:
img_arr = env.render(mode='rgb_array')
img = Image.fromarray(img_arr)
img.save(image_dir + '/' + str(step) + '.png')
action = policy.get_action(state)
state, _, done, _ = env.step(action)
step += 1
if step > max_step:
done = True
video_name = image_dir + '/' + 'video.avi'
images_temp = [img for img in os.listdir(image_dir)]
images = []
for i in range(len(images_temp)):
for j in images_temp:
directory = str(i) + '.png'
if directory == j:
images.append(j)
frame = cv2.imread(os.path.join(image_dir, images_temp[0]))
height, width, _ = frame.shape
video = cv2.VideoWriter(
video_name, cv2.VideoWriter_fourcc(*'XVID'), 20, (width, height))
for image in images:
video.write(cv2.imread(os.path.join(image_dir, image)))
cv2.destroyAllWindows()
video.release()
def plot_for_threshold(itno, folders, xs, threshold, color):
ys = []
for folder in folders:
val = 0
count = 0
for j in range(itno):
data = load_log_info(j, folder)
for pos in range(len(data)):
if data[pos][-1] >= threshold:
val += data[pos][0]
count += 1
break
ys.append(val / count)
plt.subplots_adjust(bottom=0.145, left=0.13)
plt.rcParams.update({'font.size': 18})
plt.plot(xs, ys, '-ok', label='z = {}'.format(threshold), color=color)
def plot_error_bar(x, data, color, label, points=False):
'''
Plot the error bar from the data.
Parameters:
        x: np.array (x-axis values)
        data: (3+)-tuple of np.array (curve, lower error bar, upper error bar, ...)
        color: color of the plot
        label: string
        points: bool (if True, draw discrete error bars instead of a filled band)
'''
plt.subplots_adjust(bottom=0.126)
plt.rcParams.update({'font.size': 18})
if points:
plt.errorbar(x, data[0], data[0] - data[1], fmt='--o', color=color, label=label)
else:
plt.plot(x, data[0], color=color, label=label)
plt.fill_between(x, data[1], data[2], color=color, alpha=0.15)
def extract_plot_data(folder, column_num, low, up, csv=False):
'''
Load and parse log_info to generate error bars
Parameters:
folder: string (name of folder)
column_num: int (column number in log.npy to use)
        low: int (lower limit on run number)
        up: int (upper limit on run number)
Returns:
4-tuple of numpy arrays (curve, lower error bar, upper error bar, max_over_runs)
'''
log_infos = []
min_length = 1000000
for itno in range(low, up):
log_info = np.transpose(load_log_info(
itno, folder, csv=csv))[column_num]
log_info = np.append([0], log_info)
min_length = min(min_length, len(log_info))
log_infos.append(log_info)
log_infos = [log_info[:min_length] for log_info in log_infos]
data = np.array(log_infos)
curve = np.mean(data, axis=0)
std = np.std(data, axis=0)
max_curve = np.amax(data, axis=0)
return curve, (curve - std), (curve + std), max_curve
# save and render current plot
def save_plot(folder, name, show=True, scientific=True):
plt.rcParams.update({'font.size': 14})
plt.legend()
ax = plt.gca()
ax.xaxis.major.formatter._useMathText = True
if scientific:
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.savefig(_get_prefix(folder) + name + '.pdf', format='pdf')
if show:
plt.show()
# get prefix for file name
def _get_prefix(folder):
if folder == '':
return ''
else:
return folder + '/'
# get suffix from itno
def _get_suffix(itno):
if itno < 0:
return ''
else:
return str(itno)
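# Illustrative plotting workflow, kept as comments because it needs saved log
# files; the folder name, column number, and run range below are hypothetical:
#
#   curve = extract_plot_data('results/spec0', column_num=1, low=0, up=5)
#   plot_error_bar(np.arange(len(curve[0])), curve, color='blue', label='ars')
#   save_plot('results/spec0', 'reward_curve', show=False)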
|
[
"matplotlib.pyplot.fill_between",
"numpy.array",
"cv2.destroyAllWindows",
"matplotlib.pyplot.errorbar",
"numpy.mean",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"matplotlib.pyplot.gca",
"pickle.load",
"numpy.std",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"PIL.Image.fromarray",
"pickle.dump",
"os.path.join",
"numpy.append",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.ticklabel_format",
"numpy.amax"
] |
[((243, 268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (266, 268), False, 'import argparse\n'), ((1698, 1723), 'pickle.dump', 'pickle.dump', (['object', 'file'], {}), '(object, file)\n', (1709, 1723), False, 'import pickle\n'), ((1986, 2003), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1997, 2003), False, 'import pickle\n'), ((4414, 4437), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4435, 4437), False, 'import cv2\n'), ((4888, 4932), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.145)', 'left': '(0.13)'}), '(bottom=0.145, left=0.13)\n', (4907, 4932), True, 'from matplotlib import pyplot as plt\n'), ((4937, 4975), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (4956, 4975), True, 'from matplotlib import pyplot as plt\n'), ((5413, 5446), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.126)'}), '(bottom=0.126)\n', (5432, 5446), True, 'from matplotlib import pyplot as plt\n'), ((5451, 5489), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (5470, 5489), True, 'from matplotlib import pyplot as plt\n'), ((6545, 6564), 'numpy.array', 'np.array', (['log_infos'], {}), '(log_infos)\n', (6553, 6564), True, 'import numpy as np\n'), ((6577, 6598), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6584, 6598), True, 'import numpy as np\n'), ((6609, 6629), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6615, 6629), True, 'import numpy as np\n'), ((6646, 6667), 'numpy.amax', 'np.amax', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6653, 6667), True, 'import numpy as np\n'), ((6820, 6858), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (6839, 6858), True, 'from matplotlib import pyplot as plt\n'), ((6863, 6875), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6873, 6875), True, 'from matplotlib import pyplot as plt\n'), ((6885, 6894), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6892, 6894), True, 'from matplotlib import pyplot as plt\n'), ((3256, 3281), 'os.path.exists', 'os.path.exists', (['image_dir'], {}), '(image_dir)\n', (3270, 3281), False, 'import os\n'), ((3300, 3319), 'os.mkdir', 'os.mkdir', (['image_dir'], {}), '(image_dir)\n', (3308, 3319), False, 'import os\n'), ((3586, 3610), 'PIL.Image.fromarray', 'Image.fromarray', (['img_arr'], {}), '(img_arr)\n', (3601, 3610), False, 'from PIL import Image\n'), ((4140, 4179), 'os.path.join', 'os.path.join', (['image_dir', 'images_temp[0]'], {}), '(image_dir, images_temp[0])\n', (4152, 4179), False, 'import os\n'), ((4265, 4296), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4287, 4296), False, 'import cv2\n'), ((5513, 5598), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'data[0]', '(data[0] - data[1])'], {'fmt': '"""--o"""', 'color': 'color', 'label': 'label'}), "(x, data[0], data[0] - data[1], fmt='--o', color=color, label=label\n )\n", (5525, 5598), True, 'from matplotlib import pyplot as plt\n'), ((5612, 5658), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data[0]'], {'color': 'color', 'label': 'label'}), '(x, data[0], color=color, label=label)\n', (5620, 5658), True, 'from matplotlib import pyplot as plt\n'), ((5667, 5729), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'data[1]', 
'data[2]'], {'color': 'color', 'alpha': '(0.15)'}), '(x, data[1], data[2], color=color, alpha=0.15)\n', (5683, 5729), True, 'from matplotlib import pyplot as plt\n'), ((6356, 6380), 'numpy.append', 'np.append', (['[0]', 'log_info'], {}), '([0], log_info)\n', (6365, 6380), True, 'import numpy as np\n'), ((6971, 7032), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (6991, 7032), True, 'from matplotlib import pyplot as plt\n'), ((7121, 7131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7129, 7131), True, 'from matplotlib import pyplot as plt\n'), ((3905, 3926), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (3915, 3926), False, 'import os\n'), ((4376, 4406), 'os.path.join', 'os.path.join', (['image_dir', 'image'], {}), '(image_dir, image)\n', (4388, 4406), False, 'import os\n')]
|
import numpy as np
def histogram_r(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = I_min
elif (r_array[i][j] > I_max):
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 255
else:
p_out = int((r_array[i][j] - I_min) * ((255 - I_min) / (I_max - I_min)))+ I_min
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def histogram_g(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 0
elif (r_array[i][j] > I_max):
p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 255
else:
p_out = int((r_array[i][j] - I_min) * ((255) / (I_max - I_min)) )
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
def histogram_b(r_array,height, width):
length = height * width
R_rray = []
for i in range(height):
for j in range(width):
R_rray.append(r_array[i][j])
R_rray.sort()
I_min = int(R_rray[int(length / 500)])
I_max = int(R_rray[-int(length / 500)])
array_Global_histogram_stretching = np.zeros((height, width))
for i in range(0, height):
for j in range(0, width):
if r_array[i][j] < I_min:
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = 0
elif (r_array[i][j] > I_max):
# p_out = r_array[i][j]
array_Global_histogram_stretching[i][j] = I_max
else:
p_out = int((r_array[i][j] - I_min) * ((I_max) / (I_max - I_min)))
array_Global_histogram_stretching[i][j] = p_out
return (array_Global_histogram_stretching)
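# stretching() below applies global histogram stretching to each channel of a
# BGR image, clipping each channel at roughly its 0.2nd and 99.8th percentile
# values (length / 500 pixels from each end of the sorted channel).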
def stretching(img):
height = len(img)
width = len(img[0])
img[:, :, 2] = histogram_r(img[:, :, 2], height, width)
img[:, :, 1] = histogram_g(img[:, :, 1], height, width)
img[:, :, 0] = histogram_b(img[:, :, 0], height, width)
return img
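# Illustrative usage, assuming an 8-bit BGR image loaded with OpenCV (cv2 is
# not imported by this module; the file names are hypothetical):
#
#   import cv2
#   img = cv2.imread('underwater.png')
#   stretched = stretching(img)
#   cv2.imwrite('stretched.png', stretched)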
|
[
"numpy.zeros"
] |
[((349, 374), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (357, 374), True, 'import numpy as np\n'), ((1279, 1304), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (1287, 1304), True, 'import numpy as np\n'), ((2189, 2214), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (2197, 2214), True, 'import numpy as np\n')]
|
# Copyright 2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
def test_show_graph():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
nn.clear_parameters()
x = nn.Variable((2, 3, 4, 4))
with nn.parameter_scope('c1'):
h = PF.convolution(x, 8, (3, 3), pad=(1, 1))
h = F.relu(PF.batch_normalization(h))
with nn.parameter_scope('f1'):
y = PF.affine(h, 10)
with TBGraphWriter(log_dir='log_out') as tb:
tb.from_variable(y, output_name="y")
def test_show_curve():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
with TBGraphWriter(log_dir='log_out') as tb:
values = []
for i in range(360):
s = np.sin(i / 180.0 * np.pi)
tb.add_scalar("show_curve/sin", s, i)
values.append(s)
nd_values = np.array(values)
for i in range(10):
tb.add_histogram("histogram", nd_values, i)
nd_values += 0.05
|
[
"nnabla.parametric_functions.affine",
"nnabla.parametric_functions.convolution",
"nnabla.clear_parameters",
"nnabla.parametric_functions.batch_normalization",
"nnabla.parameter_scope",
"numpy.array",
"nnabla.Variable",
"numpy.sin",
"pytest.skip",
"nnabla.experimental.tb_graph_writer.TBGraphWriter"
] |
[((922, 943), 'nnabla.clear_parameters', 'nn.clear_parameters', ([], {}), '()\n', (941, 943), True, 'import nnabla as nn\n'), ((952, 977), 'nnabla.Variable', 'nn.Variable', (['(2, 3, 4, 4)'], {}), '((2, 3, 4, 4))\n', (963, 977), True, 'import nnabla as nn\n'), ((987, 1011), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""c1"""'], {}), "('c1')\n", (1005, 1011), True, 'import nnabla as nn\n'), ((1025, 1065), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', '(8)', '(3, 3)'], {'pad': '(1, 1)'}), '(x, 8, (3, 3), pad=(1, 1))\n', (1039, 1065), True, 'import nnabla.parametric_functions as PF\n'), ((1121, 1145), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""f1"""'], {}), "('f1')\n", (1139, 1145), True, 'import nnabla as nn\n'), ((1159, 1175), 'nnabla.parametric_functions.affine', 'PF.affine', (['h', '(10)'], {}), '(h, 10)\n', (1168, 1175), True, 'import nnabla.parametric_functions as PF\n'), ((1186, 1218), 'nnabla.experimental.tb_graph_writer.TBGraphWriter', 'TBGraphWriter', ([], {'log_dir': '"""log_out"""'}), "(log_dir='log_out')\n", (1199, 1218), False, 'from nnabla.experimental.tb_graph_writer import TBGraphWriter\n'), ((1492, 1524), 'nnabla.experimental.tb_graph_writer.TBGraphWriter', 'TBGraphWriter', ([], {'log_dir': '"""log_out"""'}), "(log_dir='log_out')\n", (1505, 1524), False, 'from nnabla.experimental.tb_graph_writer import TBGraphWriter\n'), ((1723, 1739), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1731, 1739), True, 'import numpy as np\n'), ((830, 903), 'pytest.skip', 'pytest.skip', (['"""Skip because tensorboardX and tensorflow is not installed."""'], {}), "('Skip because tensorboardX and tensorflow is not installed.')\n", (841, 903), False, 'import pytest\n'), ((1085, 1110), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['h'], {}), '(h)\n', (1107, 1110), True, 'import nnabla.parametric_functions as PF\n'), ((1395, 1468), 'pytest.skip', 'pytest.skip', (['"""Skip because tensorboardX and tensorflow is not installed."""'], {}), "('Skip because tensorboardX and tensorflow is not installed.')\n", (1406, 1468), False, 'import pytest\n'), ((1597, 1622), 'numpy.sin', 'np.sin', (['(i / 180.0 * np.pi)'], {}), '(i / 180.0 * np.pi)\n', (1603, 1622), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
from time import perf_counter as timer
def main():
x = np.load('data/cifar_test_x.npy')
y = np.load('data/cifar_test_y.npy').flatten()
interpreter = tf.lite.Interpreter(model_path='data/fbnet.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
pred = []
t0 = timer()
for i in range(len(x)):
interpreter.set_tensor(input_details[0]['index'], x[i:i+1])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
pred.append(output_data.argmax())
t = timer() - t0
print('total time: {:.2f}s, average: {:.2f}ms'.format(t, t * 1000 / len(x)))
print('accuracy: {}/{}'.format(sum(y == pred), len(x)))
return output_data
if __name__ == '__main__':
main()
|
[
"tensorflow.lite.Interpreter",
"numpy.load",
"time.perf_counter"
] |
[((104, 136), 'numpy.load', 'np.load', (['"""data/cifar_test_x.npy"""'], {}), "('data/cifar_test_x.npy')\n", (111, 136), True, 'import numpy as np\n'), ((207, 258), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""data/fbnet.tflite"""'}), "(model_path='data/fbnet.tflite')\n", (226, 258), True, 'import tensorflow as tf\n'), ((425, 432), 'time.perf_counter', 'timer', ([], {}), '()\n', (430, 432), True, 'from time import perf_counter as timer\n'), ((683, 690), 'time.perf_counter', 'timer', ([], {}), '()\n', (688, 690), True, 'from time import perf_counter as timer\n'), ((145, 177), 'numpy.load', 'np.load', (['"""data/cifar_test_y.npy"""'], {}), "('data/cifar_test_y.npy')\n", (152, 177), True, 'import numpy as np\n')]
|
################################################################################
# Module: schedule.py
# Description: Functions for handling conversion of EnergyPlus schedule objects
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import functools
import io
import logging as lg
from datetime import datetime, timedelta
import archetypal
import numpy as np
import pandas as pd
from archetypal import log
class Schedule(object):
"""An object designed to handle any EnergyPlys schedule object"""
def __init__(self, sch_name, idf=None, start_day_of_the_week=0,
strict=False, base_year=2018, schType=None, **kwargs):
"""
Args:
idf (IDF): IDF object
sch_name (str): The schedule name in the idf file
start_day_of_the_week (int): 0-based day of week (Monday=0)
strict (bool): if True, schedules that have the Field-Sets such
as Holidays and CustomDay will raise an error if they are absent
from the IDF file. If False, any missing qualifiers will be
ignored.
base_year (int): The base year of the schedule. Defaults to 2018
since the first day of that year is a Monday.
"""
super(Schedule, self).__init__(**kwargs)
self.strict = strict
self.idf = idf
self.schName = sch_name
self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)
self.year = base_year
self.startDate = self.start_date()
self.count = 0
self.startHOY = 1
self.endHOY = 24
self.unit = "unknown"
self.index_ = None
self.values = None
self.schType = schType
_type = kwargs.get('Type', None)
if _type is None:
self.schTypeLimitsName = self.get_schedule_type_limits_name(
sch_type=self.schType)
else:
self.schTypeLimitsName = _type
@classmethod
def constant_schedule(cls, hourly_value=1, Name='AlwaysOn', **kwargs):
idftxt = "VERSION, 8.9;" # Not an emplty string. has just the
# version number
# we can make a file handle of a string
fhandle = io.StringIO(idftxt)
# initialize the IDF object with the file handle
idf_scratch = archetypal.IDF(fhandle)
idf_scratch.add_object(ep_object='Schedule:Constant'.upper(),
**dict(Name=Name,
Schedule_Type_Limits_Name='',
Hourly_Value=hourly_value),
save=False)
sched = Schedule(sch_name=Name, idf=idf_scratch, **kwargs)
return sched
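    # Illustrative example of the classmethod above; builds an always-on
    # schedule without needing an existing IDF file:
    #
    #   sched = Schedule.constant_schedule(hourly_value=1, Name='AlwaysOn')
    #   sched.all_values  # 8760 hourly values equal to 1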
@property
def all_values(self):
"""returns the values array"""
if self.values is None:
self.values = self.get_schedule_values(sch_name=self.schName,
sch_type=self.schType)
return self.values
else:
return self.values
@property
def max(self):
return max(self.all_values)
@property
def min(self):
return min(self.all_values)
@property
def mean(self):
return np.mean(self.all_values)
@property
def series(self):
"""Returns the schedule values as a pd.Series object with a
DateTimeIndex"""
index = pd.date_range(start=self.startDate, periods=len(
self.all_values), freq='1H')
return pd.Series(self.all_values, index=index)
def get_schedule_type_limits_name(self, sch_name=None, sch_type=None):
"""Return the Schedule Type Limits name associated to a schedule
name"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name,
sch_type=sch_type)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
return 'unknown'
else:
return schedule_limit_name
def get_schedule_type_limits_data(self, sch_name=None):
"""Returns Schedule Type Limits data from schedule name"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
# this schedule is probably a 'Schedule:Week:Daily' which does
# not have a Schedule_Type_Limits_Name field
return '', '', '', ''
else:
lower_limit, upper_limit, numeric_type, unit_type = \
self.idf.get_schedule_type_limits_data_by_name(
schedule_limit_name)
self.unit = unit_type
if self.unit == "unknown":
self.unit = numeric_type
return lower_limit, upper_limit, numeric_type, unit_type
def get_schedule_type(self, sch_name=None):
"""Return the schedule type"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
sch_type = schedule_values.fieldvalues[0]
return sch_type
def start_date(self):
"""The start date of the schedule. Satisfies `startDayOfTheWeek`"""
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
start_date = c.monthdatescalendar(self.year, 1)[0][0]
return datetime(start_date.year, start_date.month, start_date.day)
def plot(self, slice=None, **kwargs):
hourlyvalues = self.all_values
index = pd.date_range(self.startDate, periods=len(
hourlyvalues),
freq='1H')
series = pd.Series(hourlyvalues, index=index, dtype=float)
if slice is None:
slice = pd.IndexSlice[:]
elif len(slice) > 1:
slice = pd.IndexSlice[slice[0]:slice[1]]
ax = series.loc[slice].plot(**kwargs, label=self.schName)
return ax
def get_interval_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Interval"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Interval'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
number_of_day_sch = int((len(values.fieldvalues) - 3) / 2)
        # use a float dtype so fractional schedule values are not truncated
        hourly_values = np.arange(24, dtype=float)
start_hour = 0
for i in range(number_of_day_sch):
value = float(values['Value_Until_Time_{}'.format(i + 1)])
until_time = [int(s.strip()) for s in
values['Time_{}'.format(i + 1)].split(":") if
s.strip().isdigit()]
end_hour = int(until_time[0] + until_time[1] / 60)
for hour in range(start_hour, end_hour):
hourly_values[hour] = value
start_hour = end_hour
if numeric_type.strip().lower() == "discrete":
hourly_values = hourly_values.astype(int)
return hourly_values
def get_hourly_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Hourly'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Hourly'.upper(), sch_name)
fieldvalues_ = np.array(values.fieldvalues[3:])
return fieldvalues_
def get_compact_weekly_ep_schedule_values(self, sch_name=None,
start_date=None, index=None):
"""'schedule:week:compact'"""
if start_date is None:
start_date = self.startDate
if index is None:
idx = pd.date_range(start=start_date, periods=168, freq='1H')
slicer_ = pd.Series([False] * (len(idx)), index=idx)
else:
slicer_ = pd.Series([False] * (len(index)), index=index)
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:compact'.upper(), sch_name)
weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)
# update last day of schedule
if self.count == 0:
self.schType = values.key
self.endHOY = 168
num_of_daily_schedules = int(len(values.fieldvalues[2:]) / 2)
for i in range(num_of_daily_schedules):
day_type = values['DayType_List_{}'.format(i + 1)].lower()
how = self.field_set(day_type, slicer_)
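            # 'how' is a boolean mask over the weekly index selecting the
            # hours whose day matches this DayType field-set (e.g. weekdays,
            # weekends, holidays)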
if not weekly_schedules.loc[how].empty:
# Loop through days and replace with day:schedule values
days = []
for name, day in weekly_schedules.loc[how].groupby(pd.Grouper(
freq='D')):
if not day.empty:
ref = values.get_referenced_object(
"ScheduleDay_Name_{}".format(i + 1))
day.loc[:] = self.get_schedule_values(
sch_name=ref.Name, sch_type=ref.key)
days.append(day)
new = pd.concat(days)
slicer_.update(
pd.Series([True] * len(new.index), index=new.index))
slicer_ = slicer_.apply(lambda x: x == True)
weekly_schedules.update(new)
else:
return weekly_schedules.values
return weekly_schedules.values
def get_daily_weekly_ep_schedule_values(self, sch_name=None):
"""'schedule:week:daily'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:daily'.upper(), sch_name)
# 7 list for 7 days of the week
hourly_values = []
for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']:
ref = values.get_referenced_object(
'{}_ScheduleDay_Name'.format(day))
h = self.get_schedule_values(sch_name=ref.Name, sch_type=ref.key)
hourly_values.append(h)
hourly_values = np.array(hourly_values)
# shift days earlier by self.startDayOfTheWeek
hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)
return hourly_values.ravel()
def get_list_day_ep_schedule_values(self, sch_name=None):
"""'schedule:day:list'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:day:list'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
import pandas as pd
freq = int(values['Minutes_per_Item']) # Frequency of the values
num_values = values.fieldvalues[5:] # List of values
method = values['Interpolate_to_Timestep'] # How to resample
        # fill the array with the available values and pad any missing slots
        # with zeros (padding should not normally be needed)
        all_values = np.arange(int(24 * 60 / freq), dtype=float)
        for i in range(len(all_values)):
            try:
                all_values[i] = num_values[i]
            except (IndexError, ValueError):
                all_values[i] = 0
# create a fake index to help us with the resampling
index = pd.date_range(start=self.startDate,
periods=(24 * 60) / freq,
freq='{}T'.format(freq))
series = pd.Series(all_values, index=index)
# resample series to hourly values and apply resampler function
series = series.resample('1H').apply(_how(method))
return series.values
def get_constant_ep_schedule_values(self, sch_name=None):
"""'schedule:constant'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:constant'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
        # use a float array so non-integer schedule values are not truncated
        hourly_values = np.arange(8760, dtype=float)
        value = float(values['Hourly_Value'])
        for hour in range(8760):
            hourly_values[hour] = value
if numeric_type.strip().lower() == 'discrete':
hourly_values = hourly_values.astype(int)
return hourly_values
def get_file_ep_schedule_values(self, sch_name=None):
"""'schedule:file'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:file'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
filename = values['File_Name']
column = values['Column_Number']
rows = values['Rows_to_Skip_at_Top']
hours = values['Number_of_Hours_of_Data']
sep = values['Column_Separator']
interp = values['Interpolate_to_Timestep']
import pandas as pd
import os
idfdir = os.path.dirname(self.idf.idfname)
file = os.path.join(idfdir, filename)
        delimiter = _separator(sep)
        skip_rows = int(rows) - 1  # We want to keep the column
        col = [int(column) - 1]  # zero-based
        values = pd.read_csv(file, delimiter=delimiter, skiprows=skip_rows,
                             usecols=col)
return values.iloc[:, 0].values
def get_compact_ep_schedule_values(self, sch_name=None):
"""'schedule:compact'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:compact'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
field_sets = ['through', 'for', 'interpolate', 'until', 'value']
fields = values.fieldvalues[3:]
index = pd.date_range(start=self.startDate, periods=8760, freq='H')
zeros = np.zeros(len(index))
slicer_ = pd.Series([False] * len(index), index=index)
series = pd.Series(zeros, index=index)
from_day = self.startDate
ep_from_day = datetime(self.year, 1, 1)
from_time = '00:00'
how_interpolate = None
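        # Note added for clarity (not in the original source): the loop below walks
        # the 'Schedule:Compact' fields in order; 'Through' opens a date range,
        # 'For' selects day types inside it, 'Until' bounds the time of day, and
        # 'Value' (or a bare number) writes the value into every hour matched by
        # the combined boolean conditions.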
for field in fields:
if any([spe in field.lower() for spe in field_sets]):
f_set, hour, minute, value = self.field_interpreter(field)
if f_set.lower() == 'through':
# main condition. All sub-conditions must obey a
# `Through` condition
# First, initialize the slice (all False for now)
through_conditions = self.invalidate_condition(series)
# reset from_time
from_time = '00:00'
# Prepare ep_to_day variable
ep_to_day = self.date_field_interpretation(value) + \
timedelta(days=1)
# Calculate Timedelta in days
days = (ep_to_day - ep_from_day).days
# Add timedelta to start_date
to_day = from_day + timedelta(days=days) + timedelta(
hours=-1)
# slice the conditions with the range and apply True
through_conditions.loc[from_day:to_day] = True
from_day = to_day + timedelta(hours=1)
ep_from_day = ep_to_day
elif f_set.lower() == 'for':
# slice specific days
# reset from_time
from_time = '00:00'
for_condition = self.invalidate_condition(series)
values = value.split()
if len(values) > 1:
# if multiple `For`. eg.: For: Weekends Holidays,
# Combine both conditions
for value in values:
if value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
                                # Reset the through condition
through_conditions = how
for_condition = how
else:
how = self.field_set(value, slicer_)
for_condition.loc[how] = True
elif value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
                            # Reset the through condition
through_conditions = how
for_condition = how
else:
# Apply condition to slice
how = self.field_set(value)
for_condition.loc[how] = True
# Combine the for_condition with all_conditions
all_conditions = through_conditions & for_condition
# update in memory slice
# self.sliced_day_.loc[all_conditions] = True
elif 'interpolate' in f_set.lower():
# we need to upsample to series to 8760 * 60 values
new_idx = pd.date_range(start=self.startDate,
periods=525600, closed='left',
freq='T')
series = series.resample('T').pad()
series = series.reindex(new_idx)
series.fillna(method='pad', inplace=True)
through_conditions = through_conditions.resample('T').pad()
through_conditions = through_conditions.reindex(new_idx)
through_conditions.fillna(method='pad', inplace=True)
for_condition = for_condition.resample('T').pad()
for_condition = for_condition.reindex(new_idx)
for_condition.fillna(method='pad', inplace=True)
how_interpolate = value.lower()
elif f_set.lower() == 'until':
until_condition = self.invalidate_condition(series)
if series.index.freq.name == 'T':
# until_time = str(int(hour) - 1) + ':' + minute
until_time = timedelta(hours=int(hour),
minutes=int(minute)) - timedelta(
minutes=1)
else:
until_time = str(int(hour) - 1) + ':' + minute
until_condition.loc[until_condition.between_time(from_time,
str(
until_time)).index] = True
all_conditions = for_condition & through_conditions & \
until_condition
from_time = str(int(hour)) + ':' + minute
elif f_set.lower() == 'value':
                    # If the term `Value: ` field is used, we will catch it
# here.
# update in memory slice
slicer_.loc[all_conditions] = True
series[all_conditions] = value
else:
# Do something here before looping to the next Field
pass
else:
# If the term `Value: ` is not used; the variable is simply
# passed in the Field
value = float(field)
series[all_conditions] = value
# update in memory slice
slicer_.loc[all_conditions] = True
if how_interpolate:
return series.resample('H').mean().values
else:
return series.values
def field_interpreter(self, field):
"""dealing with a Field-Set (Through, For, Interpolate,
# Until, Value) and return the parsed string"""
if 'through' in field.lower():
# deal with through
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
hour = None
minute = None
value = statement.strip()
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'for' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
# parse without a colon
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'interpolate' in field.lower():
msg = 'The schedule "{sch}" contains sub-hourly values (' \
'Field-Set="{field}"). The average over the hour is ' \
'taken'.format(sch=self.schName, field=field)
log(msg, lg.WARNING)
f_set, value = field.split(':')
hour = None
minute = None
elif 'until' in field.lower():
if ':' in field.lower():
# parse colon
try:
f_set, hour, minute = field.split(':')
hour = hour.strip() # remove trailing spaces
minute = minute.strip() # remove trailing spaces
value = None
except:
f_set = 'until'
hour, minute = field.split(':')
hour = hour[-2:].strip()
minute = minute.strip()
value = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'value' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
else:
# deal with the data value
f_set = field
hour = None
minute = None
value = field[len(field) + 1:].strip()
return f_set, hour, minute, value
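    # Illustrative note (added for clarity, not part of the original source):
    # "Until: 07:00" parses to ('Until', '07', '00', None) and
    # "Through: 12/31" parses to ('Through', None, None, '12/31'); plain numeric
    # fields never reach this parser because get_compact_ep_schedule_values
    # converts them with float() directly.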
@staticmethod
def invalidate_condition(series):
index = series.index
periods = len(series)
return pd.Series([False] * periods, index=index)
def get_yearly_ep_schedule_values(self, sch_name=None):
"""'schedule:year'"""
# first week
start_date = self.startDate
idx = pd.date_range(start=start_date, periods=8760, freq='1H')
hourly_values = pd.Series([0] * 8760, index=idx)
# update last day of schedule
self.endHOY = 8760
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:year'.upper(), sch_name)
# generate weekly schedules
num_of_weekly_schedules = int(len(values.fieldvalues[3:]) / 5)
for i in range(num_of_weekly_schedules):
ref = values.get_referenced_object(
'ScheduleWeek_Name_{}'.format(i + 1))
start_month = values['Start_Month_{}'.format(i + 1)]
end_month = values['End_Month_{}'.format(i + 1)]
start_day = values['Start_Day_{}'.format(i + 1)]
end_day = values['End_Day_{}'.format(i + 1)]
start = datetime.strptime(
'{}/{}/{}'.format(self.year, start_month, start_day),
'%Y/%m/%d')
end = datetime.strptime(
'{}/{}/{}'.format(self.year, end_month, end_day),
'%Y/%m/%d')
days = (end - start).days + 1
end_date = start_date + timedelta(days=days) + timedelta(hours=23)
how = pd.IndexSlice[start_date:end_date]
weeks = []
for name, week in hourly_values.loc[how].groupby(
pd.Grouper(freq='168H')):
if not week.empty:
try:
week.loc[:] = self.get_schedule_values(
sch_name=ref.Name, start_date=week.index[0],
index=week.index, sch_type=ref.key)
except ValueError:
week.loc[:] = self.get_schedule_values(
ref.Name, week.index[0])[0:len(week)]
finally:
weeks.append(week)
new = pd.concat(weeks)
hourly_values.update(new)
start_date += timedelta(days=days)
return hourly_values.values
def get_schedule_values(self, sch_name=None, start_date=None, index=None,
sch_type=None):
"""Main function that returns the schedule values
Args:
sch_type:
index:
start_date:
"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
self.schType = schedule_values.key.upper()
sch_type = self.schType
if self.count == 0:
# This is the first time, get the schedule type and the type limits.
self.schTypeLimitsName = self.get_schedule_type_limits_name()
self.count += 1
if sch_type.upper() == "schedule:year".upper():
hourly_values = self.get_yearly_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:interval".upper():
hourly_values = self.get_interval_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:hourly".upper():
hourly_values = self.get_hourly_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:day:list".upper():
hourly_values = self.get_list_day_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:week:compact".upper():
hourly_values = self.get_compact_weekly_ep_schedule_values(
sch_name, start_date, index)
elif sch_type.upper() == "schedule:week:daily".upper():
hourly_values = self.get_daily_weekly_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:constant".upper():
hourly_values = self.get_constant_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:compact".upper():
hourly_values = self.get_compact_ep_schedule_values(
sch_name)
elif sch_type.upper() == "schedule:file".upper():
hourly_values = self.get_file_ep_schedule_values(
sch_name)
else:
log('Archetypal does not support "{}" currently'.format(
self.schType), lg.WARNING)
hourly_values = []
return hourly_values
def is_schedule(self, sch_name):
"""Returns True if idfobject is one of 'schedule_types'"""
if sch_name.upper() in self.idf.schedules_dict:
return True
else:
return False
def to_year_week_day(self):
"""convert a Schedule Class to the 'Schedule:Year',
'Schedule:Week:Daily' and 'Schedule:Day:Hourly' representation
Returns:
'Schedule:Year', list of ['Schedule:Week:Daily'],
list of ['Schedule:Day:Hourly']
"""
full_year = np.array(self.all_values) # array of shape (8760,)
values = full_year.reshape(-1, 24) # shape (365, 24)
# create unique days
unique_days, nds = np.unique(values, axis=0, return_inverse=True)
ep_days = []
dict_day = {}
count_day = 0
for unique_day in unique_days:
name = 'd_' + self.schName + '_' + '%03d' % count_day
name, count_day = archetypal.check_unique_name('d', count_day,
name,
archetypal.settings.unique_schedules,
suffix=True)
dict_day[name] = unique_day
archetypal.settings.unique_schedules.append(name)
# Create idf_objects for schedule:day:hourly
ep_day = self.idf.add_object(
ep_object='Schedule:Day:Hourly'.upper(),
save=False,
**dict(Name=name,
Schedule_Type_Limits_Name=self.schType,
**{'Hour_{}'.format(i + 1): unique_day[i]
for i in range(24)})
)
ep_days.append(ep_day)
# create unique weeks from unique days
unique_weeks, nwsi, nws, count = np.unique(
full_year[:364 * 24, ...].reshape(-1, 168), return_index=True,
axis=0, return_inverse=True, return_counts=True)
# Appending unique weeks in dictionary with name and values of weeks as
# keys
# {'name_week': {'dayName':[]}}
dict_week = {}
count_week = 0
for unique_week in unique_weeks:
week_id = 'w_' + self.schName + '_' + '%03d' % count_week
week_id, count_week = archetypal.check_unique_name('w',
count_week,
week_id,
archetypal.settings.unique_schedules,
suffix=True)
archetypal.settings.unique_schedules.append(week_id)
dict_week[week_id] = {}
for i in list(range(0, 7)):
day_of_week = unique_week[..., i * 24:(i + 1) * 24]
for key in dict_day:
if (day_of_week == dict_day[key]).all():
dict_week[week_id]['day_{}'.format(i)] = key
# Create idf_objects for schedule:week:daily
list_day_of_week = ['Sunday', 'Monday', 'Tuesday',
'Wednesday', 'Thursday', 'Friday', 'Saturday']
ordered_day_n = np.array([6, 0, 1, 2, 3, 4, 5])
ordered_day_n = np.roll(ordered_day_n, self.startDayOfTheWeek)
ep_weeks = []
for week_id in dict_week:
ep_week = self.idf.add_object(
ep_object='Schedule:Week:Daily'.upper(),
save=False,
**dict(Name=week_id,
**{'{}_ScheduleDay_Name'.format(
weekday): dict_week[week_id][
'day_{}'.format(i)] for
i, weekday in
zip(ordered_day_n, list_day_of_week)
},
Holiday_ScheduleDay_Name=
dict_week[week_id]['day_6'],
SummerDesignDay_ScheduleDay_Name=
dict_week[week_id]['day_1'],
WinterDesignDay_ScheduleDay_Name=
dict_week[week_id]['day_1'],
CustomDay1_ScheduleDay_Name=
dict_week[week_id]['day_2'],
CustomDay2_ScheduleDay_Name=
dict_week[week_id]['day_5'])
)
ep_weeks.append(ep_week)
import itertools
blocks = {}
from_date = datetime(self.year, 1, 1)
bincount = [sum(1 for _ in group)
for key, group in itertools.groupby(nws + 1) if key]
week_order = {i: v for i, v in enumerate(np.array(
[key for key, group in itertools.groupby(nws + 1) if key]) - 1)}
for i, (week_n, count) in enumerate(
zip(week_order, bincount)):
week_id = list(dict_week)[week_order[i]]
to_date = from_date + timedelta(days=int(count * 7), hours=-1)
blocks[i] = {}
blocks[i]['week_id'] = week_id
blocks[i]['from_day'] = from_date.day
blocks[i]['end_day'] = to_date.day
blocks[i]['from_month'] = from_date.month
blocks[i]['end_month'] = to_date.month
from_date = to_date + timedelta(hours=1)
# If this is the last block, force end of year
if i == len(bincount) - 1:
blocks[i]['end_day'] = 31
blocks[i]['end_month'] = 12
new_dict = dict(Name=self.schName + '_',
Schedule_Type_Limits_Name=self.schTypeLimitsName)
for i in blocks:
new_dict.update({"ScheduleWeek_Name_{}".format(i + 1):
blocks[i]['week_id'],
"Start_Month_{}".format(i + 1):
blocks[i]['from_month'],
"Start_Day_{}".format(i + 1):
blocks[i]['from_day'],
"End_Month_{}".format(i + 1):
blocks[i]['end_month'],
"End_Day_{}".format(i + 1):
blocks[i]['end_day']})
ep_year = self.idf.add_object(ep_object='Schedule:Year'.upper(),
save=False, **new_dict)
return ep_year, ep_weeks, ep_days
def date_field_interpretation(self, field):
"""Date Field Interpretation
Args:
field (str): The EnergyPlus Field Contents
Returns:
(datetime): The datetime object
Info:
See EnergyPlus documentation for more details:
1.6.8.1.2 Field: Start Date (Table 1.4: Date Field Interpretation)
"""
# < number > Weekday in Month
formats = ['%m/%d', '%d %B', '%B %d', '%d %b', '%b %d']
date = None
for format_str in formats:
            # Try to parse using each of the defined formats
try:
date = datetime.strptime(field, format_str)
            except ValueError:
pass
else:
date = datetime(self.year, date.month, date.day)
if date is None:
# if the defined formats did not work, try the fancy parse
try:
date = self.parse_fancy_string(field)
except:
msg = "the schedule '{sch}' contains a " \
"Field that is not understood: '{field}'".format(
sch=self.schName,
field=field)
raise ValueError(msg)
else:
return date
else:
return date
def parse_fancy_string(self, field):
"""Will try to parse cases such as `3rd Monday in February` or `Last
Weekday In Month`
Args:
field (str): The EnergyPlus Field Contents
Returns:
(datetime): The datetime object
"""
import re
# split the string at the term ' in '
time, month = field.lower().split(' in ')
month = datetime.strptime(month, '%B').month
# split the first part into nth and dayofweek
nth, dayofweek = time.split(' ')
if 'last' in nth:
nth = -1 # Use the last one
else:
nth = re.findall(r'\d+', nth) # use the nth one
nth = int(nth[0]) - 1 # python is zero-based
weekday = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
'friday': 4, 'saturday': 5, 'sunday': 6}
# parse the dayofweek eg. monday
dayofweek = weekday.get(dayofweek, 6)
# create list of possible days using Calendar
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
monthcal = c.monthdatescalendar(self.year, month)
        # iterate through the month and get the nth weekday
date = [day for week in monthcal for day in week if \
day.weekday() == dayofweek and \
day.month == month][nth]
return datetime(date.year, date.month, date.day)
def field_set(self, field, slicer_=None):
"""helper function to return the proper slicer depending on the
field_set value.
Available values are:
Weekdays, Weekends, Holidays, Alldays, SummerDesignDay,
WinterDesignDay, Sunday, Monday, Tuesday, Wednesday, Thursday,
Friday, Saturday, CustomDay1, CustomDay2, AllOtherDays
Args:
field (str): The EnergyPlus field set value.
slicer_ (pd.Series): The persistent slicer for this schedule
Returns:
(indexer-like): Returns the appropriate indexer for the series.
"""
if field.lower() == 'weekdays':
# return only days of weeks
return lambda x: x.index.dayofweek < 5
elif field.lower() == 'weekends':
# return only weekends
return lambda x: x.index.dayofweek >= 5
elif field.lower() == 'alldays':
log('For schedule "{}", the field-set "AllDays" may be overridden '
'by the "AllOtherDays" field-set'.format(
self.schName), lg.WARNING)
            # return all days := equivalent to .loc[:]
return pd.IndexSlice[:]
elif field.lower() == 'allotherdays':
# return unused days (including special days). Uses the global
# variable `slicer_`
import operator
if slicer_ is not None:
return _conjunction(*[self.special_day(field, slicer_),
~slicer_], logical=operator.or_)
else:
raise NotImplementedError
elif field.lower() == 'sunday':
# return only sundays
return lambda x: x.index.dayofweek == 6
elif field.lower() == 'monday':
# return only mondays
return lambda x: x.index.dayofweek == 0
elif field.lower() == 'tuesday':
# return only Tuesdays
return lambda x: x.index.dayofweek == 1
elif field.lower() == 'wednesday':
# return only Wednesdays
return lambda x: x.index.dayofweek == 2
elif field.lower() == 'thursday':
# return only Thursdays
return lambda x: x.index.dayofweek == 3
elif field.lower() == 'friday':
# return only Fridays
return lambda x: x.index.dayofweek == 4
elif field.lower() == 'saturday':
# return only Saturdays
return lambda x: x.index.dayofweek == 5
elif field.lower() == 'summerdesignday':
# return design_day(self, field)
return None
elif field.lower() == 'winterdesignday':
# return design_day(self, field)
return None
elif field.lower() == 'holiday' or field.lower() == 'holidays':
field = 'holiday'
return self.special_day(field, slicer_)
elif not self.strict:
# If not strict, ignore missing field-sets such as CustomDay1
return pd.IndexSlice[:]
else:
raise NotImplementedError(
'Archetypal does not yet support The '
'Field_set "{}"'.format(field))
def __len__(self):
"""returns the length of all values of the schedule"""
return len(self.all_values)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Schedule):
return self.all_values == other.all_values
else:
raise NotImplementedError
def __ne__(self, other):
return ~(self.__eq__(other))
def __add__(self, other):
if isinstance(other, Schedule):
return self.all_values + other.all_values
elif isinstance(other, list):
return self.all_values + other
else:
raise NotImplementedError
def __sub__(self, other):
if isinstance(other, Schedule):
return self.all_values - other.all_values
elif isinstance(other, list):
return self.all_values - other
else:
raise NotImplementedError
def __mul__(self, other):
if isinstance(other, Schedule):
return self.all_values * other.all_values
elif isinstance(other, list):
return self.all_values * other
else:
raise NotImplementedError
def get_sdow(self, start_day_of_week):
"""Returns the start day of the week"""
if start_day_of_week is None:
return self.idf.day_of_week_for_start_day
else:
return start_day_of_week
def special_day(self, field, slicer_):
"""try to get the RunPeriodControl:SpecialDays for the corresponding
Day Type"""
sp_slicer_ = slicer_.copy()
sp_slicer_.loc[:] = False
special_day_types = ['holiday', 'customday1', 'customday2']
dds = self.idf.idfobjects['RunPeriodControl:SpecialDays'.upper()]
dd = [dd for dd in dds if dd.Special_Day_Type.lower() == field
or dd.Special_Day_Type.lower() in special_day_types]
if len(dd) > 0:
slice = []
for dd in dd:
# can have more than one special day types
data = dd.Start_Date
ep_start_date = self.date_field_interpretation(data)
ep_orig = datetime(self.year, 1, 1)
days_to_speciald = (ep_start_date - ep_orig).days
duration = int(dd.Duration)
from_date = self.startDate + timedelta(days=days_to_speciald)
to_date = from_date + timedelta(days=duration) + timedelta(
hours=-1)
sp_slicer_.loc[from_date:to_date] = True
return sp_slicer_
elif not self.strict:
return sp_slicer_
else:
msg = 'Could not find a "SizingPeriod:DesignDay" object ' \
'needed for schedule "{}" with Day Type "{}"'.format(
self.schName, field.capitalize()
)
raise ValueError(msg)
def design_day(schedule, field):
# try to get the SizingPeriod:DesignDay for the corresponding Day Type
dds = schedule.idf.idfobjects['SizingPeriod:DesignDay'.upper()]
dd = [dd for dd in dds if dd.Day_Type.lower() == field]
if len(dd) > 0:
# should have found only one design day matching the Day Type
data = [dd[0].Month, dd[0].Day_of_Month]
date = '/'.join([str(item).zfill(2) for item in data])
date = schedule.date_field_interpretation(date)
return lambda x: x.index == date
else:
msg = 'Could not find a "SizingPeriod:DesignDay" object ' \
'needed for schedule "{}" with Day Type "{}"'.format(
schedule.schName, field.capitalize()
)
raise ValueError(msg)
def _conjunction(*conditions, logical=np.logical_and):
"""Applies a logical function on n conditions"""
return functools.reduce(logical, conditions)
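# Illustrative helper (added for clarity; not part of the original module): a
# minimal sketch of how `_conjunction` merges boolean masks built over the same
# DatetimeIndex. The dates and masks below are arbitrary assumptions.
def _example_conjunction_usage():
    import operator
    import pandas as pd
    idx = pd.date_range('2018-01-01', periods=48, freq='H')
    weekdays = pd.Series(idx.dayofweek < 5, index=idx)
    mornings = pd.Series((idx.hour >= 8) & (idx.hour < 12), index=idx)
    both = _conjunction(weekdays, mornings)  # element-wise AND (the default)
    either = _conjunction(weekdays, mornings, logical=operator.or_)
    return both, either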
def _separator(sep):
"""helper function to return the correct delimiter"""
if sep == 'Comma':
return ','
elif sep == 'Tab':
return '\t'
elif sep == 'Fixed':
return None
elif sep == 'Semicolon':
return ';'
else:
return ','
def _how(how):
"""Helper function to return the correct resampler"""
if how.lower() == 'average':
return 'mean'
elif how.lower() == 'linear':
return 'interpolate'
elif how.lower() == 'no':
return 'max'
else:
return 'max'
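# Illustrative usage sketch (added for clarity, not part of the original file).
# The constructor arguments below are assumptions (the real signature lives
# above this excerpt), but the methods shown are the ones defined in this class.
# sched = Schedule(idf=idf, sch_name='OfficeOccupancy')  # hypothetical call
# hourly = sched.get_schedule_values()                   # hourly values (8760 for a yearly schedule)
# year, weeks, days = sched.to_year_week_day()           # compact EnergyPlus objects
# sched.plot(slice=('2018-01-02', '2018-01-08'))         # plot one week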
|
[
"archetypal.check_unique_name",
"pandas.read_csv",
"pandas.Grouper",
"archetypal.settings.unique_schedules.append",
"numpy.array",
"datetime.timedelta",
"archetypal.log",
"pandas.date_range",
"numpy.arange",
"datetime.datetime",
"numpy.mean",
"io.StringIO",
"functools.reduce",
"os.path.dirname",
"re.findall",
"pandas.Series",
"calendar.Calendar",
"numpy.roll",
"numpy.unique",
"itertools.groupby",
"datetime.datetime.strptime",
"os.path.join",
"archetypal.IDF",
"pandas.concat"
] |
[((44582, 44619), 'functools.reduce', 'functools.reduce', (['logical', 'conditions'], {}), '(logical, conditions)\n', (44598, 44619), False, 'import functools\n'), ((2337, 2356), 'io.StringIO', 'io.StringIO', (['idftxt'], {}), '(idftxt)\n', (2348, 2356), False, 'import io\n'), ((2436, 2459), 'archetypal.IDF', 'archetypal.IDF', (['fhandle'], {}), '(fhandle)\n', (2450, 2459), False, 'import archetypal\n'), ((3372, 3396), 'numpy.mean', 'np.mean', (['self.all_values'], {}), '(self.all_values)\n', (3379, 3396), True, 'import numpy as np\n'), ((3648, 3687), 'pandas.Series', 'pd.Series', (['self.all_values'], {'index': 'index'}), '(self.all_values, index=index)\n', (3657, 3687), True, 'import pandas as pd\n'), ((5639, 5693), 'calendar.Calendar', 'calendar.Calendar', ([], {'firstweekday': 'self.startDayOfTheWeek'}), '(firstweekday=self.startDayOfTheWeek)\n', (5656, 5693), False, 'import calendar\n'), ((5771, 5830), 'datetime.datetime', 'datetime', (['start_date.year', 'start_date.month', 'start_date.day'], {}), '(start_date.year, start_date.month, start_date.day)\n', (5779, 5830), False, 'from datetime import datetime, timedelta\n'), ((6057, 6106), 'pandas.Series', 'pd.Series', (['hourlyvalues'], {'index': 'index', 'dtype': 'float'}), '(hourlyvalues, index=index, dtype=float)\n', (6066, 6106), True, 'import pandas as pd\n'), ((6798, 6811), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (6807, 6811), True, 'import numpy as np\n'), ((7721, 7753), 'numpy.array', 'np.array', (['values.fieldvalues[3:]'], {}), '(values.fieldvalues[3:])\n', (7729, 7753), True, 'import numpy as np\n'), ((10508, 10531), 'numpy.array', 'np.array', (['hourly_values'], {}), '(hourly_values)\n', (10516, 10531), True, 'import numpy as np\n'), ((10611, 10666), 'numpy.roll', 'np.roll', (['hourly_values', '(-self.startDayOfTheWeek)'], {'axis': '(0)'}), '(hourly_values, -self.startDayOfTheWeek, axis=0)\n', (10618, 10666), True, 'import numpy as np\n'), ((11845, 11879), 'pandas.Series', 'pd.Series', (['all_values'], {'index': 'index'}), '(all_values, index=index)\n', (11854, 11879), True, 'import pandas as pd\n'), ((12424, 12439), 'numpy.arange', 'np.arange', (['(8760)'], {}), '(8760)\n', (12433, 12439), True, 'import numpy as np\n'), ((13378, 13411), 'os.path.dirname', 'os.path.dirname', (['self.idf.idfname'], {}), '(self.idf.idfname)\n', (13393, 13411), False, 'import os\n'), ((13427, 13457), 'os.path.join', 'os.path.join', (['idfdir', 'filename'], {}), '(idfdir, filename)\n', (13439, 13457), False, 'import os\n'), ((13621, 13692), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': 'delimeter', 'skiprows': 'skip_rows', 'usecols': 'col'}), '(file, delimiter=delimeter, skiprows=skip_rows, usecols=col)\n', (13632, 13692), True, 'import pandas as pd\n'), ((14249, 14308), 'pandas.date_range', 'pd.date_range', ([], {'start': 'self.startDate', 'periods': '(8760)', 'freq': '"""H"""'}), "(start=self.startDate, periods=8760, freq='H')\n", (14262, 14308), True, 'import pandas as pd\n'), ((14427, 14456), 'pandas.Series', 'pd.Series', (['zeros'], {'index': 'index'}), '(zeros, index=index)\n', (14436, 14456), True, 'import pandas as pd\n'), ((14514, 14539), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (14522, 14539), False, 'from datetime import datetime, timedelta\n'), ((23732, 23773), 'pandas.Series', 'pd.Series', (['([False] * periods)'], {'index': 'index'}), '([False] * periods, index=index)\n', (23741, 23773), True, 'import pandas as pd\n'), ((23937, 23993), 'pandas.date_range', 
'pd.date_range', ([], {'start': 'start_date', 'periods': '(8760)', 'freq': '"""1H"""'}), "(start=start_date, periods=8760, freq='1H')\n", (23950, 23993), True, 'import pandas as pd\n'), ((24018, 24050), 'pandas.Series', 'pd.Series', (['([0] * 8760)'], {'index': 'idx'}), '([0] * 8760, index=idx)\n', (24027, 24050), True, 'import pandas as pd\n'), ((28886, 28911), 'numpy.array', 'np.array', (['self.all_values'], {}), '(self.all_values)\n', (28894, 28911), True, 'import numpy as np\n'), ((29057, 29103), 'numpy.unique', 'np.unique', (['values'], {'axis': '(0)', 'return_inverse': '(True)'}), '(values, axis=0, return_inverse=True)\n', (29066, 29103), True, 'import numpy as np\n'), ((31640, 31671), 'numpy.array', 'np.array', (['[6, 0, 1, 2, 3, 4, 5]'], {}), '([6, 0, 1, 2, 3, 4, 5])\n', (31648, 31671), True, 'import numpy as np\n'), ((31696, 31742), 'numpy.roll', 'np.roll', (['ordered_day_n', 'self.startDayOfTheWeek'], {}), '(ordered_day_n, self.startDayOfTheWeek)\n', (31703, 31742), True, 'import numpy as np\n'), ((32904, 32929), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (32912, 32929), False, 'from datetime import datetime, timedelta\n'), ((37196, 37250), 'calendar.Calendar', 'calendar.Calendar', ([], {'firstweekday': 'self.startDayOfTheWeek'}), '(firstweekday=self.startDayOfTheWeek)\n', (37213, 37250), False, 'import calendar\n'), ((37536, 37577), 'datetime.datetime', 'datetime', (['date.year', 'date.month', 'date.day'], {}), '(date.year, date.month, date.day)\n', (37544, 37577), False, 'from datetime import datetime, timedelta\n'), ((8080, 8135), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'periods': '(168)', 'freq': '"""1H"""'}), "(start=start_date, periods=168, freq='1H')\n", (8093, 8135), True, 'import pandas as pd\n'), ((25856, 25872), 'pandas.concat', 'pd.concat', (['weeks'], {}), '(weeks)\n', (25865, 25872), True, 'import pandas as pd\n'), ((25937, 25957), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (25946, 25957), False, 'from datetime import datetime, timedelta\n'), ((29305, 29411), 'archetypal.check_unique_name', 'archetypal.check_unique_name', (['"""d"""', 'count_day', 'name', 'archetypal.settings.unique_schedules'], {'suffix': '(True)'}), "('d', count_day, name, archetypal.settings.\n unique_schedules, suffix=True)\n", (29333, 29411), False, 'import archetypal\n'), ((29637, 29686), 'archetypal.settings.unique_schedules.append', 'archetypal.settings.unique_schedules.append', (['name'], {}), '(name)\n', (29680, 29686), False, 'import archetypal\n'), ((30693, 30803), 'archetypal.check_unique_name', 'archetypal.check_unique_name', (['"""w"""', 'count_week', 'week_id', 'archetypal.settings.unique_schedules'], {'suffix': '(True)'}), "('w', count_week, week_id, archetypal.settings.\n unique_schedules, suffix=True)\n", (30721, 30803), False, 'import archetypal\n'), ((31063, 31115), 'archetypal.settings.unique_schedules.append', 'archetypal.settings.unique_schedules.append', (['week_id'], {}), '(week_id)\n', (31106, 31115), False, 'import archetypal\n'), ((36546, 36576), 'datetime.datetime.strptime', 'datetime.strptime', (['month', '"""%B"""'], {}), "(month, '%B')\n", (36563, 36576), False, 'from datetime import datetime, timedelta\n'), ((36778, 36801), 're.findall', 're.findall', (['"""\\\\d+"""', 'nth'], {}), "('\\\\d+', nth)\n", (36788, 36801), False, 'import re\n'), ((9506, 9521), 'pandas.concat', 'pd.concat', (['days'], {}), '(days)\n', (9515, 9521), True, 'import pandas as pd\n'), 
((25131, 25150), 'datetime.timedelta', 'timedelta', ([], {'hours': '(23)'}), '(hours=23)\n', (25140, 25150), False, 'from datetime import datetime, timedelta\n'), ((25310, 25333), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""168H"""'}), "(freq='168H')\n", (25320, 25333), True, 'import pandas as pd\n'), ((33010, 33036), 'itertools.groupby', 'itertools.groupby', (['(nws + 1)'], {}), '(nws + 1)\n', (33027, 33036), False, 'import itertools\n'), ((33704, 33722), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (33713, 33722), False, 'from datetime import datetime, timedelta\n'), ((35454, 35490), 'datetime.datetime.strptime', 'datetime.strptime', (['field', 'format_str'], {}), '(field, format_str)\n', (35471, 35490), False, 'from datetime import datetime, timedelta\n'), ((35573, 35614), 'datetime.datetime', 'datetime', (['self.year', 'date.month', 'date.day'], {}), '(self.year, date.month, date.day)\n', (35581, 35614), False, 'from datetime import datetime, timedelta\n'), ((42965, 42990), 'datetime.datetime', 'datetime', (['self.year', '(1)', '(1)'], {}), '(self.year, 1, 1)\n', (42973, 42990), False, 'from datetime import datetime, timedelta\n'), ((9104, 9124), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""D"""'}), "(freq='D')\n", (9114, 9124), True, 'import pandas as pd\n'), ((21922, 21942), 'archetypal.log', 'log', (['msg', 'lg.WARNING'], {}), '(msg, lg.WARNING)\n', (21925, 21942), False, 'from archetypal import log\n'), ((25108, 25128), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (25117, 25128), False, 'from datetime import datetime, timedelta\n'), ((43146, 43178), 'datetime.timedelta', 'timedelta', ([], {'days': 'days_to_speciald'}), '(days=days_to_speciald)\n', (43155, 43178), False, 'from datetime import datetime, timedelta\n'), ((43244, 43263), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-1)'}), '(hours=-1)\n', (43253, 43263), False, 'from datetime import datetime, timedelta\n'), ((15309, 15326), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15318, 15326), False, 'from datetime import datetime, timedelta\n'), ((15549, 15568), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-1)'}), '(hours=-1)\n', (15558, 15568), False, 'from datetime import datetime, timedelta\n'), ((15776, 15794), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (15785, 15794), False, 'from datetime import datetime, timedelta\n'), ((43217, 43241), 'datetime.timedelta', 'timedelta', ([], {'days': 'duration'}), '(days=duration)\n', (43226, 43241), False, 'from datetime import datetime, timedelta\n'), ((15526, 15546), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (15535, 15546), False, 'from datetime import datetime, timedelta\n'), ((17753, 17829), 'pandas.date_range', 'pd.date_range', ([], {'start': 'self.startDate', 'periods': '(525600)', 'closed': '"""left"""', 'freq': '"""T"""'}), "(start=self.startDate, periods=525600, closed='left', freq='T')\n", (17766, 17829), True, 'import pandas as pd\n'), ((33139, 33165), 'itertools.groupby', 'itertools.groupby', (['(nws + 1)'], {}), '(nws + 1)\n', (33156, 33165), False, 'import itertools\n'), ((18958, 18978), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (18967, 18978), False, 'from datetime import datetime, timedelta\n')]
|
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
threshold=args.threshold)
best_valid_cosine = 1
for epoch in np.arange(args.n_epochs):
valid_cosine = []
valid_ce = []
model.train()
for batch in np.arange(args.n_batch):
train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
fixed=args.fixed_shot,
return_inds=True)
optimizer.zero_grad()
if args.lang_model:
pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, train_inds)
loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
else:
pred_emb = model.forward(train_contexts, train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
use_valid=True,
fixed=args.fixed_shot,
return_inds=True)
if args.lang_model:
pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
valid_ce += [loss.cpu().numpy()]
else:
pred_emb = model.forward(valid_contexts, valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
valid_cosine += [loss.cpu().numpy()]
avg_valid = np.average(valid_cosine)
lr_scheduler.step(avg_valid)
if args.lang_model:
avg_ce = np.average(valid_ce)
print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
else:
print(f"Average cosine loss: {avg_valid}")
if avg_valid < best_valid_cosine:
best_valid_cosine = avg_valid
torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
print('LR early stop')
break
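# Note added for clarity (not in the original file): train() minimizes the
# negative cosine similarity between predicted and target embeddings, optionally
# adding a cross-entropy term when args.lang_model is set, and stops early once
# ReduceLROnPlateau drives the learning rate below args.lr_early_stop.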
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
gc.collect()
source_valid_cosine = []
target_valid_cosine = []
model.train()
with torch.backends.cudnn.flags(benchmark=True):
for meta_batch in np.arange(args.n_meta_batch):
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
meta_optimizer.zero_grad()
with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
for inner_batch in np.arange(args.n_inner_batch):
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
diffopt.step(loss)
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
meta_optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
print('LR early stop')
break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
leap = Leap(model)
meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
source_valid_cosine = []
target_valid_cosine = []
model.train()
for meta_batch in np.arange(args.n_meta_batch):
meta_optimizer.zero_grad()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = model.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.normalize()
meta_optimizer.step()
leap.to(model)
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
print('LR early stop')
break
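# Illustrative driver sketch (added for clarity, not part of the original file).
# `args` is assumed to be an argparse.Namespace carrying the fields referenced
# above; every default value below is an assumption for the example.
# import argparse
# args = argparse.Namespace(
#     lr_init=1e-3, lr_decay=0.5, patience=2, threshold=1e-4,
#     n_epochs=50, n_batch=100, batch_size=64, n_shot=4, fixed_shot=False,
#     lang_model=False, save_dir='./checkpoints', lr_early_stop=1e-5,
#     maml_meta_lr_init=1e-3, maml_inner_lr_init=1e-3, maml_lr_early_stop=1e-5,
#     leap_meta_lr_init=1e-3, leap_inner_lr_init=1e-3, leap_lr_early_stop=1e-5,
#     n_meta_epochs=20, n_meta_batch=50, n_inner_batch=5, n_task_steps=5,
#     meta_batch_size=32, meta_repeat_ctxs=False)
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# train(model, source_corpus, char2idx, args, device)
# maml_adapt(model, source_corpus, target_corpus, char2idx, args, device)
# leap_adapt(model, source_corpus, target_corpus, char2idx, args, device)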
|
[
"higher.innerloop_ctx",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.average",
"torch.backends.cudnn.flags",
"torch.nn.functional.cosine_similarity",
"os.path.join",
"gc.collect",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"leap.Leap",
"numpy.arange"
] |
[((287, 416), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'factor': 'args.lr_decay', 'patience': 'args.patience', 'threshold': 'args.threshold'}), '(optimizer, factor=args.lr_decay,\n patience=args.patience, threshold=args.threshold)\n', (329, 416), False, 'import torch\n'), ((519, 543), 'numpy.arange', 'np.arange', (['args.n_epochs'], {}), '(args.n_epochs)\n', (528, 543), True, 'import numpy as np\n'), ((3952, 4087), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['meta_optimizer'], {'factor': 'args.lr_decay', 'patience': 'args.patience', 'threshold': 'args.threshold'}), '(meta_optimizer, factor=args.\n lr_decay, patience=args.patience, threshold=args.threshold)\n', (3994, 4087), False, 'import torch\n'), ((4187, 4216), 'numpy.arange', 'np.arange', (['args.n_meta_epochs'], {}), '(args.n_meta_epochs)\n', (4196, 4216), True, 'import numpy as np\n'), ((7560, 7571), 'leap.Leap', 'Leap', (['model'], {}), '(model)\n', (7564, 7571), False, 'from leap import Leap\n'), ((7675, 7810), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['meta_optimizer'], {'factor': 'args.lr_decay', 'patience': 'args.patience', 'threshold': 'args.threshold'}), '(meta_optimizer, factor=args.\n lr_decay, patience=args.patience, threshold=args.threshold)\n', (7717, 7810), False, 'import torch\n'), ((7910, 7939), 'numpy.arange', 'np.arange', (['args.n_meta_epochs'], {}), '(args.n_meta_epochs)\n', (7919, 7939), True, 'import numpy as np\n'), ((637, 660), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (646, 660), True, 'import numpy as np\n'), ((3135, 3159), 'numpy.average', 'np.average', (['valid_cosine'], {}), '(valid_cosine)\n', (3145, 3159), True, 'import numpy as np\n'), ((4226, 4238), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4236, 4238), False, 'import gc\n'), ((8056, 8084), 'numpy.arange', 'np.arange', (['args.n_meta_batch'], {}), '(args.n_meta_batch)\n', (8065, 8084), True, 'import numpy as np\n'), ((1829, 1844), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1842, 1844), False, 'import torch\n'), ((1871, 1894), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (1880, 1894), True, 'import numpy as np\n'), ((3247, 3267), 'numpy.average', 'np.average', (['valid_ce'], {}), '(valid_ce)\n', (3257, 3267), True, 'import numpy as np\n'), ((4341, 4383), 'torch.backends.cudnn.flags', 'torch.backends.cudnn.flags', ([], {'benchmark': '(True)'}), '(benchmark=True)\n', (4367, 4383), False, 'import torch\n'), ((4415, 4443), 'numpy.arange', 'np.arange', (['args.n_meta_batch'], {}), '(args.n_meta_batch)\n', (4424, 4443), True, 'import numpy as np\n'), ((5805, 5820), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5818, 5820), False, 'import torch\n'), ((5847, 5870), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (5856, 5870), True, 'import numpy as np\n'), ((6894, 6925), 'numpy.average', 'np.average', (['source_valid_cosine'], {}), '(source_valid_cosine)\n', (6904, 6925), True, 'import numpy as np\n'), ((6927, 6958), 'numpy.average', 'np.average', (['target_valid_cosine'], {}), '(target_valid_cosine)\n', (6937, 6958), True, 'import numpy as np\n'), ((8308, 8336), 'numpy.arange', 'np.arange', (['args.n_task_steps'], {}), '(args.n_task_steps)\n', (8317, 8336), True, 'import numpy as np\n'), ((9065, 9093), 'numpy.arange', 'np.arange', (['args.n_task_steps'], {}), '(args.n_task_steps)\n', 
(9074, 9093), True, 'import numpy as np\n'), ((9812, 9827), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9825, 9827), False, 'import torch\n'), ((9854, 9877), 'numpy.arange', 'np.arange', (['args.n_batch'], {}), '(args.n_batch)\n', (9863, 9877), True, 'import numpy as np\n'), ((10901, 10932), 'numpy.average', 'np.average', (['source_valid_cosine'], {}), '(source_valid_cosine)\n', (10911, 10932), True, 'import numpy as np\n'), ((10934, 10965), 'numpy.average', 'np.average', (['target_valid_cosine'], {}), '(target_valid_cosine)\n', (10944, 10965), True, 'import numpy as np\n'), ((1420, 1469), 'torch.nn.functional.cross_entropy', 'nn.functional.cross_entropy', (['pred_ind', 'train_inds'], {}), '(pred_ind, train_inds)\n', (1447, 1469), True, 'import torch.nn as nn\n'), ((3558, 3597), 'os.path.join', 'os.path.join', (['args.save_dir', '"""model.pt"""'], {}), "(args.save_dir, 'model.pt')\n", (3570, 3597), False, 'import os\n'), ((7244, 7288), 'os.path.join', 'os.path.join', (['args.save_dir', '"""maml_model.pt"""'], {}), "(args.save_dir, 'maml_model.pt')\n", (7256, 7288), False, 'import os\n'), ((11251, 11295), 'os.path.join', 'os.path.join', (['args.save_dir', '"""leap_model.pt"""'], {}), "(args.save_dir, 'leap_model.pt')\n", (11263, 11295), False, 'import os\n'), ((4609, 4681), 'higher.innerloop_ctx', 'higher.innerloop_ctx', (['model', 'inner_optimizer'], {'copy_initial_weights': '(False)'}), '(model, inner_optimizer, copy_initial_weights=False)\n', (4629, 4681), False, 'import higher\n'), ((4743, 4772), 'numpy.arange', 'np.arange', (['args.n_inner_batch'], {}), '(args.n_inner_batch)\n', (4752, 4772), True, 'import numpy as np\n'), ((1495, 1551), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'train_targets'], {}), '(pred_emb, train_targets)\n', (1526, 1551), True, 'import torch.nn as nn\n'), ((1672, 1728), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'train_targets'], {}), '(pred_emb, train_targets)\n', (1703, 1728), True, 'import torch.nn as nn\n'), ((2765, 2814), 'torch.nn.functional.cross_entropy', 'nn.functional.cross_entropy', (['pred_ind', 'valid_inds'], {}), '(pred_ind, valid_inds)\n', (2792, 2814), True, 'import torch.nn as nn\n'), ((2997, 3053), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'valid_targets'], {}), '(pred_emb, valid_targets)\n', (3028, 3053), True, 'import torch.nn as nn\n'), ((6201, 6264), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_valid_targets'], {}), '(pred_emb, source_valid_targets)\n', (6232, 6264), True, 'import torch.nn as nn\n'), ((6717, 6780), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_valid_targets'], {}), '(pred_emb, target_valid_targets)\n', (6748, 6780), True, 'import torch.nn as nn\n'), ((8699, 8762), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_train_targets'], {}), '(pred_emb, source_train_targets)\n', (8730, 8762), True, 'import torch.nn as nn\n'), ((9507, 9570), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_train_targets'], {}), '(pred_emb, target_train_targets)\n', (9538, 9570), True, 'import torch.nn as nn\n'), ((10208, 10271), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_valid_targets'], {}), '(pred_emb, source_valid_targets)\n', (10239, 10271), 
True, 'import torch.nn as nn\n'), ((10724, 10787), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_valid_targets'], {}), '(pred_emb, target_valid_targets)\n', (10755, 10787), True, 'import torch.nn as nn\n'), ((5624, 5687), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'target_train_targets'], {}), '(pred_emb, target_train_targets)\n', (5655, 5687), True, 'import torch.nn as nn\n'), ((5120, 5183), 'torch.nn.functional.cosine_similarity', 'nn.functional.cosine_similarity', (['pred_emb', 'source_train_targets'], {}), '(pred_emb, source_train_targets)\n', (5151, 5183), True, 'import torch.nn as nn\n')]
|
#!/usr/bin/env python
# encoding=utf-8
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from collections import Counter
from config import Config
# Configuration file
conf = Config()
class Vocabulary(object):
    def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",
                 max_num_word=-1):
        self.voc2id = {}
        self.id2voc = {}
        self.unk = unk
        self.pad = pad
        self.max_len = max_len
        self.allow_unk = allow_unk
        # used by fit(); -1 means keep every word seen
        self.max_num_word = max_num_word
with open(meta_file, encoding='utf-8') as f:
for i, line in enumerate(f):
line = convert_to_unicode(line.strip("\n"))
self.voc2id[line] = i
self.id2voc[i] = line
self.size = len(self.voc2id)
self.oov_num = self.size + 1
def fit(self, words_list):
"""
:param words_list: [[w11, w12, ...], [w21, w22, ...], ...]
:return:
"""
word_lst = []
word_lst_append = word_lst.append
for words in words_list:
if not isinstance(words, list):
print(words)
continue
for word in words:
word = convert_to_unicode(word)
word_lst_append(word)
word_counts = Counter(word_lst)
if self.max_num_word < 0:
self.max_num_word = len(word_counts)
sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
self.max_num_word = len(sorted_voc)
self.oov_index = self.max_num_word + 1
self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
return self
def _transform2id(self, word):
word = convert_to_unicode(word)
if word in self.voc2id:
return self.voc2id[word]
elif self.allow_unk:
return self.voc2id[self.unk]
else:
print(word)
raise ValueError("word:{} Not in voc2id, please check".format(word))
def _transform_seq2id(self, words, padding=0):
out_ids = []
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
for w in words:
out_ids.append(self._transform2id(w))
if padding and self.max_len:
while len(out_ids) < self.max_len:
out_ids.append(0)
return out_ids
def _transform_intent2ont_hot(self, words, padding=0):
        # Convert multi-label intents to a one-hot vector
out_ids = np.zeros(self.size, dtype=np.float32)
words = convert_to_unicode(words)
for w in words:
out_ids[self._transform2id(w)] = 1.0
return out_ids
def _transform_seq2bert_id(self, words, padding=0):
out_ids, seq_len = [], 0
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
seq_len = len(words)
        # Insert the [CLS] token
out_ids.append(self._transform2id("[CLS]"))
for w in words:
out_ids.append(self._transform2id(w))
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids = [0 for _ in out_ids]
return out_ids, mask_ids, seg_ids, seq_len
@staticmethod
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
out_ids, seg_ids, seq_len = [], [1], 0
seq1 = [x for x in convert_to_unicode(seq1)]
seq2 = [x for x in convert_to_unicode(seq2)]
        # Truncate the pair to max_len - 2
self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        # Insert the [CLS] and [SEP] tokens
out_ids.append(self._transform2id("[CLS]"))
for w in seq1:
out_ids.append(self._transform2id(w))
seg_ids.append(0)
out_ids.append(self._transform2id("[SEP]"))
seg_ids.append(0)
for w in seq2:
out_ids.append(self._transform2id(w))
seg_ids.append(1)
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids.append(0)
return out_ids, mask_ids, seg_ids, seq_len
def transform(self, seq_list, is_bert=0):
if is_bert:
return [self._transform_seq2bert_id(seq) for seq in seq_list]
else:
return [self._transform_seq2id(seq) for seq in seq_list]
def __len__(self):
return len(self.voc2id)
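# Illustrative usage sketch (added for clarity, not part of the original file).
# The vocabulary file path and sample text below are assumptions; the calls
# match the methods defined in the class above.
# vocab = Vocabulary('./data/vocab.txt', max_len=conf.max_seq_len, allow_unk=1)
# char_ids = vocab._transform_seq2id('hello world', padding=1)
# bert_ids, mask_ids, seg_ids, seq_len = vocab._transform_seq2bert_id('hello world', padding=1)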
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def gen_word_set(file_path, out_path='./data/words.txt'):
word_set = set()
with open(file_path, encoding='utf-8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr = [prefix, title]
query_pred = json.loads(query_pred)
for w in prefix:
word_set.add(w)
for each in query_pred:
for w in each:
word_set.add(w)
    with open(out_path, 'w', encoding='utf-8') as o:
for w in word_set:
o.write(w + '\n')
pass
def convert_word2id(query, vocab_map):
ids = []
for w in query:
if w in vocab_map:
ids.append(vocab_map[w])
else:
ids.append(vocab_map[conf.unk])
while len(ids) < conf.max_seq_len:
ids.append(vocab_map[conf.pad])
return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
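    # bag-of-words count vector over the vocabulary; words missing from vocab_map are counted in the UNK bucket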
bow_ids = np.zeros(conf.nwords)
for w in query:
if w in vocab_map:
bow_ids[vocab_map[w]] += 1
else:
bow_ids[vocab_map[conf.unk]] += 1
return bow_ids
def get_data(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 6]
"""
data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr, cur_len = [], []
query_pred = json.loads(query_pred)
# only 4 negative sample
for each in query_pred:
if each == title:
continue
cur_arr.append(convert_word2id(each, conf.vocab_map))
each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
cur_len.append(each_len)
if len(cur_arr) >= 4:
data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_neg'].extend(cur_arr[:4])
data_map['doc_neg_len'].extend(cur_len[:4])
pass
return data_map
def get_data_siamese_rnn(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, pos sample, 4 neg sample]], shape = [n, 6]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_seq = convert_word2id(prefix, conf.vocab_map)
title_seq = convert_word2id(title, conf.vocab_map)
data_arr.append([prefix_seq, title_seq, int(label)])
return data_arr
def get_data_bow(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
:return: [[query, prefix, label]], shape = [n, 3]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_ids = convert_seq2bow(prefix, conf.vocab_map)
title_ids = convert_seq2bow(title, conf.vocab_map)
data_arr.append([prefix_ids, title_ids, int(label)])
return data_arr
def trans_lcqmc(dataset):
"""
    Convert each example to word ids, truncating to the maximum sequence length.
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
t1_ids = convert_word2id(t1, conf.vocab_map)
t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
t2_ids = convert_word2id(t2, conf.vocab_map)
t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
# t2_len = len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
# out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc():
"""
    Load the LCQMC dataset and convert it to word ids.
"""
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc(dataset.train_examples)
dev_set = trans_lcqmc(dataset.dev_examples)
test_set = trans_lcqmc(dataset.test_examples)
return train_set, dev_set, test_set
# return test_set, test_set, test_set
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
"""
    Convert each example to BERT input ids, truncating to the maximum sequence length.
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
text_len.extend([len(t1) + len(t2)])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc_bert(vocab:Vocabulary, is_merge=0):
"""
    Load the LCQMC dataset and convert each query to word ids.
"""
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc_bert(dataset.train_examples, vocab, is_merge)
dev_set = trans_lcqmc_bert(dataset.dev_examples, vocab, is_merge)
test_set = trans_lcqmc_bert(dataset.test_examples, vocab, is_merge)
return train_set, dev_set, test_set
# test_set = test_set[:100]
# return test_set, test_set, test_set
def get_test(file_:str, vocab:Vocabulary):
test_arr = read_file(file_, '\t') # [[q1, q2],...]
out_arr = []
for line in test_arr:
if len(line) != 2:
print('wrong line size=', len(line))
t1, t2 = line # [t1_ids, t1_len, t2_ids, t2_len, label]
t1_ids = vocab._transform_seq2id(t1, padding=1)
t1_len = vocab.max_len if len(t1) > vocab.max_len else len(t1)
t2_ids = vocab._transform_seq2id(t2, padding=1)
t2_len = vocab.max_len if len(t2) > vocab.max_len else len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len])
return out_arr, test_arr
def get_test_bert(file_:str, vocab:Vocabulary, is_merge=0):
test_arr = read_file(file_, '\t') # [[q1, q2],...]
out_arr, _ = get_test_bert_by_arr(test_arr, vocab, is_merge)
return out_arr, test_arr
def get_test_bert_by_arr(test_arr:list, vocab:Vocabulary, is_merge=0):
# test_arr # [[q1, q2],...]
out_arr = []
for line in test_arr:
if len(line) != 2:
print('wrong line size=', len(line))
t1, t2 = line # [t1_ids, t1_len, t2_ids, t2_len, label]
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2])
return out_arr, test_arr
def get_test_bert_single(file_:str, vocab:Vocabulary, is_merge=0):
test_arr = read_file(file_) # [q1,...]
out_arr = []
for line in test_arr:
t1 = line # [t1_ids, t1_len, t2_ids, t2_len, label]
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
return out_arr, test_arr
def get_batch(dataset, batch_size=None, is_test=0):
    # tf.data.Dataset is awkward to use here, so batching is implemented by hand
    # https://stackoverflow.com/questions/50539342/getting-batches-in-tensorflow
    # dataset: each element is one sample's features, [[x1, x2, x3, ...], ...]; a test set may have no label
if not batch_size:
batch_size = 32
if not is_test:
random.shuffle(dataset)
steps = int(math.ceil(float(len(dataset)) / batch_size))
for i in range(steps):
idx = i * batch_size
cur_set = dataset[idx: idx + batch_size]
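        # transpose the batch so each yielded item groups one feature (or the label) across all samples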
cur_set = zip(*cur_set)
yield cur_set
if __name__ == '__main__':
# prefix, query_prediction, title, tag, label
    # query_prediction is a JSON-formatted string.
file_train = './data/oppo_round1_train_20180929.txt'
file_vali = './data/oppo_round1_vali_20180929.txt'
# data_train = get_data(file_train)
# data_train = get_data(file_vali)
# print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))
dataset = get_lcqmc()
print(dataset[1][:3])
for each in get_batch(dataset[1][:3], batch_size=2):
t1_ids, t1_len, t2_ids, t2_len, label = each
print(each)
pass
|
[
"numpy.mean",
"json.loads",
"random.shuffle",
"util.read_file",
"config.Config",
"numpy.zeros",
"numpy.core.fromnumeric.mean",
"paddlehub.dataset.LCQMC"
] |
[((308, 316), 'config.Config', 'Config', ([], {}), '()\n', (314, 316), False, 'from config import Config\n'), ((6858, 6879), 'numpy.zeros', 'np.zeros', (['conf.nwords'], {}), '(conf.nwords)\n', (6866, 6879), True, 'import numpy as np\n'), ((10812, 10831), 'paddlehub.dataset.LCQMC', 'hub.dataset.LCQMC', ([], {}), '()\n', (10829, 10831), True, 'import paddlehub as hub\n'), ((12184, 12203), 'paddlehub.dataset.LCQMC', 'hub.dataset.LCQMC', ([], {}), '()\n', (12201, 12203), True, 'import paddlehub as hub\n'), ((12593, 12615), 'util.read_file', 'read_file', (['file_', '"""\t"""'], {}), "(file_, '\\t')\n", (12602, 12615), False, 'from util import read_file\n'), ((13234, 13256), 'util.read_file', 'read_file', (['file_', '"""\t"""'], {}), "(file_, '\\t')\n", (13243, 13256), False, 'from util import read_file\n'), ((14284, 14300), 'util.read_file', 'read_file', (['file_'], {}), '(file_)\n', (14293, 14300), False, 'from util import read_file\n'), ((2577, 2614), 'numpy.zeros', 'np.zeros', (['self.size'], {'dtype': 'np.float32'}), '(self.size, dtype=np.float32)\n', (2585, 2614), True, 'import numpy as np\n'), ((10634, 10648), 'numpy.core.fromnumeric.mean', 'mean', (['text_len'], {}), '(text_len)\n', (10638, 10648), False, 'from numpy.core.fromnumeric import mean\n'), ((10665, 10717), 'numpy.mean', 'np.mean', (['[(x <= conf.max_seq_len) for x in text_len]'], {}), '([(x <= conf.max_seq_len) for x in text_len])\n', (10672, 10717), True, 'import numpy as np\n'), ((11965, 11979), 'numpy.core.fromnumeric.mean', 'mean', (['text_len'], {}), '(text_len)\n', (11969, 11979), False, 'from numpy.core.fromnumeric import mean\n'), ((11996, 12048), 'numpy.mean', 'np.mean', (['[(x <= conf.max_seq_len) for x in text_len]'], {}), '([(x <= conf.max_seq_len) for x in text_len])\n', (12003, 12048), True, 'import numpy as np\n'), ((14906, 14929), 'random.shuffle', 'random.shuffle', (['dataset'], {}), '(dataset)\n', (14920, 14929), False, 'import random\n'), ((6189, 6211), 'json.loads', 'json.loads', (['query_pred'], {}), '(query_pred)\n', (6199, 6211), False, 'import json\n'), ((7697, 7719), 'json.loads', 'json.loads', (['query_pred'], {}), '(query_pred)\n', (7707, 7719), False, 'import json\n')]
|
import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
def get_osaCasing_fields():
OD = Field(2030)
ID = Field(2031)
Weight = Field(2032)
Density = Field(2039)
E = Field(2040)
osaCasing_fields = FieldList()
osaCasing_fields.append( OD )
osaCasing_fields.append( ID )
osaCasing_fields.append( Weight )
osaCasing_fields.append( Density )
osaCasing_fields.append( E )
return osaCasing_fields
def get_osaCent_fields():
Type = Field(2049)
IPOD = Field(2009)
CentOD = Field(2011)
#CentID = Field(2012)
ResF_SO67 = Field(2018)
minResF = Field(2017)
SO_minResF = Field(2019)
ResF_SO67.set_representation('Res. Force @ SO=67%')
minResF.set_representation('minimum Res. Force')
SO_minResF.set_representation('StandOff @ min. Res. F.')
osaCent_fields = FieldList()
osaCent_fields.append( Type )
osaCent_fields.append( IPOD )
osaCent_fields.append( CentOD )
#osaCent_fields.append( CentID )
osaCent_fields.append( ResF_SO67 )
osaCent_fields.append( minResF )
osaCent_fields.append( SO_minResF )
return osaCent_fields
def get_osaWellbore_fields():
HoleID = Field(2010)
MaxSpan = Field(2061)
MudIPDensity = Field(2077)
MudOPDensity = Field(2077)
HoleID.set_representation('Hole ID')
HoleID.set_abbreviation('HoleID')
MaxSpan.set_representation('Max span')
MaxSpan.set_abbreviation('MaxSpan')
MudIPDensity.set_representation('Mud inside pipe')
MudIPDensity.set_abbreviation('MudIPDensity')
MudOPDensity.set_representation('Mud in annulus')
MudOPDensity.set_abbreviation('MudOPDensity')
osaWellbore_fields = FieldList()
osaWellbore_fields.append( HoleID )
osaWellbore_fields.append( MaxSpan )
osaWellbore_fields.append( MudIPDensity )
osaWellbore_fields.append( MudOPDensity )
return osaWellbore_fields
def get_osaOutputdata1_fields():
clearanceA = Field(2073, altBg=True, altFg=True)
clearanceB = Field(2073, altBg=True, altFg=True)
clearanceM = Field(2073, altBg=True, altFg=True)
sideForceA = Field(2074, altBg=True, altFg=True)
sideForceB = Field(2074, altBg=True, altFg=True)
sideForceM = Field(2074, altBg=True, altFg=True)
standoffA = Field(2078, altBg=True, altFg=True)
standoffB = Field(2078, altBg=True, altFg=True)
standoffM = Field(2078, altBg=True, altFg=True)
clearanceA.set_representation('Annular clearance @ cent. A')
clearanceA.set_abbreviation('ClearanceA')
clearanceB.set_representation('Annular clearance @ cent. B')
clearanceB.set_abbreviation('ClearanceB')
clearanceM.set_representation('Annular clearance @ mid span')
clearanceM.set_abbreviation('ClearanceM')
sideForceA.set_representation('Side force @ cent. A')
sideForceA.set_abbreviation('SideForceA')
sideForceB.set_representation('Side force @ cent. B')
sideForceB.set_abbreviation('SideForceB')
sideForceM.set_representation('Side force @ mid span')
sideForceM.set_abbreviation('SideForceM')
standoffA.set_representation('Standoff @ cent. A')
standoffA.set_abbreviation('StandoffA')
standoffB.set_representation('Standoff @ cent. B')
standoffB.set_abbreviation('StandoffB')
standoffM.set_representation('Standoff @ mid span')
standoffM.set_abbreviation('StandoffM')
osaOutputdata1_fields = FieldList()
osaOutputdata1_fields.append( clearanceA )
osaOutputdata1_fields.append( clearanceB )
osaOutputdata1_fields.append( clearanceM )
osaOutputdata1_fields.append( sideForceA )
osaOutputdata1_fields.append( sideForceB )
osaOutputdata1_fields.append( sideForceM )
osaOutputdata1_fields.append( standoffA )
osaOutputdata1_fields.append( standoffB )
osaOutputdata1_fields.append( standoffM )
return osaOutputdata1_fields
def get_osaOutputdata2_fields():
axialForce = Field(2075, altBg=True, altFg=True)
deflection = Field(2076, altBg=True, altFg=True)
wClearance = Field(2073, altBg=True, altFg=True)
wStandoff = Field(2078, altBg=True, altFg=True)
axialForce.set_representation('Axial extra force @ top')
axialForce.set_abbreviation('AxialForce')
deflection.set_representation('Max. pipe deflection')
deflection.set_abbreviation('MaxDeflection')
wClearance.set_representation('Mean wellbore clearance')
wClearance.set_abbreviation('WellboreClearance')
wStandoff.set_representation('Mean wellbore standoff')
wStandoff.set_abbreviation('WellboreStandoff')
osaOutputdata2_fields = FieldList()
osaOutputdata2_fields.append( axialForce )
osaOutputdata2_fields.append( deflection )
osaOutputdata2_fields.append( wClearance )
osaOutputdata2_fields.append( wStandoff )
return osaOutputdata2_fields
def get_casingDeflectionCurve(self):
# Equation(s) Reference 1:
# <NAME>, <NAME>. Casing Deflection and Centralizer Spacing Calculations.
# SPE Drilling Engineering (December 1992).
# Equation(s) Reference 2:
# <NAME>, <NAME>. Discussion of Optimal Spacing for Casing Centralizers.
# SPE Drilling Engineering (December 1988).
# Equation(s) Reference 3:
# <NAME>, <NAME>. Optimizing of Centralizer Distribution.
# SPE Latin American Petroleum Engineering Conference (October 1990).
self.osaCasing_fields.referenceUnitConvert_fields()
self.osaCentA_fields.referenceUnitConvert_fields()
self.osaCentB_fields.referenceUnitConvert_fields()
self.osaWellbore_fields.referenceUnitConvert_fields()
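	# Rot(φ): 2-D rotation matrix, used to rotate the computed curves into the wellbore orientation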
Rot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )
dH = self.osaWellbore_fields.HoleID[0]
L = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100
ρe = self.osaWellbore_fields.MudOPDensity[0]
ρi = self.osaWellbore_fields.MudIPDensity[0]
ρs = self.osaCasing_fields.Density[0]
E = self.osaCasing_fields.E[0]
w = self.osaCasing_fields.PW[0]
D = self.osaCasing_fields.OD[0]
d = self.osaCasing_fields.ID[0]
Type_A = self.osaCentA_fields.Type[0]
F_So67_A = self.osaCentA_fields.ResF_SO67[0]
minF_A = self.osaCentA_fields.minResF[0]
So_minF_A = self.osaCentA_fields.SO_minResF[0]
DA = self.osaCentA_fields.COD[0]
dA = self.osaCentA_fields.IPOD[0]
Type_B = self.osaCentB_fields.Type[0]
F_So67_B = self.osaCentB_fields.ResF_SO67[0]
minF_B = self.osaCentB_fields.minResF[0]
So_minF_B = self.osaCentB_fields.SO_minResF[0]
DB = self.osaCentB_fields.COD[0]
dB = self.osaCentB_fields.IPOD[0]
	#kA = ResFA/(DA/2-0.335*(DA-D)) # This is how the spring stiffness coefficients are computed ( 0.335 = 0.67/2 )
#kB = ResFB/(DB/2-0.335*(DB-D))
for field in self.osaWellbore_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCasing_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentA_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentB_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
if dA!=D or dB!=D or dH<=D:
raise mdl.LogicalError('The selected devices are not size-consistent.')
θ = np.pi*self.osaInclination_slider.sliderPosition()/180
	I = np.pi/64*(D**4-d**4) # [Ref.3] Area moment of inertia (not the polar moment of inertia).
F = 30000 # [Ref.1]
Radio = L*1e6
aspr = L*0.02
buoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]
w *= buoyancyFactor
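	# fC: lateral load per centralizer — half of the buoyed span weight component acting normal to the hole axis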
fC = w*L*np.sin(θ)/2
if Type_A=='Resin': #mdl.isNoneEntry(ResFA):
yA = 0
dA = d
else:
kA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)
yA = fC/kA if (DA<dH) else fC/kA/2
if Type_B=='Resin': #mdl.isNoneEntry(ResFB):
yB = 0
dB = d
else:
kB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)
yB = fC/kB if (DB<dH) else fC/kB/2
R = D/2
rH = dH/2
rA_min = R+(DA/2-R)*0.1
rB_min = R+(DB/2-R)*0.1
rA = (DA/2-yA) if (DA<dH) else (rH-yA)
rB = (DB/2-yB) if (DB<dH) else (rH-yB)
rA = rA_min if (rA<=rA_min) else rA
rB = rB_min if (rB<=rB_min) else rB
α = np.arctan( (rB-rA)/L )
Lα = L/np.cos(α)
x = np.linspace( 0, Lα, 101 )
K = np.sqrt(F/E/I)
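	# K = sqrt(F/(E*I)); y(x) below is the beam-column deflection of the span under its lateral weight component [Ref.1]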
y = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]
Rα = Rot(α)
xy = np.array([x,y])
x,y = np.dot(Rα,xy)
Δy = rH-rB
y += Δy
cH = rH-R
cA = rA-R
cB = rB-R
indexes = y>cH
y[indexes] = cH
indexes = y<-cH
y[indexes] =-cH
cy = cH-y
rM = rH-y[50]
if y[50]==cH:
fM = fC
fC = 0
else:
fM = 0
cM = rM-R
x -= L/2
yoh = y*0
ohc = np.array([x, yoh])
ohp = np.array([x, (yoh+rH)*aspr])
ohm = np.array([x, (yoh-rH)*aspr])
xyc = np.array([x, y*aspr])
xyp = np.array([x, (y+R)*aspr])
xym = np.array([x, (y-R)*aspr])
φ = θ + np.pi/2
Rφ = Rot(φ)
OHc = np.dot(Rφ,ohc)
OHp = np.dot(Rφ,ohp)
OHm = np.dot(Rφ,ohm)
XYc = np.dot(Rφ,xyc)
XYp = np.dot(Rφ,xyp)
XYm = np.dot(Rφ,xym)
SA = cA/cH
SB = cB/cH
SM = cM/cH
Sy = cy/cH
δ = (cA+cB)/2-cM
self.osaOutputdata1_fields.clear_content()
self.osaOutputdata2_fields.clear_content()
self.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )
self.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )
self.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )
self.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )
self.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )
self.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )
self.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )
self.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )
self.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )
self.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )
self.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )
self.osaCasing_fields.inverseReferenceUnitConvert_fields()
self.osaCentA_fields.inverseReferenceUnitConvert_fields()
self.osaCentB_fields.inverseReferenceUnitConvert_fields()
self.osaWellbore_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()
lim = L/2*1.05
return OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM
|
[
"MdlUtilities.physicalValue",
"numpy.mean",
"numpy.sqrt",
"MdlUtilities.Field",
"MdlUtilities.LogicalError",
"numpy.tanh",
"numpy.sinh",
"numpy.array",
"numpy.linspace",
"numpy.dot",
"MdlUtilities.calculate_buoyancyFactor",
"numpy.cos",
"numpy.cosh",
"numpy.sin",
"MdlUtilities.FieldList",
"numpy.arctan"
] |
[((167, 178), 'MdlUtilities.Field', 'Field', (['(2030)'], {}), '(2030)\n', (172, 178), False, 'from MdlUtilities import Field, FieldList\n'), ((191, 202), 'MdlUtilities.Field', 'Field', (['(2031)'], {}), '(2031)\n', (196, 202), False, 'from MdlUtilities import Field, FieldList\n'), ((215, 226), 'MdlUtilities.Field', 'Field', (['(2032)'], {}), '(2032)\n', (220, 226), False, 'from MdlUtilities import Field, FieldList\n'), ((239, 250), 'MdlUtilities.Field', 'Field', (['(2039)'], {}), '(2039)\n', (244, 250), False, 'from MdlUtilities import Field, FieldList\n'), ((263, 274), 'MdlUtilities.Field', 'Field', (['(2040)'], {}), '(2040)\n', (268, 274), False, 'from MdlUtilities import Field, FieldList\n'), ((296, 307), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (305, 307), False, 'from MdlUtilities import Field, FieldList\n'), ((573, 584), 'MdlUtilities.Field', 'Field', (['(2049)'], {}), '(2049)\n', (578, 584), False, 'from MdlUtilities import Field, FieldList\n'), ((602, 613), 'MdlUtilities.Field', 'Field', (['(2009)'], {}), '(2009)\n', (607, 613), False, 'from MdlUtilities import Field, FieldList\n'), ((631, 642), 'MdlUtilities.Field', 'Field', (['(2011)'], {}), '(2011)\n', (636, 642), False, 'from MdlUtilities import Field, FieldList\n'), ((690, 701), 'MdlUtilities.Field', 'Field', (['(2018)'], {}), '(2018)\n', (695, 701), False, 'from MdlUtilities import Field, FieldList\n'), ((719, 730), 'MdlUtilities.Field', 'Field', (['(2017)'], {}), '(2017)\n', (724, 730), False, 'from MdlUtilities import Field, FieldList\n'), ((748, 759), 'MdlUtilities.Field', 'Field', (['(2019)'], {}), '(2019)\n', (753, 759), False, 'from MdlUtilities import Field, FieldList\n'), ((943, 954), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (952, 954), False, 'from MdlUtilities import Field, FieldList\n'), ((1304, 1315), 'MdlUtilities.Field', 'Field', (['(2010)'], {}), '(2010)\n', (1309, 1315), False, 'from MdlUtilities import Field, FieldList\n'), ((1334, 1345), 'MdlUtilities.Field', 'Field', (['(2061)'], {}), '(2061)\n', (1339, 1345), False, 'from MdlUtilities import Field, FieldList\n'), ((1364, 1375), 'MdlUtilities.Field', 'Field', (['(2077)'], {}), '(2077)\n', (1369, 1375), False, 'from MdlUtilities import Field, FieldList\n'), ((1394, 1405), 'MdlUtilities.Field', 'Field', (['(2077)'], {}), '(2077)\n', (1399, 1405), False, 'from MdlUtilities import Field, FieldList\n'), ((1784, 1795), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (1793, 1795), False, 'from MdlUtilities import Field, FieldList\n'), ((2059, 2094), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (2064, 2094), False, 'from MdlUtilities import Field, FieldList\n'), ((2111, 2146), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (2116, 2146), False, 'from MdlUtilities import Field, FieldList\n'), ((2163, 2198), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (2168, 2198), False, 'from MdlUtilities import Field, FieldList\n'), ((2215, 2250), 'MdlUtilities.Field', 'Field', (['(2074)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2074, altBg=True, altFg=True)\n', (2220, 2250), False, 'from MdlUtilities import Field, FieldList\n'), ((2267, 2302), 'MdlUtilities.Field', 'Field', (['(2074)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2074, altBg=True, altFg=True)\n', (2272, 2302), False, 'from MdlUtilities import 
Field, FieldList\n'), ((2319, 2354), 'MdlUtilities.Field', 'Field', (['(2074)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2074, altBg=True, altFg=True)\n', (2324, 2354), False, 'from MdlUtilities import Field, FieldList\n'), ((2371, 2406), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (2376, 2406), False, 'from MdlUtilities import Field, FieldList\n'), ((2423, 2458), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (2428, 2458), False, 'from MdlUtilities import Field, FieldList\n'), ((2475, 2510), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (2480, 2510), False, 'from MdlUtilities import Field, FieldList\n'), ((3446, 3457), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (3455, 3457), False, 'from MdlUtilities import Field, FieldList\n'), ((3953, 3988), 'MdlUtilities.Field', 'Field', (['(2075)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2075, altBg=True, altFg=True)\n', (3958, 3988), False, 'from MdlUtilities import Field, FieldList\n'), ((4005, 4040), 'MdlUtilities.Field', 'Field', (['(2076)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2076, altBg=True, altFg=True)\n', (4010, 4040), False, 'from MdlUtilities import Field, FieldList\n'), ((4057, 4092), 'MdlUtilities.Field', 'Field', (['(2073)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2073, altBg=True, altFg=True)\n', (4062, 4092), False, 'from MdlUtilities import Field, FieldList\n'), ((4109, 4144), 'MdlUtilities.Field', 'Field', (['(2078)'], {'altBg': '(True)', 'altFg': '(True)'}), '(2078, altBg=True, altFg=True)\n', (4114, 4144), False, 'from MdlUtilities import Field, FieldList\n'), ((4593, 4604), 'MdlUtilities.FieldList', 'FieldList', ([], {}), '()\n', (4602, 4604), False, 'from MdlUtilities import Field, FieldList\n'), ((7541, 7602), 'MdlUtilities.calculate_buoyancyFactor', 'mdl.calculate_buoyancyFactor', ([], {'OD': 'D', 'ID': 'd', 'ρs': 'ρs', 'ρe': 'ρe', 'ρi': 'ρi'}), '(OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi)\n', (7569, 7602), True, 'import MdlUtilities as mdl\n'), ((8246, 8270), 'numpy.arctan', 'np.arctan', (['((rB - rA) / L)'], {}), '((rB - rA) / L)\n', (8255, 8270), True, 'import numpy as np\n'), ((8293, 8316), 'numpy.linspace', 'np.linspace', (['(0)', 'Lα', '(101)'], {}), '(0, Lα, 101)\n', (8304, 8316), True, 'import numpy as np\n'), ((8327, 8345), 'numpy.sqrt', 'np.sqrt', (['(F / E / I)'], {}), '(F / E / I)\n', (8334, 8345), True, 'import numpy as np\n'), ((8498, 8514), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (8506, 8514), True, 'import numpy as np\n'), ((8522, 8536), 'numpy.dot', 'np.dot', (['Rα', 'xy'], {}), '(Rα, xy)\n', (8528, 8536), True, 'import numpy as np\n'), ((8801, 8819), 'numpy.array', 'np.array', (['[x, yoh]'], {}), '([x, yoh])\n', (8809, 8819), True, 'import numpy as np\n'), ((8828, 8860), 'numpy.array', 'np.array', (['[x, (yoh + rH) * aspr]'], {}), '([x, (yoh + rH) * aspr])\n', (8836, 8860), True, 'import numpy as np\n'), ((8865, 8897), 'numpy.array', 'np.array', (['[x, (yoh - rH) * aspr]'], {}), '([x, (yoh - rH) * aspr])\n', (8873, 8897), True, 'import numpy as np\n'), ((8904, 8927), 'numpy.array', 'np.array', (['[x, y * aspr]'], {}), '([x, y * aspr])\n', (8912, 8927), True, 'import numpy as np\n'), ((8934, 8963), 'numpy.array', 'np.array', (['[x, (y + R) * aspr]'], {}), '([x, (y + R) * aspr])\n', (8942, 8963), True, 'import numpy as np\n'), ((8968, 8997), 
'numpy.array', 'np.array', (['[x, (y - R) * aspr]'], {}), '([x, (y - R) * aspr])\n', (8976, 8997), True, 'import numpy as np\n'), ((9038, 9053), 'numpy.dot', 'np.dot', (['Rφ', 'ohc'], {}), '(Rφ, ohc)\n', (9044, 9053), True, 'import numpy as np\n'), ((9061, 9076), 'numpy.dot', 'np.dot', (['Rφ', 'ohp'], {}), '(Rφ, ohp)\n', (9067, 9076), True, 'import numpy as np\n'), ((9084, 9099), 'numpy.dot', 'np.dot', (['Rφ', 'ohm'], {}), '(Rφ, ohm)\n', (9090, 9099), True, 'import numpy as np\n'), ((9109, 9124), 'numpy.dot', 'np.dot', (['Rφ', 'xyc'], {}), '(Rφ, xyc)\n', (9115, 9124), True, 'import numpy as np\n'), ((9132, 9147), 'numpy.dot', 'np.dot', (['Rφ', 'xyp'], {}), '(Rφ, xyp)\n', (9138, 9147), True, 'import numpy as np\n'), ((9155, 9170), 'numpy.dot', 'np.dot', (['Rφ', 'xym'], {}), '(Rφ, xym)\n', (9161, 9170), True, 'import numpy as np\n'), ((7244, 7309), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""The selected devices are not size-consistent."""'], {}), "('The selected devices are not size-consistent.')\n", (7260, 7309), True, 'import MdlUtilities as mdl\n'), ((8278, 8287), 'numpy.cos', 'np.cos', (['α'], {}), '(α)\n', (8284, 8287), True, 'import numpy as np\n'), ((9386, 9460), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['cA', 'self.osaOutputdata1_fields.ClearanceA.referenceUnit'], {}), '(cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit)\n', (9403, 9460), True, 'import MdlUtilities as mdl\n'), ((9513, 9587), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['cB', 'self.osaOutputdata1_fields.ClearanceB.referenceUnit'], {}), '(cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit)\n', (9530, 9587), True, 'import MdlUtilities as mdl\n'), ((9640, 9714), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['cM', 'self.osaOutputdata1_fields.ClearanceM.referenceUnit'], {}), '(cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit)\n', (9657, 9714), True, 'import MdlUtilities as mdl\n'), ((9769, 9843), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['fC', 'self.osaOutputdata1_fields.SideForceA.referenceUnit'], {}), '(fC, self.osaOutputdata1_fields.SideForceA.referenceUnit)\n', (9786, 9843), True, 'import MdlUtilities as mdl\n'), ((9896, 9970), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['fC', 'self.osaOutputdata1_fields.SideForceB.referenceUnit'], {}), '(fC, self.osaOutputdata1_fields.SideForceB.referenceUnit)\n', (9913, 9970), True, 'import MdlUtilities as mdl\n'), ((10023, 10097), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['fM', 'self.osaOutputdata1_fields.SideForceM.referenceUnit'], {}), '(fM, self.osaOutputdata1_fields.SideForceM.referenceUnit)\n', (10040, 10097), True, 'import MdlUtilities as mdl\n'), ((10151, 10224), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['SA', 'self.osaOutputdata1_fields.StandoffA.referenceUnit'], {}), '(SA, self.osaOutputdata1_fields.StandoffA.referenceUnit)\n', (10168, 10224), True, 'import MdlUtilities as mdl\n'), ((10276, 10349), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['SB', 'self.osaOutputdata1_fields.StandoffB.referenceUnit'], {}), '(SB, self.osaOutputdata1_fields.StandoffB.referenceUnit)\n', (10293, 10349), True, 'import MdlUtilities as mdl\n'), ((10401, 10474), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['SM', 'self.osaOutputdata1_fields.StandoffM.referenceUnit'], {}), '(SM, self.osaOutputdata1_fields.StandoffM.referenceUnit)\n', (10418, 10474), True, 'import MdlUtilities as mdl\n'), ((10670, 10746), 'MdlUtilities.physicalValue', 'mdl.physicalValue', (['δ', 
'self.osaOutputdata2_fields.MaxDeflection.referenceUnit'], {}), '(δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit)\n', (10687, 10746), True, 'import MdlUtilities as mdl\n'), ((6739, 6803), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (6755, 6803), True, 'import MdlUtilities as mdl\n'), ((6870, 6934), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (6886, 6934), True, 'import MdlUtilities as mdl\n'), ((7004, 7068), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (7020, 7068), True, 'import MdlUtilities as mdl\n'), ((7138, 7202), 'MdlUtilities.LogicalError', 'mdl.LogicalError', (['"""Every parameter should be greater than zero."""'], {}), "('Every parameter should be greater than zero.')\n", (7154, 7202), True, 'import MdlUtilities as mdl\n'), ((7648, 7657), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (7654, 7657), True, 'import numpy as np\n'), ((10825, 10836), 'numpy.mean', 'np.mean', (['cy'], {}), '(cy)\n', (10832, 10836), True, 'import numpy as np\n'), ((10974, 10985), 'numpy.mean', 'np.mean', (['Sy'], {}), '(Sy)\n', (10981, 10985), True, 'import numpy as np\n'), ((8433, 8447), 'numpy.sinh', 'np.sinh', (['(K * x)'], {}), '(K * x)\n', (8440, 8447), True, 'import numpy as np\n'), ((10552, 10561), 'numpy.cos', 'np.cos', (['θ'], {}), '(θ)\n', (10558, 10561), True, 'import numpy as np\n'), ((5585, 5594), 'numpy.cos', 'np.cos', (['φ'], {}), '(φ)\n', (5591, 5594), True, 'import numpy as np\n'), ((5610, 5619), 'numpy.sin', 'np.sin', (['φ'], {}), '(φ)\n', (5616, 5619), True, 'import numpy as np\n'), ((5621, 5630), 'numpy.cos', 'np.cos', (['φ'], {}), '(φ)\n', (5627, 5630), True, 'import numpy as np\n'), ((5597, 5606), 'numpy.sin', 'np.sin', (['φ'], {}), '(φ)\n', (5603, 5606), True, 'import numpy as np\n'), ((8408, 8427), 'numpy.tanh', 'np.tanh', (['(K * Lα / 2)'], {}), '(K * Lα / 2)\n', (8415, 8427), True, 'import numpy as np\n'), ((8452, 8461), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (8458, 8461), True, 'import numpy as np\n'), ((8392, 8406), 'numpy.cosh', 'np.cosh', (['(K * x)'], {}), '(K * x)\n', (8399, 8406), True, 'import numpy as np\n'), ((8371, 8380), 'numpy.sin', 'np.sin', (['θ'], {}), '(θ)\n', (8377, 8380), True, 'import numpy as np\n')]
|
"""Test converting an image to a pyramid.
"""
import numpy as np
import napari
points = np.random.randint(100, size=(50_000, 2))
with napari.gui_qt():
viewer = napari.view_points(points, face_color='red')
|
[
"napari.gui_qt",
"numpy.random.randint",
"napari.view_points"
] |
[((90, 129), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(50000, 2)'}), '(100, size=(50000, 2))\n', (107, 129), True, 'import numpy as np\n'), ((137, 152), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (150, 152), False, 'import napari\n'), ((167, 211), 'napari.view_points', 'napari.view_points', (['points'], {'face_color': '"""red"""'}), "(points, face_color='red')\n", (185, 211), False, 'import napari\n')]
|
from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
df2.engineer('mult(n_1,n_2);pow(n_1,3)')
df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
np.testing.assert_array_equal(df1.columns.values.sort(), df2.columns.values.sort());
np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values);
np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)']);
np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))']);
np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))']);
np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']);
|
[
"numpy.testing.assert_allclose",
"math.sqrt",
"math.log",
"numpy.array",
"pandas.DataFrame",
"numpy.testing.assert_array_equal"
] |
[((244, 306), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})\n", (256, 306), True, 'import pandas as pd\n'), ((509, 599), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h',\n 'i', 'j']})\n", (521, 599), True, 'import pandas as pd\n'), ((822, 878), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3]})\n", (834, 878), True, 'import pandas as pd\n'), ((1099, 1173), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})\n", (1111, 1173), True, 'import pandas as pd\n'), ((1387, 1483), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (1399, 1483), True, 'import pandas as pd\n'), ((1675, 1771), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (1687, 1771), True, 'import pandas as pd\n'), ((1981, 2077), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (1993, 2077), True, 'import pandas as pd\n'), ((2358, 2454), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (2370, 2454), True, 'import pandas as pd\n'), ((2719, 2815), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (2731, 2815), True, 'import pandas as pd\n'), ((3165, 3261), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (3177, 3261), True, 'import pandas as pd\n'), ((3549, 3645), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (3561, 3645), True, 'import pandas as pd\n'), ((4007, 4103), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6],\n 'n_4': [7, 8, 9]})\n", (4019, 4103), True, 'import pandas as pd\n'), ((4397, 4447), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (4409, 4447), True, 'import pandas as pd\n'), ((4507, 4594), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 35, 40, 30, 29, 48]', "df['n_' + col]"], {}), "([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' +\n col])\n", (4536, 4594), True, 'import numpy as np\n'), 
((4649, 4699), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (4661, 4699), True, 'import pandas as pd\n'), ((4760, 4865), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16]', "df['n_' + col]"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df\n ['n_' + col], rtol=0.001)\n", (4786, 4865), True, 'import numpy as np\n'), ((4920, 4970), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (4932, 4970), True, 'import pandas as pd\n'), ((5033, 5120), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 12, 13, 13, 12, 12]', "df['n_' + col]"], {}), "([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' +\n col])\n", (5062, 5120), True, 'import numpy as np\n'), ((5174, 5224), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5186, 5224), True, 'import pandas as pd\n'), ((5284, 5369), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 10, 12, 2, 2, 2]', "df['n_' + col]"], {}), "([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col]\n )\n", (5313, 5369), True, 'import numpy as np\n'), ((5422, 5472), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5434, 5472), True, 'import pandas as pd\n'), ((5532, 5619), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 13, 15, 15, 15, 34]', "df['n_' + col]"], {}), "([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' +\n col])\n", (5561, 5619), True, 'import numpy as np\n'), ((5673, 5723), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5685, 5723), True, 'import pandas as pd\n'), ((5783, 5891), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371]', "df['n_' + col]"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371],\n df['n_' + col], rtol=0.001)\n", (5809, 5891), True, 'import numpy as np\n'), ((5944, 5994), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34]})\n", (5956, 5994), True, 'import pandas as pd\n'), ((6054, 6161), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268]', "df['n_' + col]"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268],\n df['n_' + col], rtol=0.001)\n", (6080, 6161), True, 'import numpy as np\n'), ((6238, 6323), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (6250, 6323), True, 'import pandas as pd\n'), ((6374, 6474), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 35, 40, 30, 29, 48]', "df['n_rolling_sum(n_1,3)']"], {}), "([np.nan, np.nan, 35, 40, 30, 29, 48], df[\n 'n_rolling_sum(n_1,3)'])\n", (6403, 6474), True, 'import numpy as np\n'), ((6475, 6572), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 6, 10, 10, 9, 8]', "df['n_rolling_sum(n_2,3)']"], {}), "([np.nan, np.nan, 6, 10, 10, 9, 8], df[\n 
'n_rolling_sum(n_2,3)'])\n", (6504, 6572), True, 'import numpy as np\n'), ((6626, 6711), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (6638, 6711), True, 'import pandas as pd\n'), ((6763, 6881), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16]', "df['n_rolling_mean(n_1,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df\n ['n_rolling_mean(n_1,3)'], rtol=0.001)\n", (6789, 6881), True, 'import numpy as np\n'), ((6881, 6998), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666]', "df['n_rolling_mean(n_2,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df[\n 'n_rolling_mean(n_2,3)'], rtol=0.001)\n", (6907, 6998), True, 'import numpy as np\n'), ((7053, 7138), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (7065, 7138), True, 'import pandas as pd\n'), ((7192, 7295), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 12, 13, 13, 12, 12]', "df['n_rolling_median(n_1,3)']"], {}), "([np.nan, np.nan, 12, 13, 13, 12, 12], df[\n 'n_rolling_median(n_1,3)'])\n", (7221, 7295), True, 'import numpy as np\n'), ((7296, 7394), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 2, 3, 3, 2, 2]', "df['n_rolling_median(n_2,3)']"], {}), "([np.nan, np.nan, 2, 3, 3, 2, 2], df[\n 'n_rolling_median(n_2,3)'])\n", (7325, 7394), True, 'import numpy as np\n'), ((7447, 7532), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (7459, 7532), True, 'import pandas as pd\n'), ((7583, 7680), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 10, 12, 2, 2, 2]', "df['n_rolling_min(n_1,3)']"], {}), "([np.nan, np.nan, 10, 12, 2, 2, 2], df[\n 'n_rolling_min(n_1,3)'])\n", (7612, 7680), True, 'import numpy as np\n'), ((7681, 7776), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 1, 2, 2, 2, 2]', "df['n_rolling_min(n_2,3)']"], {}), "([np.nan, np.nan, 1, 2, 2, 2, 2], df[\n 'n_rolling_min(n_2,3)'])\n", (7710, 7776), True, 'import numpy as np\n'), ((7829, 7914), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (7841, 7914), True, 'import pandas as pd\n'), ((7965, 8065), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 13, 15, 15, 15, 34]', "df['n_rolling_max(n_1,3)']"], {}), "([np.nan, np.nan, 13, 15, 15, 15, 34], df[\n 'n_rolling_max(n_1,3)'])\n", (7994, 8065), True, 'import numpy as np\n'), ((8066, 8161), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['[np.nan, np.nan, 3, 5, 5, 5, 4]', "df['n_rolling_max(n_2,3)']"], {}), "([np.nan, np.nan, 3, 5, 5, 5, 4], df[\n 'n_rolling_max(n_2,3)'])\n", (8095, 8161), True, 'import numpy as np\n'), ((8214, 8299), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 
12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (8226, 8299), True, 'import pandas as pd\n'), ((8350, 8470), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371]', "df['n_rolling_std(n_1,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371],\n df['n_rolling_std(n_1,3)'], rtol=0.001)\n", (8376, 8470), True, 'import numpy as np\n'), ((8471, 8591), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547]', "df['n_rolling_std(n_2,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547],\n df['n_rolling_std(n_2,3)'], rtol=0.001)\n", (8497, 8591), True, 'import numpy as np\n'), ((8644, 8729), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}"], {}), "({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]}\n )\n", (8656, 8729), True, 'import pandas as pd\n'), ((8780, 8899), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268]', "df['n_rolling_var(n_1,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268],\n df['n_rolling_var(n_1,3)'], rtol=0.001)\n", (8806, 8899), True, 'import numpy as np\n'), ((8900, 9016), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['[np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333]', "df['n_rolling_var(n_2,3)']"], {'rtol': '(0.001)'}), "([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df[\n 'n_rolling_var(n_2,3)'], rtol=0.001)\n", (8926, 9016), True, 'import numpy as np\n'), ((9058, 9178), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2, 3], 'n_3': [\n 4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2,\n 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})\n", (9070, 9178), True, 'import pandas as pd\n'), ((9714, 9834), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2, 3], 'n_3': [\n 4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2,\n 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})\n", (9726, 9834), True, 'import pandas as pd\n'), ((10272, 10392), 'pandas.DataFrame', 'pd.DataFrame', (["{'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2, 3], 'n_3': [\n 4, 5, 6], 'n_4': [7, 8, 9]}"], {}), "({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'n_2': [1, 2,\n 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})\n", (10284, 10392), True, 'import pandas as pd\n'), ((10832, 10882), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [1, 2, 3], 'n_2': [4, 5, 6]}"], {}), "({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})\n", (10844, 10882), True, 'import pandas as pd\n'), ((10898, 10948), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_1': [1, 2, 3], 'n_2': [4, 5, 6]}"], {}), "({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})\n", (10910, 10948), True, 'import pandas as pd\n'), ((11275, 11371), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_mult(n_1,n_2)'].values", "df2['n_mult(n_1,n_2)'].values"], {}), "(df1['n_mult(n_1,n_2)'].values, df2[\n 'n_mult(n_1,n_2)'].values)\n", (11304, 11371), True, 'import numpy as np\n'), ((11373, 11444), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_pow(n_1,3)']", "df2['n_pow(n_1,3)']"], {}), "(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)'])\n", (11402, 11444), True, 'import numpy as np\n'), ((11451, 11530), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_lg(pow(n_1,3))']", "df2['n_lg(pow(n_1,3))']"], {}), "(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))'])\n", (11480, 11530), True, 'import numpy as np\n'), ((11537, 11627), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_lg(mult(n_1,n_2))']", "df2['n_lg(mult(n_1,n_2))']"], {}), "(df1['n_lg(mult(n_1,n_2))'], df2[\n 'n_lg(mult(n_1,n_2))'])\n", (11566, 11627), True, 'import numpy as np\n'), ((11629, 11766), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']", "df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']"], {}), "(df1[\n 'n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2[\n 'n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])\n", (11658, 11766), True, 'import numpy as np\n'), ((2158, 2293), 'numpy.array', 'np.array', (["[['a', 1, 4, 7, 1 * 1, 4 * 4, 7 * 7], ['b', 2, 5, 8, 2 * 2, 5 * 5, 8 * 8],\n ['c', 3, 6, 9, 3 * 3, 6 * 6, 9 * 9]]", '"""object"""'], {}), "([['a', 1, 4, 7, 1 * 1, 4 * 4, 7 * 7], ['b', 2, 5, 8, 2 * 2, 5 * 5,\n 8 * 8], ['c', 3, 6, 9, 3 * 3, 6 * 6, 9 * 9]], 'object')\n", (2166, 2293), True, 'import numpy as np\n'), ((2540, 2634), 'numpy.array', 'np.array', (["[['a', 1, 4, 7, 4 * 4], ['b', 2, 5, 8, 5 * 5], ['c', 3, 6, 9, 6 * 6]]", '"""object"""'], {}), "([['a', 1, 4, 7, 4 * 4], ['b', 2, 5, 8, 5 * 5], ['c', 3, 6, 9, 6 * \n 6]], 'object')\n", (2548, 2634), True, 'import numpy as np\n'), ((423, 461), 'numpy.array', 'np.array', (["['ad', 'be', 'cf']", '"""object"""'], {}), "(['ad', 'be', 'cf'], 'object')\n", (431, 461), True, 'import numpy as np\n'), ((721, 762), 'numpy.array', 'np.array', (["['had', 'ibe', 'jcf']", '"""object"""'], {}), "(['had', 'ibe', 'jcf'], 'object')\n", (729, 762), True, 'import numpy as np\n'), ((994, 1032), 'numpy.array', 'np.array', (["['a1', 'b2', 'c3']", '"""object"""'], {}), "(['a1', 'b2', 'c3'], 'object')\n", (1002, 1032), True, 'import numpy as np\n'), ((1297, 1338), 'numpy.array', 'np.array', (["['4a1', '5b2', '6c3']", '"""object"""'], {}), "(['4a1', '5b2', '6c3'], 'object')\n", (1305, 1338), True, 'import numpy as np\n'), ((1592, 1619), 'numpy.array', 'np.array', (['[4, 10, 18]', 'long'], {}), '([4, 10, 18], long)\n', (1600, 1619), True, 'import numpy as np\n'), ((1889, 1924), 'numpy.array', 'np.array', (['[4 * 7, 80, 18 * 9]', 'long'], {}), '([4 * 7, 80, 18 * 9], long)\n', (1897, 1924), True, 'import numpy as np\n'), ((2930, 2941), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (2938, 2941), False, 'import unittest, math\n'), ((2943, 2954), 'math.log', 'math.log', (['(4)'], {}), '(4)\n', (2951, 2954), False, 'import unittest, math\n'), ((2956, 2967), 'math.log', 'math.log', (['(7)'], {}), '(7)\n', (2964, 2967), False, 'import unittest, math\n'), ((2994, 3005), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (3002, 3005), False, 'import unittest, math\n'), ((3007, 3018), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (3015, 3018), False, 'import unittest, math\n'), ((3020, 3031), 'math.log', 'math.log', (['(8)'], {}), '(8)\n', (3028, 3031), False, 'import unittest, math\n'), ((3058, 3069), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (3066, 3069), False, 'import unittest, math\n'), ((3071, 3082), 'math.log', 'math.log', (['(6)'], {}), '(6)\n', (3079, 3082), False, 'import unittest, math\n'), ((3084, 3095), 'math.log', 'math.log', (['(9)'], {}), '(9)\n', (3092, 3095), False, 'import unittest, math\n'), ((3379, 3390), 'math.log', 'math.log', (['(4)'], {}), '(4)\n', (3387, 3390), 
False, 'import unittest, math\n'), ((3417, 3428), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (3425, 3428), False, 'import unittest, math\n'), ((3455, 3466), 'math.log', 'math.log', (['(6)'], {}), '(6)\n', (3463, 3466), False, 'import unittest, math\n'), ((3762, 3774), 'math.sqrt', 'math.sqrt', (['(1)'], {}), '(1)\n', (3771, 3774), False, 'import unittest, math\n'), ((3776, 3788), 'math.sqrt', 'math.sqrt', (['(4)'], {}), '(4)\n', (3785, 3788), False, 'import unittest, math\n'), ((3790, 3802), 'math.sqrt', 'math.sqrt', (['(7)'], {}), '(7)\n', (3799, 3802), False, 'import unittest, math\n'), ((3829, 3841), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (3838, 3841), False, 'import unittest, math\n'), ((3843, 3855), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (3852, 3855), False, 'import unittest, math\n'), ((3857, 3869), 'math.sqrt', 'math.sqrt', (['(8)'], {}), '(8)\n', (3866, 3869), False, 'import unittest, math\n'), ((3896, 3908), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (3905, 3908), False, 'import unittest, math\n'), ((3910, 3922), 'math.sqrt', 'math.sqrt', (['(6)'], {}), '(6)\n', (3919, 3922), False, 'import unittest, math\n'), ((3924, 3936), 'math.sqrt', 'math.sqrt', (['(9)'], {}), '(9)\n', (3933, 3936), False, 'import unittest, math\n'), ((4223, 4235), 'math.sqrt', 'math.sqrt', (['(4)'], {}), '(4)\n', (4232, 4235), False, 'import unittest, math\n'), ((4262, 4274), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (4271, 4274), False, 'import unittest, math\n'), ((4301, 4313), 'math.sqrt', 'math.sqrt', (['(6)'], {}), '(6)\n', (4310, 4313), False, 'import unittest, math\n'), ((9479, 9490), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (9487, 9490), False, 'import unittest, math\n'), ((9543, 9554), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (9551, 9554), False, 'import unittest, math\n'), ((9607, 9618), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (9615, 9618), False, 'import unittest, math\n'), ((10042, 10053), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (10050, 10053), False, 'import unittest, math\n'), ((10106, 10117), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (10114, 10117), False, 'import unittest, math\n'), ((10170, 10181), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (10178, 10181), False, 'import unittest, math\n'), ((10611, 10622), 'math.log', 'math.log', (['(1)'], {}), '(1)\n', (10619, 10622), False, 'import unittest, math\n'), ((10675, 10686), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (10683, 10686), False, 'import unittest, math\n'), ((10739, 10750), 'math.log', 'math.log', (['(3)'], {}), '(3)\n', (10747, 10750), False, 'import unittest, math\n')]
|
import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
"""docstring for SimpleBase"""
def __init__(self, configs, simulator):
self.configs = configs
self.sim = simulator.sim
self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
self.transform = None
self.init_state = self.get_full_state()
def execute_action(self, action_name, actuation):
# actions = "turn_right" or "turn_left" or "move_forward"
# returns a bool showing if collided or not
return self._act(action_name, actuation)
def get_full_state(self):
# Returns habitat_sim.agent.AgentState
return self.agent.get_state()
def _rot_matrix(self, habitat_quat):
quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
return prutil.quat_to_rot_mat(quat_list)
def get_state(self, state_type="odom"):
# Returns (x, y, yaw)
assert state_type == "odom", "Error: Only Odom state is available"
cur_state = self.get_full_state()
init_rotation = self._rot_matrix(self.init_state.rotation)
# true position here refers to the relative position from
# where `self.init_state` is treated as origin
true_position = cur_state.position - self.init_state.position
true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
cur_rotation = self._rot_matrix(cur_state.rotation)
cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
(r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
# Habitat has y perpendicular to map where as ROS has z perpendicular
# to the map. Where as x is same.
# Here ROS_X = -1 * habitat_z and ROS_Y = -1*habitat_x
return (-1 * true_position[2], -1 * true_position[0], yaw)
def stop(self):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def set_vel(self, fwd_speed, turn_speed, exe_time=1):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def go_to_relative(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
        Moves the robot to the given goal state
        relative to its initial pose.
        :param xyt_position: The relative goal state of the form (x,y,t)
        :param use_map: When set to "True", ensures that the controller
                        uses only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that the controller
                           operates in closed loop by
                           taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
abs_yaw = cur_yaw + xyt_position[2]
return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)
def go_to_absolute(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
        Moves the robot to the given goal state in the world frame.
        :param xyt_position: The goal state of the form (x,y,t)
                             in the world (map) frame.
        :param use_map: When set to "True", ensures that the controller uses
                        only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that the controller
                           operates in closed loop by
                           taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_X = xyt_position[0] - cur_x
rel_Y = xyt_position[1] - cur_y
abs_yaw = xyt_position[2]
# convert rel_X & rel_Y from global frame to current frame
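        # R rotates world-frame offsets by -cur_yaw into the robot's current frame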
R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
[-np.sin(cur_yaw), np.cos(cur_yaw)]])
rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1,1))
return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)
def _act(self, action_name, actuation):
"""Take the action specified by action_id
:param action_id: ID of the action. Retreives the action from
`agent_config.action_space <AgentConfiguration.action_space>`
:return: Whether or not the action taken resulted in a collision
"""
did_collide = False
act_spec = ActuationSpec(actuation)
did_collide = self.agent.controls.action(
self.agent.scene_node, action_name, act_spec, apply_filter=True
)
return did_collide
def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
# clip relative movements beyond 10 micrometer precision
# this is done to improve determinism, as habitat-sim doesn't
        # seem to move the robot precisely beyond sub-millimeter precision anyway
if abs(rel_x) < 1e-5:
rel_x = 0
if abs(rel_y) < 1e-5:
rel_y = 0
if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
# rotate to point to (x, y) point
action_name = "turn_left"
if rel_y < 0.0:
action_name = "turn_right"
v1 = np.asarray([1, 0], dtype=np.float64)
v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
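            # angle between the robot's heading (+x axis) and the goal direction,
            # recovered from cos(angle) = v1 . v2 / (|v1| * |v2|)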
cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cosine_angle)
did_collide = self._act(action_name, math.degrees(angle))
if did_collide:
print("Error: Collision accured while 1st rotating!")
return False
# move to (x,y) point
did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
if did_collide:
print("Error: Collision accured while moving straight!")
return False
# rotate to match the final yaw!
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_yaw = abs_yaw - cur_yaw
# clip to micro-degree precision to preserve determinism
if abs(rel_yaw) < 1e-4:
rel_yaw = 0
action_name = "turn_left"
if rel_yaw < 0.0:
action_name = "turn_right"
rel_yaw *= -1
did_collide = self._act(action_name, math.degrees(rel_yaw))
if did_collide:
print("Error: Collision accured while rotating!")
return False
return True
def track_trajectory(self, states, controls, close_loop):
"""
State trajectory that the robot should track.
:param states: sequence of (x,y,t) states that the robot should track.
:param controls: optionally specify control sequence as well.
:param close_loop: whether to close loop on the
computed control sequence or not.
:type states: list
:type controls: list
:type close_loop: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
raise NotImplementedError
|
[
"numpy.arccos",
"tf.transformations.euler_from_matrix",
"math.sqrt",
"numpy.asarray",
"habitat_sim.agent.controls.ActuationSpec",
"math.degrees",
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"pyrobot.utils.util.quat_to_rot_mat"
] |
[((1145, 1178), 'pyrobot.utils.util.quat_to_rot_mat', 'prutil.quat_to_rot_mat', (['quat_list'], {}), '(quat_list)\n', (1167, 1178), True, 'import pyrobot.utils.util as prutil\n'), ((1905, 1949), 'tf.transformations.euler_from_matrix', 'euler_from_matrix', (['cur_rotation'], {'axes': '"""sxzy"""'}), "(cur_rotation, axes='sxzy')\n", (1922, 1949), False, 'from tf.transformations import euler_from_quaternion, euler_from_matrix\n'), ((6431, 6455), 'habitat_sim.agent.controls.ActuationSpec', 'ActuationSpec', (['actuation'], {}), '(actuation)\n', (6444, 6455), False, 'from habitat_sim.agent.controls import ActuationSpec\n'), ((7013, 7047), 'math.sqrt', 'math.sqrt', (['(rel_x ** 2 + rel_y ** 2)'], {}), '(rel_x ** 2 + rel_y ** 2)\n', (7022, 7047), False, 'import math\n'), ((7228, 7264), 'numpy.asarray', 'np.asarray', (['[1, 0]'], {'dtype': 'np.float64'}), '([1, 0], dtype=np.float64)\n', (7238, 7264), True, 'import numpy as np\n'), ((7282, 7326), 'numpy.asarray', 'np.asarray', (['[rel_x, rel_y]'], {'dtype': 'np.float64'}), '([rel_x, rel_y], dtype=np.float64)\n', (7292, 7326), True, 'import numpy as np\n'), ((7433, 7456), 'numpy.arccos', 'np.arccos', (['cosine_angle'], {}), '(cosine_angle)\n', (7442, 7456), True, 'import numpy as np\n'), ((8331, 8352), 'math.degrees', 'math.degrees', (['rel_yaw'], {}), '(rel_yaw)\n', (8343, 8352), False, 'import math\n'), ((7354, 7368), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (7360, 7368), True, 'import numpy as np\n'), ((7507, 7526), 'math.degrees', 'math.degrees', (['angle'], {}), '(angle)\n', (7519, 7526), False, 'import math\n'), ((7743, 7777), 'math.sqrt', 'math.sqrt', (['(rel_x ** 2 + rel_y ** 2)'], {}), '(rel_x ** 2 + rel_y ** 2)\n', (7752, 7777), False, 'import math\n'), ((5842, 5857), 'numpy.cos', 'np.cos', (['cur_yaw'], {}), '(cur_yaw)\n', (5848, 5857), True, 'import numpy as np\n'), ((5859, 5874), 'numpy.sin', 'np.sin', (['cur_yaw'], {}), '(cur_yaw)\n', (5865, 5874), True, 'import numpy as np\n'), ((5918, 5933), 'numpy.cos', 'np.cos', (['cur_yaw'], {}), '(cur_yaw)\n', (5924, 5933), True, 'import numpy as np\n'), ((5973, 5997), 'numpy.array', 'np.array', (['[rel_X, rel_Y]'], {}), '([rel_X, rel_Y])\n', (5981, 5997), True, 'import numpy as np\n'), ((7372, 7390), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (7386, 7390), True, 'import numpy as np\n'), ((7393, 7411), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (7407, 7411), True, 'import numpy as np\n'), ((5901, 5916), 'numpy.sin', 'np.sin', (['cur_yaw'], {}), '(cur_yaw)\n', (5907, 5916), True, 'import numpy as np\n')]
|
from enum import Enum
from typing import Generator, Tuple, Iterable, Dict, List
import cv2
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.ndimage import label, generate_binary_structure
from scipy.ndimage.morphology import distance_transform_edt as dist_trans
import trainer.lib as lib
class ImageNormalizations(Enum):
UnitRange = 1
def duplicate_columns(data, minoccur=2):
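    """Return groups of indices of identical columns in ``data``, keeping only groups with at least ``minoccur`` members."""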
ind = np.lexsort(data)
diff = np.any(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)
edges = np.where(diff)[0] + 1
result = np.split(ind, edges)
result = [group for group in result if len(group) >= minoccur]
return result
def pad(small_arr: np.ndarray, size=(30, 30)) -> np.ndarray:
# if small_arr.shape[0] < size[0] or small_arr.shape[1] < size[1]:
size = max(small_arr.shape[0], size[0]), max(small_arr.shape[1], size[1])
res = np.zeros(size, dtype=np.int32)
res[:small_arr.shape[0], :small_arr.shape[1]] = small_arr
return res
# else:
# return small_arr # There is no need for padding
def split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:
"""
Splits an array into its coherent regions.
:param mode: 0 for orthogonal connection, 1 for full connection
:param arr: Numpy array with shape [W, H]
:return: A list with length #NumberOfRegions of arrays with shape [W, H]
"""
res = []
if mode == 0:
rs, num_regions = label(arr)
elif mode == 1:
rs, num_regions = label(arr, structure=generate_binary_structure(2, 2))
else:
raise Exception("Please specify a valid Neighborhood mode for split_into_regions")
for i in range(1, num_regions + 1):
res.append(rs == i)
return res
def normalize_im(im: np.ndarray, norm_type=ImageNormalizations.UnitRange) -> np.ndarray:
"""
Currently just normalizes an image with pixel intensities in range [0, 255] to [-1, 1]
:return: The normalized image
"""
if norm_type == ImageNormalizations.UnitRange:
return (im.astype(np.float32) / 127.5) - 1
else:
raise Exception("Unknown Normalization type")
def distance_transformed(mask: np.ndarray) -> np.ndarray:
if mask.dtype != np.bool:
mask = mask.astype(np.bool)
return dist_trans(np.invert(mask).astype(np.float32))
def one_hot_to_cont(x: np.ndarray) -> np.ndarray:
"""
Convert a one hot encoded image into the same image with integer representations.
:param x: np.ndarray with (C, W, H)
:return: np.ndarray with (W, H)
"""
return np.argmax(x, axis=len(x.shape) - 3)
def cont_to_ont_hot(arr: np.ndarray, n_values=-1) -> np.ndarray:
if n_values == -1:
n_values = np.max(arr) + 1
res = np.zeros((n_values,) + arr.shape)
for v in np.unique(arr):
res[v, :, :][arr == v] = 1
return res
def reduce_by_attention(arr: np.ndarray, att: np.ndarray):
"""
Reduce an array by a field of attention, such that the result is a rectangle with the empty borders cropped.
:param arr: Target array. The last two dimensions need to be of the same shape as the attention field
:param att: field of attention
:return: cropped array
"""
assert arr.shape[-2] == att.shape[0] and arr.shape[-1] == att.shape[1]
ones = np.argwhere(att)
lmost, rmost = np.min(ones[:, 0]), np.max(ones[:, 0]) + 1
bmost, tmost = np.min(ones[:, 1]), np.max(ones[:, 1]) + 1
grid_slice = [slice(None) for _ in range(len(arr.shape) - 2)]
grid_slice.extend([slice(lmost, rmost), slice(bmost, tmost)])
return arr[tuple(grid_slice)], att[lmost:rmost, bmost:tmost], (lmost, rmost, bmost, tmost)
def pair_augmentation(g: Iterable[Tuple[np.ndarray, np.ndarray, int]], aug_ls) -> Iterable[Tuple[np.ndarray, np.ndarray, int]]:
import imgaug.augmenters as iaa
seq = iaa.Sequential(aug_ls)
for im, gt, frame_number in g:
im_prep = im[frame_number] if im.shape[3] > 1 else im.squeeze()
gt_prep = np.expand_dims(gt, len(gt.shape))
images_aug = seq(images=[im_prep], segmentation_maps=[gt_prep])
yield images_aug[0][0].astype(np.float32), images_aug[1][0][:, :, 0].astype(np.float32), frame_number
def insert_np_at(a1: np.ndarray, a2: np.ndarray, pos: Tuple[int, int], filter_arr=None) -> np.ndarray:
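    """Paste ``a2`` into a copy of ``a1`` with its top-left corner at ``pos``,
    clipping parts that fall outside ``a1`` and writing only where ``filter_arr`` is True."""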
assert len(a1.shape) == 2 and len(a2.shape) == 2
if filter_arr is None:
filter_arr = np.ones_like(a2).astype(np.bool)
x, y = pos
res = np.copy(a1)
a1_x = slice(x, min(x + a2.shape[0], a1.shape[0]))
a1_y = slice(y, min(y + a2.shape[1], a1.shape[1]))
if x + a2.shape[0] <= a1.shape[0]:
a2_x = slice(0, a2.shape[0])
else:
a2_x = slice(0, a1.shape[0] - (x + a2.shape[0]))
if y + a2.shape[1] <= a1.shape[1]:
a2_y = slice(0, a2.shape[1])
else:
a2_y = slice(0, a1.shape[1] - (y + a2.shape[1]))
item_filter = filter_arr[(a2_x, a2_y)]
assert res[(a1_x, a1_y)].shape == a2[(a2_x, a2_y)].shape
res[(a1_x, a1_y)][item_filter] = a2[(a2_x, a2_y)][item_filter]
return res
if __name__ == '__main__':
fit = insert_np_at(np.ones((10, 10)), np.ones((3, 3)) * 2, (2, 3))
too_big1 = insert_np_at(np.ones((10, 10)), np.ones((3, 10)) * 2, (2, 3))
too_big = insert_np_at(np.ones((10, 10)), np.ones((10, 10)) * 2, (2, 3))
# def put_array(big_arr: np.ndarray, small_arr: np.ndarray, offset=(0, 0)) -> np.ndarray:
# """
# Puts the small array into the big array. Ignores problems and does its best to fulfill the task
# """
# b, t =
# big_arr[]
# big_arr = np.putmask(big_arr, )
# if __name__ == '__main__':
# # a = np.zeros((10, 10))
# # b = np.random.random((4, 4))
# # c = put_array(a, b)
# # lib.logger.debug_var(c)
|
[
"numpy.copy",
"numpy.ones_like",
"numpy.unique",
"numpy.ones",
"numpy.where",
"scipy.ndimage.generate_binary_structure",
"scipy.ndimage.label",
"numpy.any",
"numpy.max",
"numpy.invert",
"numpy.lexsort",
"numpy.split",
"numpy.zeros",
"numpy.argwhere",
"imgaug.augmenters.Sequential",
"numpy.min"
] |
[((431, 447), 'numpy.lexsort', 'np.lexsort', (['data'], {}), '(data)\n', (441, 447), True, 'import numpy as np\n'), ((459, 510), 'numpy.any', 'np.any', (['(data.T[ind[1:]] != data.T[ind[:-1]])'], {'axis': '(1)'}), '(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)\n', (465, 510), True, 'import numpy as np\n'), ((558, 578), 'numpy.split', 'np.split', (['ind', 'edges'], {}), '(ind, edges)\n', (566, 578), True, 'import numpy as np\n'), ((886, 916), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (894, 916), True, 'import numpy as np\n'), ((2739, 2772), 'numpy.zeros', 'np.zeros', (['((n_values,) + arr.shape)'], {}), '((n_values,) + arr.shape)\n', (2747, 2772), True, 'import numpy as np\n'), ((2786, 2800), 'numpy.unique', 'np.unique', (['arr'], {}), '(arr)\n', (2795, 2800), True, 'import numpy as np\n'), ((3297, 3313), 'numpy.argwhere', 'np.argwhere', (['att'], {}), '(att)\n', (3308, 3313), True, 'import numpy as np\n'), ((3831, 3853), 'imgaug.augmenters.Sequential', 'iaa.Sequential', (['aug_ls'], {}), '(aug_ls)\n', (3845, 3853), True, 'import imgaug.augmenters as iaa\n'), ((4459, 4470), 'numpy.copy', 'np.copy', (['a1'], {}), '(a1)\n', (4466, 4470), True, 'import numpy as np\n'), ((1448, 1458), 'scipy.ndimage.label', 'label', (['arr'], {}), '(arr)\n', (1453, 1458), False, 'from scipy.ndimage import label, generate_binary_structure\n'), ((3333, 3351), 'numpy.min', 'np.min', (['ones[:, 0]'], {}), '(ones[:, 0])\n', (3339, 3351), True, 'import numpy as np\n'), ((3395, 3413), 'numpy.min', 'np.min', (['ones[:, 1]'], {}), '(ones[:, 1])\n', (3401, 3413), True, 'import numpy as np\n'), ((5107, 5124), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5114, 5124), True, 'import numpy as np\n'), ((5183, 5200), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5190, 5200), True, 'import numpy as np\n'), ((5259, 5276), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5266, 5276), True, 'import numpy as np\n'), ((523, 537), 'numpy.where', 'np.where', (['diff'], {}), '(diff)\n', (531, 537), True, 'import numpy as np\n'), ((2713, 2724), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (2719, 2724), True, 'import numpy as np\n'), ((3353, 3371), 'numpy.max', 'np.max', (['ones[:, 0]'], {}), '(ones[:, 0])\n', (3359, 3371), True, 'import numpy as np\n'), ((3415, 3433), 'numpy.max', 'np.max', (['ones[:, 1]'], {}), '(ones[:, 1])\n', (3421, 3433), True, 'import numpy as np\n'), ((5126, 5141), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (5133, 5141), True, 'import numpy as np\n'), ((5202, 5218), 'numpy.ones', 'np.ones', (['(3, 10)'], {}), '((3, 10))\n', (5209, 5218), True, 'import numpy as np\n'), ((5278, 5295), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (5285, 5295), True, 'import numpy as np\n'), ((2290, 2305), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (2299, 2305), True, 'import numpy as np\n'), ((4401, 4417), 'numpy.ones_like', 'np.ones_like', (['a2'], {}), '(a2)\n', (4413, 4417), True, 'import numpy as np\n'), ((1526, 1557), 'scipy.ndimage.generate_binary_structure', 'generate_binary_structure', (['(2)', '(2)'], {}), '(2, 2)\n', (1551, 1557), False, 'from scipy.ndimage import label, generate_binary_structure\n')]
|
#!/usr/bin/env python
'''
Advent of Code 2021 - Day 9: Smoke Basin (Part 1)
https://adventofcode.com/2021/day/9
'''
import numpy as np
class HeightMap():
def __init__(self) -> None:
self._grid = np.array([])
def add_row(self, row):
np_row = np.array(row)
if self._grid.size != 0:
self._grid = np.vstack([self._grid, np_row])
else:
self._grid = np_row
def find_low_points(self, radius=1):
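        """Return the values of all points that are strictly lower than every orthogonal neighbor."""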
low_points = []
for index, point in np.ndenumerate(self._grid):
neighbor_points = self._neighbors(radius, coordinates=index)
if point < min(neighbor_points):
low_points.append(point)
return low_points
def _neighbors(self, radius, coordinates=(0, 0)):
neighbors = []
row = coordinates[0]
column = coordinates[1]
# Get UP neighbor value
if row >= 1:
neighbors.append(self._grid[row - radius, column])
# Get LEFT neighbor value
if column >= 1:
neighbors.append(self._grid[row, column - radius])
# Get RIGHT neighbor value
if column < len(self._grid[0]) - radius:
neighbors.append(self._grid[row, column + radius])
# Get DOWN neighbor value
if row < len(self._grid) - radius:
neighbors.append(self._grid[row + radius, column])
return neighbors
def __str__(self) -> str:
output = ""
for row in self._grid:
for elem in row:
output = output + f"{elem:>3}"
output = output + "\n"
return output
def calculate_risk(heights):
# Risk is 1 plus the height
return sum([height + 1 for height in heights])
def main():
filename = input("What is the input file name? ")
try:
with open(filename, "r") as file:
            # Create a new height map
area = HeightMap()
# Read the rows and setup the HeightMap
for line in file:
line = line.strip()
input_row = [int(x) for x in str(line)]
area.add_row(input_row)
print("The input grid: ")
print(area)
low_points = area.find_low_points()
sum_risk_levels = calculate_risk(
low_points) if low_points else None
if sum_risk_levels:
low_points_str = [str(point) for point in low_points]
print(f"Number of low points: {len(low_points)}")
print(f"Low points: {', '.join(low_points_str)}")
print(
f"\nThe sum of the risk levels of all low points is: {sum_risk_levels}\n")
else:
print("The sum of the risk levels of all low points not found.\n")
except FileNotFoundError:
print(f"No such file or directory: '{filename}'")
if __name__ == "__main__":
main()
|
[
"numpy.array",
"numpy.vstack",
"numpy.ndenumerate"
] |
[((211, 223), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (219, 223), True, 'import numpy as np\n'), ((270, 283), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (278, 283), True, 'import numpy as np\n'), ((514, 540), 'numpy.ndenumerate', 'np.ndenumerate', (['self._grid'], {}), '(self._grid)\n', (528, 540), True, 'import numpy as np\n'), ((342, 373), 'numpy.vstack', 'np.vstack', (['[self._grid, np_row]'], {}), '([self._grid, np_row])\n', (351, 373), True, 'import numpy as np\n')]
|
import sklearn.mixture
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
import matplotlib.patheffects as mpatheffects
def get_gmm_and_pos_label(
array, n_components=2, n_steps=5000
):
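    """Fit a spherical GMM to ``array`` and return the model, the index of the
    component with the largest mean, and the cutoff values where the predicted
    component changes along a reference grid spanning the component means."""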
gmm = sklearn.mixture.GaussianMixture(
n_components=n_components, covariance_type='spherical', random_state=0
)
gmm.fit(array.reshape(-1, 1))
label = np.argmax(gmm.means_)
# low = array.min()
# high = array.max()
low = gmm.means_.min() - 2*np.sqrt(gmm.covariances_[np.argmin(gmm.means_)])
high = gmm.means_.max() + 2*np.sqrt(gmm.covariances_[np.argmax(gmm.means_)])
ref_space = np.linspace(low, high, n_steps)
result = gmm.predict(ref_space.reshape(-1, 1))
idx = np.where(np.ediff1d(result) != 0)
cutoffs = ref_space[idx]
return gmm, label, cutoffs
def _get_gmm_and_pos_label(array, n_components=2):
gmm = sklearn.mixture.GaussianMixture(
n_components=n_components, covariance_type='spherical', random_state=0
)
gmm.fit(array.reshape(-1, 1))
label = np.argmax(gmm.means_)
low = np.expm1(array.min())
high = np.expm1(array.max())
ref_space = np.arange(low, high)
ref_space = np.log1p(ref_space)
result = gmm.predict(ref_space.reshape(-1, 1))
idx = np.where(np.ediff1d(result) != 0)
_cutoffs = ref_space[idx]
diff_mean = np.absolute(_cutoffs - np.mean(array))
diff_high = np.absolute(_cutoffs - np.log1p(high))
cutoffs = _cutoffs[diff_mean < diff_high]
cutoff = np.expm1(cutoffs.max())
# cutoff = cutoffs[np.argmin(diff_mean < diff_high)]
# return gmm, label, cutoff
return gmm, label, _cutoffs
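    # NOTE: the statements below are unreachable because of the return above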
diff_mean = np.absolute(_cutoffs - np.mean(np.expm1(array)))
diff_high = np.absolute(_cutoffs - high)
diff_low = np.absolute(_cutoffs - low)
between = (diff_mean < diff_high) & (diff_mean < diff_low)
cutoffs = _cutoffs[between]
cutoff = cutoffs[np.argmax(between)]
return gmm, label, cutoff
def plot_gmm_fitting(array, gmm, ax):
plt.sca(ax)
_ = plt.hist(array.flatten(), color='lightgray', bins=200, density=True)
x = np.linspace(array.min(), array.max(), 200)
log_prob = gmm.score_samples(x.reshape(-1, 1))
responsibilities = gmm.predict_proba(x.reshape(-1, 1))
pdf = np.exp(log_prob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
mean_index = np.argmax(pdf_individual, axis=0)
rank_map = mean_index.argsort().argsort()
ax.set_prop_cycle(
color=plt.get_cmap('Dark2')(rank_map)
)
ax.plot(x, pdf_individual)
ax.plot(x, pdf, '--k')
return ax
def auto_gate_func(array, n_components=3, n_stds=3, log_transform=True):
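    """Fit a GMM (optionally on log1p-transformed data) and return a gate threshold:
    the mean of the highest-mean component minus ``n_stds`` of its standard deviation."""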
gmm = sklearn.mixture.GaussianMixture(
n_components=n_components, covariance_type='spherical', random_state=0
)
if log_transform:
gmm.fit(np.log1p(array).reshape(-1, 1))
else:
gmm.fit(array.reshape(-1, 1))
means = gmm.means_
stds = np.sqrt(gmm.covariances_)
idx = np.argmax(means)
lower_bound = means[idx] - n_stds * stds[idx]
if log_transform:
return np.expm1(lower_bound)
else:
return lower_bound
def plot_cumulative(array, ax, hist_kwargs={}):
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax.yaxis.set_major_formatter(formatter)
_ = ax.hist(array, histtype='step', bins=300, cumulative=1, **hist_kwargs)
return ax
def gmm_label_map_by_mean(gmm):
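    """Build a mapping between GMM component indices and their order when components are sorted by ascending mean."""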
return {
o:n
for o, n in zip(
range(len(gmm.means_)),
sorted(range(len(gmm.means_)), key=lambda x: gmm.means_[x][0])
)
}
def sort_predict_label(gmm, labels):
mapping = gmm_label_map_by_mean(gmm)
sorted_labels = labels.copy()
    for k, v in mapping.items():
sorted_labels[labels==k] = v
return sorted_labels
def plot_hist_gmm(
df,
markers,
n_components=2,
subplot_grid_shape=None,
transform_log=True,
xlim_percentiles=(0, 100),
cum_density=False,
hide_yaxis_left=True
):
if transform_log:
df = df.transform(np.log1p)
revert_func = np.expm1
else:
revert_func = np.array
if subplot_grid_shape is None:
subplot_grid_shape = (1, len(markers))
n_rows, n_cols = subplot_grid_shape
fig, axes = plt.subplots(n_rows, n_cols, sharex=True)
axes = np.array(axes)
for m, ax in zip(markers, axes.ravel()):
gmm, _, cutoffs = get_gmm_and_pos_label(
df[m].values, n_components=n_components
)
plot_gmm_fitting(df[m].values, gmm, ax)
ax.title.set_text(m)
if hide_yaxis_left:
ax.yaxis.set_visible(False)
p1, p2 = np.array(xlim_percentiles) / 100
axis_min = df.loc[:, markers].quantile(p1).min()
axis_max = df.loc[:, markers].quantile(p2).max()
color_cum = 'gray'
pax = ax.twinx()
pax = plot_cumulative(
df[m].values, pax,
hist_kwargs=dict(color=color_cum, density=cum_density)
)
pax.tick_params(axis='y', labelsize=8, colors=color_cum)
print(cutoffs)
cutoff_range = np.ptp(cutoffs)
if cutoff_range == 0: cutoff_range = 1
cutoff_colors = plt.get_cmap('plasma')(
(cutoffs - np.min(cutoffs)) / cutoff_range
)
for co, cc in zip(cutoffs, cutoff_colors):
ax.axvline(x=co, c=cc, alpha=0.2)
ax.annotate(
'',
xy=(co, 0), xytext=(co, -0.05),
xycoords=('data', 'axes fraction'),
arrowprops=dict(arrowstyle='wedge, tail_width=0.7, shrink_factor=0.5', color=cc)
)
ax.set_xlim(axis_min, axis_max)
# cutoff_string = np.round(revert_func(cutoffs)).astype(int)
for i, (co, cc) in enumerate(
zip(revert_func(cutoffs)[::-1], cutoff_colors[::-1])
):
text = ax.text(
ax.get_xlim()[0] + 0.02*np.diff(ax.get_xlim()),
ax.get_ylim()[1] - 0.05*(i+1)*np.diff(ax.get_ylim()),
f'{np.round(co).astype(int)}',
fontsize=10, c=cc
)
text_outline = mpatheffects.Stroke(linewidth=1, foreground='#000')
text.set_path_effects(
[text_outline, mpatheffects.Normal()]
)
plt.tight_layout()
for aax in fig.axes:
aax.spines['right'].set_color(color_cum)
power_label = aax.yaxis.get_offset_text()
power_label.set_visible(False)
aax.annotate(
power_label.get_text(), xy=(1.02, 1.01),
xycoords='axes fraction', fontsize=10,
color=color_cum
)
plt.sca(ax)
|
[
"numpy.ptp",
"numpy.sqrt",
"matplotlib.patheffects.Normal",
"numpy.array",
"matplotlib.ticker.ScalarFormatter",
"numpy.arange",
"numpy.mean",
"numpy.exp",
"numpy.linspace",
"numpy.min",
"numpy.argmin",
"numpy.round",
"numpy.ediff1d",
"numpy.argmax",
"matplotlib.pyplot.get_cmap",
"numpy.absolute",
"numpy.expm1",
"matplotlib.pyplot.sca",
"matplotlib.patheffects.Stroke",
"matplotlib.pyplot.tight_layout",
"numpy.log1p",
"matplotlib.pyplot.subplots"
] |
[((410, 431), 'numpy.argmax', 'np.argmax', (['gmm.means_'], {}), '(gmm.means_)\n', (419, 431), True, 'import numpy as np\n'), ((669, 700), 'numpy.linspace', 'np.linspace', (['low', 'high', 'n_steps'], {}), '(low, high, n_steps)\n', (680, 700), True, 'import numpy as np\n'), ((1099, 1120), 'numpy.argmax', 'np.argmax', (['gmm.means_'], {}), '(gmm.means_)\n', (1108, 1120), True, 'import numpy as np\n'), ((1207, 1227), 'numpy.arange', 'np.arange', (['low', 'high'], {}), '(low, high)\n', (1216, 1227), True, 'import numpy as np\n'), ((1245, 1264), 'numpy.log1p', 'np.log1p', (['ref_space'], {}), '(ref_space)\n', (1253, 1264), True, 'import numpy as np\n'), ((1807, 1835), 'numpy.absolute', 'np.absolute', (['(_cutoffs - high)'], {}), '(_cutoffs - high)\n', (1818, 1835), True, 'import numpy as np\n'), ((1852, 1879), 'numpy.absolute', 'np.absolute', (['(_cutoffs - low)'], {}), '(_cutoffs - low)\n', (1863, 1879), True, 'import numpy as np\n'), ((2102, 2113), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (2109, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2369, 2385), 'numpy.exp', 'np.exp', (['log_prob'], {}), '(log_prob)\n', (2375, 2385), True, 'import numpy as np\n'), ((2466, 2499), 'numpy.argmax', 'np.argmax', (['pdf_individual'], {'axis': '(0)'}), '(pdf_individual, axis=0)\n', (2475, 2499), True, 'import numpy as np\n'), ((3069, 3094), 'numpy.sqrt', 'np.sqrt', (['gmm.covariances_'], {}), '(gmm.covariances_)\n', (3076, 3094), True, 'import numpy as np\n'), ((3106, 3122), 'numpy.argmax', 'np.argmax', (['means'], {}), '(means)\n', (3115, 3122), True, 'import numpy as np\n'), ((3344, 3384), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (3366, 3384), False, 'from matplotlib import ticker\n'), ((4539, 4580), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {'sharex': '(True)'}), '(n_rows, n_cols, sharex=True)\n', (4551, 4580), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4607), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (4601, 4607), True, 'import numpy as np\n'), ((6638, 6656), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6654, 6656), True, 'import matplotlib.pyplot as plt\n'), ((6998, 7009), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (7005, 7009), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2021), 'numpy.argmax', 'np.argmax', (['between'], {}), '(between)\n', (2012, 2021), True, 'import numpy as np\n'), ((3213, 3234), 'numpy.expm1', 'np.expm1', (['lower_bound'], {}), '(lower_bound)\n', (3221, 3234), True, 'import numpy as np\n'), ((5408, 5423), 'numpy.ptp', 'np.ptp', (['cutoffs'], {}), '(cutoffs)\n', (5414, 5423), True, 'import numpy as np\n'), ((775, 793), 'numpy.ediff1d', 'np.ediff1d', (['result'], {}), '(result)\n', (785, 793), True, 'import numpy as np\n'), ((1339, 1357), 'numpy.ediff1d', 'np.ediff1d', (['result'], {}), '(result)\n', (1349, 1357), True, 'import numpy as np\n'), ((1437, 1451), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (1444, 1451), True, 'import numpy as np\n'), ((1493, 1507), 'numpy.log1p', 'np.log1p', (['high'], {}), '(high)\n', (1501, 1507), True, 'import numpy as np\n'), ((4939, 4965), 'numpy.array', 'np.array', (['xlim_percentiles'], {}), '(xlim_percentiles)\n', (4947, 4965), True, 'import numpy as np\n'), ((5497, 5519), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (5509, 5519), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6526), 
'matplotlib.patheffects.Stroke', 'mpatheffects.Stroke', ([], {'linewidth': '(1)', 'foreground': '"""#000"""'}), "(linewidth=1, foreground='#000')\n", (6494, 6526), True, 'import matplotlib.patheffects as mpatheffects\n'), ((1772, 1787), 'numpy.expm1', 'np.expm1', (['array'], {}), '(array)\n', (1780, 1787), True, 'import numpy as np\n'), ((2588, 2609), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Dark2"""'], {}), "('Dark2')\n", (2600, 2609), True, 'import matplotlib.pyplot as plt\n'), ((544, 565), 'numpy.argmin', 'np.argmin', (['gmm.means_'], {}), '(gmm.means_)\n', (553, 565), True, 'import numpy as np\n'), ((626, 647), 'numpy.argmax', 'np.argmax', (['gmm.means_'], {}), '(gmm.means_)\n', (635, 647), True, 'import numpy as np\n'), ((2951, 2966), 'numpy.log1p', 'np.log1p', (['array'], {}), '(array)\n', (2959, 2966), True, 'import numpy as np\n'), ((5545, 5560), 'numpy.min', 'np.min', (['cutoffs'], {}), '(cutoffs)\n', (5551, 5560), True, 'import numpy as np\n'), ((6595, 6616), 'matplotlib.patheffects.Normal', 'mpatheffects.Normal', ([], {}), '()\n', (6614, 6616), True, 'import matplotlib.patheffects as mpatheffects\n'), ((6368, 6380), 'numpy.round', 'np.round', (['co'], {}), '(co)\n', (6376, 6380), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2020/5/14 20:41
# @Author: Mecthew
import time
import numpy as np
import pandas as pd
import scipy
from sklearn.svm import LinearSVC
from sklearn.linear_model import logistic
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
import scipy.sparse as sp
from utils.logger import get_logger
logger = get_logger("INFO")
class SVM:
def __init__(self, **kwargs):
self.name = "SVM"
self._model = CalibratedClassifierCV(LinearSVC(C=1.0, max_iter=500, class_weight=None, random_state=666))
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
class LR:
def __init__(self, **kwargs):
self.name = "LR"
self._model = logistic.LogisticRegression(C=1.0, solver="liblinear", multi_class="auto",
class_weight=None, max_iter=100, random_state=666)
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
def prepredict(graph_df, train_indices, use_valid, use_ohe=False):
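    """Fit a logistic-regression model on the training nodes' features and return its
    predicted class probabilities (optionally one-hot encoded) for train and test nodes
    as a DataFrame indexed by node index."""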
t1 = time.time()
fea_table = graph_df['fea_table'].set_index(keys="node_index")
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']]
x_train, y_train = fea_table.loc[train_indices].to_numpy(), train_label.to_numpy()
x_test = fea_table.loc[test_indices].to_numpy()
lr = LR()
lr.fit(x_train, y_train)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(y_train.reshape(-1, 1))
x_train_feat, x_test_feat = ohe.transform(np.argmax(lr.predict(x_train), axis=1).reshape(-1, 1)).toarray(), \
ohe.transform(np.argmax(lr.predict(x_test), axis=1).reshape(-1, 1)).toarray()
else:
x_train_feat, x_test_feat = lr.predict(x_train), \
lr.predict(x_test)
pre_feat = np.concatenate([x_train_feat, x_test_feat], axis=0)
total_indices = np.concatenate([train_indices, test_indices], axis=0)
train_predict = np.argmax(x_train_feat, axis=1)
train_acc = accuracy_score(y_true=y_train, y_pred=train_predict)
t2 = time.time()
logger.info("Time cost for training {}: {}s, train acc {}".format(lr.name, t2-t1, train_acc))
return pd.DataFrame(data=pre_feat, index=total_indices)
def lpa_predict(graph_df, n_class, train_indices, use_valid, max_iter=100, tol=1e-3, use_ohe=False):
t1 = time.time()
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']].to_numpy()
print("Train label shape {}".format(train_label.shape))
train_label = train_label.reshape(-1)
edges = graph_df['edge_file'][['src_idx', 'dst_idx', 'edge_weight']].to_numpy()
edge_index = edges[:, :2].astype(np.int).transpose() # transpose to (2, num_edges)
edge_weight = edges[:, 2].astype(np.float)
num_nodes = len(train_indices) + len(test_indices)
t2 = time.time()
total_indices = np.concatenate([train_indices, test_indices], axis=0)
adj = sp.coo_matrix((edge_weight, edge_index), shape=(num_nodes, num_nodes)).tocsr()
adj = adj[total_indices] # reorder
adj = adj[:, total_indices]
t3 = time.time()
logger.debug("Time cost for transform adj {}s".format(t3 - t2))
row_sum = np.array(adj.sum(axis=1), dtype=np.float)
d_inv = np.power(row_sum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
normal_adj = sp.diags(d_inv).dot(adj).tocsr().transpose()
Pll = normal_adj[:len(train_indices), :len(train_indices)].copy()
Plu = normal_adj[:len(train_indices), len(train_indices):].copy()
Pul = normal_adj[len(train_indices):, :len(train_indices)].copy()
Puu = normal_adj[len(train_indices):, len(train_indices):].copy()
label_mat = np.eye(n_class)[train_label]
label_mat_prob = label_mat.copy()
print("Pul shape {}, label_mat shape {}".format(Pul.shape, label_mat_prob.shape))
Pul_dot_lable_mat = Pul.dot(label_mat)
unlabel_mat = np.zeros(shape=(len(test_indices), n_class))
iter, changed = 0, np.inf
t4 = time.time()
logger.debug("Time cost for prepare matrix {}s".format(t4-t3))
while iter < max_iter and changed > tol:
if iter % 10 == 0:
logger.debug("---> Iteration %d/%d, changed: %f" % (iter, max_iter, changed))
iter += 1
pre_unlabel_mat = unlabel_mat
unlabel_mat = Puu.dot(unlabel_mat) + Pul_dot_lable_mat
label_mat_prob = Pll.dot(label_mat_prob) + Plu.dot(pre_unlabel_mat)
changed = np.abs(pre_unlabel_mat - unlabel_mat).sum()
logger.debug("Time cost for training lpa {}".format(time.time() - t4))
# preds = np.argmax(np.array(unlabel_mat), axis=1)
# unlabel_mat = np.eye(n_class)[preds]
train_acc = accuracy_score(y_true=train_label, y_pred=np.argmax(label_mat_prob, axis=1))
logger.info("LPA training acc {}".format(train_acc))
logger.info("Time cost for LPA {}s".format(time.time() - t1))
total_indices = np.concatenate([train_indices, test_indices], axis=0)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(train_label.reshape(-1, 1))
label_mat_ohe = ohe.transform(np.argmax(label_mat_prob, axis=1).reshape(-1, 1)).toarray()
unlabel_mat_ohe = ohe.transform(np.argmax(unlabel_mat, axis=1).reshape(-1, 1)).toarray()
lu_mat_ohe = np.concatenate([label_mat_ohe, unlabel_mat_ohe], axis=0)
return pd.DataFrame(data=lu_mat_ohe, index=total_indices), train_acc
else:
unlabel_mat_prob = unlabel_mat
lu_mat_prob = np.concatenate([label_mat_prob, unlabel_mat_prob], axis=0)
return pd.DataFrame(data=lu_mat_prob, index=total_indices), train_acc
def is_nonnegative_integer(x_feats):
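    """Return True if every feature vector in ``x_feats`` contains only non-negative integer values."""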
is_nonnegative = (x_feats >= 0).all()
is_integer = True
for feat in x_feats:
feat_int_sum = np.array(feat, dtype=np.int).sum()
feat_sum = np.array(feat, dtype=np.float).sum()
is_integer = (feat_int_sum == feat_sum)
if is_integer is False:
break
return is_nonnegative and is_integer
|
[
"sklearn.metrics.accuracy_score",
"numpy.abs",
"numpy.eye",
"scipy.sparse.diags",
"numpy.power",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.svm.LinearSVC",
"numpy.argmax",
"numpy.array",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"pandas.DataFrame",
"sklearn.linear_model.logistic.LogisticRegression",
"numpy.isinf",
"time.time",
"utils.logger.get_logger"
] |
[((452, 470), 'utils.logger.get_logger', 'get_logger', (['"""INFO"""'], {}), "('INFO')\n", (462, 470), False, 'from utils.logger import get_logger\n'), ((1327, 1338), 'time.time', 'time.time', ([], {}), '()\n', (1336, 1338), False, 'import time\n'), ((2410, 2461), 'numpy.concatenate', 'np.concatenate', (['[x_train_feat, x_test_feat]'], {'axis': '(0)'}), '([x_train_feat, x_test_feat], axis=0)\n', (2424, 2461), True, 'import numpy as np\n'), ((2482, 2535), 'numpy.concatenate', 'np.concatenate', (['[train_indices, test_indices]'], {'axis': '(0)'}), '([train_indices, test_indices], axis=0)\n', (2496, 2535), True, 'import numpy as np\n'), ((2557, 2588), 'numpy.argmax', 'np.argmax', (['x_train_feat'], {'axis': '(1)'}), '(x_train_feat, axis=1)\n', (2566, 2588), True, 'import numpy as np\n'), ((2605, 2657), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_train', 'y_pred': 'train_predict'}), '(y_true=y_train, y_pred=train_predict)\n', (2619, 2657), False, 'from sklearn.metrics import accuracy_score\n'), ((2667, 2678), 'time.time', 'time.time', ([], {}), '()\n', (2676, 2678), False, 'import time\n'), ((2789, 2837), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_feat', 'index': 'total_indices'}), '(data=pre_feat, index=total_indices)\n', (2801, 2837), True, 'import pandas as pd\n'), ((2950, 2961), 'time.time', 'time.time', ([], {}), '()\n', (2959, 2961), False, 'import time\n'), ((3713, 3724), 'time.time', 'time.time', ([], {}), '()\n', (3722, 3724), False, 'import time\n'), ((3745, 3798), 'numpy.concatenate', 'np.concatenate', (['[train_indices, test_indices]'], {'axis': '(0)'}), '([train_indices, test_indices], axis=0)\n', (3759, 3798), True, 'import numpy as np\n'), ((3975, 3986), 'time.time', 'time.time', ([], {}), '()\n', (3984, 3986), False, 'import time\n'), ((4845, 4856), 'time.time', 'time.time', ([], {}), '()\n', (4854, 4856), False, 'import time\n'), ((5753, 5806), 'numpy.concatenate', 'np.concatenate', (['[train_indices, test_indices]'], {'axis': '(0)'}), '([train_indices, test_indices], axis=0)\n', (5767, 5806), True, 'import numpy as np\n'), ((912, 1041), 'sklearn.linear_model.logistic.LogisticRegression', 'logistic.LogisticRegression', ([], {'C': '(1.0)', 'solver': '"""liblinear"""', 'multi_class': '"""auto"""', 'class_weight': 'None', 'max_iter': '(100)', 'random_state': '(666)'}), "(C=1.0, solver='liblinear', multi_class='auto',\n class_weight=None, max_iter=100, random_state=666)\n", (939, 1041), False, 'from sklearn.linear_model import logistic\n'), ((4165, 4180), 'numpy.isinf', 'np.isinf', (['d_inv'], {}), '(d_inv)\n', (4173, 4180), True, 'import numpy as np\n'), ((4546, 4561), 'numpy.eye', 'np.eye', (['n_class'], {}), '(n_class)\n', (4552, 4561), True, 'import numpy as np\n'), ((6124, 6180), 'numpy.concatenate', 'np.concatenate', (['[label_mat_ohe, unlabel_mat_ohe]'], {'axis': '(0)'}), '([label_mat_ohe, unlabel_mat_ohe], axis=0)\n', (6138, 6180), True, 'import numpy as np\n'), ((6329, 6387), 'numpy.concatenate', 'np.concatenate', (['[label_mat_prob, unlabel_mat_prob]'], {'axis': '(0)'}), '([label_mat_prob, unlabel_mat_prob], axis=0)\n', (6343, 6387), True, 'import numpy as np\n'), ((589, 656), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1.0)', 'max_iter': '(500)', 'class_weight': 'None', 'random_state': '(666)'}), '(C=1.0, max_iter=500, class_weight=None, random_state=666)\n', (598, 656), False, 'from sklearn.svm import LinearSVC\n'), ((3809, 3879), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(edge_weight, edge_index)'], {'shape': 
'(num_nodes, num_nodes)'}), '((edge_weight, edge_index), shape=(num_nodes, num_nodes))\n', (3822, 3879), True, 'import scipy.sparse as sp\n'), ((4123, 4144), 'numpy.power', 'np.power', (['row_sum', '(-1)'], {}), '(row_sum, -1)\n', (4131, 4144), True, 'import numpy as np\n'), ((5575, 5608), 'numpy.argmax', 'np.argmax', (['label_mat_prob'], {'axis': '(1)'}), '(label_mat_prob, axis=1)\n', (5584, 5608), True, 'import numpy as np\n'), ((6196, 6246), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'lu_mat_ohe', 'index': 'total_indices'}), '(data=lu_mat_ohe, index=total_indices)\n', (6208, 6246), True, 'import pandas as pd\n'), ((6403, 6454), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'lu_mat_prob', 'index': 'total_indices'}), '(data=lu_mat_prob, index=total_indices)\n', (6415, 6454), True, 'import pandas as pd\n'), ((1972, 2010), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (1985, 2010), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((5300, 5337), 'numpy.abs', 'np.abs', (['(pre_unlabel_mat - unlabel_mat)'], {}), '(pre_unlabel_mat - unlabel_mat)\n', (5306, 5337), True, 'import numpy as np\n'), ((5400, 5411), 'time.time', 'time.time', ([], {}), '()\n', (5409, 5411), False, 'import time\n'), ((5714, 5725), 'time.time', 'time.time', ([], {}), '()\n', (5723, 5725), False, 'import time\n'), ((5837, 5875), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5850, 5875), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((6617, 6645), 'numpy.array', 'np.array', (['feat'], {'dtype': 'np.int'}), '(feat, dtype=np.int)\n', (6625, 6645), True, 'import numpy as np\n'), ((6671, 6701), 'numpy.array', 'np.array', (['feat'], {'dtype': 'np.float'}), '(feat, dtype=np.float)\n', (6679, 6701), True, 'import numpy as np\n'), ((4204, 4219), 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), '(d_inv)\n', (4212, 4219), True, 'import scipy.sparse as sp\n'), ((5946, 5979), 'numpy.argmax', 'np.argmax', (['label_mat_prob'], {'axis': '(1)'}), '(label_mat_prob, axis=1)\n', (5955, 5979), True, 'import numpy as np\n'), ((6046, 6076), 'numpy.argmax', 'np.argmax', (['unlabel_mat'], {'axis': '(1)'}), '(unlabel_mat, axis=1)\n', (6055, 6076), True, 'import numpy as np\n')]
|
import numpy as np
import unittest
from chainer.dataset import DatasetMixin
from chainer import testing
from chainercv.utils import assert_is_bbox_dataset
from chainercv.utils import generate_random_bbox
class BboxDataset(DatasetMixin):
def __init__(self, options=(), empty_bbox=False):
self.options = options
self.empty_bbox = empty_bbox
def __len__(self):
return 10
def get_example(self, i):
img = np.random.randint(0, 256, size=(3, 48, 64))
if self.empty_bbox:
n_bbox = 0
else:
n_bbox = np.random.randint(10, 20)
bbox = generate_random_bbox(n_bbox, (48, 64), 5, 20)
label = np.random.randint(0, 20, size=n_bbox).astype(np.int32)
return (img, bbox, label) + self.options
class InvalidSampleSizeDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(
InvalidSampleSizeDataset, self).get_example(i)[:3]
return img, bbox
class InvalidImageDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidImageDataset, self).get_example(i)[:3]
return img[0], bbox, label
class InvalidBboxDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidBboxDataset, self).get_example(i)[:3]
bbox += 1000
return img, bbox, label
class InvalidLabelDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(InvalidLabelDataset, self).get_example(i)[:3]
label += 1000
return img, bbox, label
class MismatchLengthDataset(BboxDataset):
def get_example(self, i):
img, bbox, label = super(
MismatchLengthDataset, self).get_example(i)[:3]
return img, bbox, label[1:]
@testing.parameterize(
{'dataset': BboxDataset(), 'valid': True},
{'dataset': BboxDataset(empty_bbox=True), 'valid': True},
{'dataset': BboxDataset(('option',)), 'valid': True},
{'dataset': InvalidSampleSizeDataset(), 'valid': False},
{'dataset': InvalidImageDataset(), 'valid': False},
{'dataset': InvalidBboxDataset(), 'valid': False},
{'dataset': InvalidLabelDataset(), 'valid': False},
{'dataset': MismatchLengthDataset(), 'valid': False},
)
class TestAssertIsBboxDataset(unittest.TestCase):
def test_assert_is_bbox_dataset(self):
if self.valid:
assert_is_bbox_dataset(self.dataset, 20)
else:
with self.assertRaises(AssertionError):
assert_is_bbox_dataset(self.dataset, 20)
testing.run_module(__name__, __file__)
|
[
"chainercv.utils.generate_random_bbox",
"numpy.random.randint",
"chainer.testing.run_module",
"chainercv.utils.assert_is_bbox_dataset"
] |
[((2565, 2603), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (2583, 2603), False, 'from chainer import testing\n'), ((451, 494), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(3, 48, 64)'}), '(0, 256, size=(3, 48, 64))\n', (468, 494), True, 'import numpy as np\n'), ((622, 667), 'chainercv.utils.generate_random_bbox', 'generate_random_bbox', (['n_bbox', '(48, 64)', '(5)', '(20)'], {}), '(n_bbox, (48, 64), 5, 20)\n', (642, 667), False, 'from chainercv.utils import generate_random_bbox\n'), ((581, 606), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)'], {}), '(10, 20)\n', (598, 606), True, 'import numpy as np\n'), ((2399, 2439), 'chainercv.utils.assert_is_bbox_dataset', 'assert_is_bbox_dataset', (['self.dataset', '(20)'], {}), '(self.dataset, 20)\n', (2421, 2439), False, 'from chainercv.utils import assert_is_bbox_dataset\n'), ((684, 721), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {'size': 'n_bbox'}), '(0, 20, size=n_bbox)\n', (701, 721), True, 'import numpy as np\n'), ((2522, 2562), 'chainercv.utils.assert_is_bbox_dataset', 'assert_is_bbox_dataset', (['self.dataset', '(20)'], {}), '(self.dataset, 20)\n', (2544, 2562), False, 'from chainercv.utils import assert_is_bbox_dataset\n')]
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.stateful."""
from absl.testing import absltest
from haiku._src import base
from haiku._src import module
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
import numpy as np
class StatefulTest(absltest.TestCase):
@test_utils.transform_and_run
def test_grad(self):
x = jnp.array(3.)
g = stateful.grad(SquareModule())(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.grad(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_value_and_grad(self):
x = jnp.array(2.)
y, g = stateful.value_and_grad(SquareModule())(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
def test_value_and_grad_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.grad() instead"):
stateful.value_and_grad(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
g, aux = stateful.grad(f, has_aux=True)(x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
@test_utils.transform_and_run
def test_value_and_grad_aux(self):
o = object()
def f(x):
m = SquareModule()
return m(x), o
x = jnp.array(3.)
(y, aux), g = stateful.value_and_grad(f, has_aux=True)(x)
self.assertEqual(y, x ** 2)
np.testing.assert_allclose(g, 2 * x, rtol=1e-4)
self.assertIs(aux, o)
def test_grad_and_jit(self):
def f(x):
g = stateful.grad(SquareModule())(x)
return g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
g, state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
def test_value_and_grad_and_jit(self):
def f(x):
y, g = stateful.value_and_grad(SquareModule())(x)
return y, g
x = jnp.array(3.)
f = transform.transform_with_state(f)
params, state = jax.jit(f.init)(None, x)
(y, g), state = jax.jit(f.apply)(params, state, None, x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
@test_utils.transform_and_run
def test_jit(self):
mod = SquareModule()
x = jnp.array(2)
y = stateful.jit(mod)(x)
self.assertEqual(y, x ** 2)
def test_jit_no_transform(self):
x = jnp.array(2)
with self.assertRaises(ValueError, msg="Use jax.jit() instead"):
stateful.jit(lambda x: x**2)(x)
@test_utils.transform_and_run
def test_remat(self):
forward, backward = [], []
callback = _callback_prim(lambda: forward.append(None),
lambda: backward.append(None))
def test(remat):
x = jnp.array(3.)
mod = CountingModule()
self.assertEqual(mod.count, 0)
f = lambda x: callback(mod(x))
if remat:
f = stateful.remat(f)
y, g = stateful.value_and_grad(f)(x)
np.testing.assert_allclose(y, x ** 2, rtol=1e-3)
np.testing.assert_allclose(g, 2 * x, rtol=1e-3)
self.assertEqual(mod.count, 1)
num_forward = len(forward)
num_backward = len(backward)
del forward[:], backward[:]
return num_forward, num_backward
# Sanity check.
self.assertEqual(test(remat=True), test(remat=True))
self.assertEqual(test(remat=False), test(remat=False))
# NOTE: JAX does not guarantee to execute primitives once and only once for
# a given function (we observe f=2,b=1 without remat and f=5,b=1 with
# remat), but we do expect that JAX will execute our primitive forward at
# least one more time with remat than without it.
num_forward_remat, num_backward_remat = test(remat=True)
num_forward_no_remat, num_backward_no_remat = test(remat=False)
self.assertGreater(num_forward_remat, num_forward_no_remat)
self.assertEqual(num_backward_remat, num_backward_no_remat)
def test_remat_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.remat() instead"):
stateful.remat(lambda x: x**2)(x)
def test_cond(self):
def f(x):
mod = SquareModule()
return stateful.cond(x == 2, x, mod, x, lambda x: mod(x + 1))
f = transform.transform_with_state(f)
for x, y in ((1, 4), (2, 4), (3, 16)):
x, y = map(jnp.array, (x, y))
params, state = f.init(None, x)
out, state = f.apply(params, state, None, x)
self.assertEqual(state, {"square_module": {"y": y}})
self.assertEqual(out, y)
def test_cond_no_transform(self):
x = jnp.array(3.)
with self.assertRaises(ValueError, msg="Use jax.cond() instead"):
stateful.cond(x == 2, x, lambda x: x**2, x, lambda x: (x + 1)**2)
def _callback_prim(forward, backward):
def f_impl(x):
forward()
return x
def b_impl(x):
backward()
return (x,)
prim = jax.core.Primitive("hk_callback")
prim.def_impl(f_impl)
prim.def_abstract_eval(f_impl)
jax.ad.deflinear(prim, b_impl)
return prim.bind
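# Descriptive note (added): `_callback_prim` builds a pass-through JAX primitive whose
# impl runs `forward()` on every forward evaluation and whose linear transpose runs
# `backward()`, so `test_remat` above can count how often each direction executes.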
class CountingModule(module.Module):
@property
def count(self):
return base.get_state("count", [], init=jnp.zeros)
def __call__(self, x):
y = x ** 2
base.set_state("count", self.count + 1)
return y
class SquareModule(module.Module):
def __call__(self, x):
assert x.ndim == 0
p = base.get_parameter("p", [], jnp.int32, init=lambda *_: jnp.array(2))
y = x ** p
base.set_state("y", y)
return y
if __name__ == "__main__":
absltest.main()
|
[
"haiku._src.stateful.grad",
"haiku._src.stateful.cond",
"jax.ad.deflinear",
"numpy.testing.assert_allclose",
"haiku._src.stateful.remat",
"absl.testing.absltest.main",
"jax.numpy.array",
"haiku._src.stateful.jit",
"haiku._src.base.set_state",
"jax.core.Primitive",
"haiku._src.stateful.value_and_grad",
"haiku._src.base.get_state",
"jax.jit",
"haiku._src.transform.transform_with_state"
] |
[((5840, 5873), 'jax.core.Primitive', 'jax.core.Primitive', (['"""hk_callback"""'], {}), "('hk_callback')\n", (5858, 5873), False, 'import jax\n'), ((5933, 5963), 'jax.ad.deflinear', 'jax.ad.deflinear', (['prim', 'b_impl'], {}), '(prim, b_impl)\n', (5949, 5963), False, 'import jax\n'), ((6456, 6471), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (6469, 6471), False, 'from absl.testing import absltest\n'), ((1104, 1118), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1113, 1118), True, 'import jax.numpy as jnp\n'), ((1163, 1212), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (1189, 1212), True, 'import numpy as np\n'), ((1256, 1270), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1265, 1270), True, 'import jax.numpy as jnp\n'), ((1453, 1467), 'jax.numpy.array', 'jnp.array', (['(2.0)'], {}), '(2.0)\n', (1462, 1467), True, 'import jax.numpy as jnp\n'), ((1557, 1606), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (1583, 1606), True, 'import numpy as np\n'), ((1660, 1674), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1669, 1674), True, 'import jax.numpy as jnp\n'), ((1940, 1954), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (1949, 1954), True, 'import jax.numpy as jnp\n'), ((2005, 2054), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (2031, 2054), True, 'import numpy as np\n'), ((2236, 2250), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (2245, 2250), True, 'import jax.numpy as jnp\n'), ((2348, 2397), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.0001)'}), '(g, 2 * x, rtol=0.0001)\n', (2374, 2397), True, 'import numpy as np\n'), ((2535, 2549), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (2544, 2549), True, 'import jax.numpy as jnp\n'), ((2557, 2590), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (2587, 2590), False, 'from haiku._src import transform\n'), ((2696, 2744), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.001)'}), '(g, 2 * x, rtol=0.001)\n', (2722, 2744), True, 'import numpy as np\n'), ((2883, 2897), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (2892, 2897), True, 'import jax.numpy as jnp\n'), ((2905, 2938), 'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (2935, 2938), False, 'from haiku._src import transform\n'), ((3049, 3098), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', '(x ** 2)'], {'rtol': '(0.001)'}), '(y, x ** 2, rtol=0.001)\n', (3075, 3098), True, 'import numpy as np\n'), ((3102, 3150), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.001)'}), '(g, 2 * x, rtol=0.001)\n', (3128, 3150), True, 'import numpy as np\n'), ((3238, 3250), 'jax.numpy.array', 'jnp.array', (['(2)'], {}), '(2)\n', (3247, 3250), True, 'import jax.numpy as jnp\n'), ((3356, 3368), 'jax.numpy.array', 'jnp.array', (['(2)'], {}), '(2)\n', (3365, 3368), True, 'import jax.numpy as jnp\n'), ((4937, 4951), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (4946, 4951), True, 'import jax.numpy as jnp\n'), ((5203, 5236), 
'haiku._src.transform.transform_with_state', 'transform.transform_with_state', (['f'], {}), '(f)\n', (5233, 5236), False, 'from haiku._src import transform\n'), ((5540, 5554), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (5549, 5554), True, 'import jax.numpy as jnp\n'), ((6065, 6108), 'haiku._src.base.get_state', 'base.get_state', (['"""count"""', '[]'], {'init': 'jnp.zeros'}), "('count', [], init=jnp.zeros)\n", (6079, 6108), False, 'from haiku._src import base\n'), ((6154, 6193), 'haiku._src.base.set_state', 'base.set_state', (['"""count"""', '(self.count + 1)'], {}), "('count', self.count + 1)\n", (6168, 6193), False, 'from haiku._src import base\n'), ((6389, 6411), 'haiku._src.base.set_state', 'base.set_state', (['"""y"""', 'y'], {}), "('y', y)\n", (6403, 6411), False, 'from haiku._src import base\n'), ((1967, 1997), 'haiku._src.stateful.grad', 'stateful.grad', (['f'], {'has_aux': '(True)'}), '(f, has_aux=True)\n', (1980, 1997), False, 'from haiku._src import stateful\n'), ((2268, 2308), 'haiku._src.stateful.value_and_grad', 'stateful.value_and_grad', (['f'], {'has_aux': '(True)'}), '(f, has_aux=True)\n', (2291, 2308), False, 'from haiku._src import stateful\n'), ((2611, 2626), 'jax.jit', 'jax.jit', (['f.init'], {}), '(f.init)\n', (2618, 2626), False, 'import jax\n'), ((2651, 2667), 'jax.jit', 'jax.jit', (['f.apply'], {}), '(f.apply)\n', (2658, 2667), False, 'import jax\n'), ((2959, 2974), 'jax.jit', 'jax.jit', (['f.init'], {}), '(f.init)\n', (2966, 2974), False, 'import jax\n'), ((3004, 3020), 'jax.jit', 'jax.jit', (['f.apply'], {}), '(f.apply)\n', (3011, 3020), False, 'import jax\n'), ((3259, 3276), 'haiku._src.stateful.jit', 'stateful.jit', (['mod'], {}), '(mod)\n', (3271, 3276), False, 'from haiku._src import stateful\n'), ((3717, 3731), 'jax.numpy.array', 'jnp.array', (['(3.0)'], {}), '(3.0)\n', (3726, 3731), True, 'import jax.numpy as jnp\n'), ((3929, 3978), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', '(x ** 2)'], {'rtol': '(0.001)'}), '(y, x ** 2, rtol=0.001)\n', (3955, 3978), True, 'import numpy as np\n'), ((3984, 4032), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['g', '(2 * x)'], {'rtol': '(0.001)'}), '(g, 2 * x, rtol=0.001)\n', (4010, 4032), True, 'import numpy as np\n'), ((5630, 5699), 'haiku._src.stateful.cond', 'stateful.cond', (['(x == 2)', 'x', '(lambda x: x ** 2)', 'x', '(lambda x: (x + 1) ** 2)'], {}), '(x == 2, x, lambda x: x ** 2, x, lambda x: (x + 1) ** 2)\n', (5643, 5699), False, 'from haiku._src import stateful\n'), ((1346, 1377), 'haiku._src.stateful.grad', 'stateful.grad', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (1359, 1377), False, 'from haiku._src import stateful\n'), ((1750, 1791), 'haiku._src.stateful.value_and_grad', 'stateful.value_and_grad', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (1773, 1791), False, 'from haiku._src import stateful\n'), ((3444, 3474), 'haiku._src.stateful.jit', 'stateful.jit', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (3456, 3474), False, 'from haiku._src import stateful\n'), ((3862, 3879), 'haiku._src.stateful.remat', 'stateful.remat', (['f'], {}), '(f)\n', (3876, 3879), False, 'from haiku._src import stateful\n'), ((3893, 3919), 'haiku._src.stateful.value_and_grad', 'stateful.value_and_grad', (['f'], {}), '(f)\n', (3916, 3919), False, 'from haiku._src import stateful\n'), ((5028, 5060), 'haiku._src.stateful.remat', 'stateful.remat', (['(lambda x: x ** 2)'], {}), '(lambda x: x ** 2)\n', (5042, 5060), False, 'from haiku._src import 
stateful\n'), ((6356, 6368), 'jax.numpy.array', 'jnp.array', (['(2)'], {}), '(2)\n', (6365, 6368), True, 'import jax.numpy as jnp\n')]
|
"""Solvates a host, inserts guest(s) into solvated host, equilibrates
"""
import os
import time
import tempfile
import numpy as np
from rdkit import Chem
from md import builders, minimizer
from fe import pdb_writer, free_energy
from ff import Forcefield
from ff.handlers.deserialize import deserialize_handlers
from timemachine.lib import custom_ops, LangevinIntegrator
from docking import report
def dock_and_equilibrate(
host_pdbfile,
guests_sdfile,
max_lambda,
insertion_steps,
eq_steps,
outdir,
fewer_outfiles=False,
constant_atoms=[],
):
"""Solvates a host, inserts guest(s) into solvated host, equilibrates
Parameters
----------
host_pdbfile: path to host pdb file to dock into
guests_sdfile: path to input sdf with guests to pose/dock
max_lambda: lambda value the guest should insert from or delete to
    (recommended: 1.0 for work calculation, 0.25 to stay close to original pose)
(must be =1 for work calculation to be applicable)
insertion_steps: how many steps to insert the guest over (recommended: 501)
eq_steps: how many steps of equilibration to do after insertion (recommended: 15001)
outdir: where to write output (will be created if it does not already exist)
fewer_outfiles: if True, will only write frames for the equilibration, not insertion
constant_atoms: atom numbers from the host_pdbfile to hold mostly fixed across the simulation
(1-indexed, like PDB files)
Output
------
A pdb & sdf file for the last step of insertion
(outdir/<guest_name>/<guest_name>_ins_<step>_[host.pdb/guest.sdf])
A pdb & sdf file every 1000 steps of equilibration
(outdir/<guest_name>/<guest_name>_eq_<step>_[host.pdb/guest.sdf])
stdout corresponding to the files written noting the lambda value and energy
stdout for each guest noting the work of transition, if applicable
stdout for each guest noting how long it took to run
Note
----
The work will not be calculated if the du_dl endpoints are not close to 0 or if any norm of
force per atom exceeds 20000 kJ/(mol*nm) [MAX_NORM_FORCE defined in docking/report.py]
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
print(
f"""
HOST_PDBFILE = {host_pdbfile}
GUESTS_SDFILE = {guests_sdfile}
OUTDIR = {outdir}
MAX_LAMBDA = {max_lambda}
INSERTION_STEPS = {insertion_steps}
EQ_STEPS = {eq_steps}
"""
)
# Prepare host
# TODO: handle extra (non-transitioning) guests?
print("Solvating host...")
(
solvated_host_system,
solvated_host_coords,
_,
_,
host_box,
solvated_topology,
) = builders.build_protein_system(host_pdbfile)
_, solvated_host_pdb = tempfile.mkstemp(suffix=".pdb", text=True)
writer = pdb_writer.PDBWriter([solvated_topology], solvated_host_pdb)
writer.write_frame(solvated_host_coords)
writer.close()
solvated_host_mol = Chem.MolFromPDBFile(solvated_host_pdb, removeHs=False)
os.remove(solvated_host_pdb)
guest_ff_handlers = deserialize_handlers(
open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..",
"ff/params/smirnoff_1_1_0_ccc.py",
)
).read()
)
ff = Forcefield(guest_ff_handlers)
# Run the procedure
print("Getting guests...")
suppl = Chem.SDMolSupplier(guests_sdfile, removeHs=False)
for guest_mol in suppl:
start_time = time.time()
guest_name = guest_mol.GetProp("_Name")
guest_conformer = guest_mol.GetConformer(0)
orig_guest_coords = np.array(guest_conformer.GetPositions(), dtype=np.float64)
orig_guest_coords = orig_guest_coords / 10 # convert to md_units
minimized_coords = minimizer.minimize_host_4d(
[guest_mol], solvated_host_system, solvated_host_coords, ff, host_box
)
afe = free_energy.AbsoluteFreeEnergy(guest_mol, ff)
ups, sys_params, combined_masses, _ = afe.prepare_host_edge(
ff.get_ordered_params(), solvated_host_system, minimized_coords
)
combined_bps = []
for up, sp in zip(ups, sys_params):
combined_bps.append(up.bind(sp))
x0 = np.concatenate([minimized_coords, orig_guest_coords])
v0 = np.zeros_like(x0)
print(f"SYSTEM", f"guest_name: {guest_name}", f"num_atoms: {len(x0)}")
for atom_num in constant_atoms:
combined_masses[atom_num - 1] += 50000
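        # Adding 50,000 to the masses of the user-specified atoms makes them respond
        # very slowly to forces, effectively holding them in place during the run
        # (`atom_num - 1` converts the 1-indexed PDB numbering to 0-indexed arrays).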
seed = 2021
intg = LangevinIntegrator(300.0, 1.5e-3, 1.0, combined_masses, seed).impl()
u_impls = []
for bp in combined_bps:
bp_impl = bp.bound_impl(precision=np.float32)
u_impls.append(bp_impl)
ctxt = custom_ops.Context(x0, v0, host_box, intg, u_impls)
# insert guest
insertion_lambda_schedule = np.linspace(max_lambda, 0.0, insertion_steps)
calc_work = True
# collect a du_dl calculation once every other step
subsample_interval = 1
full_du_dls, _, _ = ctxt.multiple_steps(insertion_lambda_schedule, subsample_interval)
step = len(insertion_lambda_schedule) - 1
lamb = insertion_lambda_schedule[-1]
ctxt.step(lamb)
report.report_step(
ctxt,
step,
lamb,
host_box,
combined_bps,
u_impls,
guest_name,
insertion_steps,
"INSERTION",
)
if not fewer_outfiles:
host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
report.write_frame(
host_coords,
solvated_host_mol,
guest_coords,
guest_mol,
guest_name,
outdir,
str(step).zfill(len(str(insertion_steps))),
"ins",
)
if report.too_much_force(ctxt, lamb, host_box, combined_bps, u_impls):
print("Not calculating work (too much force)")
calc_work = False
continue
# Note: this condition only applies for ABFE, not RBFE
if abs(full_du_dls[0]) > 0.001 or abs(full_du_dls[-1]) > 0.001:
print("Not calculating work (du_dl endpoints are not ~0)")
calc_work = False
if calc_work:
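            # Insertion work is estimated by trapezoidal integration of the sampled
            # du/dl values over the lambda schedule: W = integral of (dU/dlambda) dlambda.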
work = np.trapz(full_du_dls, insertion_lambda_schedule[::subsample_interval])
print(f"guest_name: {guest_name}\tinsertion_work: {work:.2f}")
# equilibrate
for step in range(eq_steps):
ctxt.step(0.00)
if step % 1000 == 0:
report.report_step(
ctxt,
step,
0.00,
host_box,
combined_bps,
u_impls,
guest_name,
eq_steps,
"EQUILIBRATION",
)
if (not fewer_outfiles) or (step == eq_steps - 1):
host_coords = ctxt.get_x_t()[: len(solvated_host_coords)] * 10
guest_coords = ctxt.get_x_t()[len(solvated_host_coords) :] * 10
report.write_frame(
host_coords,
solvated_host_mol,
guest_coords,
guest_mol,
guest_name,
outdir,
str(step).zfill(len(str(eq_steps))),
"eq",
)
if step in (0, int(eq_steps / 2), eq_steps - 1):
if report.too_much_force(ctxt, 0.00, host_box, combined_bps, u_impls):
break
end_time = time.time()
print(f"{guest_name} took {(end_time - start_time):.2f} seconds")
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-p",
"--host_pdbfile",
default="tests/data/hif2a_nowater_min.pdb",
help="host to dock into",
)
parser.add_argument(
"-s",
"--guests_sdfile",
default="tests/data/ligands_40__first-two-ligs.sdf",
help="guests to pose",
)
parser.add_argument(
"--max_lambda",
type=float,
default=1.0,
help=(
"lambda value the guest should insert from or delete to "
"(must be =1 for the work calculation to be applicable)"
),
)
parser.add_argument(
"--insertion_steps",
type=int,
default=501,
help="how many steps to take while phasing in each guest",
)
parser.add_argument(
"--eq_steps",
type=int,
default=15001,
help="equilibration length (1 step = 1.5 femtoseconds)",
)
parser.add_argument("-o", "--outdir", default="dock_equil_out", help="where to write output")
parser.add_argument("--fewer_outfiles", action="store_true", help="write fewer output pdb/sdf files")
parser.add_argument(
"-c",
"--constant_atoms_file",
help="file containing comma-separated atom numbers to hold ~fixed (1-indexed)",
)
args = parser.parse_args()
constant_atoms_list = []
if args.constant_atoms_file:
with open(args.constant_atoms_file, "r") as rfile:
for line in rfile.readlines():
atoms = [int(x.strip()) for x in line.strip().split(",")]
constant_atoms_list += atoms
dock_and_equilibrate(
args.host_pdbfile,
args.guests_sdfile,
args.max_lambda,
args.insertion_steps,
args.eq_steps,
args.outdir,
args.fewer_outfiles,
constant_atoms_list,
)
if __name__ == "__main__":
main()
|
[
"rdkit.Chem.SDMolSupplier",
"os.remove",
"os.path.exists",
"rdkit.Chem.MolFromPDBFile",
"argparse.ArgumentParser",
"docking.report.too_much_force",
"numpy.linspace",
"fe.free_energy.AbsoluteFreeEnergy",
"numpy.concatenate",
"docking.report.report_step",
"timemachine.lib.custom_ops.Context",
"timemachine.lib.LangevinIntegrator",
"numpy.trapz",
"md.minimizer.minimize_host_4d",
"md.builders.build_protein_system",
"ff.Forcefield",
"time.time",
"tempfile.mkstemp",
"fe.pdb_writer.PDBWriter",
"os.makedirs",
"os.path.abspath",
"numpy.zeros_like"
] |
[((2726, 2769), 'md.builders.build_protein_system', 'builders.build_protein_system', (['host_pdbfile'], {}), '(host_pdbfile)\n', (2755, 2769), False, 'from md import builders, minimizer\n'), ((2798, 2840), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""', 'text': '(True)'}), "(suffix='.pdb', text=True)\n", (2814, 2840), False, 'import tempfile\n'), ((2854, 2914), 'fe.pdb_writer.PDBWriter', 'pdb_writer.PDBWriter', (['[solvated_topology]', 'solvated_host_pdb'], {}), '([solvated_topology], solvated_host_pdb)\n', (2874, 2914), False, 'from fe import pdb_writer, free_energy\n'), ((3003, 3057), 'rdkit.Chem.MolFromPDBFile', 'Chem.MolFromPDBFile', (['solvated_host_pdb'], {'removeHs': '(False)'}), '(solvated_host_pdb, removeHs=False)\n', (3022, 3057), False, 'from rdkit import Chem\n'), ((3062, 3090), 'os.remove', 'os.remove', (['solvated_host_pdb'], {}), '(solvated_host_pdb)\n', (3071, 3090), False, 'import os\n'), ((3357, 3386), 'ff.Forcefield', 'Forcefield', (['guest_ff_handlers'], {}), '(guest_ff_handlers)\n', (3367, 3386), False, 'from ff import Forcefield\n'), ((3455, 3504), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['guests_sdfile'], {'removeHs': '(False)'}), '(guests_sdfile, removeHs=False)\n', (3473, 3504), False, 'from rdkit import Chem\n'), ((8057, 8136), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (8080, 8136), False, 'import argparse\n'), ((2202, 2224), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (2216, 2224), False, 'import os\n'), ((2234, 2253), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (2245, 2253), False, 'import os\n'), ((3554, 3565), 'time.time', 'time.time', ([], {}), '()\n', (3563, 3565), False, 'import time\n'), ((3855, 3956), 'md.minimizer.minimize_host_4d', 'minimizer.minimize_host_4d', (['[guest_mol]', 'solvated_host_system', 'solvated_host_coords', 'ff', 'host_box'], {}), '([guest_mol], solvated_host_system,\n solvated_host_coords, ff, host_box)\n', (3881, 3956), False, 'from md import builders, minimizer\n'), ((3990, 4035), 'fe.free_energy.AbsoluteFreeEnergy', 'free_energy.AbsoluteFreeEnergy', (['guest_mol', 'ff'], {}), '(guest_mol, ff)\n', (4020, 4035), False, 'from fe import pdb_writer, free_energy\n'), ((4322, 4375), 'numpy.concatenate', 'np.concatenate', (['[minimized_coords, orig_guest_coords]'], {}), '([minimized_coords, orig_guest_coords])\n', (4336, 4375), True, 'import numpy as np\n'), ((4389, 4406), 'numpy.zeros_like', 'np.zeros_like', (['x0'], {}), '(x0)\n', (4402, 4406), True, 'import numpy as np\n'), ((4847, 4898), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['x0', 'v0', 'host_box', 'intg', 'u_impls'], {}), '(x0, v0, host_box, intg, u_impls)\n', (4865, 4898), False, 'from timemachine.lib import custom_ops, LangevinIntegrator\n'), ((4959, 5004), 'numpy.linspace', 'np.linspace', (['max_lambda', '(0.0)', 'insertion_steps'], {}), '(max_lambda, 0.0, insertion_steps)\n', (4970, 5004), True, 'import numpy as np\n'), ((5346, 5461), 'docking.report.report_step', 'report.report_step', (['ctxt', 'step', 'lamb', 'host_box', 'combined_bps', 'u_impls', 'guest_name', 'insertion_steps', '"""INSERTION"""'], {}), "(ctxt, step, lamb, host_box, combined_bps, u_impls,\n guest_name, insertion_steps, 'INSERTION')\n", (5364, 5461), False, 'from docking import report\n'), ((6073, 6139), 'docking.report.too_much_force', 'report.too_much_force', (['ctxt', 
'lamb', 'host_box', 'combined_bps', 'u_impls'], {}), '(ctxt, lamb, host_box, combined_bps, u_impls)\n', (6094, 6139), False, 'from docking import report\n'), ((7923, 7934), 'time.time', 'time.time', ([], {}), '()\n', (7932, 7934), False, 'import time\n'), ((6530, 6600), 'numpy.trapz', 'np.trapz', (['full_du_dls', 'insertion_lambda_schedule[::subsample_interval]'], {}), '(full_du_dls, insertion_lambda_schedule[::subsample_interval])\n', (6538, 6600), True, 'import numpy as np\n'), ((4614, 4675), 'timemachine.lib.LangevinIntegrator', 'LangevinIntegrator', (['(300.0)', '(0.0015)', '(1.0)', 'combined_masses', 'seed'], {}), '(300.0, 0.0015, 1.0, combined_masses, seed)\n', (4632, 4675), False, 'from timemachine.lib import custom_ops, LangevinIntegrator\n'), ((6813, 6924), 'docking.report.report_step', 'report.report_step', (['ctxt', 'step', '(0.0)', 'host_box', 'combined_bps', 'u_impls', 'guest_name', 'eq_steps', '"""EQUILIBRATION"""'], {}), "(ctxt, step, 0.0, host_box, combined_bps, u_impls,\n guest_name, eq_steps, 'EQUILIBRATION')\n", (6831, 6924), False, 'from docking import report\n'), ((7809, 7874), 'docking.report.too_much_force', 'report.too_much_force', (['ctxt', '(0.0)', 'host_box', 'combined_bps', 'u_impls'], {}), '(ctxt, 0.0, host_box, combined_bps, u_impls)\n', (7830, 7874), False, 'from docking import report\n'), ((3210, 3235), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3225, 3235), False, 'import os\n')]
|
from abc import ABC, abstractmethod
from typing import Dict
import numpy as np
from ..envs.env import StructuralModel
from ..utils.lik_func import *
from ..utils.useful_class import ParameterGrid
class Estimator(ABC):
"""An Estimator takes in a (trained) solver and relevant params
and outputs estimated structural params
"""
def __init__(self, solver: Solver = None, estimator_params: dict = None):
self.solver = solver
self.env = solver.env
self.estimator_params = estimator_params
self.num_structural_params = self.env.env_params['num_structural_params']
self.estimated_params = None
@abstractmethod
def estimate(self) -> dict:
"""Outputs estimation using a dict (e.g. dict['k'] = 0.95)"""
"""How?"""
return self.estimator_params
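# Concrete estimators are expected to return a mapping from structural parameter names
# to point estimates, e.g. {'delta': 0.1, 'gamma': 1}; the keys here are illustrative
# only and mirror the example grid at the bottom of this module.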
class SMMEstimator(Estimator, ABC):
"""Estimator using Simulated Method of Moments"""
def __init__(self,
data: np.ndarray = None, # (nsamples, N, T) or (N, T); N: obs dim, T: eps length
solver: Solver = None,
env: StructuralModel = None,
estimator_params: dict = None):
        super().__init__(solver=solver, estimator_params=estimator_params)
        if env is not None:
            self.env = env
self.data = data
self.estimator_params.setdefault("verbose", True)
self.estimator_params.setdefault("weight_matrix", "identity") # weight matrix type for GMM
self.estimator_params.setdefault("sample_size", 1000)
assert "grid" in self.estimator_params
assert "num_moments" in self.estimator_params
self.estimator_params.setdefault("grid", ParameterGrid({'this_is_an_example': [0.1]}))
self.estimator_params.setdefault("n_moment", 1)
if self.estimator_params['weight_matrix'] not in ["identity"]:
raise ValueError(f"No weight matrix {self.estimator_params['weight_matrix']}")
if self.estimator_params['weight_matrix'] == 'identity':
self.weight_matrix = np.eye(self.estimator_params['n_moment'])
def estimate(self) -> Dict[str, float]:
"""Use SMM to estimate structural params
Returns a dict of estimated structural params"""
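        # Brute-force search: evaluate the GMM criterion at every point of the parameter
        # grid and keep the parameter dict with the smallest error.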
running_min_error = np.inf
running_best_param = None
for param_dict in self.estimator_params['grid']:
gmm_error = self._gmm_error(param_dict, self.data)
if gmm_error < running_min_error:
running_min_error = gmm_error
running_best_param = param_dict
return running_best_param
@staticmethod
def _data_moments(obs_vec: np.ndarray) -> np.ndarray:
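        # Moment vector layout: for each observable dimension, append its time-series
        # mean followed by its variance; for (nsample, N, T) input the per-sample
        # statistics are averaged over samples.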
moments = []
if obs_vec.ndim == 2: # (N, T)
for i in range(obs_vec.shape[0]):
mean = obs_vec[i, :].mean()
moments = np.append(moments, mean)
variance = obs_vec[i, :].var()
moments = np.append(moments, variance)
else:
assert obs_vec.ndim == 3 # (nsample, N, T)
for i in range(obs_vec.shape[1]):
mean = obs_vec[:, i, :].mean(axis=1).mean()
moments = np.append(moments, mean)
variance = obs_vec[:, i, :].var(axis=1).mean()
moments = np.append(moments, variance)
return moments
def _gmm_error(self, param_dict: Dict[str, float], data_obs_vec: np.ndarray):
"""Perform GMM on a single param dict
:parameter: param_dict a dict like {'delta': 0.1, 'gamma': 1}
:returns an error term that is float of how much error this param_dict generates in simulated samples"""
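        # GMM criterion: g = (m_sim - m_data) / m_data (element-wise relative error, with
        # a small constant guarding against division by zero); the returned value is
        # g^T W g, where W is the weight matrix (identity in this implementation).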
sample_size = self.estimator_params['sample_size']
# use: param_dict, sample_size, self.weight_matrix, self.solver, self.env
sim_obs_vec = None
for n in range(sample_size):
obs_sample = self.solver.sample(
param_dict=param_dict) # np array of size (N, T); in WhitedBasicModel N=2 (k, i)
obs_sample = obs_sample.reshape(1, *obs_sample.shape) # obs_sample.shape = (1, N, T)
# some method to concat/aggregate samples
sim_obs_vec = obs_sample if sim_obs_vec is None else np.append(sim_obs_vec, obs_sample, axis=0)
moms_data = self._data_moments(data_obs_vec)
moms_model = self._data_moments(sim_obs_vec)
err = (moms_model - moms_data) / (moms_data + 1.e-9)
crit_val = err.T @ self.weight_matrix @ err
return crit_val
class LikelihoodEstimator(Estimator, ABC):
"""General likelihood estimator using some kind of given likelihood function"""
def __init__(self, solver: Solver = None, estimator_params: dict = None):
super().__init__(solver=solver, estimator_params=estimator_params)
assert "lik_func" in estimator_params # class LikFunc object (likelihood function) from utils.lik_func
self.lik_func = estimator_params['lik_func']
assert isinstance(self.lik_func, LikFunc)
# TODO: JZH
if __name__ == "__main__":
grid = {
'delta': [0.1, 0.2, 0.3],
'gamma': [1, 10]
}
pg = ParameterGrid(grid)
for g in pg:
print(g)
|
[
"numpy.append",
"numpy.eye"
] |
[((1986, 2027), 'numpy.eye', 'np.eye', (["self.estimator_params['n_moment']"], {}), "(self.estimator_params['n_moment'])\n", (1992, 2027), True, 'import numpy as np\n'), ((2800, 2824), 'numpy.append', 'np.append', (['moments', 'mean'], {}), '(moments, mean)\n', (2809, 2824), True, 'import numpy as np\n'), ((2898, 2926), 'numpy.append', 'np.append', (['moments', 'variance'], {}), '(moments, variance)\n', (2907, 2926), True, 'import numpy as np\n'), ((3129, 3153), 'numpy.append', 'np.append', (['moments', 'mean'], {}), '(moments, mean)\n', (3138, 3153), True, 'import numpy as np\n'), ((3243, 3271), 'numpy.append', 'np.append', (['moments', 'variance'], {}), '(moments, variance)\n', (3252, 3271), True, 'import numpy as np\n'), ((4172, 4214), 'numpy.append', 'np.append', (['sim_obs_vec', 'obs_sample'], {'axis': '(0)'}), '(sim_obs_vec, obs_sample, axis=0)\n', (4181, 4214), True, 'import numpy as np\n')]
|
import numpy
import pandas
import hts.hierarchy
from hts.functions import (
_create_bl_str_col,
get_agg_series,
get_hierarchichal_df,
to_sum_mat,
)
def test_sum_mat_uv(uv_tree):
mat, sum_mat_labels = to_sum_mat(uv_tree)
assert isinstance(mat, numpy.ndarray)
shp = mat.shape
assert shp[0] == uv_tree.num_nodes() + 1
assert shp[1] == uv_tree.leaf_sum()
def test_sum_mat_mv(mv_tree):
mat, sum_mat_labels = to_sum_mat(mv_tree)
assert isinstance(mat, numpy.ndarray)
shp = mat.shape
assert shp[0] == mv_tree.num_nodes() + 1
assert shp[1] == mv_tree.leaf_sum()
def test_sum_mat_hierarchical():
hierarchy = {"total": ["A", "B"], "A": ["A_X", "A_Y", "A_Z"], "B": ["B_X", "B_Y"]}
hier_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"A_X": [],
"A_Y": [],
"A_Z": [],
"B_X": [],
"B_Y": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1], # total
[0, 0, 0, 1, 1], # B
[1, 1, 1, 0, 0], # A
[1, 0, 0, 0, 0], # A_X
[0, 1, 0, 0, 0], # A_Y
[0, 0, 1, 0, 0], # A_Z
[0, 0, 0, 1, 0], # B_X
            [0, 0, 0, 0, 1], # B_Y
        ]
    )
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
assert sum_mat_labels == ["total", "B", "A", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
def test_sum_mat_grouped():
hierarchy = {
"total": ["A", "B", "X", "Y"],
"A": ["A_X", "A_Y"],
"B": ["B_X", "B_Y"],
}
grouped_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"X": [],
"Y": [],
"A_X": [],
"A_Y": [],
"B_X": [],
"B_Y": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, grouped_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1], # total
[0, 1, 0, 1], # Y
[1, 0, 1, 0], # X
[0, 0, 1, 1], # B
[1, 1, 0, 0], # A
[1, 0, 0, 0], # A_X
[0, 1, 0, 0], # A_Y
[0, 0, 1, 0], # B_X
[0, 0, 0, 1], # B_Y
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
assert sum_mat_labels == ["total", "Y", "X", "B", "A", "A_X", "A_Y", "B_X", "B_Y"]
def test_sum_mat_visnights_hier(visnights_hier):
hier_df = pandas.DataFrame(
data={
"total": [],
"VIC": [],
"QLD": [],
"SAU": [],
"WAU": [],
"OTH": [],
"NSW": [],
"NSW_Metro": [],
"NSW_NthCo": [],
"NSW_NthIn": [],
"NSW_SthCo": [],
"NSW_SthIn": [],
"OTH_Metro": [],
"OTH_NoMet": [],
"QLD_Cntrl": [],
"QLD_Metro": [],
"QLD_NthCo": [],
"SAU_Coast": [],
"SAU_Inner": [],
"SAU_Metro": [],
"VIC_EstCo": [],
"VIC_Inner": [],
"VIC_Metro": [],
"VIC_WstCo": [],
"WAU_Coast": [],
"WAU_Inner": [],
"WAU_Metro": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(visnights_hier, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # total
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], # VIC
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # QLD
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # SAU
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_Metro
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_NthCo
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_NthIn
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_SthCo
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # NSW_SthIn
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH_Metro
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # OTH_NoMet
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Coast
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # WAU_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # SAU_Coast
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], # SAU_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # SAU_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # QLD_Cntrl
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # QLD_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], # QLD_NthCo
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # VIC_EstCo
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # VIC_Inner
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], # VIC_Metro
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], # VIC_WstCo
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
def test_demo_unique_constraint():
# Example https://otexts.com/fpp2/hts.html
# Does not work when you have elements that are named the same, but represent
# different levels in the hierarchy. See expected_sum_mat below for example.
hierarchy = {"total": ["A", "B"], "A": ["AA", "AB", "AC"], "B": ["BA", "BB"]}
hier_df = pandas.DataFrame(
data={
"total": [],
"A": [],
"B": [],
"AA": [],
"AB": [],
"AC": [],
"BA": [],
"BB": [],
}
)
tree = hts.hierarchy.HierarchyTree.from_nodes(hierarchy, hier_df)
sum_mat, sum_mat_labels = to_sum_mat(tree)
expected_sum_mat = numpy.array(
[
[1, 1, 1, 1, 1], # total
[0, 1, 0, 1, 1], # B, Incorrectly finds B in AB
[1, 1, 1, 1, 0], # A, Incorrectly finds A in BA
[1, 0, 0, 0, 0], # AA
[0, 1, 0, 0, 0], # AB
[0, 0, 1, 0, 0], # AC
[0, 0, 0, 1, 0], # BA
[0, 0, 0, 0, 1], # BB
]
)
numpy.testing.assert_array_equal(sum_mat, expected_sum_mat)
def test_1lev():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B", "B"], "lev2": ["X", "Y", "X", "Y"],}
)
levels = get_agg_series(grouped_df, [["lev1"]])
expected_levels = ["A", "B"]
assert sorted(levels) == sorted(expected_levels)
levels = get_agg_series(grouped_df, [["lev2"]])
expected_levels = ["X", "Y"]
assert sorted(levels) == sorted(expected_levels)
def test_2lev():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B", "B"], "lev2": ["X", "Y", "X", "Y"],}
)
levels = get_agg_series(grouped_df, [["lev1", "lev2"]])
expected_levels = ["A_X", "A_Y", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_hierarchichal():
hier_df = pandas.DataFrame(
data={"lev1": ["A", "A", "A", "B", "B"], "lev2": ["X", "Y", "Z", "X", "Y"],}
)
levels = get_agg_series(hier_df, [["lev1"], ["lev1", "lev2"]])
expected_levels = ["A", "B", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_grouped():
hier_df = pandas.DataFrame(
data={"lev1": ["A", "A", "A", "B", "B"], "lev2": ["X", "Y", "Z", "X", "Y"],}
)
hierarchy = [["lev1"], ["lev2"], ["lev1", "lev2"]]
levels = get_agg_series(hier_df, hierarchy)
expected_levels = ["A", "B", "X", "Y", "Z", "A_X", "A_Y", "A_Z", "B_X", "B_Y"]
assert sorted(levels) == sorted(expected_levels)
def test_grouped_create_df():
hier_df = pandas.DataFrame(
data={
"ds": ["2020-01", "2020-02"] * 5,
"lev1": ["A", "A", "A", "A", "A", "A", "B", "B", "B", "B"],
"lev2": ["X", "X", "Y", "Y", "Z", "Z", "X", "X", "Y", "Y"],
"val": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
}
)
level_names = ["lev1", "lev2"]
hierarchy = [["lev1"], ["lev2"]]
gts_df, sum_mat, sum_mat_labels = get_hierarchichal_df(
hier_df,
level_names=level_names,
hierarchy=hierarchy,
date_colname="ds",
val_colname="val",
)
expected_columns = [
"A_X",
"A_Y",
"A_Z",
"B_X",
"B_Y",
"A",
"B",
"X",
"Y",
"Z",
"total",
]
assert sorted(list(gts_df.columns)) == sorted(expected_columns)
def test_parent_child():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B"], "lev2": ["X", "Y", "Z"],}
)
levels = get_agg_series(grouped_df, [["lev1", "lev2"]])
expected_levels = ["A_X", "A_Y", "B_Z"]
assert sorted(levels) == sorted(expected_levels)
def test_create_bl_str_col():
grouped_df = pandas.DataFrame(
data={"lev1": ["A", "A", "B"], "lev2": ["X", "Y", "Z"],}
)
col = _create_bl_str_col(grouped_df, ["lev1", "lev2"])
assert col == ["A_X", "A_Y", "B_Z"]
|
[
"hts.functions.get_hierarchichal_df",
"hts.functions._create_bl_str_col",
"hts.functions.get_agg_series",
"numpy.array",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"hts.functions.to_sum_mat"
] |
[((223, 242), 'hts.functions.to_sum_mat', 'to_sum_mat', (['uv_tree'], {}), '(uv_tree)\n', (233, 242), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((448, 467), 'hts.functions.to_sum_mat', 'to_sum_mat', (['mv_tree'], {}), '(mv_tree)\n', (458, 467), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((751, 864), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'A': [], 'B': [], 'A_X': [], 'A_Y': [], 'A_Z': [], 'B_X': [],\n 'B_Y': []}"}), "(data={'total': [], 'A': [], 'B': [], 'A_X': [], 'A_Y': [],\n 'A_Z': [], 'B_X': [], 'B_Y': []})\n", (767, 864), False, 'import pandas\n'), ((1083, 1099), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (1093, 1099), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((1124, 1277), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1], [0, 0, 0, 1, 1], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 1], [0, 0, 0, 1, 1], [1, 1, 1, 0, 0], [1, 0, 0, 0,\n 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]])\n', (1135, 1277), False, 'import numpy\n'), ((1454, 1513), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (1486, 1513), False, 'import numpy\n'), ((1766, 1886), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'A': [], 'B': [], 'X': [], 'Y': [], 'A_X': [], 'A_Y': [],\n 'B_X': [], 'B_Y': []}"}), "(data={'total': [], 'A': [], 'B': [], 'X': [], 'Y': [],\n 'A_X': [], 'A_Y': [], 'B_X': [], 'B_Y': []})\n", (1782, 1886), False, 'import pandas\n'), ((2120, 2136), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (2130, 2136), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((2161, 2304), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 0], [1, \n 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1,\n 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (2172, 2304), False, 'import numpy\n'), ((2496, 2555), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (2528, 2555), False, 'import numpy\n'), ((2708, 3174), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'VIC': [], 'QLD': [], 'SAU': [], 'WAU': [], 'OTH': [], 'NSW':\n [], 'NSW_Metro': [], 'NSW_NthCo': [], 'NSW_NthIn': [], 'NSW_SthCo': [],\n 'NSW_SthIn': [], 'OTH_Metro': [], 'OTH_NoMet': [], 'QLD_Cntrl': [],\n 'QLD_Metro': [], 'QLD_NthCo': [], 'SAU_Coast': [], 'SAU_Inner': [],\n 'SAU_Metro': [], 'VIC_EstCo': [], 'VIC_Inner': [], 'VIC_Metro': [],\n 'VIC_WstCo': [], 'WAU_Coast': [], 'WAU_Inner': [], 'WAU_Metro': []}"}), "(data={'total': [], 'VIC': [], 'QLD': [], 'SAU': [], 'WAU':\n [], 'OTH': [], 'NSW': [], 'NSW_Metro': [], 'NSW_NthCo': [], 'NSW_NthIn':\n [], 'NSW_SthCo': [], 'NSW_SthIn': [], 'OTH_Metro': [], 'OTH_NoMet': [],\n 'QLD_Cntrl': [], 'QLD_Metro': [], 'QLD_NthCo': [], 'SAU_Coast': [],\n 'SAU_Inner': [], 'SAU_Metro': [], 'VIC_EstCo': [], 'VIC_Inner': [],\n 'VIC_Metro': [], 'VIC_WstCo': [], 'WAU_Coast': [], 'WAU_Inner': [],\n 'WAU_Metro': []})\n", (2724, 
3174), False, 'import pandas\n'), ((3606, 3622), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (3616, 3622), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((3647, 5443), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0,\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (3658, 5443), False, 'import numpy\n'), ((6000, 6059), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (6032, 6059), False, 'import numpy\n'), ((6403, 6511), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'total': [], 'A': [], 'B': [], 'AA': [], 'AB': [], 'AC': [], 'BA': [],\n 'BB': []}"}), "(data={'total': [], 'A': [], 'B': [], 'AA': [], 'AB': [],\n 'AC': [], 'BA': [], 'BB': []})\n", (6419, 6511), False, 'import pandas\n'), ((6730, 6746), 'hts.functions.to_sum_mat', 'to_sum_mat', (['tree'], {}), '(tree)\n', (6740, 6746), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((6771, 6924), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1], [0, 1, 0, 1, 1], [1, 1, 1, 1, 0], [1, 0, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1, 1], [0, 1, 0, 1, 1], [1, 1, 1, 1, 0], [1, 0, 0, 0,\n 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]])\n', (6782, 6924), False, 'import numpy\n'), ((7150, 7209), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['sum_mat', 'expected_sum_mat'], {}), '(sum_mat, expected_sum_mat)\n', (7182, 7209), False, 'import numpy\n'), ((7246, 7333), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X',\n 'Y']})\n", (7262, 7333), False, 'import pandas\n'), ((7359, 7397), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev1']]"], {}), "(grouped_df, [['lev1']])\n", (7373, 7397), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((7498, 7536), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev2']]"], {}), "(grouped_df, [['lev2']])\n", (7512, 7536), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((7659, 7746), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'X',\n 'Y']})\n", (7675, 7746), False, 'import pandas\n'), ((7772, 7818), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev1', 'lev2']]"], {}), "(grouped_df, [['lev1', 'lev2']])\n", (7786, 7818), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((7967, 8064), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'Z', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'A', 'B', 'B'], 'lev2': ['X', 'Y',\n 'Z', 'X', 'Y']})\n", (7983, 8064), False, 'import pandas\n'), ((8090, 8143), 'hts.functions.get_agg_series', 'get_agg_series', (['hier_df', "[['lev1'], ['lev1', 'lev2']]"], {}), "(hier_df, [['lev1'], ['lev1', 'lev2']])\n", (8104, 8143), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((8301, 8398), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'A', 'B', 'B'], 'lev2': ['X', 'Y', 'Z', 'X', 'Y']}"}), "(data={'lev1': ['A', 'A', 'A', 
'B', 'B'], 'lev2': ['X', 'Y',\n 'Z', 'X', 'Y']})\n", (8317, 8398), False, 'import pandas\n'), ((8479, 8513), 'hts.functions.get_agg_series', 'get_agg_series', (['hier_df', 'hierarchy'], {}), '(hier_df, hierarchy)\n', (8493, 8513), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((8696, 8921), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'ds': ['2020-01', '2020-02'] * 5, 'lev1': ['A', 'A', 'A', 'A', 'A', 'A',\n 'B', 'B', 'B', 'B'], 'lev2': ['X', 'X', 'Y', 'Y', 'Z', 'Z', 'X', 'X',\n 'Y', 'Y'], 'val': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}"}), "(data={'ds': ['2020-01', '2020-02'] * 5, 'lev1': ['A', 'A',\n 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'], 'lev2': ['X', 'X', 'Y', 'Y',\n 'Z', 'Z', 'X', 'X', 'Y', 'Y'], 'val': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})\n", (8712, 8921), False, 'import pandas\n'), ((9098, 9215), 'hts.functions.get_hierarchichal_df', 'get_hierarchichal_df', (['hier_df'], {'level_names': 'level_names', 'hierarchy': 'hierarchy', 'date_colname': '"""ds"""', 'val_colname': '"""val"""'}), "(hier_df, level_names=level_names, hierarchy=hierarchy,\n date_colname='ds', val_colname='val')\n", (9118, 9215), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((9560, 9633), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']}"}), "(data={'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']})\n", (9576, 9633), False, 'import pandas\n'), ((9663, 9709), 'hts.functions.get_agg_series', 'get_agg_series', (['grouped_df', "[['lev1', 'lev2']]"], {}), "(grouped_df, [['lev1', 'lev2']])\n", (9677, 9709), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n'), ((9856, 9929), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': "{'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']}"}), "(data={'lev1': ['A', 'A', 'B'], 'lev2': ['X', 'Y', 'Z']})\n", (9872, 9929), False, 'import pandas\n'), ((9956, 10004), 'hts.functions._create_bl_str_col', '_create_bl_str_col', (['grouped_df', "['lev1', 'lev2']"], {}), "(grouped_df, ['lev1', 'lev2'])\n", (9974, 10004), False, 'from hts.functions import _create_bl_str_col, get_agg_series, get_hierarchichal_df, to_sum_mat\n')]
|
from pathlib import Path
import shutil
import unittest
import numpy as np
import siml.optimize as optimize
import siml.setting as setting
class TestOptimize(unittest.TestCase):
def test_generate_dict(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
objective = optimize.Objective(main_setting, None)
dict_replace_1 = {
'inputs': [{'name': 'abc', 'dim': 6}],
'n_node': 35,
'hidden_layers': 11,
'dropout': 0.01}
replaced_setting_1 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_1)
dict_replace_2 = {
'inputs': [
{'name': 'elemental_strain', 'dim': 6},
{'name': 'something', 'dim': 100}],
'n_node': 135,
'hidden_layers': 111,
'dropout': 0.11}
replaced_setting_2 = objective._generate_dict(
main_setting.optuna.setting, dict_replace_2)
self.assertEqual(
replaced_setting_1['trainer']['inputs'][0]['name'],
'abc')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][0]['name'],
'elemental_strain')
self.assertEqual(
replaced_setting_2['trainer']['inputs'][1]['name'],
'something')
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_nodes'], 135)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_layers'], 111)
self.assertEqual(
replaced_setting_2['model']['blocks'][0]['hidden_dropout'], 0.11)
def test_perform_study(self):
main_setting = setting.MainSetting.read_settings_yaml(
Path('tests/data/deform/optuna.yml'))
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
study = optimize.Study(main_setting)
study.perform_study()
self.assertLess(
study.study.best_trial.value,
np.max([t.value for t in study.study.trials]))
def test_perform_study_step_by_step(self):
main_setting_yml = Path('tests/data/deform/optuna.yml')
main_setting = setting.MainSetting.read_settings_yaml(
main_setting_yml)
if main_setting.optuna.output_base_directory.exists():
shutil.rmtree(main_setting.optuna.output_base_directory)
db_setting = setting.DBSetting(use_sqlite=True)
study = optimize.Study(main_setting, db_setting, step_by_step=True)
for _ in range(3):
try:
study.perform_study()
except SystemExit:
continue
self.assertEqual(len(study.study.get_trials()), 3)
|
[
"siml.setting.MainSetting.read_settings_yaml",
"pathlib.Path",
"numpy.max",
"shutil.rmtree",
"siml.optimize.Objective",
"siml.optimize.Study",
"siml.setting.DBSetting"
] |
[((349, 387), 'siml.optimize.Objective', 'optimize.Objective', (['main_setting', 'None'], {}), '(main_setting, None)\n', (367, 387), True, 'import siml.optimize as optimize\n'), ((1979, 2007), 'siml.optimize.Study', 'optimize.Study', (['main_setting'], {}), '(main_setting)\n', (1993, 2007), True, 'import siml.optimize as optimize\n'), ((2239, 2275), 'pathlib.Path', 'Path', (['"""tests/data/deform/optuna.yml"""'], {}), "('tests/data/deform/optuna.yml')\n", (2243, 2275), False, 'from pathlib import Path\n'), ((2299, 2355), 'siml.setting.MainSetting.read_settings_yaml', 'setting.MainSetting.read_settings_yaml', (['main_setting_yml'], {}), '(main_setting_yml)\n', (2337, 2355), True, 'import siml.setting as setting\n'), ((2523, 2557), 'siml.setting.DBSetting', 'setting.DBSetting', ([], {'use_sqlite': '(True)'}), '(use_sqlite=True)\n', (2540, 2557), True, 'import siml.setting as setting\n'), ((2574, 2633), 'siml.optimize.Study', 'optimize.Study', (['main_setting', 'db_setting'], {'step_by_step': '(True)'}), '(main_setting, db_setting, step_by_step=True)\n', (2588, 2633), True, 'import siml.optimize as optimize\n'), ((291, 327), 'pathlib.Path', 'Path', (['"""tests/data/deform/optuna.yml"""'], {}), "('tests/data/deform/optuna.yml')\n", (295, 327), False, 'from pathlib import Path\n'), ((1793, 1829), 'pathlib.Path', 'Path', (['"""tests/data/deform/optuna.yml"""'], {}), "('tests/data/deform/optuna.yml')\n", (1797, 1829), False, 'from pathlib import Path\n'), ((1906, 1962), 'shutil.rmtree', 'shutil.rmtree', (['main_setting.optuna.output_base_directory'], {}), '(main_setting.optuna.output_base_directory)\n', (1919, 1962), False, 'import shutil\n'), ((2117, 2162), 'numpy.max', 'np.max', (['[t.value for t in study.study.trials]'], {}), '([t.value for t in study.study.trials])\n', (2123, 2162), True, 'import numpy as np\n'), ((2444, 2500), 'shutil.rmtree', 'shutil.rmtree', (['main_setting.optuna.output_base_directory'], {}), '(main_setting.optuna.output_base_directory)\n', (2457, 2500), False, 'import shutil\n')]
|
# -*- coding: utf-8 -*-
"""
@author: david
"""
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay
class ModelEvaluation:
    @staticmethod
    def evaluate(pipe, dades, objectiu, name, **evaluacio):
x = dades
y = objectiu
w = np.zeros(len(y))
pred = np.zeros(len(y))
classes = np.sort(np.unique(y))
for c in classes:
w[y==c] = 1 / sum(y==c)
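        # Inverse-frequency sample weights: every class contributes the same total weight
        # to fitting and to the weighted metrics, regardless of how many samples it has.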
kFolds = evaluacio.get('kFold', 5)
use_weights = evaluacio.get('class_weighted', True)
kf = KFold(n_splits=kFolds)
for ind_train, ind_test in kf.split(y):
x_t, y_t, w_t = x[ind_train], y[ind_train], w[ind_train]
x_cv = x[ind_test]
if use_weights:
pipe.fit(x_t, y_t, model__sample_weight=w_t)
else:
pipe.fit(x_t, y_t)
pred[ind_test] = pipe.predict(x_cv)
pred = pipe.predict(dades)
plots = evaluacio.get('plot', [])
if not type(plots) == list:
plots = [plots]
for plot in plots:
if plot == 'confusion':
cm = confusion_matrix(y, pred)
plt.subplots(figsize=(10, 6))
sns.heatmap(cm, annot = True, fmt = 'g')
plt.xlabel("Predit")
plt.ylabel("Real")
plt.title(f"Matriu de Confusió pel model {name}")
plt.show()
elif plot == 'percentage':
cm = confusion_matrix(y, pred, sample_weight=w)
plt.subplots(figsize=(10, 6))
sns.heatmap(cm, annot = True, fmt = 'g')
plt.xlabel("Predit")
plt.ylabel("Real")
plt.title(f"Matriu dels percentatges pel model {name}")
plt.show()
elif plot == 'AUC':
plt.figure(figsize=(15,10))
ax = plt.gca()
for c in classes:
yi = np.copy(y)
yi[yi!=c] = -1
yi[yi==c] = 1
predi = np.copy(pred)
predi[predi!=c] = -1
predi[predi==c] = 1
PrecisionRecallDisplay.from_predictions(yi, predi, sample_weight=w,\
ax=ax, name=f'Precision-recall curve of class {c}')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc="lower left")
plt.title('Precision-Recall Curve')
plt.show()
elif plot == 'ROC':
plt.figure(figsize=(15,10))
ax = plt.gca()
for c in classes:
yi = np.copy(y)
yi[yi!=c] = -1
yi[yi==c] = 1
predi = np.copy(pred)
predi[predi!=c] = -1
predi[predi==c] = 1
RocCurveDisplay.from_predictions(yi, predi, sample_weight=w,\
ax=ax, name=f'ROC curve of class {c}')
plt.xlabel('False Positive')
plt.ylabel('True Positive')
plt.legend(loc="lower right")
plt.title('ROC Curve')
plt.show()
else:
print(f'Plot for {plot} not implemented.')
scores = evaluacio.get('score', [])
        if not type(scores) == list:
scores = [scores]
for score in scores:
if score == 'all':
print(classification_report(y, pred))
elif score == 'accuracy':
print(f'Accuracy = {sum(y==pred) / len(y)} : {sum(y==pred)}/{len(y)}')
print(f'Macro accuracy = {sum([sum(c==pred[y==c]) / sum(y==c) for c in classes]) / len(classes)}')
elif score == 'class accuracy':
for c in classes:
ind = y==c
print(f'Accuracy of class {c} = {sum(c==pred[ind]) / sum(ind)} : {sum(c==pred[ind])}/{sum(ind)}')
else:
print(f'Score for {score} not implemented.')
|
[
"numpy.copy",
"numpy.unique",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"seaborn.heatmap",
"sklearn.metrics.RocCurveDisplay.from_predictions",
"matplotlib.pyplot.figure",
"sklearn.metrics.PrecisionRecallDisplay.from_predictions",
"matplotlib.pyplot.title",
"sklearn.model_selection.KFold",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"sklearn.metrics.confusion_matrix"
] |
[((701, 723), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'kFolds'}), '(n_splits=kFolds)\n', (706, 723), False, 'from sklearn.model_selection import KFold\n'), ((509, 521), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (518, 521), True, 'import numpy as np\n'), ((1287, 1312), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'pred'], {}), '(y, pred)\n', (1303, 1312), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((1329, 1358), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1341, 1358), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1411), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '"""g"""'}), "(cm, annot=True, fmt='g')\n", (1386, 1411), True, 'import seaborn as sns\n'), ((1432, 1452), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predit"""'], {}), "('Predit')\n", (1442, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1487), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Real"""'], {}), "('Real')\n", (1479, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1553), 'matplotlib.pyplot.title', 'plt.title', (['f"""Matriu de Confusió pel model {name}"""'], {}), "(f'Matriu de Confusió pel model {name}')\n", (1513, 1553), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1578, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1683), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'pred'], {'sample_weight': 'w'}), '(y, pred, sample_weight=w)\n', (1657, 1683), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((1700, 1729), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1712, 1729), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1782), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '"""g"""'}), "(cm, annot=True, fmt='g')\n", (1757, 1782), True, 'import seaborn as sns\n'), ((1803, 1823), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predit"""'], {}), "('Predit')\n", (1813, 1823), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Real"""'], {}), "('Real')\n", (1850, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1930), 'matplotlib.pyplot.title', 'plt.title', (['f"""Matriu dels percentatges pel model {name}"""'], {}), "(f'Matriu dels percentatges pel model {name}')\n", (1884, 1930), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1955, 1957), True, 'import matplotlib.pyplot as plt\n'), ((3702, 3732), 'sklearn.metrics.classification_report', 'classification_report', (['y', 'pred'], {}), '(y, pred)\n', (3723, 3732), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((2006, 2034), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2016, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2064), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2062, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (2522, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2549, 2572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (2559, 2572), True, 'import matplotlib.pyplot as plt\n'), 
((2589, 2617), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (2599, 2617), True, 'import matplotlib.pyplot as plt\n'), ((2634, 2669), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall Curve"""'], {}), "('Precision-Recall Curve')\n", (2643, 2669), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2696), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2694, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2134), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2131, 2134), True, 'import numpy as np\n'), ((2232, 2245), 'numpy.copy', 'np.copy', (['pred'], {}), '(pred)\n', (2239, 2245), True, 'import numpy as np\n'), ((2347, 2470), 'sklearn.metrics.PrecisionRecallDisplay.from_predictions', 'PrecisionRecallDisplay.from_predictions', (['yi', 'predi'], {'sample_weight': 'w', 'ax': 'ax', 'name': 'f"""Precision-recall curve of class {c}"""'}), "(yi, predi, sample_weight=w, ax=ax,\n name=f'Precision-recall curve of class {c}')\n", (2386, 2470), False, 'from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay\n'), ((2745, 2773), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (2755, 2773), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2803), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2801, 2803), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive"""'], {}), "('False Positive')\n", (3241, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3276, 3303), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive"""'], {}), "('True Positive')\n", (3286, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3320, 3349), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3330, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3388), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (3375, 3388), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3413, 3415), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2873), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2870, 2873), True, 'import numpy as np\n'), ((2971, 2984), 'numpy.copy', 'np.copy', (['pred'], {}), '(pred)\n', (2978, 2984), True, 'import numpy as np\n'), ((3086, 3190), 'sklearn.metrics.RocCurveDisplay.from_predictions', 'RocCurveDisplay.from_predictions', (['yi', 'predi'], {'sample_weight': 'w', 'ax': 'ax', 'name': 'f"""ROC curve of class {c}"""'}), "(yi, predi, sample_weight=w, ax=ax, name=\n f'ROC curve of class {c}')\n", (3118, 3190), False, 'from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay\n')]
|
import math
import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import CharRNN
from data import TextDataset, TextConverter
class Trainer(object):
def __init__(self, args):
self.args = args
self.device = torch.device('cuda' if self.args.cuda else 'cpu')
self.convert = None
self.model = None
self.optimizer = None
self.criterion = self.get_loss
self.meter = AverageValueMeter()
self.train_loader = None
self.get_data()
self.get_model()
self.get_optimizer()
def get_data(self):
self.convert = TextConverter(self.args.txt, max_vocab=self.args.max_vocab)
dataset = TextDataset(self.args.txt, self.args.len, self.convert.text_to_arr)
self.train_loader = DataLoader(dataset, self.args.batch_size, shuffle=True, num_workers=self.args.num_workers)
def get_model(self):
self.model = CharRNN(self.convert.vocab_size, self.args.embed_dim, self.args.hidden_size, self.args.num_layers,
self.args.dropout, self.args.cuda).to(self.device)
if self.args.cuda:
cudnn.benchmark = True
def get_optimizer(self):
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
self.optimizer = ScheduledOptim(optimizer)
@staticmethod
def get_loss(score, label):
return nn.CrossEntropyLoss()(score, label.view(-1))
def save_checkpoint(self, epoch):
if (epoch + 1) % self.args.save_interval == 0:
model_out_path = self.args.save_file + "epoch_{}_model.pth".format(epoch + 1)
torch.save(self.model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
def save(self):
model_out_path = self.args.save_file + "final_model.pth"
torch.save(self.model, model_out_path)
print("Final model saved to {}".format(model_out_path))
@staticmethod
def pick_top_n(predictions, top_n=5):
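        # Sample the next character index from the top_n most probable
        # predictions, renormalising their probabilities to sum to one.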
top_predict_prob, top_predict_label = torch.topk(predictions, top_n, 1)
top_predict_prob /= torch.sum(top_predict_prob)
top_predict_prob = top_predict_prob.squeeze(0).cpu().numpy()
top_predict_label = top_predict_label.squeeze(0).cpu().numpy()
c = np.random.choice(top_predict_label, size=1, p=top_predict_prob)
return c
def train(self):
self.meter.reset()
self.model.train()
for x, y in tqdm(self.train_loader):
y = y.long()
x, y = x.to(self.device), y.to(self.device)
# Forward.
score, _ = self.model(x)
loss = self.criterion(score, y)
# Backward.
self.optimizer.zero_grad()
loss.backward()
# Clip gradient.
nn.utils.clip_grad_norm_(self.model.parameters(), 5)
self.optimizer.step()
self.meter.add(loss.item())
print('perplexity: {}'.format(np.exp(self.meter.value()[0])))
def test(self):
self.model.eval()
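        # Seed generation with one random character from args.begin, then
        # repeatedly feed the sampled prediction back into the network.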
begin = np.array([i for i in self.args.begin])
begin = np.random.choice(begin, size=1)
text_len = self.args.predict_len
samples = [self.convert.word_to_int(c) for c in begin]
input_txt = torch.LongTensor(samples)[None]
input_txt = input_txt.to(self.device)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
with torch.no_grad():
for i in range(text_len):
out, init_state = self.model(model_input, init_state)
prediction = self.pick_top_n(out.data)
model_input = torch.LongTensor(prediction)[None].to(self.device)
result.append(prediction[0])
print(self.convert.arr_to_text(result))
def predict(self):
self.model.eval()
samples = [self.convert.word_to_int(c) for c in self.args.begin]
input_txt = torch.LongTensor(samples)[None].to(self.device)
_, init_state = self.model(input_txt)
result = samples
model_input = input_txt[:, -1][:, None]
with torch.no_grad():
for i in range(self.args.predict_len):
out, init_state = self.model(model_input, init_state)
prediction = self.pick_top_n(out.data)
model_input = torch.LongTensor(prediction)[None].to(self.device)
result.append(prediction[0])
print(self.convert.arr_to_text(result))
def run(self):
for e in range(self.args.max_epoch):
print('===> EPOCH: {}/{}'.format(e + 1, self.args.max_epoch))
self.train()
self.test()
self.save_checkpoint(e)
self.save()
class AverageValueMeter(object):
"""
the meter tracker mainly focuses on mean and std
"""
def __init__(self):
super(AverageValueMeter, self).__init__()
self.n = None
self.sum = None
self.var = None
self.val = None
self.mean = None
self.std = None
self.reset()
def add(self, value, n=1):
self.val = value
self.sum += value
self.var += value * value
self.n += n
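        # Recompute the running mean and sample standard deviation from the
        # accumulated sums (Bessel's correction via the n - 1 denominator).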
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
self.mean, self.std = self.sum, np.inf
else:
self.mean = self.sum / self.n
self.std = math.sqrt(
(self.var - self.n * self.mean * self.mean) / (self.n - 1.0))
def value(self):
return self.mean, self.std
def reset(self):
self.n = 0
self.sum = 0.0
self.var = 0.0
self.val = 0.0
self.mean = np.nan
self.std = np.nan
class ScheduledOptim(object):
"""A wrapper class for learning rate scheduling
"""
def __init__(self, optimizer):
self.optimizer = optimizer
self.lr = self.optimizer.param_groups[0]['lr']
self.current_steps = 0
def step(self):
"Step by the inner optimizer"
self.current_steps += 1
self.optimizer.step()
def zero_grad(self):
"Zero out the gradients by the inner optimizer"
self.optimizer.zero_grad()
def lr_multi(self, multi):
for param_group in self.optimizer.param_groups:
param_group['lr'] *= multi
self.lr = self.optimizer.param_groups[0]['lr']
def set_learning_rate(self, lr):
self.lr = lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
@property
def learning_rate(self):
return self.lr
|
[
"torch.nn.CrossEntropyLoss",
"data.TextConverter",
"numpy.random.choice",
"torch.topk",
"data.TextDataset",
"tqdm.tqdm",
"torch.LongTensor",
"model.CharRNN",
"math.sqrt",
"numpy.array",
"torch.sum",
"torch.save",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.device"
] |
[((334, 383), 'torch.device', 'torch.device', (["('cuda' if self.args.cuda else 'cpu')"], {}), "('cuda' if self.args.cuda else 'cpu')\n", (346, 383), False, 'import torch\n'), ((709, 768), 'data.TextConverter', 'TextConverter', (['self.args.txt'], {'max_vocab': 'self.args.max_vocab'}), '(self.args.txt, max_vocab=self.args.max_vocab)\n', (722, 768), False, 'from data import TextDataset, TextConverter\n'), ((787, 854), 'data.TextDataset', 'TextDataset', (['self.args.txt', 'self.args.len', 'self.convert.text_to_arr'], {}), '(self.args.txt, self.args.len, self.convert.text_to_arr)\n', (798, 854), False, 'from data import TextDataset, TextConverter\n'), ((883, 978), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'self.args.batch_size'], {'shuffle': '(True)', 'num_workers': 'self.args.num_workers'}), '(dataset, self.args.batch_size, shuffle=True, num_workers=self.\n args.num_workers)\n', (893, 978), False, 'from torch.utils.data import DataLoader\n'), ((1929, 1967), 'torch.save', 'torch.save', (['self.model', 'model_out_path'], {}), '(self.model, model_out_path)\n', (1939, 1967), False, 'import torch\n'), ((2139, 2172), 'torch.topk', 'torch.topk', (['predictions', 'top_n', '(1)'], {}), '(predictions, top_n, 1)\n', (2149, 2172), False, 'import torch\n'), ((2201, 2228), 'torch.sum', 'torch.sum', (['top_predict_prob'], {}), '(top_predict_prob)\n', (2210, 2228), False, 'import torch\n'), ((2381, 2444), 'numpy.random.choice', 'np.random.choice', (['top_predict_label'], {'size': '(1)', 'p': 'top_predict_prob'}), '(top_predict_label, size=1, p=top_predict_prob)\n', (2397, 2444), True, 'import numpy as np\n'), ((2558, 2581), 'tqdm.tqdm', 'tqdm', (['self.train_loader'], {}), '(self.train_loader)\n', (2562, 2581), False, 'from tqdm import tqdm\n'), ((3165, 3203), 'numpy.array', 'np.array', (['[i for i in self.args.begin]'], {}), '([i for i in self.args.begin])\n', (3173, 3203), True, 'import numpy as np\n'), ((3220, 3251), 'numpy.random.choice', 'np.random.choice', (['begin'], {'size': '(1)'}), '(begin, size=1)\n', (3236, 3251), True, 'import numpy as np\n'), ((1488, 1509), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1507, 1509), False, 'from torch import nn\n'), ((1729, 1767), 'torch.save', 'torch.save', (['self.model', 'model_out_path'], {}), '(self.model, model_out_path)\n', (1739, 1767), False, 'import torch\n'), ((3376, 3401), 'torch.LongTensor', 'torch.LongTensor', (['samples'], {}), '(samples)\n', (3392, 3401), False, 'import torch\n'), ((3588, 3603), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3601, 3603), False, 'import torch\n'), ((4267, 4282), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4280, 4282), False, 'import torch\n'), ((1021, 1158), 'model.CharRNN', 'CharRNN', (['self.convert.vocab_size', 'self.args.embed_dim', 'self.args.hidden_size', 'self.args.num_layers', 'self.args.dropout', 'self.args.cuda'], {}), '(self.convert.vocab_size, self.args.embed_dim, self.args.hidden_size,\n self.args.num_layers, self.args.dropout, self.args.cuda)\n', (1028, 1158), False, 'from model import CharRNN\n'), ((5590, 5661), 'math.sqrt', 'math.sqrt', (['((self.var - self.n * self.mean * self.mean) / (self.n - 1.0))'], {}), '((self.var - self.n * self.mean * self.mean) / (self.n - 1.0))\n', (5599, 5661), False, 'import math\n'), ((4086, 4111), 'torch.LongTensor', 'torch.LongTensor', (['samples'], {}), '(samples)\n', (4102, 4111), False, 'import torch\n'), ((3798, 3826), 'torch.LongTensor', 'torch.LongTensor', (['prediction'], {}), '(prediction)\n', (3814, 
3826), False, 'import torch\n'), ((4490, 4518), 'torch.LongTensor', 'torch.LongTensor', (['prediction'], {}), '(prediction)\n', (4506, 4518), False, 'import torch\n')]
|
import numpy as np
import pytest
from pandas.core.frame import DataFrame
from bender.importers import DataImporters
from bender.model_loaders import ModelLoaders
from bender.model_trainer.decision_tree import DecisionTreeClassifierTrainer
from bender.split_strategies import SplitStrategies
pytestmark = pytest.mark.asyncio
async def test_predict_data() -> None:
model, data_set = await (
DataImporters.literal(DataFrame({'x': [0, 1], 'y': [0, 1], 'output': [0, 1]}))
# No test set
.split(SplitStrategies.ratio(1))
.train(DecisionTreeClassifierTrainer(), input_features=['x', 'y'], target_feature='output')
.run()
)
test_data = DataFrame({'x': [2, -3, 4], 'y': [2, -3, 4]})
expected = [1, 0, 1]
_, _, result = await (ModelLoaders.literal(model).import_data(DataImporters.literal(test_data)).predict().run())
assert np.all(expected == result)
"""
Supervised Regression
Vector[float] -> float
.train(
    RegressionModels.linear(),
input_features=["area", "location"], # floats
target_feature="price" # float
)
"""
"""
Supervised Classification
Vector[float / int / bool / str] -> str / bool / int
.train(
ClassificationModels.DecisionTree(),
input_features=["sepal_length", "sepal_width"], # float / int / bool / str
target_feature="class_name" # str / bool / int
)
# Should only be available for clustering / classification problems
.predict_probability(
labels={
"setosa": "is_setosa_probability",
"versicolor": "is_versicolor_probability",
}
)
"""
|
[
"bender.model_trainer.decision_tree.DecisionTreeClassifierTrainer",
"bender.split_strategies.SplitStrategies.ratio",
"bender.importers.DataImporters.literal",
"bender.model_loaders.ModelLoaders.literal",
"numpy.all",
"pandas.core.frame.DataFrame"
] |
[((686, 731), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'x': [2, -3, 4], 'y': [2, -3, 4]}"], {}), "({'x': [2, -3, 4], 'y': [2, -3, 4]})\n", (695, 731), False, 'from pandas.core.frame import DataFrame\n'), ((886, 912), 'numpy.all', 'np.all', (['(expected == result)'], {}), '(expected == result)\n', (892, 912), True, 'import numpy as np\n'), ((563, 594), 'bender.model_trainer.decision_tree.DecisionTreeClassifierTrainer', 'DecisionTreeClassifierTrainer', ([], {}), '()\n', (592, 594), False, 'from bender.model_trainer.decision_tree import DecisionTreeClassifierTrainer\n'), ((522, 546), 'bender.split_strategies.SplitStrategies.ratio', 'SplitStrategies.ratio', (['(1)'], {}), '(1)\n', (543, 546), False, 'from bender.split_strategies import SplitStrategies\n'), ((823, 855), 'bender.importers.DataImporters.literal', 'DataImporters.literal', (['test_data'], {}), '(test_data)\n', (844, 855), False, 'from bender.importers import DataImporters\n'), ((783, 810), 'bender.model_loaders.ModelLoaders.literal', 'ModelLoaders.literal', (['model'], {}), '(model)\n', (803, 810), False, 'from bender.model_loaders import ModelLoaders\n'), ((428, 483), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'x': [0, 1], 'y': [0, 1], 'output': [0, 1]}"], {}), "({'x': [0, 1], 'y': [0, 1], 'output': [0, 1]})\n", (437, 483), False, 'from pandas.core.frame import DataFrame\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 23:54:16 2021
@author: rolandvarga
"""
import gym
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.signal import savgol_filter
import pickle
#%matplotlib qt
#%matplotlib inline
# Set to 1 to repeat SARSA learning (With Intel Core i7-8750H it takes
# around 70 minutes), 0 for loading previous result
REPEAT_LEARNING = 0
# Parameter to set which tests to do
DO_TEST1 = 1 # Simulate the system once and plot the trajectory
DO_TEST2 = 0 # Simulate the system 1000 times and plot success-rate
# Set to 1 to plot a projection of the state-value function V
PLOT_STATEVALUE = 1
#%% Load previous result
if REPEAT_LEARNING == 0:
filename='train_6x6x20x60000.pickle'
with open(filename, 'rb') as f:
cell_nums, dhat, durations, Q, reward_set, rhat, start_time, end_time, states_high, max_steps = pickle.load(f)
#%% SARSA learning
env = gym.make('SphericalRobot-v0')
#Function to choose the next action
def choose_action(state, eps):
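    # Epsilon-greedy selection over the discretized Q-table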
action=0
if np.random.uniform(0, 1) < eps:
# Select a random action
action = env.action_space.sample()
else:
# Choose greedy action
action = np.array(np.unravel_index(np.argmax(Q[state], axis=None), Q[state].shape))
# action = np.argmax(Q[state])
return action
#Convert continuous state-space to discrete
def discretize_state(observation_c, low, high, cell_nums):
# Initialize the discretized observation
observation_d = []
# Loop through and discretize all 3 states
for state,low_val,high_val,c_num in zip(observation_c,low,high,cell_nums):
# Define intervals for the possible values
bins = np.linspace(low_val,high_val,c_num+1,endpoint=True)
# Discretize with NumPy function
state_d = np.digitize(state, bins, right=True)
# Check if the discrete values are valid
assert state_d > 0 and state_d <= c_num
observation_d.append(state_d-1) # -1 to have values start at 0
return observation_d
if REPEAT_LEARNING == 1:
# Learning parameters
epsilon = 0.3 # For start
total_episodes = 100
max_steps = 300
alpha = 0.1
gamma = 0.99
# The discretization of the states
states_high = np.array([6,6,2*np.pi/env.c]) # Set boundaries for the values
cell_nums = np.array([6,6,20]) # Set the number of discrete cells
#Initializing the Q-matrix
Q = np.ones(np.append(cell_nums,[3,3]))
#Function to update the Q-value
def update(state, state2, reward, action, action2):
predict = Q[state][action]
target = reward + gamma * Q[state2][action2]
Q[state][action] = Q[state][action] + alpha * (target - predict)
#Initializing the reward
# reward=0
reward_set = []
durations = []
start_time = time.time()
# Starting the SARSA learning
for episode in range(total_episodes):
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), epsilon)
states = [state1]
while t < max_steps:
# Visualizing the training, TODO
# env.render()
# Getting the next state
state2, reward, done, info = env.step(action1)
# Note: The 3rd state is the difference between the wheel angles
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
# Choosing the next action
action2 = choose_action(tuple(state2_d), epsilon)
# Updating the Q-value
update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
# Update variables for next iteration
state1 = state2
action1 = action2
# Save state to be able to plot trajectories
states.append(state2)
            #Updating the respective values
t += 1
cumm_reward += reward
#If at the end of learning process
if done:
break
reward_set.append(cumm_reward)
durations.append(t)
# plt.figure(0)
# x = np.array(states)[:,0]
# y = np.array(states)[:,1]
# plt.scatter(x, y)
# plt.xlim(-5, 5)
# plt.ylim(-5, 5)
# plt.show()
# Print time it took to run the learning
end_time = time.time()
print("--- %s seconds ---" % (end_time - start_time))
# Plot the filtered rewards during the learning
plt.figure(1)
#plt.plot(reward_set)
rhat = savgol_filter(reward_set, 501, 3) # window size 501, polynomial order 3
plt.plot(rhat)
#plt.ylim(-500, 500)
plt.xlabel(r"Episode [-]")
plt.ylabel(r"Reward [-]")
plt.legend()
plt.savefig('reward_learning.eps', format='eps', bbox_inches='tight')
plt.show()
# Plot the filtered episode lengths during the learning
plt.figure(2)
#plt.plot(durations)
dhat = savgol_filter(durations, 51, 3) # window size 51, polynomial order 3
plt.plot(dhat)
plt.show()
#%% Test 1: Generate one trajectory
if DO_TEST1 == 1:
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), 0.0)
states = [state1]
actions = [action1]
while t < max_steps:
#Visualizing the training
# env.render()
#Getting the next state
state2, reward, done, info = env.step(action1)
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
#Choosing the next action
action2 = choose_action(tuple(state2_d), 0.0)
#Learning the Q-value
#update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
state1 = state2
action1 = action2
states.append(state2)
actions.append(action2)
        #Updating the respective values
t += 1
cumm_reward += reward
#If at the end of learning process
if done:
break
print(reward)
# Plot trajectory on 2D plot
plt.figure(3)
x = np.array(states)[:,0]
y = np.array(states)[:,1]
plt.scatter(x, y)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xticks(np.arange(-5, 6, 1))
plt.yticks(np.arange(-5, 6, 1))
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel(r"$x_1$ [m]")
plt.ylabel(r"$x_2$ [m]")
plt.legend()
plt.savefig('trajectory.eps', format='eps', bbox_inches='tight')
plt.show()
# Plot position states separately
plt.figure(4)
plt.plot(x, label="x1")
plt.plot(y, label="x2")
plt.xlabel(r"Time step [-]")
plt.ylabel(r"Coordinate [m]")
plt.legend()
plt.savefig('trajectory_plot.eps', format='eps', bbox_inches='tight')
plt.show()
#%% Test 2: Successful-unsuccessful tries
if DO_TEST2 == 1:
cumm_rewards = []
for k in range(1000):
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), 0.0)
while t < max_steps:
#Visualizing the training
# env.render()
#Getting the next state
state2, reward, done, info = env.step(action1)
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
#Choosing the next action
action2 = choose_action(tuple(state2_d), 0.0)
#Learning the Q-value
#update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
state1 = state2
action1 = action2
#states.append(state2)
#actions.append(action2)
            #Updating the respective values
t += 1
cumm_reward += reward
#If at the end of learning process
if done:
break
cumm_rewards.append(cumm_reward)
print("Average reward out of 1000 try: " + str(np.average(np.array(cumm_rewards))))
plt.figure(5)
plt.hist(cumm_rewards,np.array([-1000,0,1000]))
plt.show()
#%% Additional plot: State-value function
if PLOT_STATEVALUE == 1:
V = np.zeros([cell_nums[0],cell_nums[1]])
for k in range(V.shape[0]):
for l in range(V.shape[1]):
V[k,l]=np.amax(Q[k,l,:])
plt.figure(6)
plt.imshow(V, cmap='coolwarm', interpolation='nearest')
plt.colorbar()
plt.savefig('state_value.eps', format='eps', bbox_inches='tight')
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"scipy.signal.savgol_filter",
"numpy.array",
"gym.make",
"numpy.arange",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.digitize",
"matplotlib.pyplot.gca",
"pickle.load",
"numpy.argmax",
"matplotlib.pyplot.xlim",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.random.uniform",
"numpy.amax"
] |
[((973, 1002), 'gym.make', 'gym.make', (['"""SphericalRobot-v0"""'], {}), "('SphericalRobot-v0')\n", (981, 1002), False, 'import gym\n'), ((2376, 2411), 'numpy.array', 'np.array', (['[6, 6, 2 * np.pi / env.c]'], {}), '([6, 6, 2 * np.pi / env.c])\n', (2384, 2411), True, 'import numpy as np\n'), ((2458, 2478), 'numpy.array', 'np.array', (['[6, 6, 20]'], {}), '([6, 6, 20])\n', (2466, 2478), True, 'import numpy as np\n'), ((2965, 2976), 'time.time', 'time.time', ([], {}), '()\n', (2974, 2976), False, 'import time\n'), ((5005, 5016), 'time.time', 'time.time', ([], {}), '()\n', (5014, 5016), False, 'import time\n'), ((5136, 5149), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5146, 5149), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5220), 'scipy.signal.savgol_filter', 'savgol_filter', (['reward_set', '(501)', '(3)'], {}), '(reward_set, 501, 3)\n', (5200, 5220), False, 'from scipy.signal import savgol_filter\n'), ((5263, 5277), 'matplotlib.pyplot.plot', 'plt.plot', (['rhat'], {}), '(rhat)\n', (5271, 5277), True, 'import matplotlib.pyplot as plt\n'), ((5307, 5332), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode [-]"""'], {}), "('Episode [-]')\n", (5317, 5332), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward [-]"""'], {}), "('Reward [-]')\n", (5348, 5362), True, 'import matplotlib.pyplot as plt\n'), ((5368, 5380), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5378, 5380), True, 'import matplotlib.pyplot as plt\n'), ((5385, 5454), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""reward_learning.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('reward_learning.eps', format='eps', bbox_inches='tight')\n", (5396, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5467, 5469), True, 'import matplotlib.pyplot as plt\n'), ((5539, 5552), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5549, 5552), True, 'import matplotlib.pyplot as plt\n'), ((5589, 5620), 'scipy.signal.savgol_filter', 'savgol_filter', (['durations', '(51)', '(3)'], {}), '(durations, 51, 3)\n', (5602, 5620), False, 'from scipy.signal import savgol_filter\n'), ((5662, 5676), 'matplotlib.pyplot.plot', 'plt.plot', (['dhat'], {}), '(dhat)\n', (5670, 5676), True, 'import matplotlib.pyplot as plt\n'), ((5681, 5691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5689, 5691), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7127), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (7124, 7127), True, 'import matplotlib.pyplot as plt\n'), ((7192, 7209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (7203, 7209), True, 'import matplotlib.pyplot as plt\n'), ((7214, 7229), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (7222, 7229), True, 'import matplotlib.pyplot as plt\n'), ((7234, 7249), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (7242, 7249), True, 'import matplotlib.pyplot as plt\n'), ((7378, 7401), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$ [m]"""'], {}), "('$x_1$ [m]')\n", (7388, 7401), True, 'import matplotlib.pyplot as plt\n'), ((7407, 7430), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_2$ [m]"""'], {}), "('$x_2$ [m]')\n", (7417, 7430), True, 'import matplotlib.pyplot as plt\n'), ((7436, 7448), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7446, 7448), True, 'import 
matplotlib.pyplot as plt\n'), ((7453, 7517), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trajectory.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('trajectory.eps', format='eps', bbox_inches='tight')\n", (7464, 7517), True, 'import matplotlib.pyplot as plt\n'), ((7522, 7532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7530, 7532), True, 'import matplotlib.pyplot as plt\n'), ((7580, 7593), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (7590, 7593), True, 'import matplotlib.pyplot as plt\n'), ((7598, 7621), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'label': '"""x1"""'}), "(x, label='x1')\n", (7606, 7621), True, 'import matplotlib.pyplot as plt\n'), ((7626, 7649), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'label': '"""x2"""'}), "(y, label='x2')\n", (7634, 7649), True, 'import matplotlib.pyplot as plt\n'), ((7654, 7681), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time step [-]"""'], {}), "('Time step [-]')\n", (7664, 7681), True, 'import matplotlib.pyplot as plt\n'), ((7687, 7715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coordinate [m]"""'], {}), "('Coordinate [m]')\n", (7697, 7715), True, 'import matplotlib.pyplot as plt\n'), ((7721, 7733), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7731, 7733), True, 'import matplotlib.pyplot as plt\n'), ((7738, 7807), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trajectory_plot.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('trajectory_plot.eps', format='eps', bbox_inches='tight')\n", (7749, 7807), True, 'import matplotlib.pyplot as plt\n'), ((7812, 7822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7820, 7822), True, 'import matplotlib.pyplot as plt\n'), ((9488, 9501), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (9498, 9501), True, 'import matplotlib.pyplot as plt\n'), ((9558, 9568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9566, 9568), True, 'import matplotlib.pyplot as plt\n'), ((9645, 9683), 'numpy.zeros', 'np.zeros', (['[cell_nums[0], cell_nums[1]]'], {}), '([cell_nums[0], cell_nums[1]])\n', (9653, 9683), True, 'import numpy as np\n'), ((9802, 9815), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (9812, 9815), True, 'import matplotlib.pyplot as plt\n'), ((9820, 9875), 'matplotlib.pyplot.imshow', 'plt.imshow', (['V'], {'cmap': '"""coolwarm"""', 'interpolation': '"""nearest"""'}), "(V, cmap='coolwarm', interpolation='nearest')\n", (9830, 9875), True, 'import matplotlib.pyplot as plt\n'), ((9880, 9894), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9892, 9894), True, 'import matplotlib.pyplot as plt\n'), ((9899, 9964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""state_value.eps"""'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""'}), "('state_value.eps', format='eps', bbox_inches='tight')\n", (9910, 9964), True, 'import matplotlib.pyplot as plt\n'), ((9969, 9979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9977, 9979), True, 'import matplotlib.pyplot as plt\n'), ((926, 940), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (937, 940), False, 'import pickle\n'), ((1091, 1114), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1108, 1114), True, 'import numpy as np\n'), ((1762, 1818), 'numpy.linspace', 'np.linspace', (['low_val', 'high_val', '(c_num + 1)'], {'endpoint': '(True)'}), '(low_val, high_val, c_num + 1, endpoint=True)\n', (1773, 1818), True, 'import numpy as np\n'), ((1882, 
1918), 'numpy.digitize', 'np.digitize', (['state', 'bins'], {'right': '(True)'}), '(state, bins, right=True)\n', (1893, 1918), True, 'import numpy as np\n'), ((2569, 2597), 'numpy.append', 'np.append', (['cell_nums', '[3, 3]'], {}), '(cell_nums, [3, 3])\n', (2578, 2597), True, 'import numpy as np\n'), ((7136, 7152), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (7144, 7152), True, 'import numpy as np\n'), ((7166, 7182), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (7174, 7182), True, 'import numpy as np\n'), ((7265, 7284), 'numpy.arange', 'np.arange', (['(-5)', '(6)', '(1)'], {}), '(-5, 6, 1)\n', (7274, 7284), True, 'import numpy as np\n'), ((7301, 7320), 'numpy.arange', 'np.arange', (['(-5)', '(6)', '(1)'], {}), '(-5, 6, 1)\n', (7310, 7320), True, 'import numpy as np\n'), ((9528, 9554), 'numpy.array', 'np.array', (['[-1000, 0, 1000]'], {}), '([-1000, 0, 1000])\n', (9536, 9554), True, 'import numpy as np\n'), ((6218, 6273), 'numpy.array', 'np.array', (['[state1[0], state1[1], state1[2] - state1[3]]'], {}), '([state1[0], state1[1], state1[2] - state1[3]])\n', (6226, 6273), True, 'import numpy as np\n'), ((6383, 6438), 'numpy.array', 'np.array', (['[state2[0], state2[1], state2[2] - state2[3]]'], {}), '([state2[0], state2[1], state2[2] - state2[3]])\n', (6391, 6438), True, 'import numpy as np\n'), ((7326, 7335), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7333, 7335), True, 'import matplotlib.pyplot as plt\n'), ((9775, 9794), 'numpy.amax', 'np.amax', (['Q[k, l, :]'], {}), '(Q[k, l, :])\n', (9782, 9794), True, 'import numpy as np\n'), ((1282, 1312), 'numpy.argmax', 'np.argmax', (['Q[state]'], {'axis': 'None'}), '(Q[state], axis=None)\n', (1291, 1312), True, 'import numpy as np\n'), ((3658, 3713), 'numpy.array', 'np.array', (['[state1[0], state1[1], state1[2] - state1[3]]'], {}), '([state1[0], state1[1], state1[2] - state1[3]])\n', (3666, 3713), True, 'import numpy as np\n'), ((3830, 3885), 'numpy.array', 'np.array', (['[state2[0], state2[1], state2[2] - state2[3]]'], {}), '([state2[0], state2[1], state2[2] - state2[3]])\n', (3838, 3885), True, 'import numpy as np\n'), ((8403, 8458), 'numpy.array', 'np.array', (['[state1[0], state1[1], state1[2] - state1[3]]'], {}), '([state1[0], state1[1], state1[2] - state1[3]])\n', (8411, 8458), True, 'import numpy as np\n'), ((8576, 8631), 'numpy.array', 'np.array', (['[state2[0], state2[1], state2[2] - state2[3]]'], {}), '([state2[0], state2[1], state2[2] - state2[3]])\n', (8584, 8631), True, 'import numpy as np\n'), ((9453, 9475), 'numpy.array', 'np.array', (['cumm_rewards'], {}), '(cumm_rewards)\n', (9461, 9475), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:33:47 2019
@author: Bogoclu
"""
import typing
import multiprocessing as mp
import warnings
import numpy as np
from scipy import stats
from .space import FullSpace
from duqo.proba import DS, MC, SUSE, ISPUD, FORM
from duqo.doe.lhs import make_doe
def _check_obj_wgt(obj_weights, num_obj):
""" Check obj_wgt argument passed to CondMom """
if obj_weights is None:
return None
try:
_ = obj_weights[0]
except (TypeError, IndexError):
obj_weights = np.ones(num_obj) * obj_weights
if len(obj_weights) != num_obj:
msg = f"Mismatch between the number of entries ({len(obj_weights)} in "
msg += f"obj_wgt and the number of stochastic objectives ({num_obj})."
raise ValueError(msg)
return np.array(obj_weights).ravel()
def _check_std_inds(use_std, num_obj):
""" Check use_std argument passed to CondMom and
convert it to a slice definition
"""
    if isinstance(use_std, bool):
        use_std = [use_std] * num_obj
    if len(use_std) != num_obj:
msg = "Mismatch between the number of entries in "
msg += "use_std and the number of stochastic objectives."
raise ValueError(msg)
return np.array(use_std, dtype=bool)
def _find_integrator_cls(integrator):
"""
Find the Integrator class as defined by the string integrator
"""
integrator = integrator.upper()
if integrator == "DS":
IntCls = DS
elif integrator == "MC":
IntCls = MC
elif integrator == "ISPUD":
IntCls = ISPUD
elif integrator == "FORM":
IntCls = FORM
elif integrator == "SUSE":
IntCls = SUSE
else:
msg = f"Requested integrator {integrator} is not found."
raise ValueError(msg)
return IntCls
def _make_chain(methods: list):
"""Makes the chain given a list of method names"""
try:
first = methods[0]
except TypeError:
raise TypeError(f"methods must be a list of strings or classes, not {type(methods)}")
try:
_ = first.upper()
except AttributeError:
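        # Entries are already integrator classes rather than names; use them as-is.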
return methods
return [_find_integrator_cls(name.upper()) for name in methods]
def _n_para_chk(num_parallel: int = None):
""" Check the num_parallel argument as passed to CondProb """
n_procs = max(1, mp.cpu_count()) # could cpu_count ever be < 1?
if num_parallel is None or num_parallel > n_procs:
print(f"Number of parallel processes was set to {n_procs}")
return n_procs
return num_parallel
def _default_init(targ_prob: float, acc_max: float, num_inp: int,
num_para: int):
"""Decide the default integrator chain methods and arguments depending
on the problem
Parameters
----------
targ_prob : float
target failure probability
acc_max : float
target tolerance for the estimation
num_inp : int
number of stochastic inputs of the constraints
num_para : int
number of parallel processes to use
Returns
-------
integrators : list
Integrator classes, that are to be initiated
int_args : dict
Keyword arguments to pass to integrators
"""
if targ_prob * acc_max >= 1e-5:
if targ_prob * acc_max >= 1e-4:
integrators = ["MC"]
else:
integrators = ["SUSE", "MC"]
int_args = {"num_starts": 1, "batch_size": 1e5}
elif num_inp < 15:
integrators = ["SUSE", "DS"]
int_args = {"num_starts": 1}
else:
integrators = ["SUSE"]
int_args = {"num_starts": num_para}
print("Using", integrators, "as default chain.")
return integrators, int_args
def _is_worker(workers, name):
""" check if name is in workers list of classes"""
for worker in workers:
wname = read_integrator_name(worker)
if name.upper() in wname.upper():
return True
return False
def read_integrator_name(worker):
""" read the name of the integrator instance worker """
name = str(worker).split(".")[-1]
return "".join([c for c in name if c.isalnum()])
class CondMom:
"""Class to estimate conditional means
full_space : FullSpace instance
The definition of the optimization and stochastic spaces
base_doe : int or np.ndarray
set if a new doe should be calculated or the same one should
be transformed during the optimization.
        If an array, it should have zero mean and unit variance
        but the original marginal distributions and correlation.
        It should have the same number of columns as the stochastic variables
used in the objective. If integer, a base_doe with that number of
samples will be created
doe_size : int
The size of the doe to use. If base_doe is a numpy array, this
has no effect and doesn't have to be passed.
obj_wgt : float or iterable of floats:
If not None, these weights will be used for combining the
estimated mean and the variance/std. dev. If iterable, it
must be the same length as the number of stochastic input
variables as used for the objective function.
        If None, the variances are returned separately
use_std : bool or iterable of bools
Flag to use standard deviation (True) or the variance for the
estimation. If iterable, it must be the same length as the number
of stochastic input variables as used for the objective function.
"""
def __init__(self, full_space: FullSpace, base_doe: typing.Union[bool, np.ndarray] = True,
doe_size: int = 100, obj_wgt: typing.Optional[typing.Union[float, list, np.ndarray]] = None,
use_std: typing.Union[bool, list] = False):
self.full_space = full_space
num_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(use_std, num_obj)
self._obj_wgt = _check_obj_wgt(obj_wgt, num_obj)
self._doe_size = None
self._base_doe = None
self.doe_size = doe_size
self.base_doe = base_doe
@property
def base_doe(self):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
return self._base_doe
@base_doe.setter
def base_doe(self, new_doe):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
# Sanity checks for base_doe. Using parameters with multiple valid types
# may be an antipattern but it makes configuration easier from
# the user point of view. Tolerate this for a better user experience.
if isinstance(new_doe, np.ndarray):
if self._is_valid_base(new_doe): # raises errors
self._base_doe = new_doe.copy() # Make our copy.
return
try:
make_base_doe = bool(new_doe)
except ValueError:
return
if make_base_doe:
# Prepare doe with zero mean and unit variance
doe = self.full_space.inp_space.sto_obj_base_doe(self.doe_size)
self._base_doe = doe
return
# if not bool(new_doe); remake new doe so set base_doe to None
self._base_doe = None
return
def _is_valid_base(self, new_doe):
# Assume numpy array
n_sto_obj_inps = len(self.full_space.inp_space.inds["sto_obj"])
if new_doe.shape[1] != n_sto_obj_inps:
msg = "base_doe must be one of None, bool or a 2d array "
msg += f"with shape (num_samples, num_stochastic_objective_variables={n_sto_obj_inps})."
raise TypeError(msg)
if max(abs(new_doe.mean(0).max()), abs(1 - new_doe.std(0).max())) > 0.5:
msg = "base_doe must have zero mean and unit variance."
raise ValueError(msg)
return True
@property
def doe_size(self):
"""Size of the base doe to use for the moment estimation"""
return self._doe_size
@doe_size.setter
def doe_size(self, new_size):
"""Size of the base doe to use for the moment estimation"""
self._doe_size = new_size
if self.base_doe is not None:
self.base_doe = new_size
@property
def obj_wgt(self):
"""Weights for the linear combination of cond. moments"""
return self._obj_wgt
@obj_wgt.setter
def obj_wgt(self, new_obj_wgt):
"""Weights for the linear combination of cond. moments"""
n_obj = len(self.full_space.obj_inds["sto"])
self._obj_wgt = _check_obj_wgt(new_obj_wgt, n_obj)
@property
def use_std(self):
"""Indexes to use std. dev. instead of variance"""
return self._use_std
@use_std.setter
def use_std(self, new_std):
"""Indexes to use std. dev. instead of variance"""
n_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(new_std, n_obj)
def gen_doe(self, x_opt):
"""Get DoE for the Moment estimation for x_opt"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
if self.base_doe is None:
return self.full_space.inp_space.sto_obj_doe(x_opt, self._doe_size)
mean, std = self.full_space.inp_space.opt_moms(x_opt)
names = self.full_space.inp_space.mulvar.names
names = [names[i] for i in self.full_space.inp_space.mv_inds("sto_obj")]
# Translating is not sufficient for lognormal and truncated normal
inds = [i for i, x in enumerate(names) if "log" in x or "trunc" in x]
if not inds:
return self.base_doe * std + mean
# Handle Lognormal
binds = np.ones(self.base_doe.shape[1], dtype=bool)
binds[inds] = False
base_doe = self.base_doe.copy()
base_doe[:, binds] = base_doe[:, binds] * std[binds] + mean[binds]
mean = mean[inds]
std = std[inds]
cur_mv = self.full_space.inp_space.opt_mulvar(x_opt, domain="sto_obj")
for ind in inds:
base_doe[:, ind] = cur_mv.dists[ind].marg.ppf(base_doe[:, ind])
return base_doe
def est_mom(self, x_opt):
""" Estimate conditional moments for a single optimization point x_opt
Conditional moments are E[Y | x_opt] and Var[Y | x_opt]
Parameters
----------
x_opt : numpy.ndarray
the coordinates of the optimization variables to compute
the moments
Returns
-------
mus : numpy.ndarray
Estimated means, or if obj_wgt was not None,
the combined mu + obj_wgt * sigma
sigmas : numpy.ndarray
Estimated variances or std. dev. depending on the settings.
only returned if obj_wgt is None.
"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
doe = self.gen_doe(x_opt)
res = self.full_space.sto_obj(doe, x_opt)
mus = np.mean(res, axis=0)
sigmas = np.zeros(mus.shape)
std_inds = self.use_std
sigmas[std_inds] = np.std(res[:, std_inds], axis=0, ddof=1)
var_inds = np.logical_not(std_inds)
sigmas[var_inds] = np.var(res[:, var_inds], axis=0, ddof=1)
if self.obj_wgt is None:
return mus, sigmas
return mus + self.obj_wgt * sigmas
class CondProba:
"""A chain of integtrators for the calculation of the probability
This starts with a fast integrator to get an initial guess. If the
guess is too far away from target_pf, this stops further calculations
and returns the failure probability. Used for accelerating the
optimization process. Chains with a single element are also possible.
Parameters
----------
num_inputs : int
Number of stochastic inputs used for the constraints
target_fail_prob : float
Target failure probability. If unsure, just set it sufficiently low
        i.e. >=1e-6. Note that numerical instabilities start at 1e-9 due to
scipy stats returning nans and infs
num_parallel : int
Number of parallel computations, if the used integrator supports it.
If passed, the entry in call_args will override this.
methods : None or list of str
Names of the methods to use for the estimation. If None, a default
chain will be selected depending the problem definition, which is
recommended for new users.
Currently the following names are supported:
MC - Crude Monte Carlo
DS - Directional simulation
FORM - First order reliability method
ISPUD - Importance sampling using design point (MPP)
call_args : None or list
keyword argument dict to pass to the integrator calc_prob_fail
as call arguments. Any argument in this will override the
initialization arguments with the same name i.e. target_fp and
num_parallel
target_tol : float
Target tolerance for the failure probability. Also used
for stopping the chain, if the computed failure probability
is either smaller than target_fp * target_tol or larger than
target_fp / target_tol.
"""
def __init__(self, target_fail_prob: float, num_inputs: int, num_parallel: int = 4,
methods: typing.Optional[typing.Union[str, list]] = None, call_args: typing.Optional[dict] = None,
target_tol: float = 0.01):
self.n_inp = num_inputs
num_para = _n_para_chk(num_parallel)
cargs = {"num_parallel": num_para, "multi_region": True}
if methods is None:
methods, cargs = _default_init(target_fail_prob, target_tol,
num_inputs, num_para)
if call_args is None:
self.call_args = {**cargs}
else:
self.call_args = {**cargs, **call_args}
self._tar_fp = target_fail_prob
self._tar_tol = target_tol
self.workers = _make_chain(methods)
self._prob_tol()
if "doe" in self.call_args.keys():
doe = self.call_args["doe"]
if doe.shape[1] != self.n_inp:
msg = f"Shape mismatch between the number of inputs ({self.n_inp}) "
msg += f"and the DoE {doe.shape[1]}"
                raise ValueError(msg)
mu_max = np.max(np.mean(doe, axis=0))
sig_max = np.max(np.std(doe, axis=0))
if abs(mu_max) > 1e-10 or abs(sig_max - 1) > 1e-10:
msg = "Zero mean and unit variance is required for doe "
msg += "in call_args, found mean == {mu_max} and "
msg += "sigma == {sig_max} columns"
raise ValueError(msg)
elif _is_worker(self.workers, "ISPUD"):
margs = [stats.norm() for k in range(self.n_inp)]
self.call_args["doe"] = make_doe(100, margs, num_tries=1000)
self.call_args["post_proc"] = False
self.call_args["num_parallel"] = num_para
@property
def target_fail_prob(self):
"""target failure probability"""
return self._tar_fp
@target_fail_prob.setter
def target_fail_prob(self, new_fp):
"""target failure probability"""
if new_fp <= 0 or new_fp > 0.9:
msg = "Target failure probability should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_fp = new_fp
self._prob_tol()
@property
def target_tol(self):
"""Target accuracy for failure probability estimation"""
return self._tar_tol
@target_tol.setter
def target_tol(self, new_tol):
"""Target accuracy for failure probability estimation"""
if new_tol <= 0 or new_tol > 0.9:
msg = "Target probability accuracy should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_tol = new_tol
self._prob_tol()
def _prob_tol(self):
prob_tol = self._tar_fp * self._tar_tol
if _is_worker(self.workers, "MC") and prob_tol < 1e-6:
msg = "Crude Monte Carlo can be very inefficient for "
msg += "such low probabilities of failure."
warnings.warn(msg)
self.call_args["prob_tol"] = prob_tol
def calc_fail_prob(self, input_mv, constraints, const_args, verbose: int = 0):
""" Calculate failure probability using the worker chain
Parameters
----------
input_mv : MultiVar instance
Definition of the multivariate input
constraints : list
constraint functions to initialize the integrator
const_args : None or list
arguments to pass to the constraints
Returns:
--------
pof : float
probability of failure
feasible : bool
pof <= target_pf
"""
if not self.workers:
raise ValueError("No estimators defined")
for worker in self.workers:
estimator = worker(input_mv, constraints, const_args)
try:
pof = estimator.calc_fail_prob(**self.call_args)[0]
except ValueError:
if worker == self.workers[-1]:
print("Fatal error while calculating probability of failure with", worker)
print(input_mv)
print("Setting it to 100%.")
pof = 1.
continue
if verbose > 1:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
if pof > self._tar_fp:
prob_ratio = self._tar_fp / pof
else:
prob_ratio = pof / self._tar_fp
if prob_ratio <= self._tar_tol:
break
if verbose > 0:
try:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
except NameError:
pass
return pof, pof <= self._tar_fp
|
[
"numpy.mean",
"numpy.ones",
"duqo.doe.lhs.make_doe",
"scipy.stats.norm",
"numpy.logical_not",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.zeros",
"numpy.std",
"warnings.warn",
"numpy.var"
] |
[((1245, 1274), 'numpy.array', 'np.array', (['use_std'], {'dtype': 'bool'}), '(use_std, dtype=bool)\n', (1253, 1274), True, 'import numpy as np\n'), ((2344, 2358), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2356, 2358), True, 'import multiprocessing as mp\n'), ((9965, 10008), 'numpy.ones', 'np.ones', (['self.base_doe.shape[1]'], {'dtype': 'bool'}), '(self.base_doe.shape[1], dtype=bool)\n', (9972, 10008), True, 'import numpy as np\n'), ((11293, 11313), 'numpy.mean', 'np.mean', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (11300, 11313), True, 'import numpy as np\n'), ((11331, 11350), 'numpy.zeros', 'np.zeros', (['mus.shape'], {}), '(mus.shape)\n', (11339, 11350), True, 'import numpy as np\n'), ((11410, 11450), 'numpy.std', 'np.std', (['res[:, std_inds]'], {'axis': '(0)', 'ddof': '(1)'}), '(res[:, std_inds], axis=0, ddof=1)\n', (11416, 11450), True, 'import numpy as np\n'), ((11470, 11494), 'numpy.logical_not', 'np.logical_not', (['std_inds'], {}), '(std_inds)\n', (11484, 11494), True, 'import numpy as np\n'), ((11522, 11562), 'numpy.var', 'np.var', (['res[:, var_inds]'], {'axis': '(0)', 'ddof': '(1)'}), '(res[:, var_inds], axis=0, ddof=1)\n', (11528, 11562), True, 'import numpy as np\n'), ((808, 829), 'numpy.array', 'np.array', (['obj_weights'], {}), '(obj_weights)\n', (816, 829), True, 'import numpy as np\n'), ((16529, 16547), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (16542, 16547), False, 'import warnings\n'), ((541, 557), 'numpy.ones', 'np.ones', (['num_obj'], {}), '(num_obj)\n', (548, 557), True, 'import numpy as np\n'), ((14701, 14721), 'numpy.mean', 'np.mean', (['doe'], {'axis': '(0)'}), '(doe, axis=0)\n', (14708, 14721), True, 'import numpy as np\n'), ((14752, 14771), 'numpy.std', 'np.std', (['doe'], {'axis': '(0)'}), '(doe, axis=0)\n', (14758, 14771), True, 'import numpy as np\n'), ((15213, 15249), 'duqo.doe.lhs.make_doe', 'make_doe', (['(100)', 'margs'], {'num_tries': '(1000)'}), '(100, margs, num_tries=1000)\n', (15221, 15249), False, 'from duqo.doe.lhs import make_doe\n'), ((15136, 15148), 'scipy.stats.norm', 'stats.norm', ([], {}), '()\n', (15146, 15148), False, 'from scipy import stats\n')]
|
import tensorflow as tf
import numpy as np
from dps.register import RegisterBank
from dps.env import TensorFlowEnv
from dps.utils import Param, Config
def build_env():
return PathDiscovery()
config = Config(
build_env=build_env,
curriculum=[
dict(shape=(2, 2), threshold=6),
dict(shape=(3, 3), threshold=4),
dict(shape=(4, 4), threshold=2)
],
env_name='path_discovery',
shape=(3, 3),
T=10,
stopping_criteria="reward_per_ep,max",
)
class PathDiscovery(TensorFlowEnv):
""" The top-left cell stored an integers which says which of the other 3 corners is the rewarding corner.
Agents use the "look" to see which integer is present at the current cell.
"""
T = Param()
shape = Param()
n_val = Param()
require_discovery = Param(True)
def __init__(self, **kwargs):
self.action_names = '^ > v < look'.split()
self.action_shape = (len(self.action_names),)
self.rb = RegisterBank('PathDiscoveryRB', 'x y vision action', 'discovered',
[0.0, 0.0, -1.0, 0.0, 0.0], 'x y')
self.val_input = self._make_input(self.n_val)
self.test_input = self._make_input(self.n_val)
super(PathDiscovery, self).__init__()
def _make_input(self, batch_size):
start_x = np.random.randint(self.shape[0], size=(batch_size, 1))
start_y = np.random.randint(self.shape[1], size=(batch_size, 1))
grid = np.random.randint(3, size=(batch_size, np.product(self.shape)))
return np.concatenate([start_x, start_y, grid], axis=1).astype('f')
def _build_placeholders(self):
self.input = tf.placeholder(tf.float32, (None, 2+np.product(self.shape)))
def _make_feed_dict(self, n_rollouts, T, mode):
if mode == 'train':
inp = self._make_input(n_rollouts)
elif mode == 'val':
inp = self.val_input
elif mode == 'test':
inp = self.test_input
else:
raise Exception("Unknown mode: {}.".format(mode))
if n_rollouts is not None:
inp = inp[:n_rollouts, :]
return {self.input: inp}
def build_init(self, r):
return self.rb.wrap(x=self.input[:, 0:1], y=self.input[:, 1:2],
vision=r[:, 2:3], action=r[:, 3:4], discovered=r[:, 4:5])
def build_step(self, t, r, actions):
x, y, vision, action, discovered = self.rb.as_tuple(r)
up, right, down, left, look = tf.split(actions, 5, axis=1)
new_y = (1 - down - up) * y + down * (y+1) + up * (y-1)
new_x = (1 - right - left) * x + right * (x+1) + left * (x-1)
new_y = tf.clip_by_value(new_y, 0.0, self.shape[0]-1)
new_x = tf.clip_by_value(new_x, 0.0, self.shape[1]-1)
idx = tf.cast(y * self.shape[1] + x, tf.int32)
new_vision = tf.reduce_sum(
tf.one_hot(tf.reshape(idx, (-1,)), np.product(self.shape)) * self.input[:, 2:],
axis=1, keepdims=True)
vision = (1 - look) * vision + look * new_vision
action = tf.cast(tf.reshape(tf.argmax(actions, axis=1), (-1, 1)), tf.float32)
top_left = tf.cast(tf.equal(idx, 0), tf.float32)
discovered = discovered + look * top_left
discovered = tf.minimum(discovered, 1.0)
new_registers = self.rb.wrap(x=new_x, y=new_y, vision=vision, action=action, discovered=discovered)
top_right = tf.cast(tf.equal(idx, self.shape[1]-1), tf.float32)
bottom_left = tf.cast(tf.equal(idx, (self.shape[0]-1) * self.shape[1]), tf.float32)
bottom_right = tf.cast(tf.equal(idx, self.shape[0] * self.shape[1] - 1), tf.float32)
reward = (
top_right * tf.cast(tf.equal(self.input[:, 2:3], 0), tf.float32) +
bottom_left * tf.cast(tf.equal(self.input[:, 2:3], 1), tf.float32) +
bottom_right * tf.cast(tf.equal(self.input[:, 2:3], 2), tf.float32)
)
if self.require_discovery:
reward = reward * discovered
return tf.fill((tf.shape(r)[0], 1), 0.0), reward, new_registers
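# Hedged sketch (not part of the env): NumPy equivalent of the one-hot "look" read in
# build_step above, for a single example on a 3x3 grid; values and positions are illustrative.
def _numpy_look_example():
    grid = np.array([2, 0, 1, 1, 0, 2, 0, 1, 2])  # flattened 3x3 grid of stored integers
    x, y = 1, 2                                   # agent position
    idx = y * 3 + x                               # flattened cell index, y * shape[1] + x = 7
    vision = (np.eye(9)[idx] * grid).sum()        # value stored at that cell (here 1)
    return vision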
|
[
"numpy.product",
"tensorflow.equal",
"tensorflow.shape",
"dps.register.RegisterBank",
"tensorflow.split",
"dps.utils.Param",
"numpy.random.randint",
"tensorflow.argmax",
"tensorflow.clip_by_value",
"numpy.concatenate",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.minimum"
] |
[((740, 747), 'dps.utils.Param', 'Param', ([], {}), '()\n', (745, 747), False, 'from dps.utils import Param, Config\n'), ((760, 767), 'dps.utils.Param', 'Param', ([], {}), '()\n', (765, 767), False, 'from dps.utils import Param, Config\n'), ((780, 787), 'dps.utils.Param', 'Param', ([], {}), '()\n', (785, 787), False, 'from dps.utils import Param, Config\n'), ((812, 823), 'dps.utils.Param', 'Param', (['(True)'], {}), '(True)\n', (817, 823), False, 'from dps.utils import Param, Config\n'), ((982, 1088), 'dps.register.RegisterBank', 'RegisterBank', (['"""PathDiscoveryRB"""', '"""x y vision action"""', '"""discovered"""', '[0.0, 0.0, -1.0, 0.0, 0.0]', '"""x y"""'], {}), "('PathDiscoveryRB', 'x y vision action', 'discovered', [0.0, \n 0.0, -1.0, 0.0, 0.0], 'x y')\n", (994, 1088), False, 'from dps.register import RegisterBank\n'), ((1329, 1383), 'numpy.random.randint', 'np.random.randint', (['self.shape[0]'], {'size': '(batch_size, 1)'}), '(self.shape[0], size=(batch_size, 1))\n', (1346, 1383), True, 'import numpy as np\n'), ((1402, 1456), 'numpy.random.randint', 'np.random.randint', (['self.shape[1]'], {'size': '(batch_size, 1)'}), '(self.shape[1], size=(batch_size, 1))\n', (1419, 1456), True, 'import numpy as np\n'), ((2497, 2525), 'tensorflow.split', 'tf.split', (['actions', '(5)'], {'axis': '(1)'}), '(actions, 5, axis=1)\n', (2505, 2525), True, 'import tensorflow as tf\n'), ((2678, 2725), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_y', '(0.0)', '(self.shape[0] - 1)'], {}), '(new_y, 0.0, self.shape[0] - 1)\n', (2694, 2725), True, 'import tensorflow as tf\n'), ((2740, 2787), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['new_x', '(0.0)', '(self.shape[1] - 1)'], {}), '(new_x, 0.0, self.shape[1] - 1)\n', (2756, 2787), True, 'import tensorflow as tf\n'), ((2801, 2841), 'tensorflow.cast', 'tf.cast', (['(y * self.shape[1] + x)', 'tf.int32'], {}), '(y * self.shape[1] + x, tf.int32)\n', (2808, 2841), True, 'import tensorflow as tf\n'), ((3278, 3305), 'tensorflow.minimum', 'tf.minimum', (['discovered', '(1.0)'], {}), '(discovered, 1.0)\n', (3288, 3305), True, 'import tensorflow as tf\n'), ((3176, 3192), 'tensorflow.equal', 'tf.equal', (['idx', '(0)'], {}), '(idx, 0)\n', (3184, 3192), True, 'import tensorflow as tf\n'), ((3444, 3476), 'tensorflow.equal', 'tf.equal', (['idx', '(self.shape[1] - 1)'], {}), '(idx, self.shape[1] - 1)\n', (3452, 3476), True, 'import tensorflow as tf\n'), ((3518, 3568), 'tensorflow.equal', 'tf.equal', (['idx', '((self.shape[0] - 1) * self.shape[1])'], {}), '(idx, (self.shape[0] - 1) * self.shape[1])\n', (3526, 3568), True, 'import tensorflow as tf\n'), ((3611, 3659), 'tensorflow.equal', 'tf.equal', (['idx', '(self.shape[0] * self.shape[1] - 1)'], {}), '(idx, self.shape[0] * self.shape[1] - 1)\n', (3619, 3659), True, 'import tensorflow as tf\n'), ((1551, 1599), 'numpy.concatenate', 'np.concatenate', (['[start_x, start_y, grid]'], {'axis': '(1)'}), '([start_x, start_y, grid], axis=1)\n', (1565, 1599), True, 'import numpy as np\n'), ((3098, 3124), 'tensorflow.argmax', 'tf.argmax', (['actions'], {'axis': '(1)'}), '(actions, axis=1)\n', (3107, 3124), True, 'import tensorflow as tf\n'), ((1511, 1533), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (1521, 1533), True, 'import numpy as np\n'), ((1705, 1727), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (1715, 1727), True, 'import numpy as np\n'), ((2901, 2923), 'tensorflow.reshape', 'tf.reshape', (['idx', '(-1,)'], {}), '(idx, (-1,))\n', (2911, 2923), True, 'import 
tensorflow as tf\n'), ((2925, 2947), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (2935, 2947), True, 'import numpy as np\n'), ((3888, 3919), 'tensorflow.equal', 'tf.equal', (['self.input[:, 2:3]', '(2)'], {}), '(self.input[:, 2:3], 2)\n', (3896, 3919), True, 'import tensorflow as tf\n'), ((3725, 3756), 'tensorflow.equal', 'tf.equal', (['self.input[:, 2:3]', '(0)'], {}), '(self.input[:, 2:3], 0)\n', (3733, 3756), True, 'import tensorflow as tf\n'), ((3806, 3837), 'tensorflow.equal', 'tf.equal', (['self.input[:, 2:3]', '(1)'], {}), '(self.input[:, 2:3], 1)\n', (3814, 3837), True, 'import tensorflow as tf\n'), ((4045, 4056), 'tensorflow.shape', 'tf.shape', (['r'], {}), '(r)\n', (4053, 4056), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
import math
import os
import numpy as np
import time
import sys
import copy
import rospy
import moveit_msgs.msg
import geometry_msgs.msg
import random
import csv
from sensor_msgs.msg import JointState
from gazebo_msgs.msg import LinkStates
from gazebo_msgs.msg import LinkState
from std_msgs.msg import Float64
from std_msgs.msg import String
from sensor_msgs.msg import Joy
import moveit_commander
from panda_rl.srv import StepAction, StepActionResponse
group_name = "panda_arm_hand"
move_group = moveit_commander.MoveGroupCommander(group_name)
quat_goal = np.array([1, 0, 0.0075, 0])
def vector2points(v, u):
v = np.array(v)
u = np.array(u)
vector = u - v
vector = np.round(vector, 5)
return vector
def get_hand_position():
msg = rospy.wait_for_message('/gazebo/link_states', LinkStates)
hand_positionx = (msg.pose[9].position.x + msg.pose[10].position.x) / 2
hand_positiony = (msg.pose[9].position.y + msg.pose[10].position.y) / 2
hand_positionz = (msg.pose[9].position.z + msg.pose[10].position.z) / 2
hand_position = [hand_positionx, hand_positiony, hand_positionz]
hand_position = np.round(hand_position, 5)
return hand_position
def get_hand_orientation():
msg = rospy.wait_for_message('/gazebo/link_states', LinkStates)
hand_orientation_x = (msg.pose[9].orientation.x + msg.pose[10].orientation.x) / 2
hand_orientation_y = (msg.pose[9].orientation.y + msg.pose[10].orientation.y) / 2
hand_orientation_z = (msg.pose[9].orientation.z + msg.pose[10].orientation.z) / 2
hand_orientation_w = (msg.pose[9].orientation.w + msg.pose[10].orientation.w) / 2
hand_orientation = [hand_orientation_x, hand_orientation_y, hand_orientation_z, hand_orientation_w]
hand_orientation = np.round(hand_orientation, 5)
return hand_orientation
def goal_distance(x, y):
x = np.array(x)
y = np.array(y)
distance = np.linalg.norm(x-y)
distance = np.round(distance, 5)
return distance
def take_action(msg):
done = False
goal = msg.goal
joint_state = move_group.get_current_joint_values()
joint_state[0] = joint_state[0] + (msg.action[0] / 20)
joint_state[1] = joint_state[1] + (msg.action[1] / 20)
joint_state[2] = joint_state[2] + (msg.action[2] / 20)
joint_state[3] = joint_state[3] + (msg.action[3] / 20)
joint_state[4] = joint_state[4] + (msg.action[4] / 20)
joint_state[5] = joint_state[5] + (msg.action[5] / 20)
joint_state[7] = 0.04
joint_state[8] = 0.04
if joint_state[0] < joint1_threshold_min or joint_state[0] > joint1_threshold_max \
or joint_state[1] < joint2_threshold_min or joint_state[1] > joint2_threshold_max \
or joint_state[2] < joint3_threshold_min or joint_state[2] > joint3_threshold_max \
or joint_state[3] < joint4_threshold_min or joint_state[3] > joint4_threshold_max \
or joint_state[4] < joint5_threshold_min or joint_state[4] > joint5_threshold_max \
or joint_state[5] < joint6_threshold_min or joint_state[5] > joint6_threshold_max:
hand_position = get_hand_position()
vector = vector2points(hand_position, goal)
obs = joint_state[0:7]
obs = np.round(obs, 5)
obs = np.append(obs, vector)
done = True
reward = -50
return StepActionResponse(obs=obs, reward=reward, done=done)
else:
move_group.go(joint_state, wait=True)
move_group.stop()
joint_state = move_group.get_current_joint_values()
obs = joint_state[0:7]
obs = np.round(obs, 5)
hand_position = get_hand_position()
quat = get_hand_orientation()
quat_reward = np.linalg.norm(quat_goal - quat)
d = goal_distance(hand_position, goal)
vector = vector2points(hand_position, goal)
z = hand_position[2] - goal[2]
obs = np.append(obs, vector)
if d < 0.02 and z > 0:
reward = 0
print("Action: ", msg.action)
print("Handpos: ", hand_position)
print("Goal: ", goal)
print("Observation ", obs)
print("reward target reached: ", reward)
done = True
group_name_gripper = "hand"
move_group_gripper = moveit_commander.MoveGroupCommander(group_name_gripper)
joint_values = move_group_gripper.get_current_joint_values()
joint_values[0] = 0.02
joint_values[1] = 0.02
move_group_gripper.go(joint_values, wait=True)
move_group_gripper.stop()
return StepActionResponse(obs=obs, reward=reward, done=done)
        elif d > 0.08 and z < 0.05 or z < 0:  # encourages approaching from above by penalizing when the EE is far away but already at a similar height to the goal
reward = 5 * (-d - quat_reward)
return StepActionResponse(obs=obs, reward=reward, done=done)
else:
reward = (-d - quat_reward)
#print("Action: ", msg.action)
print("Handpos: ", hand_position)
print("Goal: ", goal)
#print("Observation ", obs)
print("reward: ", reward)
print("Distance", d)
return StepActionResponse(obs=obs, reward=reward, done=done)
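# Hedged sketch (not part of the service): mirrors the reward shaping in take_action above.
# Reaching within 2 cm from above gives 0; being far away while already low (or below the
# goal) is penalised five-fold; otherwise the reward is the negative distance plus orientation error.
def _reward_sketch(d, quat_err, z):
    if d < 0.02 and z > 0:
        return 0.0
    if (d > 0.08 and z < 0.05) or z < 0:
        return 5 * (-d - quat_err)
    return -d - quat_err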
joint1_threshold_min = -2.8973
joint2_threshold_min = -1.7628
joint3_threshold_min = -2.8973
joint4_threshold_min = -3.0718
joint5_threshold_min = -2.8973
joint6_threshold_min = -0.0175
joint1_threshold_max = 2.8973
joint2_threshold_max = 1.7628
joint3_threshold_max = 2.8973
joint4_threshold_max = -0.0698
joint5_threshold_max = 2.8973
joint6_threshold_max = 3.7525
rospy.init_node('step_service', anonymous=False)
print("step_nodeaktiv")
s = rospy.Service('step_env', StepAction, take_action)
rospy.spin()
|
[
"panda_rl.srv.StepActionResponse",
"rospy.init_node",
"rospy.Service",
"rospy.wait_for_message",
"numpy.append",
"moveit_commander.MoveGroupCommander",
"numpy.array",
"rospy.spin",
"numpy.linalg.norm",
"numpy.round"
] |
[((523, 570), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['group_name'], {}), '(group_name)\n', (558, 570), False, 'import moveit_commander\n'), ((583, 610), 'numpy.array', 'np.array', (['[1, 0, 0.0075, 0]'], {}), '([1, 0, 0.0075, 0])\n', (591, 610), True, 'import numpy as np\n'), ((5634, 5682), 'rospy.init_node', 'rospy.init_node', (['"""step_service"""'], {'anonymous': '(False)'}), "('step_service', anonymous=False)\n", (5649, 5682), False, 'import rospy\n'), ((5711, 5761), 'rospy.Service', 'rospy.Service', (['"""step_env"""', 'StepAction', 'take_action'], {}), "('step_env', StepAction, take_action)\n", (5724, 5761), False, 'import rospy\n'), ((5762, 5774), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5772, 5774), False, 'import rospy\n'), ((646, 657), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (654, 657), True, 'import numpy as np\n'), ((666, 677), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (674, 677), True, 'import numpy as np\n'), ((710, 729), 'numpy.round', 'np.round', (['vector', '(5)'], {}), '(vector, 5)\n', (718, 729), True, 'import numpy as np\n'), ((785, 842), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/gazebo/link_states"""', 'LinkStates'], {}), "('/gazebo/link_states', LinkStates)\n", (807, 842), False, 'import rospy\n'), ((1160, 1186), 'numpy.round', 'np.round', (['hand_position', '(5)'], {}), '(hand_position, 5)\n', (1168, 1186), True, 'import numpy as np\n'), ((1251, 1308), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/gazebo/link_states"""', 'LinkStates'], {}), "('/gazebo/link_states', LinkStates)\n", (1273, 1308), False, 'import rospy\n'), ((1780, 1809), 'numpy.round', 'np.round', (['hand_orientation', '(5)'], {}), '(hand_orientation, 5)\n', (1788, 1809), True, 'import numpy as np\n'), ((1873, 1884), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1881, 1884), True, 'import numpy as np\n'), ((1893, 1904), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1901, 1904), True, 'import numpy as np\n'), ((1920, 1941), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (1934, 1941), True, 'import numpy as np\n'), ((1955, 1976), 'numpy.round', 'np.round', (['distance', '(5)'], {}), '(distance, 5)\n', (1963, 1976), True, 'import numpy as np\n'), ((3211, 3227), 'numpy.round', 'np.round', (['obs', '(5)'], {}), '(obs, 5)\n', (3219, 3227), True, 'import numpy as np\n'), ((3242, 3264), 'numpy.append', 'np.append', (['obs', 'vector'], {}), '(obs, vector)\n', (3251, 3264), True, 'import numpy as np\n'), ((3321, 3374), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, reward=reward, done=done)\n', (3339, 3374), False, 'from panda_rl.srv import StepAction, StepActionResponse\n'), ((3564, 3580), 'numpy.round', 'np.round', (['obs', '(5)'], {}), '(obs, 5)\n', (3572, 3580), True, 'import numpy as np\n'), ((3685, 3717), 'numpy.linalg.norm', 'np.linalg.norm', (['(quat_goal - quat)'], {}), '(quat_goal - quat)\n', (3699, 3717), True, 'import numpy as np\n'), ((3871, 3893), 'numpy.append', 'np.append', (['obs', 'vector'], {}), '(obs, vector)\n', (3880, 3893), True, 'import numpy as np\n'), ((4260, 4315), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['group_name_gripper'], {}), '(group_name_gripper)\n', (4295, 4315), False, 'import moveit_commander\n'), ((4576, 4629), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, 
reward=reward, done=done)\n', (4594, 4629), False, 'from panda_rl.srv import StepAction, StepActionResponse\n'), ((4846, 4899), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, reward=reward, done=done)\n', (4864, 4899), False, 'from panda_rl.srv import StepAction, StepActionResponse\n'), ((5208, 5261), 'panda_rl.srv.StepActionResponse', 'StepActionResponse', ([], {'obs': 'obs', 'reward': 'reward', 'done': 'done'}), '(obs=obs, reward=reward, done=done)\n', (5226, 5261), False, 'from panda_rl.srv import StepAction, StepActionResponse\n')]
|
import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
class TestData(unittest.TestCase):
def test_to_dict_dict_should_be_set(self):
"""
:test : mlsurvey.model.Data.to_dict()
:condition : x,y, y_pred data are filled.
:main_result : the dictionary generated is the same as expected
"""
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataPandas(df, df_contains='xyypred')
expected = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
result = data.to_dict()
self.assertDictEqual(expected, result)
def test_from_dict_df_empty(self):
"""
:test : mlsurvey.model.DataPandas.from_dict()
:condition : the input dict is set and an empty dataframe is given.
:main_result : a ModelError occurs
"""
df = pd.DataFrame(data=np.array([]))
d = None
input_dict = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
try:
d = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(d)
self.assertTrue(True)
def test_from_dict_dict_empty(self):
"""
:test : mlsurvey.model.Data.from_dict()
:condition : the input dict does not contains all keys and an full dataframe is given
:main_result : a ModelError occurs
"""
x = np.array([[1, 2], [3, 4]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = None
input_dict = {'df_contains': 'xyypred',
'y_pred_col_name': 'target_pred'}
try:
data = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(data)
self.assertTrue(True)
|
[
"pandas.DataFrame",
"numpy.array",
"mlsurvey.sl.models.DataPandas",
"mlsurvey.sl.models.DataPandas.from_dict"
] |
[((369, 401), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (377, 401), True, 'import numpy as np\n'), ((414, 430), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (422, 430), True, 'import numpy as np\n'), ((448, 464), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (456, 464), True, 'import numpy as np\n'), ((566, 595), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_array'}), '(data=data_array)\n', (578, 595), True, 'import pandas as pd\n'), ((611, 662), 'mlsurvey.sl.models.DataPandas', 'mls.sl.models.DataPandas', (['df'], {'df_contains': '"""xyypred"""'}), "(df, df_contains='xyypred')\n", (635, 662), True, 'import mlsurvey as mls\n'), ((1822, 1848), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1830, 1848), True, 'import numpy as np\n'), ((1861, 1877), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1869, 1877), True, 'import numpy as np\n'), ((1895, 1911), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1903, 1911), True, 'import numpy as np\n'), ((2013, 2042), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_array'}), '(data=data_array)\n', (2025, 2042), True, 'import pandas as pd\n'), ((1364, 1414), 'mlsurvey.sl.models.DataPandas.from_dict', 'mls.sl.models.DataPandas.from_dict', (['input_dict', 'df'], {}), '(input_dict, df)\n', (1398, 1414), True, 'import mlsurvey as mls\n'), ((2199, 2249), 'mlsurvey.sl.models.DataPandas.from_dict', 'mls.sl.models.DataPandas.from_dict', (['input_dict', 'df'], {}), '(input_dict, df)\n', (2233, 2249), True, 'import mlsurvey as mls\n'), ((1154, 1166), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1162, 1166), True, 'import numpy as np\n'), ((505, 518), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (513, 518), True, 'import numpy as np\n'), ((522, 540), 'numpy.array', 'np.array', (['[y_pred]'], {}), '([y_pred])\n', (530, 540), True, 'import numpy as np\n'), ((1952, 1965), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (1960, 1965), True, 'import numpy as np\n'), ((1969, 1987), 'numpy.array', 'np.array', (['[y_pred]'], {}), '([y_pred])\n', (1977, 1987), True, 'import numpy as np\n')]
|
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import cv2
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
from .RepulsionLoss.my_repulsion_loss import repulsion
def preprocess_input(image):
image /= 255
mean=(0.406, 0.456, 0.485)
std=(0.225, 0.224, 0.229)
image -= mean
image /= std
return image
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
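# Hedged sketch (illustrative only): calc_iou on one anchor against one annotation box.
# The indexing above suggests anchors are (y1, x1, y2, x2) while annotations are
# (x1, y1, x2, y2), so symmetric square boxes are used here to keep the example simple.
def _calc_iou_example():
    a = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # 2x2 box
    b = torch.tensor([[1.0, 1.0, 3.0, 3.0]])   # 2x2 box overlapping in a 1x1 square
    return calc_iou(a, b)                      # 1 / (4 + 4 - 1) = 1/7 ~ 0.1429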
def get_target(anchor, bbox_annotation, classification, cuda):
IoU = calc_iou(anchor[:, :], bbox_annotation[:, :4])
IoU_max, IoU_argmax = torch.max(IoU, dim=1)
# compute the loss for classification
targets = torch.ones_like(classification) * -1
if cuda:
targets = targets.cuda()
targets[torch.lt(IoU_max, 0.4), :] = 0
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
return targets, num_positive_anchors, positive_indices, assigned_annotations
def encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y):
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
# efficientdet style
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dy, targets_dx, targets_dh, targets_dw))
targets = targets.t()
return targets
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
def forward(self, classifications, regressions, anchors, annotations, alpha=0.25, gamma=2.0, cuda=True):
        # setup
dtype = regressions.dtype
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
repulsion_losses = []
        # get the anchors and convert them to center / width-height form
anchor = anchors[0, :, :].to(dtype)
        # convert to center, width-height form
anchor_widths = anchor[:, 3] - anchor[:, 1]
anchor_heights = anchor[:, 2] - anchor[:, 0]
anchor_ctr_x = anchor[:, 1] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 0] + 0.5 * anchor_heights
rep_target = []
rep_regres = []
for j in range(batch_size):
            # get the ground-truth boxes
bbox_annotation = annotations[j]
            # get the classification and regression predictions for each image
classification = classifications[j, :, :]
regression = regressions[j, :, :]
            # smooth the labels (clamp predictions away from 0 and 1)
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
if len(bbox_annotation) == 0:
alpha_factor = torch.ones_like(classification) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = 1. - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
cls_loss = focal_weight * bce
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
repulsion_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
repulsion_losses.append(torch.tensor(0).to(dtype))
classification_losses.append(cls_loss.sum())
continue
            # get the targets for prediction
targets, num_positive_anchors, positive_indices, assigned_annotations = get_target(anchor, bbox_annotation, classification, cuda)
rep_target.append(bbox_annotation[:, 0:4])
rep_regres.append(anchor[positive_indices,:])
alpha_factor = torch.ones_like(targets) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
cls_loss = focal_weight * bce
zeros = torch.zeros_like(cls_loss)
if cuda:
zeros = zeros.cuda()
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, zeros)
classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.to(dtype), min=1.0)) # cross_entropy ??
# smoooth_l1 & repulsion_loss
if positive_indices.sum() > 0:
targets = encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y)
# print("Targets:", targets)n * 4
regression_diff = torch.abs(targets - regression[positive_indices, :]) # -?
# smoooth_l1
L1delta = 1.0 #0.5
regression_loss = torch.where(
torch.le(regression_diff, L1delta),
0.5 * torch.pow(regression_diff, 2),
L1delta * regression_diff - 0.5 * L1delta ** 2
)
regression_losses.append(regression_loss.sum())
else:
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
repulsion_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
repulsion_losses.append(torch.tensor(0).to(dtype))
c_loss = torch.stack(classification_losses).mean()
r_loss = torch.stack(regression_losses).mean()
# Repulsion
# rep_target = torch.tensor(rep_target, dtype=torch.float16)
# rep_regres = torch.tensor(rep_regres, dtype=torch.float16)
loss_RepGT = repulsion(rep_target, rep_regres) # anchor
repu_loss = loss_RepGT.mean() # nan problem
loss = c_loss + r_loss #+ repu_loss
return loss, c_loss, r_loss, repu_loss
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
class Generator(object):
def __init__(self,batch_size,
train_lines, image_size,
):
self.batch_size = batch_size
self.train_lines = train_lines
self.train_batches = len(train_lines)
self.image_size = image_size
def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):
        '''Random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
# correct boxes
box_data = np.zeros((len(box),5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:,:4]>0).any():
return image_data, box_data
else:
return image_data, []
def generate(self):
while True:
shuffle(self.train_lines)
lines = self.train_lines
inputs = []
targets = []
n = len(lines)
for i in range(len(lines)):
img,y = self.get_random_data(lines[i], self.image_size[0:2])
i = (i+1) % n
if len(y)!=0:
boxes = np.array(y[:,:4],dtype=np.float32)
y = np.concatenate([boxes,y[:,-1:]],axis=-1)
img = np.array(img,dtype = np.float32)
y = np.array(y,dtype = np.float32)
inputs.append(np.transpose(preprocess_input(img),(2,0,1)))
targets.append(y)
if len(targets) == self.batch_size:
tmp_inp = np.array(inputs)
tmp_targets = np.array(targets)
inputs = []
targets = []
yield tmp_inp, tmp_targets
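# Hedged sketch (illustrative only): the modulating factor applied to positives in
# FocalLoss.forward above. With alpha=0.25 and gamma=2.0, a confident prediction p=0.9
# is weighted 0.25 * (1 - 0.9)**2 = 0.0025 while a poor one p=0.1 gets 0.2025, ~81x more.
def _focal_weight_example(alpha=0.25, gamma=2.0):
    return [alpha * (1.0 - p) ** gamma for p in (0.9, 0.1)]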
|
[
"numpy.random.rand",
"PIL.Image.new",
"torch.max",
"torch.pow",
"torch.eq",
"numpy.array",
"torch.unsqueeze",
"numpy.concatenate",
"torch.zeros_like",
"torch.le",
"torch.ones_like",
"torch.abs",
"random.shuffle",
"torch.ge",
"cv2.cvtColor",
"torch.lt",
"torch.clamp",
"PIL.Image.open",
"torch.log",
"numpy.logical_and",
"torch.stack",
"torch.ne",
"torch.tensor",
"numpy.random.shuffle"
] |
[((734, 756), 'torch.clamp', 'torch.clamp', (['iw'], {'min': '(0)'}), '(iw, min=0)\n', (745, 756), False, 'import torch\n'), ((766, 788), 'torch.clamp', 'torch.clamp', (['ih'], {'min': '(0)'}), '(ih, min=0)\n', (777, 788), False, 'import torch\n'), ((890, 916), 'torch.clamp', 'torch.clamp', (['ua'], {'min': '(1e-08)'}), '(ua, min=1e-08)\n', (901, 916), False, 'import torch\n'), ((1135, 1156), 'torch.max', 'torch.max', (['IoU'], {'dim': '(1)'}), '(IoU, dim=1)\n', (1144, 1156), False, 'import torch\n'), ((1365, 1387), 'torch.ge', 'torch.ge', (['IoU_max', '(0.5)'], {}), '(IoU_max, 0.5)\n', (1373, 1387), False, 'import torch\n'), ((2415, 2444), 'torch.clamp', 'torch.clamp', (['gt_widths'], {'min': '(1)'}), '(gt_widths, min=1)\n', (2426, 2444), False, 'import torch\n'), ((2462, 2492), 'torch.clamp', 'torch.clamp', (['gt_heights'], {'min': '(1)'}), '(gt_heights, min=1)\n', (2473, 2492), False, 'import torch\n'), ((2642, 2681), 'torch.log', 'torch.log', (['(gt_widths / anchor_widths_pi)'], {}), '(gt_widths / anchor_widths_pi)\n', (2651, 2681), False, 'import torch\n'), ((2699, 2740), 'torch.log', 'torch.log', (['(gt_heights / anchor_heights_pi)'], {}), '(gt_heights / anchor_heights_pi)\n', (2708, 2740), False, 'import torch\n'), ((2756, 2817), 'torch.stack', 'torch.stack', (['(targets_dy, targets_dx, targets_dh, targets_dw)'], {}), '((targets_dy, targets_dx, targets_dh, targets_dw))\n', (2767, 2817), False, 'import torch\n'), ((1214, 1245), 'torch.ones_like', 'torch.ones_like', (['classification'], {}), '(classification)\n', (1229, 1245), False, 'import torch\n'), ((8239, 8258), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (8249, 8258), False, 'from PIL import Image\n'), ((8845, 8886), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (8854, 8886), False, 'from PIL import Image\n'), ((522, 553), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 3]'], {'dim': '(1)'}), '(a[:, 3], dim=1)\n', (537, 553), False, 'import torch\n'), ((576, 603), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 1]', '(1)'], {}), '(a[:, 1], 1)\n', (591, 603), False, 'import torch\n'), ((633, 664), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 2]'], {'dim': '(1)'}), '(a[:, 2], dim=1)\n', (648, 664), False, 'import torch\n'), ((687, 714), 'torch.unsqueeze', 'torch.unsqueeze', (['a[:, 0]', '(1)'], {}), '(a[:, 0], 1)\n', (702, 714), False, 'import torch\n'), ((798, 863), 'torch.unsqueeze', 'torch.unsqueeze', (['((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))'], {'dim': '(1)'}), '((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1)\n', (813, 863), False, 'import torch\n'), ((1310, 1332), 'torch.lt', 'torch.lt', (['IoU_max', '(0.4)'], {}), '(IoU_max, 0.4)\n', (1318, 1332), False, 'import torch\n'), ((3925, 3974), 'torch.clamp', 'torch.clamp', (['classification', '(0.0001)', '(1.0 - 0.0001)'], {}), '(classification, 0.0001, 1.0 - 0.0001)\n', (3936, 3974), False, 'import torch\n'), ((5808, 5834), 'torch.zeros_like', 'torch.zeros_like', (['cls_loss'], {}), '(cls_loss)\n', (5824, 5834), False, 'import torch\n'), ((7731, 7747), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7745, 7747), True, 'import numpy as np\n'), ((9584, 9618), 'cv2.cvtColor', 'cv2.cvtColor', (['x', 'cv2.COLOR_HSV2RGB'], {}), '(x, cv2.COLOR_HSV2RGB)\n', (9596, 9618), False, 'import cv2\n'), ((9725, 9747), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (9742, 9747), True, 'import numpy as np\n'), ((10519, 10544), 'random.shuffle', 'shuffle', 
(['self.train_lines'], {}), '(self.train_lines)\n', (10526, 10544), False, 'from random import shuffle\n'), ((5262, 5286), 'torch.ones_like', 'torch.ones_like', (['targets'], {}), '(targets)\n', (5277, 5286), False, 'import torch\n'), ((5406, 5428), 'torch.eq', 'torch.eq', (['targets', '(1.0)'], {}), '(targets, 1.0)\n', (5414, 5428), False, 'import torch\n'), ((5501, 5523), 'torch.eq', 'torch.eq', (['targets', '(1.0)'], {}), '(targets, 1.0)\n', (5509, 5523), False, 'import torch\n'), ((5603, 5633), 'torch.pow', 'torch.pow', (['focal_weight', 'gamma'], {}), '(focal_weight, gamma)\n', (5612, 5633), False, 'import torch\n'), ((5928, 5951), 'torch.ne', 'torch.ne', (['targets', '(-1.0)'], {}), '(targets, -1.0)\n', (5936, 5951), False, 'import torch\n'), ((6413, 6465), 'torch.abs', 'torch.abs', (['(targets - regression[positive_indices, :])'], {}), '(targets - regression[positive_indices, :])\n', (6422, 6465), False, 'import torch\n'), ((7233, 7267), 'torch.stack', 'torch.stack', (['classification_losses'], {}), '(classification_losses)\n', (7244, 7267), False, 'import torch\n'), ((7292, 7322), 'torch.stack', 'torch.stack', (['regression_losses'], {}), '(regression_losses)\n', (7303, 7322), False, 'import torch\n'), ((9269, 9296), 'numpy.array', 'np.array', (['image', 'np.float32'], {}), '(image, np.float32)\n', (9277, 9296), True, 'import numpy as np\n'), ((10136, 10172), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (10150, 10172), True, 'import numpy as np\n'), ((11006, 11037), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (11014, 11037), True, 'import numpy as np\n'), ((11059, 11088), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float32'}), '(y, dtype=np.float32)\n', (11067, 11088), True, 'import numpy as np\n'), ((4057, 4088), 'torch.ones_like', 'torch.ones_like', (['classification'], {}), '(classification)\n', (4072, 4088), False, 'import torch\n'), ((4335, 4365), 'torch.pow', 'torch.pow', (['focal_weight', 'gamma'], {}), '(focal_weight, gamma)\n', (4344, 4365), False, 'import torch\n'), ((4407, 4438), 'torch.log', 'torch.log', (['(1.0 - classification)'], {}), '(1.0 - classification)\n', (4416, 4438), False, 'import torch\n'), ((6604, 6638), 'torch.le', 'torch.le', (['regression_diff', 'L1delta'], {}), '(regression_diff, L1delta)\n', (6612, 6638), False, 'import torch\n'), ((10863, 10899), 'numpy.array', 'np.array', (['y[:, :4]'], {'dtype': 'np.float32'}), '(y[:, :4], dtype=np.float32)\n', (10871, 10899), True, 'import numpy as np\n'), ((10922, 10965), 'numpy.concatenate', 'np.concatenate', (['[boxes, y[:, -1:]]'], {'axis': '(-1)'}), '([boxes, y[:, -1:]], axis=-1)\n', (10936, 10965), True, 'import numpy as np\n'), ((11297, 11313), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (11305, 11313), True, 'import numpy as np\n'), ((11348, 11365), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (11356, 11365), True, 'import numpy as np\n'), ((5665, 5690), 'torch.log', 'torch.log', (['classification'], {}), '(classification)\n', (5674, 5690), False, 'import torch\n'), ((5711, 5742), 'torch.log', 'torch.log', (['(1.0 - classification)'], {}), '(1.0 - classification)\n', (5720, 5742), False, 'import torch\n'), ((6666, 6695), 'torch.pow', 'torch.pow', (['regression_diff', '(2)'], {}), '(regression_diff, 2)\n', (6675, 6695), False, 'import torch\n'), ((4769, 4784), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4781, 4784), False, 'import torch\n'), ((4840, 
4855), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4852, 4855), False, 'import torch\n'), ((7116, 7131), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7128, 7131), False, 'import torch\n'), ((7187, 7202), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7199, 7202), False, 'import torch\n'), ((4590, 4605), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4602, 4605), False, 'import torch\n'), ((4668, 4683), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4680, 4683), False, 'import torch\n'), ((6937, 6952), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (6949, 6952), False, 'import torch\n'), ((7015, 7030), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7027, 7030), False, 'import torch\n')]
|
import numpy as np
from stardist import star_dist, relabel_image_stardist
import pytest
from utils import random_image, real_image2d, check_similar, circle_image
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_types(img, n_rays):
mode = "cpp"
gt = star_dist(img, n_rays=n_rays, mode=mode)
for dtype in (np.int8, np.int16, np.int32,
np.uint8, np.uint16, np.uint32):
x = star_dist(img.astype(dtype), n_rays=n_rays, mode=mode)
print("test_stardist2D (mode {mode}) for shape {img.shape} and type {dtype}".format(
mode=mode, img=img, dtype=dtype))
check_similar(gt, x)
@pytest.mark.gpu
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_types_gpu(img, n_rays):
mode = "opencl"
gt = star_dist(img, n_rays=n_rays, mode=mode)
for dtype in (np.int8, np.int16, np.int32,
np.uint8, np.uint16, np.uint32):
x = star_dist(img.astype(dtype), n_rays=n_rays, mode=mode)
print("test_stardist2D with mode {mode} for shape {img.shape} and type {dtype}".format(
mode=mode, img=img, dtype=dtype))
check_similar(gt, x)
@pytest.mark.gpu
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_cpu_gpu(img, n_rays):
s_cpp = star_dist(img, n_rays=n_rays, mode="cpp")
s_ocl = star_dist(img, n_rays=n_rays, mode="opencl")
check_similar(s_cpp, s_ocl)
@pytest.mark.parametrize('n_rays', (32,64))
@pytest.mark.parametrize('eps', ((1,1),(.4,1.3)))
def test_relabel_consistency(n_rays, eps, plot = False):
""" test whether an already star-convex label image gets perfectly relabeld"""
# img = random_image((128, 123))
lbl1 = circle_image(shape=(32,32), radius=8, eps = eps)
lbl1 = relabel_image_stardist(lbl1, n_rays)
lbl2 = relabel_image_stardist(lbl1, n_rays)
rel_error = 1-np.count_nonzero(np.bitwise_and(lbl1>0, lbl2>0))/np.count_nonzero(lbl1>0)
print(rel_error)
assert rel_error<1e-1
if plot:
import matplotlib.pyplot as plt
plt.figure(num=1, figsize=(8,4))
plt.subplot(1,3,1);plt.imshow(lbl1);plt.title("GT")
plt.subplot(1,3,2);plt.imshow(lbl2);plt.title("Reco")
plt.subplot(1,3,3);plt.imshow(1*(lbl1>0)+2*(lbl2>0));plt.title("Overlay")
plt.tight_layout()
plt.show()
return lbl1, lbl2
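# Hedged sketch (not part of the test suite): the consistency metric above is the fraction
# of the original foreground lost after relabeling; keeping 95 of 100 foreground pixels
# gives 1 - 95/100 = 0.05, which passes the 1e-1 bound.
def _relabel_error_example():
    lbl1 = np.ones((10, 10), int)                          # 100 foreground pixels
    lbl2 = lbl1.copy()
    lbl2[0, :5] = 0                                         # 5 of them lost
    kept = np.count_nonzero(np.bitwise_and(lbl1 > 0, lbl2 > 0))
    return 1 - kept / np.count_nonzero(lbl1 > 0)           # -> 0.05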
if __name__ == '__main__':
lbl1, lbl2 = test_relabel_consistency(32,eps = (.7,1), plot = True)
|
[
"matplotlib.pyplot.imshow",
"stardist.star_dist",
"utils.check_similar",
"utils.circle_image",
"utils.random_image",
"numpy.count_nonzero",
"pytest.mark.parametrize",
"stardist.relabel_image_stardist",
"matplotlib.pyplot.figure",
"numpy.bitwise_and",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"utils.real_image2d",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((244, 290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(4, 16, 32)'], {}), "('n_rays', (4, 16, 32))\n", (267, 290), False, 'import pytest\n'), ((819, 865), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(4, 16, 32)'], {}), "('n_rays', (4, 16, 32))\n", (842, 865), False, 'import pytest\n'), ((1404, 1450), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(4, 16, 32)'], {}), "('n_rays', (4, 16, 32))\n", (1427, 1450), False, 'import pytest\n'), ((1632, 1675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(32, 64)'], {}), "('n_rays', (32, 64))\n", (1655, 1675), False, 'import pytest\n'), ((1676, 1728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""eps"""', '((1, 1), (0.4, 1.3))'], {}), "('eps', ((1, 1), (0.4, 1.3)))\n", (1699, 1728), False, 'import pytest\n'), ((346, 386), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': 'mode'}), '(img, n_rays=n_rays, mode=mode)\n', (355, 386), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((928, 968), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': 'mode'}), '(img, n_rays=n_rays, mode=mode)\n', (937, 968), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((1494, 1535), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': '"""cpp"""'}), "(img, n_rays=n_rays, mode='cpp')\n", (1503, 1535), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((1548, 1592), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': '"""opencl"""'}), "(img, n_rays=n_rays, mode='opencl')\n", (1557, 1592), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((1597, 1624), 'utils.check_similar', 'check_similar', (['s_cpp', 's_ocl'], {}), '(s_cpp, s_ocl)\n', (1610, 1624), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1914, 1961), 'utils.circle_image', 'circle_image', ([], {'shape': '(32, 32)', 'radius': '(8)', 'eps': 'eps'}), '(shape=(32, 32), radius=8, eps=eps)\n', (1926, 1961), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1979, 2015), 'stardist.relabel_image_stardist', 'relabel_image_stardist', (['lbl1', 'n_rays'], {}), '(lbl1, n_rays)\n', (2001, 2015), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((2028, 2064), 'stardist.relabel_image_stardist', 'relabel_image_stardist', (['lbl1', 'n_rays'], {}), '(lbl1, n_rays)\n', (2050, 2064), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((699, 719), 'utils.check_similar', 'check_similar', (['gt', 'x'], {}), '(gt, x)\n', (712, 719), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((216, 240), 'utils.random_image', 'random_image', (['(128, 123)'], {}), '((128, 123))\n', (228, 240), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1284, 1304), 'utils.check_similar', 'check_similar', (['gt', 'x'], {}), '(gt, x)\n', (1297, 1304), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((791, 815), 'utils.random_image', 'random_image', (['(128, 123)'], {}), '((128, 123))\n', (803, 815), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1376, 1400), 'utils.random_image', 'random_image', (['(128, 123)'], {}), '((128, 123))\n', (1388, 1400), False, 'from utils import random_image, real_image2d, 
check_similar, circle_image\n'), ((2267, 2300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': '(8, 4)'}), '(num=1, figsize=(8, 4))\n', (2277, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2328), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2319, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2343), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lbl1'], {}), '(lbl1)\n', (2337, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2359), 'matplotlib.pyplot.title', 'plt.title', (['"""GT"""'], {}), "('GT')\n", (2353, 2359), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (2379, 2388), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2403), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lbl2'], {}), '(lbl2)\n', (2397, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2404, 2421), 'matplotlib.pyplot.title', 'plt.title', (['"""Reco"""'], {}), "('Reco')\n", (2413, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2450), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2441, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2449, 2492), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(1 * (lbl1 > 0) + 2 * (lbl2 > 0))'], {}), '(1 * (lbl1 > 0) + 2 * (lbl2 > 0))\n', (2459, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2503), 'matplotlib.pyplot.title', 'plt.title', (['"""Overlay"""'], {}), "('Overlay')\n", (2492, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2530), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2528, 2530), True, 'import matplotlib.pyplot as plt\n'), ((2539, 2549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2547, 2549), True, 'import matplotlib.pyplot as plt\n'), ((197, 211), 'utils.real_image2d', 'real_image2d', ([], {}), '()\n', (209, 211), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((772, 786), 'utils.real_image2d', 'real_image2d', ([], {}), '()\n', (784, 786), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1357, 1371), 'utils.real_image2d', 'real_image2d', ([], {}), '()\n', (1369, 1371), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((2133, 2159), 'numpy.count_nonzero', 'np.count_nonzero', (['(lbl1 > 0)'], {}), '(lbl1 > 0)\n', (2149, 2159), True, 'import numpy as np\n'), ((2101, 2135), 'numpy.bitwise_and', 'np.bitwise_and', (['(lbl1 > 0)', '(lbl2 > 0)'], {}), '(lbl1 > 0, lbl2 > 0)\n', (2115, 2135), True, 'import numpy as np\n')]
|
import math
import functools
from scipy.stats import binom
import numpy as np
import itertools
import sys
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from copy import copy
def combine_distribs(deletes, inserts):
"""
Combine insert and delete models/distributions
:param deletes: ndarray - delete distribution
:param inserts: ndarray - insert distribution
:return: ndarray - combined array of the same length
"""
# how much to fill?
to_fill = sum(deletes == 0.0) + 1
while to_fill < len(inserts) and inserts[to_fill] > 0.0001:
to_fill += 1
# create the end array
len_del = len(deletes)
end_distr = np.zeros_like(deletes, dtype=float)
# fill it!
for i, a in enumerate(inserts[:to_fill]):
# print i,a,(deletes*a)[:len_del-i]
end_distr[i:] += (deletes * a)[:len_del - i]
# print("end_distr", end_distr[:3], deletes[:3], inserts[:3])
return end_distr
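# Hedged sketch (not part of the module): combining a toy delete distribution with a toy
# insert distribution; inserts shift (convolve) the delete mass towards longer alleles.
def _combine_distribs_example():
    deletes = np.array([0.7, 0.3, 0.0, 0.0])
    inserts = np.array([0.9, 0.1, 0.0, 0.0])
    return combine_distribs(deletes, inserts)  # -> [0.63, 0.34, 0.03, 0.0]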
def const_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Constant rate function.
:param n: int - allele number (unused)
:param p1: float - constant parameter
:param p2: float - linear parameter (unused)
:param p3: float - additional parameter (unused)
:return: float - p1
"""
return p1
def linear_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Linear rate function.
:param n: int - allele number
:param p1: float - constant parameter
:param p2: float - linear parameter
:param p3: float - additional parameter (unused)
:return: float - p1 + p2 * n
"""
return p1 + p2 * n
def n2_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Quadratic rate function.
:param n: int - allele number
:param p1: float - constant parameter
:param p2: float - linear parameter
:param p3: float - quadratic parameter
:return: float - p1 + p2 * n + p3 * n * n
"""
return p1 + p2 * n + p3 * n * n
def exp_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Exponential rate function.
:param n: int - allele number
:param p1: float - constant parameter
:param p2: float - linear parameter
:param p3: float - exponential parameter
:return: float - p1 + p2 * e^(p3 * n)
"""
return p1 + p2 * math.exp(p3 * n)
def clip(value, minimal, maximal):
"""
Clips value to range <minimal, maximal>
:param value: ? - value
:param minimal: ? - minimal value
:param maximal: ? - maximal value
:return: ? - clipped value
"""
return min(max(minimal, value), maximal)
def model_full(rng, model_params, n, rate_func=linear_rate):
"""
Create binomial model for both deletes and inserts of STRs
:param rng: int - max_range of distribution
:param model_params: 4-tuple - parameters for inserts and deletes
:param n: int - target allele number
:param rate_func: function - rate function for deletes
:return: ndarray - combined distribution
"""
p1, p2, p3, q = model_params
deletes = binom.pmf(np.arange(rng), n, clip(1 - rate_func(n, p1, p2, p3), 0.0, 1.0))
inserts = binom.pmf(np.arange(rng), n, q)
return combine_distribs(deletes, inserts)
def model_template(rng, model_params, rate_func=linear_rate):
"""
Partial function for model creation.
:param rng: int - max_range of distribution
:param model_params: 4-tuple - parameters for inserts and deletes
:param rate_func: function - rate function for deletes
:return: partial function with only 1 parameter - n - target allele number
"""
return functools.partial(model_full, rng, model_params, rate_func=rate_func)
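# Hedged usage sketch (illustrative parameters, not the calibrated defaults): build
# per-allele distributions over the range 0..49 and query the model for 10 repetitions.
def _model_template_example():
    make_model = model_template(50, (-0.01, 0.002, 0.0, 0.004), rate_func=linear_rate)
    distribution = make_model(10)   # ndarray of length 50, concentrated around 10
    return distribution.argmax()    # expected at or near 10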
class Inference:
""" Class for inference of alleles. """
MIN_REPETITIONS = 1
# default parameters for inference
DEFAULT_MODEL_PARAMS = (-0.0107736, 0.00244419, 0.0, 0.00440608)
DEFAULT_FIT_FUNCTION = "linear"
def __init__(self, read_distribution, params_file, str_rep=3, minl_primer1=5, minl_primer2=5, minl_str=5, p_bckg_closed=None, p_bckg_open=None, p_expanded=None):
"""
Initialization of the Inference class + setup of all models and their probabilities.
:param read_distribution: ndarray(int) - read distribution
:param params_file: str - filename of parameters
:param str_rep: int - length of the STR
:param minl_primer1: int - minimal length of the left primer
:param minl_primer2: int - minimal length of the right primer
:param minl_str: int - minimal length of the STR
:param p_bckg_closed: float - probability of the background model for closed observation
:param p_bckg_open: float - probability of the background model for open observation
:param p_expanded: float - probability of the expanded model (if None it is equal to other models)
"""
# assign variables
self.str_rep = str_rep
self.minl_primer1 = minl_primer1
self.minl_primer2 = minl_primer2
self.minl_str = minl_str
self.read_distribution = read_distribution
self.sum_reads_log = np.log(np.sum(read_distribution))
self.sum_reads = np.sum(read_distribution)
self.params_file = params_file
self.p_expanded = p_expanded
self.p_bckg_closed = p_bckg_closed
self.p_bckg_open = p_bckg_open
def construct_models(self, min_rep, max_rep, e_model):
"""
Construct all models needed for current inference.
:param min_rep: int - minimal allele to model
:param max_rep: int - maximal allele to model
:param e_model: int - model for expanded alleles
:return: None
"""
# extract params
model_params, rate_func_str = self.read_params(self.params_file)
str_to_func = {"linear": linear_rate, "const": const_rate, "exponential": exp_rate, "square": n2_rate}
rate_func = const_rate
if rate_func_str in str_to_func.keys():
rate_func = str_to_func[rate_func_str]
# save min_rep and max_rep
self.min_rep = min_rep
self.max_rep = max_rep # non-inclusive
self.max_with_e = e_model + 1 # non-inclusive
# get models
mt = model_template(self.max_with_e, model_params, rate_func)
self.background_model = np.concatenate([np.zeros(self.min_rep, dtype=float), np.ones(self.max_with_e - self.min_rep, dtype=float) / float(self.max_with_e - self.min_rep)])
self.expanded_model = mt(self.max_with_e - 1)
self.allele_models = {i: mt(i) for i in range(min_rep, max_rep)}
self.models = {'E': self.expanded_model, 'B': self.background_model}
self.models.update(self.allele_models)
# get model likelihoods
open_to_closed = 10.0
l_others = 1.0
l_bckg_open = 0.01
l_exp = 1.01
l_bckg_model_open = 1.0
if self.p_expanded is None:
self.p_expanded = l_exp
if self.p_bckg_open is None and self.p_bckg_closed is None:
self.p_bckg_open = l_bckg_open
self.p_bckg_closed = self.p_bckg_open / open_to_closed
if self.p_bckg_closed is None:
self.p_bckg_closed = self.p_bckg_open / open_to_closed
if self.p_bckg_open is None:
self.p_bckg_open = self.p_bckg_closed * open_to_closed
self.model_probabilities = {'E': self.p_expanded, 'B': l_bckg_model_open}
self.model_probabilities.update({i: l_others for i in self.allele_models.keys()})
def read_params(self, params_file):
"""
Reads all parameters written with write_params(print_all=True)
:param params_file: str - filename to read parameters from, if None, load default params
:return: 4-tuple, 2-tuple, function - parameters for model, read count drop, and error function for model distributions
"""
if params_file is None:
return self.DEFAULT_MODEL_PARAMS, self.DEFAULT_FIT_FUNCTION
# read 2nd and last line of the file
with open(params_file) as f:
lines = f.readlines()
fit_function = lines[1].strip().split()[1]
split = list(map(float, lines[-1].strip().split()))
if len(split) < 4:
print("ERROR: parameters were not read successfully, using defaults!", file=sys.stderr)
return self.DEFAULT_MODEL_PARAMS, self.DEFAULT_FIT_FUNCTION
# extract parameters from last line of file
model_params = tuple(split[0:4])
return model_params, fit_function
def likelihood_rl(self, rl):
"""
Likelihood of a read with this length.
:param rl: int - read length
:return: float - likelihood of a read this long
"""
# print('rl', self.read_distribution[rl] / float(self.sum_reads))
return self.read_distribution[rl] / float(self.sum_reads)
@staticmethod
def likelihood_model(model, g):
"""
        Likelihood of an observed allele count g under the given model.
:param model: ndarray - model that we evaluate
:param g: int - observed read count
:return: float - likelihood of a read coming from this model
"""
return model[g]
def likelihood_intersection(self, model_i, model_j, g):
return min(model_i[g], model_j[g])
def likelihood_coverage(self, true_length, rl, closed=True):
"""
Likelihood of generating a read with this length and this allele.
:param true_length: int - true number of repetitions of an STR
:param rl: int - read length
:param closed: bool - if the read is closed - i.e. both primers are there
:return: float - likelihood of a read being generated with this attributes
"""
whole_inside_str = max(0, true_length * self.str_rep + self.minl_primer1 + self.minl_primer2 - rl + 1)
# closed_overlapping = max(0, rl - self.minl_primer1 - self.minl_primer2 - true_length * self.str_rep + 1)
open_overlapping = max(0, rl + true_length * self.str_rep - 2 * self.minl_str + 1)
assert open_overlapping > whole_inside_str, '%d open %d whole inside %d %d %d' % (open_overlapping, whole_inside_str, true_length, rl, self.minl_str)
return 1.0 / float(open_overlapping - whole_inside_str)
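    # Worked example (hedged, illustrative numbers): with str_rep=3, minl_primer1 =
    # minl_primer2 = minl_str = 5, a read of length rl=100 and a true allele of 20
    # repetitions (60 STR bases):
    #   whole_inside_str = max(0, 60 + 5 + 5 - 100 + 1) = 0
    #   open_overlapping = max(0, 100 + 60 - 2*5 + 1)   = 151
    # so each of the 151 admissible placements is assigned likelihood 1/151.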
def likelihood_read_allele(self, model, observed, rl, closed=True):
"""
Likelihood of generation of read with observed allele count and rl.
:param model: ndarray - model for the allele
:param observed: int - observed allele count
:param rl: int - read length
:param closed: bool - if the read is closed - i.e. both primers are there
:return:
"""
if closed:
return self.likelihood_rl(rl) * self.likelihood_model(model, observed) * self.likelihood_coverage(observed, rl, True)
else:
number_of_options = 0
partial_likelihood = 0
for true_length in itertools.chain(range(observed, self.max_rep), [self.max_with_e - 1]):
partial_likelihood += self.likelihood_model(model, true_length) * self.likelihood_coverage(true_length, rl, False)
number_of_options += 1
return self.likelihood_rl(rl) * partial_likelihood / float(number_of_options)
def likelihood_read_intersection(self, model_i, model_j, observed, rl, closed=True):
"""
        Likelihood of generating a read with the observed allele count and read length under the intersection of two allele models.
        :param model_i: ndarray - model for the first allele
        :param model_j: ndarray - model for the second allele
        :param observed: int - observed allele count
        :param rl: int - read length
        :param closed: bool - if the read is closed - i.e. both primers are there
        :return: float - likelihood of the read under the intersection of the two models
"""
if closed:
return self.likelihood_rl(rl) * self.likelihood_intersection(model_i, model_j, observed) * self.likelihood_coverage(observed, rl, True)
else:
number_of_options = 0
partial_likelihood = 0
for true_length in itertools.chain(range(observed, self.max_rep), [self.max_with_e - 1]):
partial_likelihood += self.likelihood_intersection(model_i, model_j, true_length) * self.likelihood_coverage(true_length, rl, False)
number_of_options += 1
return self.likelihood_rl(rl) * partial_likelihood / float(number_of_options)
def likelihood_read(self, observed, rl, model_index1, model_index2, closed=True):
"""
Compute likelihood of generation of a read from either of those models.
:param observed: int - observed allele count
:param rl: int - read length
:param model_index1: char/int - model index for left allele
:param model_index2: char/int - model index for right allele
        :param closed: bool - if the read is closed - i.e. both primers are there
:return: float - likelihood of this read generation
"""
# print('testing', model_index1, model_index2)
model_i = self.models[model_index1]
model_j = self.models[model_index2]
model_prob_i = self.model_probabilities[model_index1]
model_prob_j = self.model_probabilities[model_index2]
        # TODO: in my opinion this cannot simply be a sum, the correlation between the models is missing, but as a simplification it is probably OK
allele1_likelihood = model_prob_i * self.likelihood_read_allele(model_i, observed, rl, closed)
allele2_likelihood = model_prob_j * self.likelihood_read_allele(model_j, observed, rl, closed)
p_bckg = self.p_bckg_closed if closed else self.p_bckg_open
bckgrnd_likelihood = p_bckg * self.likelihood_read_allele(self.models['B'], observed, rl, closed)
# alleles_intersection = min(model_prob_j, model_prob_i) * self.likelihood_read_intersection(model_i, model_j, observed, rl, closed)
# if alleles_intersection > 0.0:
# print('%g %g %g %s %s %d' % (alleles_intersection, allele2_likelihood, allele1_likelihood, str(model_index1), str(model_index2), observed))
assert not np.isnan(allele2_likelihood)
assert not np.isnan(allele1_likelihood)
assert not np.isnan(bckgrnd_likelihood)
# assert alleles_intersection <= max(allele1_likelihood, allele2_likelihood), '%g %g %g %s %s %d' % (
# alleles_intersection, allele2_likelihood, allele1_likelihood, str(model_index1), str(model_index2), observed)
# print('read_%s' % (str(closed)), observed, 'all1_lh', allele1_likelihood, 'all2_lh', allele2_likelihood)
return allele1_likelihood + allele2_likelihood + bckgrnd_likelihood # - alleles_intersection
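    # Illustrative sketch (added, not in the original code): for a single closed
    # read the value returned above is a weighted mixture of the two candidate
    # allele models plus the background model, with weights taken from
    # model_probabilities and p_bckg:
    #   P(read | G1, G2) ~ P(G1) * P(read | G1) + P(G2) * P(read | G2) + p_bckg * P(read | B)
    # e.g. with hypothetical terms 0.5*0.02 + 0.5*0.01 + 0.01*0.001 = 0.01501.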
def infer(self, annotations, filt_annotations, index_rep, verbose=True):
"""
        Does all of the inference: computes which combination of two alleles best explains these annotations under the current parameters.
        argmax_{G1, G2} P(G1, G2 | AL, COV, RL) ~ P(AL, COV, RL | G1, G2) * P(G1, G2)
            = prod_{read_i} P(al_i, cov_i, rl_i | G1, G2) * P(G1, G2)                      [assuming G1, G2 independent]
            = prod_{read_i} P(al_i, cov_i, rl_i | G1) * P(al_i, cov_i, rl_i | G2) * P(G1) * P(G2)
        Here G1, G2 range over the possible alleles, the background, and the expanded allele; the priors P(G1), P(G2) come from the parameters.
        P(al_i, cov_i, rl_i | G1) - 2 options: 1. closed evidence (al_i = X), we know X; 2. open evidence (al_i >= X); cl_i == True if read i is closed
        1.: P(al_i, cov_i, rl_i, cl_i | G1) = P(rl_i is from the read distribution) * P(allele is al_i | G1) * P(read generated closed evidence | rl_i, al_i)
        2.: P(al_i, cov_i, rl_i, cl_i | G1) = P(rl_i is from the read distribution) * P(allele is >= al_i | G1) * P(read generated open evidence | rl_i, al_i)
:param annotations: iterator(reads) - closed reads (both primers set)
:param filt_annotations: iterator(reads) - open reads (only one primer set)
:param index_rep: int - index of a repetition
:param verbose: bool - print more stuff?
        :return: dict(tuple(int, int):float) - dictionary of model indices to their likelihood
"""
# generate closed observed and read_length arrays
observed_annots = list(map(lambda x: x.module_repetitions[index_rep], annotations))
rl_annots = list(map(lambda x: len(x.read.sequence), annotations))
closed_annots = np.ones_like(observed_annots, dtype=bool)
# generate open observed and read_length arrays
observed_fa = list(map(lambda x: x.module_repetitions[index_rep], filt_annotations))
rl_fa = list(map(lambda x: len(x.read.sequence), filt_annotations))
closed_fa = np.zeros_like(observed_fa, dtype=bool)
# join them and keep the information if they are open or closed
observed_arr = np.concatenate([observed_annots, observed_fa]).astype(int)
rl_arr = np.concatenate([rl_annots, rl_fa]).astype(int)
closed_arr = np.concatenate([closed_annots, closed_fa]).astype(bool)
# generate the boundaries:
overhead = 3
if len(observed_annots) == 0:
max_rep = max(observed_fa) + overhead # non-inclusive
min_rep = max(self.MIN_REPETITIONS, max(observed_fa) - overhead) # inclusive
else:
max_rep = max(observed_annots) + overhead + 1 # non-inclusive
min_rep = max(self.MIN_REPETITIONS, min(observed_annots) - overhead) # inclusive
# expanded allele
e_allele = max_rep
if len(observed_fa) > 0:
e_allele = max(max_rep, max(observed_fa) + 1)
# generate all the models
self.construct_models(min_rep, max_rep, e_allele)
tested_models = []
for model_index1 in range(min_rep, max_rep):
for model_index2 in range(model_index1, max_rep):
tested_models.append((model_index1, model_index2))
tested_models.append((model_index1, 'E'))
# tested_models.append(('B', model_index1))
tested_models.append(('B', 'B'))
tested_models.append(('E', 'E'))
# go through every model and evaluate:
evaluated_models = {}
for m1, m2 in tested_models:
evaluated_models[(m1, m2)] = 0
if verbose:
print('model', m1, m2)
# go through every reads
for obs, rl, closed in zip(observed_arr, rl_arr, closed_arr):
lh = self.likelihood_read(obs, rl, m1, m2, closed=closed)
# TODO weighted sum according to the closeness/openness of reads?
evaluated_models[(m1, m2)] += np.log(lh)
if verbose:
print('model', m1, m2, 'log-likelihood', evaluated_models[(m1, m2)])
return evaluated_models
def print_pcolor(self, lh_dict, display_file, name, lognorm=True):
"""
Get maximum likelihood option and alternatively print it to image file.
        :param lh_dict: dict(tuple(int, int):float) - dictionary of model indices to their likelihood
:param display_file: str - filename for pcolor image output
:param name: str - name to use in title
:param lognorm: bool - use loglog scale in displaying likelihood array
        :return: tuple(ndarray, tuple(int, int), tuple) - likelihood array, indices of the option with highest likelihood, and its symbolic representation
"""
# convert to a numpy array:
lh_array = np.zeros((self.max_rep, self.max_rep + 1))
for (k1, k2), v in lh_dict.items():
if k1 == 'B':
k1 = 0
if k2 == 'B':
k2 = 0
if k1 == 'E':
k1 = 0
if k2 == 'E':
k2 = self.max_rep
lh_array[k1, k2] = v
# print(lh_dict, lh_array)
# get minimal and maximal likelihood
        ind_good = (lh_array < 0.0) & (lh_array > -1e10) & ~np.isnan(lh_array)
        if len(lh_array[ind_good]) == 0:
            return lh_array, (0, 0), ('B', 'B')
lh_array[~ind_good] = np.NINF
z_min, z_max = min(lh_array[ind_good]), max(lh_array[ind_good])
max_str = len(lh_array)
# generate image file if specified:
if display_file is not None:
plt.figure()
if lognorm:
lh_view = -np.log(-lh_array)
z_min = -np.log(-z_min)
z_max = -np.log(-z_max)
else:
lh_view = lh_array
# background:
bg_size = max(2, (len(lh_view) - self.min_rep) // 6)
if len(lh_view) - self.min_rep <= 6:
bg_size = 1
lh_view[-bg_size:, self.min_rep:self.min_rep + bg_size] = lh_view[0, 0]
# expanded
lh_view[-bg_size:, self.min_rep + bg_size:self.min_rep + 2 * bg_size] = lh_view[0, self.max_rep]
# plotting
plt.title("%s likelihood of each option for %s" % ("Loglog" if lognorm else "Log", name))
plt.xlabel('2nd allele')
plt.ylabel('1st allele')
start_ticks = 5
step_ticks = 5
plt.xticks(np.concatenate([np.array(range(start_ticks - self.min_rep, max_str - self.min_rep, step_ticks)), [max_str - self.min_rep]]) + 0.5,
list(range(start_ticks, max_str, step_ticks)) + ['E(>%d)' % (self.max_with_e - 2)])
plt.yticks(np.array(range(start_ticks - self.min_rep, max_str - self.min_rep, step_ticks)) + 0.5, range(start_ticks, max_str, step_ticks))
palette = copy(plt.cm.jet)
palette.set_under('gray', 1.0)
plt.pcolor(lh_view[self.min_rep:, self.min_rep:], cmap=palette, vmin=z_min, vmax=z_max)
plt.colorbar()
# draw dividing line:
plt.plot([max_str - self.min_rep, max_str - self.min_rep], [0, max_str - self.min_rep], 'k', linewidth=3)
# background:
plt.text(float(bg_size) / 2.0, max_str - self.min_rep - float(bg_size) / 2.0, 'BG', size=20, horizontalalignment='center',
verticalalignment='center', path_effects=[PathEffects.withStroke(linewidth=2.5, foreground="w")])
# expanded
plt.text(bg_size + float(bg_size) / 2.0, max_str - self.min_rep - float(bg_size) / 2.0, 'Exp', size=20, horizontalalignment='center',
verticalalignment='center', path_effects=[PathEffects.withStroke(linewidth=2.5, foreground="w")])
# save
plt.savefig(display_file + '.pdf')
plt.savefig(display_file + '.png')
plt.close()
# output best option
best = sorted(np.unravel_index(np.argmax(lh_array), lh_array.shape))
# and convert it to symbols
if best[0] == 0 and best[1] == 0:
best_sym = ('B', 'B')
else:
best_sym = list(map(lambda x: 'E' if x == self.max_rep or x == 0 else x, best))
return lh_array, best, best_sym
def get_confidence(self, lh_array, predicted):
"""
Get confidence of a prediction.
:param lh_array: 2D-ndarray - log likelihoods of the prediction
:param predicted: tuple(int, int) - predicted alleles
        :return: tuple of 7 floats - confidence of the whole prediction, of the first and second allele, of background (exact and marginal), and of expansion (exact and marginal)
"""
# get confidence
lh_corr_array = lh_array - np.max(lh_array)
lh_sum = np.sum(np.exp(lh_corr_array))
confidence = np.exp(lh_corr_array[predicted[0], predicted[1]]) / lh_sum
confidence1 = np.sum(np.exp(lh_corr_array[predicted[0], :])) / lh_sum
confidence2 = np.sum(np.exp(lh_corr_array[:, predicted[1]])) / lh_sum
confidence_back = np.exp(lh_corr_array[0, 0]) / lh_sum
confidence_back_all = np.sum(np.exp(lh_corr_array[0, :])) / lh_sum
confidence_exp = np.exp(lh_corr_array[0, self.max_rep]) / lh_sum
confidence_exp_all = np.sum(np.exp(lh_corr_array[:, self.max_rep])) / lh_sum
return confidence, confidence1, confidence2, confidence_back, confidence_back_all, confidence_exp, confidence_exp_all
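    # Illustrative sketch (added for clarity, not part of the original code): the
    # confidences above are a softmax over the log-likelihood array, e.g. with
    # hypothetical log-likelihoods for three options
    #   lh = np.array([-10.0, -11.0, -13.0])
    #   p = np.exp(lh - lh.max()); p /= p.sum()   # p ~ [0.705, 0.259, 0.035]
    # the best option would be reported with roughly 70.5% confidence.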
@staticmethod
def write_output(file_desc, predicted, conf, name):
"""
Write result of one prediction.
:param file_desc: file descriptor - where to write to
:param predicted: tuple(int/char, int/char) - predicted alleles
        :param conf: tuple of floats - confidence of prediction (whole, 1st allele, 2nd allele, background, background marginal, expanded, expanded marginal)
:param name: str/int - name/number of the sample
:return: None
"""
def write_output_fd(f, predicted, conf, name):
print("Predicted alleles for %s: (confidence = %5.1f%%)" % (str(name), conf[0] * 100.0), file=f)
print("\t%3s (confidence = %5.1f%%)" % (str(predicted[0]), conf[1] * 100.0), file=f)
print("\t%3s (confidence = %5.1f%%)" % (str(predicted[1]), conf[2] * 100.0), file=f)
print("B B %7.3f%%" % (conf[3] * 100.0), file=f)
print("all B %7.3f%%" % (conf[4] * 100.0), file=f)
print("B E %7.3f%%" % (conf[5] * 100.0), file=f)
print("all E %7.3f%%" % (conf[6] * 100.0), file=f)
if type(file_desc) is str:
with open(file_desc, 'w') as f:
write_output_fd(f, predicted, conf, name)
else:
write_output_fd(file_desc, predicted, conf, name)
def all_call(self, annotations, filt_annotations, index_rep, file_pcolor, file_output, name):
"""
Run All_call - inference of likelihoods, printing of pcolor and writing output.
:param annotations: list(Annotation) - good (blue) annotations
:param filt_annotations: list(Annotation) - (grey) annotations with one primer
:param index_rep: int - index of a repetition
:param file_pcolor: str - file prefix for a pcolor image
:param file_output: str - file for all_call output
:param name: str - name of the sample
:return: None
"""
# if we do not have any good annotations, then quit
if len(annotations) == 0 and len(filt_annotations) == 0:
# write output
# self.write_output(file_output, ('B', 'B'), (0.0, 0.0, 0.0), name)
return None
# infer likelihoods
lh_dict = self.infer(annotations, filt_annotations, index_rep, verbose=False)
# print pcolor image
lh_array, predicted, predicted_sym = self.print_pcolor(lh_dict, file_pcolor, name)
# get confidence of our prediction
conf = self.get_confidence(lh_array, predicted)
# write output
self.write_output(file_output, predicted_sym, conf, name)
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.pcolor",
"numpy.log",
"copy.copy",
"math.exp",
"matplotlib.patheffects.withStroke",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.concatenate",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.argmax",
"numpy.isnan",
"matplotlib.pyplot.title",
"numpy.ones_like",
"matplotlib.pyplot.colorbar",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"functools.partial",
"numpy.zeros_like"
] |
[((691, 726), 'numpy.zeros_like', 'np.zeros_like', (['deletes'], {'dtype': 'float'}), '(deletes, dtype=float)\n', (704, 726), True, 'import numpy as np\n'), ((3544, 3613), 'functools.partial', 'functools.partial', (['model_full', 'rng', 'model_params'], {'rate_func': 'rate_func'}), '(model_full, rng, model_params, rate_func=rate_func)\n', (3561, 3613), False, 'import functools\n'), ((2999, 3013), 'numpy.arange', 'np.arange', (['rng'], {}), '(rng)\n', (3008, 3013), True, 'import numpy as np\n'), ((3088, 3102), 'numpy.arange', 'np.arange', (['rng'], {}), '(rng)\n', (3097, 3102), True, 'import numpy as np\n'), ((5108, 5133), 'numpy.sum', 'np.sum', (['read_distribution'], {}), '(read_distribution)\n', (5114, 5133), True, 'import numpy as np\n'), ((16179, 16220), 'numpy.ones_like', 'np.ones_like', (['observed_annots'], {'dtype': 'bool'}), '(observed_annots, dtype=bool)\n', (16191, 16220), True, 'import numpy as np\n'), ((16467, 16505), 'numpy.zeros_like', 'np.zeros_like', (['observed_fa'], {'dtype': 'bool'}), '(observed_fa, dtype=bool)\n', (16480, 16505), True, 'import numpy as np\n'), ((19163, 19205), 'numpy.zeros', 'np.zeros', (['(self.max_rep, self.max_rep + 1)'], {}), '((self.max_rep, self.max_rep + 1))\n', (19171, 19205), True, 'import numpy as np\n'), ((2243, 2259), 'math.exp', 'math.exp', (['(p3 * n)'], {}), '(p3 * n)\n', (2251, 2259), False, 'import math\n'), ((5056, 5081), 'numpy.sum', 'np.sum', (['read_distribution'], {}), '(read_distribution)\n', (5062, 5081), True, 'import numpy as np\n'), ((14002, 14030), 'numpy.isnan', 'np.isnan', (['allele2_likelihood'], {}), '(allele2_likelihood)\n', (14010, 14030), True, 'import numpy as np\n'), ((14050, 14078), 'numpy.isnan', 'np.isnan', (['allele1_likelihood'], {}), '(allele1_likelihood)\n', (14058, 14078), True, 'import numpy as np\n'), ((14098, 14126), 'numpy.isnan', 'np.isnan', (['bckgrnd_likelihood'], {}), '(bckgrnd_likelihood)\n', (14106, 14126), True, 'import numpy as np\n'), ((19966, 19978), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19976, 19978), True, 'import matplotlib.pyplot as plt\n'), ((20603, 20696), 'matplotlib.pyplot.title', 'plt.title', (["('%s likelihood of each option for %s' % ('Loglog' if lognorm else 'Log', name)\n )"], {}), "('%s likelihood of each option for %s' % ('Loglog' if lognorm else\n 'Log', name))\n", (20612, 20696), True, 'import matplotlib.pyplot as plt\n'), ((20705, 20729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""2nd allele"""'], {}), "('2nd allele')\n", (20715, 20729), True, 'import matplotlib.pyplot as plt\n'), ((20742, 20766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""1st allele"""'], {}), "('1st allele')\n", (20752, 20766), True, 'import matplotlib.pyplot as plt\n'), ((21256, 21272), 'copy.copy', 'copy', (['plt.cm.jet'], {}), '(plt.cm.jet)\n', (21260, 21272), False, 'from copy import copy\n'), ((21328, 21419), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['lh_view[self.min_rep:, self.min_rep:]'], {'cmap': 'palette', 'vmin': 'z_min', 'vmax': 'z_max'}), '(lh_view[self.min_rep:, self.min_rep:], cmap=palette, vmin=z_min,\n vmax=z_max)\n', (21338, 21419), True, 'import matplotlib.pyplot as plt\n'), ((21428, 21442), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (21440, 21442), True, 'import matplotlib.pyplot as plt\n'), ((21490, 21599), 'matplotlib.pyplot.plot', 'plt.plot', (['[max_str - self.min_rep, max_str - self.min_rep]', '[0, max_str - self.min_rep]', '"""k"""'], {'linewidth': '(3)'}), "([max_str - self.min_rep, max_str - self.min_rep], [0, max_str -\n 
self.min_rep], 'k', linewidth=3)\n", (21498, 21599), True, 'import matplotlib.pyplot as plt\n'), ((22197, 22231), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(display_file + '.pdf')"], {}), "(display_file + '.pdf')\n", (22208, 22231), True, 'import matplotlib.pyplot as plt\n'), ((22244, 22278), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(display_file + '.png')"], {}), "(display_file + '.png')\n", (22255, 22278), True, 'import matplotlib.pyplot as plt\n'), ((22291, 22302), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22300, 22302), True, 'import matplotlib.pyplot as plt\n'), ((23085, 23101), 'numpy.max', 'np.max', (['lh_array'], {}), '(lh_array)\n', (23091, 23101), True, 'import numpy as np\n'), ((23126, 23147), 'numpy.exp', 'np.exp', (['lh_corr_array'], {}), '(lh_corr_array)\n', (23132, 23147), True, 'import numpy as np\n'), ((23170, 23219), 'numpy.exp', 'np.exp', (['lh_corr_array[predicted[0], predicted[1]]'], {}), '(lh_corr_array[predicted[0], predicted[1]])\n', (23176, 23219), True, 'import numpy as np\n'), ((23412, 23439), 'numpy.exp', 'np.exp', (['lh_corr_array[0, 0]'], {}), '(lh_corr_array[0, 0])\n', (23418, 23439), True, 'import numpy as np\n'), ((23549, 23587), 'numpy.exp', 'np.exp', (['lh_corr_array[0, self.max_rep]'], {}), '(lh_corr_array[0, self.max_rep])\n', (23555, 23587), True, 'import numpy as np\n'), ((6272, 6307), 'numpy.zeros', 'np.zeros', (['self.min_rep'], {'dtype': 'float'}), '(self.min_rep, dtype=float)\n', (6280, 6307), True, 'import numpy as np\n'), ((16602, 16648), 'numpy.concatenate', 'np.concatenate', (['[observed_annots, observed_fa]'], {}), '([observed_annots, observed_fa])\n', (16616, 16648), True, 'import numpy as np\n'), ((16678, 16712), 'numpy.concatenate', 'np.concatenate', (['[rl_annots, rl_fa]'], {}), '([rl_annots, rl_fa])\n', (16692, 16712), True, 'import numpy as np\n'), ((16746, 16788), 'numpy.concatenate', 'np.concatenate', (['[closed_annots, closed_fa]'], {}), '([closed_annots, closed_fa])\n', (16760, 16788), True, 'import numpy as np\n'), ((18416, 18426), 'numpy.log', 'np.log', (['lh'], {}), '(lh)\n', (18422, 18426), True, 'import numpy as np\n'), ((22372, 22391), 'numpy.argmax', 'np.argmax', (['lh_array'], {}), '(lh_array)\n', (22381, 22391), True, 'import numpy as np\n'), ((23258, 23296), 'numpy.exp', 'np.exp', (['lh_corr_array[predicted[0], :]'], {}), '(lh_corr_array[predicted[0], :])\n', (23264, 23296), True, 'import numpy as np\n'), ((23336, 23374), 'numpy.exp', 'np.exp', (['lh_corr_array[:, predicted[1]]'], {}), '(lh_corr_array[:, predicted[1]])\n', (23342, 23374), True, 'import numpy as np\n'), ((23486, 23513), 'numpy.exp', 'np.exp', (['lh_corr_array[0, :]'], {}), '(lh_corr_array[0, :])\n', (23492, 23513), True, 'import numpy as np\n'), ((23633, 23671), 'numpy.exp', 'np.exp', (['lh_corr_array[:, self.max_rep]'], {}), '(lh_corr_array[:, self.max_rep])\n', (23639, 23671), True, 'import numpy as np\n'), ((6309, 6361), 'numpy.ones', 'np.ones', (['(self.max_with_e - self.min_rep)'], {'dtype': 'float'}), '(self.max_with_e - self.min_rep, dtype=float)\n', (6316, 6361), True, 'import numpy as np\n'), ((20031, 20048), 'numpy.log', 'np.log', (['(-lh_array)'], {}), '(-lh_array)\n', (20037, 20048), True, 'import numpy as np\n'), ((20074, 20088), 'numpy.log', 'np.log', (['(-z_min)'], {}), '(-z_min)\n', (20080, 20088), True, 'import numpy as np\n'), ((20114, 20128), 'numpy.log', 'np.log', (['(-z_max)'], {}), '(-z_max)\n', (20120, 20128), True, 'import numpy as np\n'), ((21821, 21874), 'matplotlib.patheffects.withStroke', 
'PathEffects.withStroke', ([], {'linewidth': '(2.5)', 'foreground': '"""w"""'}), "(linewidth=2.5, foreground='w')\n", (21843, 21874), True, 'import matplotlib.patheffects as PathEffects\n'), ((22109, 22162), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(2.5)', 'foreground': '"""w"""'}), "(linewidth=2.5, foreground='w')\n", (22131, 22162), True, 'import matplotlib.patheffects as PathEffects\n')]
|
'''
file: donkey_env.py
author: <NAME>
date: 2018-08-31
'''
import os
from threading import Thread
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller
from donkey_gym.envs.donkey_proc import DonkeyUnityProcess
class DonkeyEnv(gym.Env):
"""
OpenAI Gym Environment for Donkey
"""
metadata = {
"render.modes": ["human", "rgb_array"],
}
ACTION = ["steer", "throttle"]
def __init__(self, level, time_step=0.05, frame_skip=2):
print("starting DonkeyGym env")
# start Unity simulation subprocess
self.proc = DonkeyUnityProcess()
try:
exe_path = os.environ['DONKEY_SIM_PATH']
        except KeyError:
print("Missing DONKEY_SIM_PATH environment var. Using defaults")
#you must start the executable on your own
exe_path = "self_start"
try:
port = int(os.environ['DONKEY_SIM_PORT'])
        except (KeyError, ValueError):
print("Missing DONKEY_SIM_PORT environment var. Using defaults")
port = 9090
try:
headless = os.environ['DONKEY_SIM_HEADLESS']=='1'
        except KeyError:
print("Missing DONKEY_SIM_HEADLESS environment var. Using defaults")
headless = False
self.proc.start(exe_path, headless=headless, port=port)
# start simulation com
self.viewer = DonkeyUnitySimContoller(level=level, time_step=time_step, port=port)
# steering
# TODO(r7vme): Add throttle
self.action_space = spaces.Box(low=np.array([-1.0]), high=np.array([1.0]))
# camera sensor data
self.observation_space = spaces.Box(0, 255, self.viewer.get_sensor_size(), dtype=np.uint8)
# simulation related variables.
self.seed()
# Frame Skipping
self.frame_skip = frame_skip
# wait until loaded
self.viewer.wait_until_loaded()
def close(self):
self.proc.quit()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
for i in range(self.frame_skip):
self.viewer.take_action(action)
observation, reward, done, info = self.viewer.observe()
return observation, reward, done, info
def reset(self):
self.viewer.reset()
observation, reward, done, info = self.viewer.observe()
return observation
def render(self, mode="human", close=False):
if close:
self.viewer.quit()
return self.viewer.render(mode)
def is_game_over(self):
return self.viewer.is_game_over()
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
class GeneratedRoadsEnv(DonkeyEnv):
def __init__(self):
super(GeneratedRoadsEnv, self).__init__(level=0)
class WarehouseEnv(DonkeyEnv):
def __init__(self):
super(WarehouseEnv, self).__init__(level=1)
class AvcSparkfunEnv(DonkeyEnv):
def __init__(self):
super(AvcSparkfunEnv, self).__init__(level=2)
class GeneratedTrackEnv(DonkeyEnv):
def __init__(self):
super(GeneratedTrackEnv, self).__init__(level=3)
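# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes the simulator binary and the DONKEY_SIM_* environment variables
# described above are available:
#   env = GeneratedRoadsEnv()
#   obs = env.reset()
#   obs, reward, done, info = env.step(np.array([0.0]))  # steer straight ahead
#   env.close()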
|
[
"gym.utils.seeding.np_random",
"donkey_gym.envs.donkey_sim.DonkeyUnitySimContoller",
"numpy.array",
"donkey_gym.envs.donkey_proc.DonkeyUnityProcess"
] |
[((705, 725), 'donkey_gym.envs.donkey_proc.DonkeyUnityProcess', 'DonkeyUnityProcess', ([], {}), '()\n', (723, 725), False, 'from donkey_gym.envs.donkey_proc import DonkeyUnityProcess\n'), ((1535, 1603), 'donkey_gym.envs.donkey_sim.DonkeyUnitySimContoller', 'DonkeyUnitySimContoller', ([], {'level': 'level', 'time_step': 'time_step', 'port': 'port'}), '(level=level, time_step=time_step, port=port)\n', (1558, 1603), False, 'from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller\n'), ((2199, 2222), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2216, 2222), False, 'from gym.utils import seeding\n'), ((1707, 1723), 'numpy.array', 'np.array', (['[-1.0]'], {}), '([-1.0])\n', (1715, 1723), True, 'import numpy as np\n'), ((1730, 1745), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1738, 1745), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
'''Tests for the likelihood.py module'''
from time import perf_counter_ns
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
from scipy.stats import gamma
import likelihood
SMALL_FIT_PARAMS = {
'baseline_intensities': np.asarray([1, 2, np.nan, np.nan]),
'r_h': 1.5,
'r_c': 0.5
}
SIMPLE_DIST_PARAMS = {
'self_excitation_shape': 2,
'self_excitation_scale': 1,
'discharge_excitation_shape': 3,
'discharge_excitation_scale': 2
}
SMALL_CASES_FILE = 'tests/fixtures/small.csv'
SMALL_COVARIATES_FILE = 'tests/fixtures/small_covariates.csv'
LARGE_FIT_PARAMS = {
'baseline_intensities': np.asarray([0.3, 0.4, 0.6, 0.9]),
'r_h': 1.5,
'r_c': 0.5
}
FULL_DIST_PARAMS = {
'self_excitation_shape': 2.6,
'self_excitation_scale': 2.5,
'discharge_excitation_shape': 2.6,
'discharge_excitation_scale': 2.5
}
def test_gamma_pdf():
x = np.linspace(0, 10, 100)
shape = FULL_DIST_PARAMS['self_excitation_shape']
scale = FULL_DIST_PARAMS['self_excitation_scale']
assert_almost_equal(
gamma.pdf(x, a=shape, scale=scale),
likelihood.gamma_pdf(x, shape, scale)
)
@pytest.mark.parametrize(
"test_element,result_dtype",
[(123_456_789, np.uint32), (65_535, np.uint16), (255, np.uint8)]
)
def test_compactify(test_element, result_dtype):
'''Test that arrays compactify correctly, and to the correct data types'''
array = np.asarray([[1, 2], [3, test_element]], dtype=np.uint32)
result = likelihood.compactify(array)
assert result.dtype == result_dtype
assert_array_equal(array, result)
def test_read_and_tidy_data():
'''Test that a CSV file with care home IDs as a header row
is read, sorted, and split correctly.'''
ids, values = likelihood.read_and_tidy_data(SMALL_CASES_FILE)
assert_array_equal(ids, [14, 16, 35])
assert_array_equal(
values,
[[4, 1, 6], [4, 0, 3], [6, 66, 2]]
)
@pytest.fixture
def small_cases():
'''Get a small data file that could be cases or discharges.'''
return likelihood.read_and_tidy_data(SMALL_CASES_FILE)
@pytest.fixture
def small_covariates():
'''Get a small data file containing covariates.'''
return likelihood.read_and_tidy_data(SMALL_COVARIATES_FILE)
def test_carehome_intensity_null(small_cases, small_covariates):
'''Test that calculating the null-case intensity (based on mapping banded
carehome size to a base intensity) gives the correct result'''
_, cases = small_cases
_, covariates = small_covariates
intensity = likelihood.carehome_intensity_null(
covariates=covariates,
cases=cases,
fit_params=SMALL_FIT_PARAMS
)
assert_array_equal(intensity, [[1, 2, 2], [1, 2, 2], [1, 2, 2]])
def test_single_excitation(small_cases):
'''Test that excitation terms of the form
e_i(t) = \\sum_{s<t} f(t - s) triggers_i(s)
are correctly calculated'''
_, cases = small_cases
excitation = likelihood.single_excitation(cases, 2, 1)
assert_almost_equal(
excitation,
[[0, 0, 0], [1.472, 0.368, 2.207], [2.554, 0.271, 2.728]],
decimal=3
)
def test_cached_single_excitation(small_cases):
'''
Test that the caching of the single_excitation function works correctly.
'''
_, cases = small_cases
cases.flags.writeable = False
shape = SIMPLE_DIST_PARAMS['self_excitation_shape']
scale = SIMPLE_DIST_PARAMS['self_excitation_scale']
uncached_start = perf_counter_ns()
uncached_excitation = likelihood.single_excitation(cases, shape, scale)
uncached_end = perf_counter_ns()
first_excitation = likelihood.cached_single_excitation(
cases, shape, scale
)
assert_array_equal(uncached_excitation, first_excitation)
cached_start = perf_counter_ns()
cached_excitation = likelihood.cached_single_excitation(
cases, shape, scale
)
cached_end = perf_counter_ns()
assert_array_equal(uncached_excitation, cached_excitation)
# Cached version should be quicker
assert (cached_end - cached_start) < (uncached_end - uncached_start)
def test_carehome_intensity_no_discharges(small_cases, small_covariates):
'''Test that the behaviour of carehome_intensity in the case where
discharges are not considered.'''
_, cases = small_cases
_, covariates = small_covariates
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
intensity = likelihood.carehome_intensity(
covariates=covariates,
cases=cases,
fit_params=fit_params_no_rh,
dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(
intensity,
[[1, 2, 2], [1.736, 2.184, 3.104], [2.277, 2.135, 3.364]],
decimal=3
)
def test_carehome_intensity_with_discharges(small_cases, small_covariates):
'''Test that the behaviour of carehome_intensity is correct in the case
where discharges are considered.'''
_, cases = small_cases
_, covariates = small_covariates
discharges = cases[::-1]
intensity = likelihood.carehome_intensity(
covariates=covariates,
cases=cases,
fit_params=SMALL_FIT_PARAMS,
dist_params=SIMPLE_DIST_PARAMS,
discharges=discharges
)
assert_almost_equal(
intensity,
[[1, 2, 2], [2.077, 5.937, 3.217], [3.332, 11.240, 3.810]],
decimal=3
)
@pytest.mark.parametrize("mean, cv, expected_shape, expected_scale",
[(1, 1, 1, 1), (6.5, 0.62, 2.601, 2.499)])
def test_calculate_gamma_parameters(mean, cv, expected_shape, expected_scale):
'''Test that calculation of Scipy-style gamma parameters from "descriptive"
gamma parameters is correct.'''
shape, scale = likelihood.calculate_gamma_parameters(mean, cv)
assert_almost_equal([shape, scale], [expected_shape, expected_scale],
decimal=3)
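# Note (added for clarity): for a gamma distribution described by mean m and
# coefficient of variation cv, the Scipy-style parameters are presumably
#   shape = 1 / cv**2,  scale = m * cv**2
# which matches the parametrized cases above: 1 / 0.62**2 ~= 2.601 and
# 6.5 * 0.62**2 ~= 2.499.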
def test_likelihood():
'''Test that the likelihood calculation is correct'''
cases = np.asarray([[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]])
intensity = np.asarray(
[[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]]
)
result = likelihood.likelihood(intensity, cases)
assert_almost_equal(result, -39.145, decimal=3)
def test_calculate_likelihood_from_files_no_discharges():
'''Test that likelihood is correctly calculated from input files
when discharges are not considered.'''
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -187.443, decimal=3)
def test_calculate_likelihood_from_files_no_cases():
'''Test that likelihood is correctly calculated from input files
when cases are not considered.'''
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_c': 0}
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
discharges_file=SMALL_CASES_FILE,
fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -189.046, decimal=3)
def test_calculate_likelihood_from_files_no_discharges_or_cases():
'''Test that likelihood is correctly calculated from input files
when neither cases nor discharges are considered.'''
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None, 'r_c': 0}
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -196.466, decimal=3)
def test_calculate_likelihood_from_files_with_discharges():
'''Test that likelihood is correctly calculated from input files
when discharges are considered.'''
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
discharges_file=SMALL_CASES_FILE,
fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -182.761, decimal=3)
def test_calculate_likelihood_from_files_missing_discharges():
'''Test that an error is generated when r_h is provided but discharge data
are not'''
with pytest.raises(AssertionError):
likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS
)
@pytest.mark.parametrize(
'r_c, r_h, expect',
[(0, 0, 196.466),
(0.5, 1.5, 182.761),
(0.5, 0, 187.443),
(0, 1.5, 189.046)]
)
def test_fittable_likelihood(r_c, r_h, expect):
'''Test that the closure to give a version of intensity and likelihood that
can be fitted by scipy works correctly.'''
fittable_likelihood = likelihood.get_fittable_likelihood(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE, SMALL_CASES_FILE
)
fit_params = np.asarray(
[r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']]
)
assert_almost_equal(
fittable_likelihood(
fit_params, *map(
SIMPLE_DIST_PARAMS.get,
(('self_excitation_shape', 'self_excitation_scale',
'discharge_excitation_shape', 'discharge_excitation_scale'))
)
),
expect,
decimal=3
)
@pytest.fixture
def large_test_data():
'''Generate test data of the size expected from SAIL.'''
max_categories = 4
num_care_homes = 1000
num_cases = 2000
num_case_homes = 330
num_discharges = 3000
num_discharge_homes = 500
num_days = 181
num_covariates = 1
max_carehome_id = 32767
cases = np.zeros((num_days, num_care_homes), dtype=np.int8)
discharges = np.zeros((num_days, num_care_homes), dtype=np.int8)
covariates = np.zeros((num_covariates, num_care_homes), dtype=np.int8)
# For runs with the same version of numpy, we should get the same
# test data each time. Not guaranteed to work between versions
# because default_rng can change.
rng = np.random.default_rng(seed=0)
care_home_ids = rng.choice(
max_carehome_id, size=num_care_homes, replace=False
)
for sample_array, num_instances, num_places in (
(cases, num_cases, num_case_homes),
(discharges, num_discharges, num_discharge_homes)
):
for _ in range(num_instances):
sample_array[rng.integers(num_days), rng.integers(num_places)] += 1
covariates[0] = rng.choice(max_categories, size=num_care_homes)
for array in care_home_ids, cases, covariates, discharges:
array.flags.writeable = False
return care_home_ids, cases, covariates, discharges
def test_intensity_performance_base(large_test_data, benchmark):
'''
Test the performance of the intensity function for the base case
'''
_, cases, covariates, _ = large_test_data
kwargs = {
'fit_params': {**LARGE_FIT_PARAMS, 'r_h': None, 'r_c': None},
'covariates': covariates,
'cases': cases
}
# Ensure that numba can jit the function before timing it
likelihood.carehome_intensity_null(**kwargs)
benchmark(likelihood.carehome_intensity_null, **kwargs)
@pytest.mark.parametrize("use_cache", [True, False])
def test_intensity_performance_self(large_test_data, benchmark, use_cache):
'''
Test the performance of the intensity function with self-excitation
'''
_, cases, covariates, _ = large_test_data
if not use_cache:
# Writeable arrays are not cached
cases.flags.writeable = True
covariates.flags.writeable = True
kwargs = {
'fit_params': {**LARGE_FIT_PARAMS, 'r_h': None},
'covariates': covariates,
'cases': cases,
'dist_params': FULL_DIST_PARAMS
}
# Ensure that numba can jit the function before timing it
likelihood.carehome_intensity(**kwargs)
benchmark(likelihood.carehome_intensity, **kwargs)
@pytest.mark.parametrize("use_cache", [True, False])
def test_intensity_performance_hospitals(
large_test_data, benchmark, use_cache
):
'''
Test the performance of the intensity function with self- and
discharge excitations.'''
_, cases, covariates, discharges = large_test_data
if not use_cache:
# Writeable arrays are not cached
cases.flags.writeable = True
covariates.flags.writeable = True
discharges.flags.writeable = True
kwargs = {
'fit_params': LARGE_FIT_PARAMS,
'covariates': covariates,
'cases': cases,
'discharges': discharges,
'dist_params': FULL_DIST_PARAMS
}
# Ensure that numba can jit the function before timing it
likelihood.carehome_intensity(**kwargs)
benchmark(likelihood.carehome_intensity, **kwargs)
def test_likelihood_performance(large_test_data, benchmark):
'''
Test the performance of the calculation of likelihood from the intensity
and case distribution.'''
_, cases, covariates, discharges = large_test_data
intensity = likelihood.carehome_intensity(
fit_params=LARGE_FIT_PARAMS,
covariates=covariates,
cases=cases,
discharges=discharges,
dist_params=FULL_DIST_PARAMS
)
benchmark(likelihood.likelihood, intensity, cases)
|
[
"numpy.random.default_rng",
"likelihood.read_and_tidy_data",
"time.perf_counter_ns",
"likelihood.likelihood",
"scipy.stats.gamma.pdf",
"numpy.asarray",
"likelihood.cached_single_excitation",
"numpy.testing.assert_almost_equal",
"numpy.linspace",
"numpy.testing.assert_array_equal",
"likelihood.single_excitation",
"likelihood.carehome_intensity_null",
"likelihood.calculate_likelihood_from_files",
"pytest.raises",
"likelihood.get_fittable_likelihood",
"likelihood.compactify",
"likelihood.calculate_gamma_parameters",
"pytest.mark.parametrize",
"numpy.zeros",
"likelihood.gamma_pdf",
"likelihood.carehome_intensity"
] |
[((1212, 1332), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_element,result_dtype"""', '[(123456789, np.uint32), (65535, np.uint16), (255, np.uint8)]'], {}), "('test_element,result_dtype', [(123456789, np.uint32\n ), (65535, np.uint16), (255, np.uint8)])\n", (1235, 1332), False, 'import pytest\n'), ((5434, 5548), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mean, cv, expected_shape, expected_scale"""', '[(1, 1, 1, 1), (6.5, 0.62, 2.601, 2.499)]'], {}), "('mean, cv, expected_shape, expected_scale', [(1, 1,\n 1, 1), (6.5, 0.62, 2.601, 2.499)])\n", (5457, 5548), False, 'import pytest\n'), ((8580, 8706), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""r_c, r_h, expect"""', '[(0, 0, 196.466), (0.5, 1.5, 182.761), (0.5, 0, 187.443), (0, 1.5, 189.046)]'], {}), "('r_c, r_h, expect', [(0, 0, 196.466), (0.5, 1.5, \n 182.761), (0.5, 0, 187.443), (0, 1.5, 189.046)])\n", (8603, 8706), False, 'import pytest\n'), ((11357, 11408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cache"""', '[True, False]'], {}), "('use_cache', [True, False])\n", (11380, 11408), False, 'import pytest\n'), ((12105, 12156), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cache"""', '[True, False]'], {}), "('use_cache', [True, False])\n", (12128, 12156), False, 'import pytest\n'), ((297, 331), 'numpy.asarray', 'np.asarray', (['[1, 2, np.nan, np.nan]'], {}), '([1, 2, np.nan, np.nan])\n', (307, 331), True, 'import numpy as np\n'), ((688, 720), 'numpy.asarray', 'np.asarray', (['[0.3, 0.4, 0.6, 0.9]'], {}), '([0.3, 0.4, 0.6, 0.9])\n', (698, 720), True, 'import numpy as np\n'), ((956, 979), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (967, 979), True, 'import numpy as np\n'), ((1481, 1537), 'numpy.asarray', 'np.asarray', (['[[1, 2], [3, test_element]]'], {'dtype': 'np.uint32'}), '([[1, 2], [3, test_element]], dtype=np.uint32)\n', (1491, 1537), True, 'import numpy as np\n'), ((1551, 1579), 'likelihood.compactify', 'likelihood.compactify', (['array'], {}), '(array)\n', (1572, 1579), False, 'import likelihood\n'), ((1624, 1657), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['array', 'result'], {}), '(array, result)\n', (1642, 1657), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((1817, 1864), 'likelihood.read_and_tidy_data', 'likelihood.read_and_tidy_data', (['SMALL_CASES_FILE'], {}), '(SMALL_CASES_FILE)\n', (1846, 1864), False, 'import likelihood\n'), ((1869, 1906), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ids', '[14, 16, 35]'], {}), '(ids, [14, 16, 35])\n', (1887, 1906), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((1911, 1973), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['values', '[[4, 1, 6], [4, 0, 3], [6, 66, 2]]'], {}), '(values, [[4, 1, 6], [4, 0, 3], [6, 66, 2]])\n', (1929, 1973), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((2110, 2157), 'likelihood.read_and_tidy_data', 'likelihood.read_and_tidy_data', (['SMALL_CASES_FILE'], {}), '(SMALL_CASES_FILE)\n', (2139, 2157), False, 'import likelihood\n'), ((2266, 2318), 'likelihood.read_and_tidy_data', 'likelihood.read_and_tidy_data', (['SMALL_COVARIATES_FILE'], {}), '(SMALL_COVARIATES_FILE)\n', (2295, 2318), False, 'import likelihood\n'), ((2611, 2714), 'likelihood.carehome_intensity_null', 'likelihood.carehome_intensity_null', ([], {'covariates': 'covariates', 'cases': 'cases', 'fit_params': 'SMALL_FIT_PARAMS'}), 
'(covariates=covariates, cases=cases,\n fit_params=SMALL_FIT_PARAMS)\n', (2645, 2714), False, 'import likelihood\n'), ((2745, 2809), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['intensity', '[[1, 2, 2], [1, 2, 2], [1, 2, 2]]'], {}), '(intensity, [[1, 2, 2], [1, 2, 2], [1, 2, 2]])\n', (2763, 2809), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((3027, 3068), 'likelihood.single_excitation', 'likelihood.single_excitation', (['cases', '(2)', '(1)'], {}), '(cases, 2, 1)\n', (3055, 3068), False, 'import likelihood\n'), ((3073, 3179), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['excitation', '[[0, 0, 0], [1.472, 0.368, 2.207], [2.554, 0.271, 2.728]]'], {'decimal': '(3)'}), '(excitation, [[0, 0, 0], [1.472, 0.368, 2.207], [2.554, \n 0.271, 2.728]], decimal=3)\n', (3092, 3179), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((3543, 3560), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3558, 3560), False, 'from time import perf_counter_ns\n'), ((3587, 3636), 'likelihood.single_excitation', 'likelihood.single_excitation', (['cases', 'shape', 'scale'], {}), '(cases, shape, scale)\n', (3615, 3636), False, 'import likelihood\n'), ((3656, 3673), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3671, 3673), False, 'from time import perf_counter_ns\n'), ((3697, 3753), 'likelihood.cached_single_excitation', 'likelihood.cached_single_excitation', (['cases', 'shape', 'scale'], {}), '(cases, shape, scale)\n', (3732, 3753), False, 'import likelihood\n'), ((3772, 3829), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['uncached_excitation', 'first_excitation'], {}), '(uncached_excitation, first_excitation)\n', (3790, 3829), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((3850, 3867), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3865, 3867), False, 'from time import perf_counter_ns\n'), ((3892, 3948), 'likelihood.cached_single_excitation', 'likelihood.cached_single_excitation', (['cases', 'shape', 'scale'], {}), '(cases, shape, scale)\n', (3927, 3948), False, 'import likelihood\n'), ((3980, 3997), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3995, 3997), False, 'from time import perf_counter_ns\n'), ((4002, 4060), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['uncached_excitation', 'cached_excitation'], {}), '(uncached_excitation, cached_excitation)\n', (4020, 4060), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((4496, 4626), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {'covariates': 'covariates', 'cases': 'cases', 'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(covariates=covariates, cases=cases,\n fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS)\n', (4525, 4626), False, 'import likelihood\n'), ((4665, 4770), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['intensity', '[[1, 2, 2], [1.736, 2.184, 3.104], [2.277, 2.135, 3.364]]'], {'decimal': '(3)'}), '(intensity, [[1, 2, 2], [1.736, 2.184, 3.104], [2.277, \n 2.135, 3.364]], decimal=3)\n', (4684, 4770), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((5099, 5257), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {'covariates': 'covariates', 'cases': 'cases', 'fit_params': 'SMALL_FIT_PARAMS', 'dist_params': 'SIMPLE_DIST_PARAMS', 'discharges': 'discharges'}), '(covariates=covariates, 
cases=cases,\n fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS, discharges\n =discharges)\n', (5128, 5257), False, 'import likelihood\n'), ((5299, 5403), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['intensity', '[[1, 2, 2], [2.077, 5.937, 3.217], [3.332, 11.24, 3.81]]'], {'decimal': '(3)'}), '(intensity, [[1, 2, 2], [2.077, 5.937, 3.217], [3.332, \n 11.24, 3.81]], decimal=3)\n', (5318, 5403), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((5784, 5831), 'likelihood.calculate_gamma_parameters', 'likelihood.calculate_gamma_parameters', (['mean', 'cv'], {}), '(mean, cv)\n', (5821, 5831), False, 'import likelihood\n'), ((5836, 5921), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['[shape, scale]', '[expected_shape, expected_scale]'], {'decimal': '(3)'}), '([shape, scale], [expected_shape, expected_scale], decimal=3\n )\n', (5855, 5921), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((6037, 6091), 'numpy.asarray', 'np.asarray', (['[[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]]'], {}), '([[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]])\n', (6047, 6091), True, 'import numpy as np\n'), ((6108, 6176), 'numpy.asarray', 'np.asarray', (['[[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]]'], {}), '([[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]])\n', (6118, 6176), True, 'import numpy as np\n'), ((6205, 6244), 'likelihood.likelihood', 'likelihood.likelihood', (['intensity', 'cases'], {}), '(intensity, cases)\n', (6226, 6244), False, 'import likelihood\n'), ((6249, 6296), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-39.145)'], {'decimal': '(3)'}), '(result, -39.145, decimal=3)\n', (6268, 6296), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((6539, 6692), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, fit_params=fit_params_no_rh, dist_params=\n SIMPLE_DIST_PARAMS)\n', (6581, 6692), False, 'import likelihood\n'), ((6710, 6758), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-187.443)'], {'decimal': '(3)'}), '(result, -187.443, decimal=3)\n', (6729, 6758), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((6988, 7175), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'discharges_file': 'SMALL_CASES_FILE', 'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, discharges_file=SMALL_CASES_FILE, fit_params=\n fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS)\n', (7030, 7175), False, 'import likelihood\n'), ((7201, 7249), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-189.046)'], {'decimal': '(3)'}), '(result, -189.046, decimal=3)\n', (7220, 7249), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((7525, 7678), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, fit_params=fit_params_no_rh, dist_params=\n SIMPLE_DIST_PARAMS)\n', (7567, 7678), False, 'import 
likelihood\n'), ((7696, 7744), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-196.466)'], {'decimal': '(3)'}), '(result, -196.466, decimal=3)\n', (7715, 7744), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((7928, 8115), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'discharges_file': 'SMALL_CASES_FILE', 'fit_params': 'SMALL_FIT_PARAMS', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, discharges_file=SMALL_CASES_FILE, fit_params=\n SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS)\n', (7970, 8115), False, 'import likelihood\n'), ((8141, 8189), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-182.761)'], {'decimal': '(3)'}), '(result, -182.761, decimal=3)\n', (8160, 8189), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((8929, 9026), 'likelihood.get_fittable_likelihood', 'likelihood.get_fittable_likelihood', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE', 'SMALL_CASES_FILE'], {}), '(SMALL_CASES_FILE, SMALL_COVARIATES_FILE,\n SMALL_CASES_FILE)\n', (8963, 9026), False, 'import likelihood\n'), ((9054, 9119), 'numpy.asarray', 'np.asarray', (["[r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']]"], {}), "([r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']])\n", (9064, 9119), True, 'import numpy as np\n'), ((9807, 9858), 'numpy.zeros', 'np.zeros', (['(num_days, num_care_homes)'], {'dtype': 'np.int8'}), '((num_days, num_care_homes), dtype=np.int8)\n', (9815, 9858), True, 'import numpy as np\n'), ((9876, 9927), 'numpy.zeros', 'np.zeros', (['(num_days, num_care_homes)'], {'dtype': 'np.int8'}), '((num_days, num_care_homes), dtype=np.int8)\n', (9884, 9927), True, 'import numpy as np\n'), ((9945, 10002), 'numpy.zeros', 'np.zeros', (['(num_covariates, num_care_homes)'], {'dtype': 'np.int8'}), '((num_covariates, num_care_homes), dtype=np.int8)\n', (9953, 10002), True, 'import numpy as np\n'), ((10189, 10218), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(0)'}), '(seed=0)\n', (10210, 10218), True, 'import numpy as np\n'), ((11249, 11293), 'likelihood.carehome_intensity_null', 'likelihood.carehome_intensity_null', ([], {}), '(**kwargs)\n', (11283, 11293), False, 'import likelihood\n'), ((12007, 12046), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {}), '(**kwargs)\n', (12036, 12046), False, 'import likelihood\n'), ((12854, 12893), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {}), '(**kwargs)\n', (12883, 12893), False, 'import likelihood\n'), ((13199, 13356), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {'fit_params': 'LARGE_FIT_PARAMS', 'covariates': 'covariates', 'cases': 'cases', 'discharges': 'discharges', 'dist_params': 'FULL_DIST_PARAMS'}), '(fit_params=LARGE_FIT_PARAMS, covariates=\n covariates, cases=cases, discharges=discharges, dist_params=\n FULL_DIST_PARAMS)\n', (13228, 13356), False, 'import likelihood\n'), ((1121, 1155), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x'], {'a': 'shape', 'scale': 'scale'}), '(x, a=shape, scale=scale)\n', (1130, 1155), False, 'from scipy.stats import gamma\n'), ((1165, 1202), 'likelihood.gamma_pdf', 'likelihood.gamma_pdf', (['x', 'shape', 'scale'], {}), '(x, shape, scale)\n', (1185, 1202), False, 'import likelihood\n'), ((8359, 8388), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (8372, 
8388), False, 'import pytest\n'), ((8398, 8551), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'fit_params': 'SMALL_FIT_PARAMS', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, fit_params=SMALL_FIT_PARAMS, dist_params=\n SIMPLE_DIST_PARAMS)\n', (8440, 8551), False, 'import likelihood\n')]
|
from numpy.testing import assert_array_almost_equal as array_assert
from badboids.boids import SimulationParameters
def test_simulation_parameters_init():
"""Tests Simulation Parameters constructor"""
# Arrange
formation_flying_distance = 800
formation_flying_strength = 0.10
alert_distance = 8
move_to_middle_strength = 0.2
delta_t = 1.5
# Act
sut = SimulationParameters(formation_flying_distance, formation_flying_strength, alert_distance,
move_to_middle_strength, delta_t)
# Assert
array_assert(sut.formation_flying_distance, formation_flying_distance)
array_assert(sut.formation_flying_strength, formation_flying_strength)
array_assert(sut.alert_distance, alert_distance)
array_assert(sut.move_to_middle_strength, move_to_middle_strength)
array_assert(sut.delta_t, delta_t)
def test_get_defaults():
"""Tests Simulation Parameters get defaults method"""
# Arrange
expected_formation_flying_distance = 10000
expected_formation_flying_strength = 0.125
expected_alert_distance = 100
expected_move_to_middle_strength = 0.01
expected_delta_t = 1.0
# Act
parameters = SimulationParameters.get_defaults()
# Assert
assert parameters.formation_flying_distance == expected_formation_flying_distance
assert parameters.formation_flying_strength == expected_formation_flying_strength
assert parameters.alert_distance == expected_alert_distance
assert parameters.move_to_middle_strength == expected_move_to_middle_strength
assert parameters.delta_t == expected_delta_t
|
[
"badboids.boids.SimulationParameters",
"numpy.testing.assert_array_almost_equal",
"badboids.boids.SimulationParameters.get_defaults"
] |
[((392, 520), 'badboids.boids.SimulationParameters', 'SimulationParameters', (['formation_flying_distance', 'formation_flying_strength', 'alert_distance', 'move_to_middle_strength', 'delta_t'], {}), '(formation_flying_distance, formation_flying_strength,\n alert_distance, move_to_middle_strength, delta_t)\n', (412, 520), False, 'from badboids.boids import SimulationParameters\n'), ((566, 636), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.formation_flying_distance', 'formation_flying_distance'], {}), '(sut.formation_flying_distance, formation_flying_distance)\n', (578, 636), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((641, 711), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.formation_flying_strength', 'formation_flying_strength'], {}), '(sut.formation_flying_strength, formation_flying_strength)\n', (653, 711), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((716, 764), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.alert_distance', 'alert_distance'], {}), '(sut.alert_distance, alert_distance)\n', (728, 764), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((769, 835), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.move_to_middle_strength', 'move_to_middle_strength'], {}), '(sut.move_to_middle_strength, move_to_middle_strength)\n', (781, 835), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((840, 874), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.delta_t', 'delta_t'], {}), '(sut.delta_t, delta_t)\n', (852, 874), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((1202, 1237), 'badboids.boids.SimulationParameters.get_defaults', 'SimulationParameters.get_defaults', ([], {}), '()\n', (1235, 1237), False, 'from badboids.boids import SimulationParameters\n')]
|
import numpy as np
import cv2
from imutils.object_detection import non_max_suppression
import pytesseract
from matplotlib import pyplot as plt
def ocr(images):
results = []
for image in images:
args = {"image": image, "east": "frozen_east_text_detection.pb", "min_confidence": 0.5, "width": 320,
"height": 320}
args['image'] = image
image = cv2.imread(args['image'])
orig = image.copy()
(origH, origW) = image.shape[:2]
(newW, newH) = (args["width"], args["height"])
rW = origW / float(newW)
rH = origH / float(newH)
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
net = cv2.dnn.readNet(args["east"])
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
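        # Descriptive note (added): the EAST detector returns two maps - 'scores'
        # holds per-cell text confidences and 'geometry' holds, for every cell of
        # the 4x-downsampled feature map, the distances to the four box edges plus
        # a rotation angle, which predictions() below decodes into boxes.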
def predictions(prob_score, geo):
(numR, numC) = prob_score.shape[2:4]
boxes = []
confidence_val = []
for y in range(0, numR):
scoresData = prob_score[0, 0, y]
x0 = geo[0, 0, y]
x1 = geo[0, 1, y]
x2 = geo[0, 2, y]
x3 = geo[0, 3, y]
anglesData = geo[0, 4, y]
for i in range(0, numC):
if scoresData[i] < args["min_confidence"]:
continue
(offX, offY) = (i * 4.0, y * 4.0)
angle = anglesData[i]
cos = np.cos(angle)
sin = np.sin(angle)
h = x0[i] + x2[i]
w = x1[i] + x3[i]
endX = int(offX + (cos * x1[i]) + (sin * x2[i]))
endY = int(offY - (sin * x1[i]) + (cos * x2[i]))
startX = int(endX - w)
startY = int(endY - h)
boxes.append((startX, startY, endX, endY))
confidence_val.append(scoresData[i])
return (boxes, confidence_val)
(boxes, confidence_val) = predictions(scores, geometry)
boxes = non_max_suppression(np.array(boxes), probs=confidence_val)
result = []
for (startX, startY, endX, endY) in boxes:
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
r = orig[startY:endY, startX:endX]
configuration = ("-l eng --oem 1 --psm 8")
text = pytesseract.image_to_string(r, config=configuration)
result.append(text)
results.append(result)
return results
print(ocr(["./images/car_wash.png"]))
|
[
"cv2.dnn.blobFromImage",
"numpy.array",
"numpy.cos",
"pytesseract.image_to_string",
"numpy.sin",
"cv2.resize",
"cv2.imread",
"cv2.dnn.readNet"
] |
[((391, 416), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (401, 416), False, 'import cv2\n'), ((627, 658), 'cv2.resize', 'cv2.resize', (['image', '(newW, newH)'], {}), '(image, (newW, newH))\n', (637, 658), False, 'import cv2\n'), ((708, 805), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(W, H)', '(123.68, 116.78, 103.94)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=\n True, crop=False)\n', (729, 805), False, 'import cv2\n'), ((853, 882), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (["args['east']"], {}), "(args['east'])\n", (868, 882), False, 'import cv2\n'), ((2370, 2385), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (2378, 2385), True, 'import numpy as np\n'), ((2749, 2801), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['r'], {'config': 'configuration'}), '(r, config=configuration)\n', (2776, 2801), False, 'import pytesseract\n'), ((1747, 1760), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1753, 1760), True, 'import numpy as np\n'), ((1787, 1800), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1793, 1800), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
INTRO
@author: <NAME>. Created on Tue May 21 11:57:52 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft,
Delft, the Netherlands
"""
import inspect
from screws.freeze.main import FrozenOnly
from typing import Dict, Union
import numpy as np
from screws.decorators.classproperty.main import classproperty
class DomainInputBase(FrozenOnly):
def __init__(self, domain_name='domain without name'):
self.domain_name = domain_name
self._ndim_ = 2
self._region_corner_coordinates_ = None
self._region_edge_types_ = None
self._boundary_region_edges_ = None
self._region_interpolators_ = None
self._boundary_names_ = None
self._periodic_boundary_pairs_ = dict()
self._periodic_boundaries_ = set()
self._region_sequence_ = None
self._region_type_wr2_metric_ = None
self._internal_parameters_ = list()
INSP = inspect.getfullargspec(self.__init__)
self.__arg_names___ = INSP[0][1:]
assert INSP[1] is None and INSP[2] is None, "A domain input class can not have *args and **kwargs."
assert len(INSP[3]) == len(self.__arg_names___), "A domain input class can only have kwargs."
self._freeze_self_()
@property
def internal_parameters(self):
"""Internal parameters only affect internal metric, does not affect the domain shape."""
return self._internal_parameters_
@internal_parameters.setter
def internal_parameters(self, internal_parameters):
if isinstance(internal_parameters, list):
pass
elif isinstance(internal_parameters, str):
internal_parameters = [internal_parameters,]
elif isinstance(internal_parameters, (tuple, set)):
internal_parameters = list(internal_parameters)
else:
raise NotImplementedError(f"internal_parameters = {internal_parameters} not acceptable.")
assert isinstance(internal_parameters, list), \
f"please put internal_parameters({internal_parameters}) in list."
if len(internal_parameters) > 0:
assert all([ip in self.__arg_names___ for ip in internal_parameters])
self._internal_parameters_ = internal_parameters
@property
def domain_name(self):
""" Mesh name. """
return self._domain_name_
@domain_name.setter
def domain_name(self, dn):
assert isinstance(dn, str), " <DomainInput> : domain name needs to be str."
self._domain_name_ = dn
@property
def ndim(self):
""" dimensions n. """
return self._ndim_
@property
def region_interpolators(self):
return self._region_interpolators_
@region_interpolators.setter
def region_interpolators(self, region_interpolators):
self._region_interpolators_ = region_interpolators
def ___PRIVATE_region_name_requirement_checker___(self, regionDict):
"""
Requirements:
1). must be str
2). != domain name.
3). length > 2
4). Starts with 'R:'
5). can only have letters and _
"""
for R in regionDict:
assert isinstance(R, str), f"region name={R} wrong, need be str!"
assert R != self.domain_name, f"region name == domain.name! wrong!"
assert len(R) > 2, f"region name = {R} too short, must > 2."
assert R[0:2] == 'R:', f"regions name = {R} does not start with 'R:'"
R2 = R[2:].replace('_', '')
assert R2.isalpha(),f"region_name = {R} wrong, can only have letter and _ (at >2)."
@property
def region_corner_coordinates(self):
"""
Store the regions 4 corners' coordinates.
Returns
-------
region_coordinates : dict
A dict whose keys represent the regions names, and values represent
the coordinates of regions corner points.
In 2D: (UL, DL, UR, DR).
L: Left, R: Right, U: Upper, D: Down
"""
return self._region_corner_coordinates_
@region_corner_coordinates.setter
def region_corner_coordinates(self, _dict_):
assert isinstance(_dict_, dict), " <DomainInput> : region_coordinates needs to be a dict."
self.___PRIVATE_region_name_requirement_checker___(_dict_)
for R in _dict_:
assert np.shape(_dict_[R])[0] == 4, \
" <DomainInput> : region_coordinates[{}]={} is wrong.".format(R, _dict_[R])
self._region_corner_coordinates_ = _dict_
@property
def region_edge_types(self):
"""
Store the regions' boundaries' types.
Returns
-------
region_boundary_type : dict
A dict that contains the region boundary info. The keys indicate
            the region boundary, the value indicates the info: value[0] indicates
            the type, value[1:] indicates the remaining info, which will be parsed
            into full information. Region boundaries that are not mentioned will be
            set to the default type: ('plane',)
            Notice that the value will be sent to edge_geometry. If
            this info (value[1:]) is to be parsed, it will be done there in
            edge_geometry. And the value is stored in the
`edge_geometry.edge_types`.
"""
return self._region_edge_types_
@region_edge_types.setter
def region_edge_types(self, _dict_):
assert self.region_corner_coordinates is not None, " <DomainInput> : please first set region_coordinates."
assert isinstance(_dict_, dict), " <DomainInput> : region_boundary_type needs to be a dict."
for item in _dict_:
R, S = item.split('-')
assert R in self.region_corner_coordinates and S in ('U', 'D', 'L', 'R'), \
" <DomainInput> : region_edge_type key {} is wrong.".format(item)
self._region_edge_types_ = _dict_
def ___PRIVATE_boundary_name_requirement_checker___(self, boundaryRegionSidesDict):
"""
Requirements:
1). != domain name.
2). Length > 2
3). Can not start with 'R:' (So it must be different from regions names).
4). Only have letters
"""
for boundary_name in boundaryRegionSidesDict.keys():
assert boundary_name != self.domain_name
assert len(boundary_name) > 2, f"boundary_name = {boundary_name} is too short (>2 must)."
assert boundary_name[0:2] != 'R:', f"boundary_name = {boundary_name} wrong."
assert boundary_name.isalpha(), f"boundary_name = {boundary_name} wrong, boundary_name can only contain letters."
@property
def boundary_region_edges(self):
"""
Store the domain boundary information.
Returns
-------
domain_boundary : dict
For example:
{'Down': ("Body_center-D", 'Body_back-D', ...),
'West': ("Body_center-R", 'Body_back-R', ...),
......}
This means we have domain boundaries: South, West and so on.
"""
return self._boundary_region_edges_
@boundary_region_edges.setter
def boundary_region_edges(self, _dict_):
assert self.region_corner_coordinates is not None, " <DomainInput> : please first set region_coordinates."
assert isinstance(_dict_, dict), " <DomainInput> : domain_boundary needs to be a dict."
self.___PRIVATE_boundary_name_requirement_checker___(_dict_)
for boundary_names in _dict_.keys():
assert isinstance(boundary_names, str) and boundary_names != '' and '-' not in boundary_names, \
" <DomainInput> : boundary_names = {} is wrong.".format(boundary_names)
assert boundary_names not in self.region_corner_coordinates.keys(), \
" <DomainInput>: boundary_names={} is taken by one of the regions.".format(boundary_names)
for item in _dict_:
if isinstance(_dict_[item], str):
_dict_[item] = (_dict_[item],)
if isinstance(_dict_[item], list) or isinstance(_dict_[item], tuple):
for item_i in _dict_[item]:
R, S = item_i.split('-')
assert R in self.region_corner_coordinates and S in ('U', 'D', 'L', 'R'), \
" <DomainInput> : domain_boundary[{}]={} is wrong.".format(item, _dict_[item])
else:
raise Exception(" <DomainInput> : boundary_region_edges input value accepts only str, tuple of list.")
self._boundary_region_edges_ = _dict_
self._boundary_names_ = list(_dict_.keys())
def ___PRIVATE_periodic_boundary_requirement_checker___(self, pBd):
"""
Here we only do a simple check. We make sure that the keys are in format of:
0). boundary_name_1=boundary_name_2.
        1). A boundary name appears in at most one pair.
"""
assert isinstance(pBd, dict)
bnPOOL = set()
for pair in pBd:
assert '=' in pair
bn1, bn2 = pair.split('=')
lengthPOOL = len(bnPOOL)
assert bn1 in self._boundary_names_ and bn2 in self._boundary_names_
bnPOOL.add(bn1)
bnPOOL.add(bn2)
newLengthPOOL = len(bnPOOL)
assert newLengthPOOL == lengthPOOL + 2, "Boundary(s) used for multiple periodic pairs!"
self._periodic_boundaries_ = bnPOOL
@property
def periodic_boundary_pairs(self):
return self._periodic_boundary_pairs_
@periodic_boundary_pairs.setter
def periodic_boundary_pairs(self, pBd):
""" """
self.___PRIVATE_periodic_boundary_requirement_checker___(pBd)
self._periodic_boundary_pairs_ = pBd
@property
def periodic_boundaries(self):
"""(set) Return a set of all boundary names those involved in the periodic boundary setting."""
return self._periodic_boundaries_
@property
def periodic_boundaries_involved_regions(self):
"""The regions that involve periodic boundaries."""
regions = set()
for pb in self.periodic_boundaries:
region_sides = self.boundary_region_edges[pb]
for rs in region_sides:
rn = rs.split('-')[0]
if rn not in regions:
regions.add(rn)
return regions
@property
def region_sequence(self):
"""
        This will fix the sequence of regions by fixing their names in the property
        region_names or regions.names. This is very important for numbering: a bad
        region sequence can make the numbering wrong.
"""
return self._region_sequence_
@region_sequence.setter
def region_sequence(self, rS: tuple):
assert len(rS) == len(self.region_corner_coordinates.keys())
assert all([rSi in self.region_corner_coordinates for rSi in rS]) & \
all([rSi in rS for rSi in self.region_corner_coordinates.keys()]), \
f"region_sequence={rS} has invalid regions name(s)."
self._region_sequence_ = rS
@property
def region_type_wr2_metric(self):
return self._region_type_wr2_metric_
@region_type_wr2_metric.setter
def region_type_wr2_metric(self, rTwr2M: Union[str, Dict[str, str]]):
if isinstance(rTwr2M, str):
_D_ = dict()
for region_name in self.region_corner_coordinates:
_D_[region_name] = rTwr2M
rTwr2M = _D_
assert isinstance(rTwr2M, dict), "region_type_wr2_metric needs to be a dictionary."
for key in rTwr2M:
assert key in self.region_corner_coordinates, f"Region name={key} not valid."
self._region_type_wr2_metric_ = rTwr2M
# class properties -------------------------
@classproperty
def statistic(cls):
raise NotImplementedError()
@classproperty
def random_parameters(cls):
raise NotImplementedError()
|
[
"numpy.shape",
"inspect.getfullargspec"
] |
[((1001, 1038), 'inspect.getfullargspec', 'inspect.getfullargspec', (['self.__init__'], {}), '(self.__init__)\n', (1023, 1038), False, 'import inspect\n'), ((4446, 4465), 'numpy.shape', 'np.shape', (['_dict_[R]'], {}), '(_dict_[R])\n', (4454, 4465), True, 'import numpy as np\n')]
|
from foolbox import zoo
import numpy as np
import foolbox
import sys
import pytest
from foolbox.zoo.model_loader import ModelLoader
from os.path import join, dirname
@pytest.fixture(autouse=True)
def unload_foolbox_model_module():
# reload foolbox_model from scratch for every run
# to ensure atomic tests without side effects
module_names = ["foolbox_model", "model"]
for module_name in module_names:
if module_name in sys.modules:
del sys.modules[module_name]
test_data = [
# private repo won't work on travis
# ('https://github.com/bethgelab/AnalysisBySynthesis.git', (1, 28, 28)),
# ('https://github.com/bethgelab/convex_adversarial.git', (1, 28, 28)),
# ('https://github.com/bethgelab/mnist_challenge.git', 784)
(join("file://", dirname(__file__), "data/model_repo"), (3, 224, 224))
]
@pytest.mark.parametrize("url, dim", test_data)
def test_loading_model(url, dim):
# download model
model = zoo.get_model(url)
# create a dummy image
x = np.zeros(dim, dtype=np.float32)
x[:] = np.random.randn(*x.shape)
# run the model
logits = model.forward_one(x)
probabilities = foolbox.utils.softmax(logits)
predicted_class = np.argmax(logits)
# sanity check
assert predicted_class >= 0
assert np.sum(probabilities) >= 0.9999
# TODO: delete fmodel
def test_non_default_module_throws_error():
with pytest.raises(RuntimeError):
ModelLoader.get(key="other")
|
[
"numpy.argmax",
"foolbox.zoo.model_loader.ModelLoader.get",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.sum",
"pytest.raises",
"os.path.dirname",
"foolbox.utils.softmax",
"pytest.fixture",
"foolbox.zoo.get_model",
"numpy.random.randn"
] |
[((169, 197), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (183, 197), False, 'import pytest\n'), ((853, 899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""url, dim"""', 'test_data'], {}), "('url, dim', test_data)\n", (876, 899), False, 'import pytest\n'), ((967, 985), 'foolbox.zoo.get_model', 'zoo.get_model', (['url'], {}), '(url)\n', (980, 985), False, 'from foolbox import zoo\n'), ((1022, 1053), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (1030, 1053), True, 'import numpy as np\n'), ((1065, 1090), 'numpy.random.randn', 'np.random.randn', (['*x.shape'], {}), '(*x.shape)\n', (1080, 1090), True, 'import numpy as np\n'), ((1166, 1195), 'foolbox.utils.softmax', 'foolbox.utils.softmax', (['logits'], {}), '(logits)\n', (1187, 1195), False, 'import foolbox\n'), ((1218, 1235), 'numpy.argmax', 'np.argmax', (['logits'], {}), '(logits)\n', (1227, 1235), True, 'import numpy as np\n'), ((1299, 1320), 'numpy.sum', 'np.sum', (['probabilities'], {}), '(probabilities)\n', (1305, 1320), True, 'import numpy as np\n'), ((1413, 1440), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1426, 1440), False, 'import pytest\n'), ((1450, 1478), 'foolbox.zoo.model_loader.ModelLoader.get', 'ModelLoader.get', ([], {'key': '"""other"""'}), "(key='other')\n", (1465, 1478), False, 'from foolbox.zoo.model_loader import ModelLoader\n'), ((794, 811), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (801, 811), False, 'from os.path import join, dirname\n')]
|
import cv2
import numpy as np
import os
from auto_pose.meshrenderer import meshrenderer
from auto_pose.ae.utils import lazy_property
class PoseVisualizer:
def __init__(self, mp_pose_estimator, downsample=1, vertex_scale=False):
self.downsample = downsample
self.vertex_scale = [mp_pose_estimator.train_args.getint('Dataset', 'VERTEX_SCALE')] if not vertex_scale else [1.]
if hasattr(mp_pose_estimator, 'class_2_objpath'):
self.classes, self.ply_model_paths = zip(*mp_pose_estimator.class_2_objpath.items())
else:
# For BOP evaluation (sry!):
self.classes = mp_pose_estimator.class_2_codebook.keys()
all_model_paths = eval(mp_pose_estimator.train_args.get('Paths', 'MODEL_PATH'))
base_path = '/'.join(all_model_paths[0].split('/')[:-3])
itodd_paths = [os.path.join(base_path, 'itodd/models/obj_0000{: 02d}.ply'.format(i)) for i in range(29)]
all_model_paths = all_model_paths + itodd_paths
all_model_paths = [model_p.replace('YCB_VideoDataset/original2sixd/bop_models/', 'bop/original/ycbv/models_eval/') for model_p in all_model_paths]
self.ply_model_paths = []
for cb_name in mp_pose_estimator.class_2_codebook.values():
for model_path in all_model_paths:
bop_dataset = cb_name.split('_')[0]
bop_dataset = 'ycbv' if bop_dataset == 'original2sixd' else bop_dataset
model_type, obj, obj_id = cb_name.split('_')[-3:]
model_name = obj + '_' + obj_id
if bop_dataset in model_path and model_name in model_path:
self.ply_model_paths.append(model_path)
print(('renderer', 'Model paths: ', self.ply_model_paths))
@lazy_property
def renderer(self):
return meshrenderer.Renderer(self.ply_model_paths,
samples=1,
vertex_tmp_store_folder='.',
vertex_scale=float(self.vertex_scale[0])) # 1000 for models in meters
def render_poses(self, image, camK, pose_ests, dets, vis_bbs=True, vis_mask=False, all_pose_estimates_rgb=None, depth_image=None, waitKey=True):
        # Cast to int so the downsampled size is valid for cv2.resize and the renderer.
        W_d = int(image.shape[1] / self.downsample)
        H_d = int(image.shape[0] / self.downsample)
print( [self.classes.index(pose_est.name) for pose_est in pose_ests])
bgr, depth,_ = self.renderer.render_many(obj_ids = [self.classes.index(pose_est.name) for pose_est in pose_ests],
W = W_d,
H = H_d,
K = camK.copy(),
Rs = [pose_est.trafo[:3,:3] for pose_est in pose_ests],
ts = [pose_est.trafo[:3,3] for pose_est in pose_ests],
near = 10,
far = 10000)
image_show = cv2.resize(image,(W_d,H_d))
if all_pose_estimates_rgb is not None:
image_show_rgb = image_show.copy()
g_y = np.zeros_like(bgr)
g_y[:,:,1]= bgr[:,:,1]
image_show[bgr > 0] = g_y[bgr > 0]*2./3. + image_show[bgr > 0]*1./3.
if all_pose_estimates_rgb is not None:
            # NOTE: the original referenced an undefined `all_class_idcs`; the RGB-based
            # estimates are assumed to expose `.name`/`.trafo` like the refined `pose_ests`.
            bgr, depth,_ = self.renderer.render_many(obj_ids = [self.classes.index(pose_est.name) for pose_est in all_pose_estimates_rgb],
W = W_d,
H = H_d,
K = camK.copy(),
                        Rs = [pose_est.trafo[:3,:3] for pose_est in all_pose_estimates_rgb],
                        ts = [pose_est.trafo[:3,3] for pose_est in all_pose_estimates_rgb],
near = 10,
far = 10000)
bgr = cv2.resize(bgr,(W_d,H_d))
b_y = np.zeros_like(bgr)
b_y[:,:,0]= bgr[:,:,0]
image_show_rgb[bgr > 0] = b_y[bgr > 0]*2./3. + image_show_rgb[bgr > 0]*1./3.
if np.any(depth_image):
depth_show = depth_image.copy()
depth_show = np.dstack((depth_show,depth_show,depth_show))
depth_show[bgr[:,:,0] > 0] = g_y[bgr[:,:,0] > 0]*2./3. + depth_show[bgr[:,:,0] > 0]*1./3.
cv2.imshow('depth_refined_pose', depth_show)
if vis_bbs:
# for label,box,score in zip(labels,boxes,scores):
for det in dets:
# box = box.astype(np.int32) / self.downsample
# xmin, ymin, xmax, ymax = box[0], box[1], box[0] + box[2], box[1] + box[3]
xmin, ymin, xmax, ymax = int(det.xmin * W_d), int(det.ymin * H_d), int(det.xmax * W_d), int(det.ymax * H_d)
label, score = list(det.classes.items())[0]
try:
cv2.putText(image_show, '%s : %1.3f' % (label,score), (xmin, ymax+20), cv2.FONT_ITALIC, .5, (0,0,255), 2)
cv2.rectangle(image_show,(xmin,ymin),(xmax,ymax),(255,0,0),2)
if all_pose_estimates_rgb is not None:
cv2.putText(image_show_rgb, '%s : %1.3f' % (label,score), (xmin, ymax+20), cv2.FONT_ITALIC, .5, (0,0,255), 2)
cv2.rectangle(image_show_rgb,(xmin,ymin),(xmax,ymax),(255,0,0),2)
except:
print('failed to plot boxes')
if all_pose_estimates_rgb is not None:
cv2.imshow('rgb_pose', image_show_rgb)
cv2.imshow('refined_pose', image_show)
if waitKey:
cv2.waitKey(0)
else:
cv2.waitKey(1)
return (image_show)
|
[
"cv2.rectangle",
"numpy.dstack",
"numpy.any",
"cv2.imshow",
"cv2.putText",
"cv2.waitKey",
"cv2.resize",
"numpy.zeros_like"
] |
[((2879, 2908), 'cv2.resize', 'cv2.resize', (['image', '(W_d, H_d)'], {}), '(image, (W_d, H_d))\n', (2889, 2908), False, 'import cv2\n'), ((3016, 3034), 'numpy.zeros_like', 'np.zeros_like', (['bgr'], {}), '(bgr)\n', (3029, 3034), True, 'import numpy as np\n'), ((3798, 3817), 'numpy.any', 'np.any', (['depth_image'], {}), '(depth_image)\n', (3804, 3817), True, 'import numpy as np\n'), ((5251, 5289), 'cv2.imshow', 'cv2.imshow', (['"""refined_pose"""', 'image_show'], {}), "('refined_pose', image_show)\n", (5261, 5289), False, 'import cv2\n'), ((3599, 3626), 'cv2.resize', 'cv2.resize', (['bgr', '(W_d, H_d)'], {}), '(bgr, (W_d, H_d))\n', (3609, 3626), False, 'import cv2\n'), ((3644, 3662), 'numpy.zeros_like', 'np.zeros_like', (['bgr'], {}), '(bgr)\n', (3657, 3662), True, 'import numpy as np\n'), ((3888, 3935), 'numpy.dstack', 'np.dstack', (['(depth_show, depth_show, depth_show)'], {}), '((depth_show, depth_show, depth_show))\n', (3897, 3935), True, 'import numpy as np\n'), ((4048, 4092), 'cv2.imshow', 'cv2.imshow', (['"""depth_refined_pose"""', 'depth_show'], {}), "('depth_refined_pose', depth_show)\n", (4058, 4092), False, 'import cv2\n'), ((5204, 5242), 'cv2.imshow', 'cv2.imshow', (['"""rgb_pose"""', 'image_show_rgb'], {}), "('rgb_pose', image_show_rgb)\n", (5214, 5242), False, 'import cv2\n'), ((5322, 5336), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5333, 5336), False, 'import cv2\n'), ((5363, 5377), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5374, 5377), False, 'import cv2\n'), ((4599, 4714), 'cv2.putText', 'cv2.putText', (['image_show', "('%s : %1.3f' % (label, score))", '(xmin, ymax + 20)', 'cv2.FONT_ITALIC', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(image_show, '%s : %1.3f' % (label, score), (xmin, ymax + 20),\n cv2.FONT_ITALIC, 0.5, (0, 0, 255), 2)\n", (4610, 4714), False, 'import cv2\n'), ((4725, 4794), 'cv2.rectangle', 'cv2.rectangle', (['image_show', '(xmin, ymin)', '(xmax, ymax)', '(255, 0, 0)', '(2)'], {}), '(image_show, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)\n', (4738, 4794), False, 'import cv2\n'), ((4870, 4990), 'cv2.putText', 'cv2.putText', (['image_show_rgb', "('%s : %1.3f' % (label, score))", '(xmin, ymax + 20)', 'cv2.FONT_ITALIC', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(image_show_rgb, '%s : %1.3f' % (label, score), (xmin, ymax + 20\n ), cv2.FONT_ITALIC, 0.5, (0, 0, 255), 2)\n", (4881, 4990), False, 'import cv2\n'), ((5004, 5077), 'cv2.rectangle', 'cv2.rectangle', (['image_show_rgb', '(xmin, ymin)', '(xmax, ymax)', '(255, 0, 0)', '(2)'], {}), '(image_show_rgb, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)\n', (5017, 5077), False, 'import cv2\n')]
|
import numpy as np
from uncertainties import umath as um
def getTeqpl(Teffst, aR, ecc, A=0, f=1/4.):
"""Return the planet equilibrium temperature.
Relation adapted from equation 4 page 4 in http://www.mpia.de/homes/ppvi/chapter/madhusudhan.pdf
and https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_law
    and later updated to include the effect of eccentricity on the average stellar planet distance
according to equation 5 p 25 of Laughlin & Lissauer 2015arXiv150105685L (1501.05685)
Plus Exoplanet atmospheres, physical processes, Sara Seager, p30 eq 3.9 for f contribution.
:param float/np.ndarray Teffst: Effective temperature of the star
    :param float/np.ndarray aR: Ratio of the planetary orbital semi-major axis over the stellar
    radius (without unit)
    :param float/np.ndarray ecc: Orbital eccentricity of the planet
    :param float/np.ndarray A: Bond albedo (should be between 0 and 1)
    :param float/np.ndarray f: Redistribution factor. If 1/4 the energy is uniformly redistributed
    over the planetary surface. If f = 2/3, there is no redistribution at all: the atmosphere immediately
    reradiates without advection.
:return float/np.ndarray Teqpl: Equilibrium temperature of the planet
"""
return Teffst * (f * (1 - A))**(1 / 4.) * np.sqrt(1 / aR) / (1 - ecc**2)**(1/8.)
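# Illustrative check (not from the original source): for Earth-like values
# Teffst = 5778 K, aR ~ 215, ecc ~ 0.017, A = 0.3 and the default f = 1/4,
# getTeqpl returns roughly 255 K, the usual equilibrium-temperature estimate for Earth.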
def getTeqpl_error(Teffst, aR, ecc, A=0, f=1/4.):
"""Return the planet equilibrium temperature.
Relation adapted from equation 4 page 4 in http://www.mpia.de/homes/ppvi/chapter/madhusudhan.pdf
and https://en.wikipedia.org/wiki/Stefan%E2%80%93Boltzmann_law
    and later updated to include the effect of eccentricity on the average stellar planet distance
according to equation 5 p 25 of Laughlin & Lissauer 2015arXiv150105685L (1501.05685)
Plus Exoplanet atmospheres, physical processes, Sara Seager, p30 eq 3.9 for f contribution.
:param float/np.ndarray Teffst: Effective temperature of the star
    :param float/np.ndarray aR: Ratio of the planetary orbital semi-major axis over the stellar
    radius (without unit)
    :param float/np.ndarray ecc: Orbital eccentricity of the planet
    :param float/np.ndarray A: Bond albedo (should be between 0 and 1)
    :param float/np.ndarray f: Redistribution factor. If 1/4 the energy is uniformly redistributed
    over the planetary surface. If f = 2/3, there is no redistribution at all: the atmosphere immediately
    reradiates without advection.
:return float/np.ndarray Teqpl: Equilibrium temperature of the planet
"""
return Teffst * (f * (1 - A))**(1 / 4.) * um.sqrt(1 / aR) / (1 - ecc**2)**(1/8.)
def getHtidal(Ms, Rp, a, e):
# a -- in AU, semi major axis
# Teq -- in Kelvins, planetary equilibrium temperature
# M -- in Jupiter masses, planetary mass
# Z -- [Fe/H], stellar metallicity
# Rp -- radius planet
# Ms -- stellar mass
# e -- eccentricity
# G -- gravitational constant
#
#
G = 6.67408 * 10**(-11) # m3 kg-1 s-2
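    # Note: G is given in SI units, so Ms, Rp and a must be supplied in kg and metres
    # (despite the AU / Jupiter-mass hints above) for a dimensionally consistent result.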
# Equation from Enoch et al. 2012
# Q = 10**5 # Tidal dissipation factor for high mass planets ...?
# k = 0.51 # Love number
# H_tidal = (63/4) * ((G * Ms)**(3/2) * Ms * Rp**5 * a**(-15/2)*e**2) / ((3*Q) / (2*k))
# Equation from Jackson 2008
# Qp' = (3*Qp) / (2*k)
Qp = 500 # with Love number 0.3 for terrestrial planets
    # Prefactor 63/(16*pi): tidal heating per unit surface area (Jackson et al. 2008)
    H_tidal = (63 / (16 * np.pi)) * (((G * Ms)**(3 / 2) * Ms * Rp**3) / Qp) * a**(-15 / 2) * e**2
return H_tidal
def safronov_nb(Mp, Ms, Rp, a):
# Ozturk 2018, Safronov 1972
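    # Dimensionless: equal to (1/2) * (v_escape / v_orbital)**2 for the planet.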
return (Mp/Ms) * (a/Rp)
|
[
"numpy.sqrt",
"uncertainties.umath.sqrt"
] |
[((1240, 1255), 'numpy.sqrt', 'np.sqrt', (['(1 / aR)'], {}), '(1 / aR)\n', (1247, 1255), True, 'import numpy as np\n'), ((2469, 2484), 'uncertainties.umath.sqrt', 'um.sqrt', (['(1 / aR)'], {}), '(1 / aR)\n', (2476, 2484), True, 'from uncertainties import umath as um\n')]
|
import time
import sys
import json
import argparse
from tqdm import trange
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import numpy as np
from scipy.spatial.distance import jensenshannon
import gym
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.ticker import MaxNLocator
from matplotlib.lines import Line2D
import pandemic_simulator as ps
from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType
from pandemic_simulator.environment.interfaces import InfectionSummary
from pandemic_simulator.viz import PandemicViz
from pandemic_simulator.environment import PandemicSimOpts
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
def hellinger(p, q):
# distance between p and q
# p and q are np array probability distributions
return (1.0 / np.sqrt(2.0)) * np.sqrt(np.sum(np.square(np.sqrt(p) - np.sqrt(q)), axis=1))
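# Illustrative example: hellinger(np.array([[1., 0.]]), np.array([[0., 1.]])) -> array([1.])
# while hellinger(p, p) -> zeros, since identical distributions have zero Hellinger distance.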
def evaluate_policy(
name: str,
model: "base_class.BaseAlgorithm",
base_model: "base_class.BaseAlgorithm",
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 32,
deterministic: bool = True,
render: bool = False,
viz: Optional[PandemicViz] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
If a vector env is passed in, this divides the episodes to evaluate onto the
different elements of the vector env. This static division of work is done to
remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
details and discussion.
.. note::
If environment has not been wrapped with ``Monitor`` wrapper, reward and
episode lengths are counted as it appears with ``env.step`` calls. If
the environment contains wrappers that modify rewards or episode lengths
(e.g. reward scaling, early episode reset), these will affect the evaluation
results as well. You can avoid this by wrapping environment with ``Monitor``
wrapper before anything else.
:param model: The RL agent you want to evaluate.
:param env: The gym environment or ``VecEnv`` environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterministic: Whether to use deterministic or stochastic actions
:param render: Whether to render the environment or not
    :param viz: Optional PandemicViz used to record per-day simulation summaries.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
:param return_episode_rewards: If True, a list of rewards and episode lengths
per episode will be returned instead of the mean.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
:return: Mean reward per episode, std of reward per episode.
Returns ([float], [int]) when ``return_episode_rewards`` is True, first
list containing per-episode rewards and second containing per-episode lengths
(in number of steps).
"""
if not isinstance(env, VecEnv):
env = DummyVecEnv([lambda: env])
episode_rewards = []
reward_std = []
episode_true_rewards = []
true_reward_std = []
episode_true_rewards2 = []
true_reward_std2 = []
vfs = []
log_probs = []
ents = []
base_vfs = []
base_log_probs = []
base_ents = []
kls = []
js = []
h = []
numpy_obs = env.reset()
states = None
for t in range(200):
actions, states = model.predict(numpy_obs, state=states, deterministic=True)
vf, logp, ent = model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
base_vf, base_logp, base_ent = base_model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
vfs.append(torch.mean(vf).detach().item())
log_probs.append(torch.mean(logp).detach().item())
ents.append(torch.mean(ent).detach().item())
base_vfs.append(torch.mean(base_vf).detach().item())
base_log_probs.append(torch.mean(base_logp).detach().item())
base_ents.append(torch.mean(base_ent).detach().item())
# Distances
log_ratio = logp - base_logp
# Estimator of KL from http://joschu.net/blog/kl-approx.html
kls.append(torch.mean(torch.exp(log_ratio) - 1 - log_ratio).item())
latent_pi, _, latent_sde = model.policy._get_latent(torch.as_tensor(numpy_obs))
model_dist = model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
latent_pi, _, latent_sde = base_model.policy._get_latent(torch.as_tensor(numpy_obs))
base_dist = base_model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
js.append(np.mean(jensenshannon(model_dist, base_dist, axis=1)).item())
h.append(np.mean(hellinger(model_dist, base_dist)).item())
numpy_obs, _, done, info = env.step(actions)
rew = env.get_attr("last_reward")
true_rew = env.get_attr("get_true_reward")
true_rew2 = env.get_attr("get_true_reward2")
episode_rewards.append(np.mean(rew))
reward_std.append(rew)
episode_true_rewards.append(np.mean(true_rew))
true_reward_std.append(true_rew)
episode_true_rewards2.append(np.mean(true_rew2))
true_reward_std2.append(true_rew2)
obs = env.get_attr("observation")
infection_data = np.zeros((1, 5))
threshold_data = np.zeros(len(obs))
for o in obs:
infection_data += o.global_infection_summary[-1]
gis = np.array([o.global_infection_summary[-1] for o in obs]).squeeze(1)
gts = np.array([o.global_testing_summary[-1] for o in obs]).squeeze(1)
stage = np.array([o.stage[-1].item() for o in obs])
if viz:
viz.record_list(obs[0], gis, gts, stage, rew, true_rew, true_rew2=true_rew2)
reward = np.sum(episode_rewards).item()
true_reward = np.sum(episode_true_rewards).item()
true_reward2 = np.sum(episode_true_rewards2).item()
#if viz:
# viz.plot(name=name, evaluate=True, plots_to_show=['critical_summary', 'stages', 'cumulative_reward', 'cumulative_true_reward2'])
# viz.reset()
return reward, np.std(np.sum(np.array(reward_std), axis=0)).item(), \
true_reward, np.std(np.sum(np.array(true_reward_std), axis=0)).item(), \
true_reward2, np.std(np.sum(np.array(true_reward_std2), axis=0)).item(), \
kls, js, h, log_probs, base_log_probs, vfs, base_vfs
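# Usage sketch (names assumed from the code below): given a vectorised pandemic env `env`,
# a trained PPO `model` and a reference `base_model`, evaluate_policy("run", model, base_model,
# env, viz=viz) rolls the policy out for 200 simulated days and returns cumulative proxy/true
# rewards plus per-step policy-distance diagnostics (KL, Jensen-Shannon, Hellinger) vs. the base model.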
def plot_critical_summary(ax, viz, color, sty, m):
gis = np.vstack(viz._gis).squeeze()
gis_std = np.vstack(viz._gis_std).squeeze()
ax.plot(viz._num_persons * gis[:, viz._critical_index], color='black', linestyle=sty, linewidth=1, label='_nolegend_')
#ax.fill_between(np.arange(len(gis)), viz._num_persons * (gis-gis_std)[:, viz._critical_index], viz._num_persons * (gis+gis_std)[:, viz._critical_index], alpha=0.1, color=color)
ax.plot(np.arange(gis.shape[0]), np.ones(gis.shape[0]) * viz._max_hospital_capacity, 'y')
ax.legend(['Max hospital capacity'], loc='upper left')
ax.set_ylim(-0.1, viz._max_hospital_capacity * 3)
ax.set_title('ICU Usage', fontsize=16)
ax.set_xlabel('time (days)', fontsize=16)
ax.set_ylabel('persons', fontsize=16)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
height = viz._num_persons * gis[m, viz._critical_index]
ax.plot([m, m], [-0.1, height], color=color, linestyle=sty, linewidth=2)
ax.plot([0, m], [height, height], color=color, linestyle=sty, linewidth=2)
def plot_stages(ax, viz, color, sty):
days = np.arange(len(viz._stages))
stages = np.array(viz._stages)
stages_std = np.array(viz._stages_std)
ax.plot(days, stages, color='black', linestyle=sty, linewidth=1)
#ax.fill_between(days, stages - stages_std, stages + stages_std, alpha=0.1, color=color)
ax.set_ylim(-0.1, 5) # This assumes at most 5 stages!!
ax.set_title('Regulation Stage', fontsize=16)
ax.set_xlabel('time (days)', fontsize=16)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
m = np.argmax(stages[50:]) + 50
ax.plot([m, m], [-0.1, stages[m]], color=color, linestyle=sty, linewidth=2)
p1 = Line2D([0,1],[0,1],linestyle='-', color='black')
p2 = Line2D([0,1],[0,1],linestyle='--', color='black')
ax.legend([p1, p2], ['smaller policy', 'larger policy'], loc='upper right')
return m
def plot(v1, v2):
fig, (ax1, ax2) = plt.subplots(1, 2)
c1 = 'red'
c2 = 'blue'
s1 = '-'
s2 = '--'
m1 = plot_stages(ax2, v1, c1, s1)
plot_critical_summary(ax1, v1, c1, s1, m1)
m2 = plot_stages(ax2, v2, c2, s2)
plot_critical_summary(ax1, v2, c2, s2, m2)
ax1.figure.set_size_inches(4, 3)
ax2.figure.set_size_inches(4, 3)
fig.set_size_inches(8, 3)
plt.savefig('test.svg',dpi=120, bbox_inches='tight', pad_inches = 0, format='svg')
def make_cfg():
# cfg = ps.sh.small_town_config
# cfg.delta_start_lo = int(sys.argv[6])
# cfg.delta_start_hi = int(sys.argv[7])
# return cfg
sim_config = ps.env.PandemicSimConfig(
num_persons=500,
location_configs=[
ps.env.LocationConfig(ps.env.Home, num=150),
ps.env.LocationConfig(ps.env.GroceryStore, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Office, num=2, num_assignees=150, state_opts=dict(visitor_capacity=0)),
ps.env.LocationConfig(ps.env.School, num=10, num_assignees=2, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Hospital, num=1, num_assignees=15, state_opts=dict(patient_capacity=5)),
ps.env.LocationConfig(ps.env.RetailStore, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.HairSalon, num=2, num_assignees=3, state_opts=dict(visitor_capacity=5)),
ps.env.LocationConfig(ps.env.Restaurant, num=1, num_assignees=6, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Bar, num=1, num_assignees=3, state_opts=dict(visitor_capacity=30))
],
person_routine_assignment=ps.sh.DefaultPersonRoutineAssignment(),
delta_start_lo = 95,
delta_start_hi = 105
)
sim_config_med = ps.env.PandemicSimConfig(
num_persons=2000,
location_configs=[
ps.env.LocationConfig(ps.env.Home, num=600),
ps.env.LocationConfig(ps.env.GroceryStore, num=4, num_assignees=10, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Office, num=4, num_assignees=300, state_opts=dict(visitor_capacity=0)),
ps.env.LocationConfig(ps.env.School, num=20, num_assignees=4, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Hospital, num=2, num_assignees=30, state_opts=dict(patient_capacity=5)),
ps.env.LocationConfig(ps.env.RetailStore, num=4, num_assignees=10, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.HairSalon, num=4, num_assignees=6, state_opts=dict(visitor_capacity=5)),
ps.env.LocationConfig(ps.env.Restaurant, num=2, num_assignees=12, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Bar, num=2, num_assignees=6, state_opts=dict(visitor_capacity=30))
],
person_routine_assignment=ps.sh.DefaultPersonRoutineAssignment(),
delta_start_lo = 95,
delta_start_hi = 105
)
return sim_config
def make_reg():
return ps.sh.austin_regulations
def make_sim(sim_config, noise):
sim_opt = PandemicSimOpts()
sim_opt.spontaneous_testing_rate = noise
return ps.env.PandemicSim.from_config(sim_config=sim_config, sim_opts=sim_opt)
def make_viz(sim_config):
return ps.viz.GymViz.from_config(sim_config=sim_config)
def load_model(env, model_path, width, depth):
agent = ps.model.StageModel(env = env)
d_model = width
n_layers = depth
net_arch = [d_model] * n_layers if n_layers != 0 else []
policy_kwargs = {
"net_arch": [dict(pi=net_arch, vf=net_arch)],
}
model = agent.get_model("ppo", policy_kwargs = policy_kwargs, verbose = 0)
return model.load(model_path)
def init(args, noise):
n_cpus = args.n_cpus
ps.init_globals(seed=args.seed)
sim_config = make_cfg()
regulations = make_reg()
viz = make_viz(sim_config)
done_fn = ps.env.DoneFunctionFactory.default(ps.env.DoneFunctionType.TIME_LIMIT, horizon=200)
reward_fn = SumReward(
reward_fns=[
RewardFunctionFactory.default(RewardFunctionType.INFECTION_SUMMARY_ABOVE_THRESHOLD,
summary_type=InfectionSummary.CRITICAL,
threshold=sim_config.max_hospital_capacity / sim_config.num_persons),
RewardFunctionFactory.default(RewardFunctionType.INFECTION_SUMMARY_ABSOLUTE,
summary_type=InfectionSummary.CRITICAL),
RewardFunctionFactory.default(RewardFunctionType.LOWER_STAGE,
num_stages=len(regulations)),
RewardFunctionFactory.default(RewardFunctionType.SMOOTH_STAGE_CHANGES,
num_stages=len(regulations))
],
weights=[0, 10, 0.1, 0.01]
)
gym = ps.env.PandemicPolicyGymEnv.from_config(
sim_config=sim_config,
sim_opts = PandemicSimOpts(spontaneous_testing_rate=noise),
pandemic_regulations=regulations,
done_fn=done_fn,
reward_fn=reward_fn,
constrain=True,
four_start=False,
obs_history_size=3,
num_days_in_obs=8
)
env = gym.get_multi_env(n=n_cpus) if n_cpus > 1 else gym.get_single_env()
return env, viz
def evaluate(env, model_path, width, depth, base_model, viz):
model = load_model(env, model_path, width, depth)
model_parameters = filter(lambda p: p.requires_grad, model.policy.mlp_extractor.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
params = int(params)
print(f"Evaluating {model_path+str(width)}...")
reward, rstd, true_reward, trstd, true_reward2, tr2std, kl, js, h, log_probs, base_log_probs, vfs, base_vfs = evaluate_policy(model_path, model, base_model, env, viz=viz)
env.close()
print(f"Model: {model_path}. Proxy: {reward}. Objective: {true_reward}.")
return params, reward, rstd, true_reward, trstd, true_reward2, tr2std, kl, js, h, log_probs, base_log_probs, vfs, base_vfs
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model_path')
parser.add_argument('base_model_path')
parser.add_argument('base_width', type=int)
parser.add_argument('base_depth', type=int)
parser.add_argument('--seed', type=int, default=17)
parser.add_argument('--n_cpus', type=int, default=32)
parser.add_argument('--n_episodes', type=int, default=32)
parser.add_argument('--epoch', type=int, default=0)
parser.add_argument('--width', type=int, default=0)
#parser.add_argument('--noise', type=str, default="")
args = parser.parse_known_args(sys.argv[1:])[0]
vs = []
for w in [16, 112]:
env, viz = init(args, 0.02)
base_model = load_model(env, args.base_model_path, args.base_width, args.base_depth)
evaluate(env, args.model_path+str(w), w, 2, base_model, viz)
vs.append(viz)
plot(vs[0], vs[1])
# params, reward, reward_std, true_reward, true_reward_std, true_reward2, true_reward2_std, kls, js, h, log_probs, base_log_probs, vfs, base_vfs, e, noises = \
# [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
# #widths = [4, 8, 12, 16, 20, 24, 28, 32] if args.width == 0 else [40, 48, 56, 64, 80, 96, 112, 128]
# for w in [args.width]:
# for noise in ['01', '02', '003', '005', '03', '04', '05', '06', '07', '08', '09', '095', '1']:
# n2n = {'01':0.1, '02':0.2, '003':0.03, '005':0.05, '03':0.3, '04':0.4, '05':0.5, '06':0.6, '07':0.7, '08':0.8, '09':0.9, '095':0.95, '1':1}
# env, viz = init(args, n2n[noise])
# base_model = load_model(env, args.base_model_path, args.base_width, args.base_depth)
# p, r, rs, tr, trs, tr2, tr2s, kl, j_s, h_, logp, blogp, vf, bvf = evaluate(env, args.model_path+noise+"_"+str(w), w, 2, base_model, viz)
# noises.append(n2n[noise])
# params.append(p)
# reward.append(r)
# reward_std.append(rs)
# true_reward.append(tr)
# true_reward_std.append(trs)
# true_reward2.append(tr2)
# true_reward2_std.append(tr2s)
# kls.append(kl)
# js.append(j_s)
# h.append(h_)
# log_probs.append(logp)
# base_log_probs.append(blogp)
# vfs.append(vf)
# base_vfs.append(bvf)
# e.append(args.epoch)
# f = open(f"pandemic_{args.epoch}_{args.width}_noise.json", "w")
# json.dump({'params':params, 'noise':noises, 'rew': reward, 'rew_std': reward_std, 'true_rew': true_reward, 'true_rew_std': true_reward_std, 'true_rew2': true_reward2,
# 'true_rew2_std': true_reward2_std, 'kls': kls, 'js': js, 'h': h, 'log_probs': log_probs, 'base_log_probs': base_log_probs, 'vfs': vfs, 'base_vfs': base_vfs, 'e': e}, f)
# f.close()
if __name__ == '__main__':
main()
|
[
"pandemic_simulator.sh.DefaultPersonRoutineAssignment",
"torch.as_tensor",
"numpy.sqrt",
"pandemic_simulator.model.StageModel",
"torch.exp",
"numpy.array",
"pandemic_simulator.init_globals",
"matplotlib.ticker.MaxNLocator",
"pandemic_simulator.viz.GymViz.from_config",
"pandemic_simulator.env.LocationConfig",
"matplotlib.lines.Line2D",
"numpy.arange",
"scipy.spatial.distance.jensenshannon",
"numpy.mean",
"argparse.ArgumentParser",
"torch.mean",
"pandemic_simulator.env.DoneFunctionFactory.default",
"pandemic_simulator.environment.reward.RewardFunctionFactory.default",
"gym.get_single_env",
"numpy.vstack",
"pandemic_simulator.environment.PandemicSimOpts",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.argmax",
"gym.get_multi_env",
"pandemic_simulator.env.PandemicSim.from_config",
"numpy.sum",
"numpy.zeros",
"stable_baselines3.common.vec_env.DummyVecEnv",
"matplotlib.pyplot.subplots"
] |
[((8355, 8376), 'numpy.array', 'np.array', (['viz._stages'], {}), '(viz._stages)\n', (8363, 8376), True, 'import numpy as np\n'), ((8395, 8420), 'numpy.array', 'np.array', (['viz._stages_std'], {}), '(viz._stages_std)\n', (8403, 8420), True, 'import numpy as np\n'), ((8930, 8982), 'matplotlib.lines.Line2D', 'Line2D', (['[0, 1]', '[0, 1]'], {'linestyle': '"""-"""', 'color': '"""black"""'}), "([0, 1], [0, 1], linestyle='-', color='black')\n", (8936, 8982), False, 'from matplotlib.lines import Line2D\n'), ((8989, 9042), 'matplotlib.lines.Line2D', 'Line2D', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""', 'color': '"""black"""'}), "([0, 1], [0, 1], linestyle='--', color='black')\n", (8995, 9042), False, 'from matplotlib.lines import Line2D\n'), ((9178, 9196), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (9190, 9196), True, 'import matplotlib.pyplot as plt\n'), ((9553, 9639), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.svg"""'], {'dpi': '(120)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)', 'format': '"""svg"""'}), "('test.svg', dpi=120, bbox_inches='tight', pad_inches=0, format=\n 'svg')\n", (9564, 9639), True, 'import matplotlib.pyplot as plt\n'), ((12413, 12430), 'pandemic_simulator.environment.PandemicSimOpts', 'PandemicSimOpts', ([], {}), '()\n', (12428, 12430), False, 'from pandemic_simulator.environment import PandemicSimOpts\n'), ((12489, 12560), 'pandemic_simulator.env.PandemicSim.from_config', 'ps.env.PandemicSim.from_config', ([], {'sim_config': 'sim_config', 'sim_opts': 'sim_opt'}), '(sim_config=sim_config, sim_opts=sim_opt)\n', (12519, 12560), True, 'import pandemic_simulator as ps\n'), ((12602, 12650), 'pandemic_simulator.viz.GymViz.from_config', 'ps.viz.GymViz.from_config', ([], {'sim_config': 'sim_config'}), '(sim_config=sim_config)\n', (12627, 12650), True, 'import pandemic_simulator as ps\n'), ((12714, 12742), 'pandemic_simulator.model.StageModel', 'ps.model.StageModel', ([], {'env': 'env'}), '(env=env)\n', (12733, 12742), True, 'import pandemic_simulator as ps\n'), ((13112, 13143), 'pandemic_simulator.init_globals', 'ps.init_globals', ([], {'seed': 'args.seed'}), '(seed=args.seed)\n', (13127, 13143), True, 'import pandemic_simulator as ps\n'), ((13250, 13337), 'pandemic_simulator.env.DoneFunctionFactory.default', 'ps.env.DoneFunctionFactory.default', (['ps.env.DoneFunctionType.TIME_LIMIT'], {'horizon': '(200)'}), '(ps.env.DoneFunctionType.TIME_LIMIT,\n horizon=200)\n', (13284, 13337), True, 'import pandemic_simulator as ps\n'), ((15576, 15601), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15599, 15601), False, 'import argparse\n'), ((3555, 3582), 'stable_baselines3.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[lambda : env]'], {}), '([lambda : env])\n', (3566, 3582), False, 'from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv\n'), ((6049, 6065), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (6057, 6065), True, 'import numpy as np\n'), ((7649, 7672), 'numpy.arange', 'np.arange', (['gis.shape[0]'], {}), '(gis.shape[0])\n', (7658, 7672), True, 'import numpy as np\n'), ((8012, 8037), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (8023, 8037), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8775, 8800), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (8786, 8800), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8811, 8833), 'numpy.argmax', 'np.argmax', 
(['stages[50:]'], {}), '(stages[50:])\n', (8820, 8833), True, 'import numpy as np\n'), ((14686, 14713), 'gym.get_multi_env', 'gym.get_multi_env', ([], {'n': 'n_cpus'}), '(n=n_cpus)\n', (14703, 14713), False, 'import gym\n'), ((14733, 14753), 'gym.get_single_env', 'gym.get_single_env', ([], {}), '()\n', (14751, 14753), False, 'import gym\n'), ((974, 986), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (981, 986), True, 'import numpy as np\n'), ((4124, 4150), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (4139, 4150), False, 'import torch\n'), ((4152, 4176), 'torch.as_tensor', 'torch.as_tensor', (['actions'], {}), '(actions)\n', (4167, 4176), False, 'import torch\n'), ((4253, 4279), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (4268, 4279), False, 'import torch\n'), ((4281, 4305), 'torch.as_tensor', 'torch.as_tensor', (['actions'], {}), '(actions)\n', (4296, 4305), False, 'import torch\n'), ((4942, 4968), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (4957, 4968), False, 'import torch\n'), ((5170, 5196), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (5185, 5196), False, 'import torch\n'), ((5732, 5744), 'numpy.mean', 'np.mean', (['rew'], {}), '(rew)\n', (5739, 5744), True, 'import numpy as np\n'), ((5815, 5832), 'numpy.mean', 'np.mean', (['true_rew'], {}), '(true_rew)\n', (5822, 5832), True, 'import numpy as np\n'), ((5914, 5932), 'numpy.mean', 'np.mean', (['true_rew2'], {}), '(true_rew2)\n', (5921, 5932), True, 'import numpy as np\n'), ((6544, 6567), 'numpy.sum', 'np.sum', (['episode_rewards'], {}), '(episode_rewards)\n', (6550, 6567), True, 'import numpy as np\n'), ((6594, 6622), 'numpy.sum', 'np.sum', (['episode_true_rewards'], {}), '(episode_true_rewards)\n', (6600, 6622), True, 'import numpy as np\n'), ((6650, 6679), 'numpy.sum', 'np.sum', (['episode_true_rewards2'], {}), '(episode_true_rewards2)\n', (6656, 6679), True, 'import numpy as np\n'), ((7250, 7269), 'numpy.vstack', 'np.vstack', (['viz._gis'], {}), '(viz._gis)\n', (7259, 7269), True, 'import numpy as np\n'), ((7295, 7318), 'numpy.vstack', 'np.vstack', (['viz._gis_std'], {}), '(viz._gis_std)\n', (7304, 7318), True, 'import numpy as np\n'), ((7674, 7695), 'numpy.ones', 'np.ones', (['gis.shape[0]'], {}), '(gis.shape[0])\n', (7681, 7695), True, 'import numpy as np\n'), ((10942, 10980), 'pandemic_simulator.sh.DefaultPersonRoutineAssignment', 'ps.sh.DefaultPersonRoutineAssignment', ([], {}), '()\n', (10978, 10980), True, 'import pandemic_simulator as ps\n'), ((12182, 12220), 'pandemic_simulator.sh.DefaultPersonRoutineAssignment', 'ps.sh.DefaultPersonRoutineAssignment', ([], {}), '()\n', (12218, 12220), True, 'import pandemic_simulator as ps\n'), ((14379, 14426), 'pandemic_simulator.environment.PandemicSimOpts', 'PandemicSimOpts', ([], {'spontaneous_testing_rate': 'noise'}), '(spontaneous_testing_rate=noise)\n', (14394, 14426), False, 'from pandemic_simulator.environment import PandemicSimOpts\n'), ((6213, 6268), 'numpy.array', 'np.array', (['[o.global_infection_summary[-1] for o in obs]'], {}), '([o.global_infection_summary[-1] for o in obs])\n', (6221, 6268), True, 'import numpy as np\n'), ((6295, 6348), 'numpy.array', 'np.array', (['[o.global_testing_summary[-1] for o in obs]'], {}), '([o.global_testing_summary[-1] for o in obs])\n', (6303, 6348), True, 'import numpy as np\n'), ((9919, 9962), 'pandemic_simulator.env.LocationConfig', 'ps.env.LocationConfig', (['ps.env.Home'], {'num': '(150)'}), 
'(ps.env.Home, num=150)\n', (9940, 9962), True, 'import pandemic_simulator as ps\n'), ((11156, 11199), 'pandemic_simulator.env.LocationConfig', 'ps.env.LocationConfig', (['ps.env.Home'], {'num': '(600)'}), '(ps.env.Home, num=600)\n', (11177, 11199), True, 'import pandemic_simulator as ps\n'), ((13407, 13614), 'pandemic_simulator.environment.reward.RewardFunctionFactory.default', 'RewardFunctionFactory.default', (['RewardFunctionType.INFECTION_SUMMARY_ABOVE_THRESHOLD'], {'summary_type': 'InfectionSummary.CRITICAL', 'threshold': '(sim_config.max_hospital_capacity / sim_config.num_persons)'}), '(RewardFunctionType.\n INFECTION_SUMMARY_ABOVE_THRESHOLD, summary_type=InfectionSummary.\n CRITICAL, threshold=sim_config.max_hospital_capacity / sim_config.\n num_persons)\n', (13436, 13614), False, 'from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType\n'), ((13712, 13832), 'pandemic_simulator.environment.reward.RewardFunctionFactory.default', 'RewardFunctionFactory.default', (['RewardFunctionType.INFECTION_SUMMARY_ABSOLUTE'], {'summary_type': 'InfectionSummary.CRITICAL'}), '(RewardFunctionType.INFECTION_SUMMARY_ABSOLUTE,\n summary_type=InfectionSummary.CRITICAL)\n', (13741, 13832), False, 'from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType\n'), ((1015, 1025), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (1022, 1025), True, 'import numpy as np\n'), ((1028, 1038), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (1035, 1038), True, 'import numpy as np\n'), ((5363, 5407), 'scipy.spatial.distance.jensenshannon', 'jensenshannon', (['model_dist', 'base_dist'], {'axis': '(1)'}), '(model_dist, base_dist, axis=1)\n', (5376, 5407), False, 'from scipy.spatial.distance import jensenshannon\n'), ((6904, 6924), 'numpy.array', 'np.array', (['reward_std'], {}), '(reward_std)\n', (6912, 6924), True, 'import numpy as np\n'), ((6985, 7010), 'numpy.array', 'np.array', (['true_reward_std'], {}), '(true_reward_std)\n', (6993, 7010), True, 'import numpy as np\n'), ((7072, 7098), 'numpy.array', 'np.array', (['true_reward_std2'], {}), '(true_reward_std2)\n', (7080, 7098), True, 'import numpy as np\n'), ((4329, 4343), 'torch.mean', 'torch.mean', (['vf'], {}), '(vf)\n', (4339, 4343), False, 'import torch\n'), ((4387, 4403), 'torch.mean', 'torch.mean', (['logp'], {}), '(logp)\n', (4397, 4403), False, 'import torch\n'), ((4442, 4457), 'torch.mean', 'torch.mean', (['ent'], {}), '(ent)\n', (4452, 4457), False, 'import torch\n'), ((4500, 4519), 'torch.mean', 'torch.mean', (['base_vf'], {}), '(base_vf)\n', (4510, 4519), False, 'import torch\n'), ((4568, 4589), 'torch.mean', 'torch.mean', (['base_logp'], {}), '(base_logp)\n', (4578, 4589), False, 'import torch\n'), ((4633, 4653), 'torch.mean', 'torch.mean', (['base_ent'], {}), '(base_ent)\n', (4643, 4653), False, 'import torch\n'), ((4833, 4853), 'torch.exp', 'torch.exp', (['log_ratio'], {}), '(log_ratio)\n', (4842, 4853), False, 'import torch\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
max_epochs = 6000
init_stddev = 0.0001
source_embedding_size = 2
target_embedding_size = 2
source_state_size = 2
preattention_size = 2
target_state_size = 2
max_seq_len = 10
source_tokens = [
'i like it'.split(' '),
'i hate it'.split(' '),
'i don\'t hate it'.split(' '),
'i don\'t like it'.split(' '),
]
target_tokens = [
'i don\'t like it'.split(' '),
'i don\'t hate it'.split(' '),
'i hate it'.split(' '),
'i like it'.split(' '),
]
source_vocab = [ 'EDGE' ] + sorted({ token for sent in source_tokens for token in sent })
source_token2index = { token: index for (index, token) in enumerate(source_vocab) }
source_index2token = { index: token for (index, token) in enumerate(source_vocab) }
source_max_len = max(len(sent) for sent in source_tokens)
index_source_indexes = []
index_source_lens = []
for sent in source_tokens:
source_lens = len(sent)
source_index = [ source_token2index[token] for token in sent ] + [ 0 for _ in range(source_max_len - source_lens) ]
index_source_lens.append(source_lens)
index_source_indexes.append(source_index)
target_vocab = [ 'EDGE' ] + sorted({ token for sent in target_tokens for token in sent })
target_token2index = { token: index for (index, token) in enumerate(target_vocab) }
target_index2token = { index: token for (index, token) in enumerate(target_vocab) }
target_max_len = max(len(sent) for sent in target_tokens) + 1 #Plus edge token
index_target_prefixes = []
index_target_lens = []
index_target_targets = []
for sent in target_tokens:
target_len = len(sent) + 1 #Plus edge token
target_index = [ target_token2index[token] for token in sent ]
target_prefix = [ target_token2index['EDGE'] ] + target_index + [ 0 for _ in range(target_max_len - target_len) ]
target_target = target_index + [ target_token2index['EDGE'] ] + [ 0 for _ in range(target_max_len - target_len) ]
index_target_prefixes.append(target_prefix)
index_target_lens.append(target_len)
index_target_targets.append(target_target)
g = tf.Graph()
with g.as_default():
source_indexes = tf.placeholder(tf.int32, [None, None], 'source_indexes')
source_lens = tf.placeholder(tf.int32, [None], 'source_lens')
target_prefixes = tf.placeholder(tf.int32, [None, None], 'target_prefixes')
target_lens = tf.placeholder(tf.int32, [None], 'target_lens')
target_targets = tf.placeholder(tf.int32, [None, None], 'target_targets')
batch_size = tf.shape(source_indexes)[0]
source_seq_width = tf.shape(source_indexes)[1]
target_seq_width = tf.shape(target_prefixes)[1]
with tf.variable_scope('source'):
with tf.variable_scope('embedding'):
embedding_matrix = tf.get_variable('embedding_matrix', [len(source_vocab), source_embedding_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
embedded = tf.nn.embedding_lookup(embedding_matrix, source_indexes)
with tf.variable_scope('init_state'):
init_state_fw = tf.get_variable('init_state_fw', [source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
batch_init_fw = tf.tile(tf.reshape(init_state_fw, [1, source_state_size]), [batch_size, 1])
init_state_bw = tf.get_variable('init_state_bw', [source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
batch_init_bw = tf.tile(tf.reshape(init_state_bw, [1, source_state_size]), [batch_size, 1])
with tf.variable_scope('rnn'):
cell_fw = tf.contrib.rnn.GRUCell(source_state_size)
cell_bw = tf.contrib.rnn.GRUCell(source_state_size)
((outputs_fw, outputs_bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedded, sequence_length=source_lens, initial_state_fw=batch_init_fw, initial_state_bw=batch_init_bw)
outputs_ = tf.concat([ outputs_fw, outputs_bw ], axis=2)
outputs_2d_ = tf.reshape(outputs_, [batch_size*source_seq_width, 2*source_state_size])
W = tf.get_variable('W', [2*source_state_size, source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
b = tf.get_variable('b', [source_state_size], tf.float32, tf.zeros_initializer())
source_outputs_2d = tf.matmul(outputs_2d_, W) + b
source_outputs = tf.reshape(source_outputs_2d, [batch_size, source_seq_width, source_state_size])
with tf.variable_scope('targets'):
with tf.variable_scope('embedding'):
embedding_matrix = tf.get_variable('embedding_matrix', [len(target_vocab), target_embedding_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
embedded = tf.nn.embedding_lookup(embedding_matrix, target_prefixes)
with tf.variable_scope('init_state'):
init_state = tf.get_variable('init_state', [target_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
batch_init = tf.tile(tf.reshape(init_state, [1, target_state_size]), [batch_size, 1])
with tf.variable_scope('rnn'):
#Custom RNN cell for producing attention vectors that condition the language model via par-inject
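            #In equation form (additive/Bahdanau-style attention), with source
            #vectors h_j and current decoder state s:
            #  e_j = w2 . tanh(W1 [h_j ; s] + b1) + b2
            #  a   = softmax(e) over the unpadded source positions
            #  c   = sum_j a_j h_j
            #and c is concatenated with the next word embedding before the GRU step.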
class CellAttention(tf.nn.rnn_cell.RNNCell):
def __init__(self):
super(CellAttention, self).__init__()
self.W1 = None
self.b1 = None
self.W2 = None
self.b2 = None
                    self.inner_cell = tf.contrib.rnn.GRUCell(target_state_size) #The inner RNN cell that actually transforms the input and previous state into the next state
@property
def state_size(self):
                    return target_state_size #The state is produced by the inner GRU cell
@property
def output_size(self):
                    return (source_seq_width, target_state_size) #Return the attention vector apart from the next state (to be able to inspect it later)
def build(self, inputs_shape):
self.W1 = self.add_variable('W1', [source_state_size + target_state_size, preattention_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
self.b1 = tf.get_variable('b1', [preattention_size], tf.float32, tf.zeros_initializer())
self.W2 = self.add_variable('W2', [preattention_size, 1], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
self.b2 = tf.get_variable('b2', [1], tf.float32, tf.zeros_initializer())
self.built = True
def call(self, next_inputs, curr_states):
with tf.variable_scope('attention'):
#Replicate the current state for each source sentence word in order to concatenate it with each source sentence word vector
expanded_curr_state = tf.tile(tf.reshape(curr_states, [batch_size, 1, target_state_size]), [1, source_seq_width, 1])
pre_attention_input = tf.concat([ source_outputs, expanded_curr_state ], axis=2)
pre_attention_input_2d = tf.reshape(pre_attention_input, [batch_size*source_seq_width, source_state_size + target_state_size])
pre_attention_2d = tf.tanh(tf.matmul(pre_attention_input_2d, self.W1) + self.b1)
attention_logits = tf.reshape(tf.matmul(pre_attention_2d, self.W2) + self.b2, [batch_size, source_seq_width])
mask = tf.sequence_mask(source_lens, source_seq_width, tf.float32)
attention = tf.nn.softmax(attention_logits*mask + -1e10*(1 - mask))
expanded_attention = tf.tile(tf.reshape(attention, [batch_size, source_seq_width, 1]), [1, 1, source_state_size])
attended_sources = tf.reduce_sum(source_outputs*expanded_attention, axis=1)
#Pass the input and state to the inner cell to produce the next state (input consists of word embedding and attended source)
(new_output, new_state) = self.inner_cell(tf.concat([ attended_sources, next_inputs ], axis=1), curr_states)
return ((attention, new_state), new_state)
cell = CellAttention()
((attentions, outputs), _) = tf.nn.dynamic_rnn(cell, embedded, sequence_length=target_lens, initial_state=batch_init)
with tf.variable_scope('output'):
W = tf.get_variable('W', [target_state_size, len(target_vocab)], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
b = tf.get_variable('b', [len(target_vocab)], tf.float32, tf.zeros_initializer())
outputs_2d = tf.reshape(outputs, [batch_size*target_seq_width, target_state_size])
logits_2d = tf.matmul(outputs_2d, W) + b
logits = tf.reshape(logits_2d, [batch_size, target_seq_width, len(target_vocab)])
probs = tf.nn.softmax(logits)
next_word_probs = probs[:, -1, :]
mask = tf.sequence_mask(target_lens, target_seq_width, tf.float32)
error = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_targets, logits=logits)*mask)/tf.cast(tf.reduce_sum(target_lens), tf.float32)
step = tf.train.AdamOptimizer().minimize(error)
init = tf.global_variables_initializer()
g.finalize()
with tf.Session() as s:
s.run([ init ], { })
(fig, ax) = plt.subplots(1, 1)
plt.ion()
train_errors = list()
print('epoch', 'train error', sep='\t')
for epoch in range(1, max_epochs+1):
s.run([ step ], { source_indexes: index_source_indexes, source_lens: index_source_lens, target_prefixes: index_target_prefixes, target_lens: index_target_lens, target_targets: index_target_targets })
[ train_error ] = s.run([ error ], { source_indexes: index_source_indexes, source_lens: index_source_lens, target_prefixes: index_target_prefixes, target_lens: index_target_lens, target_targets: index_target_targets })
train_errors.append(train_error)
if epoch%100 == 0:
print(epoch, train_error, sep='\t')
ax.cla()
ax.plot(np.arange(len(train_errors)), train_errors, color='red', linestyle='-', label='train')
ax.set_xlim(0, max_epochs)
ax.set_xlabel('epoch')
ax.set_ylim(0.0, 2.0)
ax.set_ylabel('XE') #Cross entropy
ax.grid(True)
ax.set_title('Error progress')
ax.legend()
fig.tight_layout()
plt.draw()
plt.pause(0.0001)
print()
for sent in source_tokens:
source = [ source_token2index[token] for token in sent ]
prefix_prob = 1.0
index_prefix = [ target_token2index['EDGE'] ]
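        #Greedy decoding: repeatedly feed the current prefix, take the argmax of
        #the next-word distribution, and stop once EDGE is produced or max_seq_len
        #is reached (a beam search would keep several candidate prefixes instead).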
for _ in range(max_seq_len):
[ curr_probs ] = s.run([ next_word_probs ], { source_indexes: [ source ], source_lens: [ len(source) ], target_prefixes: [ index_prefix ], target_lens: [ len(index_prefix) ] })
selected_index = np.argmax(curr_probs[0, :])
prefix_prob = prefix_prob*curr_probs[0, selected_index]
index_prefix.append(selected_index)
if selected_index == target_token2index['EDGE']:
break
index_generated = index_prefix[1:]
generated = [ target_index2token[i] for i in index_generated ]
        [ curr_attentions ] = s.run([ attentions ], { source_indexes: [ source ], source_lens: [ len(source) ], target_prefixes: [ index_prefix[:-1] ], target_lens: [ len(index_prefix) - 1 ] }) #Feed the EDGE-prefixed sequence so attention row i matches the state used when generating word i
print('Input sentence: ', ' '.join(sent))
print('Generated sentence:', ' '.join(generated))
print('Sentence probability:', prefix_prob)
print('Attention:')
print('', '\t', *sent)
for i in range(len(generated)):
print('', generated[i]+'\t', np.round(curr_attentions[0, i, :], 2))
print()
fig.show()
|
[
"tensorflow.shape",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.reduce_sum",
"tensorflow.logging.set_verbosity",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.Graph",
"tensorflow.nn.embedding_lookup",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.dynamic_rnn",
"tensorflow.random_normal_initializer",
"tensorflow.concat",
"tensorflow.matmul",
"tensorflow.train.AdamOptimizer",
"numpy.round",
"tensorflow.variable_scope",
"numpy.argmax",
"tensorflow.reshape",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"tensorflow.global_variables_initializer",
"tensorflow.sequence_mask",
"matplotlib.pyplot.subplots"
] |
[((76, 118), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (100, 118), True, 'import tensorflow as tf\n'), ((2201, 2211), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2209, 2211), True, 'import tensorflow as tf\n'), ((2254, 2310), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]', '"""source_indexes"""'], {}), "(tf.int32, [None, None], 'source_indexes')\n", (2268, 2310), True, 'import tensorflow as tf\n'), ((2329, 2376), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""source_lens"""'], {}), "(tf.int32, [None], 'source_lens')\n", (2343, 2376), True, 'import tensorflow as tf\n'), ((2399, 2456), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]', '"""target_prefixes"""'], {}), "(tf.int32, [None, None], 'target_prefixes')\n", (2413, 2456), True, 'import tensorflow as tf\n'), ((2475, 2522), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""target_lens"""'], {}), "(tf.int32, [None], 'target_lens')\n", (2489, 2522), True, 'import tensorflow as tf\n'), ((2544, 2600), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]', '"""target_targets"""'], {}), "(tf.int32, [None, None], 'target_targets')\n", (2558, 2600), True, 'import tensorflow as tf\n'), ((9404, 9463), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['target_lens', 'target_seq_width', 'tf.float32'], {}), '(target_lens, target_seq_width, tf.float32)\n', (9420, 9463), True, 'import tensorflow as tf\n'), ((9698, 9731), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9729, 9731), True, 'import tensorflow as tf\n'), ((2623, 2647), 'tensorflow.shape', 'tf.shape', (['source_indexes'], {}), '(source_indexes)\n', (2631, 2647), True, 'import tensorflow as tf\n'), ((2674, 2698), 'tensorflow.shape', 'tf.shape', (['source_indexes'], {}), '(source_indexes)\n', (2682, 2698), True, 'import tensorflow as tf\n'), ((2725, 2750), 'tensorflow.shape', 'tf.shape', (['target_prefixes'], {}), '(target_prefixes)\n', (2733, 2750), True, 'import tensorflow as tf\n'), ((2768, 2795), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""source"""'], {}), "('source')\n", (2785, 2795), True, 'import tensorflow as tf\n'), ((4634, 4662), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""targets"""'], {}), "('targets')\n", (4651, 4662), True, 'import tensorflow as tf\n'), ((9760, 9772), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9770, 9772), True, 'import tensorflow as tf\n'), ((9829, 9847), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (9841, 9847), True, 'import matplotlib.pyplot as plt\n'), ((9856, 9865), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (9863, 9865), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2840), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding"""'], {}), "('embedding')\n", (2827, 2840), True, 'import tensorflow as tf\n'), ((3038, 3094), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_matrix', 'source_indexes'], {}), '(embedding_matrix, source_indexes)\n', (3060, 3094), True, 'import tensorflow as tf\n'), ((3117, 3148), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""init_state"""'], {}), "('init_state')\n", (3134, 3148), True, 'import tensorflow as tf\n'), ((3681, 3705), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnn"""'], {}), "('rnn')\n", (3698, 3705), True, 'import tensorflow as tf\n'), 
((3729, 3770), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['source_state_size'], {}), '(source_state_size)\n', (3751, 3770), True, 'import tensorflow as tf\n'), ((3793, 3834), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['source_state_size'], {}), '(source_state_size)\n', (3815, 3834), True, 'import tensorflow as tf\n'), ((3879, 4041), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['cell_fw', 'cell_bw', 'embedded'], {'sequence_length': 'source_lens', 'initial_state_fw': 'batch_init_fw', 'initial_state_bw': 'batch_init_bw'}), '(cell_fw, cell_bw, embedded, sequence_length\n =source_lens, initial_state_fw=batch_init_fw, initial_state_bw=\n batch_init_bw)\n', (3910, 4041), True, 'import tensorflow as tf\n'), ((4055, 4098), 'tensorflow.concat', 'tf.concat', (['[outputs_fw, outputs_bw]'], {'axis': '(2)'}), '([outputs_fw, outputs_bw], axis=2)\n', (4064, 4098), True, 'import tensorflow as tf\n'), ((4127, 4203), 'tensorflow.reshape', 'tf.reshape', (['outputs_', '[batch_size * source_seq_width, 2 * source_state_size]'], {}), '(outputs_, [batch_size * source_seq_width, 2 * source_state_size])\n', (4137, 4203), True, 'import tensorflow as tf\n'), ((4539, 4624), 'tensorflow.reshape', 'tf.reshape', (['source_outputs_2d', '[batch_size, source_seq_width, source_state_size]'], {}), '(source_outputs_2d, [batch_size, source_seq_width, source_state_size]\n )\n', (4549, 4624), True, 'import tensorflow as tf\n'), ((4677, 4707), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding"""'], {}), "('embedding')\n", (4694, 4707), True, 'import tensorflow as tf\n'), ((4905, 4962), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_matrix', 'target_prefixes'], {}), '(embedding_matrix, target_prefixes)\n', (4927, 4962), True, 'import tensorflow as tf\n'), ((4985, 5016), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""init_state"""'], {}), "('init_state')\n", (5002, 5016), True, 'import tensorflow as tf\n'), ((5276, 5300), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnn"""'], {}), "('rnn')\n", (5293, 5300), True, 'import tensorflow as tf\n'), ((8688, 8780), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'embedded'], {'sequence_length': 'target_lens', 'initial_state': 'batch_init'}), '(cell, embedded, sequence_length=target_lens,\n initial_state=batch_init)\n', (8705, 8780), True, 'import tensorflow as tf\n'), ((8791, 8818), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {}), "('output')\n", (8808, 8818), True, 'import tensorflow as tf\n'), ((9078, 9149), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[batch_size * target_seq_width, target_state_size]'], {}), '(outputs, [batch_size * target_seq_width, target_state_size])\n', (9088, 9149), True, 'import tensorflow as tf\n'), ((9315, 9336), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (9328, 9336), True, 'import tensorflow as tf\n'), ((9589, 9615), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target_lens'], {}), '(target_lens)\n', (9602, 9615), True, 'import tensorflow as tf\n'), ((9645, 9669), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (9667, 9669), True, 'import tensorflow as tf\n'), ((2965, 3013), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (2993, 3013), True, 'import tensorflow as tf\n'), ((3244, 3292), 'tensorflow.random_normal_initializer', 
'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (3272, 3292), True, 'import tensorflow as tf\n'), ((3330, 3379), 'tensorflow.reshape', 'tf.reshape', (['init_state_fw', '[1, source_state_size]'], {}), '(init_state_fw, [1, source_state_size])\n', (3340, 3379), True, 'import tensorflow as tf\n'), ((3505, 3553), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (3533, 3553), True, 'import tensorflow as tf\n'), ((3591, 3640), 'tensorflow.reshape', 'tf.reshape', (['init_state_bw', '[1, source_state_size]'], {}), '(init_state_bw, [1, source_state_size])\n', (3601, 3640), True, 'import tensorflow as tf\n'), ((4304, 4352), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (4332, 4352), True, 'import tensorflow as tf\n'), ((4424, 4446), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4444, 4446), True, 'import tensorflow as tf\n'), ((4480, 4505), 'tensorflow.matmul', 'tf.matmul', (['outputs_2d_', 'W'], {}), '(outputs_2d_, W)\n', (4489, 4505), True, 'import tensorflow as tf\n'), ((4832, 4880), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (4860, 4880), True, 'import tensorflow as tf\n'), ((5106, 5154), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (5134, 5154), True, 'import tensorflow as tf\n'), ((5189, 5235), 'tensorflow.reshape', 'tf.reshape', (['init_state', '[1, target_state_size]'], {}), '(init_state, [1, target_state_size])\n', (5199, 5235), True, 'import tensorflow as tf\n'), ((8909, 8957), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (8937, 8957), True, 'import tensorflow as tf\n'), ((9029, 9051), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (9049, 9051), True, 'import tensorflow as tf\n'), ((9172, 9196), 'tensorflow.matmul', 'tf.matmul', (['outputs_2d', 'W'], {}), '(outputs_2d, W)\n', (9181, 9196), True, 'import tensorflow as tf\n'), ((9490, 9578), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'target_targets', 'logits': 'logits'}), '(labels=target_targets,\n logits=logits)\n', (9536, 9578), True, 'import tensorflow as tf\n'), ((11068, 11078), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (11076, 11078), True, 'import matplotlib.pyplot as plt\n'), ((11095, 11112), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (11104, 11112), True, 'import matplotlib.pyplot as plt\n'), ((11603, 11630), 'numpy.argmax', 'np.argmax', (['curr_probs[0, :]'], {}), '(curr_probs[0, :])\n', (11612, 11630), True, 'import numpy as np\n'), ((5771, 5812), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['target_state_size'], {}), '(target_state_size)\n', (5793, 5812), True, 'import tensorflow as tf\n'), ((12542, 12579), 'numpy.round', 'np.round', (['curr_attentions[0, i, :]', '(2)'], {}), '(curr_attentions[0, i, :], 2)\n', (12550, 12579), True, 'import numpy as np\n'), ((6409, 6457), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (6437, 6457), True, 'import tensorflow as tf\n'), ((6544, 6566), 
'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6564, 6566), True, 'import tensorflow as tf\n'), ((6658, 6706), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (6686, 6706), True, 'import tensorflow as tf\n'), ((6777, 6799), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6797, 6799), True, 'import tensorflow as tf\n'), ((6943, 6973), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), "('attention')\n", (6960, 6973), True, 'import tensorflow as tf\n'), ((7335, 7391), 'tensorflow.concat', 'tf.concat', (['[source_outputs, expanded_curr_state]'], {'axis': '(2)'}), '([source_outputs, expanded_curr_state], axis=2)\n', (7344, 7391), True, 'import tensorflow as tf\n'), ((7443, 7551), 'tensorflow.reshape', 'tf.reshape', (['pre_attention_input', '[batch_size * source_seq_width, source_state_size + target_state_size]'], {}), '(pre_attention_input, [batch_size * source_seq_width, \n source_state_size + target_state_size])\n', (7453, 7551), True, 'import tensorflow as tf\n'), ((7842, 7901), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['source_lens', 'source_seq_width', 'tf.float32'], {}), '(source_lens, source_seq_width, tf.float32)\n', (7858, 7901), True, 'import tensorflow as tf\n'), ((7938, 8006), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(attention_logits * mask + -10000000000.0 * (1 - mask))'], {}), '(attention_logits * mask + -10000000000.0 * (1 - mask))\n', (7951, 8006), True, 'import tensorflow as tf\n'), ((8176, 8234), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(source_outputs * expanded_attention)'], {'axis': '(1)'}), '(source_outputs * expanded_attention, axis=1)\n', (8189, 8234), True, 'import tensorflow as tf\n'), ((8461, 8511), 'tensorflow.concat', 'tf.concat', (['[attended_sources, next_inputs]'], {'axis': '(1)'}), '([attended_sources, next_inputs], axis=1)\n', (8470, 8511), True, 'import tensorflow as tf\n'), ((7177, 7236), 'tensorflow.reshape', 'tf.reshape', (['curr_states', '[batch_size, 1, target_state_size]'], {}), '(curr_states, [batch_size, 1, target_state_size])\n', (7187, 7236), True, 'import tensorflow as tf\n'), ((8048, 8104), 'tensorflow.reshape', 'tf.reshape', (['attention', '[batch_size, source_seq_width, 1]'], {}), '(attention, [batch_size, source_seq_width, 1])\n', (8058, 8104), True, 'import tensorflow as tf\n'), ((7621, 7663), 'tensorflow.matmul', 'tf.matmul', (['pre_attention_input_2d', 'self.W1'], {}), '(pre_attention_input_2d, self.W1)\n', (7630, 7663), True, 'import tensorflow as tf\n'), ((7730, 7766), 'tensorflow.matmul', 'tf.matmul', (['pre_attention_2d', 'self.W2'], {}), '(pre_attention_2d, self.W2)\n', (7739, 7766), True, 'import tensorflow as tf\n')]
|
import numpy as np
from ctapipe.core import Component
from ctapipe.containers import MuonRingContainer
from .fitting import kundu_chaudhuri_circle_fit, taubin_circle_fit
import traitlets as traits
# the fit methods do not expose the same interface, so we
# force the same interface onto them, here.
# we also modify their names slightly, since the names are
# exposed to the user via the string traitlet `fit_method`
def kundu_chaudhuri(x, y, weights, mask):
"""kundu_chaudhuri_circle_fit with x, y, weights, mask interface"""
return kundu_chaudhuri_circle_fit(x[mask], y[mask], weights[mask])
def taubin(x, y, weights, mask):
"""taubin_circle_fit with x, y, weights, mask interface"""
return taubin_circle_fit(x, y, mask)
FIT_METHOD_BY_NAME = {m.__name__: m for m in [kundu_chaudhuri, taubin]}
__all__ = ["MuonRingFitter"]
class MuonRingFitter(Component):
"""Different ring fit algorithms for muon rings"""
fit_method = traits.CaselessStrEnum(
list(FIT_METHOD_BY_NAME.keys()),
default_value=list(FIT_METHOD_BY_NAME.keys())[0],
).tag(config=True)
def __call__(self, x, y, img, mask):
"""allows any fit to be called in form of
MuonRingFitter(fit_method = "name of the fit")
"""
fit_function = FIT_METHOD_BY_NAME[self.fit_method]
radius, center_x, center_y = fit_function(x, y, img, mask)
return MuonRingContainer(
center_x=center_x,
center_y=center_y,
radius=radius,
center_phi=np.arctan2(center_y, center_x),
center_distance=np.sqrt(center_x ** 2 + center_y ** 2),
)
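# Example usage (a sketch; assumes flat pixel coordinate arrays `x` and `y`, an
# image `img` with one intensity per pixel and a boolean pixel `mask` are already
# available, e.g. from a cleaned camera image):
#
#     fitter = MuonRingFitter(fit_method='taubin')
#     ring = fitter(x, y, img, mask)
#     print(ring.radius, ring.center_x, ring.center_y)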
|
[
"numpy.sqrt",
"numpy.arctan2"
] |
[((1539, 1569), 'numpy.arctan2', 'np.arctan2', (['center_y', 'center_x'], {}), '(center_y, center_x)\n', (1549, 1569), True, 'import numpy as np\n'), ((1599, 1637), 'numpy.sqrt', 'np.sqrt', (['(center_x ** 2 + center_y ** 2)'], {}), '(center_x ** 2 + center_y ** 2)\n', (1606, 1637), True, 'import numpy as np\n')]
|
'''
This script makes an image very similar to Figure 2 of Hutchison et al. 2019 (https://arxiv.org/pdf/1905.08812.pdf). There are undoubtedly simpler ways to make this figure -- this is how I chose to code it up.
Because the figure in the paper uses some proprietary data, the code below will generate fake data to be plotted.
Credit: <NAME>
<EMAIL>
Texas A&M University
'''
_author_ = '<NAME>'
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import matplotlib.gridspec as gridspec
from matplotlib.patches import Polygon
import matplotlib.patheffects as PathEffects
from mpl_toolkits.axes_grid1.inset_locator import inset_axes # axes_grid1 is the maintained module path
from matplotlib.lines import Line2D
from matplotlib import patches
# -- Generating fake data -- #
# -------------------------- #
np.random.seed(seed=3) # fixing the random seed so we can get the same result
gauss2d = np.loadtxt('gaussian2D_sig2_kernel7.txt') # 2D gaussian kernel for the fake 2D emission line
gauss1d = np.loadtxt('gaussian1D_sig2_kernel7.txt') # 1D gaussian kernel for the fake 1D emission line
# 1D & 2D gaussian pulled from here (because it's faster for this exercise):
# http://dev.theomader.com/gaussian-kernel-calculator/
noise1d = np.random.uniform(-1,1,250) # noise for 1D spectrum
noise2d = np.random.uniform(-1,1,(250,70)) # noise for 2D spectrum
shape = noise2d.shape
xcen, ycen = int(shape[0]/2), int(shape[1]/2)
galspec2d_line1 = noise2d.copy()
galspec2d_line1[xcen-3:xcen+4,ycen-3:ycen+4] += gauss2d * 35 # 2D emission line
galspec1d_line1 = noise1d.copy()
galspec1d_line1[xcen-3:xcen+4] += gauss1d * 15 # Lya 1D emission line
galspec2d_line2 = galspec2d_line1.copy()
galspec2d_line2[xcen+17:xcen+24,ycen-3:ycen+4] += gauss2d * 35 # 2D emission line
galspec1d_line2 = galspec1d_line1.copy()
galspec1d_line2[xcen+17:xcen+24] += gauss1d * 10 # CIII] 1D doublet emission line
noisegal = np.random.uniform(-1,1,(50,35)) # noise for photometry of 'galaxy'
galaxy = noisegal.copy()
galaxy[22:29,13:20] += gauss2d * 25 # add signal for galaxy shape
galaxy[24:31,16:23] += gauss2d * 25 # add signal for galaxy shape
wavelength = np.arange(len(galspec1d_line1)) # fake wavelength range
# fake errors
np.random.seed(seed=13) # fixing the random seed so we can get the same result
error1d = np.random.random(len(noise1d)) + 0.4
# ---------------------------#
# -- Initializing the image -- #
# ---------------------------- #
f = plt.figure(figsize=(10.5,9))
gs0 = gridspec.GridSpec(2,1,height_ratios=[1,0.9],hspace=0.1) # the main subplots
# ------------- #
# -- TOP ROW -- #
# ------------- #
gs01 = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=gs0[0], # the top panel's subplots
width_ratios=[1.2,2],wspace=0.22)
# --> RIGHT SIDE: the Lya spectrum
line = 'lya'
band = 'Y'
# The subplot gs001 is made up of 3 subplots where the top and bottom are just used to
# center the middle one more accurately -- they aren't necessary if you don't care THAT much :)
gs001 = gridspec.GridSpecFromSubplotSpec(3,1,subplot_spec=gs01[1],
height_ratios=[0.05,1,0.12],hspace=0.0)
# This is the real subplot for the data (the middle one from gs001), split into 2 subplots
# so that we can have the 2D spectrum on top and the 1D on the bottom
gs011 = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs001[1],
height_ratios=[1.25,2],hspace=0.0)
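# Rough sketch of the nested top-row layout (the left column, gs002, is created
# further below for the F160W stamp):
#
#   gs0[0] -> gs01:  gs01[0] (F160W stamp) | gs01[1] -> gs001 -> gs011
#                                                               gs011[0]: 2D spectrum
#                                                               gs011[1]: 1D spectrum
#
# gs001's thin first and last rows are just padding used to center gs011 vertically.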
# 2D spectrum
ax01 = plt.Subplot(f, gs011[0])
ax01.imshow(galspec2d_line1[75:175,28:42].T, # zooming in for the sake of the example
aspect='auto',origin='lower',cmap='gray',clim=(-1.5,2.3))
# removing the tickmarks and labels for the 2D spectrum
ax01.xaxis.set_ticks_position('none')
ax01.yaxis.set_ticks_position('none')
ax01.set_yticklabels([])
ax01.set_xticklabels([])
# white text with black outline
txt = ax01.text(0.023,0.73,'%s-band'%(band), size=20.5, color='w',transform=ax01.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
f.add_subplot(ax01) # adds the subplot to the image
# 1D spectrum
ax02 = plt.Subplot(f, gs011[1])
ax02.step(wavelength,galspec1d_line1,where='mid',lw=2.3)
ax02.fill_between(wavelength,error1d,error1d*-1,alpha=0.2)
ax02.set_xlim(wavelength[74],wavelength[174])
ax02.set_ylabel(r'F$_{\lambda}$ [10$^{-18}$ erg/s/cm$^2$/$\AA$]',fontsize=16)
ax02.set_xlabel('observed wavelength [microns]',labelpad=5,fontsize=16)
f.add_subplot(ax02) # adds the subplot to the image
# --> LEFT SIDE: F160W STAMP
gs002 = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=gs01[0])
ax002 = plt.Subplot(f, gs002[0]) # no need to add extra tiny subplots for padding here!
ax002.imshow(galaxy,aspect='auto',origin='upper',cmap='gray',clim=(-1,2))
# removing the tickmarks and labels for the 2D spectrum
ax002.xaxis.set_ticks_position('none')
ax002.yaxis.set_ticks_position('none')
ax002.set_yticklabels([])
ax002.set_xticklabels([])
# white text with black outline
txt = ax002.text(0.03,0.90,'F160W',ha='left',size=22.5, color='w',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
# adding years for the slit layouts, using the set_path_effects to "bold" the text
txt = ax002.text(0.04,0.13,'2016',size=19.5, color='#CF6060',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#CF6060')])
txt = ax002.text(0.04,0.22,'2014',size=19.5, color='#F4D03F',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#F4D03F')])
txt = ax002.text(0.04,0.04,'2017',size=19.5, color='#70B5E3',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#70B5E3')])
# plotting slits over the regions in the image
# loc: 2, 3, 4, 1
ax002.add_patch(Polygon([[7,7],[22,45],[25.5,43],[11,5]], # 2016 slit
zorder=3,facecolor='none',lw=1.8,edgecolor='#CF6060'))
ax002.add_patch(Polygon([[15,5],[15,45],[20,45],[20,5]], # 2014 slit
zorder=3,facecolor='none',lw=1.8,edgecolor='#F4D03F'))
ax002.add_patch(Polygon([[5,23],[5,28],[28,28],[28,23]], # 2017 slit
zorder=3,facecolor='none',lw=1.8,edgecolor='#70B5E3'))
f.add_subplot(ax002) # adds the subplot to the figure
# ------------------------------------------------------------------------- #
# ---------------- #
# -- BOTTOM ROW -- #
# ---------------- #
# --> the CIII] spectrum
line = 'ciii'
band = 'H'
# similar padding process done as with the Lya spectrum (where only the middle one matters)
gs02 = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec=gs0[1],width_ratios=[0.28,2,0.13],wspace=0.0)
# splitting the middle subplot from above into two, so that we can have 2D on top and 1D on bottom
gs003 = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs02[1],height_ratios=[1.75,2],hspace=0.0)
# 2D spectrum
ax21 = plt.Subplot(f, gs003[0])
ax21.imshow(galspec2d_line2[:,15:55].T,aspect='auto',origin='lower',cmap='gray',clim=(-1.5,2.2))
# removing the tickmarks and labels for the 2D spectrum
ax21.xaxis.set_ticks_position('none')
ax21.yaxis.set_ticks_position('none')
ax21.set_yticklabels([])
ax21.set_xticklabels([])
# white text with black outline
txt = ax21.text(0.02,0.75,'%s-band'%(band), size=16+8.5, color='w',transform=ax21.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
f.add_subplot(ax21) # adds subplot to the figure
# 1D spectrum
ax22 = plt.Subplot(f, gs003[1])
ax22.step(wavelength,galspec1d_line2,where='mid',lw=2.7)
ax22.fill_between(wavelength,error1d,error1d*-1,alpha=0.2)
ax22.set_xlim(wavelength[0],wavelength[-1])
ax22.set_ylabel(r'F$_{\lambda}$ [10$^{-19}$ erg/s/cm$^{2}$/$\AA$]',fontsize=16)
ax22.set_xlabel('observed wavelength [microns]',fontsize=16)
f.add_subplot(ax22) # adds subplot to the figure
# saving figure
plt.savefig('figure.pdf')
#plt.show()
plt.close('all')
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.Subplot",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"numpy.random.uniform",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"numpy.loadtxt",
"matplotlib.patheffects.withStroke",
"matplotlib.patches.Polygon"
] |
[((808, 830), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(3)'}), '(seed=3)\n', (822, 830), True, 'import numpy as np\n'), ((897, 938), 'numpy.loadtxt', 'np.loadtxt', (['"""gaussian2D_sig2_kernel7.txt"""'], {}), "('gaussian2D_sig2_kernel7.txt')\n", (907, 938), True, 'import numpy as np\n'), ((973, 1014), 'numpy.loadtxt', 'np.loadtxt', (['"""gaussian1D_sig2_kernel7.txt"""'], {}), "('gaussian1D_sig2_kernel7.txt')\n", (983, 1014), True, 'import numpy as np\n'), ((1182, 1211), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(250)'], {}), '(-1, 1, 250)\n', (1199, 1211), True, 'import numpy as np\n'), ((1244, 1279), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(250, 70)'], {}), '(-1, 1, (250, 70))\n', (1261, 1279), True, 'import numpy as np\n'), ((1845, 1879), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(50, 35)'], {}), '(-1, 1, (50, 35))\n', (1862, 1879), True, 'import numpy as np\n'), ((2154, 2177), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(13)'}), '(seed=13)\n', (2168, 2177), True, 'import numpy as np\n'), ((2384, 2413), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.5, 9)'}), '(figsize=(10.5, 9))\n', (2394, 2413), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2478), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[1, 0.9]', 'hspace': '(0.1)'}), '(2, 1, height_ratios=[1, 0.9], hspace=0.1)\n', (2436, 2478), True, 'import matplotlib.gridspec as gridspec\n'), ((2559, 2659), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(2)'], {'subplot_spec': 'gs0[0]', 'width_ratios': '[1.2, 2]', 'wspace': '(0.22)'}), '(1, 2, subplot_spec=gs0[0], width_ratios=[\n 1.2, 2], wspace=0.22)\n', (2591, 2659), True, 'import matplotlib.gridspec as gridspec\n'), ((2938, 3046), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(3)', '(1)'], {'subplot_spec': 'gs01[1]', 'height_ratios': '[0.05, 1, 0.12]', 'hspace': '(0.0)'}), '(3, 1, subplot_spec=gs01[1], height_ratios=\n [0.05, 1, 0.12], hspace=0.0)\n', (2970, 3046), True, 'import matplotlib.gridspec as gridspec\n'), ((3213, 3316), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'gs001[1]', 'height_ratios': '[1.25, 2]', 'hspace': '(0.0)'}), '(2, 1, subplot_spec=gs001[1], height_ratios\n =[1.25, 2], hspace=0.0)\n', (3245, 3316), True, 'import matplotlib.gridspec as gridspec\n'), ((3338, 3362), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs011[0]'], {}), '(f, gs011[0])\n', (3349, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3977, 4001), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs011[1]'], {}), '(f, gs011[1])\n', (3988, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4466), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(1)'], {'subplot_spec': 'gs01[0]'}), '(1, 1, subplot_spec=gs01[0])\n', (4438, 4466), True, 'import matplotlib.gridspec as gridspec\n'), ((4473, 4497), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs002[0]'], {}), '(f, gs002[0])\n', (4484, 4497), True, 'import matplotlib.pyplot as plt\n'), ((6469, 6575), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(3)'], {'subplot_spec': 'gs0[1]', 'width_ratios': '[0.28, 2, 0.13]', 'wspace': '(0.0)'}), '(1, 3, subplot_spec=gs0[1], width_ratios=[\n 0.28, 2, 0.13], wspace=0.0)\n', (6501, 6575), True, 'import 
matplotlib.gridspec as gridspec\n'), ((6673, 6775), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'gs02[1]', 'height_ratios': '[1.75, 2]', 'hspace': '(0.0)'}), '(2, 1, subplot_spec=gs02[1], height_ratios=\n [1.75, 2], hspace=0.0)\n', (6705, 6775), True, 'import matplotlib.gridspec as gridspec\n'), ((6788, 6812), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs003[0]'], {}), '(f, gs003[0])\n', (6799, 6812), True, 'import matplotlib.pyplot as plt\n'), ((7367, 7391), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs003[1]'], {}), '(f, gs003[1])\n', (7378, 7391), True, 'import matplotlib.pyplot as plt\n'), ((7760, 7785), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure.pdf"""'], {}), "('figure.pdf')\n", (7771, 7785), True, 'import matplotlib.pyplot as plt\n'), ((7798, 7814), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7807, 7814), True, 'import matplotlib.pyplot as plt\n'), ((5745, 5854), 'matplotlib.patches.Polygon', 'Polygon', (['[[7, 7], [22, 45], [25.5, 43], [11, 5]]'], {'zorder': '(3)', 'facecolor': '"""none"""', 'lw': '(1.8)', 'edgecolor': '"""#CF6060"""'}), "([[7, 7], [22, 45], [25.5, 43], [11, 5]], zorder=3, facecolor='none',\n lw=1.8, edgecolor='#CF6060')\n", (5752, 5854), False, 'from matplotlib.patches import Polygon\n'), ((5876, 5984), 'matplotlib.patches.Polygon', 'Polygon', (['[[15, 5], [15, 45], [20, 45], [20, 5]]'], {'zorder': '(3)', 'facecolor': '"""none"""', 'lw': '(1.8)', 'edgecolor': '"""#F4D03F"""'}), "([[15, 5], [15, 45], [20, 45], [20, 5]], zorder=3, facecolor='none',\n lw=1.8, edgecolor='#F4D03F')\n", (5883, 5984), False, 'from matplotlib.patches import Polygon\n'), ((6006, 6114), 'matplotlib.patches.Polygon', 'Polygon', (['[[5, 23], [5, 28], [28, 28], [28, 23]]'], {'zorder': '(3)', 'facecolor': '"""none"""', 'lw': '(1.8)', 'edgecolor': '"""#70B5E3"""'}), "([[5, 23], [5, 28], [28, 28], [28, 23]], zorder=3, facecolor='none',\n lw=1.8, edgecolor='#70B5E3')\n", (6013, 6114), False, 'from matplotlib.patches import Polygon\n'), ((3849, 3900), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""k"""'}), "(linewidth=3, foreground='k')\n", (3871, 3900), True, 'import matplotlib.patheffects as PathEffects\n'), ((4962, 5013), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""k"""'}), "(linewidth=3, foreground='k')\n", (4984, 5013), True, 'import matplotlib.patheffects as PathEffects\n'), ((5210, 5270), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(1.18)', 'foreground': '"""#CF6060"""'}), "(linewidth=1.18, foreground='#CF6060')\n", (5232, 5270), True, 'import matplotlib.patheffects as PathEffects\n'), ((5383, 5443), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(1.18)', 'foreground': '"""#F4D03F"""'}), "(linewidth=1.18, foreground='#F4D03F')\n", (5405, 5443), True, 'import matplotlib.patheffects as PathEffects\n'), ((5556, 5616), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(1.18)', 'foreground': '"""#70B5E3"""'}), "(linewidth=1.18, foreground='#70B5E3')\n", (5578, 5616), True, 'import matplotlib.patheffects as PathEffects\n'), ((7241, 7292), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""k"""'}), "(linewidth=3, foreground='k')\n", (7263, 7292), True, 'import matplotlib.patheffects 
as PathEffects\n')]
|
import torch
from torch.autograd import Variable
from util.util import *
from util.data_util import *
import numpy as np
from PIL import Image
from data.base_dataset import get_transform_params, get_raw_transform_fn, \
get_transform_fn, get_soft_bbox, get_masked_image
from util.data_util import crop_canvas, paste_canvas
class JointInference():
def __init__(self, joint_opt):
###########################
# Argument Parsing
###########################
from options.box2mask_test_options import BoxToMaskTestOptions as MaskGenTestOption
from options.mask2image_test_options import MaskToImageTestOptions as ImgGenTestOption
#print('++++++++++++++++++++++++MaskGenTestOption',MaskGenTestOption)
self.opt_maskgen = load_script_to_opt(joint_opt.maskgen_script, MaskGenTestOption)
self.opt_imggen = load_script_to_opt(joint_opt.imggen_script, ImgGenTestOption)
# TODO(sh): make this part less hacky
self.opt_maskgen.gpu_ids = self.opt_imggen.gpu_ids = joint_opt.gpu_ids
###########################
# Model Initialization
###########################
from .models import create_model
self.G_box2mask = create_model(self.opt_maskgen)
self.G_mask2img = create_model(self.opt_imggen)
def sample_bbox(self, bbox_originals, opt, random=False):
candidate_list = []
# sample object based on size
for bbox in bbox_originals:
cls = bbox['cls']
xmin = bbox['bbox'][0]
ymin = bbox['bbox'][1]
xmax = bbox['bbox'][2]
ymax = bbox['bbox'][3]
box_w, box_h = xmax - xmin, ymax - ymin
min_axis = min(box_w, box_h)
max_axis = max(box_w, box_h)
if max_axis < opt.min_box_size:
continue
candidate_list.append(bbox)
if not random and len(candidate_list) > 0:
# Sample from bbox within size limit
return np.random.choice(candidate_list)
else:
# Random sample
return np.random.choice(bbox_originals)
def sample_window(self, img, label, bbox_sampled):
pass
def normalize_input(self, img, label, normalize_image=False):
tnfm_image_raw = get_raw_transform_fn(normalize=normalize_image)
tnfm_label_raw = get_raw_transform_fn(normalize=False)
return tnfm_image_raw(img), tnfm_label_raw(label) * 255.0
def gen_layout(self, bbox_sampled, label_original, opt):
# crop canvas
input_dict = crop_canvas(bbox_sampled, label_original, opt)
# generate layout
with torch.no_grad():
label_generated = self.G_box2mask.evaluate({
'label_map': Variable(input_dict['label']),
'mask_ctx_in': Variable(input_dict['mask_ctx_in']),
'mask_out': Variable(input_dict['mask_out']),
'mask_in': Variable(input_dict['mask_in']),
'cls': Variable(input_dict['cls']),
'label_map_orig': Variable(input_dict['label_orig']),
'mask_ctx_in_orig': Variable(input_dict['mask_ctx_in_orig']),
'mask_out_orig': Variable(input_dict['mask_out_orig'])
}, target_size=(input_dict['label_orig'].size()[2:4]))
# paste canvas
label_canvas = paste_canvas(label_original, label_generated.data, \
input_dict, resize=False)
return label_canvas, input_dict, label_generated.data
def gen_image(self, bbox_sampled, img_original, label_generated, opt):
# crop canvas
input_dict = crop_canvas(bbox_sampled, label_generated, opt, \
img_original=img_original, transform_img=True)
        # generate image
with torch.no_grad():
img_generated = self.G_mask2img.inference(
Variable(input_dict['label']),
Variable(torch.zeros_like(input_dict['label'])),
Variable(input_dict['image']),
Variable(input_dict['mask_in']),
Variable(input_dict['mask_out'])
)
# paste canvas
img_canvas = paste_canvas(img_original, (img_generated.data+1)/2, \
input_dict, method=Image.BICUBIC, is_img=True)
return img_canvas, input_dict, img_generated.data
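# Typical usage sketch (variable names here are hypothetical; assumes the bbox
# list, the original label map and the original image are loaded elsewhere):
#
#     inference = JointInference(joint_opt)
#     bbox = inference.sample_bbox(bbox_originals, inference.opt_maskgen)
#     label_canvas, _, _ = inference.gen_layout(bbox, label_original, inference.opt_maskgen)
#     img_canvas, _, _ = inference.gen_image(bbox, img_original, label_canvas, inference.opt_imggen)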
|
[
"util.data_util.crop_canvas",
"numpy.random.choice",
"util.data_util.paste_canvas",
"data.base_dataset.get_raw_transform_fn",
"torch.no_grad",
"torch.zeros_like",
"torch.autograd.Variable"
] |
[((2324, 2371), 'data.base_dataset.get_raw_transform_fn', 'get_raw_transform_fn', ([], {'normalize': 'normalize_image'}), '(normalize=normalize_image)\n', (2344, 2371), False, 'from data.base_dataset import get_transform_params, get_raw_transform_fn, get_transform_fn, get_soft_bbox, get_masked_image\n'), ((2397, 2434), 'data.base_dataset.get_raw_transform_fn', 'get_raw_transform_fn', ([], {'normalize': '(False)'}), '(normalize=False)\n', (2417, 2434), False, 'from data.base_dataset import get_transform_params, get_raw_transform_fn, get_transform_fn, get_soft_bbox, get_masked_image\n'), ((2606, 2652), 'util.data_util.crop_canvas', 'crop_canvas', (['bbox_sampled', 'label_original', 'opt'], {}), '(bbox_sampled, label_original, opt)\n', (2617, 2652), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((3401, 3477), 'util.data_util.paste_canvas', 'paste_canvas', (['label_original', 'label_generated.data', 'input_dict'], {'resize': '(False)'}), '(label_original, label_generated.data, input_dict, resize=False)\n', (3413, 3477), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((3674, 3772), 'util.data_util.crop_canvas', 'crop_canvas', (['bbox_sampled', 'label_generated', 'opt'], {'img_original': 'img_original', 'transform_img': '(True)'}), '(bbox_sampled, label_generated, opt, img_original=img_original,\n transform_img=True)\n', (3685, 3772), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((4210, 4318), 'util.data_util.paste_canvas', 'paste_canvas', (['img_original', '((img_generated.data + 1) / 2)', 'input_dict'], {'method': 'Image.BICUBIC', 'is_img': '(True)'}), '(img_original, (img_generated.data + 1) / 2, input_dict, method\n =Image.BICUBIC, is_img=True)\n', (4222, 4318), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((2036, 2068), 'numpy.random.choice', 'np.random.choice', (['candidate_list'], {}), '(candidate_list)\n', (2052, 2068), True, 'import numpy as np\n'), ((2130, 2162), 'numpy.random.choice', 'np.random.choice', (['bbox_originals'], {}), '(bbox_originals)\n', (2146, 2162), True, 'import numpy as np\n'), ((2693, 2708), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2706, 2708), False, 'import torch\n'), ((3823, 3838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3836, 3838), False, 'import torch\n'), ((3911, 3940), 'torch.autograd.Variable', 'Variable', (["input_dict['label']"], {}), "(input_dict['label'])\n", (3919, 3940), False, 'from torch.autograd import Variable\n'), ((4023, 4052), 'torch.autograd.Variable', 'Variable', (["input_dict['image']"], {}), "(input_dict['image'])\n", (4031, 4052), False, 'from torch.autograd import Variable\n'), ((4070, 4101), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_in']"], {}), "(input_dict['mask_in'])\n", (4078, 4101), False, 'from torch.autograd import Variable\n'), ((4119, 4151), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_out']"], {}), "(input_dict['mask_out'])\n", (4127, 4151), False, 'from torch.autograd import Variable\n'), ((2796, 2825), 'torch.autograd.Variable', 'Variable', (["input_dict['label']"], {}), "(input_dict['label'])\n", (2804, 2825), False, 'from torch.autograd import Variable\n'), ((2858, 2893), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_ctx_in']"], {}), "(input_dict['mask_ctx_in'])\n", (2866, 2893), False, 'from torch.autograd import Variable\n'), ((2923, 2955), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_out']"], {}), "(input_dict['mask_out'])\n", (2931, 2955), False, 'from 
torch.autograd import Variable\n'), ((2984, 3015), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_in']"], {}), "(input_dict['mask_in'])\n", (2992, 3015), False, 'from torch.autograd import Variable\n'), ((3040, 3067), 'torch.autograd.Variable', 'Variable', (["input_dict['cls']"], {}), "(input_dict['cls'])\n", (3048, 3067), False, 'from torch.autograd import Variable\n'), ((3103, 3137), 'torch.autograd.Variable', 'Variable', (["input_dict['label_orig']"], {}), "(input_dict['label_orig'])\n", (3111, 3137), False, 'from torch.autograd import Variable\n'), ((3175, 3215), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_ctx_in_orig']"], {}), "(input_dict['mask_ctx_in_orig'])\n", (3183, 3215), False, 'from torch.autograd import Variable\n'), ((3250, 3287), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_out_orig']"], {}), "(input_dict['mask_out_orig'])\n", (3258, 3287), False, 'from torch.autograd import Variable\n'), ((3967, 4004), 'torch.zeros_like', 'torch.zeros_like', (["input_dict['label']"], {}), "(input_dict['label'])\n", (3983, 4004), False, 'import torch\n')]
|
r"""Train an EfficientNet classifier.
Currently, the implementation of multi-label multi-class classification is
non-functional.
During training, start tensorboard from within the classification/ directory:
tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
Example usage:
python train_classifier_tf.py run_idfg /ssd/crops_sq \
-m "efficientnet-b0" --pretrained --finetune --label-weighted \
--epochs 50 --batch-size 512 --lr 1e-4 \
--seed 123 \
--logdir run_idfg
"""
from __future__ import annotations
import argparse
from collections import defaultdict
from collections.abc import Callable, Mapping, MutableMapping, Sequence
from datetime import datetime
import json
import os
from typing import Any, Optional
import uuid
import numpy as np
import sklearn.metrics
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import tqdm
from classification.train_utils import (
HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
imgs_with_confidences, load_dataset_csv, prefix_all_keys)
from visualization import plot_utils
AUTOTUNE = tf.data.experimental.AUTOTUNE
# match pytorch EfficientNet model names
EFFICIENTNET_MODELS: Mapping[str, Mapping[str, Any]] = {
'efficientnet-b0': dict(cls='EfficientNetB0', img_size=224, dropout=0.2),
'efficientnet-b1': dict(cls='EfficientNetB1', img_size=240, dropout=0.2),
'efficientnet-b2': dict(cls='EfficientNetB2', img_size=260, dropout=0.3),
'efficientnet-b3': dict(cls='EfficientNetB3', img_size=300, dropout=0.3),
'efficientnet-b4': dict(cls='EfficientNetB4', img_size=380, dropout=0.4),
'efficientnet-b5': dict(cls='EfficientNetB5', img_size=456, dropout=0.4),
'efficientnet-b6': dict(cls='EfficientNetB6', img_size=528, dropout=0.5),
'efficientnet-b7': dict(cls='EfficientNetB7', img_size=600, dropout=0.5)
}
def create_dataset(
img_files: Sequence[str],
labels: Sequence[Any],
sample_weights: Optional[Sequence[float]] = None,
img_base_dir: str = '',
transform: Optional[Callable[[tf.Tensor], Any]] = None,
target_transform: Optional[Callable[[Any], Any]] = None,
cache: bool | str = False
) -> tf.data.Dataset:
"""Create a tf.data.Dataset.
The dataset returns elements (img, label, img_file, sample_weight) if
sample_weights is not None, or (img, label, img_file) if
sample_weights=None.
img: tf.Tensor, shape [H, W, 3], type uint8
label: tf.Tensor
img_file: tf.Tensor, scalar, type str
sample_weight: tf.Tensor, scalar, type float32
Possible TODO: oversample the imbalanced classes
see tf.data.experimental.sample_from_datasets
Args:
img_files: list of str, relative paths from img_base_dir
labels: list of int if multilabel=False
sample_weights: optional list of float
img_base_dir: str, base directory for images
transform: optional transform to apply to a single uint8 JPEG image
target_transform: optional transform to apply to a single label
cache: bool or str, cache images in memory if True, cache images to
a file on disk if a str
Returns: tf.data.Dataset
"""
# images dataset
img_ds = tf.data.Dataset.from_tensor_slices(img_files)
img_ds = img_ds.map(lambda p: tf.io.read_file(img_base_dir + os.sep + p),
num_parallel_calls=AUTOTUNE)
# for smaller disk / memory usage, we cache the raw JPEG bytes instead
# of the decoded Tensor
if isinstance(cache, str):
img_ds = img_ds.cache(cache)
elif cache:
img_ds = img_ds.cache()
# convert JPEG bytes to a 3D uint8 Tensor
# keras EfficientNet already includes normalization from [0, 255] to [0, 1],
# so we don't need to do that here
img_ds = img_ds.map(lambda img: tf.io.decode_jpeg(img, channels=3))
if transform:
img_ds = img_ds.map(transform, num_parallel_calls=AUTOTUNE)
# labels dataset
labels_ds = tf.data.Dataset.from_tensor_slices(labels)
if target_transform:
labels_ds = labels_ds.map(target_transform, num_parallel_calls=AUTOTUNE)
# img_files dataset
img_files_ds = tf.data.Dataset.from_tensor_slices(img_files)
if sample_weights is None:
return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds))
# weights dataset
weights_ds = tf.data.Dataset.from_tensor_slices(sample_weights)
return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds, weights_ds))
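# Example (a sketch with made-up file names): build a small dataset of two crops
# and iterate over its (img, label, img_file) elements eagerly.
#
#     ds = create_dataset(
#         img_files=['datasetA/img1.jpg', 'datasetA/img2.jpg'],
#         labels=[0, 3],
#         img_base_dir='/ssd/crops_sq')
#     for img, label, img_file in ds.take(2):
#         print(img.shape, int(label), img_file.numpy())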
def create_dataloaders(
dataset_csv_path: str,
label_index_json_path: str,
splits_json_path: str,
cropped_images_dir: str,
img_size: int,
multilabel: bool,
label_weighted: bool,
weight_by_detection_conf: bool | str,
batch_size: int,
augment_train: bool,
cache_splits: Sequence[str]
) -> tuple[dict[str, tf.data.Dataset], list[str]]:
"""
Args:
dataset_csv_path: str, path to CSV file with columns
['dataset', 'location', 'label'], where label is a comma-delimited
list of labels
splits_json_path: str, path to JSON file
augment_train: bool, whether to shuffle/augment the training set
cache_splits: list of str, splits to cache
training set is cached at /mnt/tempds/random_file_name
validation and test sets are cached in memory
Returns:
datasets: dict, maps split to DataLoader
label_names: list of str, label names in order of label id
"""
df, label_names, split_to_locs = load_dataset_csv(
dataset_csv_path, label_index_json_path, splits_json_path,
multilabel=multilabel, label_weighted=label_weighted,
weight_by_detection_conf=weight_by_detection_conf)
# define the transforms
# efficientnet data preprocessing:
# - train:
# 1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0)
# 2) bicubic resize to img_size
# 3) random horizontal flip
# - test:
# 1) center crop
# 2) bicubic resize to img_size
@tf.function
def train_transform(img: tf.Tensor) -> tf.Tensor:
"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
img = tf.image.resize_with_pad(img, img_size, img_size,
method=tf.image.ResizeMethod.BICUBIC)
img = tf.image.random_flip_left_right(img)
img = tf.image.random_brightness(img, max_delta=0.25)
img = tf.image.random_contrast(img, lower=0.75, upper=1.25)
img = tf.image.random_saturation(img, lower=0.75, upper=1.25)
return img
@tf.function
def test_transform(img: tf.Tensor) -> tf.Tensor:
"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
img = tf.image.resize_with_pad(img, img_size, img_size,
method=tf.image.ResizeMethod.BICUBIC)
return img
dataloaders = {}
for split, locs in split_to_locs.items():
is_train = (split == 'train') and augment_train
split_df = df[df['dataset_location'].isin(locs)]
weights = None
if label_weighted or weight_by_detection_conf:
# weights sums to:
# - if weight_by_detection_conf: (# images in split - conf delta)
# - otherwise: (# images in split)
weights = split_df['weights'].tolist()
if not weight_by_detection_conf:
assert np.isclose(sum(weights), len(split_df))
cache: bool | str = (split in cache_splits)
if split == 'train' and 'train' in cache_splits:
unique_filename = str(uuid.uuid4())
os.makedirs('/mnt/tempds/', exist_ok=True)
cache = f'/mnt/tempds/{unique_filename}'
ds = create_dataset(
img_files=split_df['path'].tolist(),
labels=split_df['label_index'].tolist(),
sample_weights=weights,
img_base_dir=cropped_images_dir,
transform=train_transform if is_train else test_transform,
target_transform=None,
cache=cache)
if is_train:
ds = ds.shuffle(1000, reshuffle_each_iteration=True)
ds = ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)
dataloaders[split] = ds
return dataloaders, label_names
def build_model(model_name: str, num_classes: int, img_size: int,
pretrained: bool, finetune: bool) -> tf.keras.Model:
"""Creates a model with an EfficientNet base."""
class_name = EFFICIENTNET_MODELS[model_name]['cls']
dropout = EFFICIENTNET_MODELS[model_name]['dropout']
model_class = tf.keras.applications.__dict__[class_name]
weights = 'imagenet' if pretrained else None
inputs = tf.keras.layers.Input(shape=(img_size, img_size, 3))
base_model = model_class(
input_tensor=inputs, weights=weights, include_top=False, pooling='avg')
if finetune:
# freeze the base model's weights, including BatchNorm statistics
# https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning
base_model.trainable = False
# rebuild output
x = tf.keras.layers.Dropout(dropout, name='top_dropout')(base_model.output)
outputs = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1. / 3., mode='fan_out', distribution='uniform'),
name='logits')(x)
model = tf.keras.Model(inputs, outputs, name='complete_model')
model.base_model = base_model # cache this so that we can turn off finetune
return model
def main(dataset_dir: str,
cropped_images_dir: str,
multilabel: bool,
model_name: str,
pretrained: bool,
finetune: int,
label_weighted: bool,
weight_by_detection_conf: bool | str,
epochs: int,
batch_size: int,
lr: float,
weight_decay: float,
seed: Optional[int] = None,
logdir: str = '',
cache_splits: Sequence[str] = ()) -> None:
"""Main function."""
# input validation
assert os.path.exists(dataset_dir)
assert os.path.exists(cropped_images_dir)
if isinstance(weight_by_detection_conf, str):
assert os.path.exists(weight_by_detection_conf)
# set seed
seed = np.random.randint(10_000) if seed is None else seed
np.random.seed(seed)
tf.random.set_seed(seed)
# create logdir and save params
params = dict(locals()) # make a copy
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') # '20200722_110816'
logdir = os.path.join(logdir, timestamp)
os.makedirs(logdir, exist_ok=True)
print('Created logdir:', logdir)
with open(os.path.join(logdir, 'params.json'), 'w') as f:
json.dump(params, f, indent=1)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
img_size = EFFICIENTNET_MODELS[model_name]['img_size']
# create dataloaders and log the index_to_label mapping
loaders, label_names = create_dataloaders(
dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
splits_json_path=os.path.join(dataset_dir, 'splits.json'),
cropped_images_dir=cropped_images_dir,
img_size=img_size,
multilabel=multilabel,
label_weighted=label_weighted,
weight_by_detection_conf=weight_by_detection_conf,
batch_size=batch_size,
augment_train=True,
cache_splits=cache_splits)
writer = tf.summary.create_file_writer(logdir)
writer.set_as_default()
model = build_model(
model_name, num_classes=len(label_names), img_size=img_size,
pretrained=pretrained, finetune=finetune > 0)
# define loss function and optimizer
loss_fn: tf.keras.losses.Loss
if multilabel:
loss_fn = tf.keras.losses.BinaryCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
# using EfficientNet training defaults
# - batch norm momentum: 0.99
# - optimizer: RMSProp, decay 0.9 and momentum 0.9
# - epochs: 350
# - learning rate: 0.256, decays by 0.97 every 2.4 epochs
# - weight decay: 1e-5
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
lr, decay_steps=1, decay_rate=0.97, staircase=True)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate=lr, rho=0.9, momentum=0.9)
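    # With decay_steps=1 and staircase=True the schedule is simply lr * 0.97 ** epoch,
    # e.g. lr=1e-4 gives ~9.7e-5 at epoch 1 and ~7.4e-5 at epoch 10.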
best_epoch_metrics: dict[str, float] = {}
for epoch in range(epochs):
print(f'Epoch: {epoch}')
optimizer.learning_rate = lr_schedule(epoch)
tf.summary.scalar('lr', optimizer.learning_rate, epoch)
if epoch > 0 and finetune == epoch:
print('Turning off fine-tune!')
model.base_model.trainable = True
print('- train:')
# TODO: change weighted to False if oversampling minority classes
train_metrics, train_heaps, train_cm = run_epoch(
model, loader=loaders['train'], weighted=label_weighted,
loss_fn=loss_fn, weight_decay=weight_decay, optimizer=optimizer,
finetune=finetune > epoch, return_extreme_images=True)
train_metrics = prefix_all_keys(train_metrics, prefix='train/')
log_run('train', epoch, writer, label_names,
metrics=train_metrics, heaps=train_heaps, cm=train_cm)
print('- val:')
val_metrics, val_heaps, val_cm = run_epoch(
model, loader=loaders['val'], weighted=label_weighted,
loss_fn=loss_fn, return_extreme_images=True)
val_metrics = prefix_all_keys(val_metrics, prefix='val/')
log_run('val', epoch, writer, label_names,
metrics=val_metrics, heaps=val_heaps, cm=val_cm)
if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0): # pylint: disable=line-too-long
filename = os.path.join(logdir, f'ckpt_{epoch}.h5')
print(f'New best model! Saving checkpoint to {filename}')
model.save(filename)
best_epoch_metrics.update(train_metrics)
best_epoch_metrics.update(val_metrics)
best_epoch_metrics['epoch'] = epoch
print('- test:')
test_metrics, test_heaps, test_cm = run_epoch(
model, loader=loaders['test'], weighted=label_weighted,
loss_fn=loss_fn, return_extreme_images=True)
test_metrics = prefix_all_keys(test_metrics, prefix='test/')
log_run('test', epoch, writer, label_names,
metrics=test_metrics, heaps=test_heaps, cm=test_cm)
# stop training after 8 epochs without improvement
        if epoch >= best_epoch_metrics.get('epoch', 0) + 8:
break
hparams_dict = {
'model_name': model_name,
'multilabel': multilabel,
'finetune': finetune,
'batch_size': batch_size,
'epochs': epochs
}
hp.hparams(hparams_dict)
writer.close()
def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
label_names: Sequence[str], metrics: MutableMapping[str, float],
heaps: Mapping[str, Mapping[int, list[HeapItem]]], cm: np.ndarray
) -> None:
"""Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a
single epoch run to Tensorboard.
Args:
metrics: dict, keys already prefixed with {split}/
"""
per_class_recall = recall_from_confusion_matrix(cm, label_names)
metrics.update(prefix_all_keys(per_class_recall, f'{split}/label_recall/'))
# log metrics
for metric, value in metrics.items():
tf.summary.scalar(metric, value, epoch)
# log confusion matrix
cm_fig = plot_utils.plot_confusion_matrix(cm, classes=label_names,
normalize=True)
cm_fig_img = tf.convert_to_tensor(fig_to_img(cm_fig)[np.newaxis, ...])
tf.summary.image(f'confusion_matrix/{split}', cm_fig_img, step=epoch)
# log tp/fp/fn images
for heap_type, heap_dict in heaps.items():
log_images_with_confidence(heap_dict, label_names, epoch=epoch,
tag=f'{split}/{heap_type}')
writer.flush()
def log_images_with_confidence(
heap_dict: Mapping[int, list[HeapItem]],
label_names: Sequence[str],
epoch: int,
tag: str) -> None:
"""
Args:
heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem
data is a list [img, target, top3_conf, top3_preds, img_file],
and img is a tf.Tensor of shape [H, W, 3]
label_names: list of str, label names in order of label id
epoch: int
tag: str
"""
for label_id, heap in heap_dict.items():
label_name = label_names[label_id]
sorted_heap = sorted(heap, reverse=True) # sort largest to smallest
imgs_list = [item.data for item in sorted_heap]
fig, img_files = imgs_with_confidences(imgs_list, label_names)
# tf.summary.image requires input of shape [N, H, W, C]
fig_img = tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...])
tf.summary.image(f'{label_name}/{tag}', fig_img, step=epoch)
tf.summary.text(f'{label_name}/{tag}_files', '\n\n'.join(img_files),
step=epoch)
def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
fp_heaps: dict[int, list[HeapItem]],
fn_heaps: dict[int, list[HeapItem]],
inputs: tf.Tensor,
labels: tf.Tensor,
img_files: tf.Tensor,
logits: tf.Tensor) -> None:
"""Updates the 5 most extreme true-positive (tp), false-positive (fp), and
false-negative (fn) examples with examples from this batch.
Each HeapItem's data attribute is a tuple with:
- img: np.ndarray, shape [H, W, 3], type uint8
- label: int
- top3_conf: list of float
- top3_preds: list of float
- img_file: str
Args:
*_heaps: dict, maps label_id (int) to heap of HeapItems
inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32
labels: tf.Tensor, shape [batch_size]
img_files: tf.Tensor, shape [batch_size], type tf.string
logits: tf.Tensor, shape [batch_size, num_classes]
"""
labels = labels.numpy().tolist()
inputs = inputs.numpy().astype(np.uint8)
img_files = img_files.numpy().astype(str).tolist()
batch_probs = tf.nn.softmax(logits, axis=1)
iterable = zip(labels, inputs, img_files, batch_probs)
for label, img, img_file, confs in iterable:
label_conf = confs[label].numpy().item()
top3_conf, top3_preds = tf.math.top_k(confs, k=3, sorted=True)
top3_conf = top3_conf.numpy().tolist()
top3_preds = top3_preds.numpy().tolist()
data = (img, label, top3_conf, top3_preds, img_file)
if top3_preds[0] == label: # true positive
item = HeapItem(priority=label_conf - top3_conf[1], data=data)
add_to_heap(tp_heaps[label], item, k=5)
else:
# false positive for top3_pred[0]
# false negative for label
item = HeapItem(priority=top3_conf[0] - label_conf, data=data)
add_to_heap(fp_heaps[top3_preds[0]], item, k=5)
add_to_heap(fn_heaps[label], item, k=5)
def run_epoch(model: tf.keras.Model,
loader: tf.data.Dataset,
weighted: bool,
top: Sequence[int] = (1, 3),
loss_fn: Optional[tf.keras.losses.Loss] = None,
weight_decay: float = 0,
finetune: bool = False,
optimizer: Optional[tf.keras.optimizers.Optimizer] = None,
return_extreme_images: bool = False
) -> tuple[
dict[str, float],
dict[str, dict[int, list[HeapItem]]],
np.ndarray
]:
"""Runs for 1 epoch.
Args:
model: tf.keras.Model
loader: tf.data.Dataset
weighted: bool, whether to use sample weights in calculating loss and
accuracy
top: tuple of int, list of values of k for calculating top-K accuracy
loss_fn: optional loss function, calculates the mean loss over a batch
weight_decay: float, L2-regularization constant
finetune: bool, if true sets model's dropout and BN layers to eval mode
optimizer: optional optimizer
Returns:
metrics: dict, metrics from epoch, contains keys:
'loss': float, mean per-example loss over entire epoch,
only included if loss_fn is not None
'acc_top{k}': float, accuracy@k over the entire epoch
heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
data attribute (img, target, top3_conf, top3_preds, img_file)
- 'tp': priority is the difference between target confidence and
2nd highest confidence
- 'fp': priority is the difference between highest confidence and
target confidence
- 'fn': same as 'fp'
confusion_matrix: np.ndarray, shape [num_classes, num_classes],
C[i, j] = # of samples with true label i, predicted as label j
"""
# if evaluating or finetuning, set dropout & BN layers to eval mode
is_train = False
train_dropout_and_bn = False
if optimizer is not None:
assert loss_fn is not None
is_train = True
if not finetune:
train_dropout_and_bn = True
reg_vars = [
v for v in model.trainable_variables if 'kernel' in v.name]
if loss_fn is not None:
losses = tf.keras.metrics.Mean()
accuracies_topk = {
k: tf.keras.metrics.SparseTopKCategoricalAccuracy(k) for k in top
}
    # for each label, track the 5 most extreme tp/fp/fn examples
    # (most confidently correct and most confidently wrong predictions)
tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)
all_labels = []
all_preds = []
tqdm_loader = tqdm.tqdm(loader)
for batch in tqdm_loader:
if weighted:
inputs, labels, img_files, weights = batch
else:
# even if batch contains sample weights, don't use them
inputs, labels, img_files = batch[0:3]
weights = None
all_labels.append(labels.numpy())
desc = []
with tf.GradientTape(watch_accessed_variables=is_train) as tape:
outputs = model(inputs, training=train_dropout_and_bn)
if loss_fn is not None:
loss = loss_fn(labels, outputs)
if weights is not None:
loss *= weights
# we do not track L2-regularization loss in the loss metric
losses.update_state(loss, sample_weight=weights)
desc.append(f'Loss {losses.result().numpy():.4f}')
if optimizer is not None:
loss = tf.math.reduce_mean(loss)
if not finetune: # only regularize layers before the final FC
                        loss += weight_decay * tf.add_n(
                            [tf.nn.l2_loss(v) for v in reg_vars])
all_preds.append(tf.math.argmax(outputs, axis=1).numpy())
if optimizer is not None:
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for k, acc in accuracies_topk.items():
acc.update_state(labels, outputs, sample_weight=weights)
desc.append(f'Acc@{k} {acc.result().numpy() * 100:.3f}')
tqdm_loader.set_description(' '.join(desc))
if return_extreme_images:
track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
labels, img_files, outputs)
confusion_matrix = sklearn.metrics.confusion_matrix(
y_true=np.concatenate(all_labels), y_pred=np.concatenate(all_preds))
metrics = {}
if loss_fn is not None:
metrics['loss'] = losses.result().numpy().item()
for k, acc in accuracies_topk.items():
metrics[f'acc_top{k}'] = acc.result().numpy().item() * 100
heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
return metrics, heaps, confusion_matrix
def _parse_args() -> argparse.Namespace:
"""Parses arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Trains classifier.')
parser.add_argument(
'dataset_dir',
help='path to directory containing: 1) classification dataset CSV, '
'2) label index JSON, 3) splits JSON')
parser.add_argument(
'cropped_images_dir',
help='path to local directory where image crops are saved')
parser.add_argument(
'--multilabel', action='store_true',
help='for multi-label, multi-class classification')
parser.add_argument(
'-m', '--model-name', default='efficientnet-b0',
choices=list(EFFICIENTNET_MODELS.keys()),
help='which EfficientNet model')
parser.add_argument(
'--pretrained', action='store_true',
help='start with pretrained model')
parser.add_argument(
'--finetune', type=int, default=0,
help='only fine tune the final fully-connected layer for the first '
'<finetune> epochs')
parser.add_argument(
'--label-weighted', action='store_true',
help='weight training samples to balance labels')
parser.add_argument(
'--weight-by-detection-conf', nargs='?', const=True, default=False,
help='weight training examples by detection confidence. '
'Optionally takes a .npz file for isotonic calibration.')
parser.add_argument(
'--epochs', type=int, default=0,
help='number of epochs for training, 0 for eval-only')
parser.add_argument(
'--batch-size', type=int, default=256,
help='batch size for both training and eval')
parser.add_argument(
'--lr', type=float, default=None,
help='initial learning rate, defaults to (0.016 * batch_size / 256)')
parser.add_argument(
'--weight-decay', type=float, default=1e-5,
help='weight decay')
parser.add_argument(
'--seed', type=int,
help='random seed')
parser.add_argument(
'--logdir', default='.',
help='directory where TensorBoard logs and a params file are saved')
parser.add_argument(
'--cache', nargs='*', choices=['train', 'val', 'test'], default=(),
help='which splits of the dataset to cache')
return parser.parse_args()
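# Illustrative invocation (script name and paths are placeholders):
#   python train_classifier.py /path/to/dataset_dir /path/to/crops \
#       --model-name efficientnet-b0 --pretrained --finetune 1 \
#       --label-weighted --epochs 50 --batch-size 512 --logdir runs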
if __name__ == '__main__':
args = _parse_args()
if args.lr is None:
args.lr = 0.016 * args.batch_size / 256 # based on TF models repo
main(dataset_dir=args.dataset_dir,
cropped_images_dir=args.cropped_images_dir,
multilabel=args.multilabel,
model_name=args.model_name,
pretrained=args.pretrained,
finetune=args.finetune,
label_weighted=args.label_weighted,
weight_by_detection_conf=args.weight_by_detection_conf,
epochs=args.epochs,
batch_size=args.batch_size,
lr=args.lr,
weight_decay=args.weight_decay,
seed=args.seed,
logdir=args.logdir,
cache_splits=args.cache)
|
[
"tensorflow.io.read_file",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"tensorflow.image.random_saturation",
"tensorflow.summary.image",
"tensorflow.keras.layers.Input",
"os.path.exists",
"classification.train_utils.imgs_with_confidences",
"argparse.ArgumentParser",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.math.reduce_mean",
"numpy.random.seed",
"numpy.concatenate",
"tensorflow.math.top_k",
"tensorflow.summary.scalar",
"tensorflow.math.argmax",
"tensorflow.data.Dataset.zip",
"classification.train_utils.prefix_all_keys",
"classification.train_utils.recall_from_confusion_matrix",
"classification.train_utils.load_dataset_csv",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.summary.create_file_writer",
"tensorflow.image.resize_with_pad",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.metrics.SparseTopKCategoricalAccuracy",
"classification.train_utils.HeapItem",
"uuid.uuid4",
"tensorflow.nn.l2_loss",
"tensorflow.image.random_brightness",
"tensorflow.io.decode_jpeg",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.image.random_contrast",
"tensorflow.image.random_flip_left_right",
"tensorflow.random.set_seed",
"os.makedirs",
"tensorboard.plugins.hparams.api.hparams",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tqdm.tqdm",
"os.path.join",
"visualization.plot_utils.plot_confusion_matrix",
"classification.train_utils.fig_to_img",
"classification.train_utils.add_to_heap",
"datetime.datetime.now",
"numpy.random.randint",
"collections.defaultdict",
"tensorflow.keras.Model",
"json.dump",
"tensorflow.config.experimental.list_physical_devices"
] |
[((3300, 3345), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['img_files'], {}), '(img_files)\n', (3334, 3345), True, 'import tensorflow as tf\n'), ((4063, 4105), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['labels'], {}), '(labels)\n', (4097, 4105), True, 'import tensorflow as tf\n'), ((4256, 4301), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['img_files'], {}), '(img_files)\n', (4290, 4301), True, 'import tensorflow as tf\n'), ((4444, 4494), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['sample_weights'], {}), '(sample_weights)\n', (4478, 4494), True, 'import tensorflow as tf\n'), ((4506, 4572), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(img_ds, labels_ds, img_files_ds, weights_ds)'], {}), '((img_ds, labels_ds, img_files_ds, weights_ds))\n', (4525, 4572), True, 'import tensorflow as tf\n'), ((5662, 5850), 'classification.train_utils.load_dataset_csv', 'load_dataset_csv', (['dataset_csv_path', 'label_index_json_path', 'splits_json_path'], {'multilabel': 'multilabel', 'label_weighted': 'label_weighted', 'weight_by_detection_conf': 'weight_by_detection_conf'}), '(dataset_csv_path, label_index_json_path, splits_json_path,\n multilabel=multilabel, label_weighted=label_weighted,\n weight_by_detection_conf=weight_by_detection_conf)\n', (5678, 5850), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((8881, 8933), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(img_size, img_size, 3)'}), '(shape=(img_size, img_size, 3))\n', (8902, 8933), True, 'import tensorflow as tf\n'), ((9584, 9638), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""complete_model"""'}), "(inputs, outputs, name='complete_model')\n", (9598, 9638), True, 'import tensorflow as tf\n'), ((10255, 10282), 'os.path.exists', 'os.path.exists', (['dataset_dir'], {}), '(dataset_dir)\n', (10269, 10282), False, 'import os\n'), ((10294, 10328), 'os.path.exists', 'os.path.exists', (['cropped_images_dir'], {}), '(cropped_images_dir)\n', (10308, 10328), False, 'import os\n'), ((10518, 10538), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10532, 10538), True, 'import numpy as np\n'), ((10543, 10567), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (10561, 10567), True, 'import tensorflow as tf\n'), ((10739, 10770), 'os.path.join', 'os.path.join', (['logdir', 'timestamp'], {}), '(logdir, timestamp)\n', (10751, 10770), False, 'import os\n'), ((10775, 10809), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (10786, 10809), False, 'import os\n'), ((10960, 11011), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (11004, 11011), True, 'import tensorflow as tf\n'), ((11793, 11830), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (11822, 11830), True, 'import tensorflow as tf\n'), ((12636, 12738), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['lr'], {'decay_steps': '(1)', 'decay_rate': '(0.97)', 'staircase': '(True)'}), '(lr, decay_steps=1,\n decay_rate=0.97, staircase=True)\n', (12682, 
12738), True, 'import tensorflow as tf\n'), ((12760, 12828), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'learning_rate': 'lr', 'rho': '(0.9)', 'momentum': '(0.9)'}), '(learning_rate=lr, rho=0.9, momentum=0.9)\n', (12787, 12828), True, 'import tensorflow as tf\n'), ((15333, 15357), 'tensorboard.plugins.hparams.api.hparams', 'hp.hparams', (['hparams_dict'], {}), '(hparams_dict)\n', (15343, 15357), True, 'from tensorboard.plugins.hparams import api as hp\n'), ((15841, 15886), 'classification.train_utils.recall_from_confusion_matrix', 'recall_from_confusion_matrix', (['cm', 'label_names'], {}), '(cm, label_names)\n', (15869, 15886), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((16117, 16190), 'visualization.plot_utils.plot_confusion_matrix', 'plot_utils.plot_confusion_matrix', (['cm'], {'classes': 'label_names', 'normalize': '(True)'}), '(cm, classes=label_names, normalize=True)\n', (16149, 16190), False, 'from visualization import plot_utils\n'), ((16316, 16385), 'tensorflow.summary.image', 'tf.summary.image', (['f"""confusion_matrix/{split}"""', 'cm_fig_img'], {'step': 'epoch'}), "(f'confusion_matrix/{split}', cm_fig_img, step=epoch)\n", (16332, 16385), True, 'import tensorflow as tf\n'), ((18945, 18974), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (18958, 18974), True, 'import tensorflow as tf\n'), ((22508, 22525), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22519, 22525), False, 'from collections import defaultdict\n'), ((22568, 22585), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22579, 22585), False, 'from collections import defaultdict\n'), ((22628, 22645), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22639, 22645), False, 'from collections import defaultdict\n'), ((22705, 22722), 'tqdm.tqdm', 'tqdm.tqdm', (['loader'], {}), '(loader)\n', (22714, 22722), False, 'import tqdm\n'), ((25038, 25156), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Trains classifier."""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description='Trains classifier.')\n", (25061, 25156), False, 'import argparse\n'), ((4349, 4403), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(img_ds, labels_ds, img_files_ds)'], {}), '((img_ds, labels_ds, img_files_ds))\n', (4368, 4403), True, 'import tensorflow as tf\n'), ((6343, 6435), 'tensorflow.image.resize_with_pad', 'tf.image.resize_with_pad', (['img', 'img_size', 'img_size'], {'method': 'tf.image.ResizeMethod.BICUBIC'}), '(img, img_size, img_size, method=tf.image.\n ResizeMethod.BICUBIC)\n', (6367, 6435), True, 'import tensorflow as tf\n'), ((6484, 6520), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['img'], {}), '(img)\n', (6515, 6520), True, 'import tensorflow as tf\n'), ((6535, 6582), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['img'], {'max_delta': '(0.25)'}), '(img, max_delta=0.25)\n', (6561, 6582), True, 'import tensorflow as tf\n'), ((6597, 6650), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['img'], {'lower': '(0.75)', 'upper': '(1.25)'}), '(img, lower=0.75, upper=1.25)\n', (6621, 6650), True, 'import tensorflow as tf\n'), ((6665, 6720), 'tensorflow.image.random_saturation', 
'tf.image.random_saturation', (['img'], {'lower': '(0.75)', 'upper': '(1.25)'}), '(img, lower=0.75, upper=1.25)\n', (6691, 6720), True, 'import tensorflow as tf\n'), ((6903, 6995), 'tensorflow.image.resize_with_pad', 'tf.image.resize_with_pad', (['img', 'img_size', 'img_size'], {'method': 'tf.image.ResizeMethod.BICUBIC'}), '(img, img_size, img_size, method=tf.image.\n ResizeMethod.BICUBIC)\n', (6927, 6995), True, 'import tensorflow as tf\n'), ((9282, 9334), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""top_dropout"""'}), "(dropout, name='top_dropout')\n", (9305, 9334), True, 'import tensorflow as tf\n'), ((10394, 10434), 'os.path.exists', 'os.path.exists', (['weight_by_detection_conf'], {}), '(weight_by_detection_conf)\n', (10408, 10434), False, 'import os\n'), ((10462, 10486), 'numpy.random.randint', 'np.random.randint', (['(10000)'], {}), '(10000)\n', (10479, 10486), True, 'import numpy as np\n'), ((10917, 10947), 'json.dump', 'json.dump', (['params', 'f'], {'indent': '(1)'}), '(params, f, indent=1)\n', (10926, 10947), False, 'import json\n'), ((11041, 11092), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (11081, 11092), True, 'import tensorflow as tf\n'), ((12121, 12220), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(from_logits=True, reduction=tf.keras.\n losses.Reduction.NONE)\n', (12155, 12220), True, 'import tensorflow as tf\n'), ((12257, 12367), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(from_logits=True, reduction=\n tf.keras.losses.Reduction.NONE)\n', (12302, 12367), True, 'import tensorflow as tf\n'), ((13011, 13066), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""lr"""', 'optimizer.learning_rate', 'epoch'], {}), "('lr', optimizer.learning_rate, epoch)\n", (13028, 13066), True, 'import tensorflow as tf\n'), ((13598, 13645), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['train_metrics'], {'prefix': '"""train/"""'}), "(train_metrics, prefix='train/')\n", (13613, 13645), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((13993, 14036), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['val_metrics'], {'prefix': '"""val/"""'}), "(val_metrics, prefix='val/')\n", (14008, 14036), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((15906, 15965), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['per_class_recall', 'f"""{split}/label_recall/"""'], {}), "(per_class_recall, f'{split}/label_recall/')\n", (15921, 15965), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((16036, 16075), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['metric', 'value', 'epoch'], {}), '(metric, value, epoch)\n', (16053, 16075), True, 'import tensorflow as tf\n'), ((17365, 17410), 'classification.train_utils.imgs_with_confidences', 'imgs_with_confidences', (['imgs_list', 
'label_names'], {}), '(imgs_list, label_names)\n', (17386, 17410), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((17557, 17617), 'tensorflow.summary.image', 'tf.summary.image', (['f"""{label_name}/{tag}"""', 'fig_img'], {'step': 'epoch'}), "(f'{label_name}/{tag}', fig_img, step=epoch)\n", (17573, 17617), True, 'import tensorflow as tf\n'), ((19165, 19203), 'tensorflow.math.top_k', 'tf.math.top_k', (['confs'], {'k': '(3)', 'sorted': '(True)'}), '(confs, k=3, sorted=True)\n', (19178, 19203), True, 'import tensorflow as tf\n'), ((22263, 22286), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (22284, 22286), True, 'import tensorflow as tf\n'), ((22322, 22371), 'tensorflow.keras.metrics.SparseTopKCategoricalAccuracy', 'tf.keras.metrics.SparseTopKCategoricalAccuracy', (['k'], {}), '(k)\n', (22368, 22371), True, 'import tensorflow as tf\n'), ((3380, 3422), 'tensorflow.io.read_file', 'tf.io.read_file', (['(img_base_dir + os.sep + p)'], {}), '(img_base_dir + os.sep + p)\n', (3395, 3422), True, 'import tensorflow as tf\n'), ((3902, 3936), 'tensorflow.io.decode_jpeg', 'tf.io.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (3919, 3936), True, 'import tensorflow as tf\n'), ((7794, 7836), 'os.makedirs', 'os.makedirs', (['"""/mnt/tempds/"""'], {'exist_ok': '(True)'}), "('/mnt/tempds/', exist_ok=True)\n", (7805, 7836), False, 'import os\n'), ((10664, 10678), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10676, 10678), False, 'from datetime import datetime\n'), ((10861, 10896), 'os.path.join', 'os.path.join', (['logdir', '"""params.json"""'], {}), "(logdir, 'params.json')\n", (10873, 10896), False, 'import os\n'), ((11286, 11336), 'os.path.join', 'os.path.join', (['dataset_dir', '"""classification_ds.csv"""'], {}), "(dataset_dir, 'classification_ds.csv')\n", (11298, 11336), False, 'import os\n'), ((11368, 11413), 'os.path.join', 'os.path.join', (['dataset_dir', '"""label_index.json"""'], {}), "(dataset_dir, 'label_index.json')\n", (11380, 11413), False, 'import os\n'), ((11440, 11480), 'os.path.join', 'os.path.join', (['dataset_dir', '"""splits.json"""'], {}), "(dataset_dir, 'splits.json')\n", (11452, 11480), False, 'import os\n'), ((14294, 14334), 'os.path.join', 'os.path.join', (['logdir', 'f"""ckpt_{epoch}.h5"""'], {}), "(logdir, f'ckpt_{epoch}.h5')\n", (14306, 14334), False, 'import os\n'), ((14839, 14884), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['test_metrics'], {'prefix': '"""test/"""'}), "(test_metrics, prefix='test/')\n", (14854, 14884), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((16275, 16293), 'classification.train_utils.fig_to_img', 'fig_to_img', (['cm_fig'], {}), '(cm_fig)\n', (16285, 16293), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19433, 19488), 'classification.train_utils.HeapItem', 'HeapItem', ([], {'priority': '(label_conf - top3_conf[1])', 'data': 'data'}), '(priority=label_conf - top3_conf[1], data=data)\n', (19441, 19488), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19501, 19540), 
'classification.train_utils.add_to_heap', 'add_to_heap', (['tp_heaps[label]', 'item'], {'k': '(5)'}), '(tp_heaps[label], item, k=5)\n', (19512, 19540), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19659, 19714), 'classification.train_utils.HeapItem', 'HeapItem', ([], {'priority': '(top3_conf[0] - label_conf)', 'data': 'data'}), '(priority=top3_conf[0] - label_conf, data=data)\n', (19667, 19714), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19727, 19774), 'classification.train_utils.add_to_heap', 'add_to_heap', (['fp_heaps[top3_preds[0]]', 'item'], {'k': '(5)'}), '(fp_heaps[top3_preds[0]], item, k=5)\n', (19738, 19774), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19787, 19826), 'classification.train_utils.add_to_heap', 'add_to_heap', (['fn_heaps[label]', 'item'], {'k': '(5)'}), '(fn_heaps[label], item, k=5)\n', (19798, 19826), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((23063, 23113), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'watch_accessed_variables': 'is_train'}), '(watch_accessed_variables=is_train)\n', (23078, 23113), True, 'import tensorflow as tf\n'), ((24574, 24600), 'numpy.concatenate', 'np.concatenate', (['all_labels'], {}), '(all_labels)\n', (24588, 24600), True, 'import numpy as np\n'), ((24609, 24634), 'numpy.concatenate', 'np.concatenate', (['all_preds'], {}), '(all_preds)\n', (24623, 24634), True, 'import numpy as np\n'), ((7768, 7780), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7778, 7780), False, 'import uuid\n'), ((9439, 9537), 'tensorflow.keras.initializers.VarianceScaling', 'tf.keras.initializers.VarianceScaling', ([], {'scale': '(1.0 / 3.0)', 'mode': '"""fan_out"""', 'distribution': '"""uniform"""'}), "(scale=1.0 / 3.0, mode='fan_out',\n distribution='uniform')\n", (9476, 9537), True, 'import tensorflow as tf\n'), ((17515, 17530), 'classification.train_utils.fig_to_img', 'fig_to_img', (['fig'], {}), '(fig)\n', (17525, 17530), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((23620, 23645), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['loss'], {}), '(loss)\n', (23639, 23645), True, 'import tensorflow as tf\n'), ((23864, 23895), 'tensorflow.math.argmax', 'tf.math.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (23878, 23895), True, 'import tensorflow as tf\n'), ((23802, 23818), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (23815, 23818), True, 'import tensorflow as tf\n')]
|
# Author: <NAME>
# email: <EMAIL>
import matplotlib.pyplot as plt, numpy as np
import os, shutil
import seaborn as sns
from pandas import DataFrame
from sklearn.neighbors import NearestNeighbors
from terminaltables import AsciiTable
from collections import Counter
from .private import save_vis_close_helper, get_fig_ax_helper
from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, islistofscalar, islistofdict, iscolorimage_dimension, isgrayimage_dimension, istuple
# NOTE: islistofscalar and islistofdict (used below) are assumed to be provided by
# xinshuo_miscellaneous as well; imread and mkdir_if_missing are also used below and are
# assumed to come from the author's image/IO utilities, whose module is not shown here
from xinshuo_math import calculate_truncated_mse
color_set = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'w', 'lime', 'cyan', 'aqua']
linestyle_set = ['-', '--', '-.', ':', None, ' ', 'solid', 'dashed']
dpi = 80
def visualize_ced(normed_mean_error_dict, error_threshold, normalized=True, truncated_list=None, display2terminal=True, display_list=None, title='2D PCK curve', debug=True, vis=False, pck_savepath=None, table_savepath=None, closefig=True):
'''
    visualize the cumulative error distribution curve (also called the NME or PCK curve)
all parameters are represented by percentage
parameter:
normed_mean_error_dict: a dictionary whose keys are the method name and values are (N, ) numpy array to represent error in evaluation
error_threshold: threshold to display in x axis
return:
        metrics_dict: a dictionary of per-method metrics, including AUC (area under the curve, bigger is better) and MSE (mean square error, smaller is better)
        metrics_table: the same metrics arranged as a table (list of rows) for printing or saving
'''
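    # The CED curve plots, for each error value x, the fraction of test images whose
    # normalized error falls below x; the AUC computed below is the mean height of that
    # curve over [0, error_threshold]. For example (illustrative number), an AUC of 0.80
    # means that, averaged over all cut-offs up to the threshold, 80% of the images stay
    # under the cut-off.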
if debug:
assert isdict(normed_mean_error_dict), 'the input normalized mean error dictionary is not correct'
assert islogical(normalized), 'the normalization flag should be logical'
if normalized: assert error_threshold > 0 and error_threshold < 100, 'threshold percentage is not well set'
        if pck_savepath is not None: assert is_path_exists_or_creatable(pck_savepath), 'please provide a valid path to save the pck results'
        if table_savepath is not None: assert is_path_exists_or_creatable(table_savepath), 'please provide a valid path to save the table results'
assert isstring(title), 'title is not correct'
if truncated_list is not None: assert islistofscalar(truncated_list), 'the input truncated list is not correct'
if display_list is not None:
assert islist(display_list) and len(display_list) == len(normed_mean_error_dict), 'the input display list is not correct'
assert CHECK_EQ_LIST_UNORDERED(display_list, normed_mean_error_dict.keys(), debug=debug), 'the input display list does not match the error dictionary key list'
else: display_list = normed_mean_error_dict.keys()
# set display parameters
width, height = 1000, 800
legend_fontsize = 10
scale_distance = 48.8
line_index, color_index = 0, 0
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
# set figure handle
num_bins = 1000
if normalized:
maximum_x = 1
scale = num_bins / 100
else:
maximum_x = error_threshold + 1
scale = num_bins / maximum_x
x_axis = np.linspace(0, maximum_x, num_bins) # error axis, percentage of normalization factor
y_axis = np.zeros(num_bins)
interval_y = 10
interval_x = 1
plt.xlim(0, error_threshold)
plt.ylim(0, 100)
plt.yticks(np.arange(0, 100 + interval_y, interval_y))
plt.xticks(np.arange(0, error_threshold + interval_x, interval_x))
plt.grid()
plt.title(title, fontsize=20)
if normalized: plt.xlabel('Normalized error euclidean distance (%)', fontsize=16)
else: plt.xlabel('Absolute error euclidean distance', fontsize=16)
# calculate metrics for each method
num_methods = len(normed_mean_error_dict)
num_images = len(normed_mean_error_dict.values()[0])
metrics_dict = dict()
metrics_table = list()
table_title = ['Method Name / Metrics', 'AUC', 'MSE']
append2title = False
assert num_images > 0, 'number of error array should be larger than 0'
for ordered_index in range(num_methods):
method_name = display_list[ordered_index]
normed_mean_error = normed_mean_error_dict[method_name]
if debug:
assert isnparray(normed_mean_error) and normed_mean_error.ndim == 1, 'shape of error distance is not good'
assert len(normed_mean_error) == num_images, 'number of testing images should be equal for all methods'
assert len(linestyle_set) * len(color_set) >= len(normed_mean_error_dict)
color_tmp = color_set[color_index]
line_tmp = linestyle_set[line_index]
for i in range(num_bins):
y_axis[i] = float((normed_mean_error < x_axis[i]).sum()) / num_images # percentage of error
# calculate area under the curve and mean square error
entry = dict()
entry['AUC'] = np.sum(y_axis[:error_threshold * scale]) / (error_threshold * scale) # bigger, better
entry['MSE'] = np.mean(normed_mean_error) # smaller, better
metrics_table_tmp = [str(method_name), '%.2f' % (entry['AUC']), '%.1f' % (entry['MSE'])]
if truncated_list is not None:
tmse_dict = calculate_truncated_mse(normed_mean_error.tolist(), truncated_list, debug=debug)
for threshold in truncated_list:
                entry['AUC/%s'%threshold] = np.sum(y_axis[:threshold * scale]) / (threshold * scale) # bigger, better
entry['MSE/%s'%threshold] = tmse_dict[threshold]['T-MSE']
entry['percentage/%s'%threshold] = tmse_dict[threshold]['percentage']
if not append2title:
table_title.append('AUC/%s'%threshold)
table_title.append('MSE/%s'%threshold)
table_title.append('pct/%s'%threshold)
metrics_table_tmp.append('%.2f' % (entry['AUC/%s'%threshold]))
metrics_table_tmp.append('%.1f' % (entry['MSE/%s'%threshold]))
metrics_table_tmp.append('%.1f' % (100 * entry['percentage/%s'%threshold]) + '%')
# print metrics_table_tmp
metrics_table.append(metrics_table_tmp)
append2title = True
metrics_dict[method_name] = entry
# draw
label = '%s, AUC: %.2f, MSE: %.1f (%.0f um)' % (method_name, entry['AUC'], entry['MSE'], entry['MSE'] * scale_distance)
if normalized: plt.plot(x_axis*100, y_axis*100, color=color_tmp, linestyle=line_tmp, label=label, lw=3)
else: plt.plot(x_axis, y_axis*100, color=color_tmp, linestyle=line_tmp, label=label, lw=3)
plt.legend(loc=4, fontsize=legend_fontsize)
color_index += 1
if color_index / len(color_set) == 1:
line_index += 1
color_index = color_index % len(color_set)
# plt.grid()
plt.ylabel('{} Test Images (%)'.format(num_images), fontsize=16)
save_vis_close_helper(fig=fig, ax=None, vis=vis, transparent=False, save_path=pck_savepath, debug=debug, closefig=closefig)
# reorder the table
order_index_list = [display_list.index(method_name_tmp) for method_name_tmp in normed_mean_error_dict.keys()]
order_index_list = [0] + [order_index_tmp + 1 for order_index_tmp in order_index_list]
# print table to terminal
metrics_table = [table_title] + metrics_table
# metrics_table = list_reorder([table_title] + metrics_table, order_index_list, debug=debug)
table = AsciiTable(metrics_table)
if display2terminal:
print('\nprint detailed metrics')
print(table.table)
# save table to file
if table_savepath is not None:
table_file = open(table_savepath, 'w')
table_file.write(table.table)
table_file.close()
if display2terminal: print('\nsave detailed metrics to %s' % table_savepath)
return metrics_dict, metrics_table
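# Illustrative call of visualize_ced (method names are made up); each value is a
# (N,) numpy array of per-image normalized errors:
#   errors = {'method_a': err_a, 'method_b': err_b}
#   metrics, table = visualize_ced(errors, error_threshold=8, vis=False,
#                                  pck_savepath=None, table_savepath=None)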
def visualize_nearest_neighbor(featuremap_dict, num_neighbor=5, top_number=5, vis=True, save_csv=False, csv_save_path=None, save_vis=False, save_img=False, save_thumb_name='nearest_neighbor.png', img_src_folder=None, ext_filter='.jpg', nn_save_folder=None, debug=True):
'''
visualize nearest neighbor for featuremap from images
parameter:
featuremap_dict: a dictionary contains image path as key, and featuremap as value, the featuremap needs to be numpy array with any shape. No flatten needed
num_neighbor: number of neighbor to visualize, the first nearest is itself
        top_number: number of top examples to visualize; since there might be tons of featuremaps (one per dictionary entry), we only keep the top_number of them with the lowest distance to their nearest neighbors
csv_save_path: path to save .csv file which contains indices and distance array for all elements
nn_save_folder: save the nearest neighbor images for top featuremap
return:
all_sorted_nearest_id: a 2d matrix, each row is a feature followed by its nearest neighbor in whole feature dataset, the column is sorted by the distance of all nearest neighbor each row
selected_nearest_id: only top number of sorted nearest id
'''
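    # The expected input looks like (hypothetical names):
    #   featuremap_dict = {'images/img_001.jpg': feat_1, 'images/img_002.jpg': feat_2, ...}
    # where each value is a numpy array of any shape (e.g. a conv feature map);
    # every array is flattened below before the nearest-neighbor search.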
    print('processing feature map to nearest neighbor.......')
if debug:
assert isdict(featuremap_dict), 'featuremap should be dictionary'
assert all(isnparray(featuremap_tmp) for featuremap_tmp in featuremap_dict.values()), 'value of dictionary should be numpy array'
        assert isinteger(num_neighbor) and num_neighbor > 1, 'number of neighbors should be an integer larger than 1'
if save_csv and csv_save_path is not None:
assert is_path_exists_or_creatable(csv_save_path), 'path to save .csv file is not correct'
if save_vis or save_img:
if nn_save_folder is not None: # save image directly
assert isstring(ext_filter), 'extension filter is not correct'
assert is_path_exists(img_src_folder), 'source folder for image is not correct'
assert all(isstring(path_tmp) for path_tmp in featuremap_dict.keys()) # key should be the path for the image
assert is_path_exists_or_creatable(nn_save_folder), 'folder to save top visualized images is not correct'
assert isstring(save_thumb_name), 'name of thumbnail is not correct'
if ext_filter.find('.') == -1:
ext_filter = '.%s' % ext_filter
# flatten the feature map
nn_feature_dict = dict()
for key, featuremap_tmp in featuremap_dict.items():
nn_feature_dict[key] = featuremap_tmp.flatten()
num_features = len(nn_feature_dict)
# nearest neighbor
featuremap = np.array(nn_feature_dict.values())
nearbrs = NearestNeighbors(n_neighbors=num_neighbor, algorithm='ball_tree').fit(featuremap)
distances, indices = nearbrs.kneighbors(featuremap)
if debug:
assert featuremap.shape[0] == num_features, 'shape of feature map is not correct'
assert indices.shape == (num_features, num_neighbor), 'shape of indices is not correct'
assert distances.shape == (num_features, num_neighbor), 'shape of indices is not correct'
# convert the nearest indices for all featuremap to the key accordingly
id_list = nn_feature_dict.keys()
max_length = len(max(id_list, key=len)) # find the maximum length of string in the key
nearest_id = np.chararray(indices.shape, itemsize=max_length+1)
for x in range(nearest_id.shape[0]):
for y in range(nearest_id.shape[1]):
nearest_id[x, y] = id_list[indices[x, y]]
if debug:
assert list(nearest_id[:, 0]) == id_list, 'nearest neighbor has problem'
# sort the feature based on distance
print('sorting the feature based on distance')
featuremap_distance = np.sum(distances, axis=1)
if debug:
assert featuremap_distance.shape == (num_features, ), 'distance is not correct'
sorted_indices = np.argsort(featuremap_distance)
all_sorted_nearest_id = nearest_id[sorted_indices, :]
# save to the csv file
if save_csv and csv_save_path is not None:
print('Saving nearest neighbor result as .csv to path: %s' % csv_save_path)
with open(csv_save_path, 'w+') as file:
np.savetxt(file, distances, delimiter=',', fmt='%f')
np.savetxt(file, all_sorted_nearest_id, delimiter=',', fmt='%s')
file.close()
# choose the best to visualize
selected_sorted_indices = sorted_indices[0:top_number]
if debug:
for i in range(num_features-1):
assert featuremap_distance[sorted_indices[i]] < featuremap_distance[sorted_indices[i+1]], 'feature map is not well sorted based on distance'
selected_nearest_id = nearest_id[selected_sorted_indices, :]
if save_vis:
fig, axarray = plt.subplots(top_number, num_neighbor)
for index in range(top_number):
for nearest_index in range(num_neighbor):
img_path = os.path.join(img_src_folder, '%s%s'%(selected_nearest_id[index, nearest_index], ext_filter))
if debug:
print('loading image from %s'%img_path)
img = imread(img_path)
if isgrayimage_dimension(img):
axarray[index, nearest_index].imshow(img, cmap='gray')
elif iscolorimage_dimension(img):
axarray[index, nearest_index].imshow(img)
else:
assert False, 'unknown error'
axarray[index, nearest_index].axis('off')
save_thumb = os.path.join(nn_save_folder, save_thumb_name)
fig.savefig(save_thumb)
if vis:
plt.show()
plt.close(fig)
# save top visualization to the folder
if save_img and nn_save_folder is not None:
for top_index in range(top_number):
file_list = selected_nearest_id[top_index]
save_subfolder = os.path.join(nn_save_folder, file_list[0])
mkdir_if_missing(save_subfolder)
for file_tmp in file_list:
file_src = os.path.join(img_src_folder, '%s%s'%(file_tmp, ext_filter))
save_path = os.path.join(save_subfolder, '%s%s'%(file_tmp, ext_filter))
if debug:
print('saving %s to %s' % (file_src, save_path))
shutil.copyfile(file_src, save_path)
return all_sorted_nearest_id, selected_nearest_id
def visualize_distribution(data, bin_size=None, vis=False, save_path=None, debug=True, closefig=True):
'''
    visualize the histogram of the data, which can be a dictionary, list, numpy array, tuple or a list of lists
'''
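    # e.g. (hypothetical data) visualize_distribution(np.random.randn(1000)) plots a
    # histogram whose bin size defaults to (max - min) / 1000 of the data range.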
if debug:
assert istuple(data) or isdict(data) or islist(data) or isnparray(data), 'input data is not correct'
# convert data type
if istuple(data):
data = list(data)
elif isdict(data):
data = data.values()
elif isnparray(data):
data = data.tolist()
num_bins = 1000.0
fig, ax = get_fig_ax_helper(fig=None, ax=None)
# calculate bin size
if bin_size is None:
if islistoflist(data):
max_value = np.max(np.max(data))
min_value = np.min(np.min(data))
else:
max_value = np.max(data)
min_value = np.min(data)
bin_size = (max_value - min_value) / num_bins
else:
try:
bin_size = float(bin_size)
        except (TypeError, ValueError):
            print('size of bin should be a float value')
# plot
if islistoflist(data):
max_value = np.max(np.max(data))
min_value = np.min(np.min(data))
bins = np.arange(min_value - bin_size, max_value + bin_size, bin_size) # fixed bin size
plt.xlim([min_value - bin_size, max_value + bin_size])
for data_list_tmp in data:
if debug:
assert islist(data_list_tmp), 'the nested list is not correct!'
# plt.hist(data_list_tmp, bins=bins, alpha=0.3)
sns.distplot(data_list_tmp, bins=bins, kde=False)
# sns.distplot(data_list_tmp, bins=bins, kde=False)
else:
bins = np.arange(min(data) - 10 * bin_size, max(data) + 10 * bin_size, bin_size) # fixed bin size
plt.xlim([min(data) - bin_size, max(data) + bin_size])
plt.hist(data, bins=bins, alpha=0.5)
plt.title('distribution of data')
plt.xlabel('data (bin size = %f)' % bin_size)
plt.ylabel('count')
return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, debug=debug, closefig=closefig)
def visualize_bar(data, bin_size=2.0, title='Bar Graph of Key-Value Pair', xlabel='index', ylabel='count', vis=True, save_path=None, debug=True, closefig=True):
'''
    visualize the bar graph of the data, which can be a dictionary or a list of scalars
    different from visualize_bar_graph, this function does not depend on pandas and DataFrame; it is simpler but has less functionality
    also the keys of the input data are expected to be continuous scalar variables
'''
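    # e.g. (hypothetical data) visualize_bar({0: 12, 1: 30, 2: 5}) draws one bar per
    # integer key with the dictionary values as heights, while visualize_bar([12, 30, 5])
    # uses the list indices 0..2 as the keys.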
if debug:
assert isstring(title) and isstring(xlabel) and isstring(ylabel), 'title/xlabel/ylabel is not correct'
assert isdict(data) or islist(data), 'input data is not correct'
assert isscalar(bin_size), 'the bin size is not a floating number'
if isdict(data):
index_list = data.keys()
if debug:
assert islistofscalar(index_list), 'the input dictionary does not contain a scalar key'
frequencies = data.values()
else:
index_list = range(len(data))
frequencies = data
index_str_list = scalarlist2strlist(index_list, debug=debug)
index_list = np.array(index_list)
fig, ax = get_fig_ax_helper(fig=None, ax=None)
# ax.set_xticks(index_list)
# ax.set_xticklabels(index_str_list)
plt.bar(index_list, frequencies, bin_size, color='r', alpha=0.5)
plt.title(title, fontsize=20)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, debug=debug, transparent=False, closefig=closefig)
def visualize_bar_graph(data, title='Bar Graph of Key-Value Pair', xlabel='pixel error', ylabel='keypoint index', label=False, label_list=None, vis=True, save_path=None, debug=True, closefig=True):
'''
    visualize the bar graph of the data, which can be a dictionary or a list of dictionaries
    inside each dictionary, the keys (strings) should be the same across dictionaries and serve as the y labels; the values should be scalars
'''
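    # e.g. (hypothetical data) a single result set: {'left_eye': 3.2, 'right_eye': 1.7},
    # or several sets to compare side by side (same keys in each):
    #   [{'left_eye': 3.2, 'right_eye': 1.7}, {'left_eye': 2.8, 'right_eye': 1.5}]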
if debug:
assert isstring(title) and isstring(xlabel) and isstring(ylabel), 'title/xlabel/ylabel is not correct'
assert isdict(data) or islistofdict(data), 'input data is not correct'
if isdict(data):
assert all(isstring(key_tmp) for key_tmp in data.keys()), 'the keys are not all strings'
assert all(isscalar(value_tmp) for value_tmp in data.values()), 'the keys are not all strings'
else:
assert len(data) <= len(color_set), 'number of data set is larger than number of color to use'
keys = sorted(data[0].keys())
for dict_tmp in data:
if not (sorted(dict_tmp.keys()) == keys):
print(dict_tmp.keys())
print(keys)
assert False, 'the keys are not equal across different input set'
assert all(isstring(key_tmp) for key_tmp in dict_tmp.keys()), 'the keys are not all strings'
assert all(isscalar(value_tmp) for value_tmp in dict_tmp.values()), 'the values are not all scalars'
# convert dictionary to DataFrame
data_new = dict()
if isdict(data):
key_list = data.keys()
sorted_index = sorted(range(len(key_list)), key=lambda k: key_list[k])
data_new['names'] = (np.asarray(key_list)[sorted_index]).tolist()
data_new['values'] = (np.asarray(data.values())[sorted_index]).tolist()
else:
key_list = data[0].keys()
sorted_index = sorted(range(len(key_list)), key=lambda k: key_list[k])
data_new['names'] = (np.asarray(key_list)[sorted_index]).tolist()
num_sets = len(data)
for set_index in range(num_sets):
data_new['value_%03d'%set_index] = (np.asarray(data[set_index].values())[sorted_index]).tolist()
dataframe = DataFrame(data_new)
# plot
width = 2000
height = 2000
alpha = 0.5
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
sns.set(style='whitegrid')
# fig, ax = get_fig_ax_helper(fig=None, ax=None)
if isdict(data):
g = sns.barplot(x='values', y='names', data=dataframe, label='data', color='b')
plt.legend(ncol=1, loc='lower right', frameon=True, fontsize=5)
else:
num_sets = len(data)
for set_index in range(num_sets):
if set_index == 0:
sns.set_color_codes('pastel')
else:
sns.set_color_codes('muted')
if label:
sns.barplot(x='value_%03d'%set_index, y='names', data=dataframe, label=label_list[set_index], color=color_set[set_index], alpha=alpha)
else:
                sns.barplot(x='value_%03d'%set_index, y='names', data=dataframe, color=color_set[set_index], alpha=alpha)
plt.legend(ncol=len(data), loc='lower right', frameon=True, fontsize=5)
sns.despine(left=True, bottom=True)
plt.title(title, fontsize=20)
plt.xlim([0, 50])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
num_yticks = len(data_new['names'])
adaptive_fontsize = -0.0555556 * num_yticks + 15.111
plt.yticks(fontsize=adaptive_fontsize)
return save_vis_close_helper(fig=fig, vis=vis, save_path=save_path, debug=debug, closefig=closefig)
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.argsort",
"numpy.array",
"xinshuo_miscellaneous.iscolorimage_dimension",
"xinshuo_miscellaneous.isinteger",
"numpy.arange",
"numpy.mean",
"xinshuo_miscellaneous.isnparray",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"xinshuo_miscellaneous.isgrayimage_dimension",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"xinshuo_miscellaneous.isdict",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplots",
"numpy.min",
"matplotlib.pyplot.ylim",
"xinshuo_miscellaneous.scalarlist2strlist",
"xinshuo_miscellaneous.islogical",
"xinshuo_miscellaneous.is_path_exists_or_creatable",
"xinshuo_miscellaneous.isscalar",
"xinshuo_miscellaneous.isstring",
"numpy.savetxt",
"numpy.chararray",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"xinshuo_miscellaneous.istuple",
"xinshuo_miscellaneous.is_path_exists",
"xinshuo_miscellaneous.islist",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"terminaltables.AsciiTable",
"matplotlib.pyplot.bar",
"xinshuo_miscellaneous.islistoflist"
] |
[((2895, 2922), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2905, 2922), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3148, 3183), 'numpy.linspace', 'np.linspace', (['(0)', 'maximum_x', 'num_bins'], {}), '(0, maximum_x, num_bins)\n', (3159, 3183), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3261, 3279), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (3269, 3279), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3323, 3351), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'error_threshold'], {}), '(0, error_threshold)\n', (3331, 3351), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3356, 3372), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (3364, 3372), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3507, 3517), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3515, 3517), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3522, 3551), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (3531, 3551), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((7604, 7629), 'terminaltables.AsciiTable', 'AsciiTable', (['metrics_table'], {}), '(metrics_table)\n', (7614, 7629), False, 'from terminaltables import AsciiTable\n'), ((11504, 11556), 'numpy.chararray', 'np.chararray', (['indices.shape'], {'itemsize': '(max_length + 1)'}), '(indices.shape, itemsize=max_length + 1)\n', (11516, 11556), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((11914, 11939), 'numpy.sum', 'np.sum', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (11920, 11939), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((12063, 12094), 'numpy.argsort', 'np.argsort', (['featuremap_distance'], {}), '(featuremap_distance)\n', (12073, 12094), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((14954, 14967), 'xinshuo_miscellaneous.istuple', 'istuple', (['data'], {}), '(data)\n', (14961, 14967), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((15655, 15673), 'xinshuo_miscellaneous.islistoflist', 'islistoflist', (['data'], {}), '(data)\n', (15667, 15673), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((16482, 16515), 'matplotlib.pyplot.title', 'plt.title', (['"""distribution of data"""'], {}), "('distribution of data')\n", (16491, 16515), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((16520, 16565), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('data (bin size = %f)' % bin_size)"], {}), "('data (bin size = %f)' % bin_size)\n", (16530, 16565), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((16570, 16589), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (16580, 16589), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((17463, 17475), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (17469, 17475), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, 
isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((17761, 17804), 'xinshuo_miscellaneous.scalarlist2strlist', 'scalarlist2strlist', (['index_list'], {'debug': 'debug'}), '(index_list, debug=debug)\n', (17779, 17804), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((17822, 17842), 'numpy.array', 'np.array', (['index_list'], {}), '(index_list)\n', (17830, 17842), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((17971, 18035), 'matplotlib.pyplot.bar', 'plt.bar', (['index_list', 'frequencies', 'bin_size'], {'color': '"""r"""', 'alpha': '(0.5)'}), "(index_list, frequencies, bin_size, color='r', alpha=0.5)\n", (17978, 18035), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((18040, 18069), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (18049, 18069), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((18074, 18092), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (18084, 18092), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((18097, 18115), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (18107, 18115), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((19813, 19825), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (19819, 19825), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((20639, 20666), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (20649, 20666), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((20758, 20770), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (20764, 20770), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((21592, 21621), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (21601, 21621), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((21626, 21643), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 50]'], {}), '([0, 50])\n', (21634, 21643), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((21648, 21666), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (21658, 21666), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((21671, 21689), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (21681, 21689), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((21792, 21830), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'adaptive_fontsize'}), '(fontsize=adaptive_fontsize)\n', (21802, 21830), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((1563, 1593), 'xinshuo_miscellaneous.isdict', 'isdict', (['normed_mean_error_dict'], {}), '(normed_mean_error_dict)\n', (1569, 1593), False, 'from xinshuo_miscellaneous import isdict, islogical, 
is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((1670, 1691), 'xinshuo_miscellaneous.islogical', 'islogical', (['normalized'], {}), '(normalized)\n', (1679, 1691), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((2122, 2137), 'xinshuo_miscellaneous.isstring', 'isstring', (['title'], {}), '(title)\n', (2130, 2137), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((3388, 3430), 'numpy.arange', 'np.arange', (['(0)', '(100 + interval_y)', 'interval_y'], {}), '(0, 100 + interval_y, interval_y)\n', (3397, 3430), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3447, 3501), 'numpy.arange', 'np.arange', (['(0)', '(error_threshold + interval_x)', 'interval_x'], {}), '(0, error_threshold + interval_x, interval_x)\n', (3456, 3501), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3571, 3637), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized error euclidean distance (%)"""'], {'fontsize': '(16)'}), "('Normalized error euclidean distance (%)', fontsize=16)\n", (3581, 3637), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((3648, 3708), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Absolute error euclidean distance"""'], {'fontsize': '(16)'}), "('Absolute error euclidean distance', fontsize=16)\n", (3658, 3708), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((5048, 5074), 'numpy.mean', 'np.mean', (['normed_mean_error'], {}), '(normed_mean_error)\n', (5055, 5074), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((6750, 6793), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'fontsize': 'legend_fontsize'}), '(loc=4, fontsize=legend_fontsize)\n', (6760, 6793), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((9375, 9398), 'xinshuo_miscellaneous.isdict', 'isdict', (['featuremap_dict'], {}), '(featuremap_dict)\n', (9381, 9398), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((12936, 12974), 'matplotlib.pyplot.subplots', 'plt.subplots', (['top_number', 'num_neighbor'], {}), '(top_number, num_neighbor)\n', (12948, 12974), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((13824, 13838), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13833, 13838), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((15004, 15016), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (15010, 15016), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((15238, 15256), 'xinshuo_miscellaneous.islistoflist', 'islistoflist', (['data'], {}), 
'(data)\n', (15250, 15256), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((15772, 15835), 'numpy.arange', 'np.arange', (['(min_value - bin_size)', '(max_value + bin_size)', 'bin_size'], {}), '(min_value - bin_size, max_value + bin_size, bin_size)\n', (15781, 15835), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((15866, 15920), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[min_value - bin_size, max_value + bin_size]'], {}), '([min_value - bin_size, max_value + bin_size])\n', (15874, 15920), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((16436, 16472), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': 'bins', 'alpha': '(0.5)'}), '(data, bins=bins, alpha=0.5)\n', (16444, 16472), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((17395, 17413), 'xinshuo_miscellaneous.isscalar', 'isscalar', (['bin_size'], {}), '(bin_size)\n', (17403, 17413), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((18878, 18890), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (18884, 18890), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((20868, 20931), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(1)', 'loc': '"""lower right"""', 'frameon': '(True)', 'fontsize': '(5)'}), "(ncol=1, loc='lower right', frameon=True, fontsize=5)\n", (20878, 20931), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((1888, 1929), 'xinshuo_miscellaneous.is_path_exists_or_creatable', 'is_path_exists_or_creatable', (['pck_savepath'], {}), '(pck_savepath)\n', (1915, 1929), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((2005, 2048), 'xinshuo_miscellaneous.is_path_exists_or_creatable', 'is_path_exists_or_creatable', (['table_savepath'], {}), '(table_savepath)\n', (2032, 2048), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((4931, 4971), 'numpy.sum', 'np.sum', (['y_axis[:error_threshold * scale]'], {}), '(y_axis[:error_threshold * scale])\n', (4937, 4971), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((6554, 6650), 'matplotlib.pyplot.plot', 'plt.plot', (['(x_axis * 100)', '(y_axis * 100)'], {'color': 'color_tmp', 'linestyle': 'line_tmp', 'label': 'label', 'lw': '(3)'}), '(x_axis * 100, y_axis * 100, color=color_tmp, linestyle=line_tmp,\n label=label, lw=3)\n', (6562, 6650), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((6657, 6748), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '(y_axis * 100)'], {'color': 'color_tmp', 
'linestyle': 'line_tmp', 'label': 'label', 'lw': '(3)'}), '(x_axis, y_axis * 100, color=color_tmp, linestyle=line_tmp, label=\n label, lw=3)\n', (6665, 6748), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((9587, 9610), 'xinshuo_miscellaneous.isinteger', 'isinteger', (['num_neighbor'], {}), '(num_neighbor)\n', (9596, 9610), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((9756, 9798), 'xinshuo_miscellaneous.is_path_exists_or_creatable', 'is_path_exists_or_creatable', (['csv_save_path'], {}), '(csv_save_path)\n', (9783, 9798), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((12372, 12424), 'numpy.savetxt', 'np.savetxt', (['file', 'distances'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(file, distances, delimiter=',', fmt='%f')\n", (12382, 12424), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((12437, 12501), 'numpy.savetxt', 'np.savetxt', (['file', 'all_sorted_nearest_id'], {'delimiter': '""","""', 'fmt': '"""%s"""'}), "(file, all_sorted_nearest_id, delimiter=',', fmt='%s')\n", (12447, 12501), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((13805, 13815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13813, 13815), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((14828, 14841), 'xinshuo_miscellaneous.istuple', 'istuple', (['data'], {}), '(data)\n', (14835, 14841), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((14845, 14857), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (14851, 14857), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((14861, 14873), 'xinshuo_miscellaneous.islist', 'islist', (['data'], {}), '(data)\n', (14867, 14873), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((14877, 14892), 'xinshuo_miscellaneous.isnparray', 'isnparray', (['data'], {}), '(data)\n', (14886, 14892), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((15056, 15071), 'xinshuo_miscellaneous.isnparray', 'isnparray', (['data'], {}), '(data)\n', (15065, 15071), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, 
iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((15386, 15398), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (15392, 15398), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((15423, 15435), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (15429, 15435), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((15702, 15714), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (15708, 15714), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((15743, 15755), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (15749, 15755), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((17211, 17226), 'xinshuo_miscellaneous.isstring', 'isstring', (['title'], {}), '(title)\n', (17219, 17226), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((17231, 17247), 'xinshuo_miscellaneous.isstring', 'isstring', (['xlabel'], {}), '(xlabel)\n', (17239, 17247), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((17252, 17268), 'xinshuo_miscellaneous.isstring', 'isstring', (['ylabel'], {}), '(ylabel)\n', (17260, 17268), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((17322, 17334), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (17328, 17334), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((17338, 17350), 'xinshuo_miscellaneous.islist', 'islist', (['data'], {}), '(data)\n', (17344, 17350), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((18692, 18707), 'xinshuo_miscellaneous.isstring', 'isstring', (['title'], {}), '(title)\n', (18700, 18707), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((18712, 18728), 'xinshuo_miscellaneous.isstring', 'isstring', (['xlabel'], {}), '(xlabel)\n', (18720, 18728), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((18733, 18749), 'xinshuo_miscellaneous.isstring', 'isstring', (['ylabel'], {}), '(ylabel)\n', (18741, 18749), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, 
CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((18803, 18815), 'xinshuo_miscellaneous.isdict', 'isdict', (['data'], {}), '(data)\n', (18809, 18815), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((2338, 2358), 'xinshuo_miscellaneous.islist', 'islist', (['display_list'], {}), '(display_list)\n', (2344, 2358), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((4261, 4289), 'xinshuo_miscellaneous.isnparray', 'isnparray', (['normed_mean_error'], {}), '(normed_mean_error)\n', (4270, 4289), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((9453, 9478), 'xinshuo_miscellaneous.isnparray', 'isnparray', (['featuremap_tmp'], {}), '(featuremap_tmp)\n', (9462, 9478), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((9971, 9991), 'xinshuo_miscellaneous.isstring', 'isstring', (['ext_filter'], {}), '(ext_filter)\n', (9979, 9991), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((10050, 10080), 'xinshuo_miscellaneous.is_path_exists', 'is_path_exists', (['img_src_folder'], {}), '(img_src_folder)\n', (10064, 10080), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((10275, 10318), 'xinshuo_miscellaneous.is_path_exists_or_creatable', 'is_path_exists_or_creatable', (['nn_save_folder'], {}), '(nn_save_folder)\n', (10302, 10318), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((10397, 10422), 'xinshuo_miscellaneous.isstring', 'isstring', (['save_thumb_name'], {}), '(save_thumb_name)\n', (10405, 10422), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((13333, 13359), 'xinshuo_miscellaneous.isgrayimage_dimension', 'isgrayimage_dimension', (['img'], {}), '(img)\n', (13354, 13359), False, 'from xinshuo_miscellaneous import isdict, 
islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((15289, 15301), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (15295, 15301), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((15334, 15346), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (15340, 15346), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((16001, 16022), 'xinshuo_miscellaneous.islist', 'islist', (['data_list_tmp'], {}), '(data_list_tmp)\n', (16007, 16022), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((5492, 5532), 'numpy.sum', 'np.sum', (['y_axis[:error_threshold * scale]'], {}), '(y_axis[:error_threshold * scale])\n', (5498, 5532), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((13457, 13484), 'xinshuo_miscellaneous.iscolorimage_dimension', 'iscolorimage_dimension', (['img'], {}), '(img)\n', (13479, 13484), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((18915, 18932), 'xinshuo_miscellaneous.isstring', 'isstring', (['key_tmp'], {}), '(key_tmp)\n', (18923, 18932), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((19016, 19035), 'xinshuo_miscellaneous.isscalar', 'isscalar', (['value_tmp'], {}), '(value_tmp)\n', (19024, 19035), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((19966, 19986), 'numpy.asarray', 'np.asarray', (['key_list'], {}), '(key_list)\n', (19976, 19986), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((20243, 20263), 'numpy.asarray', 'np.asarray', (['key_list'], {}), '(key_list)\n', (20253, 20263), True, 'import matplotlib.pyplot as plt, numpy as np\n'), ((10150, 10168), 'xinshuo_miscellaneous.isstring', 'isstring', (['path_tmp'], {}), '(path_tmp)\n', (10158, 10168), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((19543, 19560), 'xinshuo_miscellaneous.isstring', 'isstring', (['key_tmp'], {}), '(key_tmp)\n', (19551, 19560), False, 'from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n'), ((19652, 19671), 'xinshuo_miscellaneous.isscalar', 'isscalar', (['value_tmp'], {}), '(value_tmp)\n', (19660, 19671), False, 'from xinshuo_miscellaneous import isdict, islogical, 
is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple\n')]
|
import napari
import time
from napari._qt.qthreading import thread_worker
import numpy as np
# create a viewer window
viewer = napari.Viewer()
# https://napari.org/guides/stable/threading.html
@thread_worker
def loop_run():
while True: # endless loop
print("Hello world", time.time())
time.sleep(0.5)
yield np.random.random((2, 2))
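# note: each value yielded by loop_run() is emitted through the worker's "yielded"
# signal; the update_layer callback connected below is then invoked back on the
# GUI thread, so the napari layer can be updated safely from the background loop.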
def update_layer(image):
"""
Updates the image in the layer 'result'
or adds this layer.
"""
try:
viewer.layers['result'].data = image
except KeyError:
viewer.add_image(image, name='result')
# Start the loop
worker = loop_run()
worker.yielded.connect(update_layer)
worker.start()
# Start napari
napari.run()
|
[
"napari.Viewer",
"numpy.random.random",
"time.sleep",
"napari.run",
"time.time"
] |
[((128, 143), 'napari.Viewer', 'napari.Viewer', ([], {}), '()\n', (141, 143), False, 'import napari\n'), ((701, 713), 'napari.run', 'napari.run', ([], {}), '()\n', (711, 713), False, 'import napari\n'), ((307, 322), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (317, 322), False, 'import time\n'), ((286, 297), 'time.time', 'time.time', ([], {}), '()\n', (295, 297), False, 'import time\n'), ((337, 361), 'numpy.random.random', 'np.random.random', (['(2, 2)'], {}), '((2, 2))\n', (353, 361), True, 'import numpy as np\n')]
|
from arguments import get_args
import numpy as np
from network.models import MLP_Net
from utils.utils import get_env_params
import torch
import os, gym
"""
script to watch the demo of the ESIL
"""
# process the inputs
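# observations and goals are clipped, normalised with the running mean/std saved
# together with the model, clipped again to [-clip_range, clip_range] and finally
# concatenated into a single network input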
def process_inputs(o, g, o_mean, o_std, g_mean, g_std, args):
o_clip = np.clip(o, -args.clip_obs, args.clip_obs)
g_clip = np.clip(g, -args.clip_obs, args.clip_obs)
o_norm = np.clip((o_clip - o_mean) / (o_std), -args.clip_range, args.clip_range)
g_norm = np.clip((g_clip - g_mean) / (g_std), -args.clip_range, args.clip_range)
inputs = np.concatenate([o_norm, g_norm])
inputs = torch.tensor(inputs, dtype=torch.float32).unsqueeze(0)
return inputs
if __name__ == '__main__':
args = get_args()
# create environment
env = gym.make(args.env_name)
# get the environment parameters
env_params = get_env_params(env)
# start to create model
model_path = '{}/{}/model.pt'.format(args.save_dir, args.env_name)
network = MLP_Net(env_params['obs'] + env_params['goal'], env_params['action'], args.dist)
network_model, obs_mean, obs_std, g_mean, g_std = torch.load(model_path, map_location='cpu')
network.load_state_dict(network_model)
network.eval()
# start to do the testing
for i in range(args.demo_length):
observation = env.reset()
# start to do the demo
obs, g = observation['observation'], observation['desired_goal']
for t in range(env._max_episode_steps):
if args.render:
env.render()
inputs = process_inputs(obs, g, obs_mean, obs_std, g_mean, g_std, args)
with torch.no_grad():
_, pi = network(inputs)
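            # the network returns two outputs; only the policy output pi is used here,
            # and at demo time the mean of the Gaussian policy is taken as a deterministic action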
if args.dist == 'gauss':
mean, std = pi
input_actions = mean.detach().cpu().numpy().squeeze()
else:
raise NotImplementedError
# put actions into the environment
observation_new, reward, _, info = env.step(input_actions)
obs = observation_new['observation']
print('the episode is: {}, is success: {}'.format(i, info['is_success']))
|
[
"numpy.clip",
"utils.utils.get_env_params",
"torch.load",
"torch.tensor",
"torch.no_grad",
"network.models.MLP_Net",
"numpy.concatenate",
"arguments.get_args",
"gym.make"
] |
[((295, 336), 'numpy.clip', 'np.clip', (['o', '(-args.clip_obs)', 'args.clip_obs'], {}), '(o, -args.clip_obs, args.clip_obs)\n', (302, 336), True, 'import numpy as np\n'), ((350, 391), 'numpy.clip', 'np.clip', (['g', '(-args.clip_obs)', 'args.clip_obs'], {}), '(g, -args.clip_obs, args.clip_obs)\n', (357, 391), True, 'import numpy as np\n'), ((405, 474), 'numpy.clip', 'np.clip', (['((o_clip - o_mean) / o_std)', '(-args.clip_range)', 'args.clip_range'], {}), '((o_clip - o_mean) / o_std, -args.clip_range, args.clip_range)\n', (412, 474), True, 'import numpy as np\n'), ((490, 559), 'numpy.clip', 'np.clip', (['((g_clip - g_mean) / g_std)', '(-args.clip_range)', 'args.clip_range'], {}), '((g_clip - g_mean) / g_std, -args.clip_range, args.clip_range)\n', (497, 559), True, 'import numpy as np\n'), ((575, 607), 'numpy.concatenate', 'np.concatenate', (['[o_norm, g_norm]'], {}), '([o_norm, g_norm])\n', (589, 607), True, 'import numpy as np\n'), ((733, 743), 'arguments.get_args', 'get_args', ([], {}), '()\n', (741, 743), False, 'from arguments import get_args\n'), ((779, 802), 'gym.make', 'gym.make', (['args.env_name'], {}), '(args.env_name)\n', (787, 802), False, 'import os, gym\n'), ((857, 876), 'utils.utils.get_env_params', 'get_env_params', (['env'], {}), '(env)\n', (871, 876), False, 'from utils.utils import get_env_params\n'), ((990, 1075), 'network.models.MLP_Net', 'MLP_Net', (["(env_params['obs'] + env_params['goal'])", "env_params['action']", 'args.dist'], {}), "(env_params['obs'] + env_params['goal'], env_params['action'], args.dist\n )\n", (997, 1075), False, 'from network.models import MLP_Net\n'), ((1125, 1167), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (1135, 1167), False, 'import torch\n'), ((621, 662), 'torch.tensor', 'torch.tensor', (['inputs'], {'dtype': 'torch.float32'}), '(inputs, dtype=torch.float32)\n', (633, 662), False, 'import torch\n'), ((1642, 1657), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1655, 1657), False, 'import torch\n')]
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from LLC_Membranes.timeseries.forecast_ctrw import System
from LLC_Membranes.llclib import file_rw
import names
residues = ["GCL", "SOH"]
wt = 10
path = "/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11"
colors = ['blue', 'red']
opacity = 1
nbins = 25
lw = 2
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
for j, r in enumerate(residues):
obj = file_rw.load_object('%s/%s/%swt/forecast_%s.pl' % (path, r, wt, r))
hops = []
for i in obj.hop_lengths:
hops += i
print(max(hops))
if j == 0:
hop_hist, edges = np.histogram(hops, density=True, bins=nbins)
bounds = [edges[0], edges[-1]]
else:
hop_hist, edges = np.histogram(hops, density=True, bins=np.linspace(bounds[0], bounds[1], nbins + 1))
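    # build a step-style outline of the histogram: every bin edge appears twice in x
    # and every bar height twice in y, so plotting the pairs traces the tops of the bars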
hop_outline = np.zeros([len(hop_hist)*2 + 2, 2])
hop_outline[::2, 0] = edges
hop_outline[1::2, 0] = edges
hop_outline[1:-1:2, 1] = hop_hist
hop_outline[2:-1:2, 1] = hop_hist
if j == 0:
dwell_hist, edges = np.histogram(obj.dwell_times, density=True, bins=nbins)
bounds_power = [edges[0], edges[-1]]
else:
dwell_hist, edges = np.histogram(obj.dwell_times, density=True, bins=np.linspace(bounds_power[0], bounds_power[1], nbins + 1))
dwell_outline = np.zeros([len(dwell_hist)*2 + 2, 2])
dwell_outline[::2, 0] = edges
dwell_outline[1::2, 0] = edges
dwell_outline[1:-1:2, 1] = dwell_hist
dwell_outline[2:-1:2, 1] = dwell_hist
ax[0].plot(hop_outline[:, 0], hop_outline[:, 1], color=colors[j], alpha=opacity, linewidth=lw)
ax[1].plot(dwell_outline[:, 0], dwell_outline[:, 1], color=colors[j], alpha=opacity, label=names.res_to_name[r], linewidth=lw)
ax[0].tick_params(labelsize=14)
ax[1].tick_params(labelsize=14)
ax[1].legend(fontsize=14)
ax[0].set_ylabel('Frequency', fontsize=14)
ax[0].set_xlabel('Hop Length (nm)', fontsize=14)
ax[1].set_xlabel('Dwell Time (ns)', fontsize=14)
plt.tight_layout()
plt.savefig('dwell_hop_%s.pdf' % '_'.join(residues))
plt.show()
|
[
"numpy.histogram",
"LLC_Membranes.llclib.file_rw.load_object",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((348, 383), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (360, 383), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1900), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1898, 1900), True, 'import matplotlib.pyplot as plt\n'), ((1954, 1964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1962, 1964), True, 'import matplotlib.pyplot as plt\n'), ((425, 492), 'LLC_Membranes.llclib.file_rw.load_object', 'file_rw.load_object', (["('%s/%s/%swt/forecast_%s.pl' % (path, r, wt, r))"], {}), "('%s/%s/%swt/forecast_%s.pl' % (path, r, wt, r))\n", (444, 492), False, 'from LLC_Membranes.llclib import file_rw\n'), ((593, 637), 'numpy.histogram', 'np.histogram', (['hops'], {'density': '(True)', 'bins': 'nbins'}), '(hops, density=True, bins=nbins)\n', (605, 637), True, 'import numpy as np\n'), ((998, 1053), 'numpy.histogram', 'np.histogram', (['obj.dwell_times'], {'density': '(True)', 'bins': 'nbins'}), '(obj.dwell_times, density=True, bins=nbins)\n', (1010, 1053), True, 'import numpy as np\n'), ((736, 780), 'numpy.linspace', 'np.linspace', (['bounds[0]', 'bounds[1]', '(nbins + 1)'], {}), '(bounds[0], bounds[1], nbins + 1)\n', (747, 780), True, 'import numpy as np\n'), ((1171, 1227), 'numpy.linspace', 'np.linspace', (['bounds_power[0]', 'bounds_power[1]', '(nbins + 1)'], {}), '(bounds_power[0], bounds_power[1], nbins + 1)\n', (1182, 1227), True, 'import numpy as np\n')]
|
from Tkinter import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
master = Tk()
goal = 0
var_goal = StringVar()
GAMMA = 0.9
last_state = Variable(torch.Tensor([0,0,0,0,0,0])).unsqueeze(0)
last_action = 0
last_reward = 0
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.lstm = nn.LSTMCell(6, 6)
self.fc = nn.Linear(6, 2)
#self.softmax = nn.LogSoftmax()
self.states = []
self.next_states = []
self.actions = []
self.rewards = []
self.hiddens = []
self.cells = []
def forward(self, input, hidden):
hx,cx = self.lstm(input,hidden)
output = self.fc(hx)
#output = self.softmax(output)
return output, hx, cx
def initHidden(self):
self.cell_state = Variable(torch.zeros(1,6))
self.hidden_state = Variable(torch.zeros(1,6))
model = Policy()
model.initHidden()
last_hidden = model.hidden_state
last_cell = model.cell_state
optimizer = optim.Adam(model.parameters(), lr=0.01)
def select_action(state):
output, model.hidden_state, model.cell_state = model(state, [model.hidden_state, model.cell_state])
print('val '+str(output.data))
probs = F.softmax(output)
print('probs '+str(probs.data))
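    # sample one action index from the softmax distribution
    # (probs.multinomial() is the old torch API; newer releases use torch.multinomial(probs, 1))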
action = probs.multinomial()
return action.data[0,0]
def learn(indice):
state = model.states[indice]
next_state = model.next_states[indice].detach()
action = model.actions[indice]
reward = model.rewards[indice]
hidden = model.hiddens[indice]
cell = model.cells[indice]
output, next_hidden, next_cell = model(state, [hidden, cell])
value = output[0,action]
output,_,_ = model(next_state, [next_hidden.detach(), next_hidden.detach()])
#'''
next_action_probs = F.softmax(output)
next_action = next_action_probs.multinomial().data[0,0]
next_value = output[0,next_action]
'''
next_value = output.max(1)[0]
#'''
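    # the triple-quoted block above keeps the max-over-actions (Q-learning style) target
    # disabled; the active code instead uses the value of an action sampled from the
    # current policy at the next state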
expected = GAMMA*next_value + reward
td_loss = F.smooth_l1_loss(value, expected)
optimizer.zero_grad()
td_loss.backward(retain_variables=True)
optimizer.step()
def update(signal):
global last_action
global last_state
global last_reward
global last_hidden
global last_cell
state = Variable(torch.Tensor([signal,signal,signal,signal,signal,signal]).float()).unsqueeze(0)
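    # store the previous transition only when it produced a nonzero reward,
    # with roughly 10% probability otherwise, or while the memory still holds
    # fewer than 10 transitions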
if np.abs(last_reward)>0 or np.random.rand()>0.9 or len(model.states)<10:
model.states.append(last_state)
model.next_states.append(state)
model.rewards.append(last_reward)
model.actions.append(last_action)
model.hiddens.append(last_hidden)
model.cells.append(last_cell)
last_hidden = model.hidden_state
last_cell = model.cell_state
action = select_action(state)
print(action)
reward = 0
if action==1 and goal==1:
reward = 1
if action==1 and goal==0:
reward = -1
if action==0:
learn(np.random.choice(len(model.states)))
else:
learn(-1)
last_action = action
last_state = state
last_reward = reward
def set_goal(new_goal):
global goal
goal = new_goal
print("goal = "+str(goal))
var_goal.set('goal = '+str(goal))
Button(master, text='S1', height = 10, width = 30, command=lambda:update(0)).grid(row=0, column=0, sticky=W, pady=4)
Button(master, text='S2', height = 10, width = 30, command=lambda:update(1)).grid(row=0, column=1, sticky=W, pady=4)
Button(master, text='goal 0', height = 10, width = 30, command=lambda:set_goal(0)).grid(row=1, column=0, sticky=W, pady=4)
Button(master, text='goal 1', height = 10, width = 30, command=lambda:set_goal(1)).grid(row=1, column=1, sticky=W, pady=4)
Label(master, height = 10, textvariable = var_goal).grid(row=2, sticky=EW, pady=4)
mainloop( )
|
[
"numpy.abs",
"numpy.random.rand",
"torch.nn.LSTMCell",
"torch.Tensor",
"torch.nn.functional.smooth_l1_loss",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.softmax"
] |
[((1365, 1382), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {}), '(output)\n', (1374, 1382), True, 'import torch.nn.functional as F\n'), ((1931, 1948), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {}), '(output)\n', (1940, 1948), True, 'import torch.nn.functional as F\n'), ((2154, 2187), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['value', 'expected'], {}), '(value, expected)\n', (2170, 2187), True, 'import torch.nn.functional as F\n'), ((474, 491), 'torch.nn.LSTMCell', 'nn.LSTMCell', (['(6)', '(6)'], {}), '(6, 6)\n', (485, 491), True, 'import torch.nn as nn\n'), ((510, 525), 'torch.nn.Linear', 'nn.Linear', (['(6)', '(2)'], {}), '(6, 2)\n', (519, 525), True, 'import torch.nn as nn\n'), ((290, 322), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (302, 322), False, 'import torch\n'), ((963, 980), 'torch.zeros', 'torch.zeros', (['(1)', '(6)'], {}), '(1, 6)\n', (974, 980), False, 'import torch\n'), ((1018, 1035), 'torch.zeros', 'torch.zeros', (['(1)', '(6)'], {}), '(1, 6)\n', (1029, 1035), False, 'import torch\n'), ((2523, 2542), 'numpy.abs', 'np.abs', (['last_reward'], {}), '(last_reward)\n', (2529, 2542), True, 'import numpy as np\n'), ((2548, 2564), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2562, 2564), True, 'import numpy as np\n'), ((2435, 2497), 'torch.Tensor', 'torch.Tensor', (['[signal, signal, signal, signal, signal, signal]'], {}), '([signal, signal, signal, signal, signal, signal])\n', (2447, 2497), False, 'import torch\n')]
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
import utils
import glob, os
import pca.dataanalyzer as da, pca.pca as pca
from sklearn.metrics import accuracy_score
# visualize the important characteristics of the dataset
import matplotlib.pyplot as plt
seed = 0
num_headers = 16
data_len = 54*num_headers #1460
dirs = ["C:/Users/salik/Documents/Data/LinuxChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsFirefox/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsSalik/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsAndreas/{}/".format(num_headers)]
# dirs = ["E:/Data/h5/https/", "E:/Data/h5/netflix/"]
# step 1: get the data
dataframes = []
num_examples = 0
for dir in dirs:
for fullname in glob.iglob(dir + '*.h5'):
filename = os.path.basename(fullname)
df = utils.load_h5(dir, filename)
dataframes.append(df)
num_examples = len(df.values)
# create one large dataframe
data = pd.concat(dataframes)
data.sample(frac=1, random_state=seed).reset_index(drop=True)
num_rows = data.shape[0]
columns = data.columns
print(columns)
# step 2: get features (x) and convert it to numpy array
x = da.getbytes(data, data_len)
# step 3: get class labels y and then encode it into number
# get class label data
y = data['label'].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
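# LabelEncoder maps the string class labels to integers 0..n_classes-1;
# class_labels keeps the original names for the plot legend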
# step 4: split the data into training set and test set
test_percentage = 0.5
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_percentage, random_state=seed)
plot_savename = "histogram_payload"
from matplotlib import rcParams
# Make room for xlabel which is otherwise cut off
rcParams.update({'figure.autolayout': True})
# plot a histogram of byte values for each class in the test set
# markers = ('s', 'd', 'o', '^', 'v', ".", ",", "<", ">", "8", "p", "P", "*", "h", "H", "+", "x", "X", "D", "|", "_")
color_map = {0: '#487fff', 1: '#d342ff', 2: '#4eff4e', 3: '#2ee3ff', 4: '#ffca43', 5:'#ff365e', 6:'#626663'}
plt.figure()
for idx, cl in enumerate(np.unique(y_test)):
# Get count of unique values
values, counts = np.unique(x_test[y_test == cl], return_counts=True)
# Maybe remove zero as there is a lot of zeros in the header
# values = values[1:]
# counts = counts[1:]
n, bins, patches = plt.hist(values, weights=counts, bins=256, facecolor=color_map[idx], label=class_labels[cl], alpha=0.8)
plt.legend(loc='upper right')
plt.title('Histogram of : {}'.format(class_labels))
plt.tight_layout()
# plt.savefig('{0}{1}.png'.format(plot_savename, int(perplexity)), dpi=300)
plt.show()
|
[
"sklearn.preprocessing.LabelEncoder",
"utils.load_h5",
"matplotlib.pyplot.hist",
"numpy.unique",
"matplotlib.rcParams.update",
"glob.iglob",
"matplotlib.pyplot.figure",
"pca.dataanalyzer.getbytes",
"sklearn.cross_validation.train_test_split",
"matplotlib.pyplot.tight_layout",
"os.path.basename",
"pandas.concat",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1219, 1240), 'pandas.concat', 'pd.concat', (['dataframes'], {}), '(dataframes)\n', (1228, 1240), True, 'import pandas as pd\n'), ((1428, 1455), 'pca.dataanalyzer.getbytes', 'da.getbytes', (['data', 'data_len'], {}), '(data, data_len)\n', (1439, 1455), True, 'import pca.dataanalyzer as da, pca.pca as pca\n'), ((1606, 1618), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1615, 1618), True, 'import numpy as np\n'), ((1635, 1649), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1647, 1649), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1799, 1867), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'test_percentage', 'random_state': 'seed'}), '(x, y, test_size=test_percentage, random_state=seed)\n', (1815, 1867), False, 'from sklearn.cross_validation import train_test_split\n'), ((1988, 2032), 'matplotlib.rcParams.update', 'rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (2003, 2032), False, 'from matplotlib import rcParams\n'), ((2312, 2324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2322, 2324), True, 'import matplotlib.pyplot as plt\n'), ((2722, 2751), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2732, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2822), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2820, 2822), True, 'import matplotlib.pyplot as plt\n'), ((2899, 2909), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2907, 2909), True, 'import matplotlib.pyplot as plt\n'), ((997, 1021), 'glob.iglob', 'glob.iglob', (["(dir + '*.h5')"], {}), "(dir + '*.h5')\n", (1007, 1021), False, 'import glob, os\n'), ((2350, 2367), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (2359, 2367), True, 'import numpy as np\n'), ((2424, 2475), 'numpy.unique', 'np.unique', (['x_test[y_test == cl]'], {'return_counts': '(True)'}), '(x_test[y_test == cl], return_counts=True)\n', (2433, 2475), True, 'import numpy as np\n'), ((2616, 2724), 'matplotlib.pyplot.hist', 'plt.hist', (['values'], {'weights': 'counts', 'bins': '(256)', 'facecolor': 'color_map[idx]', 'label': 'class_labels[cl]', 'alpha': '(0.8)'}), '(values, weights=counts, bins=256, facecolor=color_map[idx], label=\n class_labels[cl], alpha=0.8)\n', (2624, 2724), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1068), 'os.path.basename', 'os.path.basename', (['fullname'], {}), '(fullname)\n', (1058, 1068), False, 'import glob, os\n'), ((1082, 1110), 'utils.load_h5', 'utils.load_h5', (['dir', 'filename'], {}), '(dir, filename)\n', (1095, 1110), False, 'import utils\n')]
|
# -*- coding: utf-8 -*-
import time
import numpy as np
from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector
class Driver(BaseDriver):
error_command = '*ESR?'
support_models = ['AFG3102']
quants = [
QOption('Output',ch=1,
set_cmd='OUTP%(ch)d %(option)s', get_cmd='OUTP%(ch)d?',
options=[('OFF', 'OFF'), ('ON', 'ON')]), # must set chanel
QOption('Function',ch=1,set_cmd='SOUR%(ch)d:FUNC %(option)s',get_cmd='SOUR%(ch)d:FUNC?',
options=[('Sin','SIN'),('Square','SQU'),('Pulse','PULS'),('Ramp','RAMP'),
('PRNoise','PRN'),('DC','DC'),('SINC','SINC'),('Gaussian','GAUS'),
('Lorentz','LOR'),('Erise','ERIS'),('Edecay','EDEC'),('Haversine','HAV'),
('User','USER'),('User2','USER2')]),
QReal('Frequency',unit='Hz',ch=1,set_cmd='SOUR%(ch)d:FREQ %(value)e%(unit)s',get_cmd='SOUR%(ch)d:FREQ?'),
QReal('Phase',unit='rad',ch=1,set_cmd='SOUR%(ch)d:PHAS %(value)f%(unit)s',get_cmd='SOUR%(ch)d:PHAS?'),
QReal('Pulse Delay',unit='s',ch=1,set_cmd='SOUR%(ch)d:PULS:DEL %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:PULS:DEL?'),
QReal('Pulse Period',unit='s',ch=1,set_cmd='SOUR%(ch)d:PULS:PER %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:PULS:PER?'),
QReal('Pulse Width',unit='s',ch=1,set_cmd='SOUR%(ch)d:PULS:WIDT %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:PULS:WIDT?'),
#Burst Mode
QReal('Burst Tdelay',unit='s',ch=1,set_cmd='SOUR%(ch)d:BURS:TDEL %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:BURS:TDEL?'),
QReal('Burst Ncycles',ch=1,set_cmd='SOUR%(ch)d:BURS:NCYC %(value)d',get_cmd='SOUR%(ch)d:BURS:NCYC?'),
##
QReal('Frequency',unit='Hz',ch=1,set_cmd='SOUR%(ch)d:FREQ %(value)e%(unit)s',get_cmd='SOUR%(ch)d:FREQ?'),
QReal('Phase',unit='DEG',ch=1,set_cmd='SOUR%(ch)d:PHAS %(value)f%(unit)s',get_cmd='SOUR%(ch)d:PHAS?'),
QReal('High Level',unit='V',ch=1,set_cmd='SOUR%(ch)d:VOLT:HIGH %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:HIGH?'),
QReal('Low Level',unit='V',ch=1,set_cmd='SOUR%(ch)d:VOLT:LOW %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:LOW?'),
QReal('Offset',unit='V',ch=1,set_cmd='SOUR%(ch)d:VOLT:OFFS %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:OFFS?'),
QReal('Amplitude',unit='VPP',ch=1,set_cmd='SOUR%(ch)d:VOLT:AMPL %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:AMPL?'),
]
def reset(self,delay1=0,delay2=0):
#init
self.write('*CLS')
self.write('*RST')
#set external clock;external source;burst mode&cycle=1&trigdelay=0
self.write('SOURce:ROSCillator:SOURce EXT')
self.write('TRIGger:SEQuence:SOURce EXTernal')
self.write('SOURce1:BURSt:STATe ON')
self.write('SOURce1:BURSt:NCYCles 1')
self.write('SOURce1:BURSt:MODE TRIGgered')
self.write('SOURce1:BURSt:DELay %fus' %delay1)
self.write('SOURce2:BURSt:STATe ON')
self.write('SOURce2:BURSt:NCYCles 1')
self.write('SOURce2:BURSt:MODE TRIGgered')
self.write('SOURce2:BURSt:TDELay %fns' %delay2)
    # write or update the actual waveform in the waveform memory that has been created
def upwave(self,points,ch=1,T0=100):
pointslen=len(points)
pointslen2=2*pointslen
        # write the waveform data
self.write('DATA:DEFine EMEMory,%d' %pointslen)
self.write('DATA:POINts EMEMory, %d' %pointslen)
message=':DATA:DATA EMEMory,'# % (len(str(pointslen2)),pointslen2)
points = points.clip(-1,1)
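        # intended mapping: take the clipped samples in [-1, 1] onto the instrument's
        # 14-bit range, value = sample * 8191 + 8192, i.e. roughly 1 .. 16383 with 8192 as mid-scale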
values=np.zeros(pointslen).astype(np.uint16)
        # the factor 8191 is chosen so that the final value never exceeds 16383
values = (points * 8191).astype(np.uint16)+8192 #.astype(np.uint16)
byte=np.zeros(pointslen2).astype(np.uint8)
        # split each original 16-bit data point into a high byte and a low byte
byte[1:pointslen2:2]=(values & 0b11111111).astype(np.uint8)
byte[0:pointslen2:2]=((values & 0b11111100000000) >> 8).astype(np.uint8)
        # the message argument of write_binary_values must not include the '#42048' block header,
        # because pyvisa computes it automatically; see to_binary_block in pyvisa's util.py
        # the AFG3102 uses big-endian order, i.e. the binary block is written in exactly the order given here
self.write_binary_values(message, byte, datatype='B',is_big_endian=False,termination=None, encoding=None)
# self.write('enable' )
self.write('TRAC:COPY USER%d,EMEM' %ch)
self.write('SOURce%d:FUNCTION USER%d' %(ch,ch))
        # set frequency: the total waveform length is determined by this parameter, e.g. 1 MHz means the waveform length is set to 1 us
self.write('SOURce%d:FREQuency:FIXed %fkHz' %(ch,1e3/T0))
self.write('OUTPut%d:STATe ON' %ch)
|
[
"qulab.device.QReal",
"qulab.device.QOption",
"numpy.zeros"
] |
[((252, 376), 'qulab.device.QOption', 'QOption', (['"""Output"""'], {'ch': '(1)', 'set_cmd': '"""OUTP%(ch)d %(option)s"""', 'get_cmd': '"""OUTP%(ch)d?"""', 'options': "[('OFF', 'OFF'), ('ON', 'ON')]"}), "('Output', ch=1, set_cmd='OUTP%(ch)d %(option)s', get_cmd=\n 'OUTP%(ch)d?', options=[('OFF', 'OFF'), ('ON', 'ON')])\n", (259, 376), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((416, 806), 'qulab.device.QOption', 'QOption', (['"""Function"""'], {'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:FUNC %(option)s"""', 'get_cmd': '"""SOUR%(ch)d:FUNC?"""', 'options': "[('Sin', 'SIN'), ('Square', 'SQU'), ('Pulse', 'PULS'), ('Ramp', 'RAMP'), (\n 'PRNoise', 'PRN'), ('DC', 'DC'), ('SINC', 'SINC'), ('Gaussian', 'GAUS'),\n ('Lorentz', 'LOR'), ('Erise', 'ERIS'), ('Edecay', 'EDEC'), ('Haversine',\n 'HAV'), ('User', 'USER'), ('User2', 'USER2')]"}), "('Function', ch=1, set_cmd='SOUR%(ch)d:FUNC %(option)s', get_cmd=\n 'SOUR%(ch)d:FUNC?', options=[('Sin', 'SIN'), ('Square', 'SQU'), (\n 'Pulse', 'PULS'), ('Ramp', 'RAMP'), ('PRNoise', 'PRN'), ('DC', 'DC'), (\n 'SINC', 'SINC'), ('Gaussian', 'GAUS'), ('Lorentz', 'LOR'), ('Erise',\n 'ERIS'), ('Edecay', 'EDEC'), ('Haversine', 'HAV'), ('User', 'USER'), (\n 'User2', 'USER2')])\n", (423, 806), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((826, 939), 'qulab.device.QReal', 'QReal', (['"""Frequency"""'], {'unit': '"""Hz"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:FREQ %(value)e%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:FREQ?"""'}), "('Frequency', unit='Hz', ch=1, set_cmd=\n 'SOUR%(ch)d:FREQ %(value)e%(unit)s', get_cmd='SOUR%(ch)d:FREQ?')\n", (831, 939), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((940, 1050), 'qulab.device.QReal', 'QReal', (['"""Phase"""'], {'unit': '"""rad"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:PHAS %(value)f%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:PHAS?"""'}), "('Phase', unit='rad', ch=1, set_cmd=\n 'SOUR%(ch)d:PHAS %(value)f%(unit)s', get_cmd='SOUR%(ch)d:PHAS?')\n", (945, 1050), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1051, 1175), 'qulab.device.QReal', 'QReal', (['"""Pulse Delay"""'], {'unit': '"""s"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:PULS:DEL %(value).9e%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:PULS:DEL?"""'}), "('Pulse Delay', unit='s', ch=1, set_cmd=\n 'SOUR%(ch)d:PULS:DEL %(value).9e%(unit)s', get_cmd='SOUR%(ch)d:PULS:DEL?')\n", (1056, 1175), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1176, 1301), 'qulab.device.QReal', 'QReal', (['"""Pulse Period"""'], {'unit': '"""s"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:PULS:PER %(value).9e%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:PULS:PER?"""'}), "('Pulse Period', unit='s', ch=1, set_cmd=\n 'SOUR%(ch)d:PULS:PER %(value).9e%(unit)s', get_cmd='SOUR%(ch)d:PULS:PER?')\n", (1181, 1301), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1302, 1433), 'qulab.device.QReal', 'QReal', (['"""Pulse Width"""'], {'unit': '"""s"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:PULS:WIDT %(value).9e%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:PULS:WIDT?"""'}), "('Pulse Width', unit='s', ch=1, set_cmd=\n 'SOUR%(ch)d:PULS:WIDT %(value).9e%(unit)s', get_cmd='SOUR%(ch)d:PULS:WIDT?'\n )\n", (1307, 1433), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1449, 1581), 'qulab.device.QReal', 'QReal', 
(['"""Burst Tdelay"""'], {'unit': '"""s"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:BURS:TDEL %(value).9e%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:BURS:TDEL?"""'}), "('Burst Tdelay', unit='s', ch=1, set_cmd=\n 'SOUR%(ch)d:BURS:TDEL %(value).9e%(unit)s', get_cmd='SOUR%(ch)d:BURS:TDEL?'\n )\n", (1454, 1581), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1577, 1684), 'qulab.device.QReal', 'QReal', (['"""Burst Ncycles"""'], {'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:BURS:NCYC %(value)d"""', 'get_cmd': '"""SOUR%(ch)d:BURS:NCYC?"""'}), "('Burst Ncycles', ch=1, set_cmd='SOUR%(ch)d:BURS:NCYC %(value)d',\n get_cmd='SOUR%(ch)d:BURS:NCYC?')\n", (1582, 1684), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1698, 1811), 'qulab.device.QReal', 'QReal', (['"""Frequency"""'], {'unit': '"""Hz"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:FREQ %(value)e%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:FREQ?"""'}), "('Frequency', unit='Hz', ch=1, set_cmd=\n 'SOUR%(ch)d:FREQ %(value)e%(unit)s', get_cmd='SOUR%(ch)d:FREQ?')\n", (1703, 1811), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1812, 1922), 'qulab.device.QReal', 'QReal', (['"""Phase"""'], {'unit': '"""DEG"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:PHAS %(value)f%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:PHAS?"""'}), "('Phase', unit='DEG', ch=1, set_cmd=\n 'SOUR%(ch)d:PHAS %(value)f%(unit)s', get_cmd='SOUR%(ch)d:PHAS?')\n", (1817, 1922), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((1923, 2046), 'qulab.device.QReal', 'QReal', (['"""High Level"""'], {'unit': '"""V"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:VOLT:HIGH %(value)f%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:VOLT:HIGH?"""'}), "('High Level', unit='V', ch=1, set_cmd=\n 'SOUR%(ch)d:VOLT:HIGH %(value)f%(unit)s', get_cmd='SOUR%(ch)d:VOLT:HIGH?')\n", (1928, 2046), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((2047, 2167), 'qulab.device.QReal', 'QReal', (['"""Low Level"""'], {'unit': '"""V"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:VOLT:LOW %(value)f%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:VOLT:LOW?"""'}), "('Low Level', unit='V', ch=1, set_cmd=\n 'SOUR%(ch)d:VOLT:LOW %(value)f%(unit)s', get_cmd='SOUR%(ch)d:VOLT:LOW?')\n", (2052, 2167), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((2168, 2287), 'qulab.device.QReal', 'QReal', (['"""Offset"""'], {'unit': '"""V"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:VOLT:OFFS %(value)f%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:VOLT:OFFS?"""'}), "('Offset', unit='V', ch=1, set_cmd=\n 'SOUR%(ch)d:VOLT:OFFS %(value)f%(unit)s', get_cmd='SOUR%(ch)d:VOLT:OFFS?')\n", (2173, 2287), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((2288, 2412), 'qulab.device.QReal', 'QReal', (['"""Amplitude"""'], {'unit': '"""VPP"""', 'ch': '(1)', 'set_cmd': '"""SOUR%(ch)d:VOLT:AMPL %(value)f%(unit)s"""', 'get_cmd': '"""SOUR%(ch)d:VOLT:AMPL?"""'}), "('Amplitude', unit='VPP', ch=1, set_cmd=\n 'SOUR%(ch)d:VOLT:AMPL %(value)f%(unit)s', get_cmd='SOUR%(ch)d:VOLT:AMPL?')\n", (2293, 2412), False, 'from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector\n'), ((3485, 3504), 'numpy.zeros', 'np.zeros', (['pointslen'], {}), '(pointslen)\n', (3493, 3504), True, 'import numpy as np\n'), ((3645, 3665), 'numpy.zeros', 'np.zeros', (['pointslen2'], {}), '(pointslen2)\n', (3653, 
3665), True, 'import numpy as np\n')]
|
import pickle
import pytest
import numpy as np
from astropy.coordinates import Longitude
from astropy import coordinates as coord
from astropy.tests.helper import pickle_protocol, check_pickling_recovery # noqa
# Can't test distances without scipy due to cosmology deps
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_basic():
lon1 = Longitude(1.23, "radian", wrap_angle='180d')
s = pickle.dumps(lon1)
lon2 = pickle.loads(s)
def test_pickle_longitude_wrap_angle():
a = Longitude(1.23, "radian", wrap_angle='180d')
s = pickle.dumps(a)
b = pickle.loads(s)
assert a.rad == b.rad
assert a.wrap_angle == b.wrap_angle
_names = [coord.Angle,
coord.Distance,
coord.DynamicMatrixTransform,
coord.ICRS,
coord.Latitude,
coord.Longitude,
coord.StaticMatrixTransform,
]
_xfail = [False,
not HAS_SCIPY,
True,
True,
False,
True,
False]
_args = [[0.0],
[],
[lambda *args: np.identity(3), coord.ICRS, coord.ICRS],
[0, 0],
[0],
[0],
[np.identity(3), coord.ICRS, coord.ICRS],
]
_kwargs = [{'unit': 'radian'},
{'z': 0.23},
{},
{'unit': ['radian', 'radian']},
{'unit': 'radian'},
{'unit': 'radian'},
{},
]
@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"),
zip(_names, _args, _kwargs, _xfail))
def test_simple_object(pickle_protocol, name, args, kwargs, xfail):
# Tests easily instantiated objects
if xfail:
pytest.xfail()
original = name(*args, **kwargs)
check_pickling_recovery(original, pickle_protocol)
|
[
"numpy.identity",
"pickle.dumps",
"astropy.coordinates.Longitude",
"pickle.loads",
"astropy.tests.helper.check_pickling_recovery",
"pytest.xfail"
] |
[((369, 413), 'astropy.coordinates.Longitude', 'Longitude', (['(1.23)', '"""radian"""'], {'wrap_angle': '"""180d"""'}), "(1.23, 'radian', wrap_angle='180d')\n", (378, 413), False, 'from astropy.coordinates import Longitude\n'), ((422, 440), 'pickle.dumps', 'pickle.dumps', (['lon1'], {}), '(lon1)\n', (434, 440), False, 'import pickle\n'), ((452, 467), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (464, 467), False, 'import pickle\n'), ((518, 562), 'astropy.coordinates.Longitude', 'Longitude', (['(1.23)', '"""radian"""'], {'wrap_angle': '"""180d"""'}), "(1.23, 'radian', wrap_angle='180d')\n", (527, 562), False, 'from astropy.coordinates import Longitude\n'), ((571, 586), 'pickle.dumps', 'pickle.dumps', (['a'], {}), '(a)\n', (583, 586), False, 'import pickle\n'), ((595, 610), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (607, 610), False, 'import pickle\n'), ((1738, 1788), 'astropy.tests.helper.check_pickling_recovery', 'check_pickling_recovery', (['original', 'pickle_protocol'], {}), '(original, pickle_protocol)\n', (1761, 1788), False, 'from astropy.tests.helper import pickle_protocol, check_pickling_recovery\n'), ((1170, 1184), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1181, 1184), True, 'import numpy as np\n'), ((1682, 1696), 'pytest.xfail', 'pytest.xfail', ([], {}), '()\n', (1694, 1696), False, 'import pytest\n'), ((1074, 1088), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1085, 1088), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Copyright (C) 2017 ShadowMan
#
import operator
import numpy as np
group = np.array([
[1.0, 1.1],
[1.0, 1.0],
[0.0, 0.0],
[0.0, 0.1]
])
labels = ['A', 'A', 'B','B']
def auto_normal(data_set):
    # find the minimum of each column
    # axis: 1 means along the rows, 0 means along the columns
    min_values = data_set.min(axis = 0)
    # find the maximum of each column
    # axis: 1 means along the rows, 0 means along the columns
    max_values = data_set.max(axis = 0)
    # range between the maximum and the minimum
    diff_range = max_values - min_values
    # array that will hold the normalized data
    normal_array = np.zeros(data_set.shape)
    # number of rows
    row_count = data_set.shape[0]
    # subtract the per-column minimum from every row
    normal_array = data_set - np.tile(min_values, (row_count, 1))
    # compute the normalized array
    # e.g. mapping [1, 2, 3, 4, 5] into the range 0 - 1:
    # the minimum is 1, the maximum is 5, so the range is 4
    # subtracting the minimum from every value gives: [0, 1, 2, 3, 4]
    # finally, dividing every value by the range gives: [0, .25, .5, .75, 1]
    # dividing by the maximum directly would be wrong, because the minimum has to map to 0 and the maximum to 1,
    # so the minimum is shifted to 0 first and everything is then divided by the shifted maximum (i.e. max - min)
    normal_array = normal_array / diff_range
    # return the result
return normal_array, min_values, diff_range
def knn_classify(inX, data_set, labels, k):
# 将输入数据集进行归一化
data_set, min_values, diff_range = auto_normal(data_set)
# 将将要预测值进行归一化
inX = (inX - min_values) / diff_range
    # shape gives the size of the array/matrix as [number of rows, number of columns]
data_set_row_size = data_set.shape[0] # >>> 4
    # Tile (repeat) an array/matrix
    # If the second argument is an int n, repeat the first argument n times along the column direction
    # [[1,2],[3,4]] 2 -> [[1,2,1,2],[3,4,3,4]]
    # If the second argument is a tuple t, repeat t[0] times along the column direction and t[1] times along the row direction
    # [[1,2],[3,4]] (2, 3) -> [[1,2,1,2],[3,4,3,4],[1,2,1,2],[3,4,3,4],[1,2,1,2],[3,4,3,4]]
extra_array = np.tile(inX, (data_set_row_size, 1)) # >>> [[1.0, 0.9], [1.0, 0.9], [1.0, 0.9], [1.0, 0.9]]
    # Compare the tiled input against every data row to compute the distances
    # The input has already been tiled to the same number of rows as the data set
    # So tiled input array - existing data array => the difference between the input and every data row
# [[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]]
# [[1.0, 0.9], [1.0, 0.9], [1.0, 0.9], [1.0, 0.9]]
#--------------------------------------------------
# [[0.0, 0.2], [0.0, 0.1], [-1., -.9], [-1., -.8]]
difference_array = extra_array - data_set # >>> [[0.0, 0.2], [0.0, 0.1], [-1., -.9], [-1., -.8]]
    # Square the differences
square_difference_array = difference_array ** 2 # >>> [[0.0, 0.04], [0.0, 0.01], [1.0, 0.81], [1.0, 0.64]]
    # Sum the squared differences
    # axis = 1 => sum within each row, [[1,2],[3,4]] => [3,7]
    # axis = 0 -> sum within each column, [[1,2],[3,4]] => [4,6]
    # So this adds up the squared differences for every data row
square_difference_matrix_sum = square_difference_array.sum(axis=1) # >>> [0.04, 0.01, 1.81, 1.64]
    # Compute the distance: take the square root of each sum of squares
    # The distance formula is: sqrt( sum( ((X1 - X2) ** 2) + ((Y1 - Y2) ** 2) ) )
    # This is just the distance between two points on the coordinate plane, i.e. the third side of the triangle
distances = square_difference_matrix_sum ** 0.5 # >>> [0.2, 0.1, 1.3453624, 1.28062485]
    # Sort by distance
    # The return value is an array of indices that sort the input array from smallest to largest
    # np.array([2, 1, 3]) => [1, 0, 2]
    # The smallest value is at index 1, then index 0, and finally index 2
sorted_distances = distances.argsort() # >>> [1, 0, 3, 2]
    # Store the labels of the top k best matches
    # label => occurs_count
vote_labels = {}
for i in range(k):
        # Get the label of the i-th best match
        # sorted_distances[i] => index of the label at the i-th smallest distance
label = labels[sorted_distances[i]]
        # Count how many times the matched label has occurred
vote_labels[label] = vote_labels.get(label, 0) + 1
    # Vote according to how many times each label occurs
# operator.itemgetter(1) <===> lambda el: el[1]
sorted_vote_labels = sorted(vote_labels.items(), key=operator.itemgetter(1), reverse=True)
    # Return the best-matching label
return sorted_vote_labels[0][0]
if __name__ == '__main__':
print(knn_classify((1.0, 0.5), group, labels, 2))
print(knn_classify((18, 90), np.array([
[3, 104], [2, 100], [1, 81], [101, 10], [99, 5], [98, 2]
]), [ 'M', 'M', 'M', 'A', 'A', 'A'], 5))
|
[
"numpy.array",
"numpy.zeros",
"numpy.tile",
"operator.itemgetter"
] |
[((101, 159), 'numpy.array', 'np.array', (['[[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]]'], {}), '([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])\n', (109, 159), True, 'import numpy as np\n'), ((497, 521), 'numpy.zeros', 'np.zeros', (['data_set.shape'], {}), '(data_set.shape)\n', (505, 521), True, 'import numpy as np\n'), ((1606, 1642), 'numpy.tile', 'np.tile', (['inX', '(data_set_row_size, 1)'], {}), '(inX, (data_set_row_size, 1))\n', (1613, 1642), True, 'import numpy as np\n'), ((622, 657), 'numpy.tile', 'np.tile', (['min_values', '(row_count, 1)'], {}), '(min_values, (row_count, 1))\n', (629, 657), True, 'import numpy as np\n'), ((3358, 3380), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3377, 3380), False, 'import operator\n'), ((3562, 3630), 'numpy.array', 'np.array', (['[[3, 104], [2, 100], [1, 81], [101, 10], [99, 5], [98, 2]]'], {}), '([[3, 104], [2, 100], [1, 81], [101, 10], [99, 5], [98, 2]])\n', (3570, 3630), True, 'import numpy as np\n')]
|
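The comments in the kNN snippet above walk through min-max normalization (mapping [1, 2, 3, 4, 5] to [0, .25, .5, .75, 1]) and the tile / subtract / square / sum / sqrt distance chain. The standalone sketch below is illustrative only (its variable names are not taken from the snippet) and simply reproduces that arithmetic so the intermediate values quoted in the comments can be checked.

# Illustrative re-run of the normalization and distance arithmetic described above
import numpy as np

column = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
min_v, max_v = column.min(), column.max()          # 1.0 and 5.0
normalized = (column - min_v) / (max_v - min_v)    # [0., 0.25, 0.5, 0.75, 1.]
print(normalized)

# Euclidean distance from one query point to every training row
data = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
query = np.array([1.0, 0.9])
diff = np.tile(query, (data.shape[0], 1)) - data
distances = np.sqrt((diff ** 2).sum(axis=1))   # [0.2, 0.1, ~1.345, ~1.281]
print(distances)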
from math import pi, sin, cos
from panda3d.core import *
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from floorplan import Floorplan
import numpy as np
import random
import copy
class Viewer(ShowBase):
def __init__(self):
ShowBase.__init__(self)
#self.scene = self.loader.loadModel("floorplan_1.txt-floor.obj")
#self.scene = base.loader.loadModel("floorplan_1.txt-floor.egg")
#self.scene = base.loader.loadModel("panda.egg")
#self.scene = base.loader.loadModel("environment")
base.setBackgroundColor(0, 0, 0)
self.angle = 0.0
lens = PerspectiveLens()
lens.setFov(60)
lens.setNear(0.01)
lens.setFar(100000)
base.cam.node().setLens(lens)
floorplan = Floorplan('test/floorplan_7')
#floorplan.setFilename('test/floorplan_2')
floorplan.read()
self.scene = floorplan.generateEggModel()
self.scene.reparentTo(self.render)
#self.scene.setScale(0.01, 0.01, 0.01)
#self.scene.setTwoSided(True)
self.scene.setTwoSided(True)
#self.scene.setPos(0, 0, 3)
#texture = loader.loadTexture("floorplan_1.png")
#self.scene.setTexture(texture)
#self.scene.setHpr(0, 0, 0)
# angleDegrees = 0
# angleRadians = angleDegrees * (pi / 180.0)
# self.camera.setPos(20 * sin(angleRadians), -20 * cos(angleRadians), 3)
# self.camera.setHpr(angleDegrees, 0, 0)
#self.camera.lookAt(0, 0, 0)
self.alight = AmbientLight('alight')
self.alight.setColor(VBase4(0.2, 0.2, 0.2, 1))
self.alnp = self.render.attachNewNode(self.alight)
self.render.setLight(self.alnp)
dlight = DirectionalLight('dlight')
dlight.setColor(VBase4(1, 1, 1, 1))
dlnp = self.render.attachNewNode(dlight)
#dlnp.setHpr(0, -90, 0)
dlnp.setPos(0.5, 0.5, 3)
dlnp.lookAt(0.5, 0.5, 2)
self.render.setLight(dlnp)
for i in xrange(10):
plight = PointLight('plight')
plight.setAttenuation((1, 0, 1))
color = random.randint(10, 15)
plight.setColor(VBase4(color, color, color, 1))
plnp = self.render.attachNewNode(plight)
if i == 0:
plnp.setPos(0.5, 0.5, 3)
else:
plnp.setPos(1 * random.random(), 1 * random.random(), 0.3)
pass
self.render.setLight(plnp)
#base.useTrackball()
#base.trackball.node().setPos(2.0, 0, 3)
#base.trackball.node().setHpr(0, 0, 3)
#base.enableMouse()
#base.useDrive()
base.disableMouse()
self.taskMgr.add(self.spinCameraTask, "SpinCameraTask")
#self.accept('arrow_up', self.moveForward)
#self.accept('arrow_up_-repeat', self.moveForward)
self.topDownCameraPos = [0.5, 0.5, 1.5]
self.topDownTarget = [0.5, 0.499, 0.5]
self.topDownH = 0
self.startCameraPos = floorplan.startCameraPos
self.startTarget = floorplan.startTarget
self.startH = 0
self.cameraPos = self.topDownCameraPos
self.target = self.topDownTarget
self.H = self.topDownH
self.accept('space', self.openDoor)
self.accept('enter', self.startChangingView)
self.viewMode = 'T'
self.viewChangingProgress = 1.02
ceiling = self.scene.find("**/ceiling")
ceiling.hide()
return
def moveForward(self):
self.cameraPos[0] -= 0.1
def openDoor(self):
minDistance = 10000
doors = self.scene.find("**/doors")
for door in doors.getChildren():
mins, maxs = door.getTightBounds()
vec_1 = (mins + maxs) / 2 - Vec3(self.target[0], self.target[1], (mins[2] + maxs[2]) / 2)
vec_2 = (mins + maxs) / 2 - Vec3(self.cameraPos[0], self.cameraPos[1], (mins[2] + maxs[2]) / 2)
if (vec_1.dot(vec_2) > 0 and vec_1.length() > vec_2.length()) or np.arccos(abs(vec_1.dot(vec_2)) / (vec_1.length() * vec_2.length())) > np.pi / 4:
continue
distance = pow(pow(self.cameraPos[0] - (mins[0] + maxs[0]) / 2, 2) + pow(self.cameraPos[1] - (mins[1] + maxs[1]) / 2, 2) + pow(self.cameraPos[2] - (mins[2] + maxs[2]) / 2, 2), 0.5)
if distance < minDistance:
minDistanceDoor = door
minDistance = distance
pass
continue
if minDistance > 1:
return
mins, maxs = minDistanceDoor.getTightBounds()
if abs(maxs[0] - mins[0]) > abs(maxs[1] - mins[1]):
minsExpected = Vec3(mins[0] - (maxs[1] - mins[1]), mins[1], mins[2])
maxsExpected = Vec3(mins[0], mins[1] + (maxs[0] - mins[0]), maxs[2])
else:
minsExpected = Vec3(mins[0] - (maxs[1] - mins[1]) + (maxs[0] - mins[0]), mins[1] - (maxs[0] - mins[0]), mins[2])
maxsExpected = Vec3(mins[0] + (maxs[0] - mins[0]), mins[1] + (maxs[0] - mins[0]) - (maxs[0] - mins[0]), maxs[2])
pass
minDistanceDoor.setH(minDistanceDoor, 90)
mins, maxs = minDistanceDoor.getTightBounds()
minDistanceDoor.setPos(minDistanceDoor, minsExpected[1] - mins[1], -minsExpected[0] + mins[0], 0)
#print(scene.findAllMatches('doors'))
return
def startChangingView(self):
self.viewChangingProgress = 0
self.prevCameraPos = copy.deepcopy(self.cameraPos)
self.prevTarget = copy.deepcopy(self.target)
self.prevH = self.camera.getR()
if self.viewMode == 'T':
self.newCameraPos = self.startCameraPos
self.newTarget = self.startTarget
self.newH = self.startH
self.viewMode = 'C'
else:
self.newCameraPos = self.topDownCameraPos
self.newTarget = self.topDownTarget
self.newH = self.topDownH
self.startCameraPos = copy.deepcopy(self.cameraPos)
self.startTarget = copy.deepcopy(self.target)
self.startH = self.camera.getR()
self.viewMode = 'T'
pass
return
def changeView(self):
self.cameraPos = []
self.target = []
for c in xrange(3):
self.cameraPos.append(self.prevCameraPos[c] + (self.newCameraPos[c] - self.prevCameraPos[c]) * self.viewChangingProgress)
self.target.append(self.prevTarget[c] + (self.newTarget[c] - self.prevTarget[c]) * self.viewChangingProgress)
continue
self.H = self.prevH + (self.newH - self.prevH) * self.viewChangingProgress
if self.viewChangingProgress + 0.02 >= 1 and self.viewMode == 'C':
ceiling = self.scene.find("**/ceiling")
ceiling.show()
pass
if self.viewChangingProgress <= 0.02 and self.viewMode == 'T':
ceiling = self.scene.find("**/ceiling")
ceiling.hide()
pass
return
def spinCameraTask(self, task):
#print(task.time)
#angleDegrees = task.time * 6.0
movementStep = 0.003
if self.viewChangingProgress <= 1.01:
self.changeView()
self.viewChangingProgress += 0.02
pass
if base.mouseWatcherNode.is_button_down('w'):
for c in xrange(2):
step = movementStep * (self.target[c] - self.cameraPos[c])
self.cameraPos[c] += step
self.target[c] += step
continue
pass
if base.mouseWatcherNode.is_button_down('s'):
for c in xrange(2):
step = movementStep * (self.target[c] - self.cameraPos[c])
self.cameraPos[c] -= step
self.target[c] -= step
continue
pass
if base.mouseWatcherNode.is_button_down('a'):
step = movementStep * (self.target[0] - self.cameraPos[0])
self.cameraPos[1] += step
self.target[1] += step
step = movementStep * (self.target[1] - self.cameraPos[1])
self.cameraPos[0] -= step
self.target[0] -= step
pass
if base.mouseWatcherNode.is_button_down('d'):
step = movementStep * (self.target[0] - self.cameraPos[0])
self.cameraPos[1] -= step
self.target[1] -= step
step = movementStep * (self.target[1] - self.cameraPos[1])
self.cameraPos[0] += step
self.target[0] += step
pass
rotationStep = 0.02
if base.mouseWatcherNode.is_button_down('arrow_left'):
angle = np.angle(complex(self.target[0] - self.cameraPos[0], self.target[1] - self.cameraPos[1]))
angle += rotationStep
self.target[0] = self.cameraPos[0] + np.cos(angle)
self.target[1] = self.cameraPos[1] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_right'):
angle = np.angle(complex(self.target[0] - self.cameraPos[0], self.target[1] - self.cameraPos[1]))
angle -= rotationStep
self.target[0] = self.cameraPos[0] + np.cos(angle)
self.target[1] = self.cameraPos[1] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_up'):
angle = np.arcsin(self.target[2] - self.cameraPos[2])
angle += rotationStep
self.target[2] = self.cameraPos[2] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_down'):
angle = np.arcsin(self.target[2] - self.cameraPos[2])
angle -= rotationStep
self.target[2] = self.cameraPos[2] + np.sin(angle)
pass
angleDegrees = self.angle
angleRadians = angleDegrees * (pi / 180.0)
#self.camera.setPos(2.0 * sin(angleRadians), -2.0 * cos(angleRadians), 3)
self.camera.setPos(self.cameraPos[0], self.cameraPos[1], self.cameraPos[2])
#self.camera.setHpr(angleDegrees, 0, 0)
#self.camera.lookAt(0, 0, 0)
self.camera.lookAt(self.target[0], self.target[1], self.target[2])
self.camera.setR(self.H)
#if base.mouseWatcherNode.hasMouse()
return Task.cont
app = Viewer()
app.run()
|
[
"numpy.arcsin",
"floorplan.Floorplan",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"direct.showbase.ShowBase.ShowBase.__init__",
"random.random",
"random.randint"
] |
[((261, 284), 'direct.showbase.ShowBase.ShowBase.__init__', 'ShowBase.__init__', (['self'], {}), '(self)\n', (278, 284), False, 'from direct.showbase.ShowBase import ShowBase\n'), ((737, 766), 'floorplan.Floorplan', 'Floorplan', (['"""test/floorplan_7"""'], {}), "('test/floorplan_7')\n", (746, 766), False, 'from floorplan import Floorplan\n'), ((4975, 5004), 'copy.deepcopy', 'copy.deepcopy', (['self.cameraPos'], {}), '(self.cameraPos)\n', (4988, 5004), False, 'import copy\n'), ((5027, 5053), 'copy.deepcopy', 'copy.deepcopy', (['self.target'], {}), '(self.target)\n', (5040, 5053), False, 'import copy\n'), ((1965, 1987), 'random.randint', 'random.randint', (['(10)', '(15)'], {}), '(10, 15)\n', (1979, 1987), False, 'import random\n'), ((5421, 5450), 'copy.deepcopy', 'copy.deepcopy', (['self.cameraPos'], {}), '(self.cameraPos)\n', (5434, 5450), False, 'import copy\n'), ((5476, 5502), 'copy.deepcopy', 'copy.deepcopy', (['self.target'], {}), '(self.target)\n', (5489, 5502), False, 'import copy\n'), ((8404, 8449), 'numpy.arcsin', 'np.arcsin', (['(self.target[2] - self.cameraPos[2])'], {}), '(self.target[2] - self.cameraPos[2])\n', (8413, 8449), True, 'import numpy as np\n'), ((8619, 8664), 'numpy.arcsin', 'np.arcsin', (['(self.target[2] - self.cameraPos[2])'], {}), '(self.target[2] - self.cameraPos[2])\n', (8628, 8664), True, 'import numpy as np\n'), ((7933, 7946), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7939, 7946), True, 'import numpy as np\n'), ((7990, 8003), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7996, 8003), True, 'import numpy as np\n'), ((8250, 8263), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (8256, 8263), True, 'import numpy as np\n'), ((8307, 8320), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (8313, 8320), True, 'import numpy as np\n'), ((8521, 8534), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (8527, 8534), True, 'import numpy as np\n'), ((8736, 8749), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (8742, 8749), True, 'import numpy as np\n'), ((2175, 2190), 'random.random', 'random.random', ([], {}), '()\n', (2188, 2190), False, 'import random\n'), ((2196, 2211), 'random.random', 'random.random', ([], {}), '()\n', (2209, 2211), False, 'import random\n')]
|
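spinCameraTask above steers the look-at point with np.angle, np.cos and np.sin; the minimal sketch below isolates that yaw update on hypothetical camera and target coordinates (the numbers are made up) so the geometry is easier to follow.

# Yaw update in isolation: rotate the look-at point around the camera by one step
import numpy as np

camera = np.array([0.0, 0.0])
target = np.array([1.0, 0.0])
rotation_step = 0.02
angle = np.angle(complex(target[0] - camera[0], target[1] - camera[1]))
angle += rotation_step
target = camera + np.array([np.cos(angle), np.sin(angle)])
print(target)  # the look-at point moved counter-clockwise on the unit circle around the camera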
import numpy as np
class KF1D:
# this EKF assumes constant covariance matrix, so calculations are much simpler
# the Kalman gain also needs to be precomputed using the control module
def __init__(self, x0, A, C, K):
self.x = x0
self.A = A
self.C = C
self.K = K
self.A_K = self.A - np.dot(self.K, self.C)
# K matrix needs to be pre-computed as follow:
# import control
# (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R)
# self.K = np.transpose(K)
def update(self, meas):
self.x = np.dot(self.A_K, self.x) + np.dot(self.K, meas)
return self.x
|
[
"numpy.dot"
] |
[((311, 333), 'numpy.dot', 'np.dot', (['self.K', 'self.C'], {}), '(self.K, self.C)\n', (317, 333), True, 'import numpy as np\n'), ((560, 584), 'numpy.dot', 'np.dot', (['self.A_K', 'self.x'], {}), '(self.A_K, self.x)\n', (566, 584), True, 'import numpy as np\n'), ((587, 607), 'numpy.dot', 'np.dot', (['self.K', 'meas'], {}), '(self.K, meas)\n', (593, 607), True, 'import numpy as np\n')]
|
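The class comment above says the gain K has to be precomputed with the control module; the sketch below sidesteps that and assumes a scalar random-walk model with a hand-picked gain (all numbers are illustrative, not the output of control.dare) just to show how KF1D is instantiated and updated.

# Minimal usage sketch for the KF1D class defined above (illustrative values only)
import numpy as np

x0 = np.array([[0.0]])   # initial state estimate
A = np.array([[1.0]])    # state transition (random-walk model)
C = np.array([[1.0]])    # measurement matrix
K = np.array([[0.3]])    # assumed steady-state Kalman gain, not derived here

kf = KF1D(x0, A, C, K)
for meas in [1.0, 1.2, 0.9, 1.1]:
    x = kf.update(np.array([[meas]]))
print(x)  # filtered estimate after the four measurements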
"""
@author: mkowalska
"""
import os
import numpy as np
from numpy.linalg import LinAlgError
import matplotlib.pyplot as plt
from figure_properties import *
import matplotlib.gridspec as gridspec
from kcsd import KCSD1D
import targeted_basis as tb
__abs_file__ = os.path.abspath(__file__)
def _html(r, g, b):
return "#{:02X}{:02X}{:02X}".format(r, g, b)
def stability_M(n_src, total_ele, ele_pos, pots, R_init=0.23):
"""
    Investigates stability of reconstruction for different numbers of basis
    sources
Parameters
----------
n_src: int
Number of basis sources.
total_ele: int
Number of electrodes.
ele_pos: numpy array
Electrodes positions.
pots: numpy array
Values of potentials at ele_pos.
R_init: float
Initial value of R parameter - width of basis source
Default: 0.23.
Returns
-------
obj_all: class object
eigenvalues: numpy array
Eigenvalues of k_pot matrix.
eigenvectors: numpy array
        Eigenvectors of k_pot matrix.
"""
obj_all = []
eigenvectors = np.zeros((len(n_src), total_ele, total_ele))
eigenvalues = np.zeros((len(n_src), total_ele))
for i, value in enumerate(n_src):
pots = pots.reshape((len(ele_pos), 1))
obj = KCSD1D(ele_pos, pots, src_type='gauss', sigma=0.3, h=0.25,
gdx=0.01, n_src_init=n_src[i], ext_x=0, xmin=0, xmax=1,
R_init=R_init)
try:
            eigenvalue, eigenvector = np.linalg.eigh(
                obj.k_pot + obj.lambd * np.identity(obj.k_pot.shape[0]))
except LinAlgError:
            raise LinAlgError('EVD is failing - try moving the electrodes'
                              ' slightly')
idx = eigenvalue.argsort()[::-1]
eigenvalues[i] = eigenvalue[idx]
eigenvectors[i] = eigenvector[:, idx]
obj_all.append(obj)
return obj_all, eigenvalues, eigenvectors
def set_axis(ax, x, y, letter=None):
"""
Formats the plot's caption.
Parameters
----------
ax: Axes object.
x: float
X-position of caption.
y: float
Y-position of caption.
letter: string
Caption of the plot.
Default: None.
Returns
-------
    ax: modified Axes object.
"""
ax.text(
x,
y,
letter,
fontsize=15,
weight='bold',
transform=ax.transAxes)
return ax
def generate_figure(csd_profile, R, MU, true_csd_xlims, total_ele, ele_lims,
noise=0, R_init=0.23):
"""
Generates figure for spectral structure decomposition.
Parameters
----------
csd_profile: function
Function to produce csd profile.
R: float
Thickness of the groundtruth source.
Default: 0.2.
MU: float
Central position of Gaussian source
Default: 0.25.
true_csd_xlims: list
Boundaries for ground truth space.
total_ele: int
Number of electrodes.
ele_lims: list
Electrodes limits.
noise: float
Determines the level of noise in the data.
Default: 0.
R_init: float
Initial value of R parameter - width of basis source
Default: 0.23.
Returns
-------
None
"""
csd_at, true_csd, ele_pos, pots, val = tb.simulate_data(csd_profile,
true_csd_xlims,
R, MU, total_ele,
ele_lims,
noise=noise)
n_src_M = [2, 4, 8, 16, 32, 64, 128, 256, 512]
OBJ_M, eigenval_M, eigenvec_M = stability_M(n_src_M,
total_ele, ele_pos, pots,
R_init=R_init)
plt_cord = [(2, 0), (2, 2), (2, 4),
(3, 0), (3, 2), (3, 4),
(4, 0), (4, 2), (4, 4),
(5, 0), (5, 2), (5, 4)]
letters = ['C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']
BLACK = _html(0, 0, 0)
ORANGE = _html(230, 159, 0)
SKY_BLUE = _html(86, 180, 233)
GREEN = _html(0, 158, 115)
YELLOW = _html(240, 228, 66)
BLUE = _html(0, 114, 178)
VERMILION = _html(213, 94, 0)
PURPLE = _html(204, 121, 167)
colors = [BLUE, ORANGE, GREEN, PURPLE, VERMILION, SKY_BLUE, YELLOW, BLACK]
fig = plt.figure(figsize=(18, 16))
# heights = [1, 1, 1, 0.2, 1, 1, 1, 1]
heights = [4, 0.3, 1, 1, 1, 1]
markers = ['^', '.', '*', 'x', ',']
# linestyles = [':', '--', '-.', '-']
linestyles = ['-', '-', '-', '-']
src_idx = [0, 2, 3, 8]
gs = gridspec.GridSpec(6, 6, height_ratios=heights, hspace=0.3, wspace=0.6)
ax = fig.add_subplot(gs[0, :3])
for indx, i in enumerate(src_idx):
ax.plot(np.arange(1, total_ele + 1), eigenval_M[i],
linestyle=linestyles[indx], color=colors[indx],
marker=markers[indx], label='M='+str(n_src_M[i]),
markersize=10)
ht, lh = ax.get_legend_handles_labels()
set_axis(ax, -0.05, 1.05, letter='A')
ax.set_xlabel('Number of components')
ax.set_ylabel('Eigenvalues')
ax.set_yscale('log')
ax.set_ylim([1e-6, 1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax = fig.add_subplot(gs[0, 3:])
ax.plot(n_src_M, eigenval_M[:, 0], marker='s', color='k', markersize=5,
linestyle=' ')
set_axis(ax, -0.05, 1.05, letter='B')
ax.set_xlabel('Number of basis sources')
ax.set_xscale('log')
ax.set_ylabel('Eigenvalues')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in range(OBJ_M[0].k_interp_cross.shape[1]):
ax = fig.add_subplot(gs[plt_cord[i][0],
plt_cord[i][1]:plt_cord[i][1]+2])
for idx, j in enumerate(src_idx):
ax.plot(np.linspace(0, 1, 100), np.dot(OBJ_M[j].k_interp_cross,
eigenvec_M[j, :, i]),
linestyle=linestyles[idx], color=colors[idx],
label='M='+str(n_src_M[j]), lw=2)
ax.text(0.5, 1., r"$\tilde{K}\cdot{v_{{%(i)d}}}$" % {'i': i+1},
horizontalalignment='center', transform=ax.transAxes,
fontsize=20)
set_axis(ax, -0.10, 1.1, letter=letters[i])
if i < 9:
ax.get_xaxis().set_visible(False)
ax.spines['bottom'].set_visible(False)
else:
ax.set_xlabel('Depth ($mm$)')
if i % 3 == 0:
ax.set_ylabel('CSD ($mA/mm$)')
ax.yaxis.set_label_coords(-0.18, 0.5)
ax.ticklabel_format(style='sci', axis='y', scilimits=((0.0, 0.0)))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend(ht, lh, loc='lower center', ncol=5, frameon=False)
fig.savefig(os.path.join('vectors_' + '_noise_' +
str(noise) + 'R0_2' + '.png'), dpi=300)
plt.show()
if __name__ == '__main__':
ELE_LIMS = [0, 1.]
TRUE_CSD_XLIMS = [0., 1.]
TOTAL_ELE = 12
CSD_PROFILE = tb.csd_profile
R = 0.2
MU = 0.25
R_init = 0.2
generate_figure(CSD_PROFILE, R, MU, TRUE_CSD_XLIMS, TOTAL_ELE, ELE_LIMS,
noise=None, R_init=R_init)
|
[
"numpy.identity",
"numpy.linalg.LinAlgError",
"targeted_basis.simulate_data",
"kcsd.KCSD1D",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.dot",
"os.path.abspath",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((265, 290), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (280, 290), False, 'import os\n'), ((3478, 3568), 'targeted_basis.simulate_data', 'tb.simulate_data', (['csd_profile', 'true_csd_xlims', 'R', 'MU', 'total_ele', 'ele_lims'], {'noise': 'noise'}), '(csd_profile, true_csd_xlims, R, MU, total_ele, ele_lims,\n noise=noise)\n', (3494, 3568), True, 'import targeted_basis as tb\n'), ((4635, 4663), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 16)'}), '(figsize=(18, 16))\n', (4645, 4663), True, 'import matplotlib.pyplot as plt\n'), ((4897, 4967), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(6)', '(6)'], {'height_ratios': 'heights', 'hspace': '(0.3)', 'wspace': '(0.6)'}), '(6, 6, height_ratios=heights, hspace=0.3, wspace=0.6)\n', (4914, 4967), True, 'import matplotlib.gridspec as gridspec\n'), ((7305, 7315), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7313, 7315), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1429), 'kcsd.KCSD1D', 'KCSD1D', (['ele_pos', 'pots'], {'src_type': '"""gauss"""', 'sigma': '(0.3)', 'h': '(0.25)', 'gdx': '(0.01)', 'n_src_init': 'n_src[i]', 'ext_x': '(0)', 'xmin': '(0)', 'xmax': '(1)', 'R_init': 'R_init'}), "(ele_pos, pots, src_type='gauss', sigma=0.3, h=0.25, gdx=0.01,\n n_src_init=n_src[i], ext_x=0, xmin=0, xmax=1, R_init=R_init)\n", (1302, 1429), False, 'from kcsd import KCSD1D\n'), ((5060, 5087), 'numpy.arange', 'np.arange', (['(1)', '(total_ele + 1)'], {}), '(1, total_ele + 1)\n', (5069, 5087), True, 'import numpy as np\n'), ((1797, 1862), 'numpy.linalg.LinAlgError', 'LinAlgError', (['"""EVD is failing - try moving the electrodesslightly"""'], {}), "('EVD is failing - try moving the electrodesslightly')\n", (1808, 1862), False, 'from numpy.linalg import LinAlgError\n'), ((6158, 6180), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (6169, 6180), True, 'import numpy as np\n'), ((6182, 6234), 'numpy.dot', 'np.dot', (['OBJ_M[j].k_interp_cross', 'eigenvec_M[j, :, i]'], {}), '(OBJ_M[j].k_interp_cross, eigenvec_M[j, :, i])\n', (6188, 6234), True, 'import numpy as np\n'), ((1664, 1695), 'numpy.identity', 'np.identity', (['obj.k_pot.shape[0]'], {}), '(obj.k_pot.shape[0])\n', (1675, 1695), True, 'import numpy as np\n')]
|
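stability_M above eigen-decomposes k_pot + lambd * I and reorders the result in descending order; the toy sketch below (a hand-made 2x2 symmetric matrix, not KCSD output) shows the same eigh / argsort / reorder steps in isolation.

# Descending eigen-decomposition on a toy symmetric matrix
import numpy as np

k_pot = np.array([[2.0, 0.5], [0.5, 1.0]])
lambd = 1e-5
eigenvalue, eigenvector = np.linalg.eigh(k_pot + lambd * np.identity(k_pot.shape[0]))
idx = eigenvalue.argsort()[::-1]      # largest eigenvalue first, as in stability_M
eigenvalues = eigenvalue[idx]
eigenvectors = eigenvector[:, idx]    # reorder the columns to match
print(eigenvalues)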
from keras.models import Model, Sequential
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
import numpy as np
from os import listdir,path
from os.path import isfile, join
from PIL import Image
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
# import matplotlib.pyplot as plt
import cv2 as cv
import boto3
# import sounddevice as sd
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(224,224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Convolution2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
model.load_weights('vgg_face_weights.h5')
vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
def preprocess_image(image_path):
img = load_img(image_path, target_size=(224, 224))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
return img
def preprocess_loaded_image(img):
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
return img
def findCosineSimilarity(source_representation, target_representation):
a = np.matmul(np.transpose(source_representation), target_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(target_representation, target_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, target_representation):
euclidean_distance = source_representation - target_representation
euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
face_cascade = cv.CascadeClassifier(join('haarcascades','haarcascade_frontalface_default.xml'))
faces_dir='faces'
faces={}
face_imgs = [f for f in listdir(faces_dir) if isfile(join(faces_dir, f))]
for face_file in face_imgs:
face_label=path.splitext(face_file)[0]
print(face_label)
face_representation= vgg_face_descriptor.predict(preprocess_image(join(faces_dir,face_file)))[0,:]
faces[face_label]=face_representation
def detect_face(img):
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if(len(faces)>0):
(x,y,w,h)=faces[0]
roi = img[y:y+h, x:x+w]
return roi
vc = cv.VideoCapture(0)
if vc.isOpened():
is_capturing, frame = vc.read()
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
vc.release()
face=detect_face(frame)
# plt.imshow(face)
face=cv.resize(face,(224,224))
face = face[...,::-1]
face_representation= vgg_face_descriptor.predict(preprocess_loaded_image(face))[0,:]
min_sim=2
candidate=''
for key in faces.keys():
candidate_representation=faces[key]
    cosine_similarity = findCosineSimilarity(face_representation, candidate_representation) # Should be less than 0.40
    euclidean_distance = findEuclideanDistance(face_representation, candidate_representation) # Less than 120
print("Candidate {} CosineSimularity: {}, EuclideanDistance: {}" .format(key, cosine_similarity, euclidean_distance))
if cosine_similarity<min_sim:
min_sim=cosine_similarity
candidate=key
print(candidate)
# speak('Hello '+candidate+'. May I help you?')
# def speak(text):
# response = polly_client.synthesize_speech(VoiceId='Brian',OutputFormat='pcm',SampleRate="8000",Text = text)
# stream=response['AudioStream'].read()
# sound=np.frombuffer(stream,dtype=np.int16)
# sd.play(sound, 8000)
|
[
"keras.preprocessing.image.img_to_array",
"numpy.sqrt",
"keras.layers.Activation",
"numpy.multiply",
"os.listdir",
"keras.models.Model",
"keras.applications.imagenet_utils.preprocess_input",
"keras.layers.ZeroPadding2D",
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"os.path.splitext",
"keras.models.Sequential",
"cv2.cvtColor",
"cv2.resize",
"numpy.transpose",
"keras.layers.Dropout",
"keras.preprocessing.image.load_img",
"os.path.join",
"cv2.VideoCapture",
"numpy.expand_dims"
] |
[((529, 541), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (539, 541), False, 'from keras.models import Model, Sequential\n'), ((2285, 2353), 'keras.models.Model', 'Model', ([], {'inputs': 'model.layers[0].input', 'outputs': 'model.layers[-2].output'}), '(inputs=model.layers[0].input, outputs=model.layers[-2].output)\n', (2290, 2353), False, 'from keras.models import Model, Sequential\n'), ((4037, 4055), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (4052, 4055), True, 'import cv2 as cv\n'), ((552, 600), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': '(224, 224, 3)'}), '((1, 1), input_shape=(224, 224, 3))\n', (565, 600), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((609, 653), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (622, 653), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((665, 686), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (678, 686), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((697, 741), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (710, 741), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((753, 789), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (765, 789), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((801, 822), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (814, 822), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((833, 878), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (846, 878), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((890, 911), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (903, 911), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((922, 967), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (935, 967), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((979, 1015), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (991, 1015), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1027, 1048), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1040, 1048), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1059, 1104), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), 
activation='relu')\n", (1072, 1104), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1116, 1137), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1129, 1137), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1148, 1193), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), activation='relu')\n", (1161, 1193), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1205, 1226), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1218, 1226), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1237, 1282), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), activation='relu')\n", (1250, 1282), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1294, 1330), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1306, 1330), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1342, 1363), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1355, 1363), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1374, 1419), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (1387, 1419), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1431, 1452), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1444, 1452), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1463, 1508), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (1476, 1508), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1520, 1541), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1533, 1541), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1552, 1597), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (1565, 1597), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1609, 1645), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1621, 1645), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1657, 1678), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1670, 1678), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), 
((1689, 1734), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (1702, 1734), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1746, 1767), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1759, 1767), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1778, 1823), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (1791, 1823), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1835, 1856), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1848, 1856), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1867, 1912), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (1880, 1912), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1924, 1960), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1936, 1960), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((1972, 2018), 'keras.layers.Convolution2D', 'Convolution2D', (['(4096)', '(7, 7)'], {'activation': '"""relu"""'}), "(4096, (7, 7), activation='relu')\n", (1985, 2018), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2030, 2042), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2037, 2042), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2054, 2100), 'keras.layers.Convolution2D', 'Convolution2D', (['(4096)', '(1, 1)'], {'activation': '"""relu"""'}), "(4096, (1, 1), activation='relu')\n", (2067, 2100), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2112, 2124), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2119, 2124), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2136, 2163), 'keras.layers.Convolution2D', 'Convolution2D', (['(2622)', '(1, 1)'], {}), '(2622, (1, 1))\n', (2149, 2163), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2175, 2184), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2182, 2184), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2196, 2217), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2206, 2217), False, 'from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, Activation\n'), ((2399, 2443), 'keras.preprocessing.image.load_img', 'load_img', (['image_path'], {'target_size': '(224, 224)'}), '(image_path, target_size=(224, 224))\n', (2407, 2443), False, 'from keras.preprocessing.image import load_img, 
save_img, img_to_array\n'), ((2454, 2471), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (2466, 2471), False, 'from keras.preprocessing.image import load_img, save_img, img_to_array\n'), ((2482, 2509), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (2496, 2509), True, 'import numpy as np\n'), ((2520, 2541), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['img'], {}), '(img)\n', (2536, 2541), False, 'from keras.applications.imagenet_utils import preprocess_input\n'), ((2602, 2619), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (2614, 2619), False, 'from keras.preprocessing.image import load_img, save_img, img_to_array\n'), ((2630, 2657), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (2644, 2657), True, 'import numpy as np\n'), ((2668, 2689), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['img'], {}), '(img)\n', (2684, 2689), False, 'from keras.applications.imagenet_utils import preprocess_input\n'), ((3306, 3333), 'numpy.sqrt', 'np.sqrt', (['euclidean_distance'], {}), '(euclidean_distance)\n', (3313, 3333), True, 'import numpy as np\n'), ((3401, 3460), 'os.path.join', 'join', (['"""haarcascades"""', '"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascades', 'haarcascade_frontalface_default.xml')\n", (3405, 3460), False, 'from os.path import isfile, join\n'), ((3838, 3873), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (3849, 3873), True, 'import cv2 as cv\n'), ((4124, 4160), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2RGB'], {}), '(frame, cv.COLOR_BGR2RGB)\n', (4135, 4160), True, 'import cv2 as cv\n'), ((4238, 4265), 'cv2.resize', 'cv.resize', (['face', '(224, 224)'], {}), '(face, (224, 224))\n', (4247, 4265), True, 'import cv2 as cv\n'), ((2796, 2831), 'numpy.transpose', 'np.transpose', (['source_representation'], {}), '(source_representation)\n', (2808, 2831), True, 'import numpy as np\n'), ((2871, 2928), 'numpy.multiply', 'np.multiply', (['source_representation', 'source_representation'], {}), '(source_representation, source_representation)\n', (2882, 2928), True, 'import numpy as np\n'), ((2945, 3002), 'numpy.multiply', 'np.multiply', (['target_representation', 'target_representation'], {}), '(target_representation, target_representation)\n', (2956, 3002), True, 'import numpy as np\n'), ((3228, 3279), 'numpy.multiply', 'np.multiply', (['euclidean_distance', 'euclidean_distance'], {}), '(euclidean_distance, euclidean_distance)\n', (3239, 3279), True, 'import numpy as np\n'), ((3515, 3533), 'os.listdir', 'listdir', (['faces_dir'], {}), '(faces_dir)\n', (3522, 3533), False, 'from os import listdir, path\n'), ((3609, 3633), 'os.path.splitext', 'path.splitext', (['face_file'], {}), '(face_file)\n', (3622, 3633), False, 'from os import listdir, path\n'), ((3544, 3562), 'os.path.join', 'join', (['faces_dir', 'f'], {}), '(faces_dir, f)\n', (3548, 3562), False, 'from os.path import isfile, join\n'), ((3025, 3035), 'numpy.sqrt', 'np.sqrt', (['b'], {}), '(b)\n', (3032, 3035), True, 'import numpy as np\n'), ((3038, 3048), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3045, 3048), True, 'import numpy as np\n'), ((3729, 3755), 'os.path.join', 'join', (['faces_dir', 'face_file'], {}), '(faces_dir, face_file)\n', (3733, 3755), False, 'from os.path import isfile, join\n')]
|
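A quick sanity check of the two similarity helpers defined in the script above, using synthetic 2622-dimensional embeddings (random vectors, not real VGG-Face descriptors); the 0.40 and 120 thresholds are the ones quoted in the script's comments.

# Synthetic check of findCosineSimilarity / findEuclideanDistance from the script above
import numpy as np

np.random.seed(0)
emb_a = np.random.rand(2622)
emb_b = emb_a + 0.01 * np.random.rand(2622)   # near-duplicate embedding
emb_c = np.random.rand(2622)                  # unrelated embedding

print(findCosineSimilarity(emb_a, emb_b))    # close to 0
print(findCosineSimilarity(emb_a, emb_c))    # clearly larger than the near-duplicate pair
print(findEuclideanDistance(emb_a, emb_b))   # small Euclidean distance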
# 2020.05.10
# update topNscore
# learner on subspace
# particularly designed to handle classes that are missing from this subspace
# if a class does not exist in the training data, its predicted probability is always zero
#
# learner: a regressor or classifier; must provide 'fit', 'predict' and 'predict_proba' methods
# num_class: total number of classes in the dataset
import numpy as np
from sklearn.metrics import accuracy_score
class myLearner():
def __init__(self, learner, num_class):
self.learner = learner
self.num_class = num_class
self.class_list = {}
self.oneclass = False
self.trained = False
def mapping(self, Y, train=True, probability=False):
c, res = 0, []
Y = Y.reshape(Y.shape[0], -1)
if train == True:
self.class_list = {}
for i in range(np.array(Y).shape[0]):
if Y[i, 0] not in self.class_list.keys():
self.class_list[Y[i,0]] = c
c += 1
res.append(self.class_list[Y[i, 0]])
else:
if probability == False:
for i in range(np.array(Y).shape[0]):
for d in self.class_list.keys():
if self.class_list[d] == Y[i, 0]:
res.append(d)
else:
res = np.zeros((Y.shape[0], self.num_class))
for i in range(np.array(Y).shape[0]):
c = 0
for j in range(self.num_class):
if j in self.class_list.keys():
res[i, j] = Y[i, self.class_list[j]]
c += 1
return np.array(res)
def fit(self, X, Y):
Y = self.mapping(Y, train=True)
if np.unique(Y).shape[0] == 1:
self.oneclass = True
else:
self.learner.fit(X, Y)
self.trained = True
return self
def predict(self, X):
assert (self.trained == True), "Must call fit first!"
if self.oneclass == False:
tmp_pred = self.learner.predict(X).reshape(-1)
else:
tmp_pred = np.zeros((X.shape[0]))
return self.mapping(tmp_pred, train=False)
def predict_proba(self, X):
assert (self.trained == True), "Must call fit first!"
if self.oneclass == False:
tmp_pred = self.learner.predict_proba(X)
else:
tmp_pred = np.ones((X.shape[0], 1))
return self.mapping(tmp_pred, train=False, probability=True)
def score(self, X, Y):
assert (self.trained == True), "Must call fit first!"
return accuracy_score(Y, self.predict(X))
def topNscore(self, X, Y, N=3):
prob = self.predict_proba(X)
idx = np.argsort(prob, axis=1)
ct = 0.
Y = Y.astype('int16')
for i in range(len(Y)):
if Y[i] in (list)(idx[i, -N:]):
ct+=1
return ct/(float)(len(Y))
if __name__ == "__main__":
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.model_selection import train_test_split
print(" > This is a test example: ")
digits = datasets.load_digits()
X = digits.images.reshape((len(digits.images), -1))
print(" input feature shape: %s"%str(X.shape))
X_train, X_test, y_train, y_test = train_test_split(X, digits.target, test_size=0.2, stratify=digits.target)
clf = myLearner(SVC(gamma='scale', probability=True), 10)
clf.fit(X_train, y_train)
print(" --> train acc: %s"%str(clf.score(X_train, y_train)))
print(" --> test acc.: %s"%str(clf.score(X_test, y_test)))
print(" --> test top3 acc.: %s"%str(clf.topNscore(X_test, y_test, 3)))
print("------- DONE -------\n")
|
[
"numpy.ones",
"numpy.unique",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_digits",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"sklearn.svm.SVC"
] |
[((3189, 3211), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (3209, 3211), False, 'from sklearn import datasets\n'), ((3358, 3431), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'digits.target'], {'test_size': '(0.2)', 'stratify': 'digits.target'}), '(X, digits.target, test_size=0.2, stratify=digits.target)\n', (3374, 3431), False, 'from sklearn.model_selection import train_test_split\n'), ((1689, 1702), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1697, 1702), True, 'import numpy as np\n'), ((2777, 2801), 'numpy.argsort', 'np.argsort', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (2787, 2801), True, 'import numpy as np\n'), ((3457, 3493), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""scale"""', 'probability': '(True)'}), "(gamma='scale', probability=True)\n", (3460, 3493), False, 'from sklearn.svm import SVC\n'), ((2159, 2179), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (2167, 2179), True, 'import numpy as np\n'), ((2454, 2478), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2461, 2478), True, 'import numpy as np\n'), ((1347, 1385), 'numpy.zeros', 'np.zeros', (['(Y.shape[0], self.num_class)'], {}), '((Y.shape[0], self.num_class))\n', (1355, 1385), True, 'import numpy as np\n'), ((1780, 1792), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (1789, 1792), True, 'import numpy as np\n'), ((840, 851), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (848, 851), True, 'import numpy as np\n'), ((1131, 1142), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1139, 1142), True, 'import numpy as np\n'), ((1417, 1428), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1425, 1428), True, 'import numpy as np\n')]
|
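The header comments above promise zero probability for classes that never show up in a subspace; the sketch below demonstrates that on toy data, assuming scikit-learn's LogisticRegression as the wrapped learner (any estimator exposing fit, predict and predict_proba should behave the same).

# Missing-class behaviour of myLearner on toy data (assumed LogisticRegression learner)
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [0.1], [1.0], [1.1]])
y = np.array([0, 0, 2, 2])               # classes 1, 3 and 4 never occur here

clf = myLearner(LogisticRegression(), num_class=5)
clf.fit(X, y)
print(clf.predict(np.array([[0.05], [1.05]])))   # -> [0 2]
print(clf.predict_proba(np.array([[0.05]])))      # zeros in columns 1, 3 and 4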
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import mechkit
import mechmean
class KanataniFactory(object):
def __init__(self, N):
self.con = mechkit.notation.Converter()
self._I2 = mechkit.tensors.Basic().I2
self.N = N = self.con.to_tensor(N)
self.degree = len(N.shape)
degrees = [x for x in range(1, self.degree + 1) if x % 2 == 0]
for degree in reversed(degrees):
N = self.first_kind(degree)
setattr(self, "N{}".format(degree), N)
setattr(self, "F{}".format(degree), self.second_kind(N))
setattr(self, "D{}".format(degree), self.third_kind(N))
def __getitem__(self, key):
"""Make attributes accessible dict-like."""
return getattr(self, key)
def first_kind(self, degree):
nbr_times_decrease = int((self.degree - degree) / 2)
N = self.N
for i in range(nbr_times_decrease):
N = self.decrease_first_kind_by_one_degree(N)
return N
def decrease_first_kind_by_one_degree(self, N):
return np.einsum("...ij, ...ij->...", N, self._I2)
def second_kind(self, N):
degree = len(N.shape)
func = self._get_func_second_kind(degree=degree)
return func(N)
def _get_func_second_kind(self, degree):
funcs = {
2: self.second_kind_N2,
4: self.second_kind_N4,
}
return funcs[degree]
def second_kind_N2(self, N):
return 15.0 / 2.0 * (N - 1.0 / 5.0 * self._I2)
def second_kind_N4(self, N):
return (
315.0
/ 8.0
* (
N
- 2.0
/ 3.0
* mechmean.operators.sym(
np.multiply.outer(self._I2, self.first_kind(degree=2))
)
+ 1.0
/ 21.0
* mechmean.operators.sym(np.multiply.outer(self._I2, self._I2))
)
)
def third_kind(self, N):
degree = len(N.shape)
func = self._get_func_third_kind(degree=degree)
return func(N)
def _get_func_third_kind(self, degree):
funcs = {2: self.third_kind_N2, 4: self.third_kind_N4}
return funcs[degree]
def third_kind_N2(self, N):
return 15.0 / 2.0 * (N - 1.0 / 3.0 * self._I2)
def third_kind_N4(self, N):
return (
315.0
/ 8.0
* (
N
- 6.0
/ 7.0
* mechmean.operators.sym(
np.multiply.outer(self._I2, self.first_kind(degree=2))
)
+ 3.0
/ 35.0
* mechmean.operators.sym(np.multiply.outer(self._I2, self._I2))
)
)
def evenly_distributed_vectors_on_sphere(nbr_vectors=1000):
"""
Define nbr_vectors evenly distributed vectors on a sphere
Using the golden spiral method kindly provided by
stackoverflow-user "<NAME>"
https://stackoverflow.com/a/44164075/8935243
"""
from numpy import pi, cos, sin, arccos, arange
indices = arange(0, nbr_vectors, dtype=float) + 0.5
phi = arccos(1 - 2 * indices / nbr_vectors)
theta = pi * (1 + 5 ** 0.5) * indices
x, y, z = cos(theta) * sin(phi), sin(theta) * sin(phi), cos(phi)
orientations = np.column_stack((x, y, z))
return orientations
def first_kind_discrete(orientations, order=4):
"""
    Calc orientation tensors of the first kind from discrete orientations
"""
# Normalize orientations
orientations = [np.array(v) / np.linalg.norm(v) for v in orientations]
# Symmetrize orientations
# orientations_reversed = [-v for v in orientations]
# orientations = orientations + orientations_reversed
einsumStrings = {
1: "ij -> j",
2: "ij, ik -> jk",
3: "ij, ik, il -> jkl",
4: "ij, ik, il, im -> jklm",
5: "ij, ik, il, im, in -> jklmn",
6: "ij, ik, il, im, in, ip -> jklmnp",
}
if order > 6:
einsumStrings[order] = einsum_str_fabric_tensor_first_kind_discrete(order=order)
einsumArgs = [orientations for i in range(order)]
N = 1.0 / len(orientations) * np.einsum(einsumStrings[order], *einsumArgs)
return N
def einsum_str_fabric_tensor_first_kind_discrete(order):
"""
Generalize to higher orders:
N = sum_i 'order'-times_dyad_product(vector)
=
1: 'ij -> j',
2: 'ij, ik -> jk',
3: 'ij, ik, il -> jkl',
4: 'ij, ik, il, im -> jklm',
5: 'ij, ik, il, im, in -> jklmn',
6: 'ij, ik, il, im, in, ip -> jklmnp',
...
"""
# Get list of all available characters
import string
letters = list(string.ascii_letters)
letters.remove("i")
# Create einsum string and arguments
einsumInput = ",".join(["i" + letters[index] for index in range(order)])
einsumOut = "".join(letters[0:order])
einsumString = einsumInput + "->" + einsumOut
return einsumString
|
[
"numpy.arccos",
"mechkit.notation.Converter",
"mechkit.tensors.Basic",
"numpy.column_stack",
"numpy.multiply.outer",
"numpy.array",
"numpy.einsum",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arange"
] |
[((3198, 3235), 'numpy.arccos', 'arccos', (['(1 - 2 * indices / nbr_vectors)'], {}), '(1 - 2 * indices / nbr_vectors)\n', (3204, 3235), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((3367, 3393), 'numpy.column_stack', 'np.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (3382, 3393), True, 'import numpy as np\n'), ((177, 205), 'mechkit.notation.Converter', 'mechkit.notation.Converter', ([], {}), '()\n', (203, 205), False, 'import mechkit\n'), ((1093, 1136), 'numpy.einsum', 'np.einsum', (['"""...ij, ...ij->..."""', 'N', 'self._I2'], {}), "('...ij, ...ij->...', N, self._I2)\n", (1102, 1136), True, 'import numpy as np\n'), ((3145, 3180), 'numpy.arange', 'arange', (['(0)', 'nbr_vectors'], {'dtype': 'float'}), '(0, nbr_vectors, dtype=float)\n', (3151, 3180), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((3339, 3347), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (3342, 3347), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((4243, 4287), 'numpy.einsum', 'np.einsum', (['einsumStrings[order]', '*einsumArgs'], {}), '(einsumStrings[order], *einsumArgs)\n', (4252, 4287), True, 'import numpy as np\n'), ((225, 248), 'mechkit.tensors.Basic', 'mechkit.tensors.Basic', ([], {}), '()\n', (246, 248), False, 'import mechkit\n'), ((3293, 3303), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (3296, 3303), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((3306, 3314), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (3309, 3314), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((3316, 3326), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (3319, 3326), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((3329, 3337), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (3332, 3337), False, 'from numpy import pi, cos, sin, arccos, arange\n'), ((3574, 3585), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (3582, 3585), True, 'import numpy as np\n'), ((3588, 3605), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (3602, 3605), True, 'import numpy as np\n'), ((1928, 1965), 'numpy.multiply.outer', 'np.multiply.outer', (['self._I2', 'self._I2'], {}), '(self._I2, self._I2)\n', (1945, 1965), True, 'import numpy as np\n'), ((2740, 2777), 'numpy.multiply.outer', 'np.multiply.outer', (['self._I2', 'self._I2'], {}), '(self._I2, self._I2)\n', (2757, 2777), True, 'import numpy as np\n')]
|
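A small usage sketch for the two module-level helpers above: for an approximately even distribution of directions, the discrete second-order orientation tensor should approach one third of the identity, with trace close to one.

# Second-order orientation tensor of a near-isotropic direction set
import numpy as np

orientations = evenly_distributed_vectors_on_sphere(nbr_vectors=5000)
N2 = first_kind_discrete(orientations, order=2)
print(np.round(N2, 3))   # approximately diag(1/3, 1/3, 1/3)
print(np.trace(N2))       # approximately 1.0 for unit-norm directions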
# Import modules
from __future__ import print_function
import sys
import numpy as np
from polytope import box2poly
from tulip import hybrid
from tulip.abstract import prop2part, discretize
import Interface.DSL as DSL
from Interface import Statechart as dumpsmach
from Interface.Reduce import *
from Interface.Transform import *
print("----------------------------------\n Script options \n----------------------------------")
verbose = 1  # 0 = less printed output, 1 = more printed output
print("""----------------------------------\n System Definition \n----------------------------------
-- System Constants
-- System Label State Space & partition
""")
# System constants
input_bound = 1.0
disturbance_bound = 0.1
# The system dynamics
A = np.array([[1., 0, 2., 0], [0, 1., 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]])
B = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]])
E = np.array([[1., 0, 0, 0], [0, 1., 0, 0], [0, 0, 1., 0], [0, 0, 0, 1.]])
# $x^+=Ax+Bu+E W$
# Size of the sets
X = box2poly([[0, 100.], [0, 100.], [-5, 5.], [-5, 5.]])
U = box2poly(input_bound*np.array([[0, 1], [0, 1], [0, 1], [0, 1]]))
W = box2poly(disturbance_bound*np.array([[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]]))
print("----------------------------------\n Define system\n----------------------------------")
# Intermezzo polytope tutorial
# https://github.com/tulip-control/polytope/blob/master/doc/tutorial.md
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, X)
print(str(sys_dyn))
print("----------------------------------\n Define labelling \n----------------------------------")
cprops ={}
cprops["inA"] = box2poly([[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inB"] = box2poly([[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inObj1"] = box2poly([[15, 35], [30, 70], [-5, 5], [-5, 5]])
cprops["inObj2"] = box2poly([[65, 85], [30, 70], [-5, 5], [-5, 5]])
cpartition = prop2part(X, cprops)
if verbose == 1:
print("partition before refinement")
print(cpartition)
print("---------------------------------\n System partition State Space \n----------------------------------")
disc_dynamics = discretize(cpartition, sys_dyn, N=5, min_cell_volume=1, closed_loop=True, conservative=True)
states=[state for (state, label) in disc_dynamics.ts.states.find(with_attr_dict={'ap': {'inA'}})]
disc_dynamics.ts.states.initial|=states
print("----------------------------------\n Define specification \n----------------------------------")
# Specifications
# Environment variables and assumptions
env_vars = list()
env_init = list()
env_safe = list()
env_prog = list()
# System variables and requirements
sys_vars = ['inA', 'inB']
sys_init = ['inA']
sys_safe = ['!inObj1', '!inObj2']
sys_prog = ['inA', 'inB']
(ctrl_modes, grspec) = transform2control(disc_dynamics.ts, statevar='ctrl')
print("----------------------------------\n Combine sys and spec \n----------------------------------")
phi = grspec | spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
phi.qinit = '\A \E'
phi.moore = False
phi.plus_one = False
ctrl = synth.synthesize(phi, ignore_sys_init=True)
#
# print("----------------------------------\n Reduce states \n----------------------------------")
#
# Events_init = {('fullGas', True)}
#
#
# ctrl_red=reduce_mealy(ctrl,relabel=False,outputs={'ctrl'}, prune_set=Events_init, combine_trans=False)
#
print("----------------------------------\n Output results \n----------------------------------")
if verbose == 1:
print(" (Verbose) ")
try:
disc_dynamics.ts.save("cimple_aircraft_orig.png")
ctrl_modes.save("cimple_aircraft_modes.png")
# ctrl_red.save('cimple_aircraft_ctrl_red.png')
ctrl.save("cimple_aircraft_ctrl_orig.png")
print(" (Verbose): saved all Finite State Transition Systems ")
except Exception:
pass
print('nodes in ctrl:')
print(len(ctrl.nodes()))
print(len(ctrl.transitions()))
print('\n')
#
# print('nodes in ctrl_red:')
# print(len(ctrl_red.nodes()))
# print(len(ctrl_red.transitions()))
# print('\n')
#
#
print("----------------------------------\n Convert controller to Xmi \n----------------------------------")
sys.stdout.flush()
# --------------- Writing the statechart -----------
try:
filename = str(__file__)
filename = filename[0:-3] + "_gen"
except NameError:
filename = "test_gen"
# write strategy plus control modes at the same time to a statechart
with open(filename+".xml", "w") as f:
# f.write(dumpsmach.tulip_to_xmi(ctrl_red,ctrl_modes))
f.write(dumpsmach.tulip_to_xmi(ctrl, ctrl_modes))
|
[
"tulip.hybrid.LtiSysDyn",
"Interface.Statechart.tulip_to_xmi",
"tulip.abstract.prop2part",
"polytope.box2poly",
"numpy.array",
"sys.stdout.flush",
"tulip.abstract.discretize"
] |
[((769, 845), 'numpy.array', 'np.array', (['[[1.0, 0, 2.0, 0], [0, 1.0, 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]]'], {}), '([[1.0, 0, 2.0, 0], [0, 1.0, 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]])\n', (777, 845), True, 'import numpy as np\n'), ((847, 915), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]])\n', (855, 915), True, 'import numpy as np\n'), ((920, 994), 'numpy.array', 'np.array', (['[[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]]'], {}), '([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])\n', (928, 994), True, 'import numpy as np\n'), ((1033, 1089), 'polytope.box2poly', 'box2poly', (['[[0, 100.0], [0, 100.0], [-5, 5.0], [-5, 5.0]]'], {}), '([[0, 100.0], [0, 100.0], [-5, 5.0], [-5, 5.0]])\n', (1041, 1089), False, 'from polytope import box2poly\n'), ((1452, 1492), 'tulip.hybrid.LtiSysDyn', 'hybrid.LtiSysDyn', (['A', 'B', 'E', 'None', 'U', 'W', 'X'], {}), '(A, B, E, None, U, W, X)\n', (1468, 1492), False, 'from tulip import hybrid\n'), ((1643, 1698), 'polytope.box2poly', 'box2poly', (['[[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]]'], {}), '([[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])\n', (1651, 1698), False, 'from polytope import box2poly\n'), ((1715, 1772), 'polytope.box2poly', 'box2poly', (['[[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]]'], {}), '([[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])\n', (1723, 1772), False, 'from polytope import box2poly\n'), ((1793, 1841), 'polytope.box2poly', 'box2poly', (['[[15, 35], [30, 70], [-5, 5], [-5, 5]]'], {}), '([[15, 35], [30, 70], [-5, 5], [-5, 5]])\n', (1801, 1841), False, 'from polytope import box2poly\n'), ((1861, 1909), 'polytope.box2poly', 'box2poly', (['[[65, 85], [30, 70], [-5, 5], [-5, 5]]'], {}), '([[65, 85], [30, 70], [-5, 5], [-5, 5]])\n', (1869, 1909), False, 'from polytope import box2poly\n'), ((1925, 1945), 'tulip.abstract.prop2part', 'prop2part', (['X', 'cprops'], {}), '(X, cprops)\n', (1934, 1945), False, 'from tulip.abstract import prop2part, discretize\n'), ((2155, 2251), 'tulip.abstract.discretize', 'discretize', (['cpartition', 'sys_dyn'], {'N': '(5)', 'min_cell_volume': '(1)', 'closed_loop': '(True)', 'conservative': '(True)'}), '(cpartition, sys_dyn, N=5, min_cell_volume=1, closed_loop=True,\n conservative=True)\n', (2165, 2251), False, 'from tulip.abstract import prop2part, discretize\n'), ((4274, 4292), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4290, 4292), False, 'import sys\n'), ((1111, 1153), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1], [0, 1], [0, 1]])\n', (1119, 1153), True, 'import numpy as np\n'), ((1186, 1240), 'numpy.array', 'np.array', (['[[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]]'], {}), '([[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]])\n', (1194, 1240), True, 'import numpy as np\n'), ((4641, 4681), 'Interface.Statechart.tulip_to_xmi', 'dumpsmach.tulip_to_xmi', (['ctrl', 'ctrl_modes'], {}), '(ctrl, ctrl_modes)\n', (4663, 4681), True, 'from Interface import Statechart as dumpsmach\n')]
|
import argparse
import json
from data_management.DatasetFactory import datasetFactory
from config import cfg
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculates metrics from output of a Classification network.' +
' Run `run_network.py <config> test` first.')
parser.add_argument('config_file', help='config file path')
parser.add_argument('results_file', help='results file path')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
refer = datasetFactory(cfg)
hamming_loss = 0.0
TP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
FP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
FN = np.zeros((cfg.IMG_NET.N_LABELS+1,))
total = 0.0
# load generation outputs
with open(args.results_file, 'r') as f:
genData = json.load(f)
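    # Each result row carries its Hamming loss plus the lists of class indices counted as
    # TP/FP/FN; numpy fancy indexing below increments the corresponding per-class counters.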
for row in genData:
total += 1.0
hamming_loss += row['Hamming_Loss']
TP[row['TP_classes']] += 1
FP[row['FP_classes']] += 1
FN[row['FN_classes']] += 1
print("Mean Hamming Loss: %3.3f" % (hamming_loss/total))
print("Mean precision: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FP))))
print("Mean recall: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FN))))
print("Class\tPrecision\tRecall")
for idx in range(cfg.IMG_NET.N_LABELS):
label = refer[0].coco.cats[refer[0].coco_cat_map[idx]]
print("%s\t%3.3f\t%3.3f" % (label['name'].ljust(20), TP[idx]/(TP[idx]+FP[idx]), TP[idx]/(TP[idx]+FN[idx])))
|
[
"argparse.ArgumentParser",
"config.cfg.freeze",
"config.cfg.merge_from_file",
"numpy.sum",
"numpy.zeros",
"data_management.DatasetFactory.datasetFactory",
"json.load",
"config.cfg.merge_from_list"
] |
[((171, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Calculates metrics from output of a Classification network.' +\n ' Run `run_network.py <config> test` first.')"}), "(description=\n 'Calculates metrics from output of a Classification network.' +\n ' Run `run_network.py <config> test` first.')\n", (194, 325), False, 'import argparse\n'), ((696, 733), 'config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config_file'], {}), '(args.config_file)\n', (715, 733), False, 'from config import cfg\n'), ((738, 768), 'config.cfg.merge_from_list', 'cfg.merge_from_list', (['args.opts'], {}), '(args.opts)\n', (757, 768), False, 'from config import cfg\n'), ((773, 785), 'config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (783, 785), False, 'from config import cfg\n'), ((799, 818), 'data_management.DatasetFactory.datasetFactory', 'datasetFactory', (['cfg'], {}), '(cfg)\n', (813, 818), False, 'from data_management.DatasetFactory import datasetFactory\n'), ((852, 889), 'numpy.zeros', 'np.zeros', (['(cfg.IMG_NET.N_LABELS + 1,)'], {}), '((cfg.IMG_NET.N_LABELS + 1,))\n', (860, 889), True, 'import numpy as np\n'), ((897, 934), 'numpy.zeros', 'np.zeros', (['(cfg.IMG_NET.N_LABELS + 1,)'], {}), '((cfg.IMG_NET.N_LABELS + 1,))\n', (905, 934), True, 'import numpy as np\n'), ((942, 979), 'numpy.zeros', 'np.zeros', (['(cfg.IMG_NET.N_LABELS + 1,)'], {}), '((cfg.IMG_NET.N_LABELS + 1,))\n', (950, 979), True, 'import numpy as np\n'), ((1087, 1099), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1096, 1099), False, 'import json\n'), ((1418, 1428), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1424, 1428), True, 'import numpy as np\n'), ((1489, 1499), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1495, 1499), True, 'import numpy as np\n'), ((1430, 1440), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1436, 1440), True, 'import numpy as np\n'), ((1441, 1451), 'numpy.sum', 'np.sum', (['FP'], {}), '(FP)\n', (1447, 1451), True, 'import numpy as np\n'), ((1501, 1511), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1507, 1511), True, 'import numpy as np\n'), ((1512, 1522), 'numpy.sum', 'np.sum', (['FN'], {}), '(FN)\n', (1518, 1522), True, 'import numpy as np\n')]
|
"""Test the viscous fluid helper functions."""
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.random
import numpy.linalg as la # noqa
import pyopencl.clmath # noqa
import logging
import pytest # noqa
from pytools.obj_array import make_obj_array
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL
import grudge.op as op
from grudge.eager import (
EagerDGDiscretization,
interior_trace_pair
)
from meshmode.array_context import ( # noqa
pytest_generate_tests_for_pyopencl_array_context
as pytest_generate_tests)
from mirgecom.fluid import make_conserved
from mirgecom.transport import (
SimpleTransport,
PowerLawTransport
)
from mirgecom.eos import IdealSingleGas
logger = logging.getLogger(__name__)
@pytest.mark.parametrize("transport_model", [0, 1])
def test_viscous_stress_tensor(actx_factory, transport_model):
"""Test tau data structure and values against exact."""
actx = actx_factory()
dim = 3
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
# assemble velocities for simple, unique grad components
velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
mass = 2*ones
energy = zeros + 2.5
mom = mass * velocity
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom)
grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
if transport_model:
tv_model = SimpleTransport(bulk_viscosity=1.0, viscosity=0.5)
else:
tv_model = PowerLawTransport()
eos = IdealSingleGas(transport_model=tv_model)
mu = tv_model.viscosity(eos, cv)
lam = tv_model.volume_viscosity(eos, cv)
# Exact answer for tau
exp_grad_v = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
exp_grad_v_t = np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
exp_div_v = 15
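    # div(v) is the trace of the velocity gradient: 1 + 5 + 9 = 15.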
exp_tau = (mu*(exp_grad_v + exp_grad_v_t)
+ lam*exp_div_v*np.eye(3))
from mirgecom.viscous import viscous_stress_tensor
tau = viscous_stress_tensor(discr, eos, cv, grad_cv)
# The errors come from grad_v
assert discr.norm(tau - exp_tau, np.inf) < 1e-12
# Box grid generator widget lifted from @majosm and slightly bent
def _get_box_mesh(dim, a, b, n, t=None):
dim_names = ["x", "y", "z"]
bttf = {}
for i in range(dim):
bttf["-"+str(i+1)] = ["-"+dim_names[i]]
bttf["+"+str(i+1)] = ["+"+dim_names[i]]
from meshmode.mesh.generation import generate_regular_rect_mesh as gen
return gen(a=a, b=b, npoints_per_axis=n, boundary_tag_to_face=bttf, mesh_type=t)
@pytest.mark.parametrize("order", [2, 3, 4])
@pytest.mark.parametrize("kappa", [0.0, 1.0, 2.3])
def test_poiseuille_fluxes(actx_factory, order, kappa):
"""Test the viscous fluxes using a Poiseuille input state."""
actx = actx_factory()
dim = 2
from pytools.convergence import EOCRecorder
e_eoc_rec = EOCRecorder()
p_eoc_rec = EOCRecorder()
base_pressure = 100000.0
pressure_ratio = 1.001
mu = 42 # arbitrary
left_boundary_location = 0
right_boundary_location = 0.1
ybottom = 0.
ytop = .02
nspecies = 0
spec_diffusivity = 0 * np.ones(nspecies)
transport_model = SimpleTransport(viscosity=mu, thermal_conductivity=kappa,
species_diffusivity=spec_diffusivity)
xlen = right_boundary_location - left_boundary_location
p_low = base_pressure
p_hi = pressure_ratio*base_pressure
dpdx = (p_low - p_hi) / xlen
rho = 1.0
eos = IdealSingleGas(transport_model=transport_model)
from mirgecom.initializers import PlanarPoiseuille
initializer = PlanarPoiseuille(density=rho, mu=mu)
def _elbnd_flux(discr, compute_interior_flux, compute_boundary_flux,
int_tpair, boundaries):
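        # Total element-boundary flux: interior-face contribution plus the flux
        # through every domain boundary tag.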
return (compute_interior_flux(int_tpair)
+ sum(compute_boundary_flux(btag) for btag in boundaries))
from mirgecom.flux import gradient_flux_central
def cv_flux_interior(int_tpair):
normal = thaw(actx, discr.normal(int_tpair.dd))
flux_weak = gradient_flux_central(int_tpair, normal)
return discr.project(int_tpair.dd, "all_faces", flux_weak)
def cv_flux_boundary(btag):
boundary_discr = discr.discr_from_dd(btag)
bnd_nodes = thaw(actx, boundary_discr.nodes())
cv_bnd = initializer(x_vec=bnd_nodes, eos=eos)
bnd_nhat = thaw(actx, discr.normal(btag))
from grudge.trace_pair import TracePair
bnd_tpair = TracePair(btag, interior=cv_bnd, exterior=cv_bnd)
flux_weak = gradient_flux_central(bnd_tpair, bnd_nhat)
return discr.project(bnd_tpair.dd, "all_faces", flux_weak)
for nfac in [1, 2, 4]:
npts_axis = nfac*(11, 21)
box_ll = (left_boundary_location, ybottom)
box_ur = (right_boundary_location, ytop)
mesh = _get_box_mesh(2, a=box_ll, b=box_ur, n=npts_axis)
logger.info(
f"Number of {dim}d elements: {mesh.nelements}"
)
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
# compute max element size
from grudge.dt_utils import h_max_from_volume
h_max = h_max_from_volume(discr)
# form exact cv
cv = initializer(x_vec=nodes, eos=eos)
cv_int_tpair = interior_trace_pair(discr, cv)
boundaries = [BTAG_ALL]
cv_flux_bnd = _elbnd_flux(discr, cv_flux_interior, cv_flux_boundary,
cv_int_tpair, boundaries)
from mirgecom.operators import grad_operator
grad_cv = make_conserved(dim, q=grad_operator(discr, cv.join(),
cv_flux_bnd.join()))
xp_grad_cv = initializer.exact_grad(x_vec=nodes, eos=eos, cv_exact=cv)
xp_grad_v = 1/cv.mass * xp_grad_cv.momentum
xp_tau = mu * (xp_grad_v + xp_grad_v.transpose())
# sanity check the gradient:
relerr_scale_e = 1.0 / discr.norm(xp_grad_cv.energy, np.inf)
relerr_scale_p = 1.0 / discr.norm(xp_grad_cv.momentum, np.inf)
graderr_e = discr.norm((grad_cv.energy - xp_grad_cv.energy), np.inf)
graderr_p = discr.norm((grad_cv.momentum - xp_grad_cv.momentum), np.inf)
graderr_e *= relerr_scale_e
graderr_p *= relerr_scale_p
assert graderr_e < 5e-7
assert graderr_p < 5e-11
zeros = discr.zeros(actx)
ones = zeros + 1
pressure = eos.pressure(cv)
# grad of p should be dp/dx
xp_grad_p = make_obj_array([dpdx*ones, zeros])
grad_p = op.local_grad(discr, pressure)
dpscal = 1.0/np.abs(dpdx)
temperature = eos.temperature(cv)
tscal = rho*eos.gas_const()*dpscal
xp_grad_t = xp_grad_p/(cv.mass*eos.gas_const())
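        # With uniform density, T = p/(rho*R), so grad(T) = grad(p)/(rho*R).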
grad_t = op.local_grad(discr, temperature)
# sanity check
assert discr.norm(grad_p - xp_grad_p, np.inf)*dpscal < 5e-9
assert discr.norm(grad_t - xp_grad_t, np.inf)*tscal < 5e-9
# verify heat flux
from mirgecom.viscous import conductive_heat_flux
heat_flux = conductive_heat_flux(discr, eos, cv, grad_t)
xp_heat_flux = -kappa*xp_grad_t
assert discr.norm(heat_flux - xp_heat_flux, np.inf) < 2e-8
# verify diffusive mass flux is zilch (no scalar components)
from mirgecom.viscous import diffusive_flux
j = diffusive_flux(discr, eos, cv, grad_cv)
assert len(j) == 0
xp_e_flux = np.dot(xp_tau, cv.velocity) - xp_heat_flux
xp_mom_flux = xp_tau
from mirgecom.viscous import viscous_flux
vflux = viscous_flux(discr, eos, cv, grad_cv, grad_t)
efluxerr = (
discr.norm(vflux.energy - xp_e_flux, np.inf)
/ discr.norm(xp_e_flux, np.inf)
)
momfluxerr = (
discr.norm(vflux.momentum - xp_mom_flux, np.inf)
/ discr.norm(xp_mom_flux, np.inf)
)
assert discr.norm(vflux.mass, np.inf) == 0
e_eoc_rec.add_data_point(h_max, efluxerr)
p_eoc_rec.add_data_point(h_max, momfluxerr)
assert (
e_eoc_rec.order_estimate() >= order - 0.5
or e_eoc_rec.max_error() < 3e-9
)
assert (
p_eoc_rec.order_estimate() >= order - 0.5
or p_eoc_rec.max_error() < 2e-12
)
def test_species_diffusive_flux(actx_factory):
"""Test species diffusive flux and values against exact."""
actx = actx_factory()
dim = 3
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
# assemble velocities for simple, unique grad components
velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
# assemble y so that each one has simple, but unique grad components
nspecies = 2*dim
y = make_obj_array([ones for _ in range(nspecies)])
for idim in range(dim):
ispec = 2*idim
y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
for iidim in range(dim)])
y[ispec+1] = -y[ispec]
massval = 2
mass = massval*ones
energy = zeros + 2.5
mom = mass * velocity
species_mass = mass*y
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
mu_b = 1.0
mu = 0.5
kappa = 5.0
# assemble d_alpha so that every species has a unique j
d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
thermal_conductivity=kappa,
species_diffusivity=d_alpha)
eos = IdealSingleGas(transport_model=tv_model)
from mirgecom.viscous import diffusive_flux
j = diffusive_flux(discr, eos, cv, grad_cv)
tol = 1e-10
for idim in range(dim):
ispec = 2*idim
exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
for iidim in range(dim)])
exact_j = -massval * d_alpha[ispec] * exact_dy
assert discr.norm(j[ispec] - exact_j, np.inf) < tol
exact_j = massval * d_alpha[ispec+1] * exact_dy
assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
def test_diffusive_heat_flux(actx_factory):
"""Test diffusive heat flux and values against exact."""
actx = actx_factory()
dim = 3
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
# assemble velocities for simple, unique grad components
velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
# assemble y so that each one has simple, but unique grad components
nspecies = 2*dim
y = make_obj_array([ones for _ in range(nspecies)])
for idim in range(dim):
ispec = 2*idim
y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
for iidim in range(dim)])
y[ispec+1] = -y[ispec]
massval = 2
mass = massval*ones
energy = zeros + 2.5
mom = mass * velocity
species_mass = mass*y
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
mu_b = 1.0
mu = 0.5
kappa = 5.0
# assemble d_alpha so that every species has a unique j
d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
thermal_conductivity=kappa,
species_diffusivity=d_alpha)
eos = IdealSingleGas(transport_model=tv_model)
from mirgecom.viscous import diffusive_flux
j = diffusive_flux(discr, eos, cv, grad_cv)
tol = 1e-10
for idim in range(dim):
ispec = 2*idim
exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
for iidim in range(dim)])
exact_j = -massval * d_alpha[ispec] * exact_dy
assert discr.norm(j[ispec] - exact_j, np.inf) < tol
exact_j = massval * d_alpha[ispec+1] * exact_dy
assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
@pytest.mark.parametrize("array_valued", [False, True])
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_local_max_species_diffusivity(actx_factory, dim, array_valued):
"""Test the local maximum species diffusivity."""
actx = actx_factory()
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
zeros = discr.zeros(actx)
ones = zeros + 1.0
vel = .32
velocity = make_obj_array([zeros+vel for _ in range(dim)])
massval = 1
mass = massval*ones
energy = zeros + 1.0 / (1.4*.4)
mom = mass * velocity
species_mass = np.array([1., 2., 3.], dtype=object)
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
d_alpha_input = np.array([.1, .2, .3])
if array_valued:
f = 1 + 0.1*actx.np.sin(nodes[0])
d_alpha_input *= f
tv_model = SimpleTransport(species_diffusivity=d_alpha_input)
eos = IdealSingleGas(transport_model=tv_model)
d_alpha = tv_model.species_diffusivity(eos, cv)
from mirgecom.viscous import get_local_max_species_diffusivity
expected = .3*ones
if array_valued:
expected *= f
calculated = get_local_max_species_diffusivity(actx, discr, d_alpha)
assert discr.norm(calculated-expected, np.inf) == 0
@pytest.mark.parametrize("dim", [1, 2, 3])
@pytest.mark.parametrize("mu", [-1, 0, 1, 2])
@pytest.mark.parametrize("vel", [0, 1])
def test_viscous_timestep(actx_factory, dim, mu, vel):
"""Test timestep size."""
actx = actx_factory()
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
)
order = 1
discr = EagerDGDiscretization(actx, mesh, order=order)
zeros = discr.zeros(actx)
ones = zeros + 1.0
velocity = make_obj_array([zeros+vel for _ in range(dim)])
massval = 1
mass = massval*ones
# I *think* this energy should yield c=1.0
energy = zeros + 1.0 / (1.4*.4)
mom = mass * velocity
species_mass = None
cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
species_mass=species_mass)
from grudge.dt_utils import characteristic_lengthscales
chlen = characteristic_lengthscales(actx, discr)
from grudge.op import nodal_min
chlen_min = nodal_min(discr, "vol", chlen)
mu = mu*chlen_min
if mu < 0:
mu = 0
tv_model = None
else:
tv_model = SimpleTransport(viscosity=mu)
eos = IdealSingleGas(transport_model=tv_model)
from mirgecom.viscous import get_viscous_timestep
dt_field = get_viscous_timestep(discr, eos, cv)
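    # Expected estimate below: dt ~ h / (|v| + c + mu/h), evaluated nodewise.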
speed_total = actx.np.sqrt(np.dot(velocity, velocity)) + eos.sound_speed(cv)
dt_expected = chlen / (speed_total + (mu / chlen))
error = (dt_expected - dt_field) / dt_expected
assert discr.norm(error, np.inf) == 0
|
[
"logging.getLogger",
"mirgecom.transport.PowerLawTransport",
"mirgecom.viscous.conductive_heat_flux",
"grudge.op.nodal_min",
"pytools.convergence.EOCRecorder",
"numpy.array",
"grudge.op.local_grad",
"mirgecom.flux.gradient_flux_central",
"pytools.obj_array.make_obj_array",
"grudge.dt_utils.h_max_from_volume",
"mirgecom.eos.IdealSingleGas",
"numpy.dot",
"grudge.trace_pair.TracePair",
"numpy.abs",
"numpy.eye",
"mirgecom.transport.SimpleTransport",
"mirgecom.viscous.viscous_flux",
"numpy.ones",
"mirgecom.viscous.get_viscous_timestep",
"mirgecom.fluid.make_conserved",
"mirgecom.viscous.get_local_max_species_diffusivity",
"mirgecom.initializers.PlanarPoiseuille",
"grudge.eager.EagerDGDiscretization",
"meshmode.mesh.generation.generate_regular_rect_mesh",
"mirgecom.viscous.diffusive_flux",
"grudge.dt_utils.characteristic_lengthscales",
"mirgecom.viscous.viscous_stress_tensor",
"pytest.mark.parametrize",
"grudge.eager.interior_trace_pair"
] |
[((1839, 1866), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1856, 1866), False, 'import logging\n'), ((1870, 1920), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transport_model"""', '[0, 1]'], {}), "('transport_model', [0, 1])\n", (1893, 1920), False, 'import pytest\n'), ((4128, 4171), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[2, 3, 4]'], {}), "('order', [2, 3, 4])\n", (4151, 4171), False, 'import pytest\n'), ((4173, 4222), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kappa"""', '[0.0, 1.0, 2.3]'], {}), "('kappa', [0.0, 1.0, 2.3])\n", (4196, 4222), False, 'import pytest\n'), ((14728, 14782), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""array_valued"""', '[False, True]'], {}), "('array_valued', [False, True])\n", (14751, 14782), False, 'import pytest\n'), ((14784, 14825), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', '[1, 2, 3]'], {}), "('dim', [1, 2, 3])\n", (14807, 14825), False, 'import pytest\n'), ((16280, 16321), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', '[1, 2, 3]'], {}), "('dim', [1, 2, 3])\n", (16303, 16321), False, 'import pytest\n'), ((16323, 16367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mu"""', '[-1, 0, 1, 2]'], {}), "('mu', [-1, 0, 1, 2])\n", (16346, 16367), False, 'import pytest\n'), ((16369, 16407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""vel"""', '[0, 1]'], {}), "('vel', [0, 1])\n", (16392, 16407), False, 'import pytest\n'), ((2177, 2275), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (2203, 2275), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((2314, 2360), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (2335, 2360), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((2689, 2741), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[velocity_x, velocity_y, velocity_z]'], {}), '([velocity_x, velocity_y, velocity_z])\n', (2703, 2741), False, 'from pytools.obj_array import make_obj_array\n'), ((2822, 2881), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom'}), '(dim, mass=mass, energy=energy, momentum=mom)\n', (2836, 2881), False, 'from mirgecom.fluid import make_conserved\n'), ((3106, 3146), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (3120, 3146), False, 'from mirgecom.eos import IdealSingleGas\n'), ((3274, 3317), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (3282, 3317), True, 'import numpy as np\n'), ((3337, 3380), 'numpy.array', 'np.array', (['[[1, 4, 7], [2, 5, 8], [3, 6, 9]]'], {}), '([[1, 4, 7], [2, 5, 8], [3, 6, 9]])\n', (3345, 3380), True, 'import numpy as np\n'), ((3554, 3600), 'mirgecom.viscous.viscous_stress_tensor', 'viscous_stress_tensor', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (3575, 3600), False, 'from mirgecom.viscous import viscous_stress_tensor\n'), ((4051, 4124), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'gen', ([], {'a': 'a', 'b': 'b', 
'npoints_per_axis': 'n', 'boundary_tag_to_face': 'bttf', 'mesh_type': 't'}), '(a=a, b=b, npoints_per_axis=n, boundary_tag_to_face=bttf, mesh_type=t)\n', (4054, 4124), True, 'from meshmode.mesh.generation import generate_regular_rect_mesh as gen\n'), ((4448, 4461), 'pytools.convergence.EOCRecorder', 'EOCRecorder', ([], {}), '()\n', (4459, 4461), False, 'from pytools.convergence import EOCRecorder\n'), ((4478, 4491), 'pytools.convergence.EOCRecorder', 'EOCRecorder', ([], {}), '()\n', (4489, 4491), False, 'from pytools.convergence import EOCRecorder\n'), ((4755, 4854), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'viscosity': 'mu', 'thermal_conductivity': 'kappa', 'species_diffusivity': 'spec_diffusivity'}), '(viscosity=mu, thermal_conductivity=kappa,\n species_diffusivity=spec_diffusivity)\n', (4770, 4854), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((5074, 5121), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'transport_model'}), '(transport_model=transport_model)\n', (5088, 5121), False, 'from mirgecom.eos import IdealSingleGas\n'), ((5196, 5232), 'mirgecom.initializers.PlanarPoiseuille', 'PlanarPoiseuille', ([], {'density': 'rho', 'mu': 'mu'}), '(density=rho, mu=mu)\n', (5212, 5232), False, 'from mirgecom.initializers import PlanarPoiseuille\n'), ((10135, 10233), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (10161, 10233), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((10272, 10318), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (10293, 10318), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((10647, 10699), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[velocity_x, velocity_y, velocity_z]'], {}), '([velocity_x, velocity_y, velocity_z])\n', (10661, 10699), False, 'from pytools.obj_array import make_obj_array\n'), ((11204, 11295), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (11218, 11295), False, 'from mirgecom.fluid import make_conserved\n'), ((11571, 11683), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'bulk_viscosity': 'mu_b', 'viscosity': 'mu', 'thermal_conductivity': 'kappa', 'species_diffusivity': 'd_alpha'}), '(bulk_viscosity=mu_b, viscosity=mu, thermal_conductivity=\n kappa, species_diffusivity=d_alpha)\n', (11586, 11683), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((11752, 11792), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (11766, 11792), False, 'from mirgecom.eos import IdealSingleGas\n'), ((11850, 11889), 'mirgecom.viscous.diffusive_flux', 'diffusive_flux', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (11864, 11889), False, 'from mirgecom.viscous import diffusive_flux\n'), ((12551, 12649), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, 
b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (12577, 12649), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((12688, 12734), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (12709, 12734), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((13063, 13115), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[velocity_x, velocity_y, velocity_z]'], {}), '([velocity_x, velocity_y, velocity_z])\n', (13077, 13115), False, 'from pytools.obj_array import make_obj_array\n'), ((13620, 13711), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (13634, 13711), False, 'from mirgecom.fluid import make_conserved\n'), ((13986, 14098), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'bulk_viscosity': 'mu_b', 'viscosity': 'mu', 'thermal_conductivity': 'kappa', 'species_diffusivity': 'd_alpha'}), '(bulk_viscosity=mu_b, viscosity=mu, thermal_conductivity=\n kappa, species_diffusivity=d_alpha)\n', (14001, 14098), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((14167, 14207), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (14181, 14207), False, 'from mirgecom.eos import IdealSingleGas\n'), ((14265, 14304), 'mirgecom.viscous.diffusive_flux', 'diffusive_flux', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (14279, 14304), False, 'from mirgecom.viscous import diffusive_flux\n'), ((15075, 15173), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (15101, 15173), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((15212, 15258), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (15233, 15258), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((15551, 15590), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {'dtype': 'object'}), '([1.0, 2.0, 3.0], dtype=object)\n', (15559, 15590), True, 'import numpy as np\n'), ((15598, 15689), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (15612, 15689), False, 'from mirgecom.fluid import make_conserved\n'), ((15730, 15755), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (15738, 15755), True, 'import numpy as np\n'), ((15859, 15909), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'species_diffusivity': 'd_alpha_input'}), '(species_diffusivity=d_alpha_input)\n', (15874, 15909), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((15920, 15960), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (15934, 15960), False, 'from mirgecom.eos import IdealSingleGas\n'), ((16164, 16219), 'mirgecom.viscous.get_local_max_species_diffusivity', 
'get_local_max_species_diffusivity', (['actx', 'discr', 'd_alpha'], {}), '(actx, discr, d_alpha)\n', (16197, 16219), False, 'from mirgecom.viscous import get_local_max_species_diffusivity\n'), ((16615, 16713), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (16641, 16713), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((16752, 16798), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (16773, 16798), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((17101, 17192), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (17115, 17192), False, 'from mirgecom.fluid import make_conserved\n'), ((17285, 17325), 'grudge.dt_utils.characteristic_lengthscales', 'characteristic_lengthscales', (['actx', 'discr'], {}), '(actx, discr)\n', (17312, 17325), False, 'from grudge.dt_utils import characteristic_lengthscales\n'), ((17378, 17408), 'grudge.op.nodal_min', 'nodal_min', (['discr', '"""vol"""', 'chlen'], {}), "(discr, 'vol', chlen)\n", (17387, 17408), False, 'from grudge.op import nodal_min\n'), ((17556, 17596), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (17570, 17596), False, 'from mirgecom.eos import IdealSingleGas\n'), ((17667, 17703), 'mirgecom.viscous.get_viscous_timestep', 'get_viscous_timestep', (['discr', 'eos', 'cv'], {}), '(discr, eos, cv)\n', (17687, 17703), False, 'from mirgecom.viscous import get_viscous_timestep\n'), ((2995, 3045), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'bulk_viscosity': '(1.0)', 'viscosity': '(0.5)'}), '(bulk_viscosity=1.0, viscosity=0.5)\n', (3010, 3045), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((3075, 3094), 'mirgecom.transport.PowerLawTransport', 'PowerLawTransport', ([], {}), '()\n', (3092, 3094), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((4715, 4732), 'numpy.ones', 'np.ones', (['nspecies'], {}), '(nspecies)\n', (4722, 4732), True, 'import numpy as np\n'), ((5642, 5682), 'mirgecom.flux.gradient_flux_central', 'gradient_flux_central', (['int_tpair', 'normal'], {}), '(int_tpair, normal)\n', (5663, 5682), False, 'from mirgecom.flux import gradient_flux_central\n'), ((6062, 6111), 'grudge.trace_pair.TracePair', 'TracePair', (['btag'], {'interior': 'cv_bnd', 'exterior': 'cv_bnd'}), '(btag, interior=cv_bnd, exterior=cv_bnd)\n', (6071, 6111), False, 'from grudge.trace_pair import TracePair\n'), ((6132, 6174), 'mirgecom.flux.gradient_flux_central', 'gradient_flux_central', (['bnd_tpair', 'bnd_nhat'], {}), '(bnd_tpair, bnd_nhat)\n', (6153, 6174), False, 'from mirgecom.flux import gradient_flux_central\n'), ((6578, 6624), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (6599, 6624), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((6773, 6797), 'grudge.dt_utils.h_max_from_volume', 'h_max_from_volume', (['discr'], {}), '(discr)\n', (6790, 6797), False, 'from 
grudge.dt_utils import h_max_from_volume\n'), ((6893, 6923), 'grudge.eager.interior_trace_pair', 'interior_trace_pair', (['discr', 'cv'], {}), '(discr, cv)\n', (6912, 6923), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((8108, 8144), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[dpdx * ones, zeros]'], {}), '([dpdx * ones, zeros])\n', (8122, 8144), False, 'from pytools.obj_array import make_obj_array\n'), ((8160, 8190), 'grudge.op.local_grad', 'op.local_grad', (['discr', 'pressure'], {}), '(discr, pressure)\n', (8173, 8190), True, 'import grudge.op as op\n'), ((8384, 8417), 'grudge.op.local_grad', 'op.local_grad', (['discr', 'temperature'], {}), '(discr, temperature)\n', (8397, 8417), True, 'import grudge.op as op\n'), ((8683, 8727), 'mirgecom.viscous.conductive_heat_flux', 'conductive_heat_flux', (['discr', 'eos', 'cv', 'grad_t'], {}), '(discr, eos, cv, grad_t)\n', (8703, 8727), False, 'from mirgecom.viscous import conductive_heat_flux\n'), ((8969, 9008), 'mirgecom.viscous.diffusive_flux', 'diffusive_flux', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (8983, 9008), False, 'from mirgecom.viscous import diffusive_flux\n'), ((9195, 9240), 'mirgecom.viscous.viscous_flux', 'viscous_flux', (['discr', 'eos', 'cv', 'grad_cv', 'grad_t'], {}), '(discr, eos, cv, grad_cv, grad_t)\n', (9207, 9240), False, 'from mirgecom.viscous import viscous_flux\n'), ((17515, 17544), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'viscosity': 'mu'}), '(viscosity=mu)\n', (17530, 17544), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((3477, 3486), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3483, 3486), True, 'import numpy as np\n'), ((8212, 8224), 'numpy.abs', 'np.abs', (['dpdx'], {}), '(dpdx)\n', (8218, 8224), True, 'import numpy as np\n'), ((9057, 9084), 'numpy.dot', 'np.dot', (['xp_tau', 'cv.velocity'], {}), '(xp_tau, cv.velocity)\n', (9063, 9084), True, 'import numpy as np\n'), ((17736, 17762), 'numpy.dot', 'np.dot', (['velocity', 'velocity'], {}), '(velocity, velocity)\n', (17742, 17762), True, 'import numpy as np\n')]
|
# ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
axis = 0
expected = np.concatenate((a, b), axis=0)
runtime = get_runtime()
parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32)
parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32)
node = ng.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b)
result = computation(a, b)
assert np.allclose(result, expected)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))]
)
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type, value",
[
pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957),
pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_40957),
pytest.param(np.int8, np.int8(-63), marks=xfail_issue_40957),
pytest.param(np.int16, np.int16(-12345), marks=xfail_issue_40957),
pytest.param(np.int32, np.int32(-123456), marks=xfail_issue_40957),
pytest.param(np.int64, np.int64(-1234567), marks=xfail_issue_40957),
pytest.param(np.uint8, np.uint8(63), marks=xfail_issue_40957),
pytest.param(np.uint16, np.uint16(12345), marks=xfail_issue_40957),
pytest.param(np.uint32, np.uint32(123456), marks=xfail_issue_40957),
pytest.param(np.uint64, np.uint64(1234567), marks=xfail_issue_40957),
],
)
def test_constant_from_scalar(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type",
[
pytest.param(np.float32, marks=xfail_issue_40957),
pytest.param(np.float64, marks=xfail_issue_40957),
],
)
def test_constant_from_float_array(val_type):
np.random.seed(133391)
input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, range_start, range_end",
[
(np.int8, -8, 8),
(np.int16, -64, 64),
(np.int32, -1024, 1024),
(np.int64, -16383, 16383),
(np.uint8, 0, 8),
(np.uint16, 0, 64),
(np.uint32, 0, 1024),
(np.uint64, 0, 16383),
],
)
def test_constant_from_integer_array(val_type, range_start, range_end):
np.random.seed(133391)
input_data = np.array(
np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type
)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
def test_broadcast_numpy():
data_shape = [16, 1, 1]
target_shape_shape = [4]
data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
target_shape_parameter = ng.parameter(
target_shape_shape, name="Target_shape", dtype=np.int64
)
node = ng.broadcast(data_parameter, target_shape_parameter)
assert node.get_type_name() == "Broadcast"
assert node.get_output_size() == 1
def test_broadcast_bidirectional():
data_shape = [16, 1, 1]
target_shape_shape = [4]
data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
target_shape_parameter = ng.parameter(
target_shape_shape, name="Target_shape", dtype=np.int64
)
node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL")
assert node.get_type_name() == "Broadcast"
assert node.get_output_size() == 1
def test_gather():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_axes = np.array([1], np.int32)
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2)
)
result = run_op_node([input_data], ng.gather, input_indices, input_axes)
assert np.allclose(result, expected)
def test_transpose():
input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape(
(3, 3, 224, 224)
)
input_order = np.array([0, 2, 3, 1], dtype=np.int32)
result = run_op_node([input_tensor], ng.transpose, input_order)
expected = np.transpose(input_tensor, input_order)
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation."
)
def test_tile():
input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3))
repeats = np.array([2, 1], dtype=np.int32)
result = run_op_node([input_tensor], ng.tile, repeats)
expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3))
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="RuntimeError: Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'"
)
def test_strided_slice():
input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
begin = np.array([1, 0], dtype=np.int32)
end = np.array([0, 0], dtype=np.int32)
strides = np.array([1, 1], dtype=np.int32)
begin_mask = np.array([0, 0, 0], dtype=np.int32)
end_mask = np.array([0, 0, 0], dtype=np.int32)
new_axis_mask = np.array([0, 1, 0], dtype=np.int32)
shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32)
ellipsis_mask = np.array([0, 0, 0], dtype=np.int32)
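    # shrink_axis_mask keeps the single slab at index 1 of axis 0 and new_axis_mask
    # reinserts a length-1 axis, so the result holds input[1, :, :] with shape (1, 3, 4).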
result = run_op_node(
[input_tensor],
ng.strided_slice,
begin,
end,
strides,
begin_mask,
end_mask,
new_axis_mask,
shrink_axis_mask,
ellipsis_mask,
)
expected = np.array(
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32
).reshape((1, 3, 4))
assert np.allclose(result, expected)
def test_reshape_v1():
A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24))
shape = np.array([0, -1, 4], dtype=np.int32)
special_zero = True
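    # With special_zero=True, a 0 in the target shape copies that input dimension (2)
    # and -1 is inferred, giving the expected shape (2, 150, 4).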
expected_shape = np.array([2, 150, 4])
expected = np.reshape(A, expected_shape)
result = run_op_node([A], ng.reshape, shape, special_zero)
assert np.allclose(result, expected)
def test_shape_of():
input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
result = run_op_node([input_tensor], ng.shape_of)
assert np.allclose(result, [3, 3])
|
[
"numpy.uint8",
"numpy.random.rand",
"numpy.int32",
"numpy.array",
"numpy.arange",
"numpy.int8",
"numpy.int64",
"numpy.reshape",
"ngraph.concat",
"pytest.mark.xfail",
"numpy.float64",
"numpy.uint64",
"numpy.uint32",
"numpy.empty",
"numpy.random.seed",
"numpy.concatenate",
"ngraph.parameter",
"tests.test_ngraph.util.run_op_node",
"numpy.allclose",
"numpy.int16",
"tests.runtime.get_runtime",
"numpy.transpose",
"tests.test_ngraph.util.run_op_numeric_data",
"pytest.param",
"pytest.mark.parametrize",
"numpy.random.randint",
"ngraph.broadcast",
"numpy.uint16",
"numpy.float32"
] |
[((3265, 3515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val_type, range_start, range_end"""', '[(np.int8, -8, 8), (np.int16, -64, 64), (np.int32, -1024, 1024), (np.int64,\n -16383, 16383), (np.uint8, 0, 8), (np.uint16, 0, 64), (np.uint32, 0, \n 1024), (np.uint64, 0, 16383)]'], {}), "('val_type, range_start, range_end', [(np.int8, -8, \n 8), (np.int16, -64, 64), (np.int32, -1024, 1024), (np.int64, -16383, \n 16383), (np.uint8, 0, 8), (np.uint16, 0, 64), (np.uint32, 0, 1024), (np\n .uint64, 0, 16383)])\n", (3288, 3515), False, 'import pytest\n'), ((5610, 5744), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation."""'}), "(reason=\n 'Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation.'\n )\n", (5627, 5744), False, 'import pytest\n'), ((6059, 6177), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""RuntimeError: Check \'shape_size(get_input_shape(0)) == shape_size(output_shape)\'"""'}), '(reason=\n "RuntimeError: Check \'shape_size(get_input_shape(0)) == shape_size(output_shape)\'"\n )\n', (6076, 6177), False, 'import pytest\n'), ((973, 999), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (981, 999), True, 'import numpy as np\n'), ((1008, 1026), 'numpy.array', 'np.array', (['[[5, 6]]'], {}), '([[5, 6]])\n', (1016, 1026), True, 'import numpy as np\n'), ((1055, 1085), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': '(0)'}), '((a, b), axis=0)\n', (1069, 1085), True, 'import numpy as np\n'), ((1101, 1114), 'tests.runtime.get_runtime', 'get_runtime', ([], {}), '()\n', (1112, 1114), False, 'from tests.runtime import get_runtime\n'), ((1274, 1317), 'ngraph.concat', 'ng.concat', (['[parameter_a, parameter_b]', 'axis'], {}), '([parameter_a, parameter_b], axis)\n', (1283, 1317), True, 'import ngraph as ng\n'), ((1430, 1459), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (1441, 1459), True, 'import numpy as np\n'), ((1647, 1678), 'numpy.array', 'np.array', (['value'], {'dtype': 'val_type'}), '(value, dtype=val_type)\n', (1655, 1678), True, 'import numpy as np\n'), ((1692, 1741), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['value', 'ng.constant', 'val_type'], {}), '(value, ng.constant, val_type)\n', (1711, 1741), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((1753, 1782), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (1764, 1782), True, 'import numpy as np\n'), ((2670, 2701), 'numpy.array', 'np.array', (['value'], {'dtype': 'val_type'}), '(value, dtype=val_type)\n', (2678, 2701), True, 'import numpy as np\n'), ((2715, 2764), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['value', 'ng.constant', 'val_type'], {}), '(value, ng.constant, val_type)\n', (2734, 2764), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((2776, 2805), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (2787, 2805), True, 'import numpy as np\n'), ((3033, 3055), 'numpy.random.seed', 'np.random.seed', (['(133391)'], {}), '(133391)\n', (3047, 3055), True, 'import numpy as np\n'), ((3145, 3199), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['input_data', 'ng.constant', 'val_type'], {}), '(input_data, ng.constant, val_type)\n', (3164, 3199), False, 'from 
tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((3211, 3242), 'numpy.allclose', 'np.allclose', (['result', 'input_data'], {}), '(result, input_data)\n', (3222, 3242), True, 'import numpy as np\n'), ((3659, 3681), 'numpy.random.seed', 'np.random.seed', (['(133391)'], {}), '(133391)\n', (3673, 3681), True, 'import numpy as np\n'), ((3807, 3861), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['input_data', 'ng.constant', 'val_type'], {}), '(input_data, ng.constant, val_type)\n', (3826, 3861), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((3873, 3904), 'numpy.allclose', 'np.allclose', (['result', 'input_data'], {}), '(result, input_data)\n', (3884, 3904), True, 'import numpy as np\n'), ((4014, 4069), 'ngraph.parameter', 'ng.parameter', (['data_shape'], {'name': '"""Data"""', 'dtype': 'np.float32'}), "(data_shape, name='Data', dtype=np.float32)\n", (4026, 4069), True, 'import ngraph as ng\n'), ((4099, 4168), 'ngraph.parameter', 'ng.parameter', (['target_shape_shape'], {'name': '"""Target_shape"""', 'dtype': 'np.int64'}), "(target_shape_shape, name='Target_shape', dtype=np.int64)\n", (4111, 4168), True, 'import ngraph as ng\n'), ((4195, 4247), 'ngraph.broadcast', 'ng.broadcast', (['data_parameter', 'target_shape_parameter'], {}), '(data_parameter, target_shape_parameter)\n', (4207, 4247), True, 'import ngraph as ng\n'), ((4452, 4507), 'ngraph.parameter', 'ng.parameter', (['data_shape'], {'name': '"""Data"""', 'dtype': 'np.float32'}), "(data_shape, name='Data', dtype=np.float32)\n", (4464, 4507), True, 'import ngraph as ng\n'), ((4537, 4606), 'ngraph.parameter', 'ng.parameter', (['target_shape_shape'], {'name': '"""Target_shape"""', 'dtype': 'np.int64'}), "(target_shape_shape, name='Target_shape', dtype=np.int64)\n", (4549, 4606), True, 'import ngraph as ng\n'), ((4633, 4702), 'ngraph.broadcast', 'ng.broadcast', (['data_parameter', 'target_shape_parameter', '"""BIDIRECTIONAL"""'], {}), "(data_parameter, target_shape_parameter, 'BIDIRECTIONAL')\n", (4645, 4702), True, 'import ngraph as ng\n'), ((5004, 5027), 'numpy.array', 'np.array', (['[1]', 'np.int32'], {}), '([1], np.int32)\n', (5012, 5027), True, 'import numpy as np\n'), ((5150, 5213), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_data]', 'ng.gather', 'input_indices', 'input_axes'], {}), '([input_data], ng.gather, input_indices, input_axes)\n', (5161, 5213), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((5225, 5254), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (5236, 5254), True, 'import numpy as np\n'), ((5401, 5439), 'numpy.array', 'np.array', (['[0, 2, 3, 1]'], {'dtype': 'np.int32'}), '([0, 2, 3, 1], dtype=np.int32)\n', (5409, 5439), True, 'import numpy as np\n'), ((5454, 5508), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.transpose', 'input_order'], {}), '([input_tensor], ng.transpose, input_order)\n', (5465, 5508), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((5525, 5564), 'numpy.transpose', 'np.transpose', (['input_tensor', 'input_order'], {}), '(input_tensor, input_order)\n', (5537, 5564), True, 'import numpy as np\n'), ((5577, 5606), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (5588, 5606), True, 'import numpy as np\n'), ((5839, 5871), 'numpy.array', 'np.array', (['[2, 1]'], {'dtype': 'np.int32'}), '([2, 1], dtype=np.int32)\n', (5847, 5871), True, 
'import numpy as np\n'), ((5886, 5931), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.tile', 'repeats'], {}), '([input_tensor], ng.tile, repeats)\n', (5897, 5931), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((6026, 6055), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (6037, 6055), True, 'import numpy as np\n'), ((6289, 6321), 'numpy.array', 'np.array', (['[1, 0]'], {'dtype': 'np.int32'}), '([1, 0], dtype=np.int32)\n', (6297, 6321), True, 'import numpy as np\n'), ((6332, 6364), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.int32'}), '([0, 0], dtype=np.int32)\n', (6340, 6364), True, 'import numpy as np\n'), ((6379, 6411), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': 'np.int32'}), '([1, 1], dtype=np.int32)\n', (6387, 6411), True, 'import numpy as np\n'), ((6429, 6464), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (6437, 6464), True, 'import numpy as np\n'), ((6480, 6515), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (6488, 6515), True, 'import numpy as np\n'), ((6536, 6571), 'numpy.array', 'np.array', (['[0, 1, 0]'], {'dtype': 'np.int32'}), '([0, 1, 0], dtype=np.int32)\n', (6544, 6571), True, 'import numpy as np\n'), ((6595, 6630), 'numpy.array', 'np.array', (['[1, 0, 0]'], {'dtype': 'np.int32'}), '([1, 0, 0], dtype=np.int32)\n', (6603, 6630), True, 'import numpy as np\n'), ((6651, 6686), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (6659, 6686), True, 'import numpy as np\n'), ((6701, 6841), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.strided_slice', 'begin', 'end', 'strides', 'begin_mask', 'end_mask', 'new_axis_mask', 'shrink_axis_mask', 'ellipsis_mask'], {}), '([input_tensor], ng.strided_slice, begin, end, strides,\n begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask)\n', (6712, 6841), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((7063, 7092), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (7074, 7092), True, 'import numpy as np\n'), ((7195, 7231), 'numpy.array', 'np.array', (['[0, -1, 4]'], {'dtype': 'np.int32'}), '([0, -1, 4], dtype=np.int32)\n', (7203, 7231), True, 'import numpy as np\n'), ((7278, 7299), 'numpy.array', 'np.array', (['[2, 150, 4]'], {}), '([2, 150, 4])\n', (7286, 7299), True, 'import numpy as np\n'), ((7315, 7344), 'numpy.reshape', 'np.reshape', (['A', 'expected_shape'], {}), '(A, expected_shape)\n', (7325, 7344), True, 'import numpy as np\n'), ((7358, 7407), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[A]', 'ng.reshape', 'shape', 'special_zero'], {}), '([A], ng.reshape, shape, special_zero)\n', (7369, 7407), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((7420, 7449), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (7431, 7449), True, 'import numpy as np\n'), ((7492, 7553), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float32'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\n', (7500, 7553), True, 'import numpy as np\n'), ((7568, 7608), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.shape_of'], {}), '([input_tensor], ng.shape_of)\n', (7579, 7608), False, 'from tests.test_ngraph.util import run_op_node, 
run_op_numeric_data\n'), ((7621, 7648), 'numpy.allclose', 'np.allclose', (['result', '[3, 3]'], {}), '(result, [3, 3])\n', (7632, 7648), True, 'import numpy as np\n'), ((2864, 2913), 'pytest.param', 'pytest.param', (['np.float32'], {'marks': 'xfail_issue_40957'}), '(np.float32, marks=xfail_issue_40957)\n', (2876, 2913), False, 'import pytest\n'), ((2923, 2972), 'pytest.param', 'pytest.param', (['np.float64'], {'marks': 'xfail_issue_40957'}), '(np.float64, marks=xfail_issue_40957)\n', (2935, 2972), False, 'import pytest\n'), ((3717, 3771), 'numpy.random.randint', 'np.random.randint', (['range_start', 'range_end'], {'size': '(2, 2)'}), '(range_start, range_end, size=(2, 2))\n', (3734, 3771), True, 'import numpy as np\n'), ((1553, 1581), 'numpy.empty', 'np.empty', (['(2, 2)'], {'dtype': 'bool'}), '((2, 2), dtype=bool)\n', (1561, 1581), True, 'import numpy as np\n'), ((1873, 1891), 'numpy.float32', 'np.float32', (['(0.1234)'], {}), '(0.1234)\n', (1883, 1891), True, 'import numpy as np\n'), ((1952, 1970), 'numpy.float64', 'np.float64', (['(0.1234)'], {}), '(0.1234)\n', (1962, 1970), True, 'import numpy as np\n'), ((2028, 2040), 'numpy.int8', 'np.int8', (['(-63)'], {}), '(-63)\n', (2035, 2040), True, 'import numpy as np\n'), ((2099, 2115), 'numpy.int16', 'np.int16', (['(-12345)'], {}), '(-12345)\n', (2107, 2115), True, 'import numpy as np\n'), ((2174, 2191), 'numpy.int32', 'np.int32', (['(-123456)'], {}), '(-123456)\n', (2182, 2191), True, 'import numpy as np\n'), ((2250, 2268), 'numpy.int64', 'np.int64', (['(-1234567)'], {}), '(-1234567)\n', (2258, 2268), True, 'import numpy as np\n'), ((2327, 2339), 'numpy.uint8', 'np.uint8', (['(63)'], {}), '(63)\n', (2335, 2339), True, 'import numpy as np\n'), ((2399, 2415), 'numpy.uint16', 'np.uint16', (['(12345)'], {}), '(12345)\n', (2408, 2415), True, 'import numpy as np\n'), ((2475, 2492), 'numpy.uint32', 'np.uint32', (['(123456)'], {}), '(123456)\n', (2484, 2492), True, 'import numpy as np\n'), ((2552, 2570), 'numpy.uint64', 'np.uint64', (['(1234567)'], {}), '(1234567)\n', (2561, 2570), True, 'import numpy as np\n'), ((4828, 4895), 'numpy.array', 'np.array', (['[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2]', 'np.float32'], {}), '([1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32)\n', (4836, 4895), True, 'import numpy as np\n'), ((4946, 4972), 'numpy.array', 'np.array', (['[0, 2]', 'np.int32'], {}), '([0, 2], np.int32)\n', (4954, 4972), True, 'import numpy as np\n'), ((5044, 5102), 'numpy.array', 'np.array', (['[1.0, 1.2, 2.0, 2.2, 3.0, 3.2]'], {'dtype': 'np.float32'}), '([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32)\n', (5052, 5102), True, 'import numpy as np\n'), ((5298, 5342), 'numpy.arange', 'np.arange', (['(3 * 3 * 224 * 224)'], {'dtype': 'np.int32'}), '(3 * 3 * 224 * 224, dtype=np.int32)\n', (5307, 5342), True, 'import numpy as np\n'), ((5777, 5805), 'numpy.arange', 'np.arange', (['(6)'], {'dtype': 'np.int32'}), '(6, dtype=np.int32)\n', (5786, 5805), True, 'import numpy as np\n'), ((5948, 5994), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]'], {}), '([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5])\n', (5956, 5994), True, 'import numpy as np\n'), ((6219, 6257), 'numpy.arange', 'np.arange', (['(2 * 3 * 4)'], {'dtype': 'np.float32'}), '(2 * 3 * 4, dtype=np.float32)\n', (6228, 6257), True, 'import numpy as np\n'), ((6941, 7017), 'numpy.array', 'np.array', (['[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]'], {'dtype': 'np.float32'}), '([12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32)\n', (6949, 7017), True, 
'import numpy as np\n'), ((7126, 7159), 'numpy.arange', 'np.arange', (['(1200)'], {'dtype': 'np.float32'}), '(1200, dtype=np.float32)\n', (7135, 7159), True, 'import numpy as np\n'), ((3087, 3110), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (3101, 3110), True, 'import numpy as np\n')]
|
from numpy import log10, isnan
def signOfFeasible(p):
r = '-'
if p.isFeas(p.xk): r = '+'
return r
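# Map each text-output column name to a formatter that renders the current state of problem instance p as a display string.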
textOutputDict = {
'objFunVal': lambda p: p.iterObjFunTextFormat % (-p.Fk if p.invertObjFunc else p.Fk),
'log10(maxResidual)': lambda p: '%0.2f' % log10(p.rk+1e-100),
'log10(MaxResidual/ConTol)':lambda p: '%0.2f' % log10(max((p.rk/p.contol, 1e-100))),
'residual':lambda p: '%0.1e' % p._Residual,
'isFeasible': signOfFeasible,
'nSolutions': lambda p: '%d' % p._nObtainedSolutions,
'front length':lambda p: '%d' % p._frontLength,
'outcome': lambda p: ('%+d' % -p._nOutcome if p._nOutcome != 0 else ''),
'income': lambda p: ('%+d' % p._nIncome if p._nIncome != 0 else ''),
'f*_distance_estim': lambda p: ('%0.1g' % p.f_bound_distance if not isnan(p.f_bound_distance) else 'N/A'),
'f*_bound_estim': lambda p: (p.iterObjFunTextFormat % \
p.f_bound_estimation) if not isnan(p.f_bound_estimation) else 'N/A',
}
delimiter = ' '
class ooTextOutput:
def __init__(self):
pass
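    # Print the column header on iteration 0, then one formatted row per iteration, subject to the iprint setting.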
def iterPrint(self):
if self.lastPrintedIter == self.iter: return
if self.iter == 0 and self.iprint >= 0: # 0th iter (start)
s = ' iter' + delimiter
for fn in self.data4TextOutput:
s += fn + delimiter
self.disp(s)
elif self.iprint<0 or \
(((self.iprint>0 and self.iter % self.iprint != 0) or self.iprint==0) and not(self.isFinished or self.iter == 0)):
return
s = str(self.iter).rjust(5) + ' '
for columnName in self.data4TextOutput:
val = textOutputDict[columnName](self)
#nWhole = length(columnName)
s += val.rjust(len(columnName)) + ' '
self.disp(s)
self.lastPrintedIter = self.iter
|
[
"numpy.log10",
"numpy.isnan"
] |
[((260, 280), 'numpy.log10', 'log10', (['(p.rk + 1e-100)'], {}), '(p.rk + 1e-100)\n', (265, 280), False, 'from numpy import log10, isnan\n'), ((759, 784), 'numpy.isnan', 'isnan', (['p.f_bound_distance'], {}), '(p.f_bound_distance)\n', (764, 784), False, 'from numpy import log10, isnan\n'), ((884, 911), 'numpy.isnan', 'isnan', (['p.f_bound_estimation'], {}), '(p.f_bound_estimation)\n', (889, 911), False, 'from numpy import log10, isnan\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import os
path = "/Users/petermarinov/msci project/electrode data/test data/data/"
filenames = []
for f in os.listdir(path):
if not f.startswith('.'):
filenames.append(f)
file_index = -12  # pick the 12th file from the end of the directory listing
data = np.genfromtxt(path + filenames[file_index])
V = np.zeros((200,200))
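# Unpack the file onto the 200x200 grid as membrane voltages: state 0 -> resting (-90 mV), state 1 -> excited (+20 mV), state > 1 -> linear repolarisation back toward rest.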
for i in range (0,200):
for j in range (0,200):
if data[j+200*i][0] == 0:
V[i,j] = -90.0
if data[j+200*i][0] >1:
V[i,j] = 20.-(110./data[j+200*i][1])*(data[j+200*i][0]-1)
if data[j+200*i][0] ==1:
V[i,j] = 20.
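# Electrode signal: for each electrode position j1, accumulate the in-plane gradient of V dotted with the displacement to the electrode, divided by the cubed distance (the electrode sits at height k above the plane).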
i1 = 50   # electrode position along the first grid axis
k = 3     # electrode height above the grid plane
total = []
x = 0.0   # running sum for the current electrode position
elec = np.zeros((200, 200, 200))  # only needed by the commented-out per-cell variant below
for j1 in range(0, 200):
    for i in range(1, 200):
        for j in range(1, 200):
            #elec[j1,i,j] = np.divide(float((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1])),float(((i-i1)**2+ (j-j1)**2 +k**2)**(3/2)))
            # The original active line wrote "**3/2", which Python parses as (...**3)/2;
            # the intended exponent (see the commented-out formula above) is 3/2, i.e. 1.5.
            x += float((i-i1)*(V[i,j]-V[i-1,j]) + (j-j1)*(V[i,j]-V[i,j-1])) \
                 / float(((i-i1)**2 + (j-j1)**2 + k**2)**1.5)
    total.append(x)
    x = 0.0
plt.plot(total)
plt.xlabel("time [dimensionless]", fontsize = 18)
plt.ylabel("Voltage [mV]" , fontsize = 18)
plt.title("Electrode measurement for a healthy pacing heart")
plt.grid()
plt.show()
|
[
"os.listdir",
"matplotlib.pyplot.grid",
"numpy.float",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.show"
] |
[((167, 183), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (177, 183), False, 'import os\n'), ((264, 298), 'numpy.genfromtxt', 'np.genfromtxt', (['(path + filenames[i])'], {}), '(path + filenames[i])\n', (277, 298), True, 'import numpy as np\n'), ((304, 324), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (312, 324), True, 'import numpy as np\n'), ((656, 681), 'numpy.zeros', 'np.zeros', (['(200, 200, 200)'], {}), '((200, 200, 200))\n', (664, 681), True, 'import numpy as np\n'), ((1211, 1226), 'matplotlib.pyplot.plot', 'plt.plot', (['total'], {}), '(total)\n', (1219, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [dimentionless]"""'], {'fontsize': '(18)'}), "('time [dimentionless]', fontsize=18)\n", (1238, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1318), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Voltage [mV]"""'], {'fontsize': '(18)'}), "('Voltage [mV]', fontsize=18)\n", (1289, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1384), 'matplotlib.pyplot.title', 'plt.title', (['"""Electrode measurement for a healthy pacing heart"""'], {}), "('Electrode measurement for a healthy pacing heart')\n", (1332, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1386, 1396), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1394, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1406, 1408), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1116), 'numpy.float', 'np.float', (['((i - i1) * (V[i, j] - V[i - 1, j]) + (j - j1) * (V[i, j] - V[i, j - 1]))'], {}), '((i - i1) * (V[i, j] - V[i - 1, j]) + (j - j1) * (V[i, j] - V[i, j -\n 1]))\n', (1039, 1116), True, 'import numpy as np\n'), ((1091, 1150), 'numpy.float', 'np.float', (['(((i - i1) ** 2 + (j - j1) ** 2 + k ** 2) ** 3 / 2)'], {}), '(((i - i1) ** 2 + (j - j1) ** 2 + k ** 2) ** 3 / 2)\n', (1099, 1150), True, 'import numpy as np\n')]
|
from data_reader.reader import CsvReader
from util import *
import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression(object):
def __init__(self, learning_rate=0.01, epochs=50):
self.__epochs= epochs
self.__learning_rate = learning_rate
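    # Full-batch gradient updates on the logistic log-likelihood; the cost of each epoch is appended to self.cost_.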
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
for i in range(self.__epochs):
# 1- Calculate the net input W^T * x
z = self.__net_input(X)
# 2- Get the activation using Sigmoid function
h = self.__activation(z)
            # 3- Calculate the gradient from the prediction errors
            errors = y - h
            grad = X.T.dot(errors)
            # 4- Update the weights and bias using the gradient and learning rate
            self.w_[1:] += self.__learning_rate * grad
            # The bias gradient is the sum of the errors themselves, not the sum of the weight gradient.
            self.w_[0] += self.__learning_rate * errors.sum()
# 5- Uncomment the cost collecting line
self.cost_.append(self.__logit_cost(y, self.__activation(z)))
def __logit_cost(self, y, y_val):
logit = -y.dot(np.log(y_val)) - ((1 - y).dot(np.log(1 - y_val)))
return logit
def __sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
def __net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def __activation(self, X):
return self.__sigmoid(X)
    def predict(self, X):
        # 1- Calculate the net input W^T * x
        z = self.__net_input(X)
        # 2- Threshold the activation at 0.5 to get the 0/1 class labels
        h = self.__activation(z)
        return np.where(h >= 0.5, 1, 0)
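# --- Script: load the Iris data, drop the virginica class, shuffle, train on the first 89 samples, and evaluate on the rest ---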
reader = CsvReader("./data/Iris.csv")
iris_features, iris_labels = reader.get_iris_data()
ignore_verginica = [i for i, v in enumerate(iris_labels) if v == 'Iris-virginica']
iris_features = [v for i, v in enumerate(iris_features) if i not in ignore_verginica]
iris_labels = [v for i, v in enumerate(iris_labels) if i not in ignore_verginica]
print(len(iris_features))
print(len(iris_labels))
iris_features, iris_labels = shuffle(iris_features, iris_labels)
iris_labels = to_onehot(iris_labels)
iris_labels = list(map(lambda v: v.index(max(v)), iris_labels))
train_x, train_y, test_x, test_y = iris_features[0:89], iris_labels[0:89], iris_features[89:], iris_labels[89:]
train_x, train_y, test_x, test_y = np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
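# Standardize the features; the test set reuses the means and stds computed on the training set.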
train_x, means, stds = standardize(train_x)
test_x = standardize(test_x, means, stds)
lr = LogisticRegression(learning_rate=0.1, epochs=50)
lr.fit(train_x, train_y)
plt.plot(range(1, len(lr.cost_) + 1), np.log10(lr.cost_))
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.title('Logistic Regression - Learning rate 0.1')
plt.tight_layout()
plt.show()
predicted_test = lr.predict(test_x)
print("Test Accuracy: " + str(((sum([predicted_test[i] == test_y[i] for i in range(0, len(predicted_test))]) / len(predicted_test)) * 100.0)) + "%")
|
[
"numpy.log10",
"matplotlib.pyplot.ylabel",
"data_reader.reader.CsvReader",
"matplotlib.pyplot.xlabel",
"numpy.log",
"numpy.asarray",
"numpy.exp",
"numpy.zeros",
"numpy.dot",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] |
[((1601, 1629), 'data_reader.reader.CsvReader', 'CsvReader', (['"""./data/Iris.csv"""'], {}), "('./data/Iris.csv')\n", (1610, 1629), False, 'from data_reader.reader import CsvReader\n'), ((2608, 2628), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2618, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2629, 2647), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (2639, 2647), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2700), 'matplotlib.pyplot.title', 'plt.title', (['"""Logistic Regression - Learning rate 0.1"""'], {}), "('Logistic Regression - Learning rate 0.1')\n", (2657, 2700), True, 'import matplotlib.pyplot as plt\n'), ((2702, 2720), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2718, 2720), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2729, 2731), True, 'import matplotlib.pyplot as plt\n'), ((2301, 2320), 'numpy.asarray', 'np.asarray', (['train_x'], {}), '(train_x)\n', (2311, 2320), True, 'import numpy as np\n'), ((2322, 2341), 'numpy.asarray', 'np.asarray', (['train_y'], {}), '(train_y)\n', (2332, 2341), True, 'import numpy as np\n'), ((2343, 2361), 'numpy.asarray', 'np.asarray', (['test_x'], {}), '(test_x)\n', (2353, 2361), True, 'import numpy as np\n'), ((2363, 2381), 'numpy.asarray', 'np.asarray', (['test_y'], {}), '(test_y)\n', (2373, 2381), True, 'import numpy as np\n'), ((2588, 2606), 'numpy.log10', 'np.log10', (['lr.cost_'], {}), '(lr.cost_)\n', (2596, 2606), True, 'import numpy as np\n'), ((321, 345), 'numpy.zeros', 'np.zeros', (['(1 + X.shape[1])'], {}), '(1 + X.shape[1])\n', (329, 345), True, 'import numpy as np\n'), ((1236, 1258), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (1242, 1258), True, 'import numpy as np\n'), ((1079, 1096), 'numpy.log', 'np.log', (['(1 - y_val)'], {}), '(1 - y_val)\n', (1085, 1096), True, 'import numpy as np\n'), ((1178, 1188), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1184, 1188), True, 'import numpy as np\n'), ((1049, 1062), 'numpy.log', 'np.log', (['y_val'], {}), '(y_val)\n', (1055, 1062), True, 'import numpy as np\n')]
|