from abc import ABC
import numpy as np
from pydyn.base.expr import Expression, Expr, Manifold
from pydyn.operations.transpose import Transpose
from pydyn.utils.errors import UndefinedCaseError, ExpressionMismatchError
class MatrixExpr(Expr, ABC):
def __init__(self):
super().__init__()
self.type = Expression.MATRIX
def __str__(self):
raise NotImplementedError
def __add__(self, other):
from pydyn.operations.addition import MAdd
return MAdd(self, other)
def __iadd__(self, other):
from pydyn.operations.addition import MAdd
return MAdd(self, other)
def __mul__(self, other):
from pydyn.operations.multiplication import SMMul, MVMul, MMMul
from pydyn.base.scalars import Scalar
if type(other) == float or type(other) == int:
other = Scalar('(' + str(other) + ')', value=other, attr=['Constant'])
if other.type == Expression.SCALAR:
return SMMul(self, other)
elif other.type == Expression.VECTOR:
if type(other) == type(Transpose(None)):
raise ExpressionMismatchError
else:
return MVMul(self, other)
elif other.type == Expression.MATRIX:
return MMMul(self, other)
else:
raise UndefinedCaseError
class Matrix(MatrixExpr, ABC):
def __init__(self, s=None, size=(3, 3), value=None, attr=None):
super().__init__()
self.name = s
self.size = size
if value is None:
self.value = np.empty(size, dtype='object')
else:
self.value = value
if attr is None:
self.attr = []
else:
self.attr = attr
if 'SymmetricMatrix' in self.attr:
self.isSymmetric = True
else:
self.isSymmetric = False
def __str__(self):
return self.name
def delta(self):
if self.isOnes or self.isZero or self.isConstant:
return Matrix('O', attr=['Constant', 'Zero'])
else:
from pydyn.operations.geometry import Delta
return Delta(self)
def variation_vector(self):
return self.delta()
def diff(self):
if self.isConstant:
return Matrix(s='0', size=self.size, attr=['Constant', 'Zero'])
else:
return Matrix(s='dot_' + self.name, size=self.size)
def integrate(self):
if self.isConstant:
raise NotImplementedError
else:
s = self.name
if 'dot_' in s:
                s = s.replace('dot_', '')
return Matrix(s=s, size=self.size)
else:
return Matrix(s='int_' + s, size=self.size)
class SkewSymmMatrix(Matrix, ABC):
def __init__(self):
super().__init__()
self.attr.append('SkewSymmetry')
class SO3(Matrix, Manifold, ABC):
def __init__(self, s=None, size=(3, 3), value=None, attr=None):
super().__init__(s, size, value, attr)
super(Manifold, self).__init__()
self.tangent_vector = '\\Omega_{' + self.name + '}'
self.variation_vector = '\\eta_{' + self.name + '}'
if attr is None:
attr = []
attr.append('Manifold')
self.attr = attr
def delta(self):
from pydyn.operations.multiplication import MMMul
from pydyn.operations.geometry import Hat
return MMMul(self, Hat(self.get_variation_vector()))
def get_tangent_vector(self):
from pydyn.base.vectors import TSO3
return TSO3(self.tangent_vector, SO3=self)
def get_variation_vector(self):
from pydyn.base.vectors import Vector
return Vector(self.variation_vector)
def diff(self):
from pydyn.operations.multiplication import MMMul
from pydyn.operations.geometry import Hat
return MMMul(self, Hat(self.get_tangent_vector()))
ZeroMatrix = Matrix('0', attr=['Constant', 'Zero'])
IdentityMatrix = Matrix('I', attr=['Constant', 'Identity'])
O = ZeroMatrix
I = IdentityMatrix
def getMatrices(x):
if isinstance(x, list):
vars_ = x
elif isinstance(x, str):
vars_ = x.split()
else:
return None
s = []
for v in vars_:
s.append(Matrix(v))
return tuple(s)
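# Minimal usage sketch (illustrative, not part of the original module): assumes the
# rest of the pydyn package (pydyn.operations.addition, pydyn.operations.multiplication,
# pydyn.operations.geometry, pydyn.base.vectors) is importable alongside this file.
if __name__ == '__main__':
    A, B = getMatrices('A B')   # two symbolic 3x3 matrices
    expr = A * B + A            # builds an MAdd(MMMul(A, B), A) expression tree
    R = SO3('R')                # a rotation matrix living on the SO(3) manifold
    dot_R = R.diff()            # R * hat(Omega_R), the SO(3) kinematic relation
    delta_R = R.delta()         # R * hat(eta_R), the variation of R
    print(type(expr).__name__, type(dot_R).__name__, type(delta_R).__name__)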
# processing the SA2 and road shapefiles
# inputs: raw SA2 and road shapefiles
# Outputs: Adelaide SA2 nodal and link dataframes with transport information
# Outputs are pickles:
# sa2_node_with_only_transport_attributes.pickle
# sa2_edge_with_only_transport_attributes.pickle
# Processing files saved:
# sa2_adelaide.shp, sa2_adelaide_edge.shp, OD_full_path.pickle, sa2_roads_in_adelaide.shp, etc.
# utils needed: a shortest-path dictionary and a function turning the road network into the link dataframe.
# time: ~15 min
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
from pysal.lib import weights
import networkx as nx
import momepy
import pickle
# system path
import sys
import os
# util path
utility_path = os.path.join(os.getcwd(),'src/d00_utils/')
sys.path.append(utility_path)
import utilities as util
# data path
# sw: define the path based on the root project directory.
raw_data_path = os.path.join(os.getcwd(),'data/01_raw/')
intermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')
# # read files
# mount_path = "/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia"
#region 1. Extract the SA2s for the Adelaide area.
# raw data
sa2_shape = gpd.read_file(raw_data_path + "sa2/SA2_2016_AUST.shp")
# Keep Adelaide area
# info from: file:///Users/shenhaowang/Downloads/StatePublicHealthPlan_Final.pdf (page 32)
adelaide_sa4_set = ['401','402','403','404']
sa2_adelaide = sa2_shape.loc[sa2_shape.SA4_CODE16.isin(adelaide_sa4_set)]
print("Shape of SA2 in the Adelaide area is: ", sa2_adelaide.shape)
# only use the most relevant variables.
sa2_adelaide = sa2_adelaide[['SA2_MAIN16', 'SA2_NAME16', 'geometry']]
# projection
sa2_adelaide.crs = 'epsg:3112'
print(sa2_adelaide.crs)
# create a sa2_adelaide link dataframe
index = pd.MultiIndex.from_product([sa2_adelaide['SA2_MAIN16'], sa2_adelaide['SA2_MAIN16']], names=['O', 'D'])
sa2_adelaide_link_df = pd.DataFrame(index=index).reset_index()
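# this is the full cartesian product of SA2 codes: one row per ordered (O, D) pair,
# including O == D self-pairs (110 x 110 rows for the 110 Adelaide SA2s used below).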
# add the geometry part to sa2_adelaide_link_df
from shapely.geometry import LineString
edge_list = []
for idx in range(sa2_adelaide_link_df.shape[0]):
origin = sa2_adelaide_link_df.loc[idx, 'O']
destination = sa2_adelaide_link_df.loc[idx, 'D']
edge = LineString([sa2_adelaide.loc[sa2_adelaide['SA2_MAIN16'] == origin, 'geometry'].centroid.values[0],
sa2_adelaide.loc[sa2_adelaide['SA2_MAIN16'] == destination, 'geometry'].centroid.values[0]])
edge_list.append(edge)
sa2_adelaide_link_df['geometry'] = edge_list
# create the gpd object
sa2_adelaide_link = gpd.GeoDataFrame(sa2_adelaide_link_df, crs='epsg:3112')
# save the process SA2 Adelaide shapefile
sa2_adelaide.to_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')
sa2_adelaide_link.to_file(intermediate_data_path+'shapefiles/sa2_adelaide_edge.shp')
#endregion
#region 2. Create the OD shortest path dictionary for SA2 Adelaide shapefile.
sa2_adelaide=gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')
# create the queen contiguity network
adelaide_queen=weights.contiguity.Queen.from_dataframe(sa2_adelaide)
# create the kernel network (using Euclidean distances)
sa2_adelaide_kernel = weights.distance.Kernel.from_dataframe(sa2_adelaide, k=109)
# convert the default kernel weights back into Euclidean distances to use as edge weights.
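# with PySAL's default triangular kernel, weight = 1 - distance / bandwidth,
# so distance = (1 - weight) * bandwidth recovers the original distance.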
for i in sa2_adelaide_kernel.neighbors.keys():
for j_idx in range(len(sa2_adelaide_kernel.neighbors[i])):
j = sa2_adelaide_kernel.neighbors[i][j_idx]
# note that kw.weights indices are
# i (node index), j_idx (index of the node on the list - not node index!)
weight = sa2_adelaide_kernel.weights[i][j_idx]
distance = (1 - weight) * sa2_adelaide_kernel.bandwidth[i]
sa2_adelaide_kernel.weights[i][j_idx] = distance[0]
# assign euclidean weights to Queen net
for o in adelaide_queen.neighbors.keys():
# print(o)
for d_idx in range(len(adelaide_queen.neighbors[o])):
d = adelaide_queen.neighbors[o][d_idx] # return the o and d SA2 original indices.
weight = sa2_adelaide_kernel[o][d] # get the kernel weight associated with the o and d.
adelaide_queen.weights[o][d_idx] = weight
# print(adelaide_queen.weights)
# create the nx object
adelaide_nx = adelaide_queen.to_networkx()
# assign weights to adelaide_nx
for o,d in adelaide_nx.edges:
adelaide_nx.edges[o,d]['weight'] = adelaide_queen[o][d]
# create the OD dictionary for the full shortest paths.
path=dict(nx.all_pairs_dijkstra(adelaide_nx, weight='weight'))
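# nx.all_pairs_dijkstra yields (node, (distance_dict, path_dict)) tuples, so
# path[o][0][d] is the shortest-path length and path[o][1][d] is the list of
# nodes on the shortest path from o to d.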
# create an OD dictionary.
OD_full_path = {}
for o in range(110):
for d in range(110):
        if d == 103 or o == 103: # note that 103 is the island - there is no path to it.
pass
else:
OD_full_path[(o,d)] = path[o][1][d]
# note: OD_full_path idx is the same as sa2_adelaide!
with open(intermediate_data_path+'OD_full_path.pickle', 'wb') as f:
pickle.dump(OD_full_path, f)
#endregion
#region 3. Read road shapefiles and save them
sa2_roads = gpd.read_file(raw_data_path + "roads/Roads_GDA2020.shp")
sa2_roads = sa2_roads.loc[~sa2_roads['class'].isna(),]
# projection to epsg:3112
sa2_roads.crs = 'epsg:3112'
# combine freeway and highway as one category (HWY).
sa2_roads.loc[sa2_roads['class'] == 'FREE', 'class'] = 'HWY'
# extract three types of roads for GIS visualization
sa2_roads_LOCL = sa2_roads.loc[sa2_roads['class'] == 'LOCL', :]
sa2_roads_HWY = sa2_roads.loc[sa2_roads['class'] == 'HWY', :]
sa2_roads_UND = sa2_roads.loc[sa2_roads['class'] == 'UND', :]
# np.unique(sa2_roads['class'], return_counts = True)
# save shapefiles
sa2_roads.to_file(intermediate_data_path+"shapefiles/sa2_roads.shp")
sa2_roads_LOCL.to_file(intermediate_data_path+"shapefiles/sa2_roads_LOCL.shp")
sa2_roads_HWY.to_file(intermediate_data_path+"shapefiles/sa2_roads_HWY.shp")
sa2_roads_UND.to_file(intermediate_data_path+"shapefiles/sa2_roads_UND.shp")
#endregion
#region 4. Turn road shapefiles to node attributes of SA2s' nodes.
# attributes: number of road counts and intersection counts.
# inputs: roads and sa2 shapefiles
# outputs: sa2 shapefile with road attributes.
sa2_roads = gpd.read_file(intermediate_data_path+"shapefiles/sa2_roads.shp")
sa2_adelaide = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide.shp')
# augment road class info to sa2_adelaide
sa2_adelaide_road_attributes, roads_in_adelaide = util.compute_road_attributes(sa2_adelaide, sa2_roads)
sa2_adelaide_road_attributes['num_roads'] = np.sum(sa2_adelaide_road_attributes[['class_ART', 'class_BUS', 'class_COLL',
'class_HWY', 'class_LOCL','class_SUBA', 'class_TRK2',
'class_TRK4', 'class_UND']], axis = 1)
# augment intersection attributes to sa2_adelaide
sa2_adelaide_intersection_attributes = util.compute_intersection_attributes(sa2_adelaide_road_attributes, roads_in_adelaide)
# merge sa2_adelaide, sa2_adelaide_road_attributes, and sa2_adelaide_intersection_attributes
sa2_adelaide_with_transport_attributes = sa2_adelaide.merge(sa2_adelaide_road_attributes, on='SA2_MAIN16', how='outer', suffixes=("","_x"))
sa2_adelaide_with_transport_attributes.drop(columns=['SA2_NAME16_x', 'geometry_x'], inplace=True)
sa2_adelaide_with_transport_attributes = sa2_adelaide_with_transport_attributes.merge(sa2_adelaide_intersection_attributes, on='SA2_MAIN16', how='outer', suffixes=("","_x"))
# save sa2_adelaide_with_transport_attributes and roads_in_adelaide
sa2_adelaide_with_transport_attributes.to_pickle(intermediate_data_path+"sa2_node_with_only_transport_attributes.pickle")
roads_in_adelaide.to_file(intermediate_data_path+"shapefiles/sa2_roads_in_adelaide.shp")
# sw: Wow. Pickle can save & read the shapefiles with crs info kept.
# sw: I still saved to shp files because QGIS cannot read pickle, I guess.
# with open("./data/sa2_adelaide_with_transport_attributes.pickle", 'rb') as f:
# x_file = pickle.load(f)
# print(x_file.crs)
#endregion
#region 5. Turn road shapefiles to the attributes of SA2s' edges.
# It takes about five minutes for processing.
# roads_in_adelaide = gpd.read_file("./data/shapefiles/sa2_roads_in_adelaide.shp")
# 1. edge file
sa2_adelaide_edge = gpd.read_file(intermediate_data_path+'shapefiles/sa2_adelaide_edge.shp')
# 2. transport attribute file
with open(intermediate_data_path+"sa2_node_with_only_transport_attributes.pickle", 'rb') as f:
sa2_adelaide_with_transport_attributes = pickle.load(f)
# 3. OD path file
with open(intermediate_data_path+'OD_full_path.pickle', 'rb') as f:
OD_full_path = pickle.load(f)
# add the road and intersection attributes to the sa2_adelaide_edge data set.
attribute_name_list = ['class_ART', 'class_BUS', 'class_COLL',
'class_HWY', 'class_LOCL', 'class_SUBA',
'class_TRK2', 'class_TRK4', 'class_UND', 'num_roads', 'num_nodes', 'num_1degree',
'num_2degree', 'num_3degree', 'num_4degree', 'num_greater5degree']
sa2_adelaide_edge[attribute_name_list] = 0.0 # init values
# add road and intersection attributes to the edge df.
for idx in np.arange(sa2_adelaide_edge.shape[0]):
if idx%1000 == 0:
print(idx)
origin = sa2_adelaide_edge.loc[idx, 'O']
destination = sa2_adelaide_edge.loc[idx, 'D']
o_idx = sa2_adelaide_with_transport_attributes.index[sa2_adelaide_with_transport_attributes.SA2_MAIN16 == origin].tolist()[0]
d_idx = sa2_adelaide_with_transport_attributes.index[sa2_adelaide_with_transport_attributes.SA2_MAIN16 == destination].tolist()[0]
# print(o_idx,d_idx)
try:
        # OD_full_path might not contain every shortest path...
# note that the OD_full_path idx is consistent with sa2_adelaide.
idx_list_on_shortest_path = OD_full_path[(o_idx, d_idx)]
for node_on_shortest_path in idx_list_on_shortest_path:
sa2_adelaide_edge.loc[idx, attribute_name_list] += sa2_adelaide_with_transport_attributes.loc[
node_on_shortest_path, attribute_name_list]
except KeyError as error:
pass
# output two pickles:
# node network with transport info: sa2_adelaide_with_transport_attributes
# edge network with transport info: sa2_adelaide_edge
sa2_adelaide_with_transport_attributes.to_pickle(intermediate_data_path+'sa2_node_with_only_transport_attributes.pickle')
sa2_adelaide_edge.to_pickle(intermediate_data_path+'sa2_edge_with_only_transport_attributes.pickle')
#endregion
'''Example streaming ffmpeg numpy processing.
Based on examples from https://github.com/kkroening/ffmpeg-python/tree/master/examples
Usage instructions:
1. Install opencv, ffmpeg-python and numpy
2. Run python ffmpeg_stream.py input_file
3. In a separate terminal, run ffplay -f avi http://localhost:8080 (after enabling port forwarding if running remotely)
TODO: explore methods to reduce latency (both for ffmpeg and ffplay)
Demonstrates using ffmpeg to decode video input, process the frames in
python, and then encode video output using ffmpeg.
This example uses two ffmpeg processes - one to decode the input video
and one to encode an output video - while the raw frame processing is
done in python with numpy.
In addition, the audio from the same input file is also streamed and combined with
the video.
At a high level, the signal graph looks like this:
(input video) -> [ffmpeg process 1] -> [python] -> [ffmpeg process 2] -> (output video)
(input audio) -> [ffmpeg process 1_audio] -------------^
Output video is sent to http server.
The simplest processing example simply darkens each frame by
multiplying the frame's numpy array by a constant value; see
``process_frame_simple``.
The audio is read and streamed in 40ms chunks (corresponding to one video frame).
We use named FIFO pipes for the communication to ffmpeg process 2, allowing use of
two separate pipes for audio and video.
The writing to these pipes happens in distinct threads (so that blocking calls in the pipes don't cause trouble).
'''
from __future__ import print_function
import argparse
import ffmpeg
import logging
import numpy as np
import os
import subprocess
import zipfile
import threading
parser = argparse.ArgumentParser(description='Example streaming ffmpeg numpy processing')
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('port', nargs='?', default=8080, help='Port for the output HTTP server (default 8080)')
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# get video frame size using ffmpeg probe
def get_video_info(filename):
logger.info('Getting video size for {!r}'.format(filename))
probe = ffmpeg.probe(filename)
print(probe['streams'])
video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
width = int(video_info['width'])
height = int(video_info['height'])
return width, height
# this is process for reading video file and outputting in raw frames to pipe
# we specify fps here which automatically converts the fps to the desired value
def start_ffmpeg_process1(in_filename, fps):
logger.info('Starting ffmpeg process1')
args = (
ffmpeg
.input(in_filename)
.output('pipe:', format='rawvideo', pix_fmt='rgb24', r=fps)
.compile()
)
# all ffmpeg commands are ultimately run as subprocesses with appropriate piping for stdout
# the 'pipe:' in the output above means the output is written to stdout, which we redirect to
# subprocess.PIPE
return subprocess.Popen(args, stdout=subprocess.PIPE)
# this is process for reading audio file and outputting to pipe
# the format is pcm signed 16 bit little endian (essentially wav)
# ac=1 -> mono
# ar=16k -> audio sampling rate for output (automatically converted)
def start_ffmpeg_process1_audio(in_filename):
logger.info('Starting ffmpeg process1_audio')
args = (
ffmpeg
.input(in_filename)
.output('pipe:', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
.compile()
)
return subprocess.Popen(args, stdout=subprocess.PIPE)
# process for writing output to http url by taking input from two FIFO pipes (video and audio)
def start_ffmpeg_process2(fifo_name_video, fifo_name_audio, width, height, fps, port,
output_to='socket', output_path='None'):
logger.info('Starting ffmpeg process2')
server_url = "http://127.0.0.1:" + str(port) # any port should be fine, 127.0.0.1 is simply localhost
# inputs: parameters largely the same as in the previous two functions
input_video = ffmpeg.input(fifo_name_video, format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height),
framerate=fps)
input_audio = ffmpeg.input(fifo_name_audio, format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
if output_to == 'socket':
# (mp4 doesn't work because it requires random access not appropriate for streaming)
video_format = 'avi' # format supporting both video and audio.
        # combine the two and output to url (listen=1 makes ffmpeg act as the HTTP server)
args = (
ffmpeg
.output(input_audio, input_video, server_url, listen=1, f=video_format, vcodec='libx264',
preset='ultrafast')
# .global_args('-fflags', 'nobuffer') # .run()
# .global_args('-ss', '4')
# .global_args('-preset', 'ultrafast')
.compile()
)
elif output_to == 'file':
video_format = 'mp4'
if output_path == 'None':
raise ValueError('Asked to write in file but path not provided.')
args = (
ffmpeg
.output(input_audio, input_video, output_path, f=video_format, vcodec='libx264', preset='ultrafast')
.compile()
)
else:
raise ValueError("Wrong output format. Should be 'socket' or 'file'.")
return subprocess.Popen(args)
# read frame from process1 stdout pipe and convert to numpy
def read_frame(process1, width, height):
logger.debug('Reading frame')
# Note: RGB24 == 3 bytes per pixel.
frame_size = width * height * 3
in_bytes = process1.stdout.read(frame_size)
if len(in_bytes) == 0:
frame = None
else:
assert len(in_bytes) == frame_size
frame = (
np
.frombuffer(in_bytes, np.uint8)
.reshape([height, width, 3])
)
return frame
# read audio frame from process1_audio stdout pipe
def read_audio_frame(process1_audio, num_bytes):
logger.debug('Reading audio frame')
in_bytes = process1_audio.stdout.read(num_bytes)
return in_bytes
# darken frame
def process_frame_simple(frame):
'''Simple processing example: darken frame.'''
return frame * 0.3
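# A second per-frame example (illustrative addition, not part of the original
# script): invert colors. Frames arrive as uint8 RGB numpy arrays of shape
# (height, width, 3); any function with this signature can be passed to run().
def process_frame_invert(frame):
    '''Invert every channel of the frame.'''
    return 255 - frame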
# write video frame to fifo pipe as bytes
def write_video_frame(fifo_video_out, frame):
logger.debug('Writing frame')
fifo_video_out.write(
frame
.astype(np.uint8)
.tobytes()
)
# write audio frame to fifo pipe as bytes
def write_audio_frame(fifo_audio_out, in_audio_frame):
logger.debug('Writing audio frame')
fifo_audio_out.write(in_audio_frame)
def video_thread_handler(fifo_filename_video, process1, width, height):
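    # note: process_frame below is resolved from the module namespace (it is set
    # in the __main__ block at the bottom of the file), not passed in as an argument.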
fifo_video_out = open(fifo_filename_video, "wb")
# this blocks until the read for the fifo opens so we run in separate thread
# read frame one by one, process and write to fifo pipe
while True:
in_frame = read_frame(process1, width, height)
if in_frame is None:
logger.info('End of input stream')
break
logger.debug('Processing frame')
out_frame = process_frame(in_frame)
write_video_frame(fifo_video_out, out_frame)
fifo_video_out.close()
def audio_thread_handler(fifo_filename_audio, process1_audio, audio_bytes_per_video_frame):
fifo_audio_out = open(fifo_filename_audio, "wb")
# this blocks until the read for the fifo opens so we run in separate thread
# read frame one by one, process and write to fifo pipe
while True:
in_audio_frame = read_audio_frame(process1_audio, audio_bytes_per_video_frame)
if len(in_audio_frame) == 0:
break
write_audio_frame(fifo_audio_out, in_audio_frame)
fifo_audio_out.close()
def run(in_filename, process_frame, port):
width, height = get_video_info(in_filename)
fps = 25 # video fps
process1 = start_ffmpeg_process1(in_filename, fps)
process1_audio = start_ffmpeg_process1_audio(in_filename)
# fifo pipes (remove file name if already exists)
fifo_filename_video = '/tmp/fifovideo'
fifo_filename_audio = '/tmp/fifoaudio'
if os.path.exists(fifo_filename_video):
os.remove(fifo_filename_video)
if os.path.exists(fifo_filename_audio):
os.remove(fifo_filename_audio)
os.mkfifo(fifo_filename_video)
os.mkfifo(fifo_filename_audio)
process2 = start_ffmpeg_process2(fifo_filename_video, fifo_filename_audio, width, height, fps, port)
    audio_bytes_per_video_frame = 640 * 2  # 640 samples per video frame (16000 Hz / 25 fps) * 2 bytes per sample (s16le)
# we run audio and video in separate threads otherwise the fifo opening blocks
# create threads
video_thread = threading.Thread(target=video_thread_handler, args=(fifo_filename_video, process1, width, height))
audio_thread = threading.Thread(target=audio_thread_handler,
args=(fifo_filename_audio, process1_audio, audio_bytes_per_video_frame))
# start threads
video_thread.start()
audio_thread.start()
# wait for threads to finish executing
video_thread.join()
audio_thread.join()
logger.info('Waiting for ffmpeg process1')
process1.wait()
logger.info('Waiting for ffmpeg process2')
process2.wait()
os.remove(fifo_filename_video)
os.remove(fifo_filename_audio)
logger.info('Done')
if __name__ == '__main__':
args = parser.parse_args()
port = args.port
process_frame = process_frame_simple
run(args.in_filename, process_frame, port)
#!/usr/bin/env python
import rospy
# import sys
from std_msgs.msg import ColorRGBA
from geometry_msgs.msg import PoseStamped, Twist, Vector3, Point
from ford_msgs.msg import Clusters
from visualization_msgs.msg import Marker, MarkerArray
import numpy as np
import math
from nav_msgs.msg import Odometry
import configparser
import torch
import gym
from crowd_nav.policy.cadrl import CADRL
from crowd_nav.policy.lstm_rl import LstmRL
from crowd_nav.policy.sarl import SARL
from crowd_sim.envs.utils.robot import Robot
PED_RADIUS = 0.3
# angle_1 - angle_2, wrapped to the range [-pi, pi]
def find_angle_diff(angle_1, angle_2):
angle_diff_raw = angle_1 - angle_2
angle_diff = (angle_diff_raw + np.pi) % (2 * np.pi) - np.pi
return angle_diff
class NN_tb3():
def __init__(self, env, env_config, policy):
#
self.env = env
self.env_config = env_config
# configure robot
self.robot = Robot(env_config, 'robot')
self.robot.set_policy(policy)
        self.env.set_robot(self.robot)  # pass robot parameters into env
        self.ob = env.reset('test', 1)  # initialize parameters (e.g. time_step, success_reward) from the .config file for other instances
self.policy = policy
self.policy.set_env(env)
# for action
self.angle2Action = 0
self.distance = 0
# for subscribers
self.pose = PoseStamped()
self.vel = Vector3()
self.psi = 0.0
# for publishers
self.global_goal = PoseStamped()
self.goal = PoseStamped()
self.desired_position = PoseStamped()
self.desired_action = np.zeros((2,))
# # publishers
self.pub_twist = rospy.Publisher('/cmd_vel',Twist,queue_size=1)
# self.pub_pose_marker = rospy.Publisher('',Marker,queue_size=1)
# self.pub_agent_markers = rospy.Publisher('~agent_markers',MarkerArray,queue_size=1)
self.pub_path_marker = rospy.Publisher('/action',Marker,queue_size=1)
# self.pub_goal_path_marker = rospy.Publisher('~goal_path_marker',Marker,queue_size=1)
# # sub
self.sub_pose = rospy.Subscriber('/odom',Odometry,self.cbPose)
self.sub_global_goal = rospy.Subscriber('/goal',PoseStamped, self.cbGlobalGoal)
self.sub_subgoal = rospy.Subscriber('/plan_manager/subgoal',PoseStamped, self.cbSubGoal)
# subgoals
self.sub_goal = Vector3()
# self.sub_clusters = rospy.Subscriber('~clusters',Clusters, self.cbClusters)
        # timers: control loop every 0.2 s (5 Hz), CrowdNav action computation every 0.01 s (100 Hz)
self.control_timer = rospy.Timer(rospy.Duration(0.2),self.cbControl)
self.nn_timer = rospy.Timer(rospy.Duration(0.01),self.cbComputeActionCrowdNav)
def update_angle2Action(self):
# action vector
v_a = np.array([self.desired_position.pose.position.x-self.pose.pose.position.x,self.desired_position.pose.position.y-self.pose.pose.position.y])
# pose direction
e_dir = np.array([math.cos(self.psi), math.sin(self.psi)])
# angle: <v_a, e_dir>
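        # atan2(det([v_a, e_dir]), dot(v_a, e_dir)) is the signed angle between the action vector and the heading direction, in [-pi, pi]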
self.angle2Action = np.math.atan2(np.linalg.det([v_a,e_dir]),np.dot(v_a,e_dir))
def cbGlobalGoal(self,msg):
self.stop_moving_flag = True
self.new_global_goal_received = True
self.global_goal = msg
self.goal.pose.position.x = msg.pose.position.x
self.goal.pose.position.y = msg.pose.position.y
self.goal.header = msg.header
# reset subgoals
print("new goal: "+str([self.goal.pose.position.x,self.goal.pose.position.y]))
def cbSubGoal(self,msg):
# update subGoal
self.sub_goal.x = msg.pose.position.x
self.sub_goal.y = msg.pose.position.y
def goalReached(self):
# check if near to global goal
if self.distance > 0.3:
return False
else:
return True
def cbPose(self, msg):
# update robot vel (vx,vy)
self.cbVel(msg)
# get pose angle
q = msg.pose.pose.orientation
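        # yaw (psi) from the quaternion: atan2(2*(w*z + x*y), 1 - 2*(y^2 + z^2))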
self.psi = np.arctan2(2.0*(q.w*q.z + q.x*q.y), 1-2*(q.y*q.y+q.z*q.z)) # bounded by [-pi, pi]
self.pose = msg.pose
self.visualize_path()
v_p = msg.pose.pose.position
v_g = self.sub_goal
v_pg = np.array([v_g.x-v_p.x,v_g.y-v_p.y])
self.distance = np.linalg.norm(v_pg)
# self.visualize_pose(msg.pose.pose.position,msg.pose.pose.orientation)
def cbVel(self, msg):
self.vel = msg.twist.twist.linear
def cbClusters(self,msg):
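        # note: this callback is currently unused (its subscriber above is commented out);
        # it also references an `agent` module and self.obst_rad that are not defined in this file.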
other_agents = []
xs = []; ys = []; radii = []; labels = []
num_clusters = len(msg.labels)
for i in range(num_clusters):
index = msg.labels[i]
x = msg.mean_points[i].x; y = msg.mean_points[i].y
v_x = msg.velocities[i].x; v_y = msg.velocities[i].y
radius = self.obst_rad
xs.append(x); ys.append(y); radii.append(radius); labels.append(index)
# self.visualize_other_agent(x,y,radius,msg.labels[i])
# helper fields
heading_angle = np.arctan2(v_y, v_x)
pref_speed = np.linalg.norm(np.array([v_x, v_y]))
goal_x = x + 5.0; goal_y = y + 5.0
if pref_speed < 0.2:
pref_speed = 0; v_x = 0; v_y = 0
other_agents.append(agent.Agent(x, y, goal_x, goal_y, radius, pref_speed, heading_angle, index))
self.visualize_other_agents(xs, ys, radii, labels)
self.other_agents_state = other_agents
def stop_moving(self):
twist = Twist()
self.pub_twist.publish(twist)
def update_action(self, action):
# print 'update action'
self.desired_action = action
# self.desired_position.pose.position.x = self.pose.pose.position.x + 1*action[0]*np.cos(action[1])
# self.desired_position.pose.position.y = self.pose.pose.position.y + 1*action[0]*np.sin(action[1])
self.desired_position.pose.position.x = self.pose.pose.position.x + (action[0])
self.desired_position.pose.position.y = self.pose.pose.position.y + (action[1])
# print(action[0])
def cbControl(self, event):
twist = Twist()
if not self.goalReached():
if abs(self.angle2Action) > 0.1 and self.angle2Action > 0:
twist.angular.z = -0.3
print("spinning in place +")
elif abs(self.angle2Action) > 0.1 and self.angle2Action < 0:
twist.angular.z = 0.3
print("spinning in place -")
# else:
vel = np.array([self.desired_action[0],self.desired_action[1]])
twist.linear.x = 0.1*np.linalg.norm(vel)
self.pub_twist.publish(twist)
def cbComputeActionCrowdNav(self, event):
robot_x = self.pose.pose.position.x
robot_y = self.pose.pose.position.y
# goal
goal_x = self.sub_goal.x
goal_y = self.sub_goal.y
# velocity
robot_vx = self.vel.x
robot_vy = self.vel.y
        # orientation
theta = self.psi
robot_radius = 0.3
# set robot info
self.robot.set(robot_x, robot_y, goal_x, goal_y, robot_vx, robot_vy, theta, robot_radius)
# obstacle: position, velocity, radius
# position
# obstacle_x = [0.1,0.2,0.3,0.4,0.5]
# obstacle_y = [0.1,0.2,0.3,0.4,0.5]
# # velocity
# obstacle_vx = [0.1,0.2,0.3,0.4,0.5]
# obstacle_vy = [0.1,0.2,0.3,0.4,0.5]
obstacle_x = [-6.0,-6.0,-6.0,-6.0,-6.0]
obstacle_y = [-6.0,-6.0,-6.0,-6.0,-6.0]
# velocity
obstacle_vx = [0.0,0.0,0.0,0.0,0.0]
obstacle_vy = [0.0,0.0,0.0,0.0,0.0]
obstacle_radius = 0.3
# initial obstacle instances and set value
for i in range(self.env_config.getint('sim','human_num')):
self.env.humans[i].set(obstacle_x[i], obstacle_y[i], goal_x,goal_y, obstacle_vx[i], obstacle_vy[i], theta, obstacle_radius)
self.ob[i]= self.env.humans[i].get_observable_state()
# ************************************ Output ************************************
# get action info
action = self.robot.act(self.ob)
# print('\n---------\nrobot position (X,Y):', position.position)
# print(action)
# print(theta)
self.update_action(action)
self.update_angle2Action()
def update_subgoal(self,subgoal):
self.goal.pose.position.x = subgoal[0]
self.goal.pose.position.y = subgoal[1]
def visualize_path(self):
marker = Marker()
marker.header.stamp = rospy.Time.now()
marker.header.frame_id = 'map'
marker.ns = 'path_arrow'
marker.id = 0
marker.type = marker.ARROW
marker.action = marker.ADD
marker.points.append(self.pose.pose.position)
marker.points.append(self.desired_position.pose.position)
marker.scale = Vector3(x=0.1,y=0.2,z=0.2)
marker.color = ColorRGBA(b=1.0,a=1.0)
marker.lifetime = rospy.Duration(1)
self.pub_path_marker.publish(marker)
# # Display BLUE DOT at NN desired position
# marker = Marker()
# marker.header.stamp = rospy.Time.now()
# marker.header.frame_id = 'map'
# marker.ns = 'path_trail'
# marker.id = self.num_poses
# marker.type = marker.CUBE
# marker.action = marker.ADD
# marker.pose.position = copy.deepcopy(self.pose.pose.position)
# marker.scale = Vector3(x=0.2,y=0.2,z=0.2)
# marker.color = ColorRGBA(g=0.0,r=0,b=1.0,a=0.3)
# marker.lifetime = rospy.Duration(60)
# self.pub_path_marker.publish(marker)
def on_shutdown(self):
        rospy.loginfo("[%s] Shutting down.", rospy.get_name())
        self.stop_moving()
        rospy.loginfo("Stopped %s's velocity.", rospy.get_name())
def run():
policy_name = "lstm"
device = 'cpu'
phase = 'test'
select_policy = {"cadrl":CADRL(),"lstm":LstmRL(),"sarl":SARL()}
# the path of training result which contains configs and rl mode
    env_config_file = 'crowd_nav/data/output/env.config' # path beginning without a slash
policy_config_file = 'crowd_nav/data/output/policy.config'
model_weights = 'crowd_nav/data/output/rl_model_'+policy_name+'.pth'
# print(model_weights)
# select policy
policy = select_policy[policy_name] #{SARL(),CADRL(),LstmRL()}
policy_config = configparser.RawConfigParser()
policy_config.read(policy_config_file)
policy.configure(policy_config)
policy.get_model().load_state_dict(torch.load(model_weights))
policy.set_device(device)
policy.set_phase(phase)
# configure environment / obstacles
env_config = configparser.RawConfigParser()
env_config.read(env_config_file)
env = gym.make('CrowdSim-v0') #env is inherited from CrowdSim class in crowd_sim.py
env.configure(env_config)
rospy.init_node('crowdnav_tb3',anonymous=False)
print('==================================\ncrowdnav node started')
nn_tb3 = NN_tb3(env,env_config,policy)
rospy.on_shutdown(nn_tb3.on_shutdown)
rospy.spin()
if __name__ == '__main__':
run()
# standard imports
import logging
from sklearn.metrics.cluster import homogeneity_score, completeness_score
import numpy
import matplotlib.pyplot as plt
# our imports
import emission.analysis.modelling.tour_model.cluster_pipeline as cp
import emission.analysis.modelling.tour_model.similarity as similarity
"""
Functions to evaluate clustering based on groundtruth. To use these functions,
an array of the length of the data must be passed in, with different values in the
array indicating different groundtruth clusters.
These functions can be used alongside the cluster pipeline to evaluate clustering.
An example of how to run this with the cluster pipeline is in the main method. To run it,
pass in a list of groundtruth.
Note that the cluster pipeline works with trips, not sections, so to use the above
code the groundtruth has to also be by trips.
"""
#turns color array into an array of integers
def get_colors(data, colors):
if len(data) != len(colors):
raise ValueError('Data and groundtruth must have the same number of elements')
    indices = []
for n in colors:
if n not in indices:
indices.append(n)
for i in range(len(colors)):
colors[i] = indices.index(colors[i])
return colors
#update the ground truth after binning
def update_colors(bins, colors):
newcolors = []
for bin in bins:
for b in bin:
newcolors.append(colors[b])
    indices = []
for n in newcolors:
if n not in indices:
indices.append(n)
for i in range(len(newcolors)):
newcolors[i] = indices.index(newcolors[i])
return newcolors
#evaluates the cluster labels against the groundtruth colors
def evaluate(colors, labels):
b = homogeneity_score(colors, labels)
c = completeness_score(colors, labels)
    logging.debug('homogeneity is %f' % b)
    logging.debug('completeness is %f' % c)
#maps the clusters, colored by the groundtruth
#creates a map for each groundtruthed cluster and
#a map showing all the clusters.
def map_clusters_by_groundtruth(data, labels, colors, map_individuals=False):
import pygmaps
from matplotlib import colors as matcol
colormap = plt.cm.get_cmap()
import random
r = random.sample(range(len(set(labels))), len(set(labels)))
rand = []
clusters = len(set(labels))
for i in range(len(labels)):
rand.append(r[labels[i]]/float(clusters))
if map_individuals:
for color in set(colors):
first = True
num_paths = 0
for i in range(len(colors)):
if colors[i] == color:
num_paths += 1
start_lat = data[i].trip_start_location.lat
start_lon = data[i].trip_start_location.lon
end_lat = data[i].trip_end_location.lat
end_lon = data[i].trip_end_location.lon
if first:
mymap = pygmaps.maps(start_lat, start_lon, 10)
first = False
path = [(start_lat, start_lon), (end_lat, end_lon)]
mymap.addpath(path, matcol.rgb2hex(colormap(rand[i])))
mymap.draw('./mycluster' + str(color) + '.html')
mymap = pygmaps.maps(37.5, -122.32, 10)
for i in range(len(data)):
start_lat = data[i].trip_start_location.lat
start_lon = data[i].trip_start_location.lon
end_lat = data[i].trip_end_location.lat
end_lon = data[i].trip_end_location.lon
path = [(start_lat, start_lon), (end_lat, end_lon)]
mymap.addpath(path, matcol.rgb2hex(colormap(float(colors[i])/len(set(colors)))))
mymap.draw('./mymap.html')
def main(colors):
data = cp.read_data() #get the data
colors = get_colors(data, colors) #make colors the right format
data, bins = cp.remove_noise(data, .5, 300) #remove noise from data
###### the next few lines are to evaluate the binning
sim = similarity.similarity(data, .5, 300) #create a similarity object
sim.bins = bins #set the bins, since we calculated them above
sim.evaluate_bins() #evaluate them to create the labels
######
colors = update_colors(bins, colors) #update the colors to reflect deleted bins
labels = sim.labels #get labels
evaluate(numpy.array(colors), numpy.array(labels)) #evaluate the bins
clusters, labels, data = cp.cluster(data, len(bins)) #cluster
evaluate(numpy.array(colors), numpy.array(labels)) #evaluate clustering
map_clusters_by_groundtruth(data, labels, colors, map_individuals=False) #map clusters, make last parameter true to map individual clusters
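# Example invocation (illustrative; groundtruth must be a list with one integer
# label per trip returned by cp.read_data(), e.g. loaded from your own file):
# if __name__ == "__main__":
#     groundtruth = load_groundtruth()   # hypothetical helper, not defined here
#     main(groundtruth)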
import numpy as np
from pandas import DataFrame
import matplotlib.pyplot as py
class ca(object):
'''
Docstring for function ecopy.ca
====================
    Conducts correspondence analysis (CA). User supplies
an observation x descriptor matrix.
Use
----
ca(x, siteNames=None, spNames=None, scaling=1)
Returns an object of class ca
Parameters
----------
x: Data for ordination. Should either be a pandas DataFrame or numpy.ndarray.
Observations as rows and descriptors as columns. Only
positive numbers and 0's allowed.
siteNames: A list of site names
spNames: A list of species names
scaling: What type of biplot to produce. See online documentation
Attributes (see online documentation for descriptions)
---------
w_col: column weights of the transformed matrix
w_row: row weights of the transformed matrix
evals: eigenvalues of the QQ matrix
U: column eigenvectors
Uhat: row eigenvectors
cumDesc_Sp: The proportion of variance for each species explained by each
        correspondence axis
    cumDesc_Site: The proportion of variance for each site explained by each
        correspondence axis
siteScores: Site scores along each CA axis
spScores: Species scores along each CA axis
Methods
--------
summary(): provides a pandas.DataFrame summary table of CA axes
biplot(coords=False, xax=1, yax=2, type=1, showSp=True, showSite=True, spCol='r', siteCol='k', spSize=12, siteSize=12, xlim=None, ylim=None):
Produces a biplot of the given CA axes.
showSp: Whether species should be plotted
showSite: Whether site should be plotted
spCol: Color of species text
siteCol: Color of site text
spSize: Size of species text
siteSize: Size of site text
xlim: Provide a xlim list to override default limits
ylim: Provide a ylim list to override default limits
coords: Should the plotting coordinates be returned
xax: Integer specifying CA Axes to be plotted on the x-axis (Defaults to 1)
yax: Integer specifying CA Axes to be plotted on the y-axis (Defaults to 2)
Example
--------
import ecopy as ep
BCI = ep.load_data('BCI')
bci_ca = ep.ca(BCI)
print(bci_ca.summary())
bci_ca.biplot()
'''
def __init__(self, x, siteNames=None, spNames=None, scaling=1):
# if the data is not a dataframe or array, raise error
if not isinstance(x, (DataFrame, np.ndarray)):
            msg = 'Data must either be pandas.DataFrame or numpy.ndarray'
raise ValueError(msg)
# if x is a DataFrame
if isinstance(x, DataFrame):
# check NAs
if x.isnull().any().any():
msg = 'DataFrame contains null values'
raise ValueError(msg)
# check for non-numeric
if (x.dtypes == 'object').any():
msg = 'DataFrame can only contain numeric values'
raise ValueError(msg)
# convert to a numpy array
y = np.array(x)
# if x is array, simple re-assign
if isinstance(x, np.ndarray):
if np.isnan(x).any():
msg = 'Array contains null values'
raise ValueError(msg)
y = x
# check for negative values
        if (y < 0).any():
            msg = 'Matrix cannot contain negative values'
            raise ValueError(msg)
        if scaling not in [1,2]:
            msg = 'scaling parameter must be 1 or 2'
            raise ValueError(msg)
if y.shape[0] < y.shape[1]:
y = y.T
self.Trans = True
else:
self.Trans = False
pMat = y.astype('float')/y.sum()
self.w_row = pMat.sum(axis=1)
self.w_col = pMat.sum(axis=0)
w_rowA = self.w_row[:,np.newaxis]
w_colA = self.w_col[np.newaxis,:]
Q = (pMat - w_rowA*w_colA)/np.sqrt(w_rowA*w_colA)
self.evals, self.U = np.linalg.eig(Q.T.dot(Q))
idx = self.evals.argsort()[::-1]
self.evals = self.evals[idx]
self.U = self.U[:,idx]
self.Uhat = Q.dot(self.U).dot(np.diag(self.evals**-0.5))
self.evals = self.evals[:-1]
self.U = self.U[:,:-1]
self.Uhat = self.Uhat[:,:-1]
if isinstance(x, DataFrame):
self.siteLabs = x.index
self.spLabs = x.columns
else:
self.siteLabs = ['Site ' + str(x) for x in range(y.shape[0])]
self.spLabs = ['Sp ' + str(x) for x in range(y.shape[1])]
if siteNames is not None:
self.siteLabs = siteNames
if spNames is not None:
self.spLabs = spNames
U2 = self.U.dot(np.diag(self.evals**0.5))
Uhat2 = self.Uhat.dot(np.diag(self.evals**0.5))
if self.Trans:
self.cumDesc_Sp = DataFrame(np.apply_along_axis(lambda x: np.cumsum(x**2) / np.sum(x**2), 1, Uhat2))
self.cumDesc_Site = DataFrame(np.apply_along_axis(lambda x: np.cumsum(x**2) / np.sum(x**2), 1, U2))
else:
self.cumDesc_Sp = DataFrame(np.apply_along_axis(lambda x: np.cumsum(x**2) / np.sum(x**2), 1, U2))
self.cumDesc_Site = DataFrame(np.apply_along_axis(lambda x: np.cumsum(x**2) / np.sum(x**2), 1, Uhat2))
if isinstance(x, DataFrame):
self.cumDesc_Sp.index = x.columns
self.cumDesc_Site.index = x.index
self.cumDesc_Sp.columns = ['CA Axis ' + str(x) for x in range(1, len(self.evals) + 1)]
self.cumDesc_Site.columns = ['CA Axis ' + str(x) for x in range(1, len(self.evals) + 1)]
V = np.diag(self.w_col**-0.5).dot(self.U)
Vhat = np.diag(self.w_row**-0.5).dot(self.Uhat)
F = Vhat.dot(np.diag(self.evals**0.5))
Fhat = V.dot(np.diag(self.evals**0.5))
if self.Trans:
siteCent = Fhat
spCent = F
siteOut = V
spOut = Vhat
if scaling==1:
self.siteScores = DataFrame(siteCent, index=self.siteLabs)
self.spScores = DataFrame(spOut, index=self.spLabs)
elif scaling==2:
self.siteScores = DataFrame(siteOut, columns=self.siteLabs)
self.spScores = DataFrame(spCent, columns=self.spLabs)
else:
siteCent = F
spCent = Fhat
siteOut = Vhat
spOut = V
if scaling==1:
self.siteScores = DataFrame(siteCent, index=self.siteLabs)
self.spScores = DataFrame(spOut, index=self.spLabs)
elif scaling==2:
self.siteScores = DataFrame(siteOut, index=self.siteLabs)
self.spScores = DataFrame(spCent, index=self.spLabs)
def summary(self):
sds = np.sqrt(self.evals)
props = self.evals / np.sum(self.evals)
cumSums = np.cumsum(self.evals) / np.sum(self.evals)
colNames = ['CA Axis ' + str(x) for x in range(1, len(self.evals)+1)]
sumTable = DataFrame(np.vstack((sds, props, cumSums)), index=['Inertia', 'Prop.', 'Cum. Prop.'])
sumTable.columns = colNames
return sumTable
def biplot(self, xax=1, yax=2, showSp=True, showSite=True, spCol='r', siteCol='k', spSize=12, siteSize=12, xlim=None, ylim=None):
f, ax = py.subplots()
if showSite:
ax.plot(self.siteScores.iloc[:,xax-1], self.siteScores.iloc[:,yax-1], 'ko', ms=0)
[ax.text(x, y, s, fontsize=siteSize, color=siteCol, ha='center', va='center') for x,y,s in zip(self.siteScores.iloc[:,xax-1], self.siteScores.iloc[:,yax-1], self.siteLabs)]
if showSp:
ax.plot(self.spScores.iloc[:,xax-1], self.spScores.iloc[:,yax-1], 'k^', ms=0)
[ax.text(x,y,s, fontsize=spSize, color=spCol, ha='center', va='center') for x,y,s in zip(self.spScores.iloc[:,xax-1], self.spScores.iloc[:,yax-1], self.spLabs)]
xmax = max(np.amax(self.siteScores.iloc[:,xax-1]), np.amax(self.spScores.iloc[:,xax-1]))
xmin = min(np.amin(self.siteScores.iloc[:,xax-1]), np.amin(self.spScores.iloc[:,xax-1]))
ymax = max(np.amax(self.siteScores.iloc[:,yax-1]), np.amax(self.spScores.iloc[:,yax-1]))
ymin = min(np.min(self.siteScores.iloc[:,yax-1]), np.min(self.spScores.iloc[:,yax-1]))
ax.set_xlim([xmin*1.15, xmax*1.15])
ax.set_ylim([ymin*1.15, ymax*1.15])
if xlim is not None:
if not isinstance(xlim, list):
msg = "xlim must be a list"
raise ValueError(msg)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, list):
msg = 'ylim must be a list'
raise ValueError(msg)
ax.set_ylim(ylim)
ax.set_xlabel('CA Axis {!s}'.format(xax))
ax.set_ylabel('CA Axis {!s}'.format(yax))
py.show()
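# --- Illustrative usage (not part of the original module; a hedged sketch) ---
# Assumes a small, strictly positive site-by-species table; the fabricated
# DataFrame below only demonstrates the call signature and the scaling option.
#   import numpy as np
#   from pandas import DataFrame
#   counts = DataFrame(np.random.randint(1, 10, (8, 5)))  # fake abundance data
#   ordination = ca(counts, scaling=1)  # scaling=2 rescales species rather than site scores
#   print(ordination.summary())         # inertia and cumulative proportion per CA axis
#   ordination.biplot(xax=1, yax=2)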
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.amin",
"numpy.isnan",
"numpy.amax",
"numpy.cumsum",
"numpy.min",
"numpy.array",
"numpy.diag",
"matplotlib.pyplot.subplots",
"numpy.vstack",
"numpy.sqrt"
] |
[((6821, 6840), 'numpy.sqrt', 'np.sqrt', (['self.evals'], {}), '(self.evals)\n', (6828, 6840), True, 'import numpy as np\n'), ((7352, 7365), 'matplotlib.pyplot.subplots', 'py.subplots', ([], {}), '()\n', (7363, 7365), True, 'import matplotlib.pyplot as py\n'), ((8922, 8931), 'matplotlib.pyplot.show', 'py.show', ([], {}), '()\n', (8929, 8931), True, 'import matplotlib.pyplot as py\n'), ((3109, 3120), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3117, 3120), True, 'import numpy as np\n'), ((3981, 4005), 'numpy.sqrt', 'np.sqrt', (['(w_rowA * w_colA)'], {}), '(w_rowA * w_colA)\n', (3988, 4005), True, 'import numpy as np\n'), ((4206, 4233), 'numpy.diag', 'np.diag', (['(self.evals ** -0.5)'], {}), '(self.evals ** -0.5)\n', (4213, 4233), True, 'import numpy as np\n'), ((4767, 4793), 'numpy.diag', 'np.diag', (['(self.evals ** 0.5)'], {}), '(self.evals ** 0.5)\n', (4774, 4793), True, 'import numpy as np\n'), ((4823, 4849), 'numpy.diag', 'np.diag', (['(self.evals ** 0.5)'], {}), '(self.evals ** 0.5)\n', (4830, 4849), True, 'import numpy as np\n'), ((5784, 5810), 'numpy.diag', 'np.diag', (['(self.evals ** 0.5)'], {}), '(self.evals ** 0.5)\n', (5791, 5810), True, 'import numpy as np\n'), ((5831, 5857), 'numpy.diag', 'np.diag', (['(self.evals ** 0.5)'], {}), '(self.evals ** 0.5)\n', (5838, 5857), True, 'import numpy as np\n'), ((6870, 6888), 'numpy.sum', 'np.sum', (['self.evals'], {}), '(self.evals)\n', (6876, 6888), True, 'import numpy as np\n'), ((6907, 6928), 'numpy.cumsum', 'np.cumsum', (['self.evals'], {}), '(self.evals)\n', (6916, 6928), True, 'import numpy as np\n'), ((6931, 6949), 'numpy.sum', 'np.sum', (['self.evals'], {}), '(self.evals)\n', (6937, 6949), True, 'import numpy as np\n'), ((7057, 7089), 'numpy.vstack', 'np.vstack', (['(sds, props, cumSums)'], {}), '((sds, props, cumSums))\n', (7066, 7089), True, 'import numpy as np\n'), ((5669, 5696), 'numpy.diag', 'np.diag', (['(self.w_col ** -0.5)'], {}), '(self.w_col ** -0.5)\n', (5676, 5696), True, 'import numpy as np\n'), ((5722, 5749), 'numpy.diag', 'np.diag', (['(self.w_row ** -0.5)'], {}), '(self.w_row ** -0.5)\n', (5729, 5749), True, 'import numpy as np\n'), ((6041, 6081), 'pandas.DataFrame', 'DataFrame', (['siteCent'], {'index': 'self.siteLabs'}), '(siteCent, index=self.siteLabs)\n', (6050, 6081), False, 'from pandas import DataFrame\n'), ((6114, 6149), 'pandas.DataFrame', 'DataFrame', (['spOut'], {'index': 'self.spLabs'}), '(spOut, index=self.spLabs)\n', (6123, 6149), False, 'from pandas import DataFrame\n'), ((6501, 6541), 'pandas.DataFrame', 'DataFrame', (['siteCent'], {'index': 'self.siteLabs'}), '(siteCent, index=self.siteLabs)\n', (6510, 6541), False, 'from pandas import DataFrame\n'), ((6574, 6609), 'pandas.DataFrame', 'DataFrame', (['spOut'], {'index': 'self.spLabs'}), '(spOut, index=self.spLabs)\n', (6583, 6609), False, 'from pandas import DataFrame\n'), ((7971, 8012), 'numpy.amax', 'np.amax', (['self.siteScores.iloc[:, xax - 1]'], {}), '(self.siteScores.iloc[:, xax - 1])\n', (7978, 8012), True, 'import numpy as np\n'), ((8011, 8050), 'numpy.amax', 'np.amax', (['self.spScores.iloc[:, xax - 1]'], {}), '(self.spScores.iloc[:, xax - 1])\n', (8018, 8050), True, 'import numpy as np\n'), ((8072, 8113), 'numpy.amin', 'np.amin', (['self.siteScores.iloc[:, xax - 1]'], {}), '(self.siteScores.iloc[:, xax - 1])\n', (8079, 8113), True, 'import numpy as np\n'), ((8112, 8151), 'numpy.amin', 'np.amin', (['self.spScores.iloc[:, xax - 1]'], {}), '(self.spScores.iloc[:, xax - 1])\n', (8119, 8151), True, 'import numpy as np\n'), ((8173, 
8214), 'numpy.amax', 'np.amax', (['self.siteScores.iloc[:, yax - 1]'], {}), '(self.siteScores.iloc[:, yax - 1])\n', (8180, 8214), True, 'import numpy as np\n'), ((8213, 8252), 'numpy.amax', 'np.amax', (['self.spScores.iloc[:, yax - 1]'], {}), '(self.spScores.iloc[:, yax - 1])\n', (8220, 8252), True, 'import numpy as np\n'), ((8274, 8314), 'numpy.min', 'np.min', (['self.siteScores.iloc[:, yax - 1]'], {}), '(self.siteScores.iloc[:, yax - 1])\n', (8280, 8314), True, 'import numpy as np\n'), ((8313, 8351), 'numpy.min', 'np.min', (['self.spScores.iloc[:, yax - 1]'], {}), '(self.spScores.iloc[:, yax - 1])\n', (8319, 8351), True, 'import numpy as np\n'), ((3220, 3231), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (3228, 3231), True, 'import numpy as np\n'), ((6213, 6254), 'pandas.DataFrame', 'DataFrame', (['siteOut'], {'columns': 'self.siteLabs'}), '(siteOut, columns=self.siteLabs)\n', (6222, 6254), False, 'from pandas import DataFrame\n'), ((6287, 6325), 'pandas.DataFrame', 'DataFrame', (['spCent'], {'columns': 'self.spLabs'}), '(spCent, columns=self.spLabs)\n', (6296, 6325), False, 'from pandas import DataFrame\n'), ((6673, 6712), 'pandas.DataFrame', 'DataFrame', (['siteOut'], {'index': 'self.siteLabs'}), '(siteOut, index=self.siteLabs)\n', (6682, 6712), False, 'from pandas import DataFrame\n'), ((6745, 6781), 'pandas.DataFrame', 'DataFrame', (['spCent'], {'index': 'self.spLabs'}), '(spCent, index=self.spLabs)\n', (6754, 6781), False, 'from pandas import DataFrame\n'), ((4942, 4959), 'numpy.cumsum', 'np.cumsum', (['(x ** 2)'], {}), '(x ** 2)\n', (4951, 4959), True, 'import numpy as np\n'), ((4960, 4974), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (4966, 4974), True, 'import numpy as np\n'), ((5057, 5074), 'numpy.cumsum', 'np.cumsum', (['(x ** 2)'], {}), '(x ** 2)\n', (5066, 5074), True, 'import numpy as np\n'), ((5075, 5089), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (5081, 5089), True, 'import numpy as np\n'), ((5181, 5198), 'numpy.cumsum', 'np.cumsum', (['(x ** 2)'], {}), '(x ** 2)\n', (5190, 5198), True, 'import numpy as np\n'), ((5199, 5213), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (5205, 5213), True, 'import numpy as np\n'), ((5293, 5310), 'numpy.cumsum', 'np.cumsum', (['(x ** 2)'], {}), '(x ** 2)\n', (5302, 5310), True, 'import numpy as np\n'), ((5311, 5325), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (5317, 5325), True, 'import numpy as np\n')]
|
'''Class to find shapes in gray image with cv2'''
# import the necessary packages
import argparse
import cv2 as cv2
import os
import numpy as np
from PIL import Image
result = [0] *256
image_path = '../resources/'+os.getenv('IMAGE', 'sample.bin')
xbash = np.fromfile(image_path, dtype='uint8')
#print(xbash.shape)
image_path = 'image.png'
x = 256
y = 256
cv2.imwrite(image_path, xbash[:x*y].reshape(x,y))
# load the image, convert it to grayscale, blur it slightly,
# and threshold it
array = np.array(Image.open(image_path))
#print(array)
for row_index, line in enumerate(array):
#print (line)
for column_index, pixel in enumerate(line):
#print (pixel)
if(pixel ==200):
array[row_index][column_index]='0'
if(pixel ==0):
array[row_index][column_index]='255'
invimg = Image.fromarray(array)
invimg.save(image_path)
image = cv2.imread(image_path)
####
row, col = image.shape[:2]
bottom = image[row-2:row, 0:col]
mean = cv2.mean(bottom)[0]
bordersize = 10
border = cv2.copyMakeBorder(
image,
top=bordersize,
bottom=bordersize,
left=bordersize,
right=bordersize,
borderType=cv2.BORDER_CONSTANT,
value=[255, 255, 255]
)
image = border
print(image.shape)
image = cv2.resize(image,(100,100))
print(image.shape)
###
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow("Gray", gray)
#cv2.waitKey(0)
#gray = cv2.GaussianBlur(gray, (5,5), 0)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#cv2.imshow("Blurred", blurred)
#cv2.waitKey(0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
cv2.imshow("Imageb", blurred)
cv2.waitKey(0)
cnts, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_ = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in cnts]
final = np.zeros(image.shape,np.uint8)
mask = np.zeros(gray.shape,np.uint8)
r =0
for c in cnts:
# average L*a*b* value for the masked region
mask = np.zeros(image.shape[:2], dtype="uint8")
cv2.drawContours(mask, [c], -1, 255, -1)
mask = cv2.erode(mask, None, iterations=2)
mean = cv2.mean(image, mask=mask)[:3]
# initialize the minimum distance found thus far
minDist = (np.inf, None)
print ('//////////////')
print(mean[0])
print ('black' if mean[0]<15 else 'other')
print ('--------------/////////////////////-------------')
print ('//////////////')
print ('Grey' if mean[0]<220 and mean[0]>180 else 'other')
print ('--------------/////////////////////-------------')
print ('//////////////')
print ('white' if mean[0]<256 and mean[0]>130 else 'other')
print ('--------------/////////////////////-------------')
x = ((c[0])[0])[0]
y = ((c[0])[0])[1]
#print (str(c))
M = cv2.moments(c)
d = M["m00"]
if d <= 0:
d =1
cX = int(M["m10"] / d)
cY = int(M["m01"] / d)
print(image[cX][cY])
cv2.circle(image, (cX, cY), 7, (255, 0, 0), -1)
cv2.putText(image, "center", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
print('==========================')
#for p in c:
#print(image[p[0][0]][p[0][1]])
color_index = (image[x][y])[0]
result[color_index] += 1
print ('-------------------------')
#print (image[x][y])
#print (image[x][y])
cv2.drawContours(image, [c], 0, (100), 5)
# show the image
cv2.imshow("Image"+str(r), image)
cv2.waitKey(0)
r+=1
print (result)
|
[
"cv2.GaussianBlur",
"cv2.approxPolyDP",
"cv2.erode",
"cv2.imshow",
"cv2.cvtColor",
"cv2.copyMakeBorder",
"cv2.drawContours",
"cv2.mean",
"cv2.resize",
"cv2.circle",
"cv2.waitKey",
"os.getenv",
"cv2.putText",
"numpy.fromfile",
"cv2.threshold",
"cv2.moments",
"numpy.zeros",
"PIL.Image.open",
"cv2.imread",
"PIL.Image.fromarray",
"cv2.findContours"
] |
[((258, 296), 'numpy.fromfile', 'np.fromfile', (['image_path'], {'dtype': '"""uint8"""'}), "(image_path, dtype='uint8')\n", (269, 296), True, 'import numpy as np\n'), ((827, 849), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (842, 849), False, 'from PIL import Image\n'), ((883, 905), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (893, 905), True, 'import cv2 as cv2\n'), ((1025, 1185), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image'], {'top': 'bordersize', 'bottom': 'bordersize', 'left': 'bordersize', 'right': 'bordersize', 'borderType': 'cv2.BORDER_CONSTANT', 'value': '[255, 255, 255]'}), '(image, top=bordersize, bottom=bordersize, left=\n bordersize, right=bordersize, borderType=cv2.BORDER_CONSTANT, value=[\n 255, 255, 255])\n', (1043, 1185), True, 'import cv2 as cv2\n'), ((1249, 1278), 'cv2.resize', 'cv2.resize', (['image', '(100, 100)'], {}), '(image, (100, 100))\n', (1259, 1278), True, 'import cv2 as cv2\n'), ((1307, 1346), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1319, 1346), True, 'import cv2 as cv2\n'), ((1445, 1478), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (1461, 1478), True, 'import cv2 as cv2\n'), ((1596, 1625), 'cv2.imshow', 'cv2.imshow', (['"""Imageb"""', 'blurred'], {}), "('Imageb', blurred)\n", (1606, 1625), True, 'import cv2 as cv2\n'), ((1630, 1644), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1641, 1644), True, 'import cv2 as cv2\n'), ((1664, 1728), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1680, 1728), True, 'import cv2 as cv2\n'), ((1863, 1894), 'numpy.zeros', 'np.zeros', (['image.shape', 'np.uint8'], {}), '(image.shape, np.uint8)\n', (1871, 1894), True, 'import numpy as np\n'), ((1901, 1931), 'numpy.zeros', 'np.zeros', (['gray.shape', 'np.uint8'], {}), '(gray.shape, np.uint8)\n', (1909, 1931), True, 'import numpy as np\n'), ((217, 249), 'os.getenv', 'os.getenv', (['"""IMAGE"""', '"""sample.bin"""'], {}), "('IMAGE', 'sample.bin')\n", (226, 249), False, 'import os\n'), ((506, 528), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (516, 528), False, 'from PIL import Image\n'), ((979, 995), 'cv2.mean', 'cv2.mean', (['bottom'], {}), '(bottom)\n', (987, 995), True, 'import cv2 as cv2\n'), ((1541, 1591), 'cv2.threshold', 'cv2.threshold', (['blurred', '(60)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blurred, 60, 255, cv2.THRESH_BINARY)\n', (1554, 1591), True, 'import cv2 as cv2\n'), ((1806, 1836), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', '(3)', '(True)'], {}), '(cnt, 3, True)\n', (1822, 1836), True, 'import cv2 as cv2\n'), ((2013, 2053), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {'dtype': '"""uint8"""'}), "(image.shape[:2], dtype='uint8')\n", (2021, 2053), True, 'import numpy as np\n'), ((2058, 2098), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[c]', '(-1)', '(255)', '(-1)'], {}), '(mask, [c], -1, 255, -1)\n', (2074, 2098), True, 'import cv2 as cv2\n'), ((2110, 2145), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (2119, 2145), True, 'import cv2 as cv2\n'), ((2817, 2831), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (2828, 2831), True, 'import cv2 as cv2\n'), ((2961, 3008), 'cv2.circle', 'cv2.circle', (['image', '(cX, cY)', '(7)', '(255, 0, 0)', '(-1)'], {}), '(image, (cX, cY), 7, 
(255, 0, 0), -1)\n', (2971, 3008), True, 'import cv2 as cv2\n'), ((3013, 3113), 'cv2.putText', 'cv2.putText', (['image', '"""center"""', '(cX - 20, cY - 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 0, 0)', '(2)'], {}), "(image, 'center', (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (255, 0, 0), 2)\n", (3024, 3113), True, 'import cv2 as cv2\n'), ((3364, 3403), 'cv2.drawContours', 'cv2.drawContours', (['image', '[c]', '(0)', '(100)', '(5)'], {}), '(image, [c], 0, 100, 5)\n', (3380, 3403), True, 'import cv2 as cv2\n'), ((3473, 3487), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3484, 3487), True, 'import cv2 as cv2\n'), ((2157, 2183), 'cv2.mean', 'cv2.mean', (['image'], {'mask': 'mask'}), '(image, mask=mask)\n', (2165, 2183), True, 'import cv2 as cv2\n')]
|
from PIL import Image
import PIL.ImageOps
import numpy as np
import tensorflow as tf
import time
## start timing
stime = time.time()
### load the trained model
model = tf.keras.models.load_model('my_model.h5')
# model.summary()
### crop to the valid region
img = PIL.ImageOps.invert(Image.open('sample.bmp')).convert("1")
Newimg = np.asarray(img.crop(img.getbbox()))
### character class
class ocr:
def __init__(self, Newimg, rowSize,colSize, idxR,idxC):
self.points = set()
self.findContour(Newimg, rowSize,colSize, idxR,idxC)
self.makeNewimage()
self.ID = model.predict(self.resize28(self.image).reshape(1,-1))
def findContour(self, Newimg, rowSize,colSize, idxR,idxC):
        ## store the first group of connected pixels from the input image in points (a set)
if (idxR, idxC) not in self.points:
self.points.add( (idxR, idxC) )
if idxC+1 < colSize:
if Newimg[idxR, idxC+1]:
self.findContour(Newimg, rowSize,colSize, idxR,idxC+1)
if idxR+1<rowSize and Newimg[idxR+1, idxC+1]:
self.findContour(Newimg, rowSize,colSize, idxR+1,idxC+1)
if idxR-1>=0 and Newimg[idxR-1, idxC+1]:
self.findContour(Newimg, rowSize,colSize, idxR-1,idxC+1)
if idxR+1<rowSize and Newimg[idxR+1, idxC]:
self.findContour(Newimg, rowSize,colSize, idxR+1,idxC)
if idxR-1>=0 and Newimg[idxR-1, idxC]:
self.findContour(Newimg, rowSize,colSize, idxR-1,idxC)
if idxC-1 >= 0:
if Newimg[idxR, idxC-1]:
self.findContour(Newimg, rowSize,colSize, idxR,idxC-1)
if idxR+1<rowSize and Newimg[idxR+1, idxC-1]:
self.findContour(Newimg, rowSize,colSize, idxR+1,idxC-1)
if idxR-1>=0 and Newimg[idxR-1, idxC-1]:
self.findContour(Newimg, rowSize,colSize, idxR-1,idxC-1)
def makeNewimage(self):
        ## build and save a new image from the coordinates in points
cRange = np.array([[e1, e2] for (e1, e2) in self.points])
dr = np.amax(cRange, 0)
ul = np.amin(cRange, 0)
self.image = np.zeros((dr[0]-ul[0]+1,dr[1]-ul[1]+1))
self.image[cRange[:,0]-ul[0], cRange[:,1]-ul[1]] = 1
self.position = ( np.around(np.mean(cRange[:,0])), np.around(np.mean(cRange[:,1])) )
@staticmethod
def resize28(image):
        ## shrink to 28*28
rowSize, colSize = image.shape
temp_im = Image.fromarray(image)
resized = np.zeros((28,28))
if rowSize>colSize:
shortLength = int(colSize*20/rowSize)
temp_im = temp_im.resize( (shortLength,20) )
resized[4:24, int(13-shortLength/2):int(13-shortLength/2)+shortLength] = np.asarray(temp_im)
else:
shortLength = int(rowSize*20/colSize)
temp_im = temp_im.resize( (20,shortLength) )
resized[int(13-shortLength/2):int(13-shortLength/2)+shortLength, 4:24] = np.asarray(temp_im)
return resized
@classmethod
def many(cls, Newimg):
        ## extract the list of characters
ocrList = []
rowSize, colSize = Newimg.shape
idxs = np.argwhere(Newimg)
idxs = list(map(tuple, idxs[idxs[:,1].argsort()]))
while True:
eachString = cls(Newimg, rowSize,colSize, idxs[0][0],idxs[0][1])
ocrList.append(eachString)
idxs = [e for e in idxs if e not in eachString.points]
if not idxs:
break
return ocrList
### segment characters by discontinuities
ocrList = ocr.many(Newimg)
## print elapsed time
print('elapsed time :', time.time()-stime, 'seconds')
### save each cropped image separately
iter = 0
for classed_string in ocrList:
iter += 1
name = 'CLASSED{}_{}.bmp'.format(iter, np.argmax(classed_string.ID) )
print(name, ['{:.2f}'.format(item) for item in classed_string.ID.tolist()[0]])
Image.fromarray(255*classed_string.image).convert('RGB').save(name)
|
[
"tensorflow.keras.models.load_model",
"numpy.amin",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros",
"time.time",
"numpy.amax",
"numpy.argwhere",
"PIL.Image.open",
"numpy.mean",
"numpy.array",
"PIL.Image.fromarray"
] |
[((117, 128), 'time.time', 'time.time', ([], {}), '()\n', (126, 128), False, 'import time\n'), ((152, 193), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""my_model.h5"""'], {}), "('my_model.h5')\n", (178, 193), True, 'import tensorflow as tf\n'), ((1977, 2023), 'numpy.array', 'np.array', (['[[e1, e2] for e1, e2 in self.points]'], {}), '([[e1, e2] for e1, e2 in self.points])\n', (1985, 2023), True, 'import numpy as np\n'), ((2039, 2057), 'numpy.amax', 'np.amax', (['cRange', '(0)'], {}), '(cRange, 0)\n', (2046, 2057), True, 'import numpy as np\n'), ((2071, 2089), 'numpy.amin', 'np.amin', (['cRange', '(0)'], {}), '(cRange, 0)\n', (2078, 2089), True, 'import numpy as np\n'), ((2111, 2159), 'numpy.zeros', 'np.zeros', (['(dr[0] - ul[0] + 1, dr[1] - ul[1] + 1)'], {}), '((dr[0] - ul[0] + 1, dr[1] - ul[1] + 1))\n', (2119, 2159), True, 'import numpy as np\n'), ((2426, 2448), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (2441, 2448), False, 'from PIL import Image\n'), ((2467, 2485), 'numpy.zeros', 'np.zeros', (['(28, 28)'], {}), '((28, 28))\n', (2475, 2485), True, 'import numpy as np\n'), ((3112, 3131), 'numpy.argwhere', 'np.argwhere', (['Newimg'], {}), '(Newimg)\n', (3123, 3131), True, 'import numpy as np\n'), ((3546, 3557), 'time.time', 'time.time', ([], {}), '()\n', (3555, 3557), False, 'import time\n'), ((3679, 3707), 'numpy.argmax', 'np.argmax', (['classed_string.ID'], {}), '(classed_string.ID)\n', (3688, 3707), True, 'import numpy as np\n'), ((252, 276), 'PIL.Image.open', 'Image.open', (['"""sample.bmp"""'], {}), "('sample.bmp')\n", (262, 276), False, 'from PIL import Image\n'), ((2705, 2724), 'numpy.asarray', 'np.asarray', (['temp_im'], {}), '(temp_im)\n', (2715, 2724), True, 'import numpy as np\n'), ((2931, 2950), 'numpy.asarray', 'np.asarray', (['temp_im'], {}), '(temp_im)\n', (2941, 2950), True, 'import numpy as np\n'), ((2248, 2269), 'numpy.mean', 'np.mean', (['cRange[:, 0]'], {}), '(cRange[:, 0])\n', (2255, 2269), True, 'import numpy as np\n'), ((2281, 2302), 'numpy.mean', 'np.mean', (['cRange[:, 1]'], {}), '(cRange[:, 1])\n', (2288, 2302), True, 'import numpy as np\n'), ((3797, 3840), 'PIL.Image.fromarray', 'Image.fromarray', (['(255 * classed_string.image)'], {}), '(255 * classed_string.image)\n', (3812, 3840), False, 'from PIL import Image\n')]
|
'''
Useful functions for combined signal strategy
'''
import numpy as np
def get_split_w_threshold(alpha, normalization='exponential'):
"""
    Get normalized weights and thresholds from the alpha vector
    :param alpha: optimized parameter vector (weights followed by the buy and sell thresholds)
    :param normalization: 'exponential' (softmax) or 'l1' normalization of the weights
    :return: weights, buy threshold and sell threshold
"""
w = []
if normalization == 'exponential':
w = np.exp(alpha[:len(alpha)-2])/np.sum(np.exp(alpha[:len(alpha)-2]))
elif normalization == 'l1':
w = alpha[:len(alpha)-2]/np.sum(np.abs(alpha[:len(alpha)-2]))
buy_threshold = alpha[len(alpha)-2]
sell_threshold = alpha[len(alpha)-1]
return w, buy_threshold, sell_threshold
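# --- Illustrative usage (not part of the original module; a hedged sketch) ---
# The last two entries of alpha are read back as the buy and sell thresholds,
# while the remaining entries are normalized into weights; the alpha values
# below are invented purely for illustration.
#   alpha = np.array([0.2, 0.5, 0.3, 0.1, -0.1])
#   w, buy_t, sell_t = get_split_w_threshold(alpha, normalization='exponential')
#   # w is a softmax over alpha[:-2] (it sums to 1); buy_t == 0.1, sell_t == -0.1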
def get_combined_signal(moving_average_rules, moving_averages, w, index):
"""
Combines in a weighted way buy-sell signals coming from moving average crosses.
:param moving_average_rules: list with moving average rules
:param moving_averages: dict with moving averages from historical data
:param w: weights vector
    :param index: moving averages index
    :return: final signal obtained by combining all signals
"""
signal_list = []
# Get signals from all moving averages rules
for short_period, long_period in moving_average_rules:
moving_average_short = moving_averages['MA_' + str(short_period)][index]
moving_average_long = moving_averages['MA_' + str(long_period)][index]
if moving_average_short < moving_average_long:
signal_list.append(-1)
else:
signal_list.append(+1)
final_signal = np.sum(np.array(w)*np.array(signal_list))
return final_signal
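# --- Illustrative usage (not part of the original module; a hedged sketch) ---
# One way the two helpers might be chained; the rule list and moving-average
# dict below are placeholders for real historical data.
#   moving_average_rules = [(5, 20), (10, 50)]
#   moving_averages = {'MA_5': [...], 'MA_20': [...], 'MA_10': [...], 'MA_50': [...]}
#   w, buy_threshold, sell_threshold = get_split_w_threshold(alpha)
#   signal = get_combined_signal(moving_average_rules, moving_averages, w, index=-1)
#   # a long entry could be triggered when signal > buy_threshold,
#   # an exit when signal < sell_threshold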
|
[
"numpy.array"
] |
[((1541, 1552), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (1549, 1552), True, 'import numpy as np\n'), ((1553, 1574), 'numpy.array', 'np.array', (['signal_list'], {}), '(signal_list)\n', (1561, 1574), True, 'import numpy as np\n')]
|
#!/usr/bin/env python2.7
from __future__ import print_function, division
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import dtk
import h5py
import time
import sys
def plot_mag_dust(mag_delta, mag, name,obs= False,ybins=None):
plt.figure()
if obs:
xbins = np.linspace(10,25,100)
else:
xbins = np.linspace(-25,-10,100)
if ybins is None:
ybins = np.linspace(-1,3,100)
h,xbins,ybins = np.histogram2d(mag, mag_delta, bins=(xbins,ybins))
plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())
plt.xlabel(name +' no dust');plt.ylabel(name+' (dust - no dust)')
plt.grid()
def plot_clr_dust(mag_delta1, mag_delta2, mag, clr_name, mag_name,obs=False,xbins=None,ybins = None):
plt.figure()
if xbins is None:
if obs:
xbins = np.linspace(10,25,100)
else:
xbins = np.linspace(-25,-10,100)
if ybins is None:
ybins = np.linspace(-1,1,100)
h,xbins,ybins = np.histogram2d(mag, mag_delta1 - mag_delta2, bins = (xbins,ybins))
plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())
plt.xlabel(mag_name +' no dust');plt.ylabel(clr_name+' (dust - no dust)')
plt.grid()
def plot_dust_effect(fname):
t1 = time.time()
gal_prop = {}
hfile = h5py.File(fname,'r')
hgp = hfile['galaxyProperties']
m_star = np.log10(hgp['totalMassStellar'].value)
incl = hgp['morphology/inclination'].value
mag_gd = hgp['SDSS_filters/magnitude:SDSS_g:rest:dustAtlas'].value
mag_rd = hgp['SDSS_filters/magnitude:SDSS_r:rest:dustAtlas'].value
mag_id = hgp['SDSS_filters/magnitude:SDSS_i:rest:dustAtlas'].value
mag_gnd = hgp['SDSS_filters/magnitude:SDSS_g:rest'].value
mag_rnd = hgp['SDSS_filters/magnitude:SDSS_r:rest'].value
mag_ind = hgp['SDSS_filters/magnitude:SDSS_i:rest'].value
mag_dgd = mag_gd - mag_gnd
mag_drd = mag_rd - mag_rnd
mag_did = mag_id - mag_ind
plot_mag_dust(mag_dgd, mag_gnd, "Mag g rest", ybins=np.linspace(-.05,.05,100))
plot_clr_dust(mag_dgd, mag_drd, mag_rnd, "g-r rest", "Mag r rest", ybins=np.linspace(-.05,.05,100))
plot_clr_dust(mag_dgd, mag_drd, incl , "g-r rest", "inclination", xbins=np.linspace(0,90,100),ybins=np.linspace(-.06,.06,100))
mag_gd = hgp['SDSS_filters/magnitude:SDSS_g:observed:dustAtlas'].value
mag_rd = hgp['SDSS_filters/magnitude:SDSS_r:observed:dustAtlas'].value
mag_id = hgp['SDSS_filters/magnitude:SDSS_i:observed:dustAtlas'].value
mag_gnd = hgp['SDSS_filters/magnitude:SDSS_g:observed'].value
mag_rnd = hgp['SDSS_filters/magnitude:SDSS_r:observed'].value
mag_ind = hgp['SDSS_filters/magnitude:SDSS_i:observed'].value
mag_dgd = mag_gd - mag_gnd
mag_drd = mag_rd - mag_rnd
mag_did = mag_id - mag_ind
plot_mag_dust(mag_dgd, mag_gnd, "Mag g observed",obs=True)
# plot_mag_dust(mag_drd, mag_rnd, "Mag r observed",obs=True)
# plot_mag_dust(mag_did, mag_ind, "Mag i observed",obs=True)
plot_clr_dust(mag_dgd, mag_drd, mag_rnd, "g-r observed", "Mag r observed",obs=True)
# plot_clr_dust(mag_dgd, mag_drd, mag_rnd, "r-i observed", "Mag r observed",obs=True)
plt.show()
if __name__ == "__main__":
param = dtk.Param(sys.argv[1])
gltcs_fname = param.get_string("gltcs_fname")
steps = param.get_string_list("steps")
plot_dust_effect(gltcs_fname.replace('${step}',str(421)))
|
[
"h5py.File",
"matplotlib.pyplot.show",
"numpy.histogram2d",
"time.time",
"matplotlib.pyplot.figure",
"dtk.Param",
"matplotlib.colors.LogNorm",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((290, 302), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (300, 302), True, 'import matplotlib.pyplot as plt\n'), ((485, 536), 'numpy.histogram2d', 'np.histogram2d', (['mag', 'mag_delta'], {'bins': '(xbins, ybins)'}), '(mag, mag_delta, bins=(xbins, ybins))\n', (499, 536), True, 'import numpy as np\n'), ((603, 632), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["(name + ' no dust')"], {}), "(name + ' no dust')\n", (613, 632), True, 'import matplotlib.pyplot as plt\n'), ((632, 670), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(name + ' (dust - no dust)')"], {}), "(name + ' (dust - no dust)')\n", (642, 670), True, 'import matplotlib.pyplot as plt\n'), ((673, 683), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (681, 683), True, 'import matplotlib.pyplot as plt\n'), ((791, 803), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (801, 803), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1098), 'numpy.histogram2d', 'np.histogram2d', (['mag', '(mag_delta1 - mag_delta2)'], {'bins': '(xbins, ybins)'}), '(mag, mag_delta1 - mag_delta2, bins=(xbins, ybins))\n', (1047, 1098), True, 'import numpy as np\n'), ((1167, 1200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["(mag_name + ' no dust')"], {}), "(mag_name + ' no dust')\n", (1177, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1242), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(clr_name + ' (dust - no dust)')"], {}), "(clr_name + ' (dust - no dust)')\n", (1210, 1242), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1255), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1253, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1312), 'time.time', 'time.time', ([], {}), '()\n', (1310, 1312), False, 'import time\n'), ((1343, 1364), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1352, 1364), False, 'import h5py\n'), ((1413, 1452), 'numpy.log10', 'np.log10', (["hgp['totalMassStellar'].value"], {}), "(hgp['totalMassStellar'].value)\n", (1421, 1452), True, 'import numpy as np\n'), ((3202, 3212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3210, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3281), 'dtk.Param', 'dtk.Param', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3268, 3281), False, 'import dtk\n'), ((331, 355), 'numpy.linspace', 'np.linspace', (['(10)', '(25)', '(100)'], {}), '(10, 25, 100)\n', (342, 355), True, 'import numpy as np\n'), ((380, 406), 'numpy.linspace', 'np.linspace', (['(-25)', '(-10)', '(100)'], {}), '(-25, -10, 100)\n', (391, 406), True, 'import numpy as np\n'), ((443, 466), 'numpy.linspace', 'np.linspace', (['(-1)', '(3)', '(100)'], {}), '(-1, 3, 100)\n', (454, 466), True, 'import numpy as np\n'), ((982, 1005), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (993, 1005), True, 'import numpy as np\n'), ((584, 597), 'matplotlib.colors.LogNorm', 'clr.LogNorm', ([], {}), '()\n', (595, 597), True, 'import matplotlib.colors as clr\n'), ((862, 886), 'numpy.linspace', 'np.linspace', (['(10)', '(25)', '(100)'], {}), '(10, 25, 100)\n', (873, 886), True, 'import numpy as np\n'), ((919, 945), 'numpy.linspace', 'np.linspace', (['(-25)', '(-10)', '(100)'], {}), '(-25, -10, 100)\n', (930, 945), True, 'import numpy as np\n'), ((1148, 1161), 'matplotlib.colors.LogNorm', 'clr.LogNorm', ([], {}), '()\n', (1159, 1161), True, 'import matplotlib.colors as clr\n'), ((2048, 2077), 'numpy.linspace', 'np.linspace', (['(-0.05)', '(0.05)', '(100)'], {}), '(-0.05, 0.05, 100)\n', (2059, 2077), True, 'import 
numpy as np\n'), ((2152, 2181), 'numpy.linspace', 'np.linspace', (['(-0.05)', '(0.05)', '(100)'], {}), '(-0.05, 0.05, 100)\n', (2163, 2181), True, 'import numpy as np\n'), ((2255, 2278), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', '(100)'], {}), '(0, 90, 100)\n', (2266, 2278), True, 'import numpy as np\n'), ((2283, 2312), 'numpy.linspace', 'np.linspace', (['(-0.06)', '(0.06)', '(100)'], {}), '(-0.06, 0.06, 100)\n', (2294, 2312), True, 'import numpy as np\n')]
|
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook glm.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Generalized Linear Models
import numpy as np
import statsmodels.api as sm
from scipy import stats
from matplotlib import pyplot as plt
# ## GLM: Binomial response data
#
# ### Load data
#
# In this example, we use the Star98 dataset, taken with permission from <NAME> (2000), Generalized Linear Models: A Unified Approach. Codebook information can be obtained by typing:
print(sm.datasets.star98.NOTE)
# Load the data and add a constant to the exogenous (independent) variables:
data = sm.datasets.star98.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# The dependent variable is N by 2 (Success: NABOVE, Failure: NBELOW):
print(data.endog[:5, :])
# The independent variables include all the other variables described above, as well as the interaction terms:
print(data.exog[:2, :])
# ### Fit and summary
glm_binom = sm.GLM(data.endog, data.exog, family=sm.families.Binomial())
res = glm_binom.fit()
print(res.summary())
# ### Quantities of interest
print('Total number of trials:', data.endog[0].sum())
print('Parameters: ', res.params)
print('T-values: ', res.tvalues)
# First differences: we hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact on the response variables:
means = data.exog.mean(axis=0)
means25 = means.copy()
means25[0] = stats.scoreatpercentile(data.exog[:, 0], 25)
means75 = means.copy()
means75[0] = lowinc_75per = stats.scoreatpercentile(data.exog[:, 0], 75)
resp_25 = res.predict(means25)
resp_75 = res.predict(means75)
diff = resp_75 - resp_25
# The interquartile first difference for the percentage of low income households in a school district is:
print("%2.4f%%" % (diff * 100))
# ### Plots
#
# We extract information that will be used to draw some interesting plots:
nobs = res.nobs
y = data.endog[:, 0] / data.endog.sum(1)
yhat = res.mu
# Plot yhat vs y:
from statsmodels.graphics.api import abline_plot
fig, ax = plt.subplots()
ax.scatter(yhat, y)
line_fit = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()
abline_plot(model_results=line_fit, ax=ax)
ax.set_title('Model Fit Plot')
ax.set_ylabel('Observed values')
ax.set_xlabel('Fitted values')
# Plot yhat vs. Pearson residuals:
fig, ax = plt.subplots()
ax.scatter(yhat, res.resid_pearson)
ax.hlines(0, 0, 1)
ax.set_xlim(0, 1)
ax.set_title('Residual Dependence Plot')
ax.set_ylabel('Pearson Residuals')
ax.set_xlabel('Fitted values')
# Histogram of standardized deviance residuals:
from scipy import stats
fig, ax = plt.subplots()
resid = res.resid_deviance.copy()
resid_std = stats.zscore(resid)
ax.hist(resid_std, bins=25)
ax.set_title('Histogram of standardized deviance residuals')
# QQ plot of the deviance residuals:
from statsmodels import graphics
graphics.gofplots.qqplot(resid, line='r')
# ## GLM: Gamma for proportional count response
#
# ### Load data
#
# In the example above, we printed the ``NOTE`` attribute to learn about the Star98 dataset. statsmodels datasets ship with other useful information. For example:
print(sm.datasets.scotland.DESCRLONG)
# Load the data and add a constant to the exogenous variables:
data2 = sm.datasets.scotland.load()
data2.exog = sm.add_constant(data2.exog, prepend=False)
print(data2.exog[:5, :])
print(data2.endog[:5])
# ### Fit and summary
glm_gamma = sm.GLM(data2.endog, data2.exog, family=sm.families.Gamma())
glm_results = glm_gamma.fit()
print(glm_results.summary())
# ## GLM: Gaussian distribution with a noncanonical link
#
# ### Artificial data
nobs2 = 100
x = np.arange(nobs2)
np.random.seed(54321)
X = np.column_stack((x, x**2))
X = sm.add_constant(X, prepend=False)
lny = np.exp(-(.03 * x + .0001 * x**2 - 1.0)) + .001 * np.random.rand(nobs2)
# ### Fit and summary
gauss_log = sm.GLM(lny, X, family=sm.families.Gaussian(sm.families.links.log))
gauss_log_results = gauss_log.fit()
print(gauss_log_results.summary())
|
[
"numpy.random.seed",
"scipy.stats.zscore",
"statsmodels.api.families.Binomial",
"scipy.stats.scoreatpercentile",
"statsmodels.api.families.Gamma",
"numpy.column_stack",
"statsmodels.graphics.gofplots.qqplot",
"numpy.arange",
"numpy.exp",
"statsmodels.graphics.api.abline_plot",
"statsmodels.api.datasets.scotland.load",
"numpy.random.rand",
"statsmodels.api.add_constant",
"statsmodels.api.datasets.star98.load",
"statsmodels.api.families.Gaussian",
"matplotlib.pyplot.subplots"
] |
[((474, 499), 'statsmodels.api.datasets.star98.load', 'sm.datasets.star98.load', ([], {}), '()\n', (497, 499), True, 'import statsmodels.api as sm\n'), ((512, 553), 'statsmodels.api.add_constant', 'sm.add_constant', (['data.exog'], {'prepend': '(False)'}), '(data.exog, prepend=False)\n', (527, 553), True, 'import statsmodels.api as sm\n'), ((1092, 1136), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['data.exog[:, 0]', '(25)'], {}), '(data.exog[:, 0], 25)\n', (1115, 1136), False, 'from scipy import stats\n'), ((1188, 1232), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['data.exog[:, 0]', '(75)'], {}), '(data.exog[:, 0], 75)\n', (1211, 1232), False, 'from scipy import stats\n'), ((1562, 1576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1574, 1576), True, 'from matplotlib import pyplot as plt\n'), ((1661, 1703), 'statsmodels.graphics.api.abline_plot', 'abline_plot', ([], {'model_results': 'line_fit', 'ax': 'ax'}), '(model_results=line_fit, ax=ax)\n', (1672, 1703), False, 'from statsmodels.graphics.api import abline_plot\n'), ((1847, 1861), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1859, 1861), True, 'from matplotlib import pyplot as plt\n'), ((2094, 2108), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2106, 2108), True, 'from matplotlib import pyplot as plt\n'), ((2156, 2175), 'scipy.stats.zscore', 'stats.zscore', (['resid'], {}), '(resid)\n', (2168, 2175), False, 'from scipy import stats\n'), ((2311, 2352), 'statsmodels.graphics.gofplots.qqplot', 'graphics.gofplots.qqplot', (['resid'], {'line': '"""r"""'}), "(resid, line='r')\n", (2335, 2352), False, 'from statsmodels import graphics\n'), ((2535, 2562), 'statsmodels.api.datasets.scotland.load', 'sm.datasets.scotland.load', ([], {}), '()\n', (2560, 2562), True, 'import statsmodels.api as sm\n'), ((2576, 2618), 'statsmodels.api.add_constant', 'sm.add_constant', (['data2.exog'], {'prepend': '(False)'}), '(data2.exog, prepend=False)\n', (2591, 2618), True, 'import statsmodels.api as sm\n'), ((2874, 2890), 'numpy.arange', 'np.arange', (['nobs2'], {}), '(nobs2)\n', (2883, 2890), True, 'import numpy as np\n'), ((2891, 2912), 'numpy.random.seed', 'np.random.seed', (['(54321)'], {}), '(54321)\n', (2905, 2912), True, 'import numpy as np\n'), ((2917, 2945), 'numpy.column_stack', 'np.column_stack', (['(x, x ** 2)'], {}), '((x, x ** 2))\n', (2932, 2945), True, 'import numpy as np\n'), ((2948, 2981), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {'prepend': '(False)'}), '(X, prepend=False)\n', (2963, 2981), True, 'import statsmodels.api as sm\n'), ((2988, 3031), 'numpy.exp', 'np.exp', (['(-(0.03 * x + 0.0001 * x ** 2 - 1.0))'], {}), '(-(0.03 * x + 0.0001 * x ** 2 - 1.0))\n', (2994, 3031), True, 'import numpy as np\n'), ((752, 774), 'statsmodels.api.families.Binomial', 'sm.families.Binomial', ([], {}), '()\n', (772, 774), True, 'import statsmodels.api as sm\n'), ((2740, 2759), 'statsmodels.api.families.Gamma', 'sm.families.Gamma', ([], {}), '()\n', (2757, 2759), True, 'import statsmodels.api as sm\n'), ((3037, 3058), 'numpy.random.rand', 'np.random.rand', (['nobs2'], {}), '(nobs2)\n', (3051, 3058), True, 'import numpy as np\n'), ((3115, 3158), 'statsmodels.api.families.Gaussian', 'sm.families.Gaussian', (['sm.families.links.log'], {}), '(sm.families.links.log)\n', (3135, 3158), True, 'import statsmodels.api as sm\n'), ((1618, 1653), 'statsmodels.api.add_constant', 'sm.add_constant', (['yhat'], {'prepend': '(True)'}), '(yhat, prepend=True)\n', (1633, 
1653), True, 'import statsmodels.api as sm\n')]
|
import sapien.core as sapien
import mplib
import numpy as np
from sapien.utils.viewer import Viewer
class PlanningDemo():
def __init__(self):
self.engine = sapien.Engine()
self.renderer = sapien.VulkanRenderer()
self.engine.set_renderer(self.renderer)
scene_config = sapien.SceneConfig()
self.scene = self.engine.create_scene(scene_config)
self.scene.set_timestep(1 / 240.0)
self.scene.add_ground(-0.8)
physical_material = self.scene.create_physical_material(1, 1, 0.0)
self.scene.default_physical_material = physical_material
self.rscene = self.scene.get_renderer_scene()
self.rscene.set_ambient_light([0.5, 0.5, 0.5])
self.rscene.add_directional_light([0, 1, -1], [0.5, 0.5, 0.5], shadow=True)
self.rscene.add_point_light([1, 2, 2], [1, 1, 1], shadow=True)
self.rscene.add_point_light([1, -2, 2], [1, 1, 1], shadow=True)
self.rscene.add_point_light([-1, 0, 1], [1, 1, 1], shadow=True)
self.viewer = Viewer(self.renderer)
self.viewer.set_scene(self.scene)
self.viewer.set_camera_xyz(x=1.2, y=0.25, z=0.4)
self.viewer.set_camera_rpy(r=0, p=-0.4, y=2.7)
# Robot
# Load URDF
loader: sapien.URDFLoader = self.scene.create_urdf_loader()
loader.fix_root_link = True
self.robot: sapien.Articulation = loader.load("./panda/panda.urdf")
self.robot.set_root_pose(sapien.Pose([0, 0, 0], [1, 0, 0, 0]))
# Set initial joint positions
init_qpos = [0, 0.19634954084936207, 0.0, -2.617993877991494, 0.0, 2.941592653589793, 0.7853981633974483, 0, 0]
self.robot.set_qpos(init_qpos)
self.active_joints = self.robot.get_active_joints()
for joint in self.active_joints:
joint.set_drive_property(stiffness=1000, damping=200)
# table top
builder = self.scene.create_actor_builder()
builder.add_box_collision(half_size=[0.4, 0.4, 0.025])
builder.add_box_visual(half_size=[0.4, 0.4, 0.025])
self.table = builder.build_kinematic(name='table')
self.table.set_pose(sapien.Pose([0.56, 0, - 0.025]))
# boxes
builder = self.scene.create_actor_builder()
builder.add_box_collision(half_size=[0.02, 0.02, 0.06])
builder.add_box_visual(half_size=[0.02, 0.02, 0.06], color=[1, 0, 0])
self.red_cube = builder.build(name='red_cube')
self.red_cube.set_pose(sapien.Pose([0.7, 0, 0.06]))
builder = self.scene.create_actor_builder()
builder.add_box_collision(half_size=[0.04, 0.04, 0.005])
builder.add_box_visual(half_size=[0.04, 0.04, 0.005], color=[0, 1, 0])
self.green_cube = builder.build(name='green_cube')
self.green_cube.set_pose(sapien.Pose([0.4, 0.3, 0.005]))
builder = self.scene.create_actor_builder()
builder.add_box_collision(half_size=[0.05, 0.2, 0.1])
builder.add_box_visual(half_size=[0.05, 0.2, 0.1], color=[0, 0, 1])
self.blue_cube = builder.build(name='blue_cube')
self.blue_cube.set_pose(sapien.Pose([0.55, 0, 0.1]))
self.setup_planner()
def setup_planner(self):
link_names = [link.get_name() for link in self.robot.get_links()]
joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
self.planner = mplib.Planner(
urdf="./panda/panda.urdf",
srdf="./panda/panda.srdf",
user_link_names=link_names,
user_joint_names=joint_names,
move_group="panda_hand",
joint_vel_limits=np.ones(7),
joint_acc_limits=np.ones(7))
def follow_path(self, result):
n_step = result['position'].shape[0]
for i in range(n_step):
qf = self.robot.compute_passive_force(
gravity=True,
coriolis_and_centrifugal=True)
self.robot.set_qf(qf)
for j in range(7):
self.active_joints[j].set_drive_target(result['position'][i][j])
self.active_joints[j].set_drive_velocity_target(result['velocity'][i][j])
self.scene.step()
if i % 4 == 0:
self.scene.update_render()
self.viewer.render()
def open_gripper(self):
for joint in self.active_joints[-2:]:
joint.set_drive_target(0.4)
for i in range(100):
qf = self.robot.compute_passive_force(
gravity=True,
coriolis_and_centrifugal=True)
self.robot.set_qf(qf)
self.scene.step()
if i % 4 == 0:
self.scene.update_render()
self.viewer.render()
def close_gripper(self):
for joint in self.active_joints[-2:]:
joint.set_drive_target(0)
for i in range(100):
qf = self.robot.compute_passive_force(
gravity=True,
coriolis_and_centrifugal=True)
self.robot.set_qf(qf)
self.scene.step()
if i % 4 == 0:
self.scene.update_render()
self.viewer.render()
def add_point_cloud(self):
import trimesh
box = trimesh.creation.box([0.1, 0.4, 0.2])
points, _ = trimesh.sample.sample_surface(box, 1000)
points += [0.55, 0, 0.1]
self.planner.update_point_cloud(points)
return
def move_to_pose(self, pose):
result = self.planner.plan_screw(pose, self.robot.get_qpos(), time_step=1/250,
use_point_cloud=self.use_point_cloud, use_attach=self.use_attach)
if result['status'] != "Success":
result = self.planner.plan(pose, self.robot.get_qpos(), time_step=1/250,
use_point_cloud=self.use_point_cloud, use_attach=self.use_attach)
if result['status'] != "Success":
print(result['status'])
return -1
self.follow_path(result)
return 0
def demo(self, with_screw = True, use_point_cloud = True, use_attach = True):
pickup_pose = [0.7, 0, 0.12, 0, 1, 0, 0]
delivery_pose = [0.4, 0.3, 0.13, 0, 1, 0, 0]
self.use_point_cloud = use_point_cloud
if self.use_point_cloud:
self.add_point_cloud()
self.use_attach = False
pickup_pose[2] += 0.2
self.move_to_pose(pickup_pose)
self.open_gripper()
pickup_pose[2] -= 0.12
self.move_to_pose(pickup_pose)
self.close_gripper()
if use_attach:
self.use_attach = True
self.planner.update_attached_box([0.04, 0.04, 0.12], [0, 0, 0.14, 1, 0, 0, 0])
pickup_pose[2] += 0.12
self.move_to_pose(pickup_pose)
delivery_pose[2] += 0.2
self.move_to_pose(delivery_pose)
delivery_pose[2] -= 0.12
self.move_to_pose(delivery_pose)
self.open_gripper()
delivery_pose[2] += 0.12
self.move_to_pose(delivery_pose)
if __name__ == '__main__':
demo = PlanningDemo()
demo.demo()
|
[
"sapien.core.SceneConfig",
"trimesh.sample.sample_surface",
"numpy.ones",
"sapien.core.Pose",
"trimesh.creation.box",
"sapien.utils.viewer.Viewer",
"sapien.core.Engine",
"sapien.core.VulkanRenderer"
] |
[((169, 184), 'sapien.core.Engine', 'sapien.Engine', ([], {}), '()\n', (182, 184), True, 'import sapien.core as sapien\n'), ((209, 232), 'sapien.core.VulkanRenderer', 'sapien.VulkanRenderer', ([], {}), '()\n', (230, 232), True, 'import sapien.core as sapien\n'), ((305, 325), 'sapien.core.SceneConfig', 'sapien.SceneConfig', ([], {}), '()\n', (323, 325), True, 'import sapien.core as sapien\n'), ((1037, 1058), 'sapien.utils.viewer.Viewer', 'Viewer', (['self.renderer'], {}), '(self.renderer)\n', (1043, 1058), False, 'from sapien.utils.viewer import Viewer\n'), ((5263, 5300), 'trimesh.creation.box', 'trimesh.creation.box', (['[0.1, 0.4, 0.2]'], {}), '([0.1, 0.4, 0.2])\n', (5283, 5300), False, 'import trimesh\n'), ((5321, 5361), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['box', '(1000)'], {}), '(box, 1000)\n', (5350, 5361), False, 'import trimesh\n'), ((1463, 1499), 'sapien.core.Pose', 'sapien.Pose', (['[0, 0, 0]', '[1, 0, 0, 0]'], {}), '([0, 0, 0], [1, 0, 0, 0])\n', (1474, 1499), True, 'import sapien.core as sapien\n'), ((2150, 2180), 'sapien.core.Pose', 'sapien.Pose', (['[0.56, 0, -0.025]'], {}), '([0.56, 0, -0.025])\n', (2161, 2180), True, 'import sapien.core as sapien\n'), ((2480, 2507), 'sapien.core.Pose', 'sapien.Pose', (['[0.7, 0, 0.06]'], {}), '([0.7, 0, 0.06])\n', (2491, 2507), True, 'import sapien.core as sapien\n'), ((2798, 2828), 'sapien.core.Pose', 'sapien.Pose', (['[0.4, 0.3, 0.005]'], {}), '([0.4, 0.3, 0.005])\n', (2809, 2828), True, 'import sapien.core as sapien\n'), ((3110, 3137), 'sapien.core.Pose', 'sapien.Pose', (['[0.55, 0, 0.1]'], {}), '([0.55, 0, 0.1])\n', (3121, 3137), True, 'import sapien.core as sapien\n'), ((3626, 3636), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (3633, 3636), True, 'import numpy as np\n'), ((3667, 3677), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (3674, 3677), True, 'import numpy as np\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
""" Generating random graphs"""
from cyberbattle.simulation.model import Identifiers, NodeID, CredentialID, PortName, FirewallConfiguration, FirewallRule, RulePermission
import numpy as np
import networkx as nx
from cyberbattle.simulation import model as m
import random
from typing import List, Optional, Tuple, DefaultDict
from collections import defaultdict
ENV_IDENTIFIERS = Identifiers(
properties=[
'breach_node'
],
ports=['SMB', 'HTTP', 'RDP'],
local_vulnerabilities=[
'ScanWindowsCredentialManagerForRDP',
'ScanWindowsExplorerRecentFiles',
'ScanWindowsCredentialManagerForSMB'
],
remote_vulnerabilities=[
'Traceroute'
]
)
def generate_random_traffic_network(
n_clients: int = 200,
n_servers={
"SMB": 1,
"HTTP": 1,
"RDP": 1,
},
seed: Optional[int] = 0,
tolerance: np.float32 = np.float32(1e-3),
alpha=np.array([(0.1, 0.3), (0.18, 0.09)], dtype=float),
beta=np.array([(100, 10), (10, 100)], dtype=float),
) -> nx.DiGraph:
"""
Randomly generate a directed multi-edge network graph representing
fictitious SMB, HTTP, and RDP traffic.
Arguments:
n_clients: number of workstation nodes that can initiate sessions with server nodes
        n_servers: dictionary indicating the number of nodes listening on each protocol
        seed: seed for the pseudo-random number generator
tolerance: absolute tolerance for bounding the edge probabilities in [tolerance, 1-tolerance]
alpha: beta distribution parameters alpha such that E(edge prob) = alpha / beta
beta: beta distribution parameters beta such that E(edge prob) = alpha / beta
Returns:
        (nx.DiGraph): the randomly generated network from the hierarchical block model
"""
edges_labels = defaultdict(set) # set backed multidict
for protocol in list(n_servers.keys()):
sizes = [n_clients, n_servers[protocol]]
# sample edge probabilities from a beta distribution
np.random.seed(seed)
probs: np.ndarray = np.random.beta(a=alpha, b=beta, size=(2, 2))
# scale by edge type
if protocol == "SMB":
probs = 3 * probs
if protocol == "RDP":
probs = 4 * probs
# don't allow probs too close to zero or one
probs = np.clip(probs, a_min=tolerance, a_max=np.float32(1.0 - tolerance))
# sample edges using block models given edge probabilities
di_graph_for_protocol = nx.stochastic_block_model(
sizes=sizes, p=probs, directed=True, seed=seed)
for edge in di_graph_for_protocol.edges:
edges_labels[edge].add(protocol)
digraph = nx.DiGraph()
for (u, v), port in list(edges_labels.items()):
digraph.add_edge(u, v, protocol=port)
return digraph
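# --- Illustrative usage (not part of the original module; a hedged sketch) ---
# The parameter values below are arbitrary and only show the expected argument shapes.
#   traffic = generate_random_traffic_network(
#       n_clients=50,
#       n_servers={"SMB": 1, "HTTP": 1, "RDP": 1},
#       seed=1)
#   # every edge of the returned DiGraph carries a 'protocol' attribute holding
#   # the set of protocols sampled for that client/server pair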
def cyberbattle_model_from_traffic_graph(
traffic_graph: nx.DiGraph,
cached_smb_password_probability=0.75,
cached_rdp_password_probability=0.8,
cached_accessed_network_shares_probability=0.6,
cached_password_has_changed_probability=0.1,
traceroute_discovery_probability=0.5,
probability_two_nodes_use_same_password_to_access_given_resource=0.8
) -> nx.DiGraph:
"""Generate a random CyberBattle network model from a specified traffic (directed multi) graph.
The input graph can for instance be generated with `generate_random_traffic_network`.
Each edge of the input graph indicates that a communication took place
between the two nodes with the protocol specified in the edge label.
Returns a CyberBattle network with the same nodes and implanted vulnerabilities
    to be used to instantiate a CyberBattleSim gym.
Arguments:
cached_smb_password_probability, cached_rdp_password_probability:
probability that a password used for authenticated traffic was cached by the OS for SMB and RDP
cached_accessed_network_shares_probability:
probability that a network share accessed by the system was cached by the OS
cached_password_has_changed_probability:
probability that a given password cached on a node has been rotated on the target node
            (typically low, as people tend to change their passwords infrequently)
probability_two_nodes_use_same_password_to_access_given_resource:
as the variable name says
traceroute_discovery_probability:
probability that a target node of an SMB/RDP connection get exposed by a traceroute attack
"""
# convert node IDs to string
graph = nx.relabel_nodes(traffic_graph, {i: str(i) for i in traffic_graph.nodes})
password_counter: int = 0
def generate_password() -> CredentialID:
nonlocal password_counter
password_counter = password_counter + 1
return f'unique_pwd{password_counter}'
def traffic_targets(source_node: NodeID, protocol: str) -> List[NodeID]:
neighbors = [t for (s, t) in graph.edges()
if s == source_node and protocol in graph.edges[(s, t)]['protocol']]
return neighbors
# Map (node, port name) -> assigned pwd
assigned_passwords: DefaultDict[Tuple[NodeID, PortName],
List[CredentialID]] = defaultdict(list)
def assign_new_valid_password(node: NodeID, port: PortName) -> CredentialID:
pwd = generate_password()
assigned_passwords[node, port].append(pwd)
return pwd
def reuse_valid_password(node: NodeID, port: PortName) -> CredentialID:
"""Reuse a password already assigned to that node an port, if none is already
assigned create and assign a new valid password"""
if (node, port) not in assigned_passwords:
return assign_new_valid_password(node, port)
        # reuse any of the existing assigned valid passwords for that node/port
return random.choice(assigned_passwords[node, port])
def create_cached_credential(node: NodeID, port: PortName) -> CredentialID:
if random.random() < cached_password_has_changed_probability:
# generate a new invalid password
return generate_password()
else:
if random.random() < probability_two_nodes_use_same_password_to_access_given_resource:
return reuse_valid_password(node, port)
else:
return assign_new_valid_password(node, port)
def add_leak_neighbors_vulnerability(
node_id: m.NodeID,
            library: m.VulnerabilityLibrary = None) -> m.VulnerabilityLibrary:
        """Create random vulnerabilities
        that reveal immediate traffic neighbors from a given node"""
        # avoid sharing a mutable default dict across calls: give each node its own library
        if library is None:
            library = {}
rdp_neighbors = traffic_targets(node_id, 'RDP')
if len(rdp_neighbors) > 0:
library['ScanWindowsCredentialManagerForRDP'] = m.VulnerabilityInfo(
description="Look for RDP credentials in the Windows Credential Manager",
type=m.VulnerabilityType.LOCAL,
outcome=m.LeakedCredentials(credentials=[
m.CachedCredential(node=target_node, port='RDP',
credential=create_cached_credential(target_node, 'RDP'))
for target_node in rdp_neighbors
if random.random() < cached_rdp_password_probability
]),
reward_string="Discovered creds in the Windows Credential Manager",
cost=2.0
)
smb_neighbors = traffic_targets(node_id, 'SMB')
if len(smb_neighbors) > 0:
library['ScanWindowsExplorerRecentFiles'] = m.VulnerabilityInfo(
description="Look for network shares in the Windows Explorer Recent files",
type=m.VulnerabilityType.LOCAL,
outcome=m.LeakedNodesId(
[target_node
for target_node in smb_neighbors
if random.random() < cached_accessed_network_shares_probability
]
),
reward_string="Windows Explorer Recent Files revealed network shares",
cost=1.0
)
library['ScanWindowsCredentialManagerForSMB'] = m.VulnerabilityInfo(
description="Look for network credentials in the Windows Credential Manager",
type=m.VulnerabilityType.LOCAL,
outcome=m.LeakedCredentials(credentials=[
m.CachedCredential(node=target_node, port='SMB',
credential=create_cached_credential(target_node, 'SMB'))
for target_node in smb_neighbors
if random.random() < cached_smb_password_probability
]),
reward_string="Discovered SMB creds in the Windows Credential Manager",
cost=2.0
)
if len(smb_neighbors) > 0 and len(rdp_neighbors) > 0:
library['Traceroute'] = m.VulnerabilityInfo(
description="Attempt to discvover network nodes using Traceroute",
type=m.VulnerabilityType.REMOTE,
outcome=m.LeakedNodesId(
[target_node
for target_node in smb_neighbors or rdp_neighbors
if random.random() < traceroute_discovery_probability
]
),
reward_string="Discovered new network nodes via traceroute",
cost=5.0
)
return library
def create_vulnerabilities_from_traffic_data(node_id: m.NodeID):
return add_leak_neighbors_vulnerability(node_id=node_id)
firewall_conf = FirewallConfiguration(
[FirewallRule("RDP", RulePermission.ALLOW), FirewallRule("SMB", RulePermission.ALLOW)],
[FirewallRule("RDP", RulePermission.ALLOW), FirewallRule("SMB", RulePermission.ALLOW)])
# Pick a random node as the agent entry node
entry_node_index = random.randrange(len(graph.nodes))
entry_node_id, entry_node_data = list(graph.nodes(data=True))[entry_node_index]
graph.nodes[entry_node_id].clear()
graph.nodes[entry_node_id].update(
{'data': m.NodeInfo(services=[],
value=0,
properties=["breach_node"],
vulnerabilities=create_vulnerabilities_from_traffic_data(entry_node_id),
agent_installed=True,
firewall=firewall_conf,
reimagable=False)})
def create_node_data(node_id: m.NodeID):
return m.NodeInfo(
services=[m.ListeningService(name=port, allowedCredentials=assigned_passwords[(target_node, port)])
for (target_node, port) in assigned_passwords.keys()
if target_node == node_id
],
value=random.randint(0, 100),
vulnerabilities=create_vulnerabilities_from_traffic_data(node_id),
agent_installed=False,
firewall=firewall_conf
)
for node in list(graph.nodes):
if node != entry_node_id:
graph.nodes[node].clear()
graph.nodes[node].update({'data': create_node_data(node)})
return graph
def new_environment(n_servers_per_protocol: int):
"""Create a new simulation environment based on
a randomly generated network topology.
NOTE: the probabilities and parameter values used
here for the statistical generative model
    were arbitrarily picked. We recommend exploring different values for those parameters.
"""
traffic = generate_random_traffic_network(seed=None,
n_clients=50,
n_servers={
"SMB": n_servers_per_protocol,
"HTTP": n_servers_per_protocol,
"RDP": n_servers_per_protocol,
},
alpha=np.array([(1, 1), (0.2, 0.5)]),
beta=np.array([(1000, 10), (10, 100)]))
network = cyberbattle_model_from_traffic_graph(
traffic,
cached_rdp_password_probability=0.8,
cached_smb_password_probability=0.7,
cached_accessed_network_shares_probability=0.8,
cached_password_has_changed_probability=0.01,
probability_two_nodes_use_same_password_to_access_given_resource=0.9)
return m.Environment(network=network,
vulnerability_library=dict([]),
identifiers=ENV_IDENTIFIERS)
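# Illustrative usage sketch: build a small random environment and inspect its
# topology. A minimal example, assuming the returned `m.Environment` exposes the
# generated graph through its `network` attribute; the server count is an
# arbitrary choice. `_example_new_environment` is a hypothetical helper and is
# never called by the module itself.
def _example_new_environment():
    env = new_environment(n_servers_per_protocol=3)
    print(f"generated {len(env.network.nodes)} nodes")
    return env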
|
[
"cyberbattle.simulation.model.ListeningService",
"numpy.random.seed",
"random.randint",
"numpy.random.beta",
"numpy.float32",
"random.choice",
"cyberbattle.simulation.model.Identifiers",
"cyberbattle.simulation.model.FirewallRule",
"collections.defaultdict",
"networkx.stochastic_block_model",
"random.random",
"numpy.array",
"networkx.DiGraph"
] |
[((455, 709), 'cyberbattle.simulation.model.Identifiers', 'Identifiers', ([], {'properties': "['breach_node']", 'ports': "['SMB', 'HTTP', 'RDP']", 'local_vulnerabilities': "['ScanWindowsCredentialManagerForRDP', 'ScanWindowsExplorerRecentFiles',\n 'ScanWindowsCredentialManagerForSMB']", 'remote_vulnerabilities': "['Traceroute']"}), "(properties=['breach_node'], ports=['SMB', 'HTTP', 'RDP'],\n local_vulnerabilities=['ScanWindowsCredentialManagerForRDP',\n 'ScanWindowsExplorerRecentFiles', 'ScanWindowsCredentialManagerForSMB'],\n remote_vulnerabilities=['Traceroute'])\n", (466, 709), False, 'from cyberbattle.simulation.model import Identifiers, NodeID, CredentialID, PortName, FirewallConfiguration, FirewallRule, RulePermission\n'), ((974, 991), 'numpy.float32', 'np.float32', (['(0.001)'], {}), '(0.001)\n', (984, 991), True, 'import numpy as np\n'), ((1002, 1051), 'numpy.array', 'np.array', (['[(0.1, 0.3), (0.18, 0.09)]'], {'dtype': 'float'}), '([(0.1, 0.3), (0.18, 0.09)], dtype=float)\n', (1010, 1051), True, 'import numpy as np\n'), ((1062, 1107), 'numpy.array', 'np.array', (['[(100, 10), (10, 100)]'], {'dtype': 'float'}), '([(100, 10), (10, 100)], dtype=float)\n', (1070, 1107), True, 'import numpy as np\n'), ((1937, 1953), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1948, 1953), False, 'from collections import defaultdict\n'), ((2819, 2831), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2829, 2831), True, 'import networkx as nx\n'), ((5340, 5357), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5351, 5357), False, 'from collections import defaultdict\n'), ((2141, 2161), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2155, 2161), True, 'import numpy as np\n'), ((2190, 2234), 'numpy.random.beta', 'np.random.beta', ([], {'a': 'alpha', 'b': 'beta', 'size': '(2, 2)'}), '(a=alpha, b=beta, size=(2, 2))\n', (2204, 2234), True, 'import numpy as np\n'), ((2622, 2695), 'networkx.stochastic_block_model', 'nx.stochastic_block_model', ([], {'sizes': 'sizes', 'p': 'probs', 'directed': '(True)', 'seed': 'seed'}), '(sizes=sizes, p=probs, directed=True, seed=seed)\n', (2647, 2695), True, 'import networkx as nx\n'), ((5969, 6014), 'random.choice', 'random.choice', (['assigned_passwords[node, port]'], {}), '(assigned_passwords[node, port])\n', (5982, 6014), False, 'import random\n'), ((6107, 6122), 'random.random', 'random.random', ([], {}), '()\n', (6120, 6122), False, 'import random\n'), ((9818, 9859), 'cyberbattle.simulation.model.FirewallRule', 'FirewallRule', (['"""RDP"""', 'RulePermission.ALLOW'], {}), "('RDP', RulePermission.ALLOW)\n", (9830, 9859), False, 'from cyberbattle.simulation.model import Identifiers, NodeID, CredentialID, PortName, FirewallConfiguration, FirewallRule, RulePermission\n'), ((9861, 9902), 'cyberbattle.simulation.model.FirewallRule', 'FirewallRule', (['"""SMB"""', 'RulePermission.ALLOW'], {}), "('SMB', RulePermission.ALLOW)\n", (9873, 9902), False, 'from cyberbattle.simulation.model import Identifiers, NodeID, CredentialID, PortName, FirewallConfiguration, FirewallRule, RulePermission\n'), ((9914, 9955), 'cyberbattle.simulation.model.FirewallRule', 'FirewallRule', (['"""RDP"""', 'RulePermission.ALLOW'], {}), "('RDP', RulePermission.ALLOW)\n", (9926, 9955), False, 'from cyberbattle.simulation.model import Identifiers, NodeID, CredentialID, PortName, FirewallConfiguration, FirewallRule, RulePermission\n'), ((9957, 9998), 'cyberbattle.simulation.model.FirewallRule', 'FirewallRule', (['"""SMB"""', 
'RulePermission.ALLOW'], {}), "('SMB', RulePermission.ALLOW)\n", (9969, 9998), False, 'from cyberbattle.simulation.model import Identifiers, NodeID, CredentialID, PortName, FirewallConfiguration, FirewallRule, RulePermission\n'), ((12253, 12283), 'numpy.array', 'np.array', (['[(1, 1), (0.2, 0.5)]'], {}), '([(1, 1), (0.2, 0.5)])\n', (12261, 12283), True, 'import numpy as np\n'), ((12336, 12369), 'numpy.array', 'np.array', (['[(1000, 10), (10, 100)]'], {}), '([(1000, 10), (10, 100)])\n', (12344, 12369), True, 'import numpy as np\n'), ((2493, 2520), 'numpy.float32', 'np.float32', (['(1.0 - tolerance)'], {}), '(1.0 - tolerance)\n', (2503, 2520), True, 'import numpy as np\n'), ((6280, 6295), 'random.random', 'random.random', ([], {}), '()\n', (6293, 6295), False, 'import random\n'), ((11007, 11029), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (11021, 11029), False, 'import random\n'), ((10751, 10843), 'cyberbattle.simulation.model.ListeningService', 'm.ListeningService', ([], {'name': 'port', 'allowedCredentials': 'assigned_passwords[target_node, port]'}), '(name=port, allowedCredentials=assigned_passwords[\n target_node, port])\n', (10769, 10843), True, 'from cyberbattle.simulation import model as m\n'), ((8027, 8042), 'random.random', 'random.random', ([], {}), '()\n', (8040, 8042), False, 'import random\n'), ((9397, 9412), 'random.random', 'random.random', ([], {}), '()\n', (9410, 9412), False, 'import random\n'), ((7372, 7387), 'random.random', 'random.random', ([], {}), '()\n', (7385, 7387), False, 'import random\n'), ((8779, 8794), 'random.random', 'random.random', ([], {}), '()\n', (8792, 8794), False, 'import random\n')]
|
"""The :func:`deephyper.nas.run.horovod.run` function is used to evaluate a deep neural network by enabling data-parallelism with Horovod to the :func:`deephyper.nas.run.alpha.run` function. This function will automatically apply the linear scaling rule to the learning rate and batch size given the current number of ranks (i.e., the initial learning rate and batch size are scaled by the number of ranks).
"""
import os
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.contrib.callbacks import import_callback
import horovod.tensorflow.keras as hvd
import deephyper.nas.trainer._arch as a
from deephyper.nas.trainer import HorovodTrainer
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
save_history,
setup_data,
get_search_space,
)
logger = logging.getLogger(__name__)
# Default callbacks parameters
default_callbacks_config = {
"EarlyStopping": dict(
monitor="val_loss", min_delta=0, mode="min", verbose=0, patience=0
),
"ModelCheckpoint": dict(
monitor="val_loss",
mode="min",
save_best_only=True,
verbose=1,
filepath="model.h5",
save_weights_only=False,
),
"TensorBoard": dict(
log_dir="",
histogram_freq=0,
batch_size=32,
write_graph=False,
write_grads=False,
write_images=False,
update_freq="epoch",
),
"CSVLogger": dict(filename="training.csv", append=True),
"CSVExtendedLogger": dict(filename="training.csv", append=True),
"TimeStopping": dict(),
"ReduceLROnPlateau": dict(patience=5, verbose=0),
}
# Names of callbacks reserved for the root node
hvd_root_cb = ["ModelCheckpoint", "TensorBoard", "CSVLogger", "CSVExtendedLogger"]
def run_horovod(config: dict) -> float:
hvd.init()
# Threading configuration
if os.environ.get("OMP_NUM_THREADS", None) is not None:
logger.debug(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")
num_intra = int(os.environ.get("OMP_NUM_THREADS"))
tf.config.threading.set_intra_op_parallelism_threads(num_intra)
tf.config.threading.set_inter_op_parallelism_threads(2)
if os.environ.get("CUDA_VISIBLE_DEVICES") is not None:
devices = os.environ.get("CUDA_VISIBLE_DEVICES").split(",")
os.environ["CUDA_VISIBLE_DEVICES"] = devices[hvd.rank()]
config["seed"]
seed = config["seed"]
if seed is not None:
np.random.seed(seed)
tf.random.set_seed(seed)
load_config(config)
# Scale batch size and learning rate according to the number of ranks
initial_lr = config[a.hyperparameters][a.learning_rate]
batch_size = config[a.hyperparameters][a.batch_size] * hvd.size()
learning_rate = config[a.hyperparameters][a.learning_rate] * hvd.size()
logger.info(
f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} "
)
logger.info(
f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} "
)
config[a.hyperparameters][a.batch_size] = batch_size
config[a.hyperparameters][a.learning_rate] = learning_rate
input_shape, output_shape = setup_data(config)
search_space = get_search_space(config, input_shape, output_shape, seed=seed)
# Initialize Horovod
model_created = False
try:
model = search_space.sample(config["arch_seq"])
model_created = True
except:
logger.info("Error: Model creation failed...")
logger.info(traceback.format_exc())
if model_created:
# Setup callbacks only
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
#! initial_lr argument is not available in horovod==0.19.0
hvd.callbacks.LearningRateWarmupCallback(
warmup_epochs=5, verbose=0, initial_lr=initial_lr
),
]
        cb_requires_valid = False  # Callbacks require validation data
callbacks_config = config[a.hyperparameters].get(a.callbacks, {})
if callbacks_config is not None:
for cb_name, cb_conf in callbacks_config.items():
if cb_name in default_callbacks_config:
                    # cb_name in hvd_root_cb implies hvd.rank() == 0
if not (cb_name in hvd_root_cb) or hvd.rank() == 0:
default_callbacks_config[cb_name].update(cb_conf)
# Import and create corresponding callback
Callback = import_callback(cb_name)
callbacks.append(Callback(**default_callbacks_config[cb_name]))
if cb_name in ["EarlyStopping"]:
cb_requires_valid = "val" in cb_conf["monitor"].split("_")
else:
logger.error(f"'{cb_name}' is not an accepted callback!")
trainer = HorovodTrainer(config=config, model=model)
trainer.callbacks.extend(callbacks)
last_only, with_pred = preproc_trainer(config)
last_only = last_only and not cb_requires_valid
history = trainer.train(with_pred=with_pred, last_only=last_only)
# save history
if hvd.rank() == 0:
save_history(config.get("log_dir", None), history, config)
result = compute_objective(config["objective"], history)
else:
# penalising actions if model cannot be created
result = -1
if result < -10:
result = -10
return result
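# Illustrative sketch of the linear scaling rule applied in `run_horovod` above.
# The numbers are arbitrary assumptions, not defaults of this module: with 4
# Horovod ranks, an initial learning rate of 1e-3 and batch size of 64 become
# 4e-3 and 256 (both are multiplied by hvd.size()).
def _example_linear_scaling(n_ranks=4, base_lr=1e-3, base_batch_size=64):
    return base_lr * n_ranks, base_batch_size * n_ranks  # -> (0.004, 256)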
|
[
"tensorflow.random.set_seed",
"numpy.random.seed",
"tensorflow.config.threading.set_intra_op_parallelism_threads",
"horovod.tensorflow.keras.callbacks.MetricAverageCallback",
"deephyper.nas.run._util.get_search_space",
"horovod.tensorflow.keras.callbacks.LearningRateWarmupCallback",
"deephyper.nas.run._util.preproc_trainer",
"traceback.format_exc",
"horovod.tensorflow.keras.callbacks.BroadcastGlobalVariablesCallback",
"tensorflow.config.threading.set_inter_op_parallelism_threads",
"deephyper.nas.trainer.HorovodTrainer",
"horovod.tensorflow.keras.rank",
"horovod.tensorflow.keras.init",
"deephyper.contrib.callbacks.import_callback",
"horovod.tensorflow.keras.size",
"deephyper.nas.run._util.compute_objective",
"os.environ.get",
"deephyper.nas.run._util.setup_data",
"logging.getLogger",
"deephyper.nas.run._util.load_config"
] |
[((851, 878), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (868, 878), False, 'import logging\n'), ((1841, 1851), 'horovod.tensorflow.keras.init', 'hvd.init', ([], {}), '()\n', (1849, 1851), True, 'import horovod.tensorflow.keras as hvd\n'), ((2549, 2568), 'deephyper.nas.run._util.load_config', 'load_config', (['config'], {}), '(config)\n', (2560, 2568), False, 'from deephyper.nas.run._util import compute_objective, load_config, preproc_trainer, save_history, setup_data, get_search_space\n'), ((3250, 3268), 'deephyper.nas.run._util.setup_data', 'setup_data', (['config'], {}), '(config)\n', (3260, 3268), False, 'from deephyper.nas.run._util import compute_objective, load_config, preproc_trainer, save_history, setup_data, get_search_space\n'), ((3289, 3351), 'deephyper.nas.run._util.get_search_space', 'get_search_space', (['config', 'input_shape', 'output_shape'], {'seed': 'seed'}), '(config, input_shape, output_shape, seed=seed)\n', (3305, 3351), False, 'from deephyper.nas.run._util import compute_objective, load_config, preproc_trainer, save_history, setup_data, get_search_space\n'), ((1890, 1929), 'os.environ.get', 'os.environ.get', (['"""OMP_NUM_THREADS"""', 'None'], {}), "('OMP_NUM_THREADS', None)\n", (1904, 1929), False, 'import os\n'), ((2090, 2153), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['num_intra'], {}), '(num_intra)\n', (2142, 2153), True, 'import tensorflow as tf\n'), ((2162, 2217), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['(2)'], {}), '(2)\n', (2214, 2217), True, 'import tensorflow as tf\n'), ((2226, 2264), 'os.environ.get', 'os.environ.get', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (2240, 2264), False, 'import os\n'), ((2490, 2510), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2504, 2510), True, 'import numpy as np\n'), ((2519, 2543), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (2537, 2543), True, 'import tensorflow as tf\n'), ((2763, 2773), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (2771, 2773), True, 'import horovod.tensorflow.keras as hvd\n'), ((2839, 2849), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (2847, 2849), True, 'import horovod.tensorflow.keras as hvd\n'), ((5802, 5844), 'deephyper.nas.trainer.HorovodTrainer', 'HorovodTrainer', ([], {'config': 'config', 'model': 'model'}), '(config=config, model=model)\n', (5816, 5844), False, 'from deephyper.nas.trainer import HorovodTrainer\n'), ((5922, 5945), 'deephyper.nas.run._util.preproc_trainer', 'preproc_trainer', (['config'], {}), '(config)\n', (5937, 5945), False, 'from deephyper.nas.run._util import compute_objective, load_config, preproc_trainer, save_history, setup_data, get_search_space\n'), ((6218, 6265), 'deephyper.nas.run._util.compute_objective', 'compute_objective', (["config['objective']", 'history'], {}), "(config['objective'], history)\n", (6235, 6265), False, 'from deephyper.nas.run._util import compute_objective, load_config, preproc_trainer, save_history, setup_data, get_search_space\n'), ((2047, 2080), 'os.environ.get', 'os.environ.get', (['"""OMP_NUM_THREADS"""'], {}), "('OMP_NUM_THREADS')\n", (2061, 2080), False, 'import os\n'), ((2399, 2409), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (2407, 2409), True, 'import horovod.tensorflow.keras as hvd\n'), ((3965, 4014), 
'horovod.tensorflow.keras.callbacks.BroadcastGlobalVariablesCallback', 'hvd.callbacks.BroadcastGlobalVariablesCallback', (['(0)'], {}), '(0)\n', (4011, 4014), True, 'import horovod.tensorflow.keras as hvd\n'), ((4266, 4303), 'horovod.tensorflow.keras.callbacks.MetricAverageCallback', 'hvd.callbacks.MetricAverageCallback', ([], {}), '()\n', (4301, 4303), True, 'import horovod.tensorflow.keras as hvd\n'), ((4668, 4763), 'horovod.tensorflow.keras.callbacks.LearningRateWarmupCallback', 'hvd.callbacks.LearningRateWarmupCallback', ([], {'warmup_epochs': '(5)', 'verbose': '(0)', 'initial_lr': 'initial_lr'}), '(warmup_epochs=5, verbose=0,\n initial_lr=initial_lr)\n', (4708, 4763), True, 'import horovod.tensorflow.keras as hvd\n'), ((6112, 6122), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (6120, 6122), True, 'import horovod.tensorflow.keras as hvd\n'), ((2296, 2334), 'os.environ.get', 'os.environ.get', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (2310, 2334), False, 'import os\n'), ((3586, 3608), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3606, 3608), False, 'import traceback\n'), ((1986, 2019), 'os.environ.get', 'os.environ.get', (['"""OMP_NUM_THREADS"""'], {}), "('OMP_NUM_THREADS')\n", (2000, 2019), False, 'import os\n'), ((5425, 5449), 'deephyper.contrib.callbacks.import_callback', 'import_callback', (['cb_name'], {}), '(cb_name)\n', (5440, 5449), False, 'from deephyper.contrib.callbacks import import_callback\n'), ((5231, 5241), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (5239, 5241), True, 'import horovod.tensorflow.keras as hvd\n')]
|
import numpy as np
def distances_to_point(lat_point, lon_point, lats, lons):
"""Method to calculate distances between a project and an array of lats and lons
:Parameters:
lat_project: float
Project latitude
lon_project: float
Project longitude
lats: np.array
Latitudes from which to calculate distances
lons: np.array
Longitudes from which to calculate distances
:Returns:
    out: np.array of distances in kilometres
"""
lat_point = np.deg2rad(lat_point)
lon_point = np.deg2rad(lon_point)
avg_earth_radius = 6373 # in km
lats = np.deg2rad(lats)
lons = np.deg2rad(lons)
lat = lat_point - lats
lon = lon_point - lons
d = np.sin(lat * 0.5) ** 2 + np.cos(lat_point) * np.cos(lats) * np.sin(lon * 0.5) ** 2
dist = 2 * avg_earth_radius * np.arcsin(np.sqrt(d))
return dist
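# Illustrative usage sketch: great-circle distances from Berlin to Paris and
# Madrid. The coordinates are approximate and purely for demonstration; the
# expected results are roughly 880 km and 1870 km.
if __name__ == "__main__":
    example_lats = np.array([48.86, 40.42])  # Paris, Madrid
    example_lons = np.array([2.35, -3.70])
    print(distances_to_point(52.52, 13.40, example_lats, example_lons))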
|
[
"numpy.sin",
"numpy.sqrt",
"numpy.cos",
"numpy.deg2rad"
] |
[((523, 544), 'numpy.deg2rad', 'np.deg2rad', (['lat_point'], {}), '(lat_point)\n', (533, 544), True, 'import numpy as np\n'), ((562, 583), 'numpy.deg2rad', 'np.deg2rad', (['lon_point'], {}), '(lon_point)\n', (572, 583), True, 'import numpy as np\n'), ((634, 650), 'numpy.deg2rad', 'np.deg2rad', (['lats'], {}), '(lats)\n', (644, 650), True, 'import numpy as np\n'), ((663, 679), 'numpy.deg2rad', 'np.deg2rad', (['lons'], {}), '(lons)\n', (673, 679), True, 'import numpy as np\n'), ((745, 762), 'numpy.sin', 'np.sin', (['(lat * 0.5)'], {}), '(lat * 0.5)\n', (751, 762), True, 'import numpy as np\n'), ((873, 883), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (880, 883), True, 'import numpy as np\n'), ((770, 787), 'numpy.cos', 'np.cos', (['lat_point'], {}), '(lat_point)\n', (776, 787), True, 'import numpy as np\n'), ((790, 802), 'numpy.cos', 'np.cos', (['lats'], {}), '(lats)\n', (796, 802), True, 'import numpy as np\n'), ((805, 822), 'numpy.sin', 'np.sin', (['(lon * 0.5)'], {}), '(lon * 0.5)\n', (811, 822), True, 'import numpy as np\n')]
|
# array module example
import sample
import array
# PETSc.Sys.Print is used below for output; assuming it comes from petsc4py
from petsc4py import PETSc
a = array.array('d', [1, -3, 4, 7, 2, 0])
PETSc.Sys.Print(a)
sample.clip(a, 1, 4, a)
PETSc.Sys.Print(a)
# numpy example
import numpy
b = numpy.random.uniform(-10, 10, size=1000000)
PETSc.Sys.Print(b)
c = numpy.zeros_like(b)
PETSc.Sys.Print(c)
sample.clip(b, -5, 5, c)
PETSc.Sys.Print(c)
PETSc.Sys.Print(min(c))
PETSc.Sys.Print(max(c))
# Timing test
from timeit import timeit
PETSc.Sys.Print('numpy.clip')
PETSc.Sys.Print(timeit('numpy.clip(b,-5,5,c)', 'from __main__ import b,c,numpy', number=1000))
PETSc.Sys.Print('sample.clip')
PETSc.Sys.Print(timeit('sample.clip(b,-5,5,c)', 'from __main__ import b,c,sample', number=1000))
PETSc.Sys.Print('sample.clip_fast')
PETSc.Sys.Print(timeit('sample.clip_fast(b,-5,5,c)', 'from __main__ import b,c,sample', number=1000))
# 2D test
d = numpy.random.uniform(-10, 10, size=(1000, 1000))
PETSc.Sys.Print(d)
sample.clip2d(d, -5, 5, d)
PETSc.Sys.Print(d)
|
[
"numpy.random.uniform",
"numpy.zeros_like",
"sample.clip2d",
"array.array",
"timeit.timeit",
"sample.clip"
] |
[((59, 96), 'array.array', 'array.array', (['"""d"""', '[1, -3, 4, 7, 2, 0]'], {}), "('d', [1, -3, 4, 7, 2, 0])\n", (70, 96), False, 'import array\n'), ((118, 141), 'sample.clip', 'sample.clip', (['a', '(1)', '(4)', 'a'], {}), '(a, 1, 4, a)\n', (129, 141), False, 'import sample\n'), ((202, 245), 'numpy.random.uniform', 'numpy.random.uniform', (['(-10)', '(10)'], {'size': '(1000000)'}), '(-10, 10, size=1000000)\n', (222, 245), False, 'import numpy\n'), ((271, 290), 'numpy.zeros_like', 'numpy.zeros_like', (['b'], {}), '(b)\n', (287, 290), False, 'import numpy\n'), ((312, 336), 'sample.clip', 'sample.clip', (['b', '(-5)', '(5)', 'c'], {}), '(b, -5, 5, c)\n', (323, 336), False, 'import sample\n'), ((870, 918), 'numpy.random.uniform', 'numpy.random.uniform', (['(-10)', '(10)'], {'size': '(1000, 1000)'}), '(-10, 10, size=(1000, 1000))\n', (890, 918), False, 'import numpy\n'), ((940, 966), 'sample.clip2d', 'sample.clip2d', (['d', '(-5)', '(5)', 'd'], {}), '(d, -5, 5, d)\n', (953, 966), False, 'import sample\n'), ((501, 578), 'timeit.timeit', 'timeit', (['"""numpy.clip(b,-5,5,c)"""', '"""from __main__ import b,c,numpy"""'], {'number': '(1000)'}), "('numpy.clip(b,-5,5,c)', 'from __main__ import b,c,numpy', number=1000)\n", (507, 578), False, 'from timeit import timeit\n'), ((629, 708), 'timeit.timeit', 'timeit', (['"""sample.clip(b,-5,5,c)"""', '"""from __main__ import b,c,sample"""'], {'number': '(1000)'}), "('sample.clip(b,-5,5,c)', 'from __main__ import b,c,sample', number=1000)\n", (635, 708), False, 'from timeit import timeit\n'), ((766, 854), 'timeit.timeit', 'timeit', (['"""sample.clip_fast(b,-5,5,c)"""', '"""from __main__ import b,c,sample"""'], {'number': '(1000)'}), "('sample.clip_fast(b,-5,5,c)', 'from __main__ import b,c,sample',\n number=1000)\n", (772, 854), False, 'from timeit import timeit\n')]
|
from random import randint, choice
from math import sin, cos, radians, exp, sqrt, fabs
import pygame
from pygame.sprite import Sprite
# from pygame.math import vec2d
from utils import SIM_COLORS, SCALE, SIGN
from utils import euclidean_distance, vec2d, Rotate2D
import numpy as np
class Agent(Sprite):
""" A agent sprite that bounces off walls and changes its
direction from time to time.
"""
# __slots__ = ('id', 'screen', 'game', 'field', 'image', \
# 'vmax', 'position', 'velocity', 'acceleration'\
# 'radius', 'relaxation_time', 'direction', 'neighbors'\
# 'forces, force_factors', 'waypoints')
def __init__(self, agent_id, screen, game, agent_image,
field, init_position, init_direction, max_speed, waypoints,
radius = 0.2, relaxation_time = 0.5, atype = 0):
""" Create a new Agent.
screen:
The screen on which the agent lives (must be a
pygame Surface object, such as pygame.display)
game:
The game object that holds information about the
game world.
agent_image:
                Image representing the agent in the simulation
field:
A Rect specifying the 'playing field' boundaries.
The agent will bounce off the 'walls' of this
field.
init_position:
A vec2d or a pair specifying the initial position
of the agent on the screen in metres
init_direction:
A vec2d or a pair specifying the initial direction
of the agent. Must have an angle that is a
                multiple of 45 degrees.
            max_speed:
maximum agent speed, in (m/s)
waypoints:
a list of waypoints for the agent to follow
"""
Sprite.__init__(self)
self._id = agent_id
self.screen = screen
self.game = game
self._vmax = max_speed
self._field = field
self._radius = radius
self._relaxation_time = relaxation_time
self._type = atype
# the current image representing the agent
self._image = agent_image
# A vector specifying the agent's position on the screen
self._position = vec2d(init_position)
self.prev_pos = vec2d(self._position)
# The direction is a normalized vector
self._direction = vec2d(init_direction).normalized()
self._velocity = vec2d(init_direction)
self._acceleration = vec2d(0.0, 0.0)
self._waypoints = waypoints
self._waypoint_index = 0
self._neighbors = []
# # default no forces
self._social_force = vec2d(0.0, 0.0)
self._desired_force = vec2d(0.0, 0.0)
self._obstacle_force = vec2d(0.0, 0.0)
self._lookahead_force = vec2d(0.0, 0.0)
def draw(self):
"""
Draw the agent onto the screen that is set in the constructor
"""
x, y = int(self._position.x*SCALE), int(self._position.y*SCALE)
r = int(self._radius*SCALE)
# poly = [(x-r/2, y), (x, y-40), (x+r/2, y), (x, y+r/2)]
poly = np.array([[x-r/2, y], [x, y-30], [x+r/2, y], [x, y+r/2]])
rpoly = Rotate2D(poly, (x,y), radians(self._direction.get_angle()))
# self.draw_rect = self._image.get_rect().move(
# self._position.x - self._image_w / 2,
# self._position.y - self._image_h / 2)
# self.screen.blit(self._image, self.draw_rect)
# agent representation
if self._type == 0:
pygame.draw.circle(self.screen, SIM_COLORS['yellow'], (x, y), r, int(0))
# pygame.draw.ellipse(self.screen, SIM_COLORS['yellow'], (x, y, 20, 50), int(0))
elif self._type == 1:
pygame.draw.circle(self.screen, SIM_COLORS['aqua'], (x, y), r, int(0))
# pygame.draw.polygon(self.screen, SIM_COLORS['white'], rpoly, int(0))
# pygame.draw.ellipse(self.screen, SIM_COLORS['white'], self._get_ellipse_params(x, y, r, r/2), int(0))
# draw the forces on the agent
self.draw_forces()
def draw_forces(self):
# desired force
pygame.draw.line(self.screen, SIM_COLORS['red'],
((self._position.x*SCALE), (self._position.y*SCALE)),
((self._position.x*SCALE) + self.desired_force[0]*SCALE, (self._position.y*SCALE) + self.desired_force[1]*SCALE), 2)
# social force
pygame.draw.line(self.screen, SIM_COLORS['lime'],
((self._position.x*SCALE), (self._position.y*SCALE)),
((self._position.x*SCALE) + self.social_force[0]*SCALE, (self._position.y*SCALE) + self.social_force[1]*SCALE), 2)
# obstacle force
pygame.draw.line(self.screen, SIM_COLORS['blue'],
((self._position.x*SCALE), (self._position.y*SCALE)),
((self._position.x*SCALE) + self.obstacle_force[0]*SCALE, (self._position.y*SCALE) + self.obstacle_force[1]*SCALE), 2)
def reached_waypoint(self, waypoint):
""" Check if the agent has reached the given waypoint so we
advance to the next one. Reaching means being in the
waypoint circle
"""
if euclidean_distance((self._position.x, self._position.y), waypoint.position) <= waypoint.radius:
return True
else:
return False
def update(self, time_passed):
# cim = Image.open('assets/blueagent.bmp')
# rim = cim.rotate(self._direction.get_angle(), expand=1)
# self._image = pygame.image.fromstring(rim.tostring(), rim.size, rim.mode)
# When the image is rotated, its size is changed.
# self._image_w, self._image_h = self._image.get_size()
# bounds_rect = self.screen.get_rect().inflate(-self._image_w, -self._image_h)
bounds_rect = self.game.field_box.get_internal_rect()
self._direction = vec2d(self._velocity.x, -self._velocity.y)
if self._position.x*SCALE < bounds_rect.left:
self._position.x = bounds_rect.left/SCALE
self._direction.x *= -1
elif self._position.x*SCALE > bounds_rect.right:
self._position.x = bounds_rect.right/SCALE
self._direction.x *= -1
elif self._position.y*SCALE < bounds_rect.top:
self._position.y = bounds_rect.top/SCALE
self._direction.y *= -1
elif self._position.y*SCALE > bounds_rect.bottom:
self._position.y = bounds_rect.bottom/SCALE
self._direction.y *= -1
def social_move(self, time_passed):
# force is computed over neighbors with 0.5m radius (= 0.5*100 px)
self._neighbors = self.game.get_agent_neighbors(self, (0.5*SCALE))
# compute the forces
self._social_force = self._compute_social_force()
self._desired_force = self._compute_desired_force()
self._obstacle_force = self._compute_obstacle_force()
self._lookahead_force = self._compute_lookahead_force()
# =================================================================
# Properties and how to compute them
# =================================================================
@property
def social_force(self):
return self._social_force
@property
def obstacle_force(self):
return self._obstacle_force
@property
def desired_force(self):
return self._desired_force
@property
def lookahead_force(self):
return self._lookahead_force
@property
def id(self):
return self._id
@property
def position(self):
return self._position
@position.setter
def position(self, newpos):
self._position = newpos
@property
def velocity(self):
return self._velocity
@property
def acceleration(self):
return self._acceleration
@property
def vmax(self):
return self._vmax
@property
def relaxation_time(self):
return self._relaxation_time
@property
def next_waypoint(self):
return self._waypoints[self._waypoint_index]
def _compute_social_force(self):
# variables according to Moussaid-Helbing paper
lambda_importance = 2.0
gamma = 0.35
n, n_prime = 2, 3
social_force = vec2d(0, 0)
for neighbor in self._neighbors:
# no social force with oneself
if neighbor.id == self.id:
continue
else:
# position difference
diff = neighbor.position - self.position
diff_direction = diff.normalized()
# velocity difference
vel_diff = self.velocity - neighbor.velocity
# interaction direction t_ij
interaction_vector = lambda_importance * vel_diff + diff_direction
if (interaction_vector.get_length()) == 0:
                    continue
interaction_direction = interaction_vector / interaction_vector.get_length()
# theta (angle between interaction direction and position difference vector)
theta = interaction_direction.get_angle_between(diff_direction)
# model parameter B = gamma * ||D||
B = gamma * interaction_vector.get_length()
theta_rad = radians(theta)
force_vel_amount = -exp(-diff.get_length() / B - (n_prime * B * theta_rad)**2)
force_angle_amount = (-1 * SIGN(theta)) * exp(-diff.get_length() / B - (n * B * theta_rad)**2)
force_vel = force_vel_amount * interaction_direction
force_angle = force_angle_amount * interaction_direction.left_normal_vector()
# social_force[0] += force_vel.x + force_angle.x
# social_force[1] += force_vel.y + force_angle.y
social_force += force_vel + force_angle
return social_force
def _compute_desired_force(self):
if self.reached_waypoint(self.next_waypoint):
self._waypoint_index += 1
# if all waypoints are covered, go back to the beginning
# NOTE - this does not take into account birth and death waypoints yet
if self._waypoint_index == len(self._waypoints):
self._waypoint_index = 0
wp_force = self.next_waypoint.force_towards(self)
desired_force = wp_force
return desired_force
def _compute_obstacle_force(self):
obstacle_force = vec2d(0.0, 0.0)
# if there are no obstacles, there is no obstacle force
if len(self.game.obstacles) == 0:
return obstacle_force
# find the closest obstacle and the closest point on it
closest_distance, closest_point = self.game.obstacles[0].agent_distance(self)
for obstacle in self.game.obstacles:
other_distance, other_point = obstacle.agent_distance(self)
if other_distance < closest_distance:
closest_distance, closest_point = other_distance, other_point
distance = closest_distance - self._radius
if closest_distance > self._radius*5:
return obstacle_force
force_amount = exp(-distance)
min_diffn = (self._position - vec2d(closest_point)).normalized()
obstacle_force.x = (force_amount * min_diffn).x
obstacle_force.y = (force_amount * min_diffn).y
return obstacle_force
def _compute_lookahead_force(self):
lookahead_force = vec2d(0, 0)
return lookahead_force
def _get_ellipse_params(self, x, y, w, h):
return ((x-w/2), (y-h/2), w, h)
|
[
"utils.SIGN",
"math.exp",
"pygame.draw.line",
"utils.vec2d",
"math.radians",
"utils.euclidean_distance",
"numpy.array",
"pygame.sprite.Sprite.__init__"
] |
[((2015, 2036), 'pygame.sprite.Sprite.__init__', 'Sprite.__init__', (['self'], {}), '(self)\n', (2030, 2036), False, 'from pygame.sprite import Sprite\n'), ((2487, 2507), 'utils.vec2d', 'vec2d', (['init_position'], {}), '(init_position)\n', (2492, 2507), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2532, 2553), 'utils.vec2d', 'vec2d', (['self._position'], {}), '(self._position)\n', (2537, 2553), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2688, 2709), 'utils.vec2d', 'vec2d', (['init_direction'], {}), '(init_direction)\n', (2693, 2709), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2739, 2754), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2744, 2754), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2914, 2929), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2919, 2929), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2960, 2975), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (2965, 2975), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((3007, 3022), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (3012, 3022), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((3055, 3070), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (3060, 3070), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((3377, 3448), 'numpy.array', 'np.array', (['[[x - r / 2, y], [x, y - 30], [x + r / 2, y], [x, y + r / 2]]'], {}), '([[x - r / 2, y], [x, y - 30], [x + r / 2, y], [x, y + r / 2]])\n', (3385, 3448), True, 'import numpy as np\n'), ((4407, 4644), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', "SIM_COLORS['red']", '(self._position.x * SCALE, self._position.y * SCALE)', '(self._position.x * SCALE + self.desired_force[0] * SCALE, self._position.y *\n SCALE + self.desired_force[1] * SCALE)', '(2)'], {}), "(self.screen, SIM_COLORS['red'], (self._position.x * SCALE,\n self._position.y * SCALE), (self._position.x * SCALE + self.\n desired_force[0] * SCALE, self._position.y * SCALE + self.desired_force\n [1] * SCALE), 2)\n", (4423, 4644), False, 'import pygame\n'), ((4691, 4927), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', "SIM_COLORS['lime']", '(self._position.x * SCALE, self._position.y * SCALE)', '(self._position.x * SCALE + self.social_force[0] * SCALE, self._position.y *\n SCALE + self.social_force[1] * SCALE)', '(2)'], {}), "(self.screen, SIM_COLORS['lime'], (self._position.x * SCALE,\n self._position.y * SCALE), (self._position.x * SCALE + self.\n social_force[0] * SCALE, self._position.y * SCALE + self.social_force[1\n ] * SCALE), 2)\n", (4707, 4927), False, 'import pygame\n'), ((4976, 5216), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', "SIM_COLORS['blue']", '(self._position.x * SCALE, self._position.y * SCALE)', '(self._position.x * SCALE + self.obstacle_force[0] * SCALE, self._position.\n y * SCALE + self.obstacle_force[1] * SCALE)', '(2)'], {}), "(self.screen, SIM_COLORS['blue'], (self._position.x * SCALE,\n self._position.y * SCALE), (self._position.x * SCALE + self.\n obstacle_force[0] * SCALE, self._position.y * SCALE + self.\n obstacle_force[1] * SCALE), 2)\n", (4992, 5216), False, 'import pygame\n'), ((6178, 6220), 'utils.vec2d', 'vec2d', (['self._velocity.x', '(-self._velocity.y)'], {}), '(self._velocity.x, -self._velocity.y)\n', (6183, 6220), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), 
((8589, 8600), 'utils.vec2d', 'vec2d', (['(0)', '(0)'], {}), '(0, 0)\n', (8594, 8600), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((10843, 10858), 'utils.vec2d', 'vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (10848, 10858), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((11562, 11576), 'math.exp', 'exp', (['(-distance)'], {}), '(-distance)\n', (11565, 11576), False, 'from math import sin, cos, radians, exp, sqrt, fabs\n'), ((11862, 11873), 'utils.vec2d', 'vec2d', (['(0)', '(0)'], {}), '(0, 0)\n', (11867, 11873), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((5462, 5537), 'utils.euclidean_distance', 'euclidean_distance', (['(self._position.x, self._position.y)', 'waypoint.position'], {}), '((self._position.x, self._position.y), waypoint.position)\n', (5480, 5537), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((2628, 2649), 'utils.vec2d', 'vec2d', (['init_direction'], {}), '(init_direction)\n', (2633, 2649), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((9671, 9685), 'math.radians', 'radians', (['theta'], {}), '(theta)\n', (9678, 9685), False, 'from math import sin, cos, radians, exp, sqrt, fabs\n'), ((11615, 11635), 'utils.vec2d', 'vec2d', (['closest_point'], {}), '(closest_point)\n', (11620, 11635), False, 'from utils import euclidean_distance, vec2d, Rotate2D\n'), ((9824, 9835), 'utils.SIGN', 'SIGN', (['theta'], {}), '(theta)\n', (9828, 9835), False, 'from utils import SIM_COLORS, SCALE, SIGN\n')]
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DLA primitives and full network models.
"""
import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.functions as F
from nnabla.initializer import UniformInitializer, ConstantInitializer, NormalInitializer, calc_normal_std_he_forward, calc_normal_std_he_backward
from nnabla.logger import logger
from nnabla.utils.save import save
from nnabla.utils.nnp_graph import NnpNetworkPass
from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer
RNG = np.random.RandomState(214)
def pf_depthwise_deconvolution(x, kernel, stride=(1, 1), pad=(1, 1), dilation=(2, 2), with_bias=False, w_init=None, b_init=None, channel_last=False):
out_map = x.shape[3] if channel_last else x.shape[1]
if channel_last:
w_init = np.transpose(w_init, (0, 2, 3, 1))
x = PF.deconvolution(
x,
out_map,
kernel,
pad=pad,
stride=stride,
dilation=dilation,
w_init=w_init,
with_bias=with_bias,
b_init=b_init,
group=out_map,
channel_last=channel_last
)
return x
def pf_affine(r, num_classes=1000, channel_last=False):
r = PF.convolution(r, num_classes, (1, 1), channel_last=channel_last,
w_init=NormalInitializer(sigma=0.01, rng=RNG), name='fc')
return F.reshape(r, (r.shape[0], -1), inplace=False)
def pf_convolution(x, ochannels, kernel, pad=None, stride=(1, 1), dilation=None, with_bias=False, w_init=None, b_init=None, channel_last=False):
return PF.convolution(x, ochannels, kernel, stride=stride, pad=pad, dilation=dilation,
with_bias=with_bias, w_init=w_init, b_init=b_init, channel_last=channel_last)
def shortcut(x, ochannels, stride, shortcut_type, test, channel_last=False):
axes = 3 if channel_last else 1
ichannels = x.shape[axes]
use_conv = shortcut_type.lower() == 'c'
if ichannels != ochannels:
assert (ichannels * 2 == ochannels) or (ichannels * 4 == ochannels)
if shortcut_type.lower() == 'b':
use_conv = True
if use_conv:
# Convolution does everything.
# Matching channels, striding.
with nn.parameter_scope("shortcut_conv"):
x = PF.convolution(x, ochannels, (1, 1),
stride=(stride, stride), with_bias=False, channel_last=channel_last)
x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
else:
# shortcut block is slightly different for dla
if stride != 1:
# Stride
x = F.max_pooling(
x, kernel=(
stride, stride), stride=(
stride, stride), channel_last=channel_last)
if ichannels != ochannels:
x = PF.convolution(
x, ochannels, (1, 1), stride=(
1, 1), with_bias=False, channel_last=channel_last)
x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
return x
def basicblock(x, residual, ochannels, stride, test, channel_last=False):
def bn(h):
axes = [3 if channel_last else 1]
return PF.batch_normalization(h, axes=axes, batch_stat=not test)
if residual is None:
residual = x
with nn.parameter_scope("basicblock1"):
h = F.relu(bn(PF.convolution(x, ochannels, (3, 3), stride=(
stride, stride), pad=(1, 1), with_bias=False, channel_last=channel_last)))
with nn.parameter_scope("basicblock2"):
h = bn(
PF.convolution(
h, ochannels, (3, 3), pad=(
1, 1), with_bias=False, channel_last=channel_last))
return F.relu(F.add2(h, residual))
def bottleneck(x, ochannels, shortcut_type, stride, test, channel_last=False):
def bn(h):
axes = [3 if channel_last else 1]
return PF.batch_normalization(h, axes=axes, batch_stat=not test)
assert ochannels % 4 == 0
    hchannels = ochannels // 4  # integer division keeps the channel count an int
with nn.parameter_scope("bottleneck1"):
h = F.relu(
bn(PF.convolution(x, hchannels, (1, 1),
with_bias=False, channel_last=channel_last))
)
with nn.parameter_scope("bottleneck2"):
h = F.relu(
bn(PF.convolution(h, hchannels, (3, 3), pad=(1, 1),
stride=stride, with_bias=False, channel_last=channel_last)))
with nn.parameter_scope("bottleneck3"):
h = bn(PF.convolution(h, ochannels, (1, 1),
with_bias=False, channel_last=channel_last))
with nn.parameter_scope("bottleneck_s"):
s = shortcut(x, ochannels, stride, shortcut_type, test, channel_last)
return F.relu(F.add2(h, s))
def layer(x, block, ochannels, count, stride, shortcut_type, test, channel_last=False):
for i in range(count):
with nn.parameter_scope("layer{}".format(i + 1)):
x = block(x, ochannels, stride if i ==
0 else (1, 1), shortcut_type, test, channel_last=channel_last)
return x
def _make_conv_level(x, ochannels, convs, test, stride=1, dilation=1, channel_last=False):
axes = [3 if channel_last else 1]
for i in range(convs):
with nn.parameter_scope("conv{}".format(i + 1)):
s = (stride, stride) if i == 0 else (1, 1)
x = pf_convolution(
x, ochannels, (3, 3), stride=s,
pad=(dilation, dilation),
dilation=(dilation, dilation),
with_bias=False,
channel_last=channel_last)
x = F.relu(PF.batch_normalization(
x, axes=axes, batch_stat=not test))
return x
def root(x, children, ochannels, test, concat_axis=1, kernel_size=1, channel_last=False):
axes = 3 if channel_last else 1
with nn.parameter_scope("root"):
rng = np.random.RandomState(313)
x = F.concatenate(x, *children, axis=axes)
x = pf_convolution(
x, ochannels, (kernel_size, kernel_size), pad=((kernel_size-1)//2, (kernel_size-1)//2), stride=(
1, 1),
with_bias=False,
w_init=he_initializer(ochannels, kernel_size, rng),
channel_last=channel_last
)
x = PF.batch_normalization(x, axes=[axes], batch_stat=not test)
x = F.relu(x)
return x
def upsample(x, ochannels, test, kernel_size=4, channel_last=False):
rng = np.random.RandomState(313)
axes = 3 if channel_last else 1
with nn.parameter_scope("up"):
x = pf_convolution(
x, ochannels, (1, 1), stride=(
1, 1),
with_bias=False,
w_init=he_initializer(ochannels, kernel_size, rng),
channel_last=channel_last
)
x = F.relu(
PF.batch_normalization(
x,
axes=[axes],
batch_stat=not test)
)
ichannels = x.shape[axes]
x = pf_depthwise_deconvolution(
x,
(kernel_size, kernel_size),
pad=(1, 1),
stride=(2, 2),
dilation=(1, 1),
with_bias=False,
w_init=bilinear_depthwise_initializer(ichannels, kernel_size),
channel_last=channel_last
)
return x
def _make_tree_level1(
x,
children,
block,
ochannels,
level,
test,
level_root=False,
stride=1,
channel_last=False
):
axes = 3 if channel_last else 1
ichannels = x.shape[axes]
bottom = F.max_pooling(
x,
kernel=(stride, stride),
stride=(stride, stride),
channel_last=channel_last
) if stride > 1 else x
if ichannels != ochannels:
residual = pf_convolution(
bottom, ochannels, (1, 1), stride=(1, 1), pad=None, with_bias=False, channel_last=channel_last)
residual = PF.batch_normalization(
residual, axes=[axes], batch_stat=not test)
else:
residual = bottom
with nn.parameter_scope('block1'):
b1 = block(x, residual, ochannels, stride,
test, channel_last=channel_last)
with nn.parameter_scope('block2'):
b2 = block(b1, b1, ochannels, 1, test, channel_last=channel_last)
_children = [bottom, b2] if level_root else [b2]
if children:
_children += children
x = root(b1, _children, ochannels, test,
kernel_size=1, channel_last=channel_last)
return x, bottom
def _make_tree_level2(
x,
children,
block,
ochannels,
level,
test,
level_root=False,
stride=1,
channel_last=False):
with nn.parameter_scope('node1'):
ag1, bottom1 = _make_tree_level1(
x, None, block, ochannels, level, test, False, stride, channel_last=channel_last)
with nn.parameter_scope('node2'):
x, _ = _make_tree_level1(
ag1, [bottom1], block, ochannels, level, test, level_root, 1, channel_last=channel_last)
return x
def dla_imagenet(
x,
num_classes,
num_layers,
test,
residual_root=False,
tiny=False,
channel_last=False):
"""
Args:
x : Variable
num_classes : Number of classes of outputs
num_layers : Number of layers of DLA chosen from (34).
test : Construct net for testing.
tiny (bool): Tiny imagenet mode. Input image must be (3, 56, 56).
"""
layers = {
# 18: ((2, 2, 2, 2), basicblock, 1),
34: ((1, 1, 1, 2, 2, 1), (False, False, False, True, True, True), basicblock)
# 50: ((3, 4, 6, 3), bottleneck, 4),
# 101: ((3, 4, 23, 3), bottleneck, 4),
# 152: ((3, 8, 36, 3), bottleneck, 4)
}
ochannels = [16, 32, 64, 128, 256, 512]
levels, levels_root, block = layers[num_layers]
strides = [1, 2, 2, 2, 2, 2]
logger.debug(x.shape)
axes = 3 if channel_last else 1
with nn.parameter_scope("conv1"):
stride = (1, 1)
r = pf_convolution(x, 16, (7, 7),
pad=(3, 3), stride=stride, with_bias=False, channel_last=channel_last)
r = F.relu(PF.batch_normalization(
r, axes=[axes], batch_stat=not test))
hidden = {}
hidden['conv0'] = r
logger.debug(r.shape)
with nn.parameter_scope("level0"):
r = _make_conv_level(
r,
ochannels[0],
levels[0],
test=test,
stride=strides[0],
channel_last=channel_last)
hidden['level0'] = r
logger.debug(r.shape)
with nn.parameter_scope("level1"):
r = _make_conv_level(
r,
ochannels[1],
levels[1],
test=test,
stride=strides[1],
channel_last=channel_last)
hidden['level1'] = r
logger.debug(r.shape)
with nn.parameter_scope("level2"):
r, _ = _make_tree_level1(
r, None, block, ochannels[2], levels[2], test, levels_root[2], stride=strides[2], channel_last=channel_last)
hidden['level2'] = r
logger.debug(r.shape)
with nn.parameter_scope("level3"):
r = _make_tree_level2(
r,
None,
block,
ochannels[3],
levels[3],
test,
levels_root[3],
stride=strides[3],
channel_last=channel_last)
hidden['level3'] = r
logger.debug(r.shape)
with nn.parameter_scope("level4"):
r = _make_tree_level2(
r,
None,
block,
ochannels[4],
levels[4],
test,
levels_root[4],
stride=strides[4],
channel_last=channel_last)
hidden['level4'] = r
logger.debug(r.shape)
with nn.parameter_scope("level5"):
r, _ = _make_tree_level1(
r, None, block, ochannels[5], levels[5], test, levels_root[5], stride=strides[5], channel_last=channel_last)
hidden['level5'] = r
logger.debug(r.shape)
pool_shape = r.shape[-2:]
if channel_last:
pool_shape = r.shape[1:3]
r = F.average_pooling(r, pool_shape, channel_last=channel_last)
with nn.parameter_scope("fc"):
r = pf_affine(r, num_classes, channel_last=channel_last)
logger.debug(r.shape)
return r, hidden
# Upsampling portion of DLA
def DLAUp(x, test, residual_root=False, channel_last=False):
r, hidden = dla_imagenet(
x, num_classes=1000, num_layers=34, test=test, channel_last=channel_last)
callback = NnpNetworkPass(True)
callback.remove_and_rewire('fc')
ochannels = [256, 128, 64, 32]
with nn.parameter_scope("up16"):
x = upsample(hidden['level5'], ochannels[0], test,
kernel_size=4, channel_last=channel_last)
hidden['up16'] = x
with nn.parameter_scope("up8"):
x = root(x, [hidden['level4']], ochannels[0], test,
kernel_size=3, channel_last=channel_last)
x = upsample(x, ochannels[1], test,
kernel_size=4, channel_last=channel_last)
hidden['up8'] = x
with nn.parameter_scope("up4"):
with nn.parameter_scope("residual_level3"):
level4up = upsample(
hidden['level4'], ochannels[1], test, kernel_size=4, channel_last=channel_last)
with nn.parameter_scope("level3up_root"):
level3up = root(
level4up, [hidden['level3']], ochannels[1], test, kernel_size=3, channel_last=channel_last)
with nn.parameter_scope("x_root"):
x = root(x, [level3up], ochannels[1], test,
kernel_size=1, channel_last=channel_last)
x = upsample(x, ochannels[2], test,
kernel_size=4, channel_last=channel_last)
hidden['up4'] = x
with nn.parameter_scope("up2_b"):
level3up_b = upsample(
level3up, ochannels[2], test, kernel_size=4, channel_last=channel_last)
with nn.parameter_scope("up2_c"):
level3up_c = upsample(
hidden['level3'], ochannels[2], test, kernel_size=4, channel_last=channel_last)
with nn.parameter_scope("level3up_c_root"):
level3up_c = root(hidden['level2'], [
level3up_c], ochannels[2], test, kernel_size=3, channel_last=channel_last)
with nn.parameter_scope("level2up_root"):
level2up = root(level3up_b, [level3up_c],
ochannels[2], test, kernel_size=3, channel_last=channel_last)
with nn.parameter_scope("x_root"):
x = root(x, [level2up], ochannels[2], test,
kernel_size=3, channel_last=channel_last)
return x
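# Illustrative usage sketch: build the DLA-34 ImageNet graph for a small batch.
# The batch size and input resolution are arbitrary assumptions; with a
# (4, 3, 224, 224) NCHW input the classifier output reshapes to (4, 1000).
# `_example_build_dla34` is a hypothetical helper and is never called here.
def _example_build_dla34():
    x = nn.Variable((4, 3, 224, 224))
    pred, hidden = dla_imagenet(x, num_classes=1000, num_layers=34, test=True)
    return pred.shape  # -> (4, 1000)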
|
[
"nnabla.functions.max_pooling",
"nnabla.functions.concatenate",
"nnabla.parametric_functions.deconvolution",
"models.networks.initializers.bilinear_depthwise_initializer",
"numpy.transpose",
"nnabla.functions.relu",
"numpy.random.RandomState",
"nnabla.logger.logger.debug",
"nnabla.functions.reshape",
"nnabla.parametric_functions.batch_normalization",
"nnabla.parameter_scope",
"nnabla.functions.add2",
"nnabla.parametric_functions.convolution",
"nnabla.initializer.NormalInitializer",
"models.networks.initializers.he_initializer",
"nnabla.functions.average_pooling",
"nnabla.utils.nnp_graph.NnpNetworkPass"
] |
[((1166, 1192), 'numpy.random.RandomState', 'np.random.RandomState', (['(214)'], {}), '(214)\n', (1187, 1192), True, 'import numpy as np\n'), ((1483, 1665), 'nnabla.parametric_functions.deconvolution', 'PF.deconvolution', (['x', 'out_map', 'kernel'], {'pad': 'pad', 'stride': 'stride', 'dilation': 'dilation', 'w_init': 'w_init', 'with_bias': 'with_bias', 'b_init': 'b_init', 'group': 'out_map', 'channel_last': 'channel_last'}), '(x, out_map, kernel, pad=pad, stride=stride, dilation=\n dilation, w_init=w_init, with_bias=with_bias, b_init=b_init, group=\n out_map, channel_last=channel_last)\n', (1499, 1665), True, 'import nnabla.parametric_functions as PF\n'), ((1991, 2036), 'nnabla.functions.reshape', 'F.reshape', (['r', '(r.shape[0], -1)'], {'inplace': '(False)'}), '(r, (r.shape[0], -1), inplace=False)\n', (2000, 2036), True, 'import nnabla.functions as F\n'), ((2195, 2361), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', 'kernel'], {'stride': 'stride', 'pad': 'pad', 'dilation': 'dilation', 'with_bias': 'with_bias', 'w_init': 'w_init', 'b_init': 'b_init', 'channel_last': 'channel_last'}), '(x, ochannels, kernel, stride=stride, pad=pad, dilation=\n dilation, with_bias=with_bias, w_init=w_init, b_init=b_init,\n channel_last=channel_last)\n', (2209, 2361), True, 'import nnabla.parametric_functions as PF\n'), ((7084, 7110), 'numpy.random.RandomState', 'np.random.RandomState', (['(313)'], {}), '(313)\n', (7105, 7110), True, 'import numpy as np\n'), ((10581, 10602), 'nnabla.logger.logger.debug', 'logger.debug', (['x.shape'], {}), '(x.shape)\n', (10593, 10602), False, 'from nnabla.logger import logger\n'), ((10979, 11000), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (10991, 11000), False, 'from nnabla.logger import logger\n'), ((12862, 12921), 'nnabla.functions.average_pooling', 'F.average_pooling', (['r', 'pool_shape'], {'channel_last': 'channel_last'}), '(r, pool_shape, channel_last=channel_last)\n', (12879, 12921), True, 'import nnabla.functions as F\n'), ((13026, 13047), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (13038, 13047), False, 'from nnabla.logger import logger\n'), ((13289, 13309), 'nnabla.utils.nnp_graph.NnpNetworkPass', 'NnpNetworkPass', (['(True)'], {}), '(True)\n', (13303, 13309), False, 'from nnabla.utils.nnp_graph import NnpNetworkPass\n'), ((1440, 1474), 'numpy.transpose', 'np.transpose', (['w_init', '(0, 2, 3, 1)'], {}), '(w_init, (0, 2, 3, 1))\n', (1452, 1474), True, 'import numpy as np\n'), ((3820, 3877), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['h'], {'axes': 'axes', 'batch_stat': '(not test)'}), '(h, axes=axes, batch_stat=not test)\n', (3842, 3877), True, 'import nnabla.parametric_functions as PF\n'), ((3933, 3966), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""basicblock1"""'], {}), "('basicblock1')\n", (3951, 3966), True, 'import nnabla as nn\n'), ((4132, 4165), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""basicblock2"""'], {}), "('basicblock2')\n", (4150, 4165), True, 'import nnabla as nn\n'), ((4345, 4364), 'nnabla.functions.add2', 'F.add2', (['h', 'residual'], {}), '(h, residual)\n', (4351, 4364), True, 'import nnabla.functions as F\n'), ((4519, 4576), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['h'], {'axes': 'axes', 'batch_stat': '(not test)'}), '(h, axes=axes, batch_stat=not test)\n', (4541, 4576), True, 'import nnabla.parametric_functions as PF\n'), ((4646, 4679), 
'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck1"""'], {}), "('bottleneck1')\n", (4664, 4679), True, 'import nnabla as nn\n'), ((4851, 4884), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck2"""'], {}), "('bottleneck2')\n", (4869, 4884), True, 'import nnabla as nn\n'), ((5070, 5103), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck3"""'], {}), "('bottleneck3')\n", (5088, 5103), True, 'import nnabla as nn\n'), ((5241, 5275), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""bottleneck_s"""'], {}), "('bottleneck_s')\n", (5259, 5275), True, 'import nnabla as nn\n'), ((5373, 5385), 'nnabla.functions.add2', 'F.add2', (['h', 's'], {}), '(h, s)\n', (5379, 5385), True, 'import nnabla.functions as F\n'), ((6475, 6501), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""root"""'], {}), "('root')\n", (6493, 6501), True, 'import nnabla as nn\n'), ((6517, 6543), 'numpy.random.RandomState', 'np.random.RandomState', (['(313)'], {}), '(313)\n', (6538, 6543), True, 'import numpy as np\n'), ((6556, 6594), 'nnabla.functions.concatenate', 'F.concatenate', (['x', '*children'], {'axis': 'axes'}), '(x, *children, axis=axes)\n', (6569, 6594), True, 'import nnabla.functions as F\n'), ((6908, 6967), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (6930, 6967), True, 'import nnabla.parametric_functions as PF\n'), ((6980, 6989), 'nnabla.functions.relu', 'F.relu', (['x'], {}), '(x)\n', (6986, 6989), True, 'import nnabla.functions as F\n'), ((7156, 7180), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up"""'], {}), "('up')\n", (7174, 7180), True, 'import nnabla as nn\n'), ((8224, 8321), 'nnabla.functions.max_pooling', 'F.max_pooling', (['x'], {'kernel': '(stride, stride)', 'stride': '(stride, stride)', 'channel_last': 'channel_last'}), '(x, kernel=(stride, stride), stride=(stride, stride),\n channel_last=channel_last)\n', (8237, 8321), True, 'import nnabla.functions as F\n'), ((8574, 8640), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['residual'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(residual, axes=[axes], batch_stat=not test)\n', (8596, 8640), True, 'import nnabla.parametric_functions as PF\n'), ((8699, 8727), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""block1"""'], {}), "('block1')\n", (8717, 8727), True, 'import nnabla as nn\n'), ((8841, 8869), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""block2"""'], {}), "('block2')\n", (8859, 8869), True, 'import nnabla as nn\n'), ((9365, 9392), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""node1"""'], {}), "('node1')\n", (9383, 9392), True, 'import nnabla as nn\n'), ((9539, 9566), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""node2"""'], {}), "('node2')\n", (9557, 9566), True, 'import nnabla as nn\n'), ((10649, 10676), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (10667, 10676), True, 'import nnabla as nn\n'), ((11010, 11038), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level0"""'], {}), "('level0')\n", (11028, 11038), True, 'import nnabla as nn\n'), ((11264, 11285), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (11276, 11285), False, 'from nnabla.logger import logger\n'), ((11295, 11323), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level1"""'], {}), "('level1')\n", (11313, 11323), True, 'import nnabla as nn\n'), 
((11549, 11570), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (11561, 11570), False, 'from nnabla.logger import logger\n'), ((11580, 11608), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level2"""'], {}), "('level2')\n", (11598, 11608), True, 'import nnabla as nn\n'), ((11802, 11823), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (11814, 11823), False, 'from nnabla.logger import logger\n'), ((11833, 11861), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level3"""'], {}), "('level3')\n", (11851, 11861), True, 'import nnabla as nn\n'), ((12148, 12169), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (12160, 12169), False, 'from nnabla.logger import logger\n'), ((12179, 12207), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level4"""'], {}), "('level4')\n", (12197, 12207), True, 'import nnabla as nn\n'), ((12494, 12515), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (12506, 12515), False, 'from nnabla.logger import logger\n'), ((12525, 12553), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level5"""'], {}), "('level5')\n", (12543, 12553), True, 'import nnabla as nn\n'), ((12747, 12768), 'nnabla.logger.logger.debug', 'logger.debug', (['r.shape'], {}), '(r.shape)\n', (12759, 12768), False, 'from nnabla.logger import logger\n'), ((12931, 12955), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc"""'], {}), "('fc')\n", (12949, 12955), True, 'import nnabla as nn\n'), ((13391, 13417), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up16"""'], {}), "('up16')\n", (13409, 13417), True, 'import nnabla as nn\n'), ((13577, 13602), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up8"""'], {}), "('up8')\n", (13595, 13602), True, 'import nnabla as nn\n'), ((13865, 13890), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up4"""'], {}), "('up4')\n", (13883, 13890), True, 'import nnabla as nn\n'), ((14588, 14615), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up2_b"""'], {}), "('up2_b')\n", (14606, 14615), True, 'import nnabla as nn\n'), ((14741, 14768), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""up2_c"""'], {}), "('up2_c')\n", (14759, 14768), True, 'import nnabla as nn\n'), ((1929, 1967), 'nnabla.initializer.NormalInitializer', 'NormalInitializer', ([], {'sigma': '(0.01)', 'rng': 'RNG'}), '(sigma=0.01, rng=RNG)\n', (1946, 1967), False, 'from nnabla.initializer import UniformInitializer, ConstantInitializer, NormalInitializer, calc_normal_std_he_forward, calc_normal_std_he_backward\n'), ((2852, 2887), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""shortcut_conv"""'], {}), "('shortcut_conv')\n", (2870, 2887), True, 'import nnabla as nn\n'), ((2905, 3015), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', '(1, 1)'], {'stride': '(stride, stride)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, ochannels, (1, 1), stride=(stride, stride), with_bias=\n False, channel_last=channel_last)\n', (2919, 3015), True, 'import nnabla.parametric_functions as PF\n'), ((3058, 3117), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (3080, 3117), True, 'import nnabla.parametric_functions as PF\n'), ((3244, 3341), 'nnabla.functions.max_pooling', 'F.max_pooling', (['x'], {'kernel': '(stride, stride)', 'stride': '(stride, stride)', 'channel_last': 
'channel_last'}), '(x, kernel=(stride, stride), stride=(stride, stride),\n channel_last=channel_last)\n', (3257, 3341), True, 'import nnabla.functions as F\n'), ((3448, 3547), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', '(1, 1)'], {'stride': '(1, 1)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, ochannels, (1, 1), stride=(1, 1), with_bias=False,\n channel_last=channel_last)\n', (3462, 3547), True, 'import nnabla.parametric_functions as PF\n'), ((3598, 3657), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (3620, 3657), True, 'import nnabla.parametric_functions as PF\n'), ((4195, 4291), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['h', 'ochannels', '(3, 3)'], {'pad': '(1, 1)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(h, ochannels, (3, 3), pad=(1, 1), with_bias=False,\n channel_last=channel_last)\n', (4209, 4291), True, 'import nnabla.parametric_functions as PF\n'), ((5120, 5205), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['h', 'ochannels', '(1, 1)'], {'with_bias': '(False)', 'channel_last': 'channel_last'}), '(h, ochannels, (1, 1), with_bias=False, channel_last=channel_last\n )\n', (5134, 5205), True, 'import nnabla.parametric_functions as PF\n'), ((7449, 7508), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(x, axes=[axes], batch_stat=not test)\n', (7471, 7508), True, 'import nnabla.parametric_functions as PF\n'), ((10861, 10920), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['r'], {'axes': '[axes]', 'batch_stat': '(not test)'}), '(r, axes=[axes], batch_stat=not test)\n', (10883, 10920), True, 'import nnabla.parametric_functions as PF\n'), ((13905, 13942), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""residual_level3"""'], {}), "('residual_level3')\n", (13923, 13942), True, 'import nnabla as nn\n'), ((14906, 14943), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level3up_c_root"""'], {}), "('level3up_c_root')\n", (14924, 14943), True, 'import nnabla as nn\n'), ((15113, 15148), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level2up_root"""'], {}), "('level2up_root')\n", (15131, 15148), True, 'import nnabla as nn\n'), ((15307, 15335), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""x_root"""'], {}), "('x_root')\n", (15325, 15335), True, 'import nnabla as nn\n'), ((3990, 4111), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'ochannels', '(3, 3)'], {'stride': '(stride, stride)', 'pad': '(1, 1)', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, ochannels, (3, 3), stride=(stride, stride), pad=(1, 1),\n with_bias=False, channel_last=channel_last)\n', (4004, 4111), True, 'import nnabla.parametric_functions as PF\n'), ((4716, 4801), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'hchannels', '(1, 1)'], {'with_bias': '(False)', 'channel_last': 'channel_last'}), '(x, hchannels, (1, 1), with_bias=False, channel_last=channel_last\n )\n', (4730, 4801), True, 'import nnabla.parametric_functions as PF\n'), ((4921, 5033), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['h', 'hchannels', '(3, 3)'], {'pad': '(1, 1)', 'stride': 'stride', 'with_bias': '(False)', 'channel_last': 'channel_last'}), '(h, hchannels, (3, 3), pad=(1, 1), stride=stride, 
with_bias=\n False, channel_last=channel_last)\n', (4935, 5033), True, 'import nnabla.parametric_functions as PF\n'), ((6249, 6306), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'axes': 'axes', 'batch_stat': '(not test)'}), '(x, axes=axes, batch_stat=not test)\n', (6271, 6306), True, 'import nnabla.parametric_functions as PF\n'), ((6803, 6846), 'models.networks.initializers.he_initializer', 'he_initializer', (['ochannels', 'kernel_size', 'rng'], {}), '(ochannels, kernel_size, rng)\n', (6817, 6846), False, 'from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer\n'), ((7324, 7367), 'models.networks.initializers.he_initializer', 'he_initializer', (['ochannels', 'kernel_size', 'rng'], {}), '(ochannels, kernel_size, rng)\n', (7338, 7367), False, 'from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer\n'), ((7829, 7883), 'models.networks.initializers.bilinear_depthwise_initializer', 'bilinear_depthwise_initializer', (['ichannels', 'kernel_size'], {}), '(ichannels, kernel_size)\n', (7859, 7883), False, 'from models.networks.initializers import he_initializer, bilinear_depthwise_initializer, bilinear_initializer\n'), ((14090, 14125), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""level3up_root"""'], {}), "('level3up_root')\n", (14108, 14125), True, 'import nnabla as nn\n'), ((14289, 14317), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""x_root"""'], {}), "('x_root')\n", (14307, 14317), True, 'import nnabla as nn\n')]
|
"""
This module implements plotting functions useful to report analysis results.
Author: <NAME>, <NAME>, 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
from nilearn.glm.first_level import check_design_matrix
from nilearn.glm.contrasts import expression_to_contrast_vector
def plot_design_matrix(design_matrix, rescale=True, ax=None, output_file=None):
"""Plot a design matrix provided as a DataFrame
Parameters
----------
    design_matrix : pandas DataFrame
Describes a design matrix.
rescale : bool, optional
Rescale columns magnitude for visualization or not.
ax : axis handle, optional
Handle to axis onto which we will draw design matrix.
output_file : string or None, optional,
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
Returns
-------
ax: axis handle
The axis used for plotting.
"""
# We import _set_mpl_backend because just the fact that we are
# importing it sets the backend
# normalize the values per column for better visualization
_, X, names = check_design_matrix(design_matrix)
if rescale:
X = X / np.maximum(1.e-12, np.sqrt(
np.sum(X ** 2, 0))) # pylint: disable=no-member
if ax is None:
max_len = np.max([len(str(name)) for name in names])
fig_height = 1 + .1 * X.shape[0] + .04 * max_len
if fig_height < 3:
fig_height = 3
elif fig_height > 10:
fig_height = 10
plt.figure(figsize=(1 + .23 * len(names), fig_height))
ax = plt.subplot(1, 1, 1)
ax.imshow(X, interpolation='nearest', aspect='auto')
    ax.set_xlabel('conditions')
ax.set_ylabel('scan number')
ax.set_xticks(range(len(names)))
ax.set_xticklabels(names, rotation=60, ha='left')
# Set ticks above, to have a display more similar to the display of a
# corresponding dataframe
ax.xaxis.tick_top()
plt.tight_layout()
if output_file is not None:
plt.savefig(output_file)
plt.close()
ax = None
return ax
def plot_event(model_event, cmap=None, output_file=None, **fig_kwargs):
"""Creates plot for event visualization.
Parameters
----------
model_event : pandas DataFrame or list of pandas DataFrame
the `pandas.DataFrame` must have three columns
``event_type`` with event name, ``onset`` and ``duration``.
The `pandas.DataFrame` can also be obtained from
:func:`nilearn.glm.first_level.first_level_from_bids`.
cmap : str or matplotlib.cmap, optional
the colormap used to label different events
output_file : string or None, optional,
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
**fig_kwargs : extra keyword arguments, optional
Extra arguments passed to matplotlib.pyplot.subplots
Returns
-------
Plot Figure object
"""
if isinstance(model_event, pd.DataFrame):
model_event = [model_event]
n_runs = len(model_event)
figure, ax = plt.subplots(1, 1, **fig_kwargs)
# input validation
if cmap is None:
cmap = plt.cm.tab20
elif isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
else:
cmap = cmap
event_labels = pd.concat(event['trial_type'] for event in model_event)
event_labels = np.unique(event_labels)
cmap_dictionary = {label:idx for idx, label in enumerate(event_labels)}
if len(event_labels) > cmap.N:
plt.close()
raise ValueError("The number of event types is greater than "+ \
" colors in colormap (%d > %d). Use a different colormap." \
% (len(event_labels), cmap.N))
for idx_run, event_df in enumerate(model_event):
for _, event in event_df.iterrows():
event_onset = event['onset']
event_end = event['onset'] + event['duration']
color = cmap.colors[cmap_dictionary[event['trial_type']]]
ax.axvspan(event_onset,
event_end,
ymin=(idx_run + .25) / n_runs,
ymax=(idx_run + .75) / n_runs,
facecolor=color)
handles = []
for label, idx in cmap_dictionary.items():
patch = mpatches.Patch(color=cmap.colors[idx], label=label)
handles.append(patch)
_ = ax.legend(handles=handles, ncol=4)
ax.set_xlabel("Time (sec.)")
ax.set_ylabel("Runs")
ax.set_ylim(0, n_runs)
ax.set_yticks(np.arange(n_runs) + .5)
ax.set_yticklabels(np.arange(n_runs) + 1)
plt.tight_layout()
if output_file is not None:
plt.savefig(output_file)
plt.close()
figure = None
return figure
def plot_contrast_matrix(contrast_def, design_matrix, colorbar=False, ax=None,
output_file=None):
"""Creates plot for contrast definition.
Parameters
----------
contrast_def : str or array of shape (n_col) or list of (string or
array of shape (n_col))
where ``n_col`` is the number of columns of the design matrix, (one
array per run). If only one array is provided when there are several
runs, it will be assumed that the same contrast is desired for all
runs. The string can be a formula compatible with
`pandas.DataFrame.eval`. Basically one can use the name of the
conditions as they appear in the design matrix of the fitted model
combined with operators +- and combined with numbers with operators
+-`*`/.
design_matrix : pandas DataFrame
colorbar : Boolean, optional (default False)
Include a colorbar in the contrast matrix plot.
ax : matplotlib Axes object, optional (default None)
        Axes object onto which the contrast matrix is drawn. If None, a new
        figure and axes are created.
output_file : string or None, optional,
The name of an image file to export the plot to. Valid extensions
are .png, .pdf, .svg. If output_file is not None, the plot
is saved to a file, and the display is closed.
Returns
-------
Plot Axes object
"""
design_column_names = design_matrix.columns.tolist()
if isinstance(contrast_def, str):
contrast_def = expression_to_contrast_vector(
contrast_def, design_column_names)
maxval = np.max(np.abs(contrast_def))
con_matrix = np.asmatrix(contrast_def)
max_len = np.max([len(str(name)) for name in design_column_names])
if ax is None:
plt.figure(figsize=(.4 * len(design_column_names),
1 + .5 * con_matrix.shape[0] + .04 * max_len))
ax = plt.gca()
mat = ax.matshow(con_matrix, aspect='equal',
cmap='gray', vmin=-maxval, vmax=maxval)
    ax.set_xlabel('conditions')
ax.set_ylabel('')
ax.set_yticks(())
ax.xaxis.set(ticks=np.arange(len(design_column_names)))
ax.set_xticklabels(design_column_names, rotation=50, ha='left')
if colorbar:
plt.colorbar(mat, fraction=0.025, pad=0.04)
plt.tight_layout()
plt.subplots_adjust(top=np.min([.3 + .05 * con_matrix.shape[0], .55]))
if output_file is not None:
plt.savefig(output_file)
plt.close()
ax = None
return ax
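# --- Usage sketch (added; not part of the original module) ---
# A minimal, hypothetical illustration of the three helpers above; the DataFrames
# below are made up for demonstration only.
# dm = pd.DataFrame({'cond_a': [0, 1, 0, 1], 'cond_b': [1, 0, 1, 0]})
# plot_design_matrix(dm, output_file='design_matrix.png')
# events = pd.DataFrame({'trial_type': ['a', 'b'], 'onset': [0.0, 5.0],
#                        'duration': [2.0, 2.0]})
# plot_event(events, output_file='events.png')
# plot_contrast_matrix('cond_a - cond_b', dm, colorbar=True, output_file='contrast.png')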
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.get_cmap",
"numpy.sum",
"matplotlib.pyplot.close",
"nilearn.glm.first_level.check_design_matrix",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"nilearn.glm.contrasts.expression_to_contrast_vector",
"numpy.min",
"numpy.asmatrix",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.patches.Patch",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"numpy.unique"
] |
[((1283, 1317), 'nilearn.glm.first_level.check_design_matrix', 'check_design_matrix', (['design_matrix'], {}), '(design_matrix)\n', (1302, 1317), False, 'from nilearn.glm.first_level import check_design_matrix\n'), ((2132, 2150), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2148, 2150), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3400), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1, **fig_kwargs)\n', (3380, 3400), True, 'import matplotlib.pyplot as plt\n'), ((3590, 3645), 'pandas.concat', 'pd.concat', (["(event['trial_type'] for event in model_event)"], {}), "(event['trial_type'] for event in model_event)\n", (3599, 3645), True, 'import pandas as pd\n'), ((3665, 3688), 'numpy.unique', 'np.unique', (['event_labels'], {}), '(event_labels)\n', (3674, 3688), True, 'import numpy as np\n'), ((4912, 4930), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4928, 4930), True, 'import matplotlib.pyplot as plt\n'), ((6708, 6733), 'numpy.asmatrix', 'np.asmatrix', (['contrast_def'], {}), '(contrast_def)\n', (6719, 6733), True, 'import numpy as np\n'), ((7381, 7399), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7397, 7399), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1784), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1775, 1784), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (2202, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2235), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2233, 2235), True, 'import matplotlib.pyplot as plt\n'), ((3810, 3821), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3819, 3821), True, 'import matplotlib.pyplot as plt\n'), ((4602, 4653), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'cmap.colors[idx]', 'label': 'label'}), '(color=cmap.colors[idx], label=label)\n', (4616, 4653), True, 'import matplotlib.patches as mpatches\n'), ((4971, 4995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (4982, 4995), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5015), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5013, 5015), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6635), 'nilearn.glm.contrasts.expression_to_contrast_vector', 'expression_to_contrast_vector', (['contrast_def', 'design_column_names'], {}), '(contrast_def, design_column_names)\n', (6600, 6635), False, 'from nilearn.glm.contrasts import expression_to_contrast_vector\n'), ((6669, 6689), 'numpy.abs', 'np.abs', (['contrast_def'], {}), '(contrast_def)\n', (6675, 6689), True, 'import numpy as np\n'), ((6976, 6985), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6983, 6985), True, 'import matplotlib.pyplot as plt\n'), ((7332, 7375), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mat'], {'fraction': '(0.025)', 'pad': '(0.04)'}), '(mat, fraction=0.025, pad=0.04)\n', (7344, 7375), True, 'import matplotlib.pyplot as plt\n'), ((7516, 7540), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {}), '(output_file)\n', (7527, 7540), True, 'import matplotlib.pyplot as plt\n'), ((7549, 7560), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7558, 7560), True, 'import matplotlib.pyplot as plt\n'), ((3521, 3539), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (3533, 3539), True, 'import matplotlib.pyplot 
as plt\n'), ((4833, 4850), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (4842, 4850), True, 'import numpy as np\n'), ((4880, 4897), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (4889, 4897), True, 'import numpy as np\n'), ((7428, 7476), 'numpy.min', 'np.min', (['[0.3 + 0.05 * con_matrix.shape[0], 0.55]'], {}), '([0.3 + 0.05 * con_matrix.shape[0], 0.55])\n', (7434, 7476), True, 'import numpy as np\n'), ((1390, 1407), 'numpy.sum', 'np.sum', (['(X ** 2)', '(0)'], {}), '(X ** 2, 0)\n', (1396, 1407), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import pickle
import json
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model
from keras.optimizers import RMSprop, Adam
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from gensim.models import Word2Vec, KeyedVectors
from deepmm.models import DeepMultimodalModel
# Read some data that contains a mix of categorical and text-based features
# - e.g. Mercari Price Suggestion Challenge https://www.kaggle.com/c/mercari-price-suggestion-challenge/data
df = pd.read_csv('data.csv')
# Load pretrained embeddings
w2v = KeyedVectors.load_word2vec_format('embeddings_w2v.txt')
# Hyperparameters for text tokenization
EMBEDDING_DIM = 100
NUM_MAX_WORDS = 500
MAX_LEN = 150
X_nlp = df['TEXT']
# Tokenize text documents via keras tokenizer
tok = Tokenizer(num_words=NUM_MAX_WORDS)
tok.fit_on_texts(X_nlp)
sequences = tok.texts_to_sequences(X_nlp)
sequences_matrix = sequence.pad_sequences(sequences, maxlen=MAX_LEN)
word_index = tok.word_index
print('Found %s unique tokens.' % len(word_index))
vocabulary_size = min(len(word_index)+1, NUM_MAX_WORDS)
embedding_matrix = np.zeros((vocabulary_size, EMBEDDING_DIM))
# Preparing the embedding matrix:
# We only take the embeddings that are necessary for the given vocabulary
num_none = 0
for word, i in word_index.items():
if i>=NUM_MAX_WORDS:
continue
try:
embedding_vector = w2v[word]
embedding_matrix[i] = embedding_vector
except KeyError:
embedding_matrix[i]=np.random.normal(0,np.sqrt(0.25),EMBEDDING_DIM)
num_none = num_none+1
# Define categorical features and target
cat_features = ['C1', 'C2', 'C3', 'C4']
target = ['TARGET']
# Label encode the categories for each categorical feature (numeric value is needed for feeding into keras model)
X_categorical = []
label_encoder = []
for feature in cat_features:
le = LabelEncoder()
X_categorical.append(pd.DataFrame(le.fit_transform(df[feature]), columns=[feature]))
label_encoder.append(le)
Y = df[target]
# Split all data into training and test chunks
# IMPORTANT: Make sure that textual and categorical data is properly aligned (e.g. here choose same random_state)!
X_nlp_train_all, X_nlp_test_all, y_train_all, y_test_all = train_test_split(sequences_matrix, Y, random_state=42)
# Split sparse part into train and test
X_categorical_train = []
X_categorical_test = []
for X_category in X_categorical:
tr, te, y_train_catembeddings, y_test_catembeddings = train_test_split(X_category, Y, random_state=42)
X_categorical_train.append(tr)
X_categorical_test.append(te)
# Copy the lists before appending, so that adding the text matrix does not also
# mutate X_categorical_train / X_categorical_test (previously these were aliases)
X_train_catembeddings = list(X_categorical_train)
X_train_all = list(X_categorical_train)
X_train_all.append(X_nlp_train_all)
X_test_all = list(X_categorical_test)
X_test_catembeddings = list(X_categorical_test)
X_test_all.append(X_nlp_test_all)
# Get cardinality of each categorical variable
num_unique_categories = [df[cat].nunique() for cat in cat_features]
# Setup model object
model = DeepMultimodalModel(task='regression', num_unique_categories=num_unique_categories, cat_embedding_dim=16,
txt_vocab_size=vocabulary_size, txt_embedding_dim=EMBEDDING_DIM, txt_max_len=MAX_LEN,
txt_weights=embedding_matrix,
cat_hidden_neurons=[100,50,10], cat_dropout=[0.1, 0.2, 0.2], cat_bi_interaction=True,
txt_lstm_neurons=32, txt_dropout=0.2, final_hidden_neurons=[64, 32], final_dropout=[0.3, 0.3])
model.compile("adam", "mse", metrics=['mse', 'mae'])
# Fit model
hist = model.fit(X_train_all, y_train_all, epochs=100, batch_size=256, validation_split=0.2)
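# --- Evaluation sketch (added) --- The metrics imported at the top are never used in
# the original script; a typical follow-up on the held-out split might look like this
# (assumes DeepMultimodalModel exposes the usual Keras predict method).
# y_pred = model.predict(X_test_all)
# print('Test RMSE:', np.sqrt(mean_squared_error(y_test_all, y_pred)))
# print('Test MAE :', mean_absolute_error(y_test_all, y_pred))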
|
[
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"sklearn.preprocessing.LabelEncoder",
"keras.preprocessing.text.Tokenizer",
"gensim.models.KeyedVectors.load_word2vec_format",
"deepmm.models.DeepMultimodalModel",
"numpy.sqrt"
] |
[((687, 710), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (698, 710), True, 'import pandas as pd\n'), ((748, 803), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['"""embeddings_w2v.txt"""'], {}), "('embeddings_w2v.txt')\n", (781, 803), False, 'from gensim.models import Word2Vec, KeyedVectors\n'), ((972, 1006), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'NUM_MAX_WORDS'}), '(num_words=NUM_MAX_WORDS)\n', (981, 1006), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1092, 1141), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['sequences'], {'maxlen': 'MAX_LEN'}), '(sequences, maxlen=MAX_LEN)\n', (1114, 1141), False, 'from keras.preprocessing import sequence\n'), ((1297, 1339), 'numpy.zeros', 'np.zeros', (['(vocabulary_size, EMBEDDING_DIM)'], {}), '((vocabulary_size, EMBEDDING_DIM))\n', (1305, 1339), True, 'import numpy as np\n'), ((2439, 2493), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sequences_matrix', 'Y'], {'random_state': '(42)'}), '(sequences_matrix, Y, random_state=42)\n', (2455, 2493), False, 'from sklearn.model_selection import train_test_split\n'), ((3164, 3595), 'deepmm.models.DeepMultimodalModel', 'DeepMultimodalModel', ([], {'task': '"""regression"""', 'num_unique_categories': 'num_unique_categories', 'cat_embedding_dim': '(16)', 'txt_vocab_size': 'vocabulary_size', 'txt_embedding_dim': 'EMBEDDING_DIM', 'txt_max_len': 'MAX_LEN', 'txt_weights': 'embedding_matrix', 'cat_hidden_neurons': '[100, 50, 10]', 'cat_dropout': '[0.1, 0.2, 0.2]', 'cat_bi_interaction': '(True)', 'txt_lstm_neurons': '(32)', 'txt_dropout': '(0.2)', 'final_hidden_neurons': '[64, 32]', 'final_dropout': '[0.3, 0.3]'}), "(task='regression', num_unique_categories=\n num_unique_categories, cat_embedding_dim=16, txt_vocab_size=\n vocabulary_size, txt_embedding_dim=EMBEDDING_DIM, txt_max_len=MAX_LEN,\n txt_weights=embedding_matrix, cat_hidden_neurons=[100, 50, 10],\n cat_dropout=[0.1, 0.2, 0.2], cat_bi_interaction=True, txt_lstm_neurons=\n 32, txt_dropout=0.2, final_hidden_neurons=[64, 32], final_dropout=[0.3,\n 0.3])\n", (3183, 3595), False, 'from deepmm.models import DeepMultimodalModel\n'), ((2066, 2080), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2078, 2080), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2676, 2724), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_category', 'Y'], {'random_state': '(42)'}), '(X_category, Y, random_state=42)\n', (2692, 2724), False, 'from sklearn.model_selection import train_test_split\n'), ((1704, 1717), 'numpy.sqrt', 'np.sqrt', (['(0.25)'], {}), '(0.25)\n', (1711, 1717), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from mode_shape import make_dir
from scipy.interpolate import spline
num = 300
fre = 2
scale = 1
x = np.arange(0,101)
mode1 = np.sin(x*2*np.pi/100)
mode2 = np.sin(x*np.pi/100)
xnew = np.linspace(x.min(),x.max(),300)
#4 0.01
result_path = 'data/1+2_scale_%0.1f_fre_%d'%(scale,fre)
make_dir(result_path)
count = 0
for i in range(0,1):
for w in range(0,801):
y = (mode1*np.sin(fre*np.pi*w/100)+mode2*np.sin(fre*w*np.pi/(50)))
y = y*scale
xsmoo = spline(x, y, xnew)
plt.figure()
plt.xlim(0, 105)
plt.ylim(-50, 50)
plt.axis('off')
plt.plot(xnew, xsmoo, linewidth=15)
plt.savefig(result_path+'/%d.png'%(count))
count +=1
plt.close()
#
# print(y)
#
#
#
#
#
#
# plt.show()
#
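# Note (added): `scipy.interpolate.spline` was removed in SciPy >= 1.3. On current
# SciPy, an equivalent smoothing step would be, roughly:
#   from scipy.interpolate import make_interp_spline
#   xsmoo = make_interp_spline(x, y)(xnew)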
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"scipy.interpolate.spline",
"mode_shape.make_dir",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.savefig"
] |
[((159, 176), 'numpy.arange', 'np.arange', (['(0)', '(101)'], {}), '(0, 101)\n', (168, 176), True, 'import numpy as np\n'), ((184, 211), 'numpy.sin', 'np.sin', (['(x * 2 * np.pi / 100)'], {}), '(x * 2 * np.pi / 100)\n', (190, 211), True, 'import numpy as np\n'), ((214, 237), 'numpy.sin', 'np.sin', (['(x * np.pi / 100)'], {}), '(x * np.pi / 100)\n', (220, 237), True, 'import numpy as np\n'), ((340, 361), 'mode_shape.make_dir', 'make_dir', (['result_path'], {}), '(result_path)\n', (348, 361), False, 'from mode_shape import make_dir\n'), ((533, 551), 'scipy.interpolate.spline', 'spline', (['x', 'y', 'xnew'], {}), '(x, y, xnew)\n', (539, 551), False, 'from scipy.interpolate import spline\n'), ((560, 572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (570, 572), True, 'import matplotlib.pyplot as plt\n'), ((581, 597), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(105)'], {}), '(0, 105)\n', (589, 597), True, 'import matplotlib.pyplot as plt\n'), ((606, 623), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-50)', '(50)'], {}), '(-50, 50)\n', (614, 623), True, 'import matplotlib.pyplot as plt\n'), ((632, 647), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (640, 647), True, 'import matplotlib.pyplot as plt\n'), ((656, 691), 'matplotlib.pyplot.plot', 'plt.plot', (['xnew', 'xsmoo'], {'linewidth': '(15)'}), '(xnew, xsmoo, linewidth=15)\n', (664, 691), True, 'import matplotlib.pyplot as plt\n'), ((701, 745), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(result_path + '/%d.png' % count)"], {}), "(result_path + '/%d.png' % count)\n", (712, 745), True, 'import matplotlib.pyplot as plt\n'), ((770, 781), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (779, 781), True, 'import matplotlib.pyplot as plt\n'), ((441, 470), 'numpy.sin', 'np.sin', (['(fre * np.pi * w / 100)'], {}), '(fre * np.pi * w / 100)\n', (447, 470), True, 'import numpy as np\n'), ((471, 499), 'numpy.sin', 'np.sin', (['(fre * w * np.pi / 50)'], {}), '(fre * w * np.pi / 50)\n', (477, 499), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from math import pi
class proposal_q():
def __init__(self, config, scope_name='proposal'):
self.config = config
with tf.variable_scope(scope_name) as scope:
self.param_size = (self.config.dim + 1) * self.config.n_hidden + self.config.n_hidden + 1
self._mu = tf.get_variable('mean', shape=(self.param_size), dtype=tf.float32,
initializer=tf.random_normal_initializer(stddev=.02))
self._log_variance = tf.get_variable('log_variance',
initializer=tf.constant(-10.+np.zeros((self.param_size)).astype('float32')), dtype=tf.float32)
self._log_v_noise = tf.get_variable('log_v_noise',
initializer=tf.constant(np.log(1.0,).astype('float32')),
dtype=tf.float32)
self.params = self.get_parameters_q()
def draw_samples(self, n_samples):
# (d+1) x nh + nh + 1
ret = tf.random_normal([int(n_samples), self.param_size]) * tf.sqrt(self.params['v']) + self.params['m']
return ret
def get_parameters_q(self, v_prior=1., scale=1.):
#v = tf.exp(self._log_variance)
v = 1.0 / (scale * tf.exp(-self._log_variance ) + 1./v_prior)
m = self._mu
#m = scale * self._mu * tf.exp(- self._log_variance ) * v
return {'m': m, 'v': v}
def log_prob(self, samples, stop_grad=False):
qv = self.params['v']
qm = self.params['m']
if stop_grad:
qv = tf.stop_gradient(qv)
qm = tf.stop_gradient(qm)
lq = -0.5*tf.log(2*pi*qv) - 0.5*(samples - qm)**2 / qv
return tf.reduce_sum(lq, axis=1)
class Model():
def __init__(self, config,
scope_name = 'variational', is_train=True):
self.config = config
self.debug = {}
self.N = self.config.n_train
self.v_prior = 1.
# create placeholders for the input
self.X = tf.placeholder(
name='X', dtype=tf.float32,
shape=[None, self.config.dim],
)
self.y = tf.placeholder(
name='y', dtype=tf.float32,
shape=[None],
)
self.q_approx = proposal_q(self.config)
self.kl_loss = self.get_klqp_loss(self.config.sample_size, self.X, self.y)
tf.summary.scalar("kl_loss", self.kl_loss)
self.rmse, self.ll = self.get_error_and_ll(self.X, self.y, 0., 1.)
tf.summary.scalar("batch_rmse", self.rmse)
tf.summary.scalar("batch_ll", self.ll)
def get_feed_dict(self, batch_chunk):
fd = {
self.X: batch_chunk['X'],
self.y: batch_chunk['y'],
}
return fd
#k : number of samples
def predict(self, samples_q, X):
# X: n x d
n, d = X.get_shape()[0].value, self.config.dim
k = self.config.sample_size
nh = self.config.n_hidden
# first layer
w1 = samples_q[:, :d * nh] # w1: k x (nh x d)
w1 = tf.reshape(w1, (k*nh, d)) # w1 (K x nh) x d
b1 = samples_q[:, d*nh: (d+1)*nh] # K x nh
b1 = tf.reshape(b1, (1, k*nh)) # 1 x (K x nh)
a = tf.matmul(X, w1, transpose_b=True) + b1 # n x (k * nh)
h = tf.nn.relu(a) # RELU, n x (k x nh)
# second layer
samples_q = samples_q[:, (d+1)*nh:]
w2 = samples_q[:, :nh] # w2: k x nh
w2 = tf.reshape(w2, (1, k*nh)) # w2: 1 x (kxnh)
b2 = tf.reshape(samples_q[:, nh:], (1,-1)) # b2: [k]
out = tf.reshape( tf.reduce_sum(tf.reshape(h*w2, (-1, nh)), axis=1) , (-1, k)) + b2
return out
def get_error_and_ll(self, X, y, location, scale, v_prior=1.):
v_noise = tf.exp(self.q_approx._log_v_noise) * scale**2
samples_q = self.q_approx.draw_samples( self.config.sample_size)
py = self.predict(samples_q, X) * scale + location
log_factor = -0.5 * tf.log(2 * pi * v_noise) - 0.5 * (tf.expand_dims(y, 1) - py)**2 / v_noise
ll = tf.reduce_mean(tf.reduce_logsumexp(log_factor - tf.log(1.*self.config.sample_size), axis=1))
error = tf.sqrt(tf.reduce_mean((y - tf.reduce_mean(py, 1))**2))
return error, ll
def phi(self, n_samples, lpx, lqx, method, alpha=0):
diff = lpx - lqx
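        # (added comment) diff is the per-sample log importance ratio
        # log p~(x) - log q(x); the weights wx below depend only on these ratios.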
if method == 'adapted':
# \#(t_i < t)
diff -= tf.reduce_max(diff)
dx = tf.exp(diff)
prob = tf.sign(tf.expand_dims(dx, 1) - tf.expand_dims(dx, 0))
#prob = tf.cast(tf.equal(prob, -1), tf.float32)
prob = tf.cast(tf.greater(prob, 0.5), tf.float32)
wx = tf.reduce_sum(prob, axis=1) / n_samples
wx = (1.-wx)**alpha ## alpha= -1 or alpha = -0.5
elif method == 'alpha':
diff = alpha * diff
diff -= tf.reduce_max(diff)
wx = tf.exp(diff)
else:
raise NotImplementedError
wx /= tf.reduce_sum(wx) # normalization
return wx
def get_klqp_loss(self, n_samples, X, y):
v_noise = tf.exp(self.q_approx._log_v_noise)
samples_q = self.q_approx.draw_samples(n_samples)
log_factor_value = 1.0 * self.N * self.log_likelihood_factor(samples_q, v_noise, X, y)
logp0 = self.log_prior(samples_q)
lqx = self.q_approx.log_prob(samples_q, stop_grad=True)
lpx = logp0 + log_factor_value
wx = self.phi(n_samples, lpx, lqx, self.config.method, alpha=self.config.alpha)
wx = tf.stop_gradient(wx)
loss = tf.reduce_sum(wx * (lqx - lpx))
return loss
def log_likelihood_factor(self, samples_q, v_noise, X, y):
assert X.get_shape().ndims == 2, 'illegal inputs'
assert y.get_shape().ndims == 1, 'illegal inputs'
py = self.predict(samples_q, X) # n x k
lik = -0.5 * tf.log(2 * pi * v_noise) - 0.5 * (tf.expand_dims(y, 1) - py) ** 2 /v_noise
return tf.reduce_mean(lik, axis=0)
def log_prior(self, samples_q):
log_p0 = -0.5 * tf.log(2 * pi * self.v_prior) - 0.5 * samples_q **2 / self.v_prior
return tf.reduce_sum(log_p0, axis=1)
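# --- Usage sketch (added, hypothetical) --- The Model above only needs a config object
# exposing the attributes referenced in this file; the names and values below are
# placeholders, not part of the original code.
# from types import SimpleNamespace
# config = SimpleNamespace(dim=8, n_hidden=50, n_train=1000, sample_size=100,
#                          method='alpha', alpha=0.5)
# net = Model(config)
# train_op = tf.train.AdamOptimizer(1e-3).minimize(net.kl_loss)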
|
[
"tensorflow.reduce_sum",
"tensorflow.nn.relu",
"tensorflow.sqrt",
"tensorflow.summary.scalar",
"numpy.log",
"tensorflow.stop_gradient",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"numpy.zeros",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.exp",
"tensorflow.random_normal_initializer",
"tensorflow.log",
"tensorflow.reduce_max",
"tensorflow.greater",
"tensorflow.expand_dims"
] |
[((1769, 1794), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['lq'], {'axis': '(1)'}), '(lq, axis=1)\n', (1782, 1794), True, 'import tensorflow as tf\n'), ((2088, 2161), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""X"""', 'dtype': 'tf.float32', 'shape': '[None, self.config.dim]'}), "(name='X', dtype=tf.float32, shape=[None, self.config.dim])\n", (2102, 2161), True, 'import tensorflow as tf\n'), ((2215, 2271), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""y"""', 'dtype': 'tf.float32', 'shape': '[None]'}), "(name='y', dtype=tf.float32, shape=[None])\n", (2229, 2271), True, 'import tensorflow as tf\n'), ((2448, 2490), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""kl_loss"""', 'self.kl_loss'], {}), "('kl_loss', self.kl_loss)\n", (2465, 2490), True, 'import tensorflow as tf\n'), ((2575, 2617), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""batch_rmse"""', 'self.rmse'], {}), "('batch_rmse', self.rmse)\n", (2592, 2617), True, 'import tensorflow as tf\n'), ((2626, 2664), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""batch_ll"""', 'self.ll'], {}), "('batch_ll', self.ll)\n", (2643, 2664), True, 'import tensorflow as tf\n'), ((3133, 3160), 'tensorflow.reshape', 'tf.reshape', (['w1', '(k * nh, d)'], {}), '(w1, (k * nh, d))\n', (3143, 3160), True, 'import tensorflow as tf\n'), ((3243, 3270), 'tensorflow.reshape', 'tf.reshape', (['b1', '(1, k * nh)'], {}), '(b1, (1, k * nh))\n', (3253, 3270), True, 'import tensorflow as tf\n'), ((3365, 3378), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (3375, 3378), True, 'import tensorflow as tf\n'), ((3527, 3554), 'tensorflow.reshape', 'tf.reshape', (['w2', '(1, k * nh)'], {}), '(w2, (1, k * nh))\n', (3537, 3554), True, 'import tensorflow as tf\n'), ((3583, 3621), 'tensorflow.reshape', 'tf.reshape', (['samples_q[:, nh:]', '(1, -1)'], {}), '(samples_q[:, nh:], (1, -1))\n', (3593, 3621), True, 'import tensorflow as tf\n'), ((5042, 5059), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['wx'], {}), '(wx)\n', (5055, 5059), True, 'import tensorflow as tf\n'), ((5161, 5195), 'tensorflow.exp', 'tf.exp', (['self.q_approx._log_v_noise'], {}), '(self.q_approx._log_v_noise)\n', (5167, 5195), True, 'import tensorflow as tf\n'), ((5600, 5620), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['wx'], {}), '(wx)\n', (5616, 5620), True, 'import tensorflow as tf\n'), ((5637, 5668), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(wx * (lqx - lpx))'], {}), '(wx * (lqx - lpx))\n', (5650, 5668), True, 'import tensorflow as tf\n'), ((6031, 6058), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['lik'], {'axis': '(0)'}), '(lik, axis=0)\n', (6045, 6058), True, 'import tensorflow as tf\n'), ((6203, 6232), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_p0'], {'axis': '(1)'}), '(log_p0, axis=1)\n', (6216, 6232), True, 'import tensorflow as tf\n'), ((292, 321), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (309, 321), True, 'import tensorflow as tf\n'), ((1631, 1651), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['qv'], {}), '(qv)\n', (1647, 1651), True, 'import tensorflow as tf\n'), ((1669, 1689), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['qm'], {}), '(qm)\n', (1685, 1689), True, 'import tensorflow as tf\n'), ((3298, 3332), 'tensorflow.matmul', 'tf.matmul', (['X', 'w1'], {'transpose_b': '(True)'}), '(X, w1, transpose_b=True)\n', (3307, 3332), True, 'import tensorflow as tf\n'), ((3829, 3863), 'tensorflow.exp', 'tf.exp', (['self.q_approx._log_v_noise'], {}), 
'(self.q_approx._log_v_noise)\n', (3835, 3863), True, 'import tensorflow as tf\n'), ((4475, 4494), 'tensorflow.reduce_max', 'tf.reduce_max', (['diff'], {}), '(diff)\n', (4488, 4494), True, 'import tensorflow as tf\n'), ((4512, 4524), 'tensorflow.exp', 'tf.exp', (['diff'], {}), '(diff)\n', (4518, 4524), True, 'import tensorflow as tf\n'), ((1133, 1158), 'tensorflow.sqrt', 'tf.sqrt', (["self.params['v']"], {}), "(self.params['v'])\n", (1140, 1158), True, 'import tensorflow as tf\n'), ((1709, 1728), 'tensorflow.log', 'tf.log', (['(2 * pi * qv)'], {}), '(2 * pi * qv)\n', (1715, 1728), True, 'import tensorflow as tf\n'), ((4035, 4059), 'tensorflow.log', 'tf.log', (['(2 * pi * v_noise)'], {}), '(2 * pi * v_noise)\n', (4041, 4059), True, 'import tensorflow as tf\n'), ((4688, 4709), 'tensorflow.greater', 'tf.greater', (['prob', '(0.5)'], {}), '(prob, 0.5)\n', (4698, 4709), True, 'import tensorflow as tf\n'), ((4740, 4767), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (4753, 4767), True, 'import tensorflow as tf\n'), ((4925, 4944), 'tensorflow.reduce_max', 'tf.reduce_max', (['diff'], {}), '(diff)\n', (4938, 4944), True, 'import tensorflow as tf\n'), ((4962, 4974), 'tensorflow.exp', 'tf.exp', (['diff'], {}), '(diff)\n', (4968, 4974), True, 'import tensorflow as tf\n'), ((5941, 5965), 'tensorflow.log', 'tf.log', (['(2 * pi * v_noise)'], {}), '(2 * pi * v_noise)\n', (5947, 5965), True, 'import tensorflow as tf\n'), ((6121, 6150), 'tensorflow.log', 'tf.log', (['(2 * pi * self.v_prior)'], {}), '(2 * pi * self.v_prior)\n', (6127, 6150), True, 'import tensorflow as tf\n'), ((554, 595), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (582, 595), True, 'import tensorflow as tf\n'), ((1319, 1346), 'tensorflow.exp', 'tf.exp', (['(-self._log_variance)'], {}), '(-self._log_variance)\n', (1325, 1346), True, 'import tensorflow as tf\n'), ((3671, 3699), 'tensorflow.reshape', 'tf.reshape', (['(h * w2)', '(-1, nh)'], {}), '(h * w2, (-1, nh))\n', (3681, 3699), True, 'import tensorflow as tf\n'), ((4170, 4207), 'tensorflow.log', 'tf.log', (['(1.0 * self.config.sample_size)'], {}), '(1.0 * self.config.sample_size)\n', (4176, 4207), True, 'import tensorflow as tf\n'), ((4552, 4573), 'tensorflow.expand_dims', 'tf.expand_dims', (['dx', '(1)'], {}), '(dx, 1)\n', (4566, 4573), True, 'import tensorflow as tf\n'), ((4576, 4597), 'tensorflow.expand_dims', 'tf.expand_dims', (['dx', '(0)'], {}), '(dx, 0)\n', (4590, 4597), True, 'import tensorflow as tf\n'), ((4259, 4280), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['py', '(1)'], {}), '(py, 1)\n', (4273, 4280), True, 'import tensorflow as tf\n'), ((4069, 4089), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (4083, 4089), True, 'import tensorflow as tf\n'), ((5975, 5995), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (5989, 5995), True, 'import tensorflow as tf\n'), ((879, 890), 'numpy.log', 'np.log', (['(1.0)'], {}), '(1.0)\n', (885, 890), True, 'import numpy as np\n'), ((708, 733), 'numpy.zeros', 'np.zeros', (['self.param_size'], {}), '(self.param_size)\n', (716, 733), True, 'import numpy as np\n')]
|
"""
RB-related functions of gates and models
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import warnings as _warnings
import numpy as _np
from pygsti.tools import matrixtools as _mtls
from pygsti.tools import optools as _optls
from pygsti.tools import rbtools as _rbtls
def predicted_rb_number(model, target_model, weights=None, d=None, rtype='EI'):
"""
Predicts the RB error rate from a model.
Uses the "L-matrix" theory from Proctor et al Phys. Rev. Lett. 119, 130502
(2017). Note that this gives the same predictions as the theory in Wallman
Quantum 2, 47 (2018).
This theory is valid for various types of RB, including standard
Clifford RB -- i.e., it will accurately predict the per-Clifford
error rate reported by standard Clifford RB. It is also valid for
"direct RB" under broad circumstances.
For this function to be valid the model should be trace preserving
and completely positive in some representation, but the particular
representation of the model used is irrelevant, as the predicted RB
error rate is a gauge-invariant quantity. The function is likely reliable
when complete positivity is slightly violated, although the theory on
which it is based assumes complete positivity.
Parameters
----------
model : Model
The model to calculate the RB number of. This model is the
model randomly sampled over, so this is not necessarily the
set of physical primitives. In Clifford RB this is a set of
Clifford gates; in "direct RB" this normally would be the
physical primitives.
target_model : Model
The target model, corresponding to `model`. This function is not invariant
under swapping `model` and `target_model`: this Model must be the target model,
        and should consist of perfect gates.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `model` and the values are the unnormalized probabilities to apply
each gate at each stage of the RB protocol. If not None, the values
        in weights must all be non-negative, and they must not all be zero,
        because, when divided by their sum, they must form a valid probability
distribution. If None, the weighting defaults to an equal weighting
on all gates, as this is used in many RB protocols (e.g., Clifford RB).
But, this weighting is flexible in the "direct RB" protocol.
d : int, optional
The Hilbert space dimension. If None, then sqrt(model.dim) is used.
rtype : str, optional
The type of RB error rate, either "EI" or "AGI", corresponding to
different dimension-dependent rescalings of the RB decay constant
p obtained from fitting to Pm = A + Bp^m. "EI" corresponds to
an RB error rate that is associated with entanglement infidelity, which
is the probability of error for a gate with stochastic errors. This is
the RB error rate defined in the "direct RB" protocol, and is given by:
        r = (d^2 - 1)(1 - p)/d^2.
The AGI-type r is given by
r = (d - 1)(1 - p)/d,
which is the conventional r definition in Clifford RB. This r is
associated with (gate-averaged) average gate infidelity.
Returns
-------
r : float.
The predicted RB number.
"""
if d is None: d = int(round(_np.sqrt(model.dim)))
p = predicted_rb_decay_parameter(model, target_model, weights=weights)
r = _rbtls.p_to_r(p, d=d, rtype=rtype)
return r
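# Worked example (added): for a single qubit, d = 2, with decay parameter p = 0.99,
# the 'EI' convention gives r = (d**2 - 1) * (1 - p) / d**2 = 3 * 0.01 / 4 = 0.0075,
# while the 'AGI' convention gives r = (d - 1) * (1 - p) / d = 1 * 0.01 / 2 = 0.005.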
def predicted_rb_decay_parameter(model, target_model, weights=None):
"""
Computes the second largest eigenvalue of the 'L matrix' (see the `L_matrix` function).
For standard Clifford RB and direct RB, this corresponds to the RB decay
parameter p in Pm = A + Bp^m for "reasonably low error" trace preserving and
completely positive gates. See also the `predicted_rb_number` function.
Parameters
----------
model : Model
The model to calculate the RB decay parameter of. This model is the
model randomly sampled over, so this is not necessarily the
set of physical primitives. In Clifford RB this is a set of
Clifford gates; in "direct RB" this normally would be the
physical primitives.
target_model : Model
The target model corresponding to model. This function is not invariant under
swapping `model` and `target_model`: this Model must be the target model, and
        should consist of perfect gates.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `model` and the values are the unnormalized probabilities to apply
each gate at each stage of the RB protocol. If not None, the values
        in weights must all be non-negative, and they must not all be zero,
        because, when divided by their sum, they must form a valid probability
distribution. If None, the weighting defaults to an equal weighting
on all gates, as this is used in many RB protocols (e.g., Clifford RB).
But, this weighting is flexible in the "direct RB" protocol.
Returns
-------
p : float.
The second largest eigenvalue of L. This is the RB decay parameter
for various types of RB.
"""
L = L_matrix(model, target_model, weights=weights)
E = _np.absolute(_np.linalg.eigvals(L))
E = _np.flipud(_np.sort(E))
if abs(E[0] - 1) > 10**(-12):
_warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.")
if E[1].imag > 10**(-10):
_warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.")
p = abs(E[1])
return p
def rb_gauge(model, target_model, weights=None, mx_basis=None, eigenvector_weighting=1.0):
"""
Computes the gauge transformation required so that the RB number matches the average model infidelity.
This function computes the gauge transformation required so that, when the
model is transformed via this gauge-transformation, the RB number -- as
predicted by the function `predicted_rb_number` -- is the average model
infidelity between the transformed `model` model and the target model
`target_model`. This transformation is defined Proctor et al
Phys. Rev. Lett. 119, 130502 (2017), and see also Wallman Quantum 2, 47
(2018).
Parameters
----------
model : Model
The RB model. This is not necessarily the set of physical primitives -- it
is the model randomly sampled over in the RB protocol (e.g., the Cliffords).
target_model : Model
The target model corresponding to model. This function is not invariant under
swapping `model` and `target_model`: this Model must be the target model, and
        should consist of perfect gates.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `model` and the values are the unnormalized probabilities to apply
each gate at each stage of the RB protocol. If not None, the values
        in weights must all be non-negative, and they must not all be zero,
        because, when divided by their sum, they must form a valid probability
distribution. If None, the weighting defaults to an equal weighting
on all gates, as this is used in many RB protocols (e.g., Clifford RB).
But, this weighting is flexible in the "direct RB" protocol.
mx_basis : {"std","gm","pp"}, optional
The basis of the models. If None, the basis is obtained from the model.
eigenvector_weighting : float, optional
Must be non-zero. A weighting on the eigenvector with eigenvalue that
is the RB decay parameter, in the sum of this eigenvector and the
eigenvector with eigenvalue of 1 that defines the returned matrix `l_operator`.
The value of this factor does not change whether this `l_operator` transforms into
        a gauge in which r = AGsI, but it may affect other properties of the
gates in that gauge. It is irrelevant if the gates are unital.
Returns
-------
l_operator : array
The matrix defining the gauge-transformation.
"""
gam, vecs = _np.linalg.eig(L_matrix(model, target_model, weights=weights))
absgam = abs(gam)
index_max = _np.argmax(absgam)
gam_max = gam[index_max]
if abs(gam_max - 1) > 10**(-12):
_warnings.warn("Output may be unreliable because the model is not approximately trace-preserving.")
absgam[index_max] = 0.0
index_2ndmax = _np.argmax(absgam)
decay_constant = gam[index_2ndmax]
if decay_constant.imag > 10**(-12):
_warnings.warn("Output may be unreliable because the RB decay constant has a significant imaginary component.")
vec_l_operator = vecs[:, index_max] + eigenvector_weighting * vecs[:, index_2ndmax]
if mx_basis is None:
mx_basis = model.basis.name
assert(mx_basis == 'pp' or mx_basis == 'gm' or mx_basis == 'std'), "mx_basis must be 'gm', 'pp' or 'std'."
if mx_basis in ('pp', 'gm'):
assert(_np.amax(vec_l_operator.imag) < 10**(-15)), "If 'gm' or 'pp' basis, RB gauge matrix should be real."
vec_l_operator = vec_l_operator.real
vec_l_operator[abs(vec_l_operator) < 10**(-15)] = 0.
l_operator = _mtls.unvec(vec_l_operator)
return l_operator
def transform_to_rb_gauge(model, target_model, weights=None, mx_basis=None, eigenvector_weighting=1.0):
"""
Transforms a Model into the "RB gauge" (see the `RB_gauge` function).
This notion was introduced in Proctor et al Phys. Rev. Lett. 119, 130502
(2017). This gauge is a function of both the model and its target. These may
be input in any gauge, for the purposes of obtaining "r = average model
infidelity" between the output :class:`Model` and `target_model`.
Parameters
----------
model : Model
The RB model. This is not necessarily the set of physical primitives -- it
is the model randomly sampled over in the RB protocol (e.g., the Cliffords).
target_model : Model
The target model corresponding to model. This function is not invariant under
swapping `model` and `target_model`: this Model must be the target model, and
        should consist of perfect gates.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `model` and the values are the unnormalized probabilities to apply
each gate at each stage of the RB protocol. If not None, the values
        in weights must all be non-negative, and they must not all be zero,
        because, when divided by their sum, they must form a valid probability
distribution. If None, the weighting defaults to an equal weighting
on all gates, as this is used in many RB protocols (e.g., Clifford RB).
But, this weighting is flexible in the "direct RB" protocol.
mx_basis : {"std","gm","pp"}, optional
The basis of the models. If None, the basis is obtained from the model.
eigenvector_weighting : float, optional
Must be non-zero. A weighting on the eigenvector with eigenvalue that
is the RB decay parameter, in the sum of this eigenvector and the
eigenvector with eigenvalue of 1 that defines the returned matrix `l_operator`.
The value of this factor does not change whether this `l_operator` transforms into
        a gauge in which r = AGsI, but it may affect other properties of the
gates in that gauge. It is irrelevant if the gates are unital.
Returns
-------
model_in_RB_gauge : Model
The model `model` transformed into the "RB gauge".
"""
from ..models.gaugegroup import FullGaugeGroupElement as _FullGaugeGroupElement
l = rb_gauge(model, target_model, weights=weights, mx_basis=mx_basis,
eigenvector_weighting=eigenvector_weighting)
model_in_RB_gauge = model.copy()
S = _FullGaugeGroupElement(_np.linalg.inv(l))
model_in_RB_gauge.transform_inplace(S)
return model_in_RB_gauge
def L_matrix(model, target_model, weights=None): # noqa N802
"""
Constructs a generalization of the 'L-matrix' linear operator on superoperators.
From Proctor et al Phys. Rev. Lett. 119, 130502 (2017), the 'L-matrix' is
represented as a matrix via the "stack" operation. The eigenvalues of this
matrix describe the decay constant (or constants) in an RB decay curve for
an RB protocol whereby random elements of the provided model are sampled
according to the `weights` probability distribution over the model. So, this
facilitates predictions of Clifford RB and direct RB decay curves.
Parameters
----------
model : Model
The RB model. This is not necessarily the set of physical primitives -- it
is the model randomly sampled over in the RB protocol (e.g., the Cliffords).
target_model : Model
The target model corresponding to model. This function is not invariant under
swapping `model` and `target_model`: this Model must be the target model, and
should consist of perfect gates.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `model` and the values are the unnormalized probabilities to apply
each gate at each stage of the RB protocol. If not None, the values
in weights must all be non-negative and must not all be zero, because,
when divided by their sum, they must form a valid probability
distribution. If None, the weighting defaults to an equal weighting
on all gates, as is used in many RB protocols (e.g., Clifford RB);
the weighting is flexible in the "direct RB" protocol.
Returns
-------
L : numpy.ndarray
A weighted version of the L operator from Proctor et al Phys. Rev. Lett.
119, 130502 (2017), represented as a matrix using the 'stacking' convention.
"""
if weights is None:
weights = {}
for key in list(target_model.operations.keys()):
weights[key] = 1.
normalizer = _np.sum(_np.array([weights[key] for key in list(target_model.operations.keys())]))
L_matrix = (1 / normalizer) * sum(  # builtin sum: np.sum over a generator is deprecated
weights[key] * _np.kron(
model.operations[key].to_dense(on_space='HilbertSchmidt').T,
_np.linalg.inv(target_model.operations[key].to_dense(on_space='HilbertSchmidt'))
) for key in target_model.operations.keys())
return L_matrix
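# --- Illustrative sketch: reading the RB decay off L_matrix (comment only) ---
# `noisy_model` / `ideal_model` are placeholders as above; this mirrors what
# R_matrix_predicted_rb_decay_parameter does for the R-matrix.
#
#   L = L_matrix(noisy_model, ideal_model)
#   evals = _np.flipud(_np.sort(_np.absolute(_np.linalg.eigvals(L))))
#   p_pred = evals[1]   # second-largest |eigenvalue| ~ RB decay parameter
#   # For an approximately trace-preserving model the largest eigenvalue is ~1,
#   # so p_pred sets the dominant decay of the RB curve A + B * p**m.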
def R_matrix_predicted_rb_decay_parameter(model, group, group_to_model=None, weights=None): # noqa N802
"""
Returns the second largest eigenvalue of a generalization of the 'R-matrix' [see the `R_matrix` function].
Introduced in Proctor et al Phys. Rev. Lett. 119, 130502 (2017). This
number is a prediction of the RB decay parameter for trace-preserving gates
and a variety of forms of RB, including Clifford and direct RB. This
function creates a matrix which scales super-exponentially in the number of
qubits.
Parameters
----------
model : Model
The model to predict the RB decay parameter for. If `group_to_model` is
None, the labels of the gates in `model` should be the same as the labels of the
group elements in `group`. For Clifford RB this would be the Clifford model;
for direct RB it would be the primitive gates.
group : MatrixGroup
The group that the `model` model contains gates from (`model` does not
need to be the full group, and could be a subset of `group`). For
Clifford RB and direct RB, this would be the Clifford group.
group_to_model : dict, optional
If not None, a dictionary that maps labels of group elements to labels
of `model`. If `model` and `group` elements have the same labels, this dictionary
is not required. Otherwise it is necessary.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates in `model`
and the values are the unnormalized probabilities to apply each gate at
each stage of the RB protocol. If not None, the values in weights must all
be positive or zero, and they must not all be zero (because, when divided by
their sum, they must be a valid probability distribution). If None, the
weighting defaults to an equal weighting on all gates, as used in most RB
protocols.
Returns
-------
p : float
The predicted RB decay parameter. Valid for standard Clifford RB or direct RB
with trace-preserving gates, and in a range of other circumstances.
"""
R = R_matrix(model, group, group_to_model=group_to_model, weights=weights)
E = _np.absolute(_np.linalg.eigvals(R))
E = _np.flipud(_np.sort(E))
p = E[1]
return p
def R_matrix(model, group, group_to_model=None, weights=None): # noqa N802
"""
Constructs a generalization of the 'R-matrix' of Proctor et al Phys. Rev. Lett. 119, 130502 (2017).
This matrix describes the exact behaviour of the average success
probabilities of RB sequences. This matrix is super-exponentially large in
the number of qubits, but can be constructed for 1-qubit models.
Parameters
----------
model : Model
The noisy model (e.g., the Cliffords) to calculate the R matrix of.
The corresponding `target` model (not required in this function)
must be equal to or a subset of (a faithful representation of) the group `group`.
If `group_to_model` is None, the labels of the gates in model should be
the same as the labels of the corresponding group elements in `group`.
For Clifford RB `model` should be the Clifford model; for direct RB
this should be the native model.
group : MatrixGroup
The group that the `model` model contains gates from. For Clifford RB
or direct RB, this would be the Clifford group.
group_to_model : dict, optional
If not None, a dictionary that maps labels of group elements to labels
of model. This is required if the labels of the gates in `model` are different
from the labels of the corresponding group elements in `group`.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates in model
and the values are the unnormalized probabilities to apply each gate
at each layer of the RB protocol. If None, the weighting defaults to an
equal weighting on all gates, as used in most RB protocols (e.g., Clifford
RB).
Returns
-------
R : numpy.ndarray
A weighted, subset-sampling generalization of the 'R-matrix' from Proctor
et al Phys. Rev. Lett. 119, 130502 (2017).
"""
if group_to_model is None:
for key in list(model.operations.keys()):
assert(key in group.labels), "Gate labels are not in `group`!"
else:
for key in list(model.operations.keys()):
assert(key in group_to_model.values()), "Gate labels are not in `group_to_model`!"
d = int(round(_np.sqrt(model.dim)))
group_dim = len(group)
R_dim = group_dim * d**2
R = _np.zeros([R_dim, R_dim], float)
if weights is None:
weights = {}
for key in list(model.operations.keys()):
weights[key] = 1.
normalizer = _np.sum(_np.array([weights[key] for key in list(model.operations.keys())]))
for i in range(0, group_dim):
for j in range(0, group_dim):
label_itoj = group.labels[group.product([group.inverse_index(i), j])]
if group_to_model is not None:
if label_itoj in group_to_model:
gslabel = group_to_model[label_itoj]
R[j * d**2:(j + 1) * d**2, i * d**2:(i + 1) * d**2] = weights[gslabel] * model.operations[gslabel]
else:
if label_itoj in list(model.operations.keys()):
gslabel = label_itoj
R[j * d**2:(j + 1) * d**2, i * d**2:(i + 1) * d**2] = weights[gslabel] * model.operations[gslabel]
R = R / normalizer
return R
### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst".
### THIS SHOULD BE ADDED BACK IN AT SOME POINT.
# def exact_rb_asps(model, group, m_max, m_min=0, m_step=1, success_outcomelabel=('0',),
# group_to_model=None, weights=None, compilation=None, group_twirled=False):
# """
# Calculates the exact RB average success probablilites (ASP).
# Uses some generalizations of the formula given Proctor et al
# Phys. Rev. Lett. 119, 130502 (2017). This formula does not scale well with
# group size and qubit number, and for the Clifford group it is likely only
# practical for a single qubit.
# Parameters
# ----------
# model : Model
# The noisy model (e.g., the Cliffords) to calculate the R matrix of.
# The correpsonding `target` model (not required in this function)
# must be equal to or a subset of (a faithful rep of) the group `group`.
# If group_to_model is None, the labels of the gates in model should be
# the same as the labels of the corresponding group elements in `group`.
# For Clifford RB `model` should be the clifford model; for direct RB
# this should be the native model.
# group : MatrixGroup
# The group that the `model` model contains gates from. For Clifford RB
# or direct RB, this would be the Clifford group.
# m_max : int
# The maximal sequence length of the random gates, not including the
# inversion gate.
# m_min : int, optional
# The minimal sequence length. Defaults to the smallest valid value of 0.
# m_step : int, optional
# The step size between sequence lengths. Defaults to the smallest valid
# value of 1.
# success_outcomelabel : str or tuple, optional
# The outcome label associated with success.
# group_to_model : dict, optional
# If not None, a dictionary that maps labels of group elements to labels
# of model. This is required if the labels of the gates in `model` are different
# from the labels of the corresponding group elements in `group`.
# weights : dict, optional
# If not None, a dictionary of floats, whereby the keys are the gates in model
# and the values are the unnormalized probabilities to apply each gate at
# for each layer of the RB protocol. If None, the weighting defaults to an
# equal weighting on all gates, as used in most RB protocols (e.g., Clifford
# RB).
# compilation : dict, optional
# If `model` is not the full group `group` (with the same labels), then a
# compilation for the group elements, used to implement the inversion gate
# (and the initial randomgroup element, if `group_twirled` is True). This
# is a dictionary with the group labels as keys and a gate sequence of the
# elements of `model` as values.
# group_twirled : bool, optional
# If True, the random sequence starts with a single uniformly random group
# element before the m random elements of `model`.
# Returns
# -------
# m : float
# Array of sequence length values that the ASPs have been calculated for.
# P_m : float
# Array containing ASP values for the specified sequence length values.
# """
# if compilation is None:
# for key in list(model.operations.keys()):
# assert(key in group.labels), "Gates labels are not in `group`, so `compilation must be specified."
# for label in group.labels:
# assert(label in list(model.operations.keys())
# ), "Some group elements not in `model`, so `compilation must be specified."
# i_max = _np.floor((m_max - m_min) / m_step).astype('int')
# m = _np.zeros(1 + i_max, int)
# P_m = _np.zeros(1 + i_max, float)
# group_dim = len(group)
# R = R_matrix(model, group, group_to_model=group_to_model, weights=weights)
# success_prepLabel = list(model.preps.keys())[0] # just take first prep
# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \
# else success_outcomelabel
# extended_E = _np.kron(_mtls.column_basis_vector(0, group_dim).T, model.povms['Mdefault'][success_effectLabel].T)
# extended_rho = _np.kron(_mtls.column_basis_vector(0, group_dim), model.preps[success_prepLabel])
# if compilation is None:
# extended_E = group_dim * _np.dot(extended_E, R)
# if group_twirled is True:
# extended_rho = _np.dot(R, extended_rho)
# else:
# full_model = _cnst.create_explicit_alias_model(model, compilation)
# R_fullgroup = R_matrix(full_model, group)
# extended_E = group_dim * _np.dot(extended_E, R_fullgroup)
# if group_twirled is True:
# extended_rho = _np.dot(R_fullgroup, extended_rho)
# Rstep = _np.linalg.matrix_power(R, m_step)
# Riterate = _np.linalg.matrix_power(R, m_min)
# for i in range(0, 1 + i_max):
# m[i] = m_min + i * m_step
# P_m[i] = _np.dot(extended_E, _np.dot(Riterate, extended_rho))
# Riterate = _np.dot(Rstep, Riterate)
# return m, P_m
### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst"
### THIS SHOULD BE ADDED BACK IN AT SOME POINT.
# def L_matrix_asps(model, target_model, m_max, m_min=0, m_step=1, success_outcomelabel=('0',), # noqa N802
# compilation=None, group_twirled=False, weights=None, gauge_optimize=True,
# return_error_bounds=False, norm='diamond'):
# """
# Computes RB average survival probablities, as predicted by the 'L-matrix' theory.
# This theory was introduced in Proctor et al Phys. Rev. Lett. 119, 130502
# (2017). Within the function, the model is gauge-optimized to target_model. This is
# *not* optimized to the gauge specified by Proctor et al, but instead performs the
# standard pyGSTi gauge-optimization (using the frobenius distance). In most cases,
# this is likely to be a reasonable proxy for the gauge optimization perscribed by
# Proctor et al.
# Parameters
# ----------
# model : Model
# The noisy model.
# target_model : Model
# The target model.
# m_max : int
# The maximal sequence length of the random gates, not including the inversion gate.
# m_min : int, optional
# The minimal sequence length. Defaults to the smallest valid value of 0.
# m_step : int, optional
# The step size between sequence lengths.
# success_outcomelabel : str or tuple, optional
# The outcome label associated with success.
# compilation : dict, optional
# If `model` is not the full group, then a compilation for the group elements,
# used to implement the inversion gate (and the initial random group element,
# if `group_twirled` is True). This is a dictionary with the group labels as
# keys and a gate sequence of the elements of `model` as values.
# group_twirled : bool, optional
# If True, the random sequence starts with a single uniformly random group
# element before the m random elements of `model`.
# weights : dict, optional
# If not None, a dictionary of floats, whereby the keys are the gates in model
# and the values are the unnormalized probabilities to apply each gate at
# for each layer of the RB protocol. If None, the weighting defaults to an
# equal weighting on all gates, as used in most RB protocols (e.g., Clifford
# RB).
# gauge_optimize : bool, optional
# If True a gauge-optimization to the target model is implemented before
# calculating all quantities. If False, no gauge optimization is performed.
# Whether or not a gauge optimization is performed does not affect the rate of
# decay but it will generally affect the exact form of the decay. E.g., if a
# perfect model is given to the function -- but in the "wrong" gauge -- no
# decay will be observed in the output P_m, but the P_m can be far from 1 (even
# for perfect SPAM) for all m. The gauge optimization is optional, as it is
# not guaranteed to always improve the accuracy of the reported P_m, although when
# gauge optimization is performed this limits the possible deviations of the
# reported P_m from the true P_m.
# return_error_bounds : bool, optional
# Sets whether or not to return error bounds for how far the true ASPs can deviate
# from the values returned by this function.
# norm : str, optional
# The norm used in the error bound calculation. Either 'diamond' for the diamond
# norm (the default) or '1to1' for the Hermitian 1 to 1 norm.
# Returns
# -------
# m : float
# Array of sequence length values that the ASPs have been calculated for.
# P_m : float
# Array containing predicted ASP values for the specified sequence length values.
# if error_bounds is True :
# lower_bound: float
# Array containing lower bounds on the possible ASP values
# upper_bound: float
# Array containing upper bounds on the possible ASP values
# """
# d = int(round(_np.sqrt(model.dim)))
# if gauge_optimize:
# model_go = _algs.gaugeopt_to_target(model, target_model)
# else:
# model_go = model.copy()
# L = L_matrix(model_go, target_model, weights=weights)
# success_prepLabel = list(model.preps.keys())[0] # just take first prep
# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \
# else success_outcomelabel
# identity_vec = _mtls.vec(_np.identity(d**2, float))
# if compilation is not None:
# model_group = _cnst.create_explicit_alias_model(model_go, compilation)
# model_target_group = _cnst.create_explicit_alias_model(target_model, compilation)
# delta = gate_dependence_of_errormaps(model_group, model_target_group, norm=norm)
# emaps = errormaps(model_group, model_target_group)
# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg'])
# if group_twirled is True:
# L_group = L_matrix(model_group, model_target_group)
# if compilation is None:
# delta = gate_dependence_of_errormaps(model_go, target_model, norm=norm)
# emaps = errormaps(model_go, target_model)
# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg'])
# i_max = _np.floor((m_max - m_min) / m_step).astype('int')
# m = _np.zeros(1 + i_max, int)
# P_m = _np.zeros(1 + i_max, float)
# upper_bound = _np.zeros(1 + i_max, float)
# lower_bound = _np.zeros(1 + i_max, float)
# Lstep = _np.linalg.matrix_power(L, m_step)
# Literate = _np.linalg.matrix_power(L, m_min)
# for i in range(0, 1 + i_max):
# m[i] = m_min + i * m_step
# if group_twirled:
# L_m_rdd = _mtls.unvec(_np.dot(L_group, _np.dot(Literate, identity_vec)))
# else:
# L_m_rdd = _mtls.unvec(_np.dot(Literate, identity_vec))
# P_m[i] = _np.dot(E_eff, _np.dot(L_m_rdd, model_go.preps[success_prepLabel]))
# Literate = _np.dot(Lstep, Literate)
# upper_bound[i] = P_m[i] + delta / 2
# lower_bound[i] = P_m[i] - delta / 2
# if upper_bound[i] > 1:
# upper_bound[i] = 1.
# if lower_bound[i] < 0:
# lower_bound[i] = 0.
# if return_error_bounds:
# return m, P_m, lower_bound, upper_bound
# else:
# return m, P_m
def errormaps(model, target_model):
"""
Computes the 'left-multiplied' error maps associated with a noisy gate set, along with the average error map.
This is the model [E_1,...] such that
`G_i = E_iT_i`,
where `T_i` is the gate of which `G_i` is a noisy
implementation. There is an additional gate in the set, with the
key 'Gavg', which is the average of the error maps.
Parameters
----------
model : Model
The imperfect model.
target_model : Model
The target model.
Returns
-------
errormaps : Model
The left-multiplied error gates, along with the average error map,
with the key 'Gavg'.
"""
errormaps_gate_list = []
errormaps = model.copy()
for gate in list(target_model.operations.keys()):
errormaps.operations[gate] = _np.dot(model.operations[gate],
_np.transpose(target_model.operations[gate]))
errormaps_gate_list.append(errormaps.operations[gate])
errormaps.operations['Gavg'] = _np.mean(_np.array([i for i in errormaps_gate_list]),
axis=0, dtype=_np.float64)
return errormaps
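# A tiny, self-contained numpy check of the error-map convention used above
# (illustrative only; the 2x2 rotations below are stand-ins for superoperators,
# not part of pyGSTi's API).
def _errormap_convention_demo():
    """`errormaps` recovers the left error map as E = G @ T.T, which equals
    G @ inv(T) whenever the target superoperator T is orthogonal (as it is for
    unitary gates in the 'gm'/'pp' bases)."""
    theta, eps = 0.5, 0.05
    T = _np.array([[_np.cos(theta), -_np.sin(theta)],
                   [_np.sin(theta), _np.cos(theta)]])   # ideal, orthogonal target
    E = _np.array([[_np.cos(eps), -_np.sin(eps)],
                   [_np.sin(eps), _np.cos(eps)]])       # small left error map
    G = E @ T                                           # noisy gate, G = E T
    assert _np.allclose(G @ T.T, E)                     # E recovered as G @ T.T
    return E, G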
def gate_dependence_of_errormaps(model, target_model, norm='diamond', mx_basis=None):
"""
Computes the "gate-dependence of errors maps" parameter defined by
delta_avg = avg_i|| E_i - avg_i(E_i) ||,
where E_i are the error maps, and the norm is either the diamond norm
or the 1-to-1 norm. This quantity is defined in Magesan et al PRA 85
042311 2012.
Parameters
----------
model : Model
The actual model
target_model : Model
The target model.
norm : str, optional
The norm used in the calculation. Can be either 'diamond' for
the diamond norm, or '1to1' for the Hermitian 1 to 1 norm.
mx_basis : {"std","gm","pp"}, optional
The basis of the models. If None, the basis is obtained from
the model.
Returns
-------
delta_avg : float
The value of the parameter defined above.
"""
error_gs = errormaps(model, target_model)
delta = []
if mx_basis is None:
mx_basis = model.basis.name
assert(mx_basis == 'pp' or mx_basis == 'gm' or mx_basis == 'std'), "mx_basis must be 'gm', 'pp' or 'std'."
for gate in list(target_model.operations.keys()):
if norm == 'diamond':
print(error_gs.operations[gate])
print(error_gs.operations['Gavg'])
delta.append(_optls.diamonddist(error_gs.operations[gate], error_gs.operations['Gavg'],
mx_basis=mx_basis))
elif norm == '1to1':
gate_dif = error_gs.operations[gate] - error_gs.operations['Gavg']
delta.append(_optls.norm1to1(gate_dif, num_samples=1000, mx_basis=mx_basis, return_list=False))
else:
raise ValueError("Only diamond or 1to1 norm available.")
delta_avg = _np.mean(delta)
return delta_avg
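# Illustrative helper (an assumption, not part of pyGSTi) showing the delta_avg
# averaging with a plain Frobenius norm standing in for the diamond / 1-to-1
# norms used above; useful only as a sanity check of the formula.
def _delta_avg_frobenius_demo(error_maps):
    """`error_maps` is a list of equal-shaped numpy arrays E_i; returns
    avg_i || E_i - avg_i(E_i) ||_F (Frobenius norm, for illustration only)."""
    e_avg = _np.mean(_np.array(error_maps), axis=0)
    return _np.mean([_np.linalg.norm(e - e_avg) for e in error_maps])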
# Future : perhaps put these back in.
#def Magesan_theory_predicted_decay(model, target_model, mlist, success_outcomelabel=('0',),
# norm='1to1', order='zeroth', return_all = False):
#
# assert(order == 'zeroth' or order == 'first')
#
# d = int(round(_np.sqrt(model.dim)))
# MTPs = {}
# MTPs['r'] = gateset_infidelity(model,target_model,itype='AGI')
# MTPs['p'] = _analysis.r_to_p(MTPs['r'],d,rtype='AGI')
# MTPs['delta'] = gate_dependence_of_errormaps(model, target_model, norm)
# error_gs = errormaps(model, target_model)
#
# R_list = []
# Q_list = []
# for gate in list(target_model.operations.keys()):
# R_list.append(_np.dot(_np.dot(error_gs.operations[gate],target_model.operations[gate]),
# _np.dot(error_gs.operations['Gavg'],_np.transpose(target_model.operations[gate]))))
# Q_list.append(_np.dot(target_model.operations[gate],
# _np.dot(error_gs.operations[gate],_np.transpose(target_model.operations[gate]))))
#
# error_gs.operations['GR'] = _np.mean(_np.array([ i for i in R_list]),axis=0)
# error_gs.operations['GQ'] = _np.mean(_np.array([ i for i in Q_list]),axis=0)
# error_gs.operations['GQ2'] = _np.dot(error_gs.operations['GQ'],error_gs.operations['Gavg'])
# error_gs.preps['rhoc_mixed'] = 1./d*_cnst.create_identity_vec(error_gs.basis)#
#
# #Assumes standard POVM labels
# povm = _objs.UnconstrainedPOVM( [('0_cm', target_model.povms['Mdefault']['0']),
# ('1_cm', target_model.povms['Mdefault']['1'])] )
# ave_error_gsl = _cnst.to_circuits([('rho0','Gavg'),('rho0','GR'),('rho0','Gavg','GQ')])
# data = _cnst.simulate_data(error_gs, ave_error_gsl, num_samples=1, sample_error="none")#
# pr_L_p = data[('rho0','Gavg')][success_outcomelabel]
# pr_L_I = data[('rho0','Gavg')][success_outcomelabel_cm]
# pr_R_p = data[('rho0','GR')][success_outcomelabel]
# pr_R_I = data[('rho0','GR')][success_outcomelabel_cm]
# pr_Q_p = data[('rho0','Gavg','GQ')][success_outcomelabel]
# p = MTPs['p']
# B_1 = pr_R_I
# A_1 = (pr_Q_p/p) - pr_L_p + ((p -1)*pr_L_I/p) + ((pr_R_p - pr_R_I)/p)
# C_1 = pr_L_p - pr_L_I
# q = _tls.average_gate_infidelity(error_gs.operations['GQ2'],_np.identity(d**2,float))
# q = _analysis.r_to_p(q,d,rtype='AGI')
#
# if order == 'zeroth':
# MTPs['A'] = pr_L_I
# MTPs['B'] = pr_L_p - pr_L_I
# if order == 'first':
# MTPs['A'] = B_1
# MTPs['B'] = A_1 - C_1*(q - 1)/p**2
# MTPs['C'] = C_1*(q- p**2)/p**2
#
# if order == 'zeroth':
# Pm = MTPs['A'] + MTPs['B']*MTPs['p']**_np.array(mlist)
# if order == 'first':
# Pm = MTPs['A'] + (MTPs['B'] + _np.array(mlist)*MTPs['C'])*MTPs['p']**_np.array(mlist)
#
# sys_eb = (MTPs['delta'] + 1)**(_np.array(mlist)+1) - 1
# if order == 'first':
# sys_eb = sys_eb - (_np.array(mlist)+1)*MTPs['delta']
#
# upper = Pm + sys_eb
# upper[upper > 1]=1.
#
# lower = Pm - sys_eb
# lower[lower < 0]=0.
#
# return mlist, Pm, upper, lower, MTPs
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 08:54:32 2021
OK so far:
swoosh h2o: 1994-2019 30S to 30N mean, 82 hpa
regressors:
QBO_CDAS = +5 months lag correlated with h2o: 0.508
Anom_nino3p4 = no lags corr with h2o: -0.167
LR:
no CV does R2 of 0.2857
Cross validate 5 kfolds: mean R2: 0.1786 std R2: 0.245
SVM:
CV 5 kfolds: mean R2: 0.418, mean adj_R2: 0.408,
std R2: 0.047, std adj_R2: 0.0485
need to plot residuals with best model.
@author: shlomi
"""
from strat_paths import work_chaim
ml_path = work_chaim / 'ML'
def split_qbo_en_ln_neut_enso(qbo):
from make_regressors import load_all_regressors
ln = load_all_regressors()['LN'].dropna('time')
en = load_all_regressors()['EN'].dropna('time')
neut = load_all_regressors()['neutENSO'].dropna('time')
qbo_en = qbo.where(en>=0.5).fillna(0)
qbo_en.name = 'qbo_en'
qbo_ln = qbo.where(ln<=-0.5).fillna(0)
qbo_ln.name = 'qbo_ln'
qbo_neut = qbo.where(neut!=0).fillna(0)
qbo_neut.name = 'qbo_neut'
return qbo_en, qbo_ln, qbo_neut
# def CV_splitter_for_xarray_time_series(X_da, time_dim='time', grp='year'):
# groups = X_da.groupby('{}.{}'.format(time_dim, grp)).groups
# sorted_groups = [value for (key, value) in sorted(groups.items())]
# cv = [(sorted_groups[i] + sorted_groups[i+1], sorted_groups[i+2])
# for i in range(len(sorted_groups)-2)]
# return cv
def ABS_SHAP(df_shap, df):
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
#import matplotlib as plt
# Make a copy of the input data
shap_v = pd.DataFrame(df_shap)
feature_list = df.columns
shap_v.columns = feature_list
df_v = df.copy().reset_index().drop('time', axis=1)
# Determine the correlation in order to plot with different colors
corr_list = list()
for i in feature_list:
b = np.corrcoef(shap_v[i], df_v[i])[1][0]
corr_list.append(b)
corr_df = pd.concat(
[pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
# Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
corr_df.columns = ['Predictor', 'Corr']
corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
# Plot it
shap_abs = np.abs(shap_v)
k = pd.DataFrame(shap_abs.mean()).reset_index()
k.columns = ['Predictor', 'SHAP_abs']
k2 = k.merge(corr_df, left_on='Predictor', right_on='Predictor', how='inner')
k2 = k2.sort_values(by='SHAP_abs', ascending=True)
colorlist = k2['Sign']
ax = k2.plot.barh(x='Predictor', y='SHAP_abs',
color=colorlist, figsize=(9, 3), legend=False)
ax.set_xlabel("SHAP Value (Red = Positive Impact)")
return
def plot_simplified_shap_tree_explainer(rf_model):
import shap
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
X = produce_X(lag={'qbo_cdas': 5}, syear='1994',
eyear='2019', add_co2=False)
y = produce_y(detrend='lowess',
lat_band_mean=[-15, 15], syear='1994', eyear='2019', standertize=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
rf_model.fit(X_train, y_train)
dfX = X_test.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO'}, axis=1)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
ABS_SHAP(shap_values_rf, dfX)
ax = plt.gca()
ax.set_xlabel(r'H$_{2}$O anomalies (STD) (Red is positive)')
return
def plot_Tree_explainer_shap(rf_model):
import shap
from sklearn.model_selection import train_test_split
X = produce_X(lag={'qbo_cdas': 5})
y = produce_y(detrend=None, lat_band_mean=[-15, 15])
X = X.sel(time=slice('1994', '2019'))
y = y.sel(time=slice('1994', '2019'))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
rf_model.fit(X_train, y_train)
dfX = X_test.to_dataset('regressor').to_dataframe()
dfX = dfX.rename(
{'qbo_cdas': 'QBO', 'anom_nino3p4': 'ENSO', 'co2': r'CO$_2$'}, axis=1)
fi = dict(zip(dfX.columns, rf_model.feature_importances_ * 100))
print(fi)
ex_rf = shap.Explainer(rf_model)
shap_values_rf = ex_rf.shap_values(dfX)
shap.summary_plot(shap_values_rf, dfX, plot_size=1.1)
return
def plot_model_prediction_fig_3():
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from sklearn.linear_model import LinearRegression
import seaborn as sns
import matplotlib.pyplot as plt
X = produce_X()
X = add_enso2_and_enso_qbo_to_X(X)
y = produce_y(detrend=None, lat_band_mean=[-15, 15])
X_test = X.sel(time=slice('1994', '2019'))
y_test = y.sel(time=slice('1994', '2019'))
X_train = X.sel(time=slice('2005', '2019'))
y_train = y.sel(time=slice('2005', '2019'))
lr = LinearRegression()
rds = make_results_for_MLR(lr, X_train, y_train, X_test=X_test, y_test=y_test)
df = rds['predict'].to_dataframe()
df['y_true'] = y_test.to_dataframe()
df['resid'] = df['predict'] - df['y_true']
df = df.rename({'resid': 'Residuals', 'predict': 'MLR', 'y_true': 'SWOOSH'}, axis=1)
sns.set_theme(style='ticks', font_scale=1.5)
fig, ax = plt.subplots(2, 1, figsize=(18, 7))
df[['SWOOSH', 'MLR']].plot(ax=ax[0], color=['tab:purple', 'tab:red'])
df[['Residuals']].plot(ax=ax[1], color='k', legend=False)
[x.grid(True) for x in ax]
[x.set_xlabel('') for x in ax]
ax[0].set_ylabel(r'H$_{2}$O anomalies [std]')
ax[1].set_ylabel(r'H$_{2}$O residuals [std]')
[x.xaxis.set_minor_locator(AutoMinorLocator()) for x in ax]
[x.xaxis.grid(True, which='minor') for x in ax]
# legend = ax.legend(prop={'size': 13}, ncol=5, loc='upper left')
plot_forecast_busts_lines_datetime(ax[0], color='k')
fig.tight_layout()
# # get handles and labels of legend:
# hands, labes = ax.get_legend_handles_labels()
# colors = [x.get_color() for x in hands]
# # change the text labels to the colors of the lines:
# for i, text in enumerate(legend.get_texts()):
# text.set_color(colors[i])
return fig
def plot_beta_coeffs(rds, col_wrap=3, figsize=(13, 6), extent=[-170, 170, -57.5, 57.5], drop_co2=True):
import cartopy.crs as ccrs
import seaborn as sns
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from palettable.scientific import diverging as divsci
from strato_figures import remove_regressors_and_set_title
predict_cmap = divsci.Vik_20.mpl_colormap
sns.set_theme(style='ticks', font_scale=1.5)
proj = ccrs.PlateCarree(central_longitude=0)
plt_kwargs = dict(add_colorbar=False,
col_wrap=col_wrap,
cmap=predict_cmap, center=0.0, extend='max', vmax=0.6,
levels=41, subplot_kws=dict(projection=proj),
transform=ccrs.PlateCarree(), figsize=figsize)
label = r'$\beta$ coefficients'
gl_list = []
if drop_co2:
rds = rds.drop_sel(regressor='co2')
plt_kwargs.update(extend=None, vmax=None, col_wrap=2)
fg = rds['params'].plot.contourf(col='regressor', **plt_kwargs)
cbar_kws = {'label': '', 'format': '%0.2f'}
cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .035]) # last num controls width
fg.add_colorbar(cax=cbar_ax, orientation="horizontal", **cbar_kws)
for ax in fg.axes.flatten():
ax.coastlines()
ax.set_extent(extent, crs=ccrs.PlateCarree())
gl = ax.gridlines(
crs=ccrs.PlateCarree(),
linewidth=1,
color='black',
alpha=0.5,
linestyle='--',
draw_labels=True)
gl.xlabels_top = False
gl.xlabel_style = {'size': 9}
gl.ylabel_style = {'size': 9}
gl.xlines = True
gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
gl.ylocator = mticker.FixedLocator([-45, -30, -15, 0, 15, 30, 45])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl_list.append(gl)
ax = remove_regressors_and_set_title(ax)
gl_list[0].ylabels_right = False
gl_list[1].ylabels_right = False
gl_list[1].ylabels_left = True
gl_list[2].ylabels_right = False
gl_list[3].ylabels_left = True
gl_list[3].ylabels_right = True
try:
gl_list[3].ylabels_right = False
except IndexError:
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.93,
bottom=0.2,
left=0.05,
right=0.979,
hspace=0.275,
wspace=0.044)
# fg = rds['params'].plot.contourf(col='regressor', **plt_kwargs)
# cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025])
# fg.add_colorbar(cax=cbar_ax, orientation="horizontal", label='',
# format='%0.3f')
# # fg.fig.suptitle(label, fontsize=12, fontweight=750)
# [ax.coastlines() for ax in fg.axes.flatten()]
# [ax.gridlines(
# crs=ccrs.PlateCarree(),
# linewidth=1,
# color='black',
# alpha=0.5,
# linestyle='--',
# draw_labels=False) for ax in fg.axes.flatten()]
# fg.fig.subplots_adjust(bottom=0.2, top=0.9, left=0.05)
return fg
def plot_r2_map_predictor_sets_with_co2(path=work_chaim, cpt_source='randel',
save=True):
"""r2 map (lat-lon) for cdas-plags, enso, ch4"""
import xarray as xr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
from strato_figures import remove_regressors_and_set_title
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from palettable.colorbrewer import sequential as seqbr
# from palettable.scientific import diverging as divsci
# from palettable.colorbrewer import diverging as divbr
from strat_paths import savefig_path
error_cmap = seqbr.YlGnBu_9.mpl_colormap
sns.set_theme(style='ticks', font_scale=1.5)
titles =[r'(a) $\sum_{i=0}^{5}$CPT(t-$i$)',
r'(b) $\eta_1$ = QBO + ENSO + CO$_2$',
r'(c) $\eta_1$ + QBO $\times$ ENSO + ENSO$^2$',
r'(d) $\eta_1$ + T500 + BDC']
# rds1 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_2004-2019.nc')
# rds2 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_bdc_t500_2004-2019.nc')
# rds3 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_radio_cold_lags6_2004-2019.nc')
# rds4 = xr.open_dataset(
# path /
# 'MLR_H2O_latlon_cdas-plags_ch4_enso_poly_2_no_qbo^2_no_ch4_extra_2004-2019.nc')
rds1 = produce_rds_etas(eta=3, cpt_source=cpt_source)
rds2 = produce_rds_etas(eta=1)
rds3 = produce_rds_etas(eta=4)
rds4 = produce_rds_etas(eta=2)
rds = xr.concat([x['r2'] for x in [rds1, rds2, rds3, rds4]], 'eta')
rds['eta'] = range(1, 5)
rds = rds.sortby('eta')
# fig = plt.figure(figsize=(11, 5))
# ax = fig.add_subplot(1, 1, 1,
# projection=ccrs.PlateCarree(central_longitude=0))
# ax.coastlines()
proj = ccrs.PlateCarree(central_longitude=0)
fg = rds.plot.contourf(col='eta', add_colorbar=False, cmap=error_cmap,
vmin=0.0, extend=None, levels=41, col_wrap=2,
subplot_kws=dict(projection=proj),
transform=ccrs.PlateCarree(), figsize=(13, 6))
# lons = rds.lon.values[0:int(len(rds.lon.values) / 2)][::2]
# lons_mirror = abs(lons[::-1])
# lons = np.concatenate([lons, lons_mirror])
# lats = rds.lat.values[0:int(len(rds.lat.values) / 2)][::2]
# lats_mirror = abs(lats[::-1])
# lats = np.concatenate([lats, lats_mirror])
# ax.set_xticks(lons, crs=ccrs.PlateCarree())
# ax.set_yticks(lats, crs=ccrs.PlateCarree())
# lon_formatter = LongitudeFormatter(zero_direction_label=True)
# lat_formatter = LatitudeFormatter()
# ax.xaxis.set_major_formatter(lon_formatter)
# ax.yaxis.set_major_formatter(lat_formatter)
cbar_kws = {'label': '', 'format': '%0.2f', 'aspect': 20}
cbar_ax = fg.fig.add_axes([0.1, 0.1, .8, .025]) # last num controls width
fg.add_colorbar(cax=cbar_ax, orientation="horizontal", **cbar_kws)
gl_list = []
for i, ax in enumerate(fg.axes.flatten()):
ax.coastlines()
gl = ax.gridlines(
crs=ccrs.PlateCarree(),
linewidth=1,
color='black',
alpha=0.5,
linestyle='--',
draw_labels=True)
gl.xlabels_top = False
gl.xlabel_style = {'size': 9}
gl.ylabel_style = {'size': 9}
gl.xlines = True
gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
gl.ylocator = mticker.FixedLocator([-45, -30, -15, 0, 15, 30, 45])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl_list.append(gl)
if i == 0:
plt.rcParams['axes.titlepad'] = 16
else:
plt.rcParams['axes.titlepad'] = 6
ax.set_title(titles[i])
# ax = remove_regressors_and_set_title(ax)
# gl_list[0].ylabels_right = False
# gl_list[2].ylabels_left = False
# try:
# gl_list[3].ylabels_right = False
# except IndexError:
# pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.92,
bottom=0.16,
left=0.065,
right=0.935,
hspace=0.0,
wspace=0.208)
print('Caption: ')
print('The adjusted R^2 for the water vapor anomalies MLR analysis at the 82 hPa level with CH4, ENSO, and pressure-level-lag-varied QBO as predictors. This MLR spans from 2004 to 2018.')
filename = 'MLR_H2O_r2_map_82_eta_with_co2.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def produce_rds_etas(eta=1, cpt_source='randel'):
""" run produce_MLR_2D_for_figs_6_and_7 with regressors:
eta=1 : co2, anom_nino3p4, qbo_lagged
eta=2 : co2, anom_nino3p4, qbo_lagged, T500, BDC
eta=3 : co2, anom_nino3p4, qbo_lagged + 6XCPT_lagged
eta=4 : co2, anom_nino3p4, qbo_lagged, anom_nino3p4^2, qbo_laggedXanom_nino3p4
co2 is automatically added"""
pred = ['qbo_cdas', 'anom_nino3p4']
if eta == 1:
print('producing eta {} with {}'.format(eta, pred))
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False)
elif eta == 2:
pred = pred + ['era5_bdc70', 'era5_t500']
print('producing eta {} with {}'.format(eta, pred))
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False)
elif eta == 3:
if cpt_source == 'randel':
pred = ['radio_cold_no_qbo']
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False, reg_shift=['radio_cold_no_qbo', 6])
elif cpt_source == 'sean':
pred = ['cpt_ERA5']
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=False, reg_shift=['cpt_ERA5', 6])
print('producing eta {} with {}'.format(eta, pred))
elif eta == 4:
print('producing eta {} with {} and enso^2'.format(eta, pred))
rds = produce_MLR_2D_for_figs_6_and_7(pred, add_enso2=True)
return rds
def produce_MLR_2D_for_figs_6_and_7(predictors=['qbo_cdas', 'anom_nino3p4'],
lag={'qbo_cdas': 5}, add_enso2=True,
reg_shift=None):
from sklearn.linear_model import LinearRegression
if [x for x in lag.keys()][0] not in predictors:
lag = None
X = produce_X(lag=lag, regressors=predictors, add_co2=True,
reg_shift=reg_shift, standertize=False)
if add_enso2:
X = add_enso2_and_enso_qbo_to_X(X)
X = X.sel(time=slice('2005', '2019'))
y = produce_y(detrend=None, lat_band_mean=None, plevel=82, deseason='std',
filename='swoosh_lonlatpress-20deg-5deg.nc', sw_var='combinedanomh2oq')
y = y.sel(lat=slice(-60, 60))
y = y.sel(time=X.time)
lr = LinearRegression()
rds = make_results_for_MLR(lr, X, y)
return rds
def make_results_for_MLR(lr, X_train, y_train, X_test=None, y_test=None):
import xarray as xr
from sklearn.metrics import r2_score
if len(y_train.dims) > 1:
# assume sample dim is time:
target_dims = [x for x in y_train.dims if x != 'time']
# infer reg_dim from X:
reg_dim = [x for x in X_train.dims if x != 'time'][0]
ys_train = y_train.stack(targets=target_dims)
# fit the model:
lr.fit(X_train, ys_train)
rds = xr.Dataset()
# produce beta:
rds['params'] = xr.DataArray(lr.coef_, dims=['targets', reg_dim])
# produce predict:
if X_test is not None:
rds['predict'] = xr.DataArray(lr.predict(X_test), dims=['time', 'targets'])
else:
rds['predict'] = xr.DataArray(lr.predict(X_train), dims=['time', 'targets'])
# produce R^2:
if y_test is not None:
ys_test = y_test.stack(targets=target_dims)
r2 = r2_score(ys_test, rds['predict'], multioutput='raw_values')
else:
r2 = r2_score(ys_train, rds['predict'], multioutput='raw_values')
rds['r2'] = xr.DataArray(r2, dims='targets')
# dims:
rds[reg_dim] = X_train[reg_dim]
rds['time'] = ys_train['time']
rds['targets'] = ys_train['targets']
# unstack:
rds = rds.unstack('targets')
rds['original'] = y_train
rds.attrs['sample_dim'] = 'time'
rds.attrs['feature_dim'] = 'regressor'
elif len(y_train.dims) == 1:
reg_dim = [x for x in X_train.dims if x != 'time'][0]
# fit the model:
lr.fit(X_train, y_train)
rds = xr.Dataset()
# produce beta:
rds['params'] = xr.DataArray(lr.coef_, dims=[reg_dim])
# produce predict:
if X_test is not None:
rds['predict'] = xr.DataArray(lr.predict(X_test), dims=['time'])
rds['time'] = y_test['time']
else:
rds['predict'] = xr.DataArray(lr.predict(X_train), dims=['time'])
rds['time'] = y_train['time']
# produce R^2:
if y_test is not None:
r2 = r2_score(y_test, rds['predict'])
else:
r2 = r2_score(y_train, rds['predict'])
rds['r2'] = xr.DataArray(r2)
# dims:
rds[reg_dim] = X_train[reg_dim]
rds['original'] = y_train
rds.attrs['sample_dim'] = 'time'
rds.attrs['feature_dim'] = 'regressor'
return rds
def plot_forecast_busts_lines_datetime(ax, color='r', style='--'):
import pandas as pd
dts = ['2010-11', '2011-04', '2015-09', '2016-01', '2016-09', '2017-01']
dts = [pd.to_datetime(x) for x in dts]
[ax.axvline(x, c=color, ls=style) for x in dts]
# three forecast busts:
# 2010D2011JFM, 2015-OND, 2016-OND
# ax.axvline('2010-05', c=color, ls=style)
# ax.axvline('2010-09', c=color, ls=style)
return ax
def plot_model_predictions(da):
""" run produce_CV_predictions_for_all_HP_optimized_models first"""
import seaborn as sns
import matplotlib.pyplot as plt
from aux_functions_strat import convert_da_to_long_form_df
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
sns.set_theme(style='ticks', font_scale=1.5)
df = convert_da_to_long_form_df(da)
fig, ax = plt.subplots(figsize=(18, 5))
ax = sns.lineplot(data=df, x='time', y='value', hue='model/obs.',
legend=True)
lw = ax.lines[4].get_linewidth() # lw of first line
plt.setp(ax.lines[4], linewidth=2.5)
ax.grid(True)
ax.set_xlabel('')
ax.set_ylabel(r'H$_{2}$O anomalies [std]')
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.xaxis.grid(True, which='minor')
legend = ax.legend(prop={'size': 13}, ncol=5, loc='upper left')
plot_forecast_busts_lines_datetime(ax, color='k')
fig.tight_layout()
# get handles and labels of legend:
hands, labes = ax.get_legend_handles_labels()
colors = [x.get_color() for x in hands]
# change the text labels to the colors of the lines:
for i, text in enumerate(legend.get_texts()):
text.set_color(colors[i])
return fig
def add_enso2_and_enso_qbo_to_X(X):
import xarray as xr
from ML_OOP_stratosphere_gases import poly_features
feats = [x for x in X.regressor.values if 'qbo' in x or 'nino' in x]
other_feats = [x for x in X.regressor.values if 'qbo' not in x and 'nino' not in x]
X1 = poly_features(X.sel(regressor=feats), feature_dim='regressor')
X1 = X1.drop_sel(regressor='qbo_cdas^2')
X = xr.concat([X.sel(regressor=other_feats), X1], 'regressor')
return X
def produce_CV_predictions_for_all_HP_optimized_models(path=ml_path,
cv='kfold'):
import xarray as xr
X = produce_X(syear='1994', eyear='2019', add_co2=False)
y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
ml = ML_Classifier_Switcher()
das = []
for model_name in ['RF', 'SVM', 'MLP', 'MLR']:
print('performing {} CV for {}.'.format(cv, model_name))
model = ml.pick_model(model_name)
if model_name != 'MLR':
model.set_params(**get_HP_params_from_optimized_model(path=path, model=model_name))
da = cross_val_predict_da(model, X, y, cv=cv)
da.name = model_name + ' model'
das.append(da)
ds = xr.merge(das)
ds['SWOOSH'] = y
da = ds.to_array('model/obs.')
da.name = 'h2o'
return da
def cross_val_predict_da(estimator, X, y, cv='kfold'):
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict
if cv == 'logo':
logo = LeaveOneGroupOut()
groups = X['time'].dt.year
cvr = cross_val_predict(estimator, X, y, groups=groups, cv=logo)
elif cv == 'kfold':
kfold = KFold(n_splits=5, shuffle=True, random_state=1)
cvr = cross_val_predict(estimator, X, y, cv=kfold)
da_ts = y.copy(data=cvr)
da_ts.attrs['estimator'] = estimator.__repr__().split('(')[0]
da_ts.name = da_ts.name + '_' + da_ts.attrs['estimator']
for key, value in estimator.get_params().items():
da_ts.attrs[key] = value
return da_ts
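# Minimal self-contained sketch of cross_val_predict_da on synthetic data.
# Everything below (names, coefficients, dates) is made up for illustration;
# it only assumes numpy, pandas, xarray and scikit-learn.
def _cross_val_predict_da_demo():
    import numpy as np
    import pandas as pd
    import xarray as xr
    from sklearn.linear_model import LinearRegression
    times = pd.date_range('2000-01-01', periods=120, freq='MS')
    rng = np.random.default_rng(0)
    X_demo = xr.DataArray(rng.standard_normal((120, 2)),
                          dims=['time', 'regressor'],
                          coords={'time': times,
                                  'regressor': ['qbo_cdas', 'anom_nino3p4']})
    y_vals = X_demo.values @ np.array([0.6, -0.2]) + 0.1 * rng.standard_normal(120)
    y_demo = xr.DataArray(y_vals, dims=['time'], coords={'time': times},
                          name='h2o_demo')
    # 5-fold CV predictions, returned on the original monthly time axis:
    return cross_val_predict_da(LinearRegression(), X_demo, y_demo, cv='kfold')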
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
def change_width(ax, new_value):
for patch in ax.patches:
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None):
import numpy as np
def _show_on_single_plot(ax, exclude_bar_num=3):
for i, p in enumerate(ax.patches):
if exclude_bar_num is None or i != exclude_bar_num:  # show all bars when no exclusion is given
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = '{:.1f}'.format(p.get_height())
ax.text(_x, _y, value, ha="right",
fontsize=fs, fontweight=fw, zorder=20)
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax, exclude_bar_num)
else:
_show_on_single_plot(axs, exclude_bar_num)
# NOTE: the original `def` header was missing here; the name and signature below
# are inferred from the body (`fi_da` is the feature-importance DataArray
# produced by manual_cross_validation_for_RF_feature_importances).
def plot_rf_feature_importances(fi_da):
sns.set_theme(style='ticks', font_scale=1.5)
fi_da['regressor'] = ['QBO', 'ENSO']
df = fi_da.to_dataframe('feature_importance') * 100.0
df = df.unstack().melt()
fig, ax = plt.subplots(figsize=(6, 8))
sns.barplot(data=df, x='regressor', y='value', orient='v', ci='sd',
ax=ax, hue='regressor', estimator=np.mean, dodge=False)
ax.set_xlabel('')
ax.set_ylabel('Feature Importance [%]')
show_values_on_bars(ax, fs=16, exclude_bar_num=1)
change_width(ax, 0.31)
ax.legend(loc='upper right')
fig.tight_layout()
return fig
def plot_repeated_kfold_dist(df, model_dict, X, y):
"""run assemble_cvr_dataframe first with strategy=Nonen and add_MLR2"""
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
in_sample_r2 = {}
X2 = add_enso2_and_enso_qbo_to_X(X)
for model_name, model in model_dict.items():
if model_name == 'MLR2':
model.fit(X2, y)
in_sample_r2[model_name] = model.score(X2, y)
else:
model.fit(X, y)
in_sample_r2[model_name] = model.score(X, y)
print(in_sample_r2)
df_melted = df.T.melt(var_name='model', value_name=r'R$^2$')
pal = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:pink']
fg = sns.displot(data=df_melted, x=r'R$^2$', col="model",
kind="hist", col_wrap=2, hue='model', stat='density',
kde=True, palette=pal)
letter = ['a', 'b', 'c', 'd', 'e']
for i, ax in enumerate(fg.axes):
label = ax.title.get_text()
model = label.split('=')[-1].strip()
title = '({}) model = {}'.format(letter[i], model)
ax.set_title(title)
mean = df.T.mean().loc[model]
std = df.T.std().loc[model]
median = df.T.median().loc[model]
in_sample = in_sample_r2[model]
textstr = '\n'.join((
r'$\mathrm{mean}=%.2f$' % (mean, ),
r'$\mathrm{median}=%.2f$' % (median, ),
r'$\mathrm{std}=%.2f$' % (std, ),
r'in sample result$=%.2f$' % (in_sample, )))
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# fg.fig.suptitle('Out of sample testing models comparison')
# fg.fig.subplots_adjust(top=0.916)
# fg.fig.tight_layout()
return fg
def assemble_cvr_dataframe(path=ml_path, score='test_r2', n_splits=5,
strategy='LOGO-year', add_MLR2=False):
import pandas as pd
rf, rf_model = cross_validate_using_optimized_HP(
path, model='RF', n_splits=n_splits, strategy=strategy)
svm, svm_model = cross_validate_using_optimized_HP(
path, model='SVM', n_splits=n_splits, strategy=strategy)
mlp, mlp_model = cross_validate_using_optimized_HP(
path, model='MLP', n_splits=n_splits, strategy=strategy)
lr, lr_model = cross_validate_using_optimized_HP(
path, model='MLR', n_splits=n_splits, strategy=strategy)
lr2, lr2_model = cross_validate_using_optimized_HP(
path, model='MLR', n_splits=n_splits, strategy=strategy,
add_MLR2=add_MLR2)
if add_MLR2:
df = pd.DataFrame([rf[score], svm[score], mlp[score], lr[score], lr2[score]])
df.index = ['RF', 'SVM', 'MLP', 'MLR', 'MLR2']
len_cols = len(df.columns)
df.columns = ['kfold_{}'.format(x+1) for x in range(len_cols)]
model_dict = {'RF': rf_model, 'SVM': svm_model,
'MLP': mlp_model, 'MLR': lr_model, 'MLR2': lr2_model}
else:
df = pd.DataFrame([rf[score], svm[score], mlp[score], lr[score]])
df.index = ['RF', 'SVM', 'MLP', 'MLR']
len_cols = len(df.columns)
df.columns = ['kfold_{}'.format(x+1) for x in range(len_cols)]
model_dict = {'RF': rf_model, 'SVM': svm_model,
'MLP': mlp_model, 'MLR': lr_model}
return df, model_dict
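# Typical call chain (comment-only sketch; it needs the pickled GridSearchCV
# results under `ml_path`, so it is not executed here):
#   df_scores, models = assemble_cvr_dataframe(strategy=None, add_MLR2=True)
#   X = produce_X(syear='1994', eyear='2019', add_co2=False)
#   y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
#   fg = plot_repeated_kfold_dist(df_scores, models, X, y)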
def cross_validate_using_optimized_HP(path=ml_path, model='SVM', n_splits=5,
n_repeats=20, strategy='LOGO-year',
scorers=['r2', 'r2_adj',
'neg_mean_squared_error',
'explained_variance'],
add_MLR2=False):
from sklearn.model_selection import cross_validate
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import KFold
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import GroupShuffleSplit
logo = LeaveOneGroupOut()
gss = GroupShuffleSplit(n_splits=20, test_size=0.1, random_state=1)
from sklearn.metrics import make_scorer
X = produce_X(syear='1994', eyear='2019', add_co2=False)
if add_MLR2:
X = add_enso2_and_enso_qbo_to_X(X)
print('adding ENSO^2 and ENSO*QBO')
y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
groups = X['time'].dt.year
scores_dict = {s: s for s in scorers}
if 'r2_adj' in scorers:
scores_dict['r2_adj'] = make_scorer(r2_adj_score)
if 'MLR' not in model:
hp_params = get_HP_params_from_optimized_model(path, model)
ml = ML_Classifier_Switcher()
ml_model = ml.pick_model(model_name=model)
if 'MLR' not in model:
ml_model.set_params(**hp_params)
print(ml_model)
# cv = TimeSeriesSplit(5)
# cv = KFold(10, shuffle=True, random_state=1)
cv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=1)
if strategy == 'LOGO-year':
print('using LeaveOneGroupOut strategy.')
cvr = cross_validate(ml_model, X, y, scoring=scores_dict, cv=logo,
groups=groups)
elif strategy == 'GSS-year':
print('using GroupShuffleSplit strategy.')
cvr = cross_validate(ml_model, X, y, scoring=scores_dict, cv=gss,
groups=groups)
else:
cvr = cross_validate(ml_model, X, y, scoring=scores_dict, cv=cv)
return cvr, ml_model
def manual_cross_validation_for_RF_feature_importances(rf_model, n_splits=5, n_repeats=20, scorers=['r2', 'r2_adj',
'neg_mean_squared_error',
'explained_variance']):
from sklearn.model_selection import KFold
import xarray as xr
import numpy as np
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import make_scorer
scores_dict = {s: s for s in scorers}
if 'r2_adj' in scorers:
scores_dict['r2_adj'] = make_scorer(r2_adj_score)
print(rf_model)
X = produce_X(syear='1994', eyear='2019')
y = produce_y(syear='1994', eyear='2019')
# cv = TimeSeriesSplit(5)
# cv = KFold(10, shuffle=True, random_state=1)
cv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=1)
fis = []
for train_index, test_index in cv.split(X):
# print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
rf_model.fit(X_train, y_train)
fis.append(rf_model.feature_importances_)
fi = xr.DataArray(fis, dims=['repeats', 'regressor'])
fi['repeats'] = np.arange(1, len(fis)+1)
fi['regressor'] = X['regressor']
return fi
def get_HP_params_from_optimized_model(path=ml_path, model='SVM'):
import joblib
from aux_functions_strat import path_glob
files = path_glob(path, 'GRSRCHCV_*.pkl')
file = [x for x in files if model in x.as_posix()][0]
gr = joblib.load(file)
df = read_one_gridsearchcv_object(gr)
return df.iloc[0][:-2].to_dict()
def produce_X(regressors=['qbo_cdas', 'anom_nino3p4'],
lag={'qbo_cdas': 5}, add_co2=True, standertize=False,
reg_shift=None, syear=None, eyear=None):
"""reg_shift is dict = {regressor: n} where n is the number of times to
shift backwards one month"""
from make_regressors import load_all_regressors
from ML_OOP_stratosphere_gases import regressor_shift
import xarray as xr
ds = load_all_regressors()
ds = ds[regressors].dropna('time')
if lag is not None:
for key, value in lag.items():
print(key, value)
ds[key] = ds[key].shift(time=value)
if standertize:
ds = (ds - ds.mean('time')) / ds.std('time')
if add_co2:
ds['co2'] = produce_co2_trend()
if reg_shift is not None:
dss = regressor_shift(ds[reg_shift[0]].dropna('time'), shifts=[1,reg_shift[-1]])
ds = xr.merge([ds, dss])
if syear is not None:
ds = ds.sel(time=slice(syear, None))
if eyear is not None:
ds = ds.sel(time=slice(None, eyear))
if ((syear is not None) or (eyear is not None)) and add_co2:
ds['co2'] = (ds['co2'] - ds['co2'].mean('time')) / ds['co2'].std('time')
X = ds.dropna('time').to_array('regressor')
X = X.transpose('time', 'regressor')
return X
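# Example call (comment-only sketch, since it needs the local `make_regressors`
# data files; the argument values are the ones used elsewhere in this module):
#   X = produce_X(regressors=['qbo_cdas', 'anom_nino3p4'], lag={'qbo_cdas': 5},
#                 add_co2=True, syear='1994', eyear='2019')
#   # X has dims ('time', 'regressor') and can be passed directly to scikit-learn.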
def produce_y(path=work_chaim, detrend=None,
sw_var='combinedeqfillanomfillh2oq', filename='swoosh_latpress-2.5deg.nc',
lat_band_mean=[-5, 5], plevel=82, deseason='mean', standertize=True,
syear=None, eyear=None):
import xarray as xr
from aux_functions_strat import lat_mean
from aux_functions_strat import detrend_ts
from aux_functions_strat import anomalize_xr
file = path / filename
da = xr.open_dataset(file)[sw_var]
if plevel is not None:
da = da.sel(level=plevel, method='nearest')
if lat_band_mean is not None:
da = lat_mean(da.sel(lat=slice(lat_band_mean[0], lat_band_mean[1])))
if detrend is not None:
if detrend == 'lowess':
print('lowess detrend for h2o')
da = detrend_ts(da)
if deseason is not None:
print('deseasonalizing h2o...')
da = anomalize_xr(da, freq='MS', units=deseason, time_dim='time')
if standertize is not None:
print('standardizing h2o')
da = (da - da.mean('time')) / da.std('time')
if syear is not None:
print('picking {} as start year'.format(syear))
da = da.sel(time=slice(syear, None))
if eyear is not None:
print('picking {} as end year'.format(eyear))
da = da.sel(time=slice(None, eyear))
y = da
return y
def produce_co2_trend(standertize=True):
from make_regressors import load_all_regressors
from aux_functions_strat import loess_curve
ds = load_all_regressors()
co2 = ds['co2'].dropna('time')
trend = loess_curve(co2, plot=False)
if standertize:
co2 = (trend['mean']-trend['mean'].mean('time')) / \
trend['mean'].std('time')
return co2
else:
return trend['mean']
def r2_adj_score(y_true, y_pred, **kwargs):
from sklearn.metrics import r2_score
r2 = r2_score(y_true, y_pred)
n = len(y_true)
if 'p' in kwargs:
p = kwargs['p']
else:
p = 2
r2_adj = 1.0 - (1.0 - r2) * (n - 1.0) / (n - p)
# r2_adj = 1-(1-r2)*(n-1)/(n-p-1)
return r2_adj
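# Worked example (numbers illustrative): with n=120 samples, p=3 regressors and
# r2=0.75, the formula above gives
#   r2_adj = 1 - (1 - 0.75) * (120 - 1) / (120 - 3) ~= 0.746
# i.e. slightly below r2, penalizing the extra predictors. Note it uses (n - p)
# rather than the textbook (n - p - 1) shown in the commented-out line.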
def Optimize_HP_per_model(test_size=0.1, model_name='SVM',
n_splits=5, savepath=None):
from sklearn.model_selection import train_test_split
X = produce_X(syear='1994', eyear='2019', add_co2=False)
y = produce_y(detrend='lowess', lat_band_mean=[-15, 15], syear='1994', eyear='2019')
if test_size is None:
X_val = X
y_val = y
else:
X_val, X_test, y_val, y_test = train_test_split(X, y, test_size=test_size)
gr = single_cross_validation(X_val, y_val, model_name=model_name,
n_splits=n_splits,
savepath=savepath)
return gr
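# Hedged usage sketch (arguments illustrative):
#   >>> gr = Optimize_HP_per_model(test_size=0.1, model_name='RF', n_splits=5,
#   ...                            savepath=ml_path)
# This grid-searches on a 90% validation split and, when savepath is given,
# writes a GRSRCHCV_*.pkl file that load_one_gridsearchcv_object() can read back.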
def single_cross_validation(X_val, y_val, model_name='SVM',
n_splits=5, scorers=['r2', 'r2_adj',
'neg_mean_squared_error',
'explained_variance'],
seed=42, savepath=None, verbose=0,
param_grid='dense', n_jobs=-1):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
# import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'r2_adj' in scorers:
scores_dict['r2_adj'] = make_scorer(r2_adj_score)
X = X_val.dropna('time').sel(time=y_val['time'])
y = y_val
# if param_grid == 'light':
# print(np.unique(X.feature.values))
# configure the cross-validation procedure
# cv = TimeSeriesSplit(n_splits=n_splits)
cv = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
# print('CV TimeSeriesKfolds of {}.'.format(n_splits))
print('CV KFold of {}.'.format(n_splits))
# define the model and search space:
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
# if outer_split == '1-1':
# cv_type = 'holdout'
# print('holdout cv is selected.')
# else:
# cv_type = 'nested'
# print('nested cv {} out of {}.'.format(
# outer_split.split('-')[0], outer_split.split('-')[1]))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
features = [x for x in X['regressor'].values]
if savepath is not None:
filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}.pkl'.format(model_name, '+'.join(features), '+'.join(
scorers), n_splits,
param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
return gr_search
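# Design note: refit=False above means GridSearchCV never refits a best estimator,
# so gr_search carries no best_estimator_; the winning hyper-parameters are
# recovered afterwards with read_one_gridsearchcv_object(), e.g. (hedged sketch):
#   >>> gr = single_cross_validation(X, y, model_name='SVM', param_grid='dense')
#   >>> best_df = read_one_gridsearchcv_object(gr)   # one row per scorer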
def save_gridsearchcv_object(GridSearchCV, savepath, filename):
import joblib
print('{} was saved to {}'.format(filename, savepath))
joblib.dump(GridSearchCV, savepath / filename)
return
def load_one_gridsearchcv_object(path=ml_path, model_name='SVM', verbose=True):
"""load one gridsearchcv obj with model_name and features and run read_one_gridsearchcv_object"""
from aux_functions_strat import path_glob
import joblib
# first filter for model name:
if verbose:
print('loading GridsearchCVs results for {} model'.format(model_name))
model_files = path_glob(path, 'GRSRCHCV_*.pkl')
model_files = [x for x in model_files if model_name in x.as_posix()]
# now select features:
# if verbose:
# print('loading GridsearchCVs results with {} features'.format(features))
# model_features = [x.as_posix().split('/')[-1].split('_')[3] for x in model_files]
# feat_ind = get_feature_set_from_list(model_features, features)
# also get the test ratio and seed number:
# if len(feat_ind) > 1:
# if verbose:
# print('found {} GR objects.'.format(len(feat_ind)))
# files = sorted([model_files[x] for x in feat_ind])
# outer_splits = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-3] for x in files]
# grs = [joblib.load(x) for x in files]
# best_dfs = [read_one_gridsearchcv_object(x) for x in grs]
# di = dict(zip(outer_splits, best_dfs))
# return di
# else:
# file = model_files[feat_ind]
# seed = file.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
# outer_splits = file.as_posix().split('/')[-1].split('.')[0].split('_')[-3]
# load and produce best_df:
gr = joblib.load(model_files[0])
best_df = read_one_gridsearchcv_object(gr)
return best_df
def read_one_gridsearchcv_object(gr):
"""read one gridsearchcv multimetric object and
get the best params, best mean/std scores"""
import pandas as pd
# first get all the scorers used:
scorers = [x for x in gr.scorer_.keys()]
# now loop over the scorers:
best_params = []
best_mean_scores = []
best_std_scores = []
for scorer in scorers:
df_mean = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["mean_test_{}".format(scorer)], columns=[scorer])], axis=1)
df_std = pd.concat([pd.DataFrame(gr.cv_results_["params"]), pd.DataFrame(
gr.cv_results_["std_test_{}".format(scorer)], columns=[scorer])], axis=1)
# best index = highest score:
best_ind = df_mean[scorer].idxmax()
best_mean_scores.append(df_mean.iloc[best_ind][scorer])
best_std_scores.append(df_std.iloc[best_ind][scorer])
best_params.append(df_mean.iloc[best_ind].to_frame().T.iloc[:, :-1])
best_df = pd.concat(best_params)
best_df['mean_score'] = best_mean_scores
best_df['std_score'] = best_std_scores
best_df.index = scorers
return best_df
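# Illustrative layout of best_df (values hypothetical): one row per scorer, the
# tuned hyper-parameters as columns, plus 'mean_score' and 'std_score', e.g.
#   index                   C     gamma   mean_score  std_score
#   r2                      1.0   0.01    0.62        0.05
#   neg_mean_squared_error  10.0  0.001   -0.38       0.04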
def order_of_mag(minimal=-5, maximal=1):
import numpy as np
return [10**float(x) for x in np.arange(minimal, maximal + 1)]
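# Example: order_of_mag(-2, 1) -> [0.01, 0.1, 1.0, 10.0]
# (np.arange(minimal, maximal + 1) yields -2, -1, 0, 1 here).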
class ML_Classifier_Switcher(object):
def pick_model(self, model_name, pgrid='normal'):
"""Dispatch method"""
# from sklearn.model_selection import GridSearchCV
self.param_grid = None
method_name = str(model_name)
# Get the method from 'self'. Default to a lambda.
method = getattr(self, method_name, lambda: "Invalid ML Model")
# if gridsearch:
# return(GridSearchCV(method(), self.param_grid, n_jobs=-1,
# return_train_score=True))
# else:
# Call the method as we return it
# whether to select lighter param grid, e.g., for testing purposes.
self.pgrid = pgrid
return method()
def SVM(self):
from sklearn.svm import SVR
import numpy as np
if self.pgrid == 'light':
self.param_grid = {'kernel': ['poly'],
'C': [0.1],
'gamma': [0.0001],
'degree': [1, 2],
'coef0': [1, 4]}
# elif self.pgrid == 'normal':
# self.param_grid = {'kernel': ['rbf', 'sigmoid', 'linear', 'poly'],
# 'C': order_of_mag(-1, 2),
# 'gamma': order_of_mag(-5, 0),
# 'degree': [1, 2, 3, 4, 5],
# 'coef0': [0, 1, 2, 3, 4]}
elif self.pgrid == 'dense':
# self.param_grid = {'kernel': ['rbf', 'sigmoid', 'linear', 'poly'],
# 'C': np.logspace(-2, 2, 10), # order_of_mag(-2, 2),
# 'gamma': np.logspace(-5, 1, 14), # order_of_mag(-5, 0),
# 'degree': [1, 2, 3, 4, 5],
# 'coef0': [0, 1, 2, 3, 4]}
self.param_grid = {'kernel': ['rbf', 'sigmoid', 'linear'],
'C': np.logspace(-2, 2, 10), # order_of_mag(-2, 2),
'gamma': np.logspace(-5, 1, 14)}#, # order_of_mag(-5, 0),
# 'degree': [1, 2, 3, 4, 5],
# 'coef0': [0, 1, 2, 3, 4]}
return SVR()
def MLP(self):
import numpy as np
from sklearn.neural_network import MLPRegressor
if self.pgrid == 'light':
self.param_grid = {
'activation': [
'identity',
'relu'],
'hidden_layer_sizes': [(50, 50, 50), (50, 100, 50)]}
# elif self.pgrid == 'normal':
# self.param_grid = {'alpha': order_of_mag(-5, 1),
# 'activation': ['identity', 'logistic', 'tanh', 'relu'],
# 'hidden_layer_sizes': [(50, 50, 50), (50, 100, 50), (100,)],
# 'learning_rate': ['constant', 'adaptive'],
# 'solver': ['adam', 'lbfgs', 'sgd']}
elif self.pgrid == 'dense':
self.param_grid = {'alpha': np.logspace(-5, 1, 7),
'activation': ['identity', 'logistic', 'tanh', 'relu'],
'hidden_layer_sizes': [(10, 10, 10), (10, 20, 10), (10,), (5,), (1,)],
'learning_rate': ['constant'],
'solver': ['adam', 'sgd']}
#(1,),(2,),(3,),(4,),(5,),(6,),(7,),(8,),(9,),(10,),(11,), (12,),(13,),(14,),(15,),(16,),(17,),(18,),(19,),(20,),(21,)
return MLPRegressor(random_state=42, max_iter=500, learning_rate_init=0.1)
def RF(self):
from sklearn.ensemble import RandomForestRegressor
# import numpy as np
if self.pgrid == 'light':
self.param_grid = {'max_features': ['auto', 'sqrt']}
elif self.pgrid == 'normal':
self.param_grid = {'max_depth': [5, 10, 25, 50, 100],
'max_features': ['auto', 'sqrt'],
'min_samples_leaf': [1, 2, 5, 10],
'min_samples_split': [2, 5, 15, 50],
'n_estimators': [100, 300, 700, 1200]
}
elif self.pgrid == 'dense':
self.param_grid = {'max_depth': [2, 5, 10],
'max_features': ['auto', 'sqrt'],
'min_samples_leaf': [1, 2],
'min_samples_split': [2, 5],
'n_estimators': [50, 100, 400]
}
return RandomForestRegressor(random_state=42, n_jobs=-1)
def MLR(self):
from sklearn.linear_model import LinearRegression
return LinearRegression(n_jobs=-1)
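# Hedged usage sketch of the dispatch class above (illustrative):
#   >>> ml = ML_Classifier_Switcher()
#   >>> model = ml.pick_model('RF', pgrid='light')   # unfitted RandomForestRegressor
#   >>> ml.param_grid
#   {'max_features': ['auto', 'sqrt']}
# pick_model() sets self.param_grid as a side effect, which
# single_cross_validation() then hands to GridSearchCV.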
|
[
"seaborn.lineplot",
"sklearn.model_selection.GridSearchCV",
"numpy.abs",
"sklearn.model_selection.cross_validate",
"sklearn.model_selection.train_test_split",
"aux_functions_strat.anomalize_xr",
"sklearn.metrics.r2_score",
"joblib.dump",
"numpy.logspace",
"aux_functions_strat.path_glob",
"numpy.arange",
"matplotlib.pyplot.gca",
"sklearn.model_selection.RepeatedKFold",
"pandas.DataFrame",
"matplotlib.pyplot.setp",
"matplotlib.ticker.FixedLocator",
"aux_functions_strat.convert_da_to_long_form_df",
"xarray.merge",
"sklearn.metrics.make_scorer",
"aux_functions_strat.loess_curve",
"sklearn.model_selection.LeaveOneGroupOut",
"matplotlib.pyplot.subplots",
"pandas.concat",
"seaborn.set_theme",
"numpy.ndenumerate",
"numpy.corrcoef",
"seaborn.barplot",
"xarray.concat",
"sklearn.linear_model.LinearRegression",
"xarray.Dataset",
"make_regressors.load_all_regressors",
"matplotlib.ticker.AutoMinorLocator",
"pandas.to_datetime",
"sklearn.ensemble.RandomForestRegressor",
"pandas.Series",
"strato_figures.remove_regressors_and_set_title",
"shap.summary_plot",
"sklearn.model_selection.GroupShuffleSplit",
"sklearn.svm.SVR",
"seaborn.displot",
"aux_functions_strat.detrend_ts",
"xarray.open_dataset",
"shap.Explainer",
"sklearn.model_selection.KFold",
"sklearn.model_selection.cross_val_predict",
"sklearn.neural_network.MLPRegressor",
"numpy.where",
"xarray.DataArray",
"cartopy.crs.PlateCarree",
"joblib.load",
"matplotlib.pyplot.savefig"
] |
[((1556, 1600), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (1569, 1600), True, 'import seaborn as sns\n'), ((1680, 1701), 'pandas.DataFrame', 'pd.DataFrame', (['df_shap'], {}), '(df_shap)\n', (1692, 1701), True, 'import pandas as pd\n'), ((2282, 2326), 'numpy.where', 'np.where', (["(corr_df['Corr'] > 0)", '"""red"""', '"""blue"""'], {}), "(corr_df['Corr'] > 0, 'red', 'blue')\n", (2290, 2326), True, 'import numpy as np\n'), ((2357, 2371), 'numpy.abs', 'np.abs', (['shap_v'], {}), '(shap_v)\n', (2363, 2371), True, 'import numpy as np\n'), ((3243, 3280), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (3259, 3280), False, 'from sklearn.model_selection import train_test_split\n'), ((3467, 3491), 'shap.Explainer', 'shap.Explainer', (['rf_model'], {}), '(rf_model)\n', (3481, 3491), False, 'import shap\n'), ((3579, 3588), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3586, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3999, 4036), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (4015, 4036), False, 'from sklearn.model_selection import train_test_split\n'), ((4324, 4348), 'shap.Explainer', 'shap.Explainer', (['rf_model'], {}), '(rf_model)\n', (4338, 4348), False, 'import shap\n'), ((4397, 4450), 'shap.summary_plot', 'shap.summary_plot', (['shap_values_rf', 'dfX'], {'plot_size': '(1.1)'}), '(shap_values_rf, dfX, plot_size=1.1)\n', (4414, 4450), False, 'import shap\n'), ((5000, 5018), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5016, 5018), False, 'from sklearn.linear_model import LinearRegression\n'), ((5322, 5366), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (5335, 5366), True, 'import seaborn as sns\n'), ((5381, 5416), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(18, 7)'}), '(2, 1, figsize=(18, 7))\n', (5393, 5416), True, 'import matplotlib.pyplot as plt\n'), ((6739, 6783), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (6752, 6783), True, 'import seaborn as sns\n'), ((6795, 6832), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(0)'}), '(central_longitude=0)\n', (6811, 6832), True, 'import cartopy.crs as ccrs\n'), ((10276, 10320), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (10289, 10320), True, 'import seaborn as sns\n'), ((11217, 11278), 'xarray.concat', 'xr.concat', (["[x['r2'] for x in [rds1, rds2, rds3, rds4]]", '"""eta"""'], {}), "([x['r2'] for x in [rds1, rds2, rds3, rds4]], 'eta')\n", (11226, 11278), True, 'import xarray as xr\n'), ((11518, 11555), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(0)'}), '(central_longitude=0)\n', (11534, 11555), True, 'import cartopy.crs as ccrs\n'), ((16522, 16540), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (16538, 16540), False, 'from sklearn.linear_model import LinearRegression\n'), ((19820, 19864), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (19833, 19864), True, 'import seaborn as sns\n'), ((19874, 
19904), 'aux_functions_strat.convert_da_to_long_form_df', 'convert_da_to_long_form_df', (['da'], {}), '(da)\n', (19900, 19904), False, 'from aux_functions_strat import convert_da_to_long_form_df\n'), ((19919, 19948), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18, 5)'}), '(figsize=(18, 5))\n', (19931, 19948), True, 'import matplotlib.pyplot as plt\n'), ((19958, 20031), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'df', 'x': '"""time"""', 'y': '"""value"""', 'hue': '"""model/obs."""', 'legend': '(True)'}), "(data=df, x='time', y='value', hue='model/obs.', legend=True)\n", (19970, 20031), True, 'import seaborn as sns\n'), ((20115, 20151), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.lines[4]'], {'linewidth': '(2.5)'}), '(ax.lines[4], linewidth=2.5)\n', (20123, 20151), True, 'import matplotlib.pyplot as plt\n'), ((22025, 22038), 'xarray.merge', 'xr.merge', (['das'], {}), '(das)\n', (22033, 22038), True, 'import xarray as xr\n'), ((24120, 24164), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (24133, 24164), True, 'import seaborn as sns\n'), ((24307, 24335), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (24319, 24335), True, 'import matplotlib.pyplot as plt\n'), ((24340, 24467), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'df', 'x': '"""regressor"""', 'y': '"""value"""', 'orient': '"""v"""', 'ci': '"""sd"""', 'ax': 'ax', 'hue': '"""regressor"""', 'estimator': 'np.mean', 'dodge': '(False)'}), "(data=df, x='regressor', y='value', orient='v', ci='sd', ax=ax,\n hue='regressor', estimator=np.mean, dodge=False)\n", (24351, 24467), True, 'import seaborn as sns\n'), ((24858, 24902), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""ticks"""', 'font_scale': '(1.5)'}), "(style='ticks', font_scale=1.5)\n", (24871, 24902), True, 'import seaborn as sns\n'), ((25404, 25536), 'seaborn.displot', 'sns.displot', ([], {'data': 'df_melted', 'x': '"""R$^2$"""', 'col': '"""model"""', 'kind': '"""hist"""', 'col_wrap': '(2)', 'hue': '"""model"""', 'stat': '"""density"""', 'kde': '(True)', 'palette': 'pal'}), "(data=df_melted, x='R$^2$', col='model', kind='hist', col_wrap=2,\n hue='model', stat='density', kde=True, palette=pal)\n", (25415, 25536), True, 'import seaborn as sns\n'), ((28864, 28882), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (28880, 28882), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((28893, 28954), 'sklearn.model_selection.GroupShuffleSplit', 'GroupShuffleSplit', ([], {'n_splits': '(20)', 'test_size': '(0.1)', 'random_state': '(1)'}), '(n_splits=20, test_size=0.1, random_state=1)\n', (28910, 28954), False, 'from sklearn.model_selection import GroupShuffleSplit\n'), ((29766, 29835), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats', 'random_state': '(1)'}), '(n_splits=n_splits, n_repeats=n_repeats, random_state=1)\n', (29779, 29835), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((31259, 31328), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'n_splits', 'n_repeats': 'n_repeats', 'random_state': '(1)'}), '(n_splits=n_splits, n_repeats=n_repeats, random_state=1)\n', (31272, 31328), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((31683, 31731), 'xarray.DataArray', 'xr.DataArray', (['fis'], {'dims': "['repeats', 'regressor']"}), "(fis, dims=['repeats', 
'regressor'])\n", (31695, 31731), True, 'import xarray as xr\n'), ((31973, 32006), 'aux_functions_strat.path_glob', 'path_glob', (['path', '"""GRSRCHCV_*.pkl"""'], {}), "(path, 'GRSRCHCV_*.pkl')\n", (31982, 32006), False, 'from aux_functions_strat import path_glob\n'), ((32074, 32091), 'joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (32085, 32091), False, 'import joblib\n'), ((32603, 32624), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (32622, 32624), False, 'from make_regressors import load_all_regressors\n'), ((34980, 35001), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (34999, 35001), False, 'from make_regressors import load_all_regressors\n'), ((35049, 35077), 'aux_functions_strat.loess_curve', 'loess_curve', (['co2'], {'plot': '(False)'}), '(co2, plot=False)\n', (35060, 35077), False, 'from aux_functions_strat import loess_curve\n'), ((35351, 35375), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (35359, 35375), False, 'from sklearn.metrics import r2_score\n'), ((37467, 37524), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'random_state': 'seed', 'shuffle': '(True)'}), '(n_splits=n_splits, random_state=seed, shuffle=True)\n', (37472, 37524), False, 'from sklearn.model_selection import KFold\n'), ((38161, 38325), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'sk_model', 'param_grid': 'search_space', 'cv': 'cv', 'n_jobs': 'n_jobs', 'scoring': 'scores_dict', 'verbose': 'verbose', 'refit': '(False)', 'return_train_score': '(True)'}), '(estimator=sk_model, param_grid=search_space, cv=cv, n_jobs=\n n_jobs, scoring=scores_dict, verbose=verbose, refit=False,\n return_train_score=True)\n', (38173, 38325), False, 'from sklearn.model_selection import GridSearchCV\n'), ((38932, 38978), 'joblib.dump', 'joblib.dump', (['GridSearchCV', '(savepath / filename)'], {}), '(GridSearchCV, savepath / filename)\n', (38943, 38978), False, 'import joblib\n'), ((39386, 39419), 'aux_functions_strat.path_glob', 'path_glob', (['path', '"""GRSRCHCV_*.pkl"""'], {}), "(path, 'GRSRCHCV_*.pkl')\n", (39395, 39419), False, 'from aux_functions_strat import path_glob\n'), ((40540, 40567), 'joblib.load', 'joblib.load', (['model_files[0]'], {}), '(model_files[0])\n', (40551, 40567), False, 'import joblib\n'), ((41651, 41673), 'pandas.concat', 'pd.concat', (['best_params'], {}), '(best_params)\n', (41660, 41673), True, 'import pandas as pd\n'), ((8034, 8090), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-180, -120, -60, 0, 60, 120, 180]'], {}), '([-180, -120, -60, 0, 60, 120, 180])\n', (8054, 8090), True, 'import matplotlib.ticker as mticker\n'), ((8113, 8165), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-45, -30, -15, 0, 15, 30, 45]'], {}), '([-45, -30, -15, 0, 15, 30, 45])\n', (8133, 8165), True, 'import matplotlib.ticker as mticker\n'), ((8293, 8328), 'strato_figures.remove_regressors_and_set_title', 'remove_regressors_and_set_title', (['ax'], {}), '(ax)\n', (8324, 8328), False, 'from strato_figures import remove_regressors_and_set_title\n'), ((13094, 13150), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-180, -120, -60, 0, 60, 120, 180]'], {}), '([-180, -120, -60, 0, 60, 120, 180])\n', (13114, 13150), True, 'import matplotlib.ticker as mticker\n'), ((13173, 13225), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['[-45, -30, -15, 0, 15, 30, 45]'], {}), '([-45, -30, -15, 0, 15, 30, 
45])\n', (13193, 13225), True, 'import matplotlib.ticker as mticker\n'), ((14265, 14322), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(savefig_path / filename)'], {'bbox_inches': '"""tight"""'}), "(savefig_path / filename, bbox_inches='tight')\n", (14276, 14322), True, 'import matplotlib.pyplot as plt\n'), ((17089, 17101), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (17099, 17101), True, 'import xarray as xr\n'), ((17150, 17199), 'xarray.DataArray', 'xr.DataArray', (['lr.coef_'], {'dims': "['targets', reg_dim]"}), "(lr.coef_, dims=['targets', reg_dim])\n", (17162, 17199), True, 'import xarray as xr\n'), ((17748, 17780), 'xarray.DataArray', 'xr.DataArray', (['r2'], {'dims': '"""targets"""'}), "(r2, dims='targets')\n", (17760, 17780), True, 'import xarray as xr\n'), ((19256, 19273), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (19270, 19273), True, 'import pandas as pd\n'), ((20270, 20288), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (20286, 20288), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator\n'), ((22383, 22401), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (22399, 22401), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((22451, 22509), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['estimator', 'X', 'y'], {'groups': 'groups', 'cv': 'logo'}), '(estimator, X, y, groups=groups, cv=logo)\n', (22468, 22509), False, 'from sklearn.model_selection import cross_val_predict\n'), ((27371, 27443), 'pandas.DataFrame', 'pd.DataFrame', (['[rf[score], svm[score], mlp[score], lr[score], lr2[score]]'], {}), '([rf[score], svm[score], mlp[score], lr[score], lr2[score]])\n', (27383, 27443), True, 'import pandas as pd\n'), ((27760, 27820), 'pandas.DataFrame', 'pd.DataFrame', (['[rf[score], svm[score], mlp[score], lr[score]]'], {}), '([rf[score], svm[score], mlp[score], lr[score]])\n', (27772, 27820), True, 'import pandas as pd\n'), ((29386, 29411), 'sklearn.metrics.make_scorer', 'make_scorer', (['r2_adj_score'], {}), '(r2_adj_score)\n', (29397, 29411), False, 'from sklearn.metrics import make_scorer\n'), ((29955, 30030), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ml_model', 'X', 'y'], {'scoring': 'scores_dict', 'cv': 'logo', 'groups': 'groups'}), '(ml_model, X, y, scoring=scores_dict, cv=logo, groups=groups)\n', (29969, 30030), False, 'from sklearn.model_selection import cross_validate\n'), ((31031, 31056), 'sklearn.metrics.make_scorer', 'make_scorer', (['r2_adj_score'], {}), '(r2_adj_score)\n', (31042, 31056), False, 'from sklearn.metrics import make_scorer\n'), ((33066, 33085), 'xarray.merge', 'xr.merge', (['[ds, dss]'], {}), '([ds, dss])\n', (33074, 33085), True, 'import xarray as xr\n'), ((33935, 33956), 'xarray.open_dataset', 'xr.open_dataset', (['file'], {}), '(file)\n', (33950, 33956), True, 'import xarray as xr\n'), ((34372, 34432), 'aux_functions_strat.anomalize_xr', 'anomalize_xr', (['da'], {'freq': '"""MS"""', 'units': 'deseason', 'time_dim': '"""time"""'}), "(da, freq='MS', units=deseason, time_dim='time')\n", (34384, 34432), False, 'from aux_functions_strat import anomalize_xr\n'), ((36007, 36050), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size'}), '(X, y, test_size=test_size)\n', (36023, 36050), False, 'from sklearn.model_selection import train_test_split\n'), ((37192, 37217), 'sklearn.metrics.make_scorer', 'make_scorer', (['r2_adj_score'], {}), '(r2_adj_score)\n', 
(37203, 37217), False, 'from sklearn.metrics import make_scorer\n'), ((44171, 44176), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (44174, 44176), False, 'from sklearn.svm import SVR\n'), ((45492, 45559), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'random_state': '(42)', 'max_iter': '(500)', 'learning_rate_init': '(0.1)'}), '(random_state=42, max_iter=500, learning_rate_init=0.1)\n', (45504, 45559), False, 'from sklearn.neural_network import MLPRegressor\n'), ((46556, 46605), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'n_jobs': '(-1)'}), '(random_state=42, n_jobs=-1)\n', (46577, 46605), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((46699, 46726), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (46715, 46726), False, 'from sklearn.linear_model import LinearRegression\n'), ((5750, 5768), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (5766, 5768), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator\n'), ((7093, 7111), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7109, 7111), True, 'import cartopy.crs as ccrs\n'), ((11803, 11821), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11819, 11821), True, 'import cartopy.crs as ccrs\n'), ((17576, 17635), 'sklearn.metrics.r2_score', 'r2_score', (['ys_test', "rds['predict']"], {'multioutput': '"""raw_values"""'}), "(ys_test, rds['predict'], multioutput='raw_values')\n", (17584, 17635), False, 'from sklearn.metrics import r2_score\n'), ((17667, 17727), 'sklearn.metrics.r2_score', 'r2_score', (['ys_train', "rds['predict']"], {'multioutput': '"""raw_values"""'}), "(ys_train, rds['predict'], multioutput='raw_values')\n", (17675, 17727), False, 'from sklearn.metrics import r2_score\n'), ((18266, 18278), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (18276, 18278), True, 'import xarray as xr\n'), ((18327, 18365), 'xarray.DataArray', 'xr.DataArray', (['lr.coef_'], {'dims': '[reg_dim]'}), '(lr.coef_, dims=[reg_dim])\n', (18339, 18365), True, 'import xarray as xr\n'), ((18865, 18881), 'xarray.DataArray', 'xr.DataArray', (['r2'], {}), '(r2)\n', (18877, 18881), True, 'import xarray as xr\n'), ((22550, 22597), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(1)'}), '(n_splits=5, shuffle=True, random_state=1)\n', (22555, 22597), False, 'from sklearn.model_selection import KFold\n'), ((22612, 22656), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['estimator', 'X', 'y'], {'cv': 'kfold'}), '(estimator, X, y, cv=kfold)\n', (22629, 22656), False, 'from sklearn.model_selection import cross_val_predict\n'), ((23968, 23987), 'numpy.ndenumerate', 'np.ndenumerate', (['axs'], {}), '(axs)\n', (23982, 23987), True, 'import numpy as np\n'), ((30158, 30232), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ml_model', 'X', 'y'], {'scoring': 'scores_dict', 'cv': 'gss', 'groups': 'groups'}), '(ml_model, X, y, scoring=scores_dict, cv=gss, groups=groups)\n', (30172, 30232), False, 'from sklearn.model_selection import cross_validate\n'), ((30286, 30344), 'sklearn.model_selection.cross_validate', 'cross_validate', (['ml_model', 'X', 'y'], {'scoring': 'scores_dict', 'cv': 'cv'}), '(ml_model, X, y, scoring=scores_dict, cv=cv)\n', (30300, 30344), False, 'from sklearn.model_selection import cross_validate\n'), ((34276, 34290), 'aux_functions_strat.detrend_ts', 
'detrend_ts', (['da'], {}), '(da)\n', (34286, 34290), False, 'from aux_functions_strat import detrend_ts\n'), ((41909, 41940), 'numpy.arange', 'np.arange', (['minimal', '(maximal + 1)'], {}), '(minimal, maximal + 1)\n', (41918, 41940), True, 'import numpy as np\n'), ((690, 711), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (709, 711), False, 'from make_regressors import load_all_regressors\n'), ((742, 763), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (761, 763), False, 'from make_regressors import load_all_regressors\n'), ((796, 817), 'make_regressors.load_all_regressors', 'load_all_regressors', ([], {}), '()\n', (815, 817), False, 'from make_regressors import load_all_regressors\n'), ((1956, 1987), 'numpy.corrcoef', 'np.corrcoef', (['shap_v[i]', 'df_v[i]'], {}), '(shap_v[i], df_v[i])\n', (1967, 1987), True, 'import numpy as np\n'), ((7664, 7682), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7680, 7682), True, 'import cartopy.crs as ccrs\n'), ((7727, 7745), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7743, 7745), True, 'import cartopy.crs as ccrs\n'), ((12787, 12805), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12803, 12805), True, 'import cartopy.crs as ccrs\n'), ((18747, 18779), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', "rds['predict']"], {}), "(y_test, rds['predict'])\n", (18755, 18779), False, 'from sklearn.metrics import r2_score\n'), ((18811, 18844), 'sklearn.metrics.r2_score', 'r2_score', (['y_train', "rds['predict']"], {}), "(y_train, rds['predict'])\n", (18819, 18844), False, 'from sklearn.metrics import r2_score\n'), ((41043, 41081), 'pandas.DataFrame', 'pd.DataFrame', (["gr.cv_results_['params']"], {}), "(gr.cv_results_['params'])\n", (41055, 41081), True, 'import pandas as pd\n'), ((41212, 41250), 'pandas.DataFrame', 'pd.DataFrame', (["gr.cv_results_['params']"], {}), "(gr.cv_results_['params'])\n", (41224, 41250), True, 'import pandas as pd\n'), ((2056, 2079), 'pandas.Series', 'pd.Series', (['feature_list'], {}), '(feature_list)\n', (2065, 2079), True, 'import pandas as pd\n'), ((2081, 2101), 'pandas.Series', 'pd.Series', (['corr_list'], {}), '(corr_list)\n', (2090, 2101), True, 'import pandas as pd\n'), ((43900, 43922), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(10)'], {}), '(-2, 2, 10)\n', (43911, 43922), True, 'import numpy as np\n'), ((43988, 44010), 'numpy.logspace', 'np.logspace', (['(-5)', '(1)', '(14)'], {}), '(-5, 1, 14)\n', (43999, 44010), True, 'import numpy as np\n'), ((45014, 45035), 'numpy.logspace', 'np.logspace', (['(-5)', '(1)', '(7)'], {}), '(-5, 1, 7)\n', (45025, 45035), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Creates lists of molecules on a grid with a +-0.5 pixel
random offset.
Hazen 12/16
"""
import numpy
import random
import storm_analysis.sa_library.sa_h5py as saH5Py
def emittersOnGrid(h5_name, nx, ny, sigma, spacing, zrange, zoffset, seed = 0):
if seed is not None:
random.seed(seed)
if (nx*ny > 1):
curz = -zrange
z_inc = 2.0 * zrange/(nx*ny - 1)
else:
curz = 0.0
z_inc = 0.0
peaks = {"id" : numpy.zeros(nx*ny, dtype = numpy.int32),
"x" : numpy.zeros(nx*ny),
"y" : numpy.zeros(nx*ny),
"z" : numpy.zeros(nx*ny),
"xsigma" : sigma * numpy.ones(nx*ny),
"ysigma" : sigma * numpy.ones(nx*ny)}
curx = spacing
for i in range(nx):
cury = spacing
for j in range(ny):
k = i*ny+j
peaks['x'][k] = curx + random.random() - 0.5
peaks['y'][k] = cury + random.random() - 0.5
peaks['z'][k] = curz + zoffset
# Record emitter id in the 'id' field.
peaks['id'][k] = k
cury += spacing
curz += z_inc
curx += spacing
saH5Py.saveLocalizations(h5_name, peaks)
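# Hedged example of calling emittersOnGrid() directly (file name illustrative):
#   >>> emittersOnGrid("grid_list.hdf5", nx=10, ny=10, sigma=1.5, spacing=20.0,
#   ...                zrange=0.5, zoffset=0.0)
# This writes 100 localizations on a 10 x 10 grid with z ramping from -0.5 to
# +0.5 microns (plus zoffset).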
if (__name__ == "__main__"):
import argparse
parser = argparse.ArgumentParser(description = "Create a grid of emitters for simulations.")
parser.add_argument('--bin', dest='hdf5', type=str, required=True,
help = "The name of the HDF5 file to save the emitter locations, etc.")
parser.add_argument('--nx', dest='nx', type=int, required=True,
help = "The grid size in X.")
parser.add_argument('--ny', dest='ny', type=int, required=True,
help = "The grid size in Y.")
parser.add_argument('--sigma', dest='sigma', type=float, required=False, default = 1.5,
help = "PSF sigma in pixels.")
parser.add_argument('--spacing', dest='spacing', type=float, required=True,
help = "The grid spacing in pixels.")
parser.add_argument('--zrange', dest='zrange', type=float, required=False, default = 0.0,
help = "Range for z values in microns, -zrange to zrange")
parser.add_argument('--zoffset', dest='zoffset', type=float, required=False, default = 0.0,
help = "Offset for z values in microns")
args = parser.parse_args()
emittersOnGrid(args.hdf5, args.nx, args.ny, args.sigma, args.spacing, args.zrange, args.zoffset)
|
[
"argparse.ArgumentParser",
"storm_analysis.sa_library.sa_h5py.saveLocalizations",
"numpy.zeros",
"numpy.ones",
"random.random",
"random.seed"
] |
[((1206, 1246), 'storm_analysis.sa_library.sa_h5py.saveLocalizations', 'saH5Py.saveLocalizations', (['h5_name', 'peaks'], {}), '(h5_name, peaks)\n', (1230, 1246), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((1312, 1398), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a grid of emitters for simulations."""'}), "(description=\n 'Create a grid of emitters for simulations.')\n", (1335, 1398), False, 'import argparse\n'), ((311, 328), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (322, 328), False, 'import random\n'), ((484, 523), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {'dtype': 'numpy.int32'}), '(nx * ny, dtype=numpy.int32)\n', (495, 523), False, 'import numpy\n'), ((544, 564), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (555, 564), False, 'import numpy\n'), ((583, 603), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (594, 603), False, 'import numpy\n'), ((622, 642), 'numpy.zeros', 'numpy.zeros', (['(nx * ny)'], {}), '(nx * ny)\n', (633, 642), False, 'import numpy\n'), ((674, 693), 'numpy.ones', 'numpy.ones', (['(nx * ny)'], {}), '(nx * ny)\n', (684, 693), False, 'import numpy\n'), ((725, 744), 'numpy.ones', 'numpy.ones', (['(nx * ny)'], {}), '(nx * ny)\n', (735, 744), False, 'import numpy\n'), ((897, 912), 'random.random', 'random.random', ([], {}), '()\n', (910, 912), False, 'import random\n'), ((954, 969), 'random.random', 'random.random', ([], {}), '()\n', (967, 969), False, 'import random\n')]
|
"""
desisim.pixsim
==============
Tools for DESI pixel level simulations using specter
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import os.path
import random
from time import asctime
import socket
import astropy.units as u
import numpy as np
import desimodel.io
import desispec.io
from desispec.image import Image
import desispec.cosmics
from . import obs, io
from desiutil.log import get_logger
log = get_logger()
# Inhibit download of IERS-A catalog, even from a good server.
# Note that this is triggered by a call to astropy.time.Time(),
# which is subsequently used to compute sidereal_time().
# It's the initialization of astropy.time.Time() itself that makes the call.
from desiutil.iers import freeze_iers
from astropy.time import Time
def simulate_exposure(simspecfile, rawfile, cameras=None,
ccdshape=None, simpixfile=None, addcosmics=None, comm=None,
**kwargs):
"""
Simulate frames from an exposure, including I/O
Args:
simspecfile: input simspec format file with spectra
rawfile: output raw data file to write
Options:
cameras: str or list of str, e.g. b0, r1, .. z9
ccdshape: (npix_y, npix_x) primarily used to limit memory while testing
simpixfile: output file for noiseless truth pixels
addcosmics: if True (must be specified via command input), add cosmics from real data
comm: MPI communicator object
Additional keyword args are passed to pixsim.simulate()
For a lower-level pixel simulation interface that doesn't perform I/O,
see pixsim.simulate()
Note: call desi_preproc or desispec.preproc.preproc to pre-process the
output desi*.fits file for overscan subtraction, noise estimation, etc.
"""
#- Split communicator by nodes; each node processes N frames
#- Assumes / requires equal number of ranks per node
if comm is not None:
rank, size = comm.rank, comm.size
num_nodes = mpi_count_nodes(comm)
comm_node, node_index, num_nodes = mpi_split_by_node(comm, 1)
node_rank = comm_node.rank
node_size = comm_node.size
else:
log.debug('Not using MPI')
rank, size = 0, 1
comm_node = None
node_index = 0
num_nodes = 1
node_rank = 0
node_size = 1
if rank == 0:
log.debug('Starting simulate_exposure at {}'.format(asctime()))
if cameras is None:
if rank == 0:
from astropy.io import fits
fibermap = fits.getdata(simspecfile, 'FIBERMAP')
cameras = io.fibers2cameras(fibermap['FIBER'])
log.debug('Found cameras {} in input simspec file'.format(cameras))
if len(cameras) % num_nodes != 0:
raise ValueError('Number of cameras {} should be evenly divisible by number of nodes {}'.format(
len(cameras), num_nodes))
if comm is not None:
cameras = comm.bcast(cameras, root=0)
    #- Fail early if camera already in output file
if rank == 0 and os.path.exists(rawfile):
from astropy.io import fits
err = False
fx = fits.open(rawfile)
for camera in cameras:
if camera in fx:
log.error('Camera {} already in {}'.format(camera, rawfile))
err = True
if err:
raise ValueError('Some cameras already in output file')
#- Read simspec input; I/O layer handles MPI broadcasting
if rank == 0:
log.debug('Reading simspec at {}'.format(asctime()))
mycameras = cameras[node_index::num_nodes]
if node_rank == 0:
log.info("Assigning cameras {} to comm_exp node {}".format(mycameras, node_index))
simspec = io.read_simspec(simspecfile, cameras=mycameras,
readflux=False, comm=comm)
night = simspec.header['NIGHT']
expid = simspec.header['EXPID']
if rank == 0:
log.debug('Reading PSFs at {}'.format(asctime()))
psfs = dict()
#need to initialize previous channel
previous_channel = 'a'
for camera in mycameras:
#- Note: current PSF object can't be pickled and thus every
#- rank must read it instead of rank 0 read + bcast
channel = camera[0]
if channel not in psfs:
log.info('Reading {} PSF at {}'.format(channel, asctime()))
psfs[channel] = desimodel.io.load_psf(channel)
#- Trim effective CCD size; mainly to limit memory for testing
if ccdshape is not None:
psfs[channel].npix_y, psfs[channel].npix_x = ccdshape
psf = psfs[channel]
cosmics=None
#avoid re-broadcasting cosmics if we can
if previous_channel != channel:
if (addcosmics is True) and (node_rank == 0):
cosmics_file = io.find_cosmics(camera, simspec.header['EXPTIME'])
log.info('Reading cosmics templates {} at {}'.format(
cosmics_file, asctime()))
shape = (psf.npix_y, psf.npix_x)
cosmics = io.read_cosmics(cosmics_file, expid, shape=shape)
if (addcosmics is True) and (comm_node is not None):
if node_rank == 0:
log.info('Broadcasting cosmics at {}'.format(asctime()))
cosmics = comm_node.bcast(cosmics, root=0)
else:
log.debug("Cosmics not requested")
if node_rank == 0:
log.info("Starting simulate for camera {} on node {}".format(camera,node_index))
image, rawpix, truepix = simulate(camera, simspec, psf, comm=comm_node, preproc=False, cosmics=cosmics, **kwargs)
#- Use input communicator as barrier since multiple sub-communicators
#- will write to the same output file
if rank == 0:
log.debug('Writing outputs at {}'.format(asctime()))
tmprawfile = rawfile + '.tmp'
if comm is not None:
for i in range(comm.size):
if (i == comm.rank) and (comm_node.rank == 0):
desispec.io.write_raw(tmprawfile, rawpix, image.meta,
camera=camera)
if simpixfile is not None:
io.write_simpix(simpixfile, truepix, camera=camera,
meta=image.meta)
comm.barrier()
else:
desispec.io.write_raw(tmprawfile, rawpix, image.meta, camera=camera)
if simpixfile is not None:
io.write_simpix(simpixfile, truepix, camera=camera,
meta=image.meta)
if rank == 0:
log.info('Wrote {}'.format(rawfile))
log.debug('done at {}'.format(asctime()))
previous_channel = channel
#- All done; rename temporary raw file to final location
if comm is None or comm.rank == 0:
os.rename(tmprawfile, rawfile)
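#- Hedged usage sketch (file names illustrative): a serial, single-node call like
#-   simulate_exposure('simspec-00000123.fits', 'desi-00000123.fits',
#-                     cameras=['b0', 'r0', 'z0'], addcosmics=False,
#-                     simpixfile='simpix-00000123.fits')
#- writes the raw frames for the three cameras through a temporary
#- '<rawfile>.tmp' that is renamed to rawfile once all cameras are done.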
def simulate(camera, simspec, psf, nspec=None, ncpu=None,
cosmics=None, wavemin=None, wavemax=None, preproc=True, comm=None):
"""Run pixel-level simulation of input spectra
Args:
camera (string) : b0, r1, .. z9
simspec : desispec.io.SimSpec object from desispec.io.read_simspec()
psf : subclass of specter.psf.psf.PSF, e.g. from desimodel.io.load_psf()
Options:
nspec (int): number of spectra to simulate
ncpu (int): number of CPU cores to use in parallel
cosmics (desispec.image.Image): e.g. from desisim.io.read_cosmics()
wavemin (float): minimum wavelength range to simulate
wavemax (float): maximum wavelength range to simulate
preproc (boolean, optional) : also preprocess raw data (default True)
Returns:
(image, rawpix, truepix) tuple, where image is the preproc Image object
(only header is meaningful if preproc=False), rawpix is a 2D
ndarray of unprocessed raw pixel data, and truepix is a 2D ndarray
of truth for image.pix
"""
freeze_iers()
if (comm is None) or (comm.rank == 0):
log.info('Starting pixsim.simulate camera {} at {}'.format(camera,
asctime()))
#- parse camera name into channel and spectrograph number
channel = camera[0].lower()
ispec = int(camera[1])
assert channel in 'brz', \
'unrecognized channel {} camera {}'.format(channel, camera)
assert 0 <= ispec < 10, \
'unrecognized spectrograph {} camera {}'.format(ispec, camera)
assert len(camera) == 2, \
'unrecognized camera {}'.format(camera)
#- Load DESI parameters
params = desimodel.io.load_desiparams()
    #- this is not necessarily true, the truth is in the fibermap
nfibers = params['spectro']['nfibers']
phot = simspec.cameras[camera].phot
if simspec.cameras[camera].skyphot is not None:
phot += simspec.cameras[camera].skyphot
if nspec is not None:
phot = phot[0:nspec]
else:
nspec = phot.shape[0]
#- Trim wavelengths if needed
wave = simspec.cameras[camera].wave
if wavemin is not None:
ii = (wave >= wavemin)
phot = phot[:, ii]
wave = wave[ii]
if wavemax is not None:
ii = (wave <= wavemax)
phot = phot[:, ii]
wave = wave[ii]
#- Project to image and append that to file
if (comm is None) or (comm.rank == 0):
log.info('Starting {} projection at {}'.format(camera, asctime()))
# The returned true pixel values will only exist on rank 0 in the
# MPI case. Otherwise it will be None.
truepix = parallel_project(psf, wave, phot, ncpu=ncpu, comm=comm)
if (comm is None) or (comm.rank == 0):
log.info('Finished {} projection at {}'.format(camera,
asctime()))
image = None
rawpix = None
if (comm is None) or (comm.rank == 0):
#- Start metadata header
header = simspec.header.copy()
header['CAMERA'] = camera
header['DOSVER'] = 'SIM'
header['FEEVER'] = 'SIM'
header['DETECTOR'] = 'SIM'
#- Add cosmics from library of dark images
ny = truepix.shape[0] // 2
nx = truepix.shape[1] // 2
if cosmics is not None:
# set to zeros values with mask bit 0 (= dead column or hot pixels)
cosmics_pix = cosmics.pix*((cosmics.mask&1)==0)
pix = np.random.poisson(truepix) + cosmics_pix
try: #- cosmics templates >= v0.3
rdnoiseA = cosmics.meta['OBSRDNA']
rdnoiseB = cosmics.meta['OBSRDNB']
rdnoiseC = cosmics.meta['OBSRDNC']
rdnoiseD = cosmics.meta['OBSRDND']
except KeyError: #- cosmics templates <= v0.2
                print(cosmics.meta)
rdnoiseA = cosmics.meta['RDNOISE0']
rdnoiseB = cosmics.meta['RDNOISE1']
rdnoiseC = cosmics.meta['RDNOISE2']
rdnoiseD = cosmics.meta['RDNOISE3']
else:
pix = truepix
readnoise = params['ccd'][channel]['readnoise']
rdnoiseA = rdnoiseB = rdnoiseC = rdnoiseD = readnoise
#- data already has noise if cosmics were added
noisydata = (cosmics is not None)
#- Split by amplifier and expand into raw data
nprescan = params['ccd'][channel]['prescanpixels']
if 'overscanpixels' in params['ccd'][channel]:
noverscan = params['ccd'][channel]['overscanpixels']
else:
noverscan = 50
        #- Reproducibly random overscan bias level offsets across different exposures
assert channel in 'brz'
if channel == 'b':
irand = ispec
elif channel == 'r':
irand = 10 + ispec
elif channel == 'z':
irand = 20 + ispec
seeds = np.random.RandomState(0).randint(2**32-1, size=30)
rand = np.random.RandomState(seeds[irand])
nyraw = ny
nxraw = nx + nprescan + noverscan
rawpix = np.empty( (nyraw*2, nxraw*2), dtype=np.int32 )
gain = params['ccd'][channel]['gain']
#- Amp A/1 Lower Left
rawpix[0:nyraw, 0:nxraw] = \
photpix2raw(pix[0:ny, 0:nx], gain, rdnoiseA,
readorder='lr', nprescan=nprescan, noverscan=noverscan,
offset=rand.uniform(100, 200),
noisydata=noisydata)
#- Amp B/2 Lower Right
rawpix[0:nyraw, nxraw:nxraw+nxraw] = \
photpix2raw(pix[0:ny, nx:nx+nx], gain, rdnoiseB,
readorder='rl', nprescan=nprescan, noverscan=noverscan,
offset=rand.uniform(100, 200),
noisydata=noisydata)
#- Amp C/3 Upper Left
rawpix[nyraw:nyraw+nyraw, 0:nxraw] = \
photpix2raw(pix[ny:ny+ny, 0:nx], gain, rdnoiseC,
readorder='lr', nprescan=nprescan, noverscan=noverscan,
offset=rand.uniform(100, 200),
noisydata=noisydata)
#- Amp D/4 Upper Right
rawpix[nyraw:nyraw+nyraw, nxraw:nxraw+nxraw] = \
photpix2raw(pix[ny:ny+ny, nx:nx+nx], gain, rdnoiseD,
readorder='rl', nprescan=nprescan, noverscan=noverscan,
offset=rand.uniform(100, 200),
noisydata=noisydata)
def xyslice2header(xyslice):
'''
convert 2D slice into IRAF style [a:b,c:d] header value
e.g. xyslice2header(np.s_[0:10, 5:20]) -> '[6:20,1:10]'
'''
yy, xx = xyslice
value = '[{}:{},{}:{}]'.format(xx.start+1, xx.stop,
yy.start+1, yy.stop)
return value
#- Amp order from DESI-1964 (previously 1-4 instead of A-D)
#- C D
#- A B
xoffset = nprescan+nx+noverscan
header['PRESECA'] = xyslice2header(np.s_[0:nyraw, 0:0+nprescan])
header['DATASECA'] = xyslice2header(np.s_[0:nyraw, nprescan:nprescan+nx])
header['BIASSECA'] = xyslice2header(np.s_[0:nyraw, nprescan+nx:nprescan+nx+noverscan])
header['CCDSECA'] = xyslice2header(np.s_[0:ny, 0:nx])
header['PRESECB'] = xyslice2header(np.s_[0:nyraw, xoffset+noverscan+nx:xoffset+noverscan+nx+nprescan])
header['DATASECB'] = xyslice2header(np.s_[0:nyraw, xoffset+noverscan:xoffset+noverscan+nx])
header['BIASSECB'] = xyslice2header(np.s_[0:nyraw, xoffset:xoffset+noverscan])
header['CCDSECB'] = xyslice2header(np.s_[0:ny, nx:2*nx])
header['PRESECC'] = xyslice2header(np.s_[nyraw:2*nyraw, 0:0+nprescan])
header['DATASECC'] = xyslice2header(np.s_[nyraw:2*nyraw, nprescan:nprescan+nx])
header['BIASSECC'] = xyslice2header(np.s_[nyraw:2*nyraw, nprescan+nx:nprescan+nx+noverscan])
header['CCDSECC'] = xyslice2header(np.s_[ny:2*ny, 0:nx])
header['PRESECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset+noverscan+nx:xoffset+noverscan+nx+nprescan])
header['DATASECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset+noverscan:xoffset+noverscan+nx])
header['BIASSECD'] = xyslice2header(np.s_[nyraw:2*nyraw, xoffset:xoffset+noverscan])
header['CCDSECD'] = xyslice2header(np.s_[ny:2*ny, nx:2*nx])
#- Add additional keywords to mimic real raw data
header['INSTRUME'] = 'DESI'
header['PROCTYPE'] = 'RAW'
header['PRODTYPE'] = 'image'
header['EXPFRAME'] = 0
header['REQTIME'] = simspec.header['EXPTIME']
header['TIMESYS'] = 'UTC'
#- DATE-OBS format YEAR-MM-DDThh:mm:ss.sss -> OBSID kpnoYEARMMDDthhmmss
header['OBSID']='kp4m'+header['DATE-OBS'][0:19].replace('-','').replace(':','').lower()
header['TIME-OBS'] = header['DATE-OBS'].split('T')[1]
header['DELTARA'] = 0.0
header['DELTADEC'] = 0.0
header['SPECGRPH'] = ispec
header['CCDNAME'] = 'CCDS' + str(ispec) + str(channel).upper()
header['CCDPREP'] = 'purge,clear'
header['CCDSIZE'] = str(rawpix.shape)
header['CCDTEMP'] = 850.0
header['CPUTEMP'] = 63.7
header['CASETEMP'] = 62.8
header['CCDTMING'] = 'sim_timing.txt'
header['CCDCFG'] = 'sim.cfg'
header['SETTINGS'] = 'sim_detectors.json'
header['VESSEL'] = 7 #- I don't know what this is
header['FEEBOX'] = 'sim097'
header['PGAGAIN'] = 5
header['OCSVER'] = 'SIM'
header['CONSTVER'] = 'SIM'
header['BLDTIME'] = 0.35
header['DIGITIME'] = 61.9
#- Remove some spurious header keywords from upstream
if 'BUNIT' in header and header['BUNIT'] == 'Angstrom':
del header['BUNIT']
if 'MJD' in header and 'MJD-OBS' not in header:
header['MJD-OBS'] = header['MJD']
del header['MJD']
for key in ['RA', 'DEC']:
if key in header:
del header[key]
        #- Derive MJD-OBS from DATE-OBS if needed
if 'MJD-OBS' not in header:
header['MJD-OBS'] = Time(header['DATE-OBS']).mjd
#- from http://www-kpno.kpno.noao.edu/kpno-misc/mayall_params.html
kpno_longitude = -(111. + 35/60. + 59.6/3600) * u.deg
#- Convert DATE-OBS to sexigesimal (sigh) Local Sidereal Time
#- Use mean ST as close enough for sims to avoid nutation calc
t = Time(header['DATE-OBS'])
st = t.sidereal_time('mean', kpno_longitude).to('deg').value
hour = st/15
minute = (hour % 1)*60
second = (minute % 1)*60
header['ST'] = '{:02d}:{:02d}:{:0.3f}'.format(
int(hour), int(minute), second)
if preproc:
log.debug('Running preprocessing at {}'.format(asctime()))
image = desispec.preproc.preproc(rawpix, header, primary_header=simspec.header)
else:
log.debug('Skipping preprocessing')
image = Image(np.zeros(truepix.shape), np.zeros(truepix.shape), meta=header)
if (comm is None) or (comm.rank == 0):
log.info('Finished pixsim.simulate for camera {} at {}'.format(camera,
asctime()))
return image, rawpix, truepix
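#- Hedged usage sketch (inputs illustrative): with a simspec object and a PSF from
#- desimodel.io.load_psf('b'), one camera can be simulated serially with
#-   image, rawpix, truepix = simulate('b0', simspec, psf, nspec=25, preproc=False)
#- where rawpix is the integer raw frame (with pre/overscan), truepix is the
#- noiseless truth, and with preproc=False only image.meta (the header) is meaningful.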
def photpix2raw(phot, gain=1.0, readnoise=3.0, offset=None,
nprescan=7, noverscan=50, readorder='lr', noisydata=True):
'''
Add prescan, overscan, noise, and integerization to an image
Args:
phot: 2D float array of mean input photons per pixel
gain (float, optional): electrons/ADU
readnoise (float, optional): CCD readnoise in electrons
offset (float, optional): bias offset to add
nprescan (int, optional): number of prescan pixels to add
noverscan (int, optional): number of overscan pixels to add
readorder (str, optional): 'lr' or 'rl' to indicate readout order
'lr' : add prescan on left and overscan on right of image
'rl' : add prescan on right and overscan on left of image
noisydata (boolean, optional) : if True, don't add noise,
e.g. because input signal already had noise from a cosmics image
Returns 2D integer ndarray:
image = int((poisson(phot) + offset + gauss(readnoise))/gain)
    Integerization happens twice: the mean photons are Poisson sampled
    into integers, but then offsets, readnoise, and gain are applied before
    resampling into ADU integers
This is intended to be used per-amplifier, not for an entire CCD image.
'''
ny = phot.shape[0]
nx = phot.shape[1] + nprescan + noverscan
#- reading from right to left is effectively swapping pre/overscan counts
if readorder.lower() in ('rl', 'rightleft'):
nprescan, noverscan = noverscan, nprescan
img = np.zeros((ny, nx), dtype=float)
img[:, nprescan:nprescan+phot.shape[1]] = phot
if offset is None:
offset = np.random.uniform(100, 200)
if noisydata:
#- Data already has noise; just add offset and noise to pre/overscan
img += offset
img[0:ny, 0:nprescan] += np.random.normal(scale=readnoise, size=(ny, nprescan))
ix = phot.shape[1] + nprescan
img[0:ny, ix:ix+noverscan] += np.random.normal(scale=readnoise, size=(ny, noverscan))
img /= gain
else:
#- Add offset and noise to everything
noise = np.random.normal(loc=offset, scale=readnoise, size=img.shape)
img = np.random.poisson(img) + noise
img /= gain
return img.astype(np.int32)
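#- Worked example of the geometry above (numbers illustrative): for one amplifier
#- of 2048 x 2048 photon pixels with nprescan=7 and noverscan=50, the returned
#- array is 2048 rows x (7 + 2048 + 50) = 2105 columns; readorder='rl' simply
#- swaps which side the prescan and overscan strips land on.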
#- Helper function for multiprocessing parallel project
def _project(args):
"""
Helper function to project photons onto a subimage
Args:
tuple/array of [psf, wave, phot, specmin]
Returns (xyrange, subimage) such that
xmin, xmax, ymin, ymax = xyrange
image[ymin:ymax, xmin:xmax] += subimage
"""
try:
psf, wave, phot, specmin = args
nspec = phot.shape[0]
if phot.shape[-1] != wave.shape[-1]:
raise ValueError('phot.shape {} vs. wave.shape {} mismatch'.format(phot.shape, wave.shape))
xyrange = psf.xyrange( [specmin, specmin+nspec], wave )
img = psf.project(wave, phot, specmin=specmin, xyrange=xyrange)
return (xyrange, img)
except Exception as e:
if os.getenv('UNITTEST_SILENT') is None:
import traceback
print('-'*60)
print('ERROR in _project', psf.wmin, psf.wmax, wave[0], wave[-1], phot.shape, specmin)
traceback.print_exc()
print('-'*60)
raise e
#- Move this into specter itself?
def parallel_project(psf, wave, phot, specmin=0, ncpu=None, comm=None):
"""
Using psf, project phot[nspec, nw] vs. wave[nw] onto image
Return 2D image
"""
img = None
if comm is not None:
# MPI version
# Get a smaller communicator if not enough spectra
nspec = phot.shape[0]
if nspec < comm.size:
keep = int(comm.rank < nspec)
comm = comm.Split(color=keep)
if not keep:
return None
specs = np.arange(phot.shape[0], dtype=np.int32)
myspecs = np.array_split(specs, comm.size)[comm.rank]
nspec = phot.shape[0]
iispec = np.linspace(specmin, nspec, int(comm.size+1)).astype(int)
args = list()
if comm.rank == 0:
for i in range(comm.size):
if iispec[i+1] > iispec[i]:
args.append( [psf, wave, phot[iispec[i]:iispec[i+1]], iispec[i]] )
args=comm.scatter(args,root=0)
#now that all ranks have args, we can call _project
xy_subimg=_project(args)
#_project calls project calls spotgrid etc
xy_subimg=comm.gather(xy_subimg,root=0)
if comm.rank ==0:
#now all the data should be back at rank 0
# use same technique as multiprocessing to recombine the data
img = np.zeros( (psf.npix_y, psf.npix_x) )
for xyrange, subimg in xy_subimg:
xmin, xmax, ymin, ymax = xyrange
img[ymin:ymax, xmin:xmax] += subimg
#end of mpi section
else:
import multiprocessing as mp
if ncpu is None:
# Avoid hyperthreading
ncpu = mp.cpu_count() // 2
if ncpu <= 1:
#- Serial version
log.debug('Not using multiprocessing (ncpu={})'.format(ncpu))
img = psf.project(wave, phot, specmin=specmin)
else:
#- multiprocessing version
#- Split the spectra into ncpu groups
log.debug('Using multiprocessing (ncpu={})'.format(ncpu))
nspec = phot.shape[0]
iispec = np.linspace(specmin, nspec, ncpu+1).astype(int)
args = list()
for i in range(ncpu):
if iispec[i+1] > iispec[i]: #- can be false if nspec < ncpu
args.append( [psf, wave, phot[iispec[i]:iispec[i+1]], iispec[i]] )
#- Create pool of workers to do the projection using _project
#- xyrange, subimg = _project( [psf, wave, phot, specmin] )
pool = mp.Pool(ncpu)
xy_subimg = pool.map(_project, args)
#print("xy_subimg from pool")
#print(xy_subimg)
#print(len(xy_subimg))
img = np.zeros( (psf.npix_y, psf.npix_x) )
for xyrange, subimg in xy_subimg:
xmin, xmax, ymin, ymax = xyrange
img[ymin:ymax, xmin:xmax] += subimg
#- Prevents hangs of Travis tests
pool.close()
pool.join()
return img
def get_nodes_per_exp(nnodes,nexposures,ncameras,user_nodes_per_comm_exp=None):
"""
Calculate how many nodes to use per exposure
Args:
nnodes: number of nodes in MPI COMM_WORLD (not number of ranks)
nexposures: number of exposures to process
ncameras: number of cameras per exposure
user_nodes_per_comm_exp (int, optional): user override of number of
nodes to use; used to check requirements
Returns number of nodes to include in sub-communicators used to process
individual exposures
Notes:
* Uses the largest number of nodes per exposure that will still
result in efficient node usage
* requires that (nexposures*ncameras) / nnodes = int
* the derived nodes_per_comm_exp * nexposures / nodes = int
* See desisim.test.test_pixsim.test_get_nodes_per_exp() for examples
* if user_nodes_per_comm_exp is given, requires that
GreatestCommonDivisor(nnodes, ncameras) / user_nodes_per_comm_exp = int
"""
from math import gcd
import desiutil.log as logging
log = logging.get_logger()
log.setLevel(logging.INFO)
#check if nframes is evenly divisible by nnodes
nframes = ncameras*nexposures
if nframes % nnodes !=0:
### msg=("nframes {} must be evenly divisible by nnodes {}, try again".format(nframes, nnodes))
### raise ValueError(msg)
msg=("nframes {} is not evenly divisible by nnodes {}; packing will be inefficient".format(nframes, nnodes))
log.warning(msg)
else:
log.debug("nframes {} is evenly divisible by nnodes {}, check passed".format(nframes, nnodes))
#find greatest common divisor between nnodes and ncameras
#greatest common divisor = greatest common factor
#we use python's built in gcd
greatest_common_factor=gcd(nnodes,ncameras)
#the greatest common factor must be greater than one UNLESS we are on one node
if nnodes > 1:
if greatest_common_factor == 1:
msg=("greatest common factor {} between nnodes {} and nframes {} must be larger than one, try again".format(greatest_common_factor, nnodes, nframes))
raise ValueError(msg)
else:
log.debug("greatest common factor {} between nnodes {} and nframes {} is greater than one, check passed".format(greatest_common_factor, nnodes, nframes))
#check to make sure the user hasn't specified a really asinine value of user_nodes_per_comm_exp
if user_nodes_per_comm_exp is not None:
if greatest_common_factor % user_nodes_per_comm_exp !=0:
msg=("user-specified value of user_nodes_per_comm_exp {} is bad, try again".format(user_nodes_per_comm_exp))
raise ValueError(msg)
else:
log.debug("user-specified value of user_nodes_per_comm_exp {} is good, check passed".format(user_nodes_per_comm_exp))
nodes_per_comm_exp=user_nodes_per_comm_exp
#if the user didn't specify anything, use the greatest common factor
if user_nodes_per_comm_exp is None:
nodes_per_comm_exp=greatest_common_factor
#finally check to make sure exposures*gcf/nnodes is an integer to avoid inefficient node use
if (nexposures*nodes_per_comm_exp) % nnodes != 0:
### msg=("nexposures {} * nodes_per_comm_exp {} does not divide evenly into nnodes {}, try again".format(nexposures, nodes_per_comm_exp, nnodes))
### raise ValueError(msg)
msg=("nexposures {} * nodes_per_comm_exp {} does not divide evenly into nnodes {}; packing will be inefficient".format(nexposures, nodes_per_comm_exp, nnodes))
log.warning(msg)
else:
log.debug("nexposures {} * nodes_per_comm_exp {} divides evenly into nnodes {}, check passed".format(nexposures, nodes_per_comm_exp, nnodes))
return nodes_per_comm_exp
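#- A worked example of the packing logic above (illustrative numbers, not from a
#- real run): with nnodes=12, nexposures=6 and ncameras=30, nframes=180 divides
#- evenly by 12, gcd(12, 30)=6, and nexposures*nodes_per_comm_exp = 6*6 = 36 is a
#- multiple of 12, so every check passes and the function returns 6:
#-     nodes_per_exp = get_nodes_per_exp(nnodes=12, nexposures=6, ncameras=30)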
#-------------------------------------------------------------------------
#- MPI utility functions
#- These functions assist with splitting a communicator across node boundaries.
#- That constraint isn't required by MPI, but can be convenient for humans
#- thinking about "I want to process one camera with one node" or "I want to
#- process 6 exposures with 20 nodes using 10 nodes per exposure"
def mpi_count_nodes(comm):
'''
Return the number of nodes in this communicator
'''
nodenames = comm.allgather(socket.gethostname())
num_nodes=len(set(nodenames))
return num_nodes
def mpi_split_by_node(comm, nodes_per_communicator):
'''
Split an MPI communicator into sub-communicators with integer numbers
of nodes per communicator
Args:
comm: MPI communicator
nodes_per_communicator: number of nodes per sub-communicator
Returns:
MPI sub-communicator, node_index, total_num_nodes
Notes:
* total number of nodes in original communicator must be an integer
multiple of nodes_per_communicator
* if comm is split into N sub-communicators, node_index is the index
of which of the N is returned for this rank
* total_num_nodes = number of nodes in original communicator
'''
num_nodes = mpi_count_nodes(comm)
if comm.size % num_nodes != 0:
raise ValueError('Variable number of ranks per node')
if num_nodes % nodes_per_communicator != 0:
raise ValueError('Input number of nodes {} must be divisible by nodes_per_communicator {}'.format(
num_nodes, nodes_per_communicator))
ranks_per_communicator = comm.size // (num_nodes // nodes_per_communicator)
node_index = comm.rank // ranks_per_communicator
comm_node = comm.Split(color = node_index)
return comm_node, node_index, num_nodes
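#- Usage sketch (assumes an mpi4py environment where every node hosts the same
#- number of ranks; the variable names are illustrative):
#-     from mpi4py import MPI
#-     comm = MPI.COMM_WORLD
#-     comm_sub, sub_index, num_nodes = mpi_split_by_node(comm, nodes_per_communicator=2)
#-     #- ranks sharing the same sub_index now process one exposure together via comm_sub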
|
[
"numpy.empty",
"numpy.arange",
"numpy.random.normal",
"multiprocessing.cpu_count",
"desiutil.log.get_logger",
"time.asctime",
"traceback.print_exc",
"astropy.io.fits.getdata",
"os.path.exists",
"numpy.random.RandomState",
"socket.gethostname",
"desiutil.iers.freeze_iers",
"numpy.random.poisson",
"numpy.linspace",
"astropy.time.Time",
"os.rename",
"math.gcd",
"astropy.io.fits.open",
"multiprocessing.Pool",
"os.getenv",
"numpy.random.uniform",
"numpy.zeros",
"numpy.array_split"
] |
[((457, 469), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (467, 469), False, 'from desiutil.log import get_logger\n'), ((8037, 8050), 'desiutil.iers.freeze_iers', 'freeze_iers', ([], {}), '()\n', (8048, 8050), False, 'from desiutil.iers import freeze_iers\n'), ((19668, 19699), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'float'}), '((ny, nx), dtype=float)\n', (19676, 19699), True, 'import numpy as np\n'), ((25633, 25653), 'desiutil.log.get_logger', 'logging.get_logger', ([], {}), '()\n', (25651, 25653), True, 'import desiutil.log as logging\n'), ((26372, 26393), 'math.gcd', 'gcd', (['nnodes', 'ncameras'], {}), '(nnodes, ncameras)\n', (26375, 26393), False, 'from math import gcd\n'), ((3073, 3096), 'os.path.exists', 'os.path.exists', (['rawfile'], {}), '(rawfile)\n', (3087, 3096), False, 'import os\n'), ((3167, 3185), 'astropy.io.fits.open', 'fits.open', (['rawfile'], {}), '(rawfile)\n', (3176, 3185), False, 'from astropy.io import fits\n'), ((6918, 6948), 'os.rename', 'os.rename', (['tmprawfile', 'rawfile'], {}), '(tmprawfile, rawfile)\n', (6927, 6948), False, 'import os\n'), ((11888, 11923), 'numpy.random.RandomState', 'np.random.RandomState', (['seeds[irand]'], {}), '(seeds[irand])\n', (11909, 11923), True, 'import numpy as np\n'), ((12003, 12051), 'numpy.empty', 'np.empty', (['(nyraw * 2, nxraw * 2)'], {'dtype': 'np.int32'}), '((nyraw * 2, nxraw * 2), dtype=np.int32)\n', (12011, 12051), True, 'import numpy as np\n'), ((17319, 17343), 'astropy.time.Time', 'Time', (["header['DATE-OBS']"], {}), "(header['DATE-OBS'])\n", (17323, 17343), False, 'from astropy.time import Time\n'), ((19792, 19819), 'numpy.random.uniform', 'np.random.uniform', (['(100)', '(200)'], {}), '(100, 200)\n', (19809, 19819), True, 'import numpy as np\n'), ((19971, 20025), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'readnoise', 'size': '(ny, nprescan)'}), '(scale=readnoise, size=(ny, nprescan))\n', (19987, 20025), True, 'import numpy as np\n'), ((20102, 20157), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'readnoise', 'size': '(ny, noverscan)'}), '(scale=readnoise, size=(ny, noverscan))\n', (20118, 20157), True, 'import numpy as np\n'), ((20251, 20312), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'offset', 'scale': 'readnoise', 'size': 'img.shape'}), '(loc=offset, scale=readnoise, size=img.shape)\n', (20267, 20312), True, 'import numpy as np\n'), ((21999, 22039), 'numpy.arange', 'np.arange', (['phot.shape[0]'], {'dtype': 'np.int32'}), '(phot.shape[0], dtype=np.int32)\n', (22008, 22039), True, 'import numpy as np\n'), ((28889, 28909), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (28907, 28909), False, 'import socket\n'), ((2545, 2582), 'astropy.io.fits.getdata', 'fits.getdata', (['simspecfile', '"""FIBERMAP"""'], {}), "(simspecfile, 'FIBERMAP')\n", (2557, 2582), False, 'from astropy.io import fits\n'), ((20327, 20349), 'numpy.random.poisson', 'np.random.poisson', (['img'], {}), '(img)\n', (20344, 20349), True, 'import numpy as np\n'), ((22058, 22090), 'numpy.array_split', 'np.array_split', (['specs', 'comm.size'], {}), '(specs, comm.size)\n', (22072, 22090), True, 'import numpy as np\n'), ((22834, 22868), 'numpy.zeros', 'np.zeros', (['(psf.npix_y, psf.npix_x)'], {}), '((psf.npix_y, psf.npix_x))\n', (22842, 22868), True, 'import numpy as np\n'), ((24041, 24054), 'multiprocessing.Pool', 'mp.Pool', (['ncpu'], {}), '(ncpu)\n', (24048, 24054), True, 'import multiprocessing as mp\n'), ((24231, 24265), 'numpy.zeros', 'np.zeros', (['(psf.npix_y, 
psf.npix_x)'], {}), '((psf.npix_y, psf.npix_x))\n', (24239, 24265), True, 'import numpy as np\n'), ((2423, 2432), 'time.asctime', 'asctime', ([], {}), '()\n', (2430, 2432), False, 'from time import asctime\n'), ((3564, 3573), 'time.asctime', 'asctime', ([], {}), '()\n', (3571, 3573), False, 'from time import asctime\n'), ((3973, 3982), 'time.asctime', 'asctime', ([], {}), '()\n', (3980, 3982), False, 'from time import asctime\n'), ((8181, 8190), 'time.asctime', 'asctime', ([], {}), '()\n', (8188, 8190), False, 'from time import asctime\n'), ((9463, 9472), 'time.asctime', 'asctime', ([], {}), '()\n', (9470, 9472), False, 'from time import asctime\n'), ((9779, 9788), 'time.asctime', 'asctime', ([], {}), '()\n', (9786, 9788), False, 'from time import asctime\n'), ((10389, 10415), 'numpy.random.poisson', 'np.random.poisson', (['truepix'], {}), '(truepix)\n', (10406, 10415), True, 'import numpy as np\n'), ((11822, 11846), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (11843, 11846), True, 'import numpy as np\n'), ((16998, 17022), 'astropy.time.Time', 'Time', (["header['DATE-OBS']"], {}), "(header['DATE-OBS'])\n", (17002, 17022), False, 'from astropy.time import Time\n'), ((17873, 17896), 'numpy.zeros', 'np.zeros', (['truepix.shape'], {}), '(truepix.shape)\n', (17881, 17896), True, 'import numpy as np\n'), ((17898, 17921), 'numpy.zeros', 'np.zeros', (['truepix.shape'], {}), '(truepix.shape)\n', (17906, 17921), True, 'import numpy as np\n'), ((18071, 18080), 'time.asctime', 'asctime', ([], {}), '()\n', (18078, 18080), False, 'from time import asctime\n'), ((21186, 21214), 'os.getenv', 'os.getenv', (['"""UNITTEST_SILENT"""'], {}), "('UNITTEST_SILENT')\n", (21195, 21214), False, 'import os\n'), ((21390, 21411), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (21409, 21411), False, 'import traceback\n'), ((23170, 23184), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (23182, 23184), True, 'import multiprocessing as mp\n'), ((4349, 4358), 'time.asctime', 'asctime', ([], {}), '()\n', (4356, 4358), False, 'from time import asctime\n'), ((5872, 5881), 'time.asctime', 'asctime', ([], {}), '()\n', (5879, 5881), False, 'from time import asctime\n'), ((6761, 6770), 'time.asctime', 'asctime', ([], {}), '()\n', (6768, 6770), False, 'from time import asctime\n'), ((17681, 17690), 'time.asctime', 'asctime', ([], {}), '()\n', (17688, 17690), False, 'from time import asctime\n'), ((23603, 23640), 'numpy.linspace', 'np.linspace', (['specmin', 'nspec', '(ncpu + 1)'], {}), '(specmin, nspec, ncpu + 1)\n', (23614, 23640), True, 'import numpy as np\n'), ((4987, 4996), 'time.asctime', 'asctime', ([], {}), '()\n', (4994, 4996), False, 'from time import asctime\n'), ((5289, 5298), 'time.asctime', 'asctime', ([], {}), '()\n', (5296, 5298), False, 'from time import asctime\n')]
|
import re
import pandas as pd
import numpy as np
import ast
import pickle
import datetime
from nltk.corpus import stopwords
import pkg_resources
# from pkg_resources import resource_string, resource_listdir
def memoize(func):
memory = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in memory:
memory[key] = func(*args, **kwargs)
return memory[key]
return memoizer
@memoize
def levenshtein( s, t):
if s == "" or t == "":
return max(len(s), len(t))
if s[-1] == t[-1]:
cost = 0
else:
cost = 1
res = min([levenshtein(s[:-1], t) + 1,
levenshtein(s, t[:-1]) + 1,
levenshtein(s[:-1], t[:-1]) + cost])
# print(res)
return res
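# Quick sanity check for the recursive edit distance above (illustrative):
# levenshtein("kitten", "sitting") == 3  (two substitutions and one insertion)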
class ICD10:
def __init__(self):
data_file = pkg_resources.resource_filename('dbaicd10.resources', "dba_icd10.csv")
vocabulary_file = pkg_resources.resource_filename('dbaicd10.resources', "vocab_list.pkl")
## setting data and vocabulary
self.data = pd.read_csv(data_file)
self.data['Approximate Synonyms'] = self.data['Approximate Synonyms']\
.apply(lambda x: ast.literal_eval(x))
self.data['Applicable To'] = self.data['Applicable To'] \
.apply(lambda x: ast.literal_eval(x))
self.data['Clinical Info'] = self.data['Clinical Info'] \
.apply(lambda x: ast.literal_eval(x))
infile = open(vocabulary_file, 'rb')
self.vocab_list = pickle.load(infile)
infile.close()
self.stop_words = set(stopwords.words('english'))
# @memoize
# @staticmethod
def auto_correct(self, sentence, remove_stop_words=False, vocab=None, threshold=70):
'''
        Auto-corrects a sentence using a vocabulary built from the ICD10 dataset
        :param sentence: (String) text that needs to be autocorrected
:param remove_stop_words: (boolean) whether to remove stopwords from sentence
:param vocab: (list of string) If need to provide a custom vocabulary
        :param threshold: (Integer: default=70) Corrects the word if it matches at least threshold percent with some word from the vocabulary
:return: (String) autocorrected sentence
'''
## Preprocessing
sentence = sentence.lower()
### Make alphanumeric
sentence = re.sub(r'\W+', ' ', sentence)
## remove double spaces
sentence = re.sub(' +', ' ', sentence)
allowed_error = 1 - (threshold / 100)
if vocab is None:
vocab = self.vocab_list
words = sentence.split()
final_sent = ''
for word in words:
            ## for each word we look in the vocabulary for the vocab_word with the least distance
distance = 9999
best_match = None
for vocab_word in vocab:
dist = levenshtein(vocab_word, word)
if dist < distance:
distance = dist
best_match = vocab_word
if distance < allowed_error * len(word):
final_sent = final_sent + " " + best_match
else:
final_sent = final_sent + " " + word
return final_sent.strip()
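    # Usage sketch (hypothetical input; the actual correction depends on the packaged
    # ICD10 vocabulary and the chosen threshold):
    #     icd = ICD10()
    #     icd.auto_correct("muscle fatige", threshold=70)
    #     # -> "muscle fatigue" if "fatigue" is within the allowed edit distance of a vocab word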
def search_helper(self, row, keywords):
## first search in name
# print( keywords)
# Step 1: Score of Name ( score = how many words match )
name = row['name'].lower().split()
# print(name)
name_score = 0
for keyword in keywords:
if keyword.lower().strip() in name:
name_score += 1
# print(name_score)
        ## Step 2: Score of approximate synonyms
## now search in approximate synonyms
synonyms = row['Approximate Synonyms']
# synonyms = ast.literal_eval(synonyms)
# print(synonyms)
syn_scores = [0] * len(synonyms)
# there are multiple synonyms for each row,
# so we find score for each of them
for i, synonym in enumerate(synonyms):
synonym = synonym.lower().split()
for keyword in keywords:
if keyword.lower() in synonym:
syn_scores[i] += 1
# score of synonym is max of score of each synonym
synonym_score = np.max(syn_scores)
        ## Step 3: Score of Applicable To
## now search in Applicable To
applicable_tos = row['Applicable To']
# applicable_tos = ast.literal_eval(applicable_tos)
# print(applibable_tos[0])
# for dk in
# synonyms = ast.literal_eval(synonyms)
# print(synonyms)
applicable_scores = [0] * len(applicable_tos)
## there are multiple applicable to for each row
# so we find score for each of them
for i, applicable in enumerate(applicable_tos):
# if applicable == 'Tennis elbow':
# print('Tennis elbow found')
# print(applicable)
applicable = applicable.lower().split()
for keyword in keywords:
if keyword.lower() in applicable:
applicable_scores[i] += 1
        # score of Applicable To is the max score over its entries
applicable_score = np.max(applicable_scores)
## STEP 4: Score of Clinical Info
## now search in Applicable To
clinical_infos = row['Clinical Info']
# clinical_infos = ast.literal_eval(clinical_infos)
# print(synonyms)
clinical_scores = [0] * len(clinical_infos)
## there are multiple applicable to for each row
# so we find score for each of them
for i, clinical in enumerate(clinical_infos):
clinical = clinical.lower().split()
for keyword in keywords:
if keyword.lower() in clinical:
clinical_scores[i] += 1
        # score of Clinical Info is the max score over its entries
clinical_score = np.max(clinical_scores)
# print(syn_score)
# we return the score which is better name or synonym
# print([name_score, synonym_score, applicable_score, clinical_score])
return np.max([name_score, synonym_score, applicable_score, clinical_score])
def search_helper2(self, row, keywords):
INCREMENT_SCORE_BY = 1
## first search in name
# print( keywords)
        ## just make one big string of all columns, and see how many of the keywords we can find
all_cols = ''
all_cols += row['name'].lower()
all_cols += " ".join(row['Approximate Synonyms'])
# score of clinical info should be less than others
clinical_info = " ".join(row['Clinical Info'])
all_cols += " ".join(row['Applicable To'])
# lower
all_cols = all_cols.strip().lower()
all_cols = re.sub(r'\W+', ' ', all_cols)
## remove double spaces
all_cols = re.sub(' +', ' ', all_cols)
# all_words = all_cols.split()
score = 0
        ## search for keywords
for keyword in keywords:
            ## SOME OPTIMIZATIONS: here we calculate a few things that we will need multiple times
SPACE_IN_KEYWORD = ' ' in keyword
KEYWORD_SPLIT = keyword.split()
## if we find exact keyword ( example: "muscle fatigue" ) then score is
            ## increased by 1
if keyword in all_cols:
## if keyword is of multple words, and it matches means it should increase score more
if SPACE_IN_KEYWORD:
score += 1.23 * INCREMENT_SCORE_BY
else:
score += INCREMENT_SCORE_BY
## else we find if keyword can be further divided into smaller keywords
elif SPACE_IN_KEYWORD:
for temp_keyword in KEYWORD_SPLIT:
if temp_keyword in all_cols:
score += 0.23 * INCREMENT_SCORE_BY
## if found in clinical info, increase the score, but less
if keyword in clinical_info:
score += INCREMENT_SCORE_BY * 0.6
elif SPACE_IN_KEYWORD:
for temp_keyword in KEYWORD_SPLIT:
if temp_keyword in clinical_info:
score += 0.23 * INCREMENT_SCORE_BY * 0.6
## extra scores
## if keyword is in name only then we give it extra score
if keyword in row['name'].lower():
score += INCREMENT_SCORE_BY * 0.23
elif SPACE_IN_KEYWORD:
for temp_keyword in KEYWORD_SPLIT:
if temp_keyword in row['name']:
score += 0.1 * INCREMENT_SCORE_BY
return score
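    # Worked example of the scoring above (hypothetical row): searching for the single
    # keyword "tennis elbow" against a row named "Lateral epicondylitis" whose
    # Approximate Synonyms include "tennis elbow" adds 1.23 for the exact multi-word
    # match in all_cols; nothing extra comes from the clinical info or the name,
    # so the row scores 1.23.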
def search(self, keyword, auto_correct_keywords=True, show_time_spent=True, return_top_n=10,
split_with_stopwords=True):
'''
        Search the ICD10 dataset for the provided keywords. It performs a simple word-match search.
        :param keyword: (String) keywords or sentence to search for. Keywords separated by spaces
:param auto_correct_keywords: (Boolean: default=True) Keep it true for spell check of the given keywords
:param show_time_spent: (Boolean: default=True) Display time utilized for search
        :param return_top_n: (Integer: default=10) Number of top results to return. If set to 10, returns the top 10 results
:param split_with_stopwords: (Boolean: default=True) Keep it true if you want to split the search query from stopwords instead of space. Refer example below for more info
:return: Returns a pandas dataframe with top matches
use case:
search("<NAME>")
search("<NAME>", auto_correct_keywords=True, return_top_n=5)
Example of split_with_stopwords:
There might be cases where you want to keep two words together, instance "Right Hand"
So here we split the query from stopwords instead of spaces. Thus,
"Fracture in right hand" becomes => ["fracture", "right hand"] instead of "fracture", "right", hand"]
Note that "in" was the stopword and query got splitted from "in"
'''
before = datetime.datetime.now()
keyword = keyword.lower()
if auto_correct_keywords:
keyword = self.auto_correct(keyword)
if split_with_stopwords:
for stopword in self.stop_words:
if stopword in keyword:
keyword = keyword.replace(' ' + stopword + ' ', '#')
keywords = keyword.split('#')
else:
keywords = keyword.split()
keywords = " ".join([d for d in keywords if d not in self.stop_words])
keywords = keywords.split()
print('Searching for: "' + " ".join(keywords) + '"')
result = self.data.apply(self.search_helper2, axis=1, keywords=keywords)
after = datetime.datetime.now()
diff = after - before
if show_time_spent:
print("Search completed in", diff.seconds, "seconds")
return self.data.loc[result.nlargest(return_top_n, keep='first').index]
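# Example usage (illustrative; the ranking depends on the bundled ICD10 data):
#     icd = ICD10()
#     results = icd.search("fracture in right hand", return_top_n=5)
#     print(results['name'])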
|
[
"pandas.read_csv",
"pkg_resources.resource_filename",
"datetime.datetime.now",
"numpy.max",
"pickle.load",
"nltk.corpus.stopwords.words",
"ast.literal_eval",
"re.sub"
] |
[((839, 909), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""dbaicd10.resources"""', '"""dba_icd10.csv"""'], {}), "('dbaicd10.resources', 'dba_icd10.csv')\n", (870, 909), False, 'import pkg_resources\n'), ((936, 1007), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""dbaicd10.resources"""', '"""vocab_list.pkl"""'], {}), "('dbaicd10.resources', 'vocab_list.pkl')\n", (967, 1007), False, 'import pkg_resources\n'), ((1068, 1090), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {}), '(data_file)\n', (1079, 1090), True, 'import pandas as pd\n'), ((1562, 1581), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1573, 1581), False, 'import pickle\n'), ((2418, 2447), 're.sub', 're.sub', (['"""\\\\W+"""', '""" """', 'sentence'], {}), "('\\\\W+', ' ', sentence)\n", (2424, 2447), False, 'import re\n'), ((2499, 2526), 're.sub', 're.sub', (['""" +"""', '""" """', 'sentence'], {}), "(' +', ' ', sentence)\n", (2505, 2526), False, 'import re\n'), ((4382, 4400), 'numpy.max', 'np.max', (['syn_scores'], {}), '(syn_scores)\n', (4388, 4400), True, 'import numpy as np\n'), ((5334, 5359), 'numpy.max', 'np.max', (['applicable_scores'], {}), '(applicable_scores)\n', (5340, 5359), True, 'import numpy as np\n'), ((6049, 6072), 'numpy.max', 'np.max', (['clinical_scores'], {}), '(clinical_scores)\n', (6055, 6072), True, 'import numpy as np\n'), ((6264, 6333), 'numpy.max', 'np.max', (['[name_score, synonym_score, applicable_score, clinical_score]'], {}), '([name_score, synonym_score, applicable_score, clinical_score])\n', (6270, 6333), True, 'import numpy as np\n'), ((6937, 6966), 're.sub', 're.sub', (['"""\\\\W+"""', '""" """', 'all_cols'], {}), "('\\\\W+', ' ', all_cols)\n", (6943, 6966), False, 'import re\n'), ((7018, 7045), 're.sub', 're.sub', (['""" +"""', '""" """', 'all_cols'], {}), "(' +', ' ', all_cols)\n", (7024, 7045), False, 'import re\n'), ((10299, 10322), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10320, 10322), False, 'import datetime\n'), ((11011, 11034), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11032, 11034), False, 'import datetime\n'), ((1636, 1662), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1651, 1662), False, 'from nltk.corpus import stopwords\n'), ((1235, 1254), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (1251, 1254), False, 'import ast\n'), ((1352, 1371), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (1368, 1371), False, 'import ast\n'), ((1469, 1488), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (1485, 1488), False, 'import ast\n')]
|
#-*- coding: utf-8 -*-
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
# ---------- Made by <NAME> @ircam on 11/2015
# ---------- Copyright (c) 2018 CREAM Lab // CNRS / IRCAM / Sorbonne Université
# ----------
# ---------- process video in different ways
# ---------- to use this don't forget to include these lines before your script:
# ----------
# ----------
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
from __future__ import absolute_import
from __future__ import print_function
import sys
from transform_audio import extract_sentences_tags
def extract_audio(video_file, target_name):
"""
parameters:
video_file : file to extract wav from
extract audio to wav and return the name of the extracted audio
"""
import subprocess
import os
try:
os.remove(target_name)
except:
pass
base = os.path.basename(video_file)
file_name = os.path.splitext(base)[0]
path = os.path.dirname(os.path.realpath(video_file))
command = "ffmpeg -i "+video_file+" -ab 160k -ac 2 -ar 44100 -vn "+ target_name
subprocess.call(command, shell=True)
return target_name
def replace_audio(video_file, new_audio, target_video):
"""
Replace the audio of video_file with new_audio
parameters:
video_file : video file to change the audio
new_audio : new audio to use
target_video : video to be created
"""
import subprocess
import os
#remove target video if it exists
try:
os.remove(target_video)
except:
pass
base = os.path.basename(video_file)
file_name = os.path.splitext(base)[0]
path = os.path.dirname(os.path.realpath(video_file))
#Old command to process /mov, re-encoding audio
#command = "ffmpeg -i "+video_file+" -i "+new_audio+" -map 0:v -map 1:a -c copy -shortest " + target_video
#New command to process mp4, re-encoding audio
command = "ffmpeg -i "+video_file+" -i "+new_audio+" -c:v copy -map 0:v:0 -map 1:a:0 "+target_video
subprocess.call(command, shell=True)
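# Usage sketch (hypothetical file names; requires ffmpeg on the PATH):
#     wav = extract_audio("interview.mp4", "interview.wav")
#     # ... process interview.wav ...
#     replace_audio("interview.mp4", "interview.wav", "interview_fixed.mp4")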
def erase_audio_from_video(input_video, output_video):
"""
Erase the audio from a video
parameters:
input_video : source video
output_video : video to be created without the audio
"""
import subprocess
import os
command = "ffmpeg -i "+input_video+" -vcodec copy -an "+output_video
subprocess.call(command, shell=True)
def get_movie_stream(video):
"""
Get movie stream
parameters:
video : video from which stream will be extracted
"""
import subprocess
import os
import shlex, subprocess
command = "ffmpeg -i "+video+" 2>&1 | grep \"Stream\""
output = subprocess.check_output(command, shell=True)
return output
def get_movie_duration(video):
"""
Get the duration of video
parameters:
        video : video input
"""
import subprocess
import os
import shlex, subprocess
command = "ffmpeg -i "+video+" 2>&1 | grep \"Duration\""
output = subprocess.check_output(command, shell=True)
return output
def extract_sentences_in_video(source_name, target_folder, rms_threshold = -50, WndSize = 16384, overlap = 8192):
"""
In a video with many sentences and blanks between the sentences, create separate video files with one sentence per file.
    This is very useful for indexing databases.
This only works if the video has sound.
The algorithm looks at the RMS of the audio and extracts the sentences.
parameters:
        source_name : video input
        target_folder : folder in which the videos will be created
        rms_threshold : RMS threshold below which the audio is considered silence
WndSize :
overlap :
"""
import os
audio_name = "auxiliary_audio_file_14357XXX.wav"
try:
os.remove(audio_name)
except:
pass
file_name = os.path.splitext(source_name)[0]
file_name = os.path.basename(file_name)
extract_audio(source_name, audio_name)
tags, lengths = extract_sentences_tags(audio_name, rms_threshold = rms_threshold, WndSize = WndSize, overlap = overlap)
cpt=1
for tag in tags:
extract_sub_video_sentences( source_name = source_name
, target_name = target_folder +file_name+"_"+ str(cpt)+ ".mp4"
, start = tag[0]
                                    , length = lengths[cpt-1] # use the length that corresponds to this tag
)
cpt+=1
os.remove(audio_name)
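# Usage sketch (hypothetical paths; the source video must contain an audio track):
#     extract_sentences_in_video("session.mp4", "sentences/", rms_threshold=-50)
#     # creates sentences/session_1.mp4, sentences/session_2.mp4, ...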
def extract_sub_video_sentences(source_name, target_name, start, length):
"""
Takes a sub video in source_name begining at start and ending at end
The start time should be in this format: 00:24:00
Length in seconds.
"""
import subprocess
import os
#command = "ffmpeg -i "+source_name+" -ss "+start+" -t "+end+" -async 1 "+target_name
command = "ffmpeg -i "+source_name+" -ss "+start+" -t "+length+" -async 1 -strict -2 "+target_name
subprocess.call(command, shell=True)
def video_scaling(source_name, target_name, resolution):
"""
changes the scale of the video
source name : video to be transformed
target name : transformed video
resolution : target resolution, tuple, pair of values
"""
import subprocess
import os
try:
os.remove(target_name)
except:
pass
resolution = [str(i) for i in resolution]
command = "ffmpeg -i "+source_name+" -vf scale="+resolution[0]+":"+resolution[1]+":force_original_aspect_ratio=decrease -strict -2 "+target_name+" -hide_banner"
subprocess.call(command, shell=True)
def four_3_to_16_9(source_name, target_name):
"""
    converts a 4:3 video to a 16:9 aspect ratio
    source name : video to be transformed
    target name : transformed video
"""
import subprocess
import os
try:
os.remove(target_name)
except:
pass
#command = "ffmpeg -i "+source_name+" -vf \"scale=640x480,setsar=1,pad=854:480:107:0\" "+target_name
command = "ffmpeg -i "+source_name+" -vf scale=1080x1920,setdar=16:9 "+target_name
#command = "ffmpeg -i "+source_name+" -vf scale=1080x1920,setdar=16:9 "+target_name
subprocess.call(command, shell=True)
def four_3_to_16_9_v2(source_name, target_name):
import subprocess
import os
try:
os.remove(target_name)
except:
pass
#command = "ffmpeg -i "+source_name+" -vf \"scale=640x480,setsar=1,pad=854:480:107:0\" "+target_name
command = "ffmpeg -i "+source_name+" -q:v 10 -g 1 "+target_name
subprocess.call(command, shell=True)
def cut_silence_in_video(source, target, rmsTreshhold = -40, WndSize = 128):
"""
    cut the silence at the beginning and at the end of the video
source name : video to be transformed
target name : transformed video
rmsTreshhold : threshold to use for silence
    WndSize : window size to search for silence at the beginning and at the end
"""
import os
try:
os.remove(target)
except:
pass
#extract audio
audio_aux = "aux_audio_15452.wav"
extract_audio(source, audio_aux)
    #Extract time tags for beginning and end
from .transform_audio import get_sound_without_silence
begining_s, end_s = get_sound_without_silence(audio_aux, rmsTreshhold = rmsTreshhold, WndSize = WndSize)
len_s = end_s - begining_s
#adapt the time to the good format
from datetime import timedelta, datetime
begining_s = datetime(1,1,1) + timedelta(seconds=begining_s)
end_s = datetime(1,1,1) + timedelta(seconds=end_s)
len_s = datetime(1,1,1) + timedelta(seconds=len_s)
begining_s = "%d:%d:%d.%3d" % (begining_s.hour, begining_s.minute, begining_s.second, begining_s.microsecond)
end_s = "%d:%d:%d.%3d" % (end_s.hour, end_s.minute, end_s.second, end_s.microsecond)
len_s = "%d:%d:%d.%3d" % (len_s.hour, len_s.minute,len_s.second , len_s.microsecond)
print(begining_s)
print(end_s)
print(len_s)
#extract sub_video
extract_sub_video_sentences(source, target, begining_s, len_s)
#remove aux audio
os.remove(audio_aux)
def denoise_audio_in_video(source, target, gain_reduction =10, amp_threshold = -55, wnd_size = 8192 ):
"""
    denoise the sound from a video
source : video to be transformed
target : transformed video
amp_threshold : threshold to use for silence
gain_reduction : gain to reduce the noise
wnd_size : moving window size
"""
from super_vp_commands import denoise_sound
import os
try:
os.remove(target)
except:
pass
#extract audio
audio_aux = "aux_audio_1.wav"
extract_audio(source, audio_aux)
#denoise sound
audio_aux2 = "aux_audio_2.wav"
denoise_sound(audio_aux, audio_aux2
, gain_reduction = gain_reduction
, amp_threshold = amp_threshold
, wnd_size = wnd_size
)
#replace sound
replace_audio(
video_file = source
, new_audio = audio_aux2
, target_video = target
)
#remove sounds
os.remove(audio_aux)
os.remove(audio_aux2)
def change_video_format(source, target):
"""
    Convert the source video to the container format implied by the target file's extension
"""
import subprocess
import os
try:
os.remove(target)
except:
pass
#command = "ffmpeg -i "+source+" -vcodec copy -acodec copy "+target
command = "ffmpeg -i "+source+" -ar 22050 -b 3298k "+target
subprocess.call(command, shell=True)
def convert_to_avi(source, target):
import subprocess
import os
try:
os.remove(target)
except:
pass
#command = "ffmpeg -i "+source+" -vcodec mpeg4 -vtag XVID -b 990k -bf 2 -g 300 -s 640x360 -pass 1 -an -threads 0 -f rawvideo -y /dev/null"
#subprocess.call(command, shell=True)
#command = "ffmpeg -i "+source+" -vcodec mpeg4 -vtag XVID -b 990k -bf 2 -g 300 -s 720x576 -acodec libmp3lame -ab 256k -ar 48000 -ac 2 -pass 2 -threads 0 -f avi "+target
command = "ffmpeg -i "+source+" -vcodec mpeg4 -vtag XVID -b 990k -bf 2 -g 300 -s 720x576 -acodec libmp3lame -pass 1 -ab 256 -threads 0 -f avi "+target
subprocess.call(command, shell=True)
def extract_frames_video(source, folder, tag="", fps=25):
"""
Extract the frames of a video
"""
import subprocess
import os
os.mkdir(folder)
command = "ffmpeg -i "+source+" -r "+str(fps)+" "+folder+tag+"$filename%01d.bmp"
subprocess.call(command, shell=True)
def change_frame_rate(source, target_fps, output):
import subprocess
import os
command = "ffmpeg -i "+source+" -filter:v fps="+str(target_frame_rate) +" " +output
subprocess.call(command, shell=True)
def crop_video(source_video, target_video, x=0, y=0,out_w=0 , out_h=0 ):
import subprocess
import os
#Something like this might work:
command = "ffmpeg -i "+source_video+" -strict -2 -filter:v crop="+str(out_w)+":"+str(out_h)+":"+str(x)+":"+str(y)+" "+target_video
subprocess.call(command, shell=True)
def get_fps(source):
"""
Get fps of video file
"""
import subprocess
import os
import shlex
command = "ffprobe -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate " + source
output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).communicate()[0]
return output
#subprocess.call(command, shell=True)
#p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#out, err = p.communicate()
#return out
#p = subprocess.Popen(shlex.split(command), bufsize=1, universal_newlines=True)
#return p.communicate()
#from subprocess import check_output
#x = check_output(["ffprobe", "-v", "error", "-select_streams", "v", "-of", "default=noprint_wrappers=1:nokey=1", "-show_entries stream=r_frame_rate", source],shell=True,stderr=subprocess.STDOUT)
#return x
def color_correction(source_video, target_video, gamma=1.5, saturation =1.3):
"""
    Gamma adjusts the overall brightness curve
    Saturation increases color intensity
"""
import subprocess
import os
command = "ffmpeg -i " + source_video + " -vf eq=gamma="+str(1.5)+":saturation="+str(saturation)+" -c:a copy "+ target_video
subprocess.call(command, shell=True)
def create_movie_from_frames(frame_name_tag, fps, img_extension , target_video, video_codec="copy", preset="ultrafast", loseless=0):
"""
Create a movie with a series of frames
frame_name_tag : if your frames are named frame_001.png, frame_name_tag="frame_"
target_video : the video that will be created
    video_codec : specify the video codec to pass to ffmpeg.
                    Possibilities:
                        "copy" : will copy frames; videos will be very big, but generation will be fast
"libx265" : generation will be slow but videos will be small
"preset" : The default is medium. The preset determines compression efficiency and therefore affects encoding speed.
options: superfast, veryfast, faster, fast, medium, slow, slower, veryslow, and placebo.
Use the slowest preset you have patience for. Ignore placebo as it provides insignificant returns for a significant increase in encoding time.
"loseless" : 1 : losseless encoding; 0 : loss encoding
"""
import subprocess
import os
#command = "ffmpeg -framerate "+str(fps)+" -i "+frame_name_tag+"%d"+img_extension+" -vcodec "+video_codec+" -acodec copy -preset ultrafast "+target_video
command = "ffmpeg -framerate "+str(fps)+" -i "+frame_name_tag+"%d"+img_extension+" -vcodec "+video_codec+" -acodec copy -preset "+preset+" -x265-params lossless="+str(loseless)+" "+target_video
subprocess.call(command, shell=True)
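# Usage sketch (assumes frames named frame_1.png, frame_2.png, ... in the working directory):
#     create_movie_from_frames("frame_", fps=25, img_extension=".png",
#                              target_video="out.mp4", video_codec="libx265")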
def compress_to_h265(video_in, video_out):
import subprocess
import os
command = "ffmpeg -i "+video_in+" -vcodec libx265 -crf 28 "+video_out
subprocess.call(command, shell=True)
def sharpen_video(source_video, target_video):
"""
Create a movie with a series of frames
frame_name_tag : if your frames are named frame_001.png, frame_name_tag="frame_"
target_video : the video that will be created
"""
import subprocess
import os
command = "ffmpeg -i "+source_video+" -vf unsharp "+target_video
subprocess.call(command, shell=True)
def combine_2_videos(left, right, output):
"""
    Combine 2 videos side by side (horizontal stack)
"""
import subprocess
#command = "ffmpeg -i "+left+" -i "+right+" -filter_complex \"[0][1]scale2ref=w=oh*mdar:h=ih[left][right];[left][right]hstack\" "+output
#command = "ffmpeg -i "+left+" -i "+ right+" -filter_complex \"[0]scale=175:100:force_original_aspect_ratio=decrease,pad=175:100:-1:-1:color=gray,setsar=1[left];[1]scale=175:100:force_original_aspect_ratio=decrease,pad=175:100:-1:-1:color=gray,setsar=1[right];[left][right]hstack\" "+ output
command = "ffmpeg -i "+left+" -i "+right+" -filter_complex \"[0][1]scale2ref=w=oh*mdar:h=ih[left][right];[left][right]hstack\" "+ output
subprocess.call(command, shell=True)
def combine_videos( tl, tr, bl, br,output, audios = [] ):
"""
    Combine 4 videos into a 2x2 grid, optionally mixing their audio
tl : top left; tr : top right, bl : bottom left; br : bottom_right
"""
import subprocess
import os
if audios != []:
master_audio = "master_audio_aux_file.wav"
combine_audio(audios, master_audio)
#command = "ffmpeg -i "+tl+" -i "+tr+" -i "+bl+" -i "+bl+" -i "+ master_audio +" -filter_complex \"[0:v][1:v]hstack[t];[2:v][3:v]hstack[b];[t][b]vstack[v]\" -map \"[v]\" -c:a copy -shortest "+ output
command = "ffmpeg -i "+tl+" -i "+tr+" -i "+bl+" -i "+br+" -i "+master_audio+" -filter_complex \"[0:v][1:v]hstack[t];[2:v][3:v]hstack[b];[t][b]vstack[v]\" -map \"[v]\" -map 4:a -c:a copy -shortest "+output
else :
command = "ffmpeg -i "+tl+" -i "+tr+" -i "+bl+" -i "+bl+" -ac 2 -filter_complex \"[0:v][1:v]hstack[t];[2:v][3:v]hstack[b];[t][b]vstack[v]\" -map \"[v]\" -c:a copy -shortest "+ output
subprocess.call(command, shell=True)
#delete master audio
if audios != []:
os.remove(master_audio)
def combine_audio(files, target_audio, pre_normalisation=True):
"""
input : file names in an array (you can use videos!!)
Combine audio_files into one
"""
import soundfile
from transform_audio import wav_to_mono
import os
import numpy as np
#Extract audio from video and convert to mono
audio_files = []
for cpt, file in enumerate(files):
#extract audio
audio = str(cpt)+"_aux_audio_1439.wav"
audio_files.append(audio)
extract_audio(file, audio)
#To mono
wav_to_mono(audio, audio)
#read audios
raw_audios = []
for file in audio_files:
#read audio
x, fs = soundfile.read(file)
#normalize loudness, if needed
if pre_normalisation:
x = x / np.max(x)
raw_audios.append(x)
#Pad difference
lengths = [len(i) for i in raw_audios]
#Find longer file
max_value = max(lengths)
max_index = lengths.index(max_value)
#pad audio
paded_audio = []
for raw_audio in raw_audios:
diff = abs(len(raw_audio) - max_value)
pad = [0.0 for i in range(diff)]
pad = np.asarray(pad)
paded_audio.append(np.concatenate([raw_audio, pad]))
paded_audio = np.sum(paded_audio, axis=0)
#normalize
paded_audio = paded_audio/ np.max(paded_audio)
#Export audio
soundfile.write(target_audio, paded_audio , fs)
#delete files
for file in audio_files:
os.remove(file)
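# Usage sketch (hypothetical inputs; video files also work because the audio is
# extracted first):
#     combine_audio(["take1.mp4", "take2.mp4"], "mixed.wav", pre_normalisation=True)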
|
[
"os.mkdir",
"os.remove",
"numpy.sum",
"transform_audio.extract_sentences_tags",
"numpy.max",
"datetime.timedelta",
"soundfile.write",
"subprocess.Popen",
"soundfile.read",
"os.path.basename",
"subprocess.check_output",
"os.path.realpath",
"numpy.asarray",
"datetime.datetime",
"subprocess.call",
"numpy.concatenate",
"super_vp_commands.denoise_sound",
"transform_audio.wav_to_mono",
"os.path.splitext"
] |
[((1006, 1034), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (1022, 1034), False, 'import os\n'), ((1212, 1248), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1227, 1248), False, 'import subprocess\n'), ((1637, 1665), 'os.path.basename', 'os.path.basename', (['video_file'], {}), '(video_file)\n', (1653, 1665), False, 'import os\n'), ((2072, 2108), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2087, 2108), False, 'import subprocess\n'), ((2410, 2446), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2425, 2446), False, 'import subprocess\n'), ((2701, 2745), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (2724, 2745), False, 'import subprocess\n'), ((2993, 3037), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (3016, 3037), False, 'import subprocess\n'), ((3839, 3866), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (3855, 3866), False, 'import os\n'), ((3925, 4027), 'transform_audio.extract_sentences_tags', 'extract_sentences_tags', (['audio_name'], {'rms_threshold': 'rms_threshold', 'WndSize': 'WndSize', 'overlap': 'overlap'}), '(audio_name, rms_threshold=rms_threshold, WndSize=\n WndSize, overlap=overlap)\n', (3947, 4027), False, 'from transform_audio import extract_sentences_tags\n'), ((4268, 4289), 'os.remove', 'os.remove', (['audio_name'], {}), '(audio_name)\n', (4277, 4289), False, 'import os\n'), ((4738, 4774), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (4753, 4774), False, 'import subprocess\n'), ((5288, 5324), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (5303, 5324), False, 'import subprocess\n'), ((5894, 5930), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (5909, 5930), False, 'import subprocess\n'), ((6228, 6264), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (6243, 6264), False, 'import subprocess\n'), ((7665, 7685), 'os.remove', 'os.remove', (['audio_aux'], {}), '(audio_aux)\n', (7674, 7685), False, 'import os\n'), ((8247, 8366), 'super_vp_commands.denoise_sound', 'denoise_sound', (['audio_aux', 'audio_aux2'], {'gain_reduction': 'gain_reduction', 'amp_threshold': 'amp_threshold', 'wnd_size': 'wnd_size'}), '(audio_aux, audio_aux2, gain_reduction=gain_reduction,\n amp_threshold=amp_threshold, wnd_size=wnd_size)\n', (8260, 8366), False, 'from super_vp_commands import denoise_sound\n'), ((8531, 8551), 'os.remove', 'os.remove', (['audio_aux'], {}), '(audio_aux)\n', (8540, 8551), False, 'import os\n'), ((8553, 8574), 'os.remove', 'os.remove', (['audio_aux2'], {}), '(audio_aux2)\n', (8562, 8574), False, 'import os\n'), ((8887, 8923), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (8902, 8923), False, 'import subprocess\n'), ((9542, 9578), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (9557, 9578), False, 'import subprocess\n'), ((9712, 9728), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (9720, 9728), False, 'import os\n'), ((9813, 9849), 'subprocess.call', 'subprocess.call', 
(['command'], {'shell': '(True)'}), '(command, shell=True)\n', (9828, 9849), False, 'import subprocess\n'), ((10018, 10054), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (10033, 10054), False, 'import subprocess\n'), ((10328, 10364), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (10343, 10364), False, 'import subprocess\n'), ((11520, 11556), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (11535, 11556), False, 'import subprocess\n'), ((12904, 12940), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (12919, 12940), False, 'import subprocess\n'), ((13092, 13128), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (13107, 13128), False, 'import subprocess\n'), ((13458, 13494), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (13473, 13494), False, 'import subprocess\n'), ((14175, 14211), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (14190, 14211), False, 'import subprocess\n'), ((15123, 15159), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (15138, 15159), False, 'import subprocess\n'), ((16529, 16556), 'numpy.sum', 'np.sum', (['paded_audio'], {'axis': '(0)'}), '(paded_audio, axis=0)\n', (16535, 16556), True, 'import numpy as np\n'), ((16655, 16701), 'soundfile.write', 'soundfile.write', (['target_audio', 'paded_audio', 'fs'], {}), '(target_audio, paded_audio, fs)\n', (16670, 16701), False, 'import soundfile\n'), ((956, 978), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (965, 978), False, 'import os\n'), ((1048, 1070), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (1064, 1070), False, 'import os\n'), ((1098, 1126), 'os.path.realpath', 'os.path.realpath', (['video_file'], {}), '(video_file)\n', (1114, 1126), False, 'import os\n'), ((1588, 1611), 'os.remove', 'os.remove', (['target_video'], {}), '(target_video)\n', (1597, 1611), False, 'import os\n'), ((1679, 1701), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (1695, 1701), False, 'import os\n'), ((1729, 1757), 'os.path.realpath', 'os.path.realpath', (['video_file'], {}), '(video_file)\n', (1745, 1757), False, 'import os\n'), ((3741, 3762), 'os.remove', 'os.remove', (['audio_name'], {}), '(audio_name)\n', (3750, 3762), False, 'import os\n'), ((3793, 3822), 'os.path.splitext', 'os.path.splitext', (['source_name'], {}), '(source_name)\n', (3809, 3822), False, 'import os\n'), ((5041, 5063), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (5050, 5063), False, 'import os\n'), ((5581, 5603), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (5590, 5603), False, 'import os\n'), ((6019, 6041), 'os.remove', 'os.remove', (['target_name'], {}), '(target_name)\n', (6028, 6041), False, 'import os\n'), ((6629, 6646), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (6638, 6646), False, 'import os\n'), ((7075, 7092), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7083, 7092), False, 'from datetime import timedelta, datetime\n'), ((7093, 7122), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'begining_s'}), '(seconds=begining_s)\n', (7102, 7122), False, 'from datetime import timedelta, 
datetime\n'), ((7132, 7149), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7140, 7149), False, 'from datetime import timedelta, datetime\n'), ((7150, 7174), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'end_s'}), '(seconds=end_s)\n', (7159, 7174), False, 'from datetime import timedelta, datetime\n'), ((7184, 7201), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7192, 7201), False, 'from datetime import timedelta, datetime\n'), ((7202, 7226), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'len_s'}), '(seconds=len_s)\n', (7211, 7226), False, 'from datetime import timedelta, datetime\n'), ((8081, 8098), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (8090, 8098), False, 'import os\n'), ((8721, 8738), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (8730, 8738), False, 'import os\n'), ((9001, 9018), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (9010, 9018), False, 'import os\n'), ((15203, 15226), 'os.remove', 'os.remove', (['master_audio'], {}), '(master_audio)\n', (15212, 15226), False, 'import os\n'), ((15788, 15813), 'transform_audio.wav_to_mono', 'wav_to_mono', (['audio', 'audio'], {}), '(audio, audio)\n', (15799, 15813), False, 'from transform_audio import wav_to_mono\n'), ((15921, 15941), 'soundfile.read', 'soundfile.read', (['file'], {}), '(file)\n', (15935, 15941), False, 'import soundfile\n'), ((16429, 16444), 'numpy.asarray', 'np.asarray', (['pad'], {}), '(pad)\n', (16439, 16444), True, 'import numpy as np\n'), ((16608, 16627), 'numpy.max', 'np.max', (['paded_audio'], {}), '(paded_audio)\n', (16614, 16627), True, 'import numpy as np\n'), ((16763, 16778), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (16772, 16778), False, 'import os\n'), ((16472, 16504), 'numpy.concatenate', 'np.concatenate', (['[raw_audio, pad]'], {}), '([raw_audio, pad])\n', (16486, 16504), True, 'import numpy as np\n'), ((10607, 10668), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE)\n', (10623, 10668), False, 'import subprocess\n'), ((16040, 16049), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (16046, 16049), True, 'import numpy as np\n')]
|
# Import essential libraries
import requests
import cv2
import numpy as np
import imutils
import mediapipe as mp
import threading
import pygame.mixer
from pygame import *
import time
import os
import sys
import multiprocessing
#Global variables definition
landmarks= {'thumb': [1,2,3,4], 'index': [5,6,7,8], 'middle': [9,10,11,12], 'ring': [13,14,15,16], 'little': [17,18,19,20]} #Position landmarks index corresponding to each finger. Refer to mediapipe github repo for more details
tip_landmarks = [4,8,12,16,20] #index of tip position of all fingers
dist_threshold_param= {'thumb': 8.6, 'index': 6, 'middle': 6, 'ring': 6, 'little': 5} #customized dist threshold values for calibration of finger_detect_and_compute module
left_detect=np.zeros(5);right_detect=np.zeros(5) #arrays representing detected finger presses for each hand
left_coordinates=np.zeros((5,2));right_coordinates=np.zeros((5,2)) #arrays representing pixel coordinates of each detected finger press (tip landmark)
bboxes_white=np.zeros((52,4)) #initializing bboxes for all white keys in standard 88key piano
bboxes_black=np.zeros((36,4)) #initializing bboxes for all black keys in standard 88key piano
start_x=40; start_y=250; #starting pixel coordinates of piano
white_key_width=10; white_key_height=80; black_key_width=5; black_key_height=40 #params related to piano visualization
white_key_reference=[]#list containing reference key values for all white keys.
black_key_reference=[]#list containing reference key values for all black keys.
key_index_array=[]#stores indexes and colors for all detected key presses
play_music_status=1
visualizer_status=1
class handDetector():
def __init__(self, mode=False, maxHands=4, detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands #Max no of hands to be detected in one frame.
self.detectionCon = detectionCon #detection confidence
self.trackCon = trackCon #tracking confidence--enables tracking rather than detection on every frame if tracking confidence is good (improves fps)
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands()
self.mpDraw = mp.solutions.drawing_utils #drawing object used for drawing later on the image
def findHands(self, img, draw=True):
""" Function: Get results and Draw landmarks on the read image for all hands detected in the frame
Arguments: self, img: image to draw landmarks on,
draw: if True, draws landmarks on the image frame
returns: img: final image with the landmarks """
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
# print(results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms,
self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo=0, draw=True):
""" Function: Store position of all landmarks corresponding to a hand in a list
Arguments: self, img: image to draw landmarks on.
draw: If True, draws landmarks on the image frame.
handNo: index of corresponding hand (left or right)
returns: List: List of image coordinates and id's of position landmarks of a hand """
List = []
if (self.results.multi_hand_landmarks):
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
List.append([id, cx, cy])
# if draw:
# cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
List=np.array(List)
return List
def handsCount(self):
""" Function: calculates the total no of hands detected in an image frame
Arguments: self
returns: total no of hands detected in a frame """
# Returns the no of hand detected in the image frame
dim=np.shape(np.array(self.results.multi_hand_landmarks))
if(dim):
return dim[0]
else:
return 0
def check_threshold(p1,p2,p3,finger):
""" Function: Checks whether a key press is detected for a finger based on a mathematical condition
Arguments: p1,p2,p3: positions of landmarks of a finger
                   finger: string name of the finger (used to look up its calibrated distance threshold)
returns: boolean value of whether key press is detected or not """
global dist_threshold_param
p1=p1/10
p2=p2/10
p3=p3/10
dist = np.linalg.norm(p1 - p2) + np.linalg.norm(p3 - p2) + np.linalg.norm(p1 - p3) #Calculating sum of absolute distances b/w three landmark points of a finger. This is a noobie algo. Can be improved!
return (dist<dist_threshold_param[finger]) #return True if this is smaller than a prespecified threshold during calibration
def finger_detect_and_compute(list):
""" Function: Computes whether a key is actually pressed using fingers of a hand in an image frame. Also computes the coordinates of tip_landmarks corresponding to the pressed fingers
Arguments: list: a list containing all position landmarks of a hand
        returns: detect_array: boolean array representing corresponding key presses
                 coordinates: pixel coordinates of the tip landmarks of the pressed keys """
detect_array=np.array([(int)(check_threshold(list[2][1:3],list[3][1:3],list[4][1:3],'thumb')),(int)(check_threshold(list[6][1:3],list[7][1:3],list[8][1:3],'index')),(int)(check_threshold(list[10][1:3],list[11][1:3],list[12][1:3],'middle')),(int)(check_threshold(list[14][1:3],list[15][1:3],list[16][1:3],'ring')),(int)(check_threshold(list[18][1:3],list[19][1:3],list[20][1:3],'little'))])
coordinates=np.zeros((5,2))
for i in range(5):
if(detect_array[i]!=0):
coordinates[i]=list[tip_landmarks][i,1:3]
return detect_array,coordinates
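# Illustrative reading of the heuristic above: when a finger folds to "press" a key,
# its three upper landmarks bunch together, so the summed pairwise distance (scaled
# down by 10) drops below that finger's entry in dist_threshold_param and the finger
# is flagged as pressed.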
def initialize_visualizer(img1):
""" Function: Initialize all variables important to piano visualization on image
Arguments: img1: Image to display piano image
returns: img_background: updated background image to display piano image """
global bboxes_white, bboxes_black, start_x, start_y, white_key_width, white_key_height, black_key_width, black_key_height
curr_x=start_x; curr_y=start_y
img_background=img1.copy()
for i in range(52):#Initializing 52 white piano keys
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+white_key_width,curr_y+white_key_height), [255,255,255], 2)
bboxes_white[i]=[curr_x,curr_y,curr_x+white_key_width,curr_y+white_key_height]
curr_x=curr_x + white_key_width
#Overlaying the first odd black key
curr_x= (int)(start_x + white_key_width-black_key_width/2.0)
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+black_key_width,curr_y+black_key_height), [0,0,0], -1)
bboxes_black[0]=[curr_x,curr_y,curr_x+black_key_width,curr_y+black_key_height]
curr_x=curr_x + 2*white_key_width
for i in range(7): #initializing the remaining black keys
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+black_key_width,curr_y+black_key_height), [0,0,0], -1)
bboxes_black[i*5+1]=[curr_x,curr_y,curr_x+black_key_width,curr_y+black_key_height]
curr_x=curr_x + white_key_width
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+black_key_width,curr_y+black_key_height), [0,0,0], -1)
bboxes_black[i*5+2]=[curr_x,curr_y,curr_x+black_key_width,curr_y+black_key_height]
curr_x=curr_x + 2*white_key_width
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+black_key_width,curr_y+black_key_height), [0,0,0], -1)
bboxes_black[i*5+3]=[curr_x,curr_y,curr_x+black_key_width,curr_y+black_key_height]
curr_x=curr_x + white_key_width
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+black_key_width,curr_y+black_key_height), [0,0,0], -1)
bboxes_black[i*5+4]=[curr_x,curr_y,curr_x+black_key_width,curr_y+black_key_height]
curr_x=curr_x + white_key_width
img_background = cv2.rectangle(img_background, (curr_x,curr_y), (curr_x+black_key_width,curr_y+black_key_height), [0,0,0], -1)
bboxes_black[i*5+5]=[curr_x,curr_y,curr_x+black_key_width,curr_y+black_key_height]
curr_x=curr_x + 2*white_key_width
print("White_bboxes=",bboxes_white)
print("Black_bboxes=",bboxes_white)
return img_background
def visualizer(img_background):
""" Function: Visualize updated piano set on an image
Arguments: img_background:updated image to display piano image
returns: None """
global key_index_array,bboxes_white,bboxes_black
if(visualizer_status):
print("In thread1")
try:
if(len(key_index_array)!=0): #Makes the pressed piano keys in different color for better visualization
for key_index,color in key_index_array:
if(color=='white'):
xmin,ymin,xmax,ymax = bboxes_white[key_index]
start=(int(xmin),int(ymin))
end=(int(xmax),int(ymax))
print("start and end=",(start,end))
img_background_new=cv2.rectangle(img_background,start,end,(255,182,193),-1)
print('Printing key pressed-----------------------------',(key_index,color))
if(color=='black'):
xmin,ymin,xmax,ymax = bboxes_black[key_index]
start=(int(xmin),int(ymin))
end=(int(xmax),int(ymax))
print("start and end=",(start,end))
img_background_new=cv2.rectangle(img_background,start,end,(144,238,144),-1)
print('Printing key pressed-----------------------------',(key_index,color))
print("key_index_array=",key_index_array)
key_index_array=[]
except KeyboardInterrupt:
print("Exiting visualizer thread")
sys.exit()
def piano_key_initializer():
""" Function: Initialize piano keys for music. Used global variables white_key_reference and black_key_reference
Arguments: None
returns: None """
global white_key_reference, black_key_reference
white_key_reference.append('a0')
white_key_reference.append('b0')
black_key_reference.append('a-0')
for i in range(7):
white_key_reference.append('c'+str(i+1))
white_key_reference.append('d'+str(i+1))
white_key_reference.append('e'+str(i+1))
white_key_reference.append('f'+str(i+1))
white_key_reference.append('g'+str(i+1))
white_key_reference.append('a'+str(i+1))
white_key_reference.append('b'+str(i+1))
black_key_reference.append('c-'+str(i+1))
black_key_reference.append('d-'+str(i+1))
black_key_reference.append('f-'+str(i+1))
black_key_reference.append('g-'+str(i+1))
black_key_reference.append('a-'+str(i+1))
white_key_reference.append('c8')
print("Piano Keys Initialized Succesfully!")
def within_threshold(pos,item):
""" Function: check if the tip of pressed is within the threshold of piano key boundaries
Arguments: pos: x,y pixel coordinates of the tip of finger
item: boundaries of bbox of a particular key of a piano
returns: boolean value"""
if(pos[0]>item[0] and pos[0]<item[2] and pos[1]>item[1] and pos[1]<item[3]):
return True
else:
return False
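# Editor's note (illustrative values): within_threshold((15, 20), [10, 10, 30, 40]) is True,
# since 10 < 15 < 30 and 10 < 20 < 40; each bbox is stored as [xmin, ymin, xmax, ymax].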
def find_note(pos):
""" Function: Given coordinates of a key pressed (finger tip), returns string name of ogg file to be played!
Arguments: pos: x,y pixel coordinates of the tip of finger
returns: note: ogg file address
index: index of the pressed key
                 color: color of the pressed key: 'black' or 'white' """
x,y=pos
index=0
global bboxes_white,bboxes_black,white_key_reference,black_key_reference
for id, items in enumerate(bboxes_black):
if(within_threshold(pos,items)):
index=id
note=black_key_reference[id]
return note,index,'black'
for id, items in enumerate(bboxes_white):
if(within_threshold(pos,items)):
index=id
note=white_key_reference[id]
return note,index,'white'
return 'Wrong Press',100,'None'
def find_music_list(pos,num):
    """ Function: Prepares the music list of piano keys to be played, given the positions of all pressed piano keys and the number of keys pressed
Arguments: pos: positions of all finger tips corresponding to which a key press is detected
               num: number of keys pressed at a time
returns: music_list: list of all piano music files to be played"""
music_list=[]; global key_index_array
for i in range(num):
note,key_index,color=find_note(pos[i])
if(note!='Wrong Press'):
key_index_array.append([key_index,color])
for fname in os.listdir('/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/'):
if note in fname:
note=fname
break
music_list.append('/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/'+ note)
return music_list
def build_music_list():
""" Function: Builds the list of piano keys to play music
Arguments: none
returns: music_list: list of all piano music files to be played"""
global left_detect,left_coordinates,right_detect,right_coordinates
positions=[];music_list=[]
if(play_music_status):
try:
for i in range(5):
if(left_detect[i]!=0):
positions.append(left_coordinates[i])
if(right_detect[i]!=0):
positions.append(right_coordinates[i])
num=len(positions)
print('num=',num)
if(num!=0):
music_list=find_music_list(positions,num)
print("Printing Music list in play_music:",music_list)
return music_list
except KeyboardInterrupt:
print("Exiting play music thread")
sys.exit()
def play_music(q,status):
""" Function: Plays piano music in a separate python process
Arguments: q: queue to pass music_list data among different python processes
status: can be used to switch off this process (not required)
returns: None"""
print("Processing play_music process")
while True:
try:
print("In the play_music function--Checking condition")
mixer.init()
pygame.mixer.set_num_channels(10) # default is 8
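            # Descriptive note: one mixer channel is used per note below, so several keys
            # pressed together can sound simultaneously (up to the 10 channels set above).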
music_list=q.get()
if(len(music_list)!=0):
for id,items in enumerate(music_list):
pygame.mixer.Channel(id).play(pygame.mixer.Sound(music_list[id]))
import time
time.sleep(2)
except KeyboardInterrupt:
print("Play_music process stopped forcefully")
sys.exit()
def reinitialize():
""" Function: Reinitialize suitable global variables after every iteration
Arguments: none
returns: none"""
global right_detect,right_coordinates,left_detect,left_coordinates,key_index_array
left_detect=np.zeros(5);right_detect=np.zeros(5)
left_coordinates=np.zeros((5,2));right_coordinates=np.zeros((5,2))
key_index_array=[]
def processor(q,status):
""" Function: Primary process to read image frames from server->detect finger landmarks->find finger tip positions and build music lists
Arguments: none
        returns: None (runs until interrupted; music lists are passed to play_music through the queue)"""
# Declare useful variables
pTime = 0; cTime = 0; right_hand=1; left_hand=0
lmList=[]; rmList=[]
detector = handDetector()
global right_detect,right_coordinates,left_detect,left_coordinates,play_music_status,key_index_array
music_list_curr=[]
music_list_prev=[]
url = "http://192.168.29.189:8080/shot.jpg"
status.put(1)
    # While loop to continuously fetch data from the URL
while True:
try:
print("Queue Size=",q.qsize())
# Read image data from server and preprocess
img_resp = requests.get(url)
img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)
img = cv2.imdecode(img_arr, -1)
img = imutils.resize(img, width=640, height=480)
# Detect finger landmarks in left (or/and right hand)
hands=1
img = detector.findHands(img) #draw hand landmarks on image
lmList = detector.findPosition(img,left_hand) #storing position of landmarks in an array
hands=detector.handsCount() #find total no of hands in image frame
print("No of hands are",hands)
if(hands>1):
rmList = detector.findPosition(img,right_hand)
if len(lmList) != 0:
left_detect,left_coordinates = finger_detect_and_compute(lmList)
print("Left Hand Detection Array=", left_detect)
print("left coordinates are", left_coordinates)
for i in range(5):
if(left_detect[i]!=0):
x,y=left_coordinates[i]
img=cv2.circle(img, (int(x),int(y)), 10, (10,50,50), 5)
if len(rmList) != 0 and hands>1:
right_detect,right_coordinates = finger_detect_and_compute(rmList)
print("Right Hand Detection Array=", right_detect)
print("Right coordinates are", right_coordinates)
for i in range(5):
if(right_detect[i]!=0):
x,y=right_coordinates[i]
img=cv2.circle(img, (int(x),int(y)), 10, (50,50,100), 5)
music_list_curr=build_music_list() # Build music list
if(len(music_list_curr)!=0) and music_list_curr!=music_list_prev:
q.put(music_list_curr) #Pass curr_music_list to another python process running play_music() function
music_list_prev=music_list_curr
if(len(music_list_curr)==0 and music_list_curr!=music_list_prev and status.qsize()<=1): # Empty queue if curr_music_list is empty--stop music
while not q.empty():
q.get()
img_background = initialize_visualizer(img)
visualizer(img_background) #Visualize virtual piano onscreen
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
lmList=[]
rmList=[]
            reinitialize() # Reinitializing variables to initial values!
cv2.putText(img_background, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 255), 3)
cv2.imshow("Image", img_background)
cv2.waitKey(100)
time.sleep(0.1)
except KeyboardInterrupt:
print("Program Execution stopped forcefully! Killing all processes!")
play_music_status=0
visualizer_status=0
sys.exit()
# Main function for initiating target processes
def main():
piano_key_initializer()
q = multiprocessing.Queue()
status= multiprocessing.Queue()
# creating new processes
p1 = multiprocessing.Process(target=processor, args=(q,status,))
p2 = multiprocessing.Process(target=play_music, args=(q,status,))
p1.start()
p2.start()
print("Exiting main")
if __name__ == "__main__":
main()
|
[
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imdecode",
"numpy.zeros",
"time.time",
"time.sleep",
"numpy.array",
"numpy.linalg.norm",
"multiprocessing.Queue",
"cv2.rectangle",
"requests.get",
"imutils.resize",
"multiprocessing.Process",
"cv2.imshow",
"os.listdir",
"sys.exit"
] |
[((739, 750), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (747, 750), True, 'import numpy as np\n'), ((764, 775), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (772, 775), True, 'import numpy as np\n'), ((852, 868), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (860, 868), True, 'import numpy as np\n'), ((886, 902), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (894, 902), True, 'import numpy as np\n'), ((999, 1016), 'numpy.zeros', 'np.zeros', (['(52, 4)'], {}), '((52, 4))\n', (1007, 1016), True, 'import numpy as np\n'), ((1093, 1110), 'numpy.zeros', 'np.zeros', (['(36, 4)'], {}), '((36, 4))\n', (1101, 1110), True, 'import numpy as np\n'), ((6051, 6067), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (6059, 6067), True, 'import numpy as np\n'), ((7127, 7249), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7140, 7249), False, 'import cv2\n'), ((16022, 16033), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (16030, 16033), True, 'import numpy as np\n'), ((16047, 16058), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (16055, 16058), True, 'import numpy as np\n'), ((16080, 16096), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (16088, 16096), True, 'import numpy as np\n'), ((16114, 16130), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (16122, 16130), True, 'import numpy as np\n'), ((20071, 20094), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (20092, 20094), False, 'import multiprocessing\n'), ((20107, 20130), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (20128, 20130), False, 'import multiprocessing\n'), ((20169, 20228), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'processor', 'args': '(q, status)'}), '(target=processor, args=(q, status))\n', (20192, 20228), False, 'import multiprocessing\n'), ((20238, 20298), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'play_music', 'args': '(q, status)'}), '(target=play_music, args=(q, status))\n', (20261, 20298), False, 'import multiprocessing\n'), ((2606, 2642), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2618, 2642), False, 'import cv2\n'), ((3921, 3935), 'numpy.array', 'np.array', (['List'], {}), '(List)\n', (3929, 3935), True, 'import numpy as np\n'), ((4872, 4895), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p3)'], {}), '(p1 - p3)\n', (4886, 4895), True, 'import numpy as np\n'), ((6758, 6885), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + white_key_width, curr_y + white_key_height)', '[255, 255, 255]', '(2)'], {}), '(img_background, (curr_x, curr_y), (curr_x + white_key_width, \n curr_y + white_key_height), [255, 255, 255], 2)\n', (6771, 6885), False, 'import cv2\n'), ((7446, 7568), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7459, 7568), False, 'import cv2\n'), ((7714, 7836), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), 
'(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7727, 7836), False, 'import cv2\n'), ((7984, 8106), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (7997, 8106), False, 'import cv2\n'), ((8252, 8374), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (8265, 8374), False, 'import cv2\n'), ((8520, 8642), 'cv2.rectangle', 'cv2.rectangle', (['img_background', '(curr_x, curr_y)', '(curr_x + black_key_width, curr_y + black_key_height)', '[0, 0, 0]', '(-1)'], {}), '(img_background, (curr_x, curr_y), (curr_x + black_key_width, \n curr_y + black_key_height), [0, 0, 0], -1)\n', (8533, 8642), False, 'import cv2\n'), ((4241, 4284), 'numpy.array', 'np.array', (['self.results.multi_hand_landmarks'], {}), '(self.results.multi_hand_landmarks)\n', (4249, 4284), True, 'import numpy as np\n'), ((4819, 4842), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p2)'], {}), '(p1 - p2)\n', (4833, 4842), True, 'import numpy as np\n'), ((4846, 4869), 'numpy.linalg.norm', 'np.linalg.norm', (['(p3 - p2)'], {}), '(p3 - p2)\n', (4860, 4869), True, 'import numpy as np\n'), ((13621, 13711), 'os.listdir', 'os.listdir', (['"""/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/"""'], {}), "(\n '/home/abhinav/Piano_project/25405__tedagame__88-piano-keys-long-reverb/')\n", (13631, 13711), False, 'import os\n'), ((17000, 17017), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (17012, 17017), False, 'import requests\n'), ((17112, 17137), 'cv2.imdecode', 'cv2.imdecode', (['img_arr', '(-1)'], {}), '(img_arr, -1)\n', (17124, 17137), False, 'import cv2\n'), ((17156, 17198), 'imutils.resize', 'imutils.resize', (['img'], {'width': '(640)', 'height': '(480)'}), '(img, width=640, height=480)\n', (17170, 17198), False, 'import imutils\n'), ((19310, 19321), 'time.time', 'time.time', ([], {}), '()\n', (19319, 19321), False, 'import time\n'), ((19648, 19683), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img_background'], {}), "('Image', img_background)\n", (19658, 19683), False, 'import cv2\n'), ((19696, 19712), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (19707, 19712), False, 'import cv2\n'), ((19725, 19740), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (19735, 19740), False, 'import time\n'), ((10509, 10519), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10517, 10519), False, 'import sys\n'), ((14841, 14851), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14849, 14851), False, 'import sys\n'), ((15628, 15641), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15638, 15641), False, 'import time\n'), ((15747, 15757), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15755, 15757), False, 'import sys\n'), ((19938, 19948), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19946, 19948), False, 'import sys\n'), ((9687, 9749), 'cv2.rectangle', 'cv2.rectangle', (['img_background', 'start', 'end', '(255, 182, 193)', '(-1)'], {}), '(img_background, start, end, (255, 182, 193), -1)\n', (9700, 9749), False, 'import cv2\n'), ((10160, 10222), 'cv2.rectangle', 'cv2.rectangle', (['img_background', 'start', 'end', 
'(144, 238, 144)', '(-1)'], {}), '(img_background, start, end, (144, 238, 144), -1)\n', (10173, 10222), False, 'import cv2\n')]
|
'''
Shows the grid world using pygame.
Globecom Tutorial - December 7, 2021
Tutorial 29: Machine Learning for MIMO Systems with Large Arrays
<NAME> (NCSU),
<NAME> (UFPA) and
<NAME>. (NCSU)
'''
import time
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import pygame as pg
import pyscreenshot as ImageGrab
import imageio
#from beamforming_calculation import AnalogBeamformer
SLEEP_TIME = 0.3 #time to sleep and allow visualizing the world :)
class Mimo_RL_render:
def __init__(self, analogBeamformer):
        self.should_save_images_as_gif = False #Not working: would enable saving the rendered frames as a GIF at the end
self.analogBeamformer = analogBeamformer
self.Rx_position = (0,0)
self.Rx2_position = (5,5)
self.scheduled_user = 0
self.beam_index = 0
#Fixed objects, which do not move
self.Tx = [1,2]
self.wall1 = [3,4]
self.wall2 = [4,4]
# create discrete colormap
cmap = colors.ListedColormap(['gray','red', 'green', 'blue'])
cmap.set_bad(color='w', alpha=0)
fig = plt.figure()
self.pg = pg
self.pg.init()
self.screen = pg.display.set_mode((600,600))
clock = pg.time.Clock()
back = pg.image.load("./figs/grid6x6.png")
self.back = pg.transform.scale(back, (600,600))
antenna = pg.image.load("./figs/antenna.png").convert_alpha()
self.antenna = pg.transform.scale(antenna, (40,80))
wall_image = pg.image.load("./figs/wall.png").convert_alpha()
self.wall_image = pg.transform.scale(wall_image, (90,90))
carro1 = pg.image.load("./figs/carro1.png").convert_alpha()
self.carro1 = pg.transform.scale(carro1, (80,80))
carro2 = pg.image.load("./figs/carro2.png").convert_alpha()
self.carro2 = pg.transform.scale(carro2, (80,80))
if self.should_save_images_as_gif:
self.images = []
def set_positions(self, positions, scheduled_user, beam_index):
#positions = self.mimo_RL_Environment.get_UE_positions()
self.Rx_position = positions[0]
self.Rx2_position = positions[1]
self.scheduled_user = scheduled_user
self.beam_index = beam_index
def plot_beam(self, scheduled_user, beam_index):
fig, ax2 = plt.subplots(subplot_kw={'projection': 'polar'})
if(scheduled_user==0):
colorCar = 'blue'
else: #scheduled_user==1
colorCar = 'red'
angles = self.analogBeamformer.angles_for_plotting
beam_values = self.analogBeamformer.beams_for_plotting[:,beam_index]
beam_values = np.abs(beam_values) #take absolute values
ax2.plot(angles, beam_values, color=colorCar)
ax2.set_axis_off()
ax2.grid(False)
plt.savefig('chosen_beam.png', transparent=True, bbox_inches='tight')
def render_back(self):
self.screen.fill([255,255,255])
self.screen.blit(self.back,(0,0))
def render_antenna(self):
self.screen.blit(self.antenna,(self.Tx[0]*100+30,abs(self.Tx[1]-5)*100+16))
def render_beams(self):
bestBeam = pg.image.load("chosen_beam.png").convert_alpha()
bestBeam = pg.transform.scale(bestBeam, (300,300))
self.screen.blit(bestBeam,(self.Tx[0]*100-100,abs(self.Tx[1]-5)*100-100))
def render_wall1(self):
self.screen.blit(self.wall_image,(self.wall1[0]*100 + 5,abs(self.wall1[1]-5)*100 + 9))
def render_wall2(self):
self.screen.blit(self.wall_image,(self.wall2[0]*100 + 5,abs(self.wall2[1]-5)*100 + 9))
def render_Rx(self):
self.screen.blit(self.carro1,(self.Rx_position[0]*100 + 10, abs(self.Rx_position[1]-5)*100 + 10))
def render_Rx2(self):
self.screen.blit(self.carro2,(self.Rx2_position[0]*100 + 10, abs(self.Rx2_position[1]-5)*100 + 10))
def render(self):
time.sleep(SLEEP_TIME)
#plot beam
self.plot_beam(self.scheduled_user, self.beam_index)
self.render_back()
self.render_Rx()
self.render_Rx2()
self.render_antenna()
self.render_wall1()
self.render_wall2()
self.render_beams()
#plt.pause(1)
if self.should_save_images_as_gif:
raise NotImplementedError()
self.images.append(ImageGrab.grab(bbox=(1960, 1030, 2760, 1830)))
self.pg.display.update()
def save_images_as_gif(self, file_name, duration=3):
raise NotImplementedError()
gif = imageio.mimsave(file_name, self.images, 'GIF', duration=duration)
#print(gif)
print('Wrote file', file_name)
|
[
"imageio.mimsave",
"numpy.abs",
"pygame.display.set_mode",
"pyscreenshot.grab",
"matplotlib.pyplot.subplots",
"time.sleep",
"pygame.transform.scale",
"matplotlib.pyplot.figure",
"pygame.image.load",
"pygame.time.Clock",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.savefig"
] |
[((983, 1038), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['gray', 'red', 'green', 'blue']"], {}), "(['gray', 'red', 'green', 'blue'])\n", (1004, 1038), False, 'from matplotlib import colors\n'), ((1093, 1105), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1103, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1203), 'pygame.display.set_mode', 'pg.display.set_mode', (['(600, 600)'], {}), '((600, 600))\n', (1191, 1203), True, 'import pygame as pg\n'), ((1219, 1234), 'pygame.time.Clock', 'pg.time.Clock', ([], {}), '()\n', (1232, 1234), True, 'import pygame as pg\n'), ((1250, 1285), 'pygame.image.load', 'pg.image.load', (['"""./figs/grid6x6.png"""'], {}), "('./figs/grid6x6.png')\n", (1263, 1285), True, 'import pygame as pg\n'), ((1306, 1342), 'pygame.transform.scale', 'pg.transform.scale', (['back', '(600, 600)'], {}), '(back, (600, 600))\n', (1324, 1342), True, 'import pygame as pg\n'), ((1435, 1472), 'pygame.transform.scale', 'pg.transform.scale', (['antenna', '(40, 80)'], {}), '(antenna, (40, 80))\n', (1453, 1472), True, 'import pygame as pg\n'), ((1568, 1608), 'pygame.transform.scale', 'pg.transform.scale', (['wall_image', '(90, 90)'], {}), '(wall_image, (90, 90))\n', (1586, 1608), True, 'import pygame as pg\n'), ((1698, 1734), 'pygame.transform.scale', 'pg.transform.scale', (['carro1', '(80, 80)'], {}), '(carro1, (80, 80))\n', (1716, 1734), True, 'import pygame as pg\n'), ((1824, 1860), 'pygame.transform.scale', 'pg.transform.scale', (['carro2', '(80, 80)'], {}), '(carro2, (80, 80))\n', (1842, 1860), True, 'import pygame as pg\n'), ((2302, 2350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'polar'}"}), "(subplot_kw={'projection': 'polar'})\n", (2314, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2651), 'numpy.abs', 'np.abs', (['beam_values'], {}), '(beam_values)\n', (2638, 2651), True, 'import numpy as np\n'), ((2788, 2857), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""chosen_beam.png"""'], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "('chosen_beam.png', transparent=True, bbox_inches='tight')\n", (2799, 2857), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3239), 'pygame.transform.scale', 'pg.transform.scale', (['bestBeam', '(300, 300)'], {}), '(bestBeam, (300, 300))\n', (3217, 3239), True, 'import pygame as pg\n'), ((3867, 3889), 'time.sleep', 'time.sleep', (['SLEEP_TIME'], {}), '(SLEEP_TIME)\n', (3877, 3889), False, 'import time\n'), ((4521, 4586), 'imageio.mimsave', 'imageio.mimsave', (['file_name', 'self.images', '"""GIF"""'], {'duration': 'duration'}), "(file_name, self.images, 'GIF', duration=duration)\n", (4536, 4586), False, 'import imageio\n'), ((1360, 1395), 'pygame.image.load', 'pg.image.load', (['"""./figs/antenna.png"""'], {}), "('./figs/antenna.png')\n", (1373, 1395), True, 'import pygame as pg\n'), ((1493, 1525), 'pygame.image.load', 'pg.image.load', (['"""./figs/wall.png"""'], {}), "('./figs/wall.png')\n", (1506, 1525), True, 'import pygame as pg\n'), ((1625, 1659), 'pygame.image.load', 'pg.image.load', (['"""./figs/carro1.png"""'], {}), "('./figs/carro1.png')\n", (1638, 1659), True, 'import pygame as pg\n'), ((1751, 1785), 'pygame.image.load', 'pg.image.load', (['"""./figs/carro2.png"""'], {}), "('./figs/carro2.png')\n", (1764, 1785), True, 'import pygame as pg\n'), ((3131, 3163), 'pygame.image.load', 'pg.image.load', (['"""chosen_beam.png"""'], {}), "('chosen_beam.png')\n", (3144, 3163), True, 'import pygame as pg\n'), ((4332, 4377), 
'pyscreenshot.grab', 'ImageGrab.grab', ([], {'bbox': '(1960, 1030, 2760, 1830)'}), '(bbox=(1960, 1030, 2760, 1830))\n', (4346, 4377), True, 'import pyscreenshot as ImageGrab\n')]
|
import calendar
import numpy as np
import pandas as pd
import os
import shutil
import tables
import tempfile
import unittest
from datetime import datetime
from phildb.log_handler import LogHandler
class LogHandlerTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.log_file = os.path.join(self.tmp_dir, "log_file.hdf5")
self.create_datetime = calendar.timegm(
datetime(2015, 6, 28, 15, 25, 00).utctimetuple()
)
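        # Descriptive note: the first "C" (create) entry below is written with np.nan;
        # test_nan_logging expects it to be stored as the sentinel pair (-9999, 9999).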
log_entries = {"C": [(1388620800, np.nan, 0), (1388707200, 3.0, 0)], "U": []}
with LogHandler(self.log_file, "w") as writer:
writer.create_skeleton()
with LogHandler(self.log_file, "a") as writer:
writer.write(log_entries, self.create_datetime)
self.update_datetime = calendar.timegm(
datetime(2015, 8, 1, 16, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388707200, 4.0, 0)], "U": []}
with LogHandler(self.log_file, "a") as writer:
writer.write(log_entries, self.update_datetime)
self.second_update_datetime = calendar.timegm(
datetime(2015, 8, 10, 16, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388707200, 5.0, 0)], "U": []}
with LogHandler(self.log_file, "a") as writer:
writer.write(log_entries, self.second_update_datetime)
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if e.errno != 2: # Code 2: No such file or directory.
raise
def test_logging(self):
log_file = os.path.join(self.tmp_dir, "log_file.hdf5")
create_datetime = calendar.timegm(
datetime(2015, 6, 28, 15, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388620800, 2.0, 0), (1388707200, 3.0, 0)], "U": []}
with LogHandler(log_file, "w") as writer:
writer.create_skeleton()
with LogHandler(log_file, "a") as writer:
writer.write(log_entries, create_datetime)
update_datetime = calendar.timegm(
datetime(2015, 6, 28, 16, 25, 00).utctimetuple()
)
log_entries = {"C": [(1388620800, 3.0, 0), (1388707200, 4.0, 0)], "U": []}
with LogHandler(log_file, "a") as writer:
writer.write(log_entries, update_datetime)
with tables.open_file(log_file, "r") as hdf5_file:
log_grp = hdf5_file.get_node("/data")
self.assertEqual(len(log_grp.log), 4)
self.assertSequenceEqual(
log_grp.log[0], (1388620800, 2.0, 0, create_datetime)
)
self.assertSequenceEqual(
log_grp.log[1], (1388707200, 3.0, 0, create_datetime)
)
self.assertSequenceEqual(
log_grp.log[2], (1388620800, 3.0, 0, update_datetime)
)
self.assertSequenceEqual(
log_grp.log[3], (1388707200, 4.0, 0, update_datetime)
)
def test_nan_logging(self):
# Note: The write code under test is part of the setUp method.
with tables.open_file(self.log_file, "r") as hdf5_file:
log_grp = hdf5_file.get_node("/data")
self.assertSequenceEqual(
log_grp.log[0], (1388620800, -9999, 9999, self.create_datetime)
)
self.assertSequenceEqual(
log_grp.log[1], (1388707200, 3.0, 0, self.create_datetime)
)
self.assertSequenceEqual(
log_grp.log[2], (1388707200, 4.0, 0, self.update_datetime)
)
self.assertEqual(len(log_grp.log), 4)
def test_read_log(self):
data = {}
with LogHandler(self.log_file, "r") as reader:
data["original_data"] = reader.read(self.create_datetime)
data["middle_data"] = reader.read(self.update_datetime)
data["last_data"] = reader.read(self.second_update_datetime)
for k in data.keys():
self.assertEqual(
data[k].index[0],
pd.Timestamp("2014-01-02 00:00:00"),
"Incorrect start date in {0}".format(k),
)
self.assertEqual(
data[k].index[1],
pd.Timestamp("2014-01-03 00:00:00"),
"Incorrect end date in {0}".format(k),
)
self.assertEqual(len(data[k]), 2, "Incorrect length of '{0}'.".format(k))
self.assertTrue(
np.isnan(data[k].value[0]), "Incorrect first value for '{0}'.".format(k)
)
self.assertEqual(data["original_data"].value[1], 3.0)
self.assertEqual(data["middle_data"].value[1], 4.0)
self.assertEqual(data["last_data"].value[1], 5.0)
|
[
"pandas.Timestamp",
"numpy.isnan",
"datetime.datetime",
"tempfile.mkdtemp",
"shutil.rmtree",
"tables.open_file",
"os.path.join",
"phildb.log_handler.LogHandler"
] |
[((285, 303), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (301, 303), False, 'import tempfile\n'), ((328, 371), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""log_file.hdf5"""'], {}), "(self.tmp_dir, 'log_file.hdf5')\n", (340, 371), False, 'import os\n'), ((1638, 1681), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""log_file.hdf5"""'], {}), "(self.tmp_dir, 'log_file.hdf5')\n", (1650, 1681), False, 'import os\n'), ((592, 622), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""w"""'], {}), "(self.log_file, 'w')\n", (602, 622), False, 'from phildb.log_handler import LogHandler\n'), ((685, 715), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""a"""'], {}), "(self.log_file, 'a')\n", (695, 715), False, 'from phildb.log_handler import LogHandler\n'), ((981, 1011), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""a"""'], {}), "(self.log_file, 'a')\n", (991, 1011), False, 'from phildb.log_handler import LogHandler\n'), ((1285, 1315), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""a"""'], {}), "(self.log_file, 'a')\n", (1295, 1315), False, 'from phildb.log_handler import LogHandler\n'), ((1444, 1471), 'shutil.rmtree', 'shutil.rmtree', (['self.tmp_dir'], {}), '(self.tmp_dir)\n', (1457, 1471), False, 'import shutil\n'), ((1894, 1919), 'phildb.log_handler.LogHandler', 'LogHandler', (['log_file', '"""w"""'], {}), "(log_file, 'w')\n", (1904, 1919), False, 'from phildb.log_handler import LogHandler\n'), ((1982, 2007), 'phildb.log_handler.LogHandler', 'LogHandler', (['log_file', '"""a"""'], {}), "(log_file, 'a')\n", (1992, 2007), False, 'from phildb.log_handler import LogHandler\n'), ((2286, 2311), 'phildb.log_handler.LogHandler', 'LogHandler', (['log_file', '"""a"""'], {}), "(log_file, 'a')\n", (2296, 2311), False, 'from phildb.log_handler import LogHandler\n'), ((2392, 2423), 'tables.open_file', 'tables.open_file', (['log_file', '"""r"""'], {}), "(log_file, 'r')\n", (2408, 2423), False, 'import tables\n'), ((3147, 3183), 'tables.open_file', 'tables.open_file', (['self.log_file', '"""r"""'], {}), "(self.log_file, 'r')\n", (3163, 3183), False, 'import tables\n'), ((3748, 3778), 'phildb.log_handler.LogHandler', 'LogHandler', (['self.log_file', '"""r"""'], {}), "(self.log_file, 'r')\n", (3758, 3778), False, 'from phildb.log_handler import LogHandler\n'), ((4112, 4147), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-02 00:00:00"""'], {}), "('2014-01-02 00:00:00')\n", (4124, 4147), True, 'import pandas as pd\n'), ((4301, 4336), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-03 00:00:00"""'], {}), "('2014-01-03 00:00:00')\n", (4313, 4336), True, 'import pandas as pd\n'), ((4540, 4566), 'numpy.isnan', 'np.isnan', (['data[k].value[0]'], {}), '(data[k].value[0])\n', (4548, 4566), True, 'import numpy as np\n'), ((432, 464), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(28)', '(15)', '(25)', '(0)'], {}), '(2015, 6, 28, 15, 25, 0)\n', (440, 464), False, 'from datetime import datetime\n'), ((848, 879), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(1)', '(16)', '(25)', '(0)'], {}), '(2015, 8, 1, 16, 25, 0)\n', (856, 879), False, 'from datetime import datetime\n'), ((1151, 1183), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(10)', '(16)', '(25)', '(0)'], {}), '(2015, 8, 10, 16, 25, 0)\n', (1159, 1183), False, 'from datetime import datetime\n'), ((1737, 1769), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(28)', '(15)', '(25)', '(0)'], {}), '(2015, 6, 28, 15, 25, 0)\n', 
(1745, 1769), False, 'from datetime import datetime\n'), ((2130, 2162), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(28)', '(16)', '(25)', '(0)'], {}), '(2015, 6, 28, 16, 25, 0)\n', (2138, 2162), False, 'from datetime import datetime\n')]
|
import numpy as np
import pymc as pm
challenger_data = np.genfromtxt(
"../../Chapter2_MorePyMC/data/challenger_data.csv",
skip_header=1, usecols=[1, 2], missing_values="NA", delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
temperature = challenger_data[:, 0]
D = challenger_data[:, 1] # defect or not?
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)
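# Descriptive note: the deterministic p below is a logistic link,
# p(t) = 1 / (1 + exp(beta * t + alpha)), mapping temperature to a defect probability.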
@pm.deterministic
def p(temp=temperature, alpha=alpha, beta=beta):
return 1.0 / (1. + np.exp(beta * temperature + alpha))
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)
model = pm.Model([observed, beta, alpha])
# mysterious code to be explained in Chapter 3
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(260000, 220000, 2)
|
[
"pymc.MAP",
"pymc.Model",
"numpy.genfromtxt",
"pymc.MCMC",
"numpy.isnan",
"pymc.Bernoulli",
"numpy.exp",
"pymc.Normal"
] |
[((57, 193), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../../Chapter2_MorePyMC/data/challenger_data.csv"""'], {'skip_header': '(1)', 'usecols': '[1, 2]', 'missing_values': '"""NA"""', 'delimiter': '""","""'}), "('../../Chapter2_MorePyMC/data/challenger_data.csv',\n skip_header=1, usecols=[1, 2], missing_values='NA', delimiter=',')\n", (70, 193), True, 'import numpy as np\n'), ((378, 414), 'pymc.Normal', 'pm.Normal', (['"""beta"""', '(0)', '(0.001)'], {'value': '(0)'}), "('beta', 0, 0.001, value=0)\n", (387, 414), True, 'import pymc as pm\n'), ((423, 460), 'pymc.Normal', 'pm.Normal', (['"""alpha"""', '(0)', '(0.001)'], {'value': '(0)'}), "('alpha', 0, 0.001, value=0)\n", (432, 460), True, 'import pymc as pm\n'), ((602, 658), 'pymc.Bernoulli', 'pm.Bernoulli', (['"""bernoulli_obs"""', 'p'], {'value': 'D', 'observed': '(True)'}), "('bernoulli_obs', p, value=D, observed=True)\n", (614, 658), True, 'import pymc as pm\n'), ((668, 701), 'pymc.Model', 'pm.Model', (['[observed, beta, alpha]'], {}), '([observed, beta, alpha])\n', (676, 701), True, 'import pymc as pm\n'), ((757, 770), 'pymc.MAP', 'pm.MAP', (['model'], {}), '(model)\n', (763, 770), True, 'import pymc as pm\n'), ((789, 803), 'pymc.MCMC', 'pm.MCMC', (['model'], {}), '(model)\n', (796, 803), True, 'import pymc as pm\n'), ((255, 286), 'numpy.isnan', 'np.isnan', (['challenger_data[:, 1]'], {}), '(challenger_data[:, 1])\n', (263, 286), True, 'import numpy as np\n'), ((553, 587), 'numpy.exp', 'np.exp', (['(beta * temperature + alpha)'], {}), '(beta * temperature + alpha)\n', (559, 587), True, 'import numpy as np\n')]
|
import json
import logging
import time
import numpy as np
from sklearn.svm import OneClassSVM
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import pairwise_distances
from base.base_dataset import BaseADDataset
from networks.main import build_network
class OCSVM(object):
"""A class for One-Class SVM models."""
def __init__(self, kernel='linear', nu=0.1):
"""Init OCSVM instance."""
self.kernel = kernel
self.nu = nu
self.rho = None
self.gamma = None
self.model = OneClassSVM(kernel=kernel, nu=nu)
self.embedding = None
self.results = {
'train_time': None,
'test_time': None,
'test_auc': None,
'test_scores': None
}
def set_embedding(self, dataset, embedding_size=100, pretrained_word_vectors=None, embedding_reduction='mean',
use_tfidf_weights=False, normalize_embedding=False, device: str = 'cpu'):
"""Sets the word embedding for the text data."""
self.embedding = build_network('embedding',
dataset,
embedding_size=embedding_size,
pretrained_model=pretrained_word_vectors,
update_embedding=False,
embedding_reduction=embedding_reduction,
use_tfidf_weights=use_tfidf_weights,
normalize_embedding=normalize_embedding)
self.embedding = self.embedding.to(device)
def train(self, dataset: BaseADDataset, device: str = 'cpu', n_jobs_dataloader: int = 0):
"""Trains the OC-SVM model on the training data."""
logger = logging.getLogger()
train_loader, _ = dataset.loaders(batch_size=64, num_workers=n_jobs_dataloader)
# Training
logger.info('Starting training...')
X = ()
for data in train_loader:
_, text, _, weights = data
text, weights = text.to(device), weights.to(device)
X_batch = self.embedding(text, weights) # X_batch.shape = (batch_size, embedding_size)
X += (X_batch.cpu().data.numpy(),)
X = np.concatenate(X)
# if rbf-kernel, re-initialize svm with gamma minimizing the numerical error
if self.kernel == 'rbf':
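            # Descriptive note: gamma = 1 / max_pairwise_distance**2 makes the RBF kernel
            # value between the two farthest training points equal exp(-1), keeping kernel
            # entries away from numerical zero.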
self.gamma = 1 / (np.max(pairwise_distances(X)) ** 2)
self.model = OneClassSVM(kernel='rbf', nu=self.nu, gamma=self.gamma)
start_time = time.time()
self.model.fit(X)
self.results['train_time'] = time.time() - start_time
logger.info('Training Time: {:.3f}s'.format(self.results['train_time']))
logger.info('Finished training.')
def test(self, dataset: BaseADDataset, device: str = 'cpu', n_jobs_dataloader: int = 0):
"""Tests the OC-SVM model on the test data."""
logger = logging.getLogger()
_, test_loader = dataset.loaders(batch_size=64, num_workers=n_jobs_dataloader)
# Testing
logger.info('Starting testing...')
idx_label_score = []
X = ()
idxs = []
labels = []
for data in test_loader:
idx, text, label_batch, weights = data
text = text.to(device)
label_batch = label_batch.to(device)
weights = weights.to(device)
X_batch = self.embedding(text, weights) # X_batch.shape = (batch_size, embedding_size)
X += (X_batch.cpu().data.numpy(),)
idxs += idx
labels += label_batch.cpu().data.numpy().astype(np.int64).tolist()
X = np.concatenate(X)
start_time = time.time()
scores = (-1.0) * self.model.decision_function(X)
self.results['test_time'] = time.time() - start_time
scores = scores.flatten()
self.rho = -self.model.intercept_[0]
# Save triples of (idx, label, score) in a list
idx_label_score += list(zip(idxs, labels, scores.tolist()))
self.results['test_scores'] = idx_label_score
# Compute AUC
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
self.results['test_auc'] = roc_auc_score(labels, scores)
# Log results
logger.info('Test AUC: {:.2f}%'.format(100. * self.results['test_auc']))
logger.info('Test Time: {:.3f}s'.format(self.results['test_time']))
logger.info('Finished testing.')
def save_model(self, export_path):
"""Save OC-SVM model to export_path."""
pass
def load_model(self, import_path, device: str = 'cpu'):
"""Load OC-SVM model from import_path."""
pass
def save_results(self, export_json):
"""Save results dict to a JSON-file."""
with open(export_json, 'w') as fp:
json.dump(self.results, fp)
|
[
"json.dump",
"networks.main.build_network",
"sklearn.metrics.pairwise.pairwise_distances",
"logging.getLogger",
"time.time",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.svm.OneClassSVM",
"numpy.concatenate"
] |
[((552, 585), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'kernel': 'kernel', 'nu': 'nu'}), '(kernel=kernel, nu=nu)\n', (563, 585), False, 'from sklearn.svm import OneClassSVM\n'), ((1071, 1335), 'networks.main.build_network', 'build_network', (['"""embedding"""', 'dataset'], {'embedding_size': 'embedding_size', 'pretrained_model': 'pretrained_word_vectors', 'update_embedding': '(False)', 'embedding_reduction': 'embedding_reduction', 'use_tfidf_weights': 'use_tfidf_weights', 'normalize_embedding': 'normalize_embedding'}), "('embedding', dataset, embedding_size=embedding_size,\n pretrained_model=pretrained_word_vectors, update_embedding=False,\n embedding_reduction=embedding_reduction, use_tfidf_weights=\n use_tfidf_weights, normalize_embedding=normalize_embedding)\n", (1084, 1335), False, 'from networks.main import build_network\n'), ((1819, 1838), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1836, 1838), False, 'import logging\n'), ((2306, 2323), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (2320, 2323), True, 'import numpy as np\n'), ((2612, 2623), 'time.time', 'time.time', ([], {}), '()\n', (2621, 2623), False, 'import time\n'), ((3002, 3021), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3019, 3021), False, 'import logging\n'), ((3728, 3745), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (3742, 3745), True, 'import numpy as np\n'), ((3768, 3779), 'time.time', 'time.time', ([], {}), '()\n', (3777, 3779), False, 'import time\n'), ((4248, 4264), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4256, 4264), True, 'import numpy as np\n'), ((4282, 4298), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4290, 4298), True, 'import numpy as np\n'), ((4334, 4363), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'scores'], {}), '(labels, scores)\n', (4347, 4363), False, 'from sklearn.metrics import roc_auc_score\n'), ((2534, 2589), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {'kernel': '"""rbf"""', 'nu': 'self.nu', 'gamma': 'self.gamma'}), "(kernel='rbf', nu=self.nu, gamma=self.gamma)\n", (2545, 2589), False, 'from sklearn.svm import OneClassSVM\n'), ((2687, 2698), 'time.time', 'time.time', ([], {}), '()\n', (2696, 2698), False, 'import time\n'), ((3874, 3885), 'time.time', 'time.time', ([], {}), '()\n', (3883, 3885), False, 'import time\n'), ((4955, 4982), 'json.dump', 'json.dump', (['self.results', 'fp'], {}), '(self.results, fp)\n', (4964, 4982), False, 'import json\n'), ((2480, 2501), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (2498, 2501), False, 'from sklearn.metrics.pairwise import pairwise_distances\n')]
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Utility methods."""
import numpy as np
import pandas as pd
import scipy.sparse
import sparse as sp
import itertools
from operator import getitem
from collections import defaultdict, Counter
from sklearn import clone
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso
from functools import reduce, wraps
from sklearn.utils import check_array, check_X_y
import warnings
from warnings import warn
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from collections.abc import Iterable
from sklearn.utils.multiclass import type_of_target
import numbers
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import summary_return
from statsmodels.compat.python import lmap
import copy
from inspect import signature
MAX_RAND_SEED = np.iinfo(np.int32).max
class IdentityFeatures(TransformerMixin):
"""Featurizer that just returns the input data."""
def fit(self, X):
"""Fit method (does nothing, just returns self)."""
return self
def transform(self, X):
"""Perform the identity transform, which returns the input unmodified."""
return X
def parse_final_model_params(coef, intercept, d_y, d_t, d_t_in, bias_part_of_coef, fit_cate_intercept):
dt = d_t
if (d_t_in != d_t) and (d_t[0] == 1): # binary treatment
dt = ()
cate_intercept = None
if bias_part_of_coef:
cate_coef = coef.reshape(d_y + dt + (-1,))[..., 1:]
if fit_cate_intercept:
cate_intercept = coef.reshape(d_y + dt + (-1,))[..., 0]
else:
cate_coef = coef.reshape(d_y + dt + (-1,))
if fit_cate_intercept:
cate_intercept = np.reshape(intercept, d_y + dt)
if (cate_intercept is not None) and (np.ndim(cate_intercept) == 0):
cate_intercept = cate_intercept.item()
return cate_coef, cate_intercept
def check_high_dimensional(X, T, *, threshold, featurizer=None, discrete_treatment=False, msg=""):
# Check if model is sparse enough for this model
X, T = check_input_arrays(X, T)
if X is None:
d_x = 1
elif featurizer is None:
d_x = X.shape[1]
else:
d_x = clone(featurizer, safe=False).fit_transform(X[[0], :]).shape[1]
if discrete_treatment:
d_t = len(set(T.flatten())) - 1
else:
d_t = 1 if np.ndim(T) < 2 else T.shape[1]
if d_x * d_t < threshold:
warn(msg, UserWarning)
def inverse_onehot(T):
"""
Given a one-hot encoding of a value, return a vector reversing the encoding to get numeric treatment indices.
Note that we assume that the first column has been removed from the input.
Parameters
----------
T : array (shape (n, d_t-1))
The one-hot-encoded array
Returns
-------
A : vector of int (shape (n,))
The un-encoded 0-based category indices
"""
assert ndim(T) == 2
# note that by default OneHotEncoder returns float64s, so need to convert to int
return (T @ np.arange(1, T.shape[1] + 1)).astype(int)
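# Illustrative example (editor's sketch, not part of the original module); with the first
# column already dropped, an all-zero row maps back to category 0:
#
#   >>> inverse_onehot(np.array([[0., 1.], [1., 0.], [0., 0.]]))
#   array([2, 1, 0])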
def issparse(X):
"""Determine whether an input is sparse.
For the purposes of this function, both `scipy.sparse` matrices and `sparse.SparseArray`
types are considered sparse.
Parameters
----------
X : array-like
The input to check
Returns
-------
bool
Whether the input is sparse
"""
return scipy.sparse.issparse(X) or isinstance(X, sp.SparseArray)
def iscoo(X):
"""Determine whether an input is a `sparse.COO` array.
Parameters
----------
X : array-like
The input to check
Returns
-------
bool
Whether the input is a `COO` array
"""
return isinstance(X, sp.COO)
def tocoo(X):
"""
Convert an array to a sparse COO array.
    If the input is already a `sparse.COO` object, this returns the object directly; otherwise it is converted.
"""
if isinstance(X, sp.COO):
return X
elif isinstance(X, sp.DOK):
return sp.COO(X)
elif scipy.sparse.issparse(X):
return sp.COO.from_scipy_sparse(X)
else:
return sp.COO.from_numpy(X)
def todense(X):
"""
Convert an array to a dense numpy array.
If the input is already a numpy array, this may create a new copy.
"""
if scipy.sparse.issparse(X):
return X.toarray()
elif isinstance(X, sp.SparseArray):
return X.todense()
else:
# TODO: any way to avoid creating a copy if the array was already dense?
# the call is necessary if the input was something like a list, though
return np.array(X)
def size(X):
"""Return the number of elements in the array.
Parameters
----------
a : array_like
Input data
Returns
-------
int
The number of elements of the array
"""
return X.size if issparse(X) else np.size(X)
def shape(X):
"""Return a tuple of array dimensions."""
return X.shape if issparse(X) else np.shape(X)
def ndim(X):
"""Return the number of array dimensions."""
return X.ndim if issparse(X) else np.ndim(X)
def reshape(X, shape):
"""Return a new array that is a reshaped version of an input array.
The output will be sparse iff the input is.
Parameters
----------
X : array_like
The array to reshape
shape : tuple of ints
The desired shape of the output array
Returns
-------
ndarray or SparseArray
The reshaped output array
"""
if scipy.sparse.issparse(X):
# scipy sparse arrays don't support reshaping (even for 2D they throw not implemented errors),
# so convert to pydata sparse first
X = sp.COO.from_scipy_sparse(X)
if len(shape) == 2:
# in the 2D case, we can convert back to scipy sparse; in other cases we can't
return X.reshape(shape).to_scipy_sparse()
return X.reshape(shape)
def _apply(op, *XS):
"""
Apply a function to a sequence of sparse or dense array arguments.
If any array is sparse then all arrays are converted to COO before the function is applied;
if all of the arrays are scipy sparse arrays, and if the result is 2D,
the returned value will be a scipy sparse array as well
"""
all_scipy_sparse = all(scipy.sparse.issparse(X) for X in XS)
if any(issparse(X) for X in XS):
XS = tuple(tocoo(X) for X in XS)
result = op(*XS)
if all_scipy_sparse and len(shape(result)) == 2:
# both inputs were scipy and we can safely convert back to scipy because it's 2D
return result.to_scipy_sparse()
return result
def tensordot(X1, X2, axes):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Parameters
----------
X1, X2 : array_like, len(shape) >= 1
Tensors to "dot"
axes : int or (2,) array_like
integer_like
If an int N, sum over the last N axes of `X1` and the first N axes
of `X2` in order. The sizes of the corresponding axes must match
(2,) array_like
Or, a list of axes to be summed over, first sequence applying to `X1`,
second to `X2`. Both elements array_like must be of the same length.
"""
def td(X1, X2):
return sp.tensordot(X1, X2, axes) if iscoo(X1) else np.tensordot(X1, X2, axes)
return _apply(td, X1, X2)
def cross_product(*XS):
"""
Compute the cross product of features.
Parameters
----------
X1 : n x d1 matrix
First matrix of n samples of d1 features
(or an n-element vector, which will be treated as an n x 1 matrix)
X2 : n x d2 matrix
Second matrix of n samples of d2 features
(or an n-element vector, which will be treated as an n x 1 matrix)
Returns
-------
A : n x (d1*d2*...) matrix
Matrix of n samples of d1*d2*... cross product features,
arranged in form such that each row t of X12 contains:
[X1[t,0]*X2[t,0]*..., ..., X1[t,d1-1]*X2[t,0]*..., X1[t,0]*X2[t,1]*..., ..., X1[t,d1-1]*X2[t,1]*..., ...]
"""
for X in XS:
assert 2 >= ndim(X) >= 1
n = shape(XS[0])[0]
for X in XS:
assert n == shape(X)[0]
def cross(XS):
k = len(XS)
XS = [reshape(XS[i], (n,) + (1,) * (k - i - 1) + (-1,) + (1,) * i) for i in range(k)]
return reshape(reduce(np.multiply, XS), (n, -1))
return _apply(cross, XS)
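# Illustrative example (editor's sketch): for a single sample the output ordering matches
# the docstring above:
#
#   >>> cross_product(np.array([[1, 2]]), np.array([[3, 4]]))
#   array([[3, 6, 4, 8]])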
def stack(XS, axis=0):
"""
Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if axis=0 it will be the first dimension and if axis=-1 it will be the last dimension.
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape
axis : int, optional
The axis in the result array along which the input arrays are stacked
Returns
-------
ndarray or SparseArray
The stacked array, which has one more dimension than the input arrays.
It will be sparse if the inputs are.
"""
def st(*XS):
return sp.stack(XS, axis=axis) if iscoo(XS[0]) else np.stack(XS, axis=axis)
return _apply(st, *XS)
def concatenate(XS, axis=0):
"""
Join a sequence of arrays along an existing axis.
Parameters
----------
X1, X2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
ndarray or SparseArray
The concatenated array. It will be sparse if the inputs are.
"""
def conc(*XS):
return sp.concatenate(XS, axis=axis) if iscoo(XS[0]) else np.concatenate(XS, axis=axis)
return _apply(conc, *XS)
# note: in contrast to np.hstack this only works with arrays of dimension at least 2
def hstack(XS):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis
Parameters
----------
XS : sequence of ndarrays
The arrays must have the same shape along all but the second axis.
Returns
-------
ndarray or SparseArray
The array formed by stacking the given arrays. It will be sparse if the inputs are.
"""
# Confusingly, this needs to concatenate, not stack (stack returns an array with an extra dimension)
return concatenate(XS, 1)
def vstack(XS):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after
1-D arrays of shape (N,) have been reshaped to (1,N).
Parameters
----------
XS : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
ndarray or SparseArray
The array formed by stacking the given arrays, will be at least 2-D. It will be sparse if the inputs are.
"""
# Confusingly, this needs to concatenate, not stack (stack returns an array with an extra dimension)
return concatenate(XS, 0)
def transpose(X, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
X : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes according to the values given
Returns
-------
p : ndarray or SparseArray
`X` with its axes permuted. This will be sparse if `X` is.
"""
def t(X):
if iscoo(X):
return X.transpose(axes)
else:
return np.transpose(X, axes)
return _apply(t, X)
def add_intercept(X):
"""
Adds an intercept feature to an array by prepending a column of ones.
Parameters
----------
X : array-like
Input array. Must be 2D.
Returns
-------
arr : ndarray
`X` with a column of ones prepended
"""
return hstack([np.ones((X.shape[0], 1)), X])
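# Illustrative example (editor's sketch):
#
#   >>> add_intercept(np.array([[2.], [3.]]))
#   array([[1., 2.],
#          [1., 3.]])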
def reshape_Y_T(Y, T):
"""
Reshapes Y and T when Y.ndim = 2 and/or T.ndim = 1.
Parameters
----------
Y : array_like, shape (n, ) or (n, 1)
Outcome for the treatment policy. Must be a vector or single-column matrix.
T : array_like, shape (n, ) or (n, d_t)
Treatment policy.
Returns
-------
Y : array_like, shape (n, )
Flattened outcome for the treatment policy.
T : array_like, shape (n, 1) or (n, d_t)
Reshaped treatment policy.
"""
assert(len(Y) == len(T))
assert(Y.ndim <= 2)
if Y.ndim == 2:
assert(Y.shape[1] == 1)
Y = Y.flatten()
if T.ndim == 1:
T = T.reshape(-1, 1)
return Y, T
def check_inputs(Y, T, X, W=None, multi_output_T=True, multi_output_Y=True):
"""
Input validation for CATE estimators.
Checks Y, T, X, W for consistent length, enforces X, W 2d.
    Only standard input checks are applied to all inputs,
such as checking that an input does not have np.nan or np.inf targets.
Converts regular Python lists to numpy arrays.
Parameters
----------
Y : array_like, shape (n, ) or (n, d_y)
Outcome for the treatment policy.
T : array_like, shape (n, ) or (n, d_t)
Treatment policy.
X : array-like, shape (n, d_x)
Feature vector that captures heterogeneity.
W : array-like, shape (n, d_w) or None (default=None)
High-dimensional controls.
multi_output_T : bool
Whether to allow more than one treatment.
multi_output_Y: bool
Whether to allow more than one outcome.
Returns
-------
Y : array_like, shape (n, ) or (n, d_y)
Converted and validated Y.
T : array_like, shape (n, ) or (n, d_t)
Converted and validated T.
X : array-like, shape (n, d_x)
Converted and validated X.
W : array-like, shape (n, d_w) or None (default=None)
Converted and validated W.
"""
X, T = check_X_y(X, T, multi_output=multi_output_T, y_numeric=True)
_, Y = check_X_y(X, Y, multi_output=multi_output_Y, y_numeric=True)
if W is not None:
W, _ = check_X_y(W, Y, multi_output=multi_output_Y, y_numeric=True)
return Y, T, X, W
def check_input_arrays(*args, validate_len=True):
"""Cast input sequences into numpy arrays.
    Only inputs that are sequence-like will be converted; all other inputs will be left as is.
When `validate_len` is True, the sequences will be checked for equal length.
Parameters
----------
args : scalar or array_like
Inputs to be checked.
validate_len : bool (default=True)
Whether to check if the input arrays have the same length.
Returns
-------
args: array-like
List of inputs where sequence-like objects have been cast to numpy arrays.
"""
args = [check_array(arg, dtype=None, ensure_2d=False, accept_sparse=True)
if np.ndim(arg) > 0 else arg for arg in args]
if validate_len:
n = None
for arg in args:
if np.ndim(arg) > 0:
m = arg.shape[0]
if n is None:
n = m
else:
assert (m == n), "Input arrays have incompatible lengths: {} and {}".format(n, m)
return args
def get_input_columns(X):
"""Extracts column names from dataframe-like input object.
Currently supports column name extraction from pandas DataFrame and Series objects.
Parameters
----------
X : array_like
Input array with column names to be extracted.
Returns
-------
cols: array-like or None
List of columns corresponding to the dataframe-like object.
None if the input array is not in the supported types.
"""
# Type to column extraction function
type_to_func = {
pd.DataFrame: lambda x: x.columns.tolist(),
pd.Series: lambda x: [x.name]
}
if type(X) in type_to_func:
return type_to_func[type(X)](X)
return None
def check_models(models, n):
"""
Input validation for metalearner models.
    Check whether the input is a single estimator or a list/tuple of estimators whose length matches the number of treatments.
Parameters
----------
models : estimator or a list/tuple of estimators
n : int
Number of models needed
Returns
----------
models : a list/tuple of estimators
"""
if isinstance(models, (tuple, list)):
if n != len(models):
            raise ValueError("The number of estimators doesn't match the number of treatments. "
                             "Please provide either a tuple/list of estimators "
                             "with the same number of treatments or a unified estimator.")
elif hasattr(models, 'fit'):
models = [clone(models, safe=False) for i in range(n)]
else:
        raise ValueError(
            "models must be either a tuple/list of estimators with the same number of treatments "
            "or a unified estimator.")
return models
def broadcast_unit_treatments(X, d_t):
"""
Generate `d_t` unit treatments for each row of `X`.
Parameters
----------
d_t: int
Number of treatments
X : array
Features
Returns
-------
X, T : (array, array)
The updated `X` array (with each row repeated `d_t` times),
and the generated `T` array
"""
d_x = shape(X)[0]
eye = np.eye(d_t)
# tile T and repeat X along axis 0 (so that the duplicated rows of X remain consecutive)
T = np.tile(eye, (d_x, 1))
Xs = np.repeat(X, d_t, axis=0)
return Xs, T
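# Illustrative example (editor's sketch): each row of X is repeated d_t times and paired
# with one unit treatment:
#
#   >>> Xs, T = broadcast_unit_treatments(np.array([[1, 2], [3, 4]]), 2)
#   >>> Xs
#   array([[1, 2],
#          [1, 2],
#          [3, 4],
#          [3, 4]])
#   >>> T
#   array([[1., 0.],
#          [0., 1.],
#          [1., 0.],
#          [0., 1.]])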
def reshape_treatmentwise_effects(A, d_t, d_y):
"""
Given an effects matrix ordered first by treatment, transform it to be ordered by outcome.
Parameters
----------
A : array
The array of effects, of size n*d_y*d_t
d_t : tuple of int
Either () if T was a vector, or a 1-tuple of the number of columns of T if it was an array
d_y : tuple of int
Either () if Y was a vector, or a 1-tuple of the number of columns of Y if it was an array
Returns
-------
A : array (shape (m, d_y, d_t))
The transformed array. Note that singleton dimensions will be dropped for any inputs which
were vectors, as in the specification of `BaseCateEstimator.marginal_effect`.
"""
A = reshape(A, (-1,) + d_t + d_y)
if d_t and d_y:
return transpose(A, (0, 2, 1)) # need to return as m by d_y by d_t matrix
else:
return A
def einsum_sparse(subscripts, *arrs):
"""
Evaluate the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional array operations can be represented
in a simple fashion. This function provides a way to compute such summations.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
        Unlike `np.einsum`, ellipses are not supported and the output must be explicitly included
arrs : list of COO arrays
These are the sparse arrays for the operation.
Returns
-------
SparseArray
The sparse array calculated based on the Einstein summation convention.
"""
inputs, outputs = subscripts.split('->')
inputs = inputs.split(',')
outputInds = set(outputs)
allInds = set.union(*[set(i) for i in inputs])
# same number of input definitions as arrays
assert len(inputs) == len(arrs)
# input definitions have same number of dimensions as each array
assert all(arr.ndim == len(input) for (arr, input) in zip(arrs, inputs))
# all result indices are unique
assert len(outputInds) == len(outputs)
# all result indices must match at least one input index
assert outputInds <= allInds
# map indices to all array, axis pairs for that index
indMap = {c: [(n, i) for n in range(len(inputs)) for (i, x) in enumerate(inputs[n]) if x == c] for c in allInds}
for c in indMap:
# each index has the same cardinality wherever it appears
assert len({arrs[n].shape[i] for (n, i) in indMap[c]}) == 1
# State: list of (set of letters, list of (corresponding indices, value))
# Algo: while list contains more than one entry
# take two entries
# sort both lists by intersection of their indices
# merge compatible entries (where intersection of indices is equal - in the resulting list,
# take the union of indices and the product of values), stepping through each list linearly
# TODO: might be faster to break into connected components first
# e.g. for "ab,d,bc->ad", the two components "ab,bc" and "d" are independent,
# so compute their content separately, then take cartesian product
# this would save a few pointless sorts by empty tuples
# TODO: Consider investigating other performance ideas for these cases
# where the dense method beat the sparse method (usually sparse is faster)
# e,facd,c->cfed
# sparse: 0.0335489
# dense: 0.011465999999999997
# gbd,da,egb->da
# sparse: 0.0791625
# dense: 0.007319099999999995
# dcc,d,faedb,c->abe
# sparse: 1.2868097
# dense: 0.44605229999999985
def merge(x1, x2):
(s1, l1), (s2, l2) = x1, x2
keys = {c for c in s1 if c in s2} # intersection of strings
outS = ''.join(set(s1 + s2)) # union of strings
outMap = [(True, s1.index(c)) if c in s1 else (False, s2.index(c)) for c in outS]
def keyGetter(s):
inds = [s.index(c) for c in keys]
return lambda p: tuple(p[0][ind] for ind in inds)
kg1 = keyGetter(s1)
kg2 = keyGetter(s2)
l1.sort(key=kg1)
l2.sort(key=kg2)
i1 = i2 = 0
outL = []
while i1 < len(l1) and i2 < len(l2):
k1, k2 = kg1(l1[i1]), kg2(l2[i2])
if k1 < k2:
i1 += 1
elif k2 < k1:
i2 += 1
else:
j1, j2 = i1, i2
while j1 < len(l1) and kg1(l1[j1]) == k1:
j1 += 1
while j2 < len(l2) and kg2(l2[j2]) == k2:
j2 += 1
for c1, d1 in l1[i1:j1]:
for c2, d2 in l2[i2:j2]:
outL.append((tuple(c1[charIdx] if inFirst else c2[charIdx] for inFirst, charIdx in outMap),
d1 * d2))
i1 = j1
i2 = j2
return outS, outL
# when indices are repeated within an array, pre-filter the coordinates and data
def filter_inds(coords, data, n):
counts = Counter(inputs[n])
repeated = [(c, counts[c]) for c in counts if counts[c] > 1]
if len(repeated) > 0:
mask = np.full(len(data), True)
for (k, v) in repeated:
inds = [i for i in range(len(inputs[n])) if inputs[n][i] == k]
for i in range(1, v):
mask &= (coords[:, inds[0]] == coords[:, inds[i]])
if not all(mask):
return coords[mask, :], data[mask]
return coords, data
xs = [(s, list(zip(c, d)))
for n, (s, arr) in enumerate(zip(inputs, arrs))
for c, d in [filter_inds(arr.coords.T, arr.data, n)]]
# TODO: would using einsum's paths to optimize the order of merging help?
while len(xs) > 1:
xs.append(merge(xs.pop(), xs.pop()))
results = defaultdict(int)
for (s, l) in xs:
coordMap = [s.index(c) for c in outputs]
for (c, d) in l:
results[tuple(c[i] for i in coordMap)] += d
return sp.COO(np.array(list(results.keys())).T if results else
np.empty((len(outputs), 0)),
np.array(list(results.values())),
[arrs[indMap[c][0][0]].shape[indMap[c][0][1]] for c in outputs])
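# Editorial sketch (not part of the original source): a sparse matrix-vector
# product expressed through einsum_sparse. Assumes the PyData `sparse` package,
# which this module already imports as `sp`.
def _demo_einsum_sparse():
    import numpy as np
    import sparse as sp_demo
    M = sp_demo.COO.from_numpy(np.array([[1.0, 0.0], [0.0, 2.0]]))
    v = sp_demo.COO.from_numpy(np.array([3.0, 4.0]))
    out = einsum_sparse('ab,b->a', M, v)
    assert np.allclose(out.todense(), np.array([3.0, 8.0]))
    return out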
def fit_with_groups(model, X, y, groups=None, **kwargs):
"""
Fit a model while correctly handling grouping if necessary.
This enables us to perform an inner-loop cross-validation of a model
which handles grouping correctly, which is not easy using typical sklearn models.
For example, GridSearchCV and RandomSearchCV both support passing 'groups' to fit,
but other CV-related estimators (such as those derived from LinearModelCV, including LassoCV),
do not support passing groups to fit, which means that GroupKFold cannot be used as the cv instance
when using these types, because the required 'groups' argument will never be passed to the
GroupKFold's split method. See also https://github.com/scikit-learn/scikit-learn/issues/12052
The (hacky) workaround that is used here is to explicitly set the 'cv' attribute (if there is one) to
the exact set of rows and not to use GroupKFold even with the sklearn classes that could support it;
this should work with classes derived from BaseSearchCV, LinearModelCV, and CalibratedClassifierCV.
Parameters
----------
model : estimator
The model to fit
X : array-like
The features to fit against
y : array-like
The target to fit against
groups : array-like, optional
The set of groupings that should be kept together when splitting rows for
cross-validation
kwargs : dict
Any other named arguments to pass to the model's fit
"""
if groups is not None:
# assume that we should perform nested cross-validation if and only if
# the model has a 'cv' attribute; this is a somewhat brittle assumption...
if hasattr(model, 'cv'):
old_cv = model.cv
# logic copied from check_cv
cv = 5 if old_cv is None else old_cv
if isinstance(cv, numbers.Integral):
cv = GroupKFold(cv)
# otherwise we will assume the user already set the cv attribute to something
# compatible with splitting with a 'groups' argument
# now we have to compute the folds explicitly because some classifiers (like LassoCV)
# don't use the groups when calling split internally
splits = list(cv.split(X, y, groups=groups))
try:
model.cv = splits
return model.fit(X, y, **kwargs)
finally:
model.cv = old_cv
return model.fit(X, y, **kwargs)
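# Editorial sketch (added, hypothetical data): group-aware nested CV with LassoCV,
# whose own fit method does not accept a `groups` argument.
def _demo_fit_with_groups():
    import numpy as np
    from sklearn.linear_model import LassoCV
    rng = np.random.RandomState(0)
    X = rng.normal(size=(30, 3))
    y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=30)
    groups = np.repeat(np.arange(10), 3)  # 10 groups of 3 consecutive rows
    model = fit_with_groups(LassoCV(cv=3), X, y, groups=groups)
    return model.coef_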
def filter_none_kwargs(**kwargs):
"""
Filters out any keyword arguments that are None.
This is useful when specific optional keyword arguments might not be universally supported,
so that stripping them out when they are not set enables more uses to succeed.
Parameters
----------
kwargs: dict
The keyword arguments to filter
Returns
-------
filtered_kwargs: dict
The input dictionary, but with all entries having value None removed
"""
return {key: value for key, value in kwargs.items() if value is not None}
class WeightedModelWrapper:
"""Helper class for assiging weights to models without this option.
Parameters
----------
model_instance : estimator
Model that requires weights.
sample_type : string, optional (default=`weighted`)
Method for adding weights to the model. `weighted` for linear regression models
where the weights can be incorporated in the matrix multiplication,
`sampled` for other models. `sampled` samples the training set according
to the normalized weights and creates a dataset larger than the original.
"""
def __init__(self, model_instance, sample_type="weighted"):
self.model_instance = model_instance
if sample_type == "weighted":
self.data_transform = self._weighted_inputs
else:
warnings.warn("The model provided does not support sample weights. "
"Manual weighted sampling may icrease the variance in the results.", UserWarning)
self.data_transform = self._sampled_inputs
def fit(self, X, y, sample_weight=None):
"""Fit underlying model instance with weighted inputs.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples, n_outcomes)
Target values.
Returns
-------
self: an instance of the underlying estimator.
"""
if sample_weight is not None:
X, y = self.data_transform(X, y, sample_weight)
return self.model_instance.fit(X, y)
def predict(self, X):
"""Predict using the linear model.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Samples.
Returns
-------
C : array, shape (n_samples, n_outcomes)
Returns predicted values.
"""
return self.model_instance.predict(X)
def _weighted_inputs(self, X, y, sample_weight):
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
normalized_weights = sample_weight * X.shape[0] / np.sum(sample_weight)
sqrt_weights = np.sqrt(normalized_weights)
weighted_X = sqrt_weights.reshape(-1, 1) * X
weighted_y = sqrt_weights.reshape(-1, 1) * y if y.ndim > 1 else sqrt_weights * y
return weighted_X, weighted_y
def _sampled_inputs(self, X, y, sample_weight):
# Normalize weights
normalized_weights = sample_weight / np.sum(sample_weight)
data_length = int(min(1 / np.min(normalized_weights[normalized_weights > 0]), 10) * X.shape[0])
data_indices = np.random.choice(X.shape[0], size=data_length, p=normalized_weights)
return X[data_indices], y[data_indices]
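# Editorial usage sketch (added, hypothetical data): folding sample weights into a
# plain LinearRegression via the "weighted" transform. fit_intercept is disabled
# because re-weighting the design matrix would otherwise distort the offset.
def _demo_weighted_model_wrapper():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    X = rng.normal(size=(50, 2))
    y = X @ np.array([2.0, -1.0]) + rng.normal(scale=0.05, size=50)
    w = rng.uniform(0.5, 1.5, size=50)
    wrapped = WeightedModelWrapper(LinearRegression(fit_intercept=False), sample_type="weighted")
    wrapped.fit(X, y, sample_weight=w)
    return wrapped.predict(X[:3])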
class MultiModelWrapper:
"""Helper class for training different models for each treatment.
Parameters
----------
model_list : array-like, shape (n_T, )
List of models to be trained separately for each treatment group.
"""
def __init__(self, model_list=[]):
self.model_list = model_list
self.n_T = len(model_list)
def fit(self, Xt, y, sample_weight=None):
"""Fit underlying list of models with weighted inputs.
Parameters
----------
X : array-like, shape (n_samples, n_features + n_treatments)
Training data. The last n_T columns should be a one-hot encoding of the treatment assignment.
y : array-like, shape (n_samples, )
Target values.
Returns
-------
self: an instance of the class
"""
X = Xt[:, :-self.n_T]
t = Xt[:, -self.n_T:]
if sample_weight is None:
for i in range(self.n_T):
mask = (t[:, i] == 1)
self.model_list[i].fit(X[mask], y[mask])
else:
for i in range(self.n_T):
mask = (t[:, i] == 1)
self.model_list[i].fit(X[mask], y[mask], sample_weight[mask])
return self
def predict(self, Xt):
"""Predict using the linear model.
Parameters
----------
X : array-like, shape (n_samples, n_features + n_treatments)
Samples. The last n_T columns should be a one-hot encoding of the treatment assignment.
Returns
-------
C : array, shape (n_samples, )
Returns predicted values.
"""
X = Xt[:, :-self.n_T]
t = Xt[:, -self.n_T:]
predictions = [self.model_list[np.nonzero(t[i])[0][0]].predict(X[[i]]) for i in range(len(X))]
return np.concatenate(predictions)
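# Editorial sketch (added, hypothetical data): one sub-model per treatment, selected
# by the trailing one-hot columns of the input matrix.
def _demo_multi_model_wrapper():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(1)
    n, n_t = 60, 2
    X = rng.normal(size=(n, 3))
    t = np.eye(n_t)[rng.randint(n_t, size=n)]       # one-hot treatment columns
    y = np.where(t[:, 0] == 1, X[:, 0], -X[:, 0])   # treatment-specific response
    Xt = np.hstack([X, t])
    mm = MultiModelWrapper([LinearRegression(), LinearRegression()])
    mm.fit(Xt, y)
    return mm.predict(Xt[:5])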
def _safe_norm_ppf(q, loc=0, scale=1):
if hasattr(loc, "__len__"):
prelim = loc.copy()
if np.any(scale > 0):
prelim[scale > 0] = scipy.stats.norm.ppf(q, loc=loc[scale > 0], scale=scale[scale > 0])
elif scale > 0:
prelim = scipy.stats.norm.ppf(q, loc=loc, scale=scale)
else:
prelim = loc
return prelim
class Summary:
# This class is mainly derived from statsmodels.iolib.summary.Summary
"""
Result summary
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
Contains the list of SimpleTable instances; horizontally concatenated
tables are not saved separately.
extra_txt : str
extra lines that are added to the text output, used for warnings
and explanations.
"""
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table(self, res, header, index, title):
table = SimpleTable(res, header, index, title)
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
etext : list[str]
string with lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : str
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if self.extra_txt is not None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : str
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
latex = summary_return(self.tables, return_fmt='latex')
if self.extra_txt is not None:
latex = latex + '\n\n' + self.extra_txt.replace('\n', ' \\newline\n ')
return latex
def as_csv(self):
'''return tables as string
Returns
-------
csv : str
concatenated summary tables in comma delimited format
'''
csv = summary_return(self.tables, return_fmt='csv')
if self.extra_txt is not None:
csv = csv + '\n\n' + self.extra_txt
return csv
def as_html(self):
'''return tables as string
Returns
-------
html : str
concatenated summary tables in HTML format
'''
html = summary_return(self.tables, return_fmt='html')
if self.extra_txt is not None:
html = html + '<br/><br/>' + self.extra_txt.replace('\n', '<br/>')
return html
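# Editorial sketch (added): assembling a minimal Summary. Assumes statsmodels'
# SimpleTable/summary_return, which this module already imports; data are made up.
def _demo_summary():
    s = Summary()
    s.add_table([[1.0, 2.0]], ["coef", "stderr"], ["x0"], "Example table")
    s.add_extra_txt(["Illustrative output only."])
    return s.as_text()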
class SeparateModel:
"""
Splits the data based on the last feature and trains
a separate model for each subsample. At predict time, it
uses the last feature to choose which model to use
to predict.
"""
def __init__(self, *models):
self.models = [clone(model) for model in models]
def fit(self, XZ, T):
for (i, m) in enumerate(self.models):
inds = (XZ[:, -1] == i)
m.fit(XZ[inds, :-1], T[inds])
return self
def predict(self, XZ):
t_pred = np.zeros(XZ.shape[0])
for (i, m) in enumerate(self.models):
inds = (XZ[:, -1] == i)
if np.any(inds):
t_pred[inds] = m.predict(XZ[inds, :-1])
return t_pred
@property
def coef_(self):
return np.concatenate([model.coef_ for model in self.models])
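# Editorial sketch (added, hypothetical data): the last column of XZ routes each row
# to its own sub-model, both at fit and at predict time.
def _demo_separate_model():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    X = rng.normal(size=(40, 2))
    z = rng.randint(2, size=40)               # group indicator in {0, 1}
    T = np.where(z == 0, X[:, 0], X[:, 1])     # each group has its own response
    XZ = np.hstack([X, z.reshape(-1, 1)])
    sep = SeparateModel(LinearRegression(), LinearRegression())
    sep.fit(XZ, T)
    return sep.predict(XZ[:5])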
class _EncoderWrapper:
"""
Wraps a OneHotEncoder (and optionally also a LabelEncoder).
Useful mainly so that the `encode` method can be used in a FunctionTransformer,
which would otherwise need a lambda (which can't be pickled).
"""
def __init__(self, one_hot_encoder, label_encoder=None, drop_first=False):
self._label_encoder = label_encoder
self._one_hot_encoder = one_hot_encoder
self._drop_first = drop_first
def encode(self, arr):
if self._label_encoder:
arr = self._label_encoder.transform(arr.ravel())
result = self._one_hot_encoder.transform(reshape(arr, (-1, 1)))
return result[:, 1:] if self._drop_first else result
def deprecated(message, category=FutureWarning):
"""
Enables decorating a method or class to provide a warning when it is used.
Parameters
----------
message: string
The deprecation message to use
category: optional :class:`type`, default :class:`FutureWarning`
The warning category to use
"""
def decorator(to_wrap):
# if we're decorating a class, just update the __init__ method,
# so that the result is still a class instead of a wrapper method
if isinstance(to_wrap, type):
old_init = to_wrap.__init__
@wraps(to_wrap.__init__)
def new_init(*args, **kwargs):
warn(message, category, stacklevel=2)
old_init(*args, **kwargs)
to_wrap.__init__ = new_init
return to_wrap
else:
@wraps(to_wrap)
def m(*args, **kwargs):
warn(message, category, stacklevel=2)
return to_wrap(*args, **kwargs)
return m
return decorator
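# Editorial sketch (added, hypothetical function): decorating a throwaway helper and
# capturing the FutureWarning that `deprecated` emits on each call.
def _demo_deprecated():
    import warnings

    @deprecated("old_helper is deprecated; use new_helper instead.")
    def old_helper(x):
        return x + 1

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        result = old_helper(1)
    return result, [str(w.message) for w in caught]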
def _deprecate_positional(message, bad_args, category=FutureWarning):
"""
Enables decorating a method to provide a warning when certain arguments are used positionally.
Parameters
----------
message: string
The deprecation message to use
bad_args : list of string
The positional arguments that will be keyword-only in the future
category: optional :class:`type`, default :class:`FutureWarning`
The warning category to use
"""
def decorator(to_wrap):
@wraps(to_wrap)
def m(*args, **kwargs):
# want to enforce that each bad_arg was either in kwargs,
# or else it was in neither and is just taking its default value
bound = signature(m).bind(*args, **kwargs)
wrong_args = False
for arg in bad_args:
if arg not in kwargs and arg in bound.arguments:
wrong_args = True
if wrong_args:
warn(message, category, stacklevel=2)
return to_wrap(*args, **kwargs)
return m
return decorator
def transpose_dictionary(d):
"""
Transpose a dictionary of dictionaries, bringing the keys from the second level
to the top and vice versa
Parameters
----------
d: dict
The dictionary to transpose; the values of this dictionary should all themselves
be dictionaries
Returns
-------
output: dict
The output dictionary with first- and second-level keys swapped
"""
output = defaultdict(dict)
for key1, value in d.items():
for key2, val in value.items():
output[key2][key1] = val
return output
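# Editorial sketch (added, hypothetical keys): swapping the two key levels of a nested dict.
def _demo_transpose_dictionary():
    d = {'model_y': {'mse': 0.1, 'r2': 0.9}, 'model_t': {'mse': 0.2, 'r2': 0.8}}
    out = transpose_dictionary(d)
    # out == {'mse': {'model_y': 0.1, 'model_t': 0.2}, 'r2': {'model_y': 0.9, 'model_t': 0.8}}
    return out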
class _RegressionWrapper:
"""
A simple wrapper that makes a binary classifier behave like a regressor.
Essentially, .fit calls the fit method of the classifier and
.predict calls the .predict_proba method of the classifier
and returns the probability of label 1.
"""
def __init__(self, clf):
"""
Parameters
----------
clf : the classifier model
"""
self._clf = clf
def fit(self, X, y, **kwargs):
"""
Parameters
----------
X : features
y : one-hot-encoding of binary label, with drop='first'
"""
if len(y.shape) > 1 and y.shape[1] > 1:
y = y @ np.arange(1, y.shape[1] + 1)
self._clf.fit(X, y, **kwargs)
return self
def predict(self, X):
"""
Parameters
----------
X : features
"""
return self._clf.predict_proba(X)[:, 1:]
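# Editorial sketch (added, hypothetical data): a classifier exposed through a
# regressor-style interface; predict() returns the probability of the positive class.
def _demo_regression_wrapper():
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 2))
    y = (X[:, 0] > 0).astype(int)
    reg = _RegressionWrapper(LogisticRegression())
    reg.fit(X, y)
    return reg.predict(X[:5])   # shape (5, 1): probabilities of label 1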
@deprecated("This class will be removed from a future version of this package; "
"please use econml.sklearn_extensions.linear_model.WeightedLassoCV instead.")
class LassoCVWrapper:
"""Helper class to wrap either LassoCV or MultiTaskLassoCV depending on the shape of the target."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, X, Y):
assert shape(X)[0] == shape(Y)[0]
assert ndim(Y) <= 2
self.needs_unravel = False
if ndim(Y) == 2 and shape(Y)[1] > 1:
self.model = MultiTaskLassoCV(*self.args, **self.kwargs)
else:
if ndim(Y) == 2 and shape(Y)[1] == 1:
Y = np.ravel(Y)
self.needs_unravel = True
self.model = LassoCV(*self.args, **self.kwargs)
self.model.fit(X, Y)
return self
def predict(self, X):
predictions = self.model.predict(X)
return reshape(predictions, (-1, 1)) if self.needs_unravel else predictions
|
[
"statsmodels.iolib.summary.summary_return",
"numpy.sum",
"numpy.ravel",
"numpy.iinfo",
"numpy.ones",
"collections.defaultdict",
"numpy.shape",
"numpy.arange",
"numpy.tile",
"sklearn.model_selection.GroupKFold",
"sparse.concatenate",
"sklearn.clone",
"sklearn.linear_model.MultiTaskLassoCV",
"sklearn.utils.check_array",
"sklearn.utils.check_X_y",
"numpy.ndim",
"statsmodels.iolib.table.SimpleTable",
"sparse.stack",
"numpy.transpose",
"inspect.signature",
"numpy.reshape",
"numpy.random.choice",
"collections.Counter",
"sparse.COO.from_scipy_sparse",
"numpy.repeat",
"numpy.stack",
"numpy.size",
"numpy.tensordot",
"sparse.tensordot",
"numpy.min",
"functools.wraps",
"sparse.COO",
"numpy.concatenate",
"sklearn.linear_model.LassoCV",
"sparse.COO.from_numpy",
"numpy.zeros",
"numpy.any",
"numpy.nonzero",
"numpy.array",
"functools.reduce",
"numpy.eye",
"warnings.warn",
"numpy.sqrt"
] |
[((959, 977), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (967, 977), True, 'import numpy as np\n'), ((14297, 14357), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'T'], {'multi_output': 'multi_output_T', 'y_numeric': '(True)'}), '(X, T, multi_output=multi_output_T, y_numeric=True)\n', (14306, 14357), False, 'from sklearn.utils import check_array, check_X_y\n'), ((14369, 14429), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'Y'], {'multi_output': 'multi_output_Y', 'y_numeric': '(True)'}), '(X, Y, multi_output=multi_output_Y, y_numeric=True)\n', (14378, 14429), False, 'from sklearn.utils import check_array, check_X_y\n'), ((17732, 17743), 'numpy.eye', 'np.eye', (['d_t'], {}), '(d_t)\n', (17738, 17743), True, 'import numpy as np\n'), ((17845, 17867), 'numpy.tile', 'np.tile', (['eye', '(d_x, 1)'], {}), '(eye, (d_x, 1))\n', (17852, 17867), True, 'import numpy as np\n'), ((17877, 17902), 'numpy.repeat', 'np.repeat', (['X', 'd_t'], {'axis': '(0)'}), '(X, d_t, axis=0)\n', (17886, 17902), True, 'import numpy as np\n'), ((23886, 23902), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23897, 23902), False, 'from collections import defaultdict, Counter\n'), ((39588, 39605), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (39599, 39605), False, 'from collections import defaultdict, Counter\n'), ((2560, 2582), 'warnings.warn', 'warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (2564, 2582), False, 'from warnings import warn\n'), ((5035, 5045), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (5042, 5045), True, 'import numpy as np\n'), ((5147, 5158), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (5155, 5158), True, 'import numpy as np\n'), ((5261, 5271), 'numpy.ndim', 'np.ndim', (['X'], {}), '(X)\n', (5268, 5271), True, 'import numpy as np\n'), ((5856, 5883), 'sparse.COO.from_scipy_sparse', 'sp.COO.from_scipy_sparse', (['X'], {}), '(X)\n', (5880, 5883), True, 'import sparse as sp\n'), ((14467, 14527), 'sklearn.utils.check_X_y', 'check_X_y', (['W', 'Y'], {'multi_output': 'multi_output_Y', 'y_numeric': '(True)'}), '(W, Y, multi_output=multi_output_Y, y_numeric=True)\n', (14476, 14527), False, 'from sklearn.utils import check_array, check_X_y\n'), ((23075, 23093), 'collections.Counter', 'Counter', (['inputs[n]'], {}), '(inputs[n])\n', (23082, 23093), False, 'from collections import defaultdict, Counter\n'), ((29446, 29496), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {'y_numeric': '(True)', 'multi_output': '(True)'}), '(X, y, y_numeric=True, multi_output=True)\n', (29455, 29496), False, 'from sklearn.utils import check_array, check_X_y\n'), ((29600, 29627), 'numpy.sqrt', 'np.sqrt', (['normalized_weights'], {}), '(normalized_weights)\n', (29607, 29627), True, 'import numpy as np\n'), ((30083, 30151), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]'], {'size': 'data_length', 'p': 'normalized_weights'}), '(X.shape[0], size=data_length, p=normalized_weights)\n', (30099, 30151), True, 'import numpy as np\n'), ((32039, 32066), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (32053, 32066), True, 'import numpy as np\n'), ((32179, 32196), 'numpy.any', 'np.any', (['(scale > 0)'], {}), '(scale > 0)\n', (32185, 32196), True, 'import numpy as np\n'), ((33366, 33404), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['res', 'header', 'index', 'title'], {}), '(res, header, index, title)\n', (33377, 33404), False, 'from statsmodels.iolib.table import SimpleTable\n'), ((33930, 33976), 
'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""text"""'}), "(self.tables, return_fmt='text')\n", (33944, 33976), False, 'from statsmodels.iolib.summary import summary_return\n'), ((34481, 34528), 'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""latex"""'}), "(self.tables, return_fmt='latex')\n", (34495, 34528), False, 'from statsmodels.iolib.summary import summary_return\n'), ((34874, 34919), 'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""csv"""'}), "(self.tables, return_fmt='csv')\n", (34888, 34919), False, 'from statsmodels.iolib.summary import summary_return\n'), ((35220, 35266), 'statsmodels.iolib.summary.summary_return', 'summary_return', (['self.tables'], {'return_fmt': '"""html"""'}), "(self.tables, return_fmt='html')\n", (35234, 35266), False, 'from statsmodels.iolib.summary import summary_return\n'), ((35940, 35961), 'numpy.zeros', 'np.zeros', (['XZ.shape[0]'], {}), '(XZ.shape[0])\n', (35948, 35961), True, 'import numpy as np\n'), ((36202, 36254), 'numpy.concatenate', 'np.concatenate', (['(model.coef_ for model in self.models)'], {}), '(model.coef_ for model in self.models)\n', (36216, 36254), True, 'import numpy as np\n'), ((38564, 38578), 'functools.wraps', 'wraps', (['to_wrap'], {}), '(to_wrap)\n', (38569, 38578), False, 'from functools import reduce, wraps\n'), ((1841, 1872), 'numpy.reshape', 'np.reshape', (['intercept', '(d_y + dt)'], {}), '(intercept, d_y + dt)\n', (1851, 1872), True, 'import numpy as np\n'), ((1914, 1937), 'numpy.ndim', 'np.ndim', (['cate_intercept'], {}), '(cate_intercept)\n', (1921, 1937), True, 'import numpy as np\n'), ((4162, 4171), 'sparse.COO', 'sp.COO', (['X'], {}), '(X)\n', (4168, 4171), True, 'import sparse as sp\n'), ((4765, 4776), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4773, 4776), True, 'import numpy as np\n'), ((7441, 7467), 'sparse.tensordot', 'sp.tensordot', (['X1', 'X2', 'axes'], {}), '(X1, X2, axes)\n', (7453, 7467), True, 'import sparse as sp\n'), ((7486, 7512), 'numpy.tensordot', 'np.tensordot', (['X1', 'X2', 'axes'], {}), '(X1, X2, axes)\n', (7498, 7512), True, 'import numpy as np\n'), ((8533, 8556), 'functools.reduce', 'reduce', (['np.multiply', 'XS'], {}), '(np.multiply, XS)\n', (8539, 8556), False, 'from functools import reduce, wraps\n'), ((9305, 9328), 'sparse.stack', 'sp.stack', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (9313, 9328), True, 'import sparse as sp\n'), ((9350, 9373), 'numpy.stack', 'np.stack', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (9358, 9373), True, 'import numpy as np\n'), ((9952, 9981), 'sparse.concatenate', 'sp.concatenate', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (9966, 9981), True, 'import sparse as sp\n'), ((10003, 10032), 'numpy.concatenate', 'np.concatenate', (['XS'], {'axis': 'axis'}), '(XS, axis=axis)\n', (10017, 10032), True, 'import numpy as np\n'), ((11940, 11961), 'numpy.transpose', 'np.transpose', (['X', 'axes'], {}), '(X, axes)\n', (11952, 11961), True, 'import numpy as np\n'), ((12290, 12314), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (12297, 12314), True, 'import numpy as np\n'), ((15176, 15241), 'sklearn.utils.check_array', 'check_array', (['arg'], {'dtype': 'None', 'ensure_2d': '(False)', 'accept_sparse': '(True)'}), '(arg, dtype=None, ensure_2d=False, accept_sparse=True)\n', (15187, 15241), False, 'from sklearn.utils import check_array, check_X_y\n'), ((28212, 28369), 
'warnings.warn', 'warnings.warn', (['"""The model provided does not support sample weights. Manual weighted sampling may icrease the variance in the results."""', 'UserWarning'], {}), "(\n 'The model provided does not support sample weights. Manual weighted sampling may icrease the variance in the results.'\n , UserWarning)\n", (28225, 28369), False, 'import warnings\n'), ((29555, 29576), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (29561, 29576), True, 'import numpy as np\n'), ((29934, 29955), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (29940, 29955), True, 'import numpy as np\n'), ((35690, 35702), 'sklearn.clone', 'clone', (['model'], {}), '(model)\n', (35695, 35702), False, 'from sklearn import clone\n'), ((36059, 36071), 'numpy.any', 'np.any', (['inds'], {}), '(inds)\n', (36065, 36071), True, 'import numpy as np\n'), ((37588, 37611), 'functools.wraps', 'wraps', (['to_wrap.__init__'], {}), '(to_wrap.__init__)\n', (37593, 37611), False, 'from functools import reduce, wraps\n'), ((37847, 37861), 'functools.wraps', 'wraps', (['to_wrap'], {}), '(to_wrap)\n', (37852, 37861), False, 'from functools import reduce, wraps\n'), ((41272, 41315), 'sklearn.linear_model.MultiTaskLassoCV', 'MultiTaskLassoCV', (['*self.args'], {}), '(*self.args, **self.kwargs)\n', (41288, 41315), False, 'from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso\n'), ((41479, 41513), 'sklearn.linear_model.LassoCV', 'LassoCV', (['*self.args'], {}), '(*self.args, **self.kwargs)\n', (41486, 41513), False, 'from sklearn.linear_model import LassoCV, MultiTaskLassoCV, Lasso, MultiTaskLasso\n'), ((2491, 2501), 'numpy.ndim', 'np.ndim', (['T'], {}), '(T)\n', (2498, 2501), True, 'import numpy as np\n'), ((3149, 3177), 'numpy.arange', 'np.arange', (['(1)', '(T.shape[1] + 1)'], {}), '(1, T.shape[1] + 1)\n', (3158, 3177), True, 'import numpy as np\n'), ((4222, 4249), 'sparse.COO.from_scipy_sparse', 'sp.COO.from_scipy_sparse', (['X'], {}), '(X)\n', (4246, 4249), True, 'import sparse as sp\n'), ((4275, 4295), 'sparse.COO.from_numpy', 'sp.COO.from_numpy', (['X'], {}), '(X)\n', (4292, 4295), True, 'import sparse as sp\n'), ((15257, 15269), 'numpy.ndim', 'np.ndim', (['arg'], {}), '(arg)\n', (15264, 15269), True, 'import numpy as np\n'), ((15378, 15390), 'numpy.ndim', 'np.ndim', (['arg'], {}), '(arg)\n', (15385, 15390), True, 'import numpy as np\n'), ((17094, 17119), 'sklearn.clone', 'clone', (['models'], {'safe': '(False)'}), '(models, safe=False)\n', (17099, 17119), False, 'from sklearn import clone\n'), ((26225, 26239), 'sklearn.model_selection.GroupKFold', 'GroupKFold', (['cv'], {}), '(cv)\n', (26235, 26239), False, 'from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold\n'), ((37671, 37708), 'warnings.warn', 'warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (37675, 37708), False, 'from warnings import warn\n'), ((37914, 37951), 'warnings.warn', 'warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (37918, 37951), False, 'from warnings import warn\n'), ((39024, 39061), 'warnings.warn', 'warn', (['message', 'category'], {'stacklevel': '(2)'}), '(message, category, stacklevel=2)\n', (39028, 39061), False, 'from warnings import warn\n'), ((40430, 40458), 'numpy.arange', 'np.arange', (['(1)', '(y.shape[1] + 1)'], {}), '(1, y.shape[1] + 1)\n', (40439, 40458), True, 'import numpy as np\n'), ((41400, 41411), 'numpy.ravel', 'np.ravel', (['Y'], {}), '(Y)\n', (41408, 
41411), True, 'import numpy as np\n'), ((38778, 38790), 'inspect.signature', 'signature', (['m'], {}), '(m)\n', (38787, 38790), False, 'from inspect import signature\n'), ((29990, 30040), 'numpy.min', 'np.min', (['normalized_weights[normalized_weights > 0]'], {}), '(normalized_weights[normalized_weights > 0])\n', (29996, 30040), True, 'import numpy as np\n'), ((2331, 2360), 'sklearn.clone', 'clone', (['featurizer'], {'safe': '(False)'}), '(featurizer, safe=False)\n', (2336, 2360), False, 'from sklearn import clone\n'), ((31960, 31976), 'numpy.nonzero', 'np.nonzero', (['t[i]'], {}), '(t[i])\n', (31970, 31976), True, 'import numpy as np\n')]
|
from typing import Any, Tuple, Union
import numpy as np
import pandas as pd
def named_aggregate_summary(series: pd.Series, key: str):
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def mad(arr, m=None):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
Indicates the variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
if m is None:
m = np.median(arr)
return np.median(np.abs(arr - m))
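# Editorial check (added): the median of [1..5] is 3, the absolute deviations are
# [2, 1, 0, 1, 2], so the MAD is 1.
def _demo_mad():
    import numpy as np
    assert mad(np.array([1.0, 2.0, 3.0, 4.0, 5.0])) == 1.0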
def numerical_summary(
series: pd.Series,
quantiles=(0.05, 0.25, 0.5, 0.75, 0.95),
count=None,
is_unique=None,
return_values=False,
) -> Union[dict, Tuple[dict, Any]]:
"""
Args:
series: series to summarize
Returns:
"""
if count is None:
count = series.count()
values = series.values
present_values = values[~np.isnan(values)]
finite_mask = np.isfinite(present_values)
finite_values = present_values[finite_mask]
summary = {
"mean": np.mean(present_values),
"std": np.std(present_values, ddof=1),
"min": np.min(present_values),
"max": np.max(present_values),
# Unbiased kurtosis obtained using Fisher's definition (kurtosis of normal == 0.0). Normalized by N-1.
"kurt": series.kurt(),
# Unbiased skew normalized by N-1
"skew": series.skew(),
"sum": np.sum(present_values),
"n_infinite": (~finite_mask).sum(),
"n_zeros": (count - np.count_nonzero(present_values)),
}
for percentile, value in series.quantile(quantiles).to_dict().items():
summary["quantile_{:d}".format(int(percentile * 100))] = value
summary["median"] = summary["quantile_50"]
summary["iqr"] = summary["quantile_75"] - summary["quantile_25"]
summary["mad"] = mad(present_values, summary["quantile_50"])
summary["variance"] = summary["std"] ** 2
summary["cv"] = summary["std"] / summary["mean"] if summary["mean"] else np.NaN
summary["range"] = summary["max"] - summary["min"]
summary["monotonic_increase"] = series.is_monotonic_increasing
summary["monotonic_decrease"] = series.is_monotonic_decreasing
summary["monotonic_increase_strict"] = (
summary["monotonic_increase"] and series.is_unique
)
summary["monotonic_decrease_strict"] = (
summary["monotonic_decrease"] and series.is_unique
)
if return_values:
return summary, finite_values
return summary
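# Editorial sketch (added, made-up data): summary of a short series containing a NaN
# and an inf. The NaN is dropped before the moment statistics; the inf is counted in
# n_infinite and excluded from the returned finite values.
def _demo_numerical_summary():
    import numpy as np
    import pandas as pd
    s = pd.Series([1.0, 2.0, 3.0, np.nan, np.inf])
    summary, finite = numerical_summary(s, return_values=True)
    assert summary["n_infinite"] == 1 and len(finite) == 3
    return summary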
|
[
"numpy.abs",
"numpy.sum",
"numpy.count_nonzero",
"numpy.median",
"numpy.std",
"numpy.isfinite",
"numpy.isnan",
"numpy.max",
"numpy.mean",
"numpy.min"
] |
[((1053, 1080), 'numpy.isfinite', 'np.isfinite', (['present_values'], {}), '(present_values)\n', (1064, 1080), True, 'import numpy as np\n'), ((175, 189), 'numpy.max', 'np.max', (['series'], {}), '(series)\n', (181, 189), True, 'import numpy as np\n'), ((214, 229), 'numpy.mean', 'np.mean', (['series'], {}), '(series)\n', (221, 229), True, 'import numpy as np\n'), ((256, 273), 'numpy.median', 'np.median', (['series'], {}), '(series)\n', (265, 273), True, 'import numpy as np\n'), ((297, 311), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (303, 311), True, 'import numpy as np\n'), ((585, 599), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (594, 599), True, 'import numpy as np\n'), ((621, 636), 'numpy.abs', 'np.abs', (['(arr - m)'], {}), '(arr - m)\n', (627, 636), True, 'import numpy as np\n'), ((1162, 1185), 'numpy.mean', 'np.mean', (['present_values'], {}), '(present_values)\n', (1169, 1185), True, 'import numpy as np\n'), ((1202, 1232), 'numpy.std', 'np.std', (['present_values'], {'ddof': '(1)'}), '(present_values, ddof=1)\n', (1208, 1232), True, 'import numpy as np\n'), ((1249, 1271), 'numpy.min', 'np.min', (['present_values'], {}), '(present_values)\n', (1255, 1271), True, 'import numpy as np\n'), ((1288, 1310), 'numpy.max', 'np.max', (['present_values'], {}), '(present_values)\n', (1294, 1310), True, 'import numpy as np\n'), ((1542, 1564), 'numpy.sum', 'np.sum', (['present_values'], {}), '(present_values)\n', (1548, 1564), True, 'import numpy as np\n'), ((1017, 1033), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (1025, 1033), True, 'import numpy as np\n'), ((1638, 1670), 'numpy.count_nonzero', 'np.count_nonzero', (['present_values'], {}), '(present_values)\n', (1654, 1670), True, 'import numpy as np\n')]
|
'''
Classes for BMI decoding using the Kalman filter.
'''
import numpy as np
from scipy.io import loadmat
from . import bmi
import pickle
import re
class KalmanFilter(bmi.GaussianStateHMM):
"""
Low-level KF, agnostic to application
Model:
x_{t+1} = Ax_t + w_t; w_t ~ N(0, W)
y_t = Cx_t + q_t; q_t ~ N(0, Q)
"""
model_attrs = ['A', 'W', 'C', 'Q', 'C_xpose_Q_inv', 'C_xpose_Q_inv_C']
attrs_to_pickle = ['A', 'W', 'C', 'Q', 'C_xpose_Q_inv', 'C_xpose_Q_inv_C', 'R', 'S', 'T', 'ESS']
def __init__(self, A=None, W=None, C=None, Q=None, is_stochastic=None):
'''
Constructor for KalmanFilter
Parameters
----------
A : np.mat, optional
Model of state transition matrix
W : np.mat, optional
Model of process noise covariance
C : np.mat, optional
Model of conditional distribution between observations and hidden state
Q : np.mat, optional
Model of observation noise covariance
is_stochastic : np.array, optional
Array of booleans specifying for each state whether it is stochastic.
If 'None' specified, all states are assumed to be stochastic
Returns
-------
KalmanFilter instance
'''
if A is None and W is None and C is None and Q is None:
## This condition should only be true in the unpickling phase
pass
else:
self.A = np.mat(A)
self.W = np.mat(W)
self.C = np.mat(C)
self.Q = np.mat(Q)
if is_stochastic is None:
n_states = self.A.shape[0]
self.is_stochastic = np.ones(n_states, dtype=bool)
else:
self.is_stochastic = is_stochastic
self.state_noise = bmi.GaussianState(0.0, self.W)
self.obs_noise = bmi.GaussianState(0.0, self.Q)
self._pickle_init()
def _pickle_init(self):
"""Code common to unpickling and initialization
"""
nS = self.A.shape[0]
offset_row = np.zeros(nS)
offset_row[-1] = 1
self.include_offset = np.array_equal(np.array(self.A)[-1, :], offset_row)
self.alt = nS < self.C.shape[0] # No. of states less than no. of observations
attrs = list(self.__dict__.keys())
if not 'C_xpose_Q_inv_C' in attrs:
C, Q = self.C, self.Q
self.C_xpose_Q_inv = C.T * np.linalg.pinv(Q)
self.C_xpose_Q_inv_C = C.T * np.linalg.pinv(Q) * C
try:
self.is_stochastic
except:
n_states = self.A.shape[0]
self.is_stochastic = np.ones(n_states, dtype=bool)
def _obs_prob(self, state):
'''
Predict the observations based on the model parameters:
y_est = C*x_t + Q
Parameters
----------
state : bmi.GaussianState instance
The model-predicted state
Returns
-------
bmi.GaussianState instance
the model-predicted observations
'''
return self.C * state + self.obs_noise
def _forward_infer(self, st, obs_t, Bu=None, u=None, x_target=None, F=None, obs_is_control_independent=True, **kwargs):
'''
Estimate p(x_t | ..., y_{t-1}, y_t)
Parameters
----------
st : GaussianState
Current estimate (mean and cov) of hidden state
obs_t : np.mat of shape (N, 1)
ARG_DESCR
Bu : DATA_TYPE, optional, default=None
ARG_DESCR
u : DATA_TYPE, optional, default=None
ARG_DESCR
x_target : DATA_TYPE, optional, default=None
ARG_DESCR
obs_is_control_independent : bool, optional, default=True
ARG_DESCR
kwargs : optional kwargs
ARG_DESCR
Returns
-------
GaussianState
New state estimate incorporating the most recent observation
'''
using_control_input = (Bu is not None) or (u is not None) or (x_target is not None)
pred_state = self._ssm_pred(st, target_state=x_target, Bu=Bu, u=u, F=F)
C, Q = self.C, self.Q
P = pred_state.cov
K = self._calc_kalman_gain(P)
I = np.mat(np.eye(self.C.shape[1]))
D = self.C_xpose_Q_inv_C
KC = P*(I - D*P*(I + D*P).I)*D
F = (I - KC)*self.A
post_state = pred_state
if obs_is_control_independent and using_control_input:
post_state.mean += -KC*self.A*st.mean + K*obs_t
else:
post_state.mean += -KC*pred_state.mean + K*obs_t
post_state.cov = (I - KC) * P
return post_state
def set_state_cov(self, n_steps):
C, Q = self.C, self.Q
A, W = self.A, self.W
P = self.state.cov
for k in range(n_steps):
P = A*P*A.T + W
K = self._calc_kalman_gain(P)
I = np.mat(np.eye(self.C.shape[1]))
D = self.C_xpose_Q_inv_C
KC = P*(I - D*P*(I + D*P).I)*D
P = (I - KC) * P
return P
def _calc_kalman_gain(self, P):
'''
Calculate Kalman gain using the 'alternate' definition
Parameters
----------
P : np.matrix
Prediction covariance matrix, i.e., cov(x_{t+1} | y_1, \cdots, y_t)
Returns
-------
K : np.matrix
Kalman gain matrix for the input next-state prediction covariance.
'''
nX = P.shape[0]
I = np.mat(np.eye(nX))
D = self.C_xpose_Q_inv_C
L = self.C_xpose_Q_inv
K = P * (I - D*P*(I + D*P).I) * L
return K
def get_sskf(self, tol=1e-15, return_P=False, dtype=np.array, max_iter=4000,
verbose=False, return_Khist=False, alt=True):
"""Calculate the steady-state KF matrices
value of P returned is the posterior error cov, i.e. P_{t|t}
Parameters
----------
Returns
-------
"""
A, W, C, Q = np.mat(self.A), np.mat(self.W), np.mat(self.C), np.mat(self.Q)
nS = A.shape[0]
P = np.mat(np.zeros([nS, nS]))
I = np.mat(np.eye(nS))
D = self.C_xpose_Q_inv_C
last_K = np.mat(np.ones(C.T.shape))*np.inf
K = np.mat(np.ones(C.T.shape))*0
K_hist = []
iter_idx = 0
last_P = None
while np.linalg.norm(K-last_K) > tol and iter_idx < max_iter:
P = A*P*A.T + W
last_K = K
K = self._calc_kalman_gain(P)
K_hist.append(K)
KC = P*(I - D*P*(I + D*P).I)*D
last_P = P
P -= KC*P;
iter_idx += 1
if verbose:
print(("Converged in %d iterations--error: %g" % (iter_idx, np.linalg.norm(K-last_K))))
n_state_vars, n_state_vars = A.shape
F = (np.mat(np.eye(n_state_vars, n_state_vars)) - KC) * A
if return_P and return_Khist:
return dtype(F), dtype(K), dtype(last_P), K_hist
elif return_P:
return dtype(F), dtype(K), dtype(last_P)
elif return_Khist:
return dtype(F), dtype(K), K_hist
else:
return dtype(F), dtype(K)
def get_kalman_gain_seq(self, N=1000, tol=1e-10, verbose=False):
'''
Calculate K_t for times {0, 1, ..., N}
Parameters
----------
N : int, optional
Number of steps to calculate Kalman gain for, default = 1000
tol : float, optional
Tolerance on K matrix convergence, default = 1e-10
verbose : bool, optional
Print intermediate/debugging information if true, default=False
Returns
-------
list
[K_0, K_1, ..., K_{N-1}]
'''
A, W, H, Q = np.mat(self.kf.A), np.mat(self.kf.W), np.mat(self.kf.H), np.mat(self.kf.Q)
P = np.mat( np.zeros(A.shape) )
K = [None]*N
ss_idx = None # index at which K is steady-state (within tol)
for n in range(N):
if ss_idx is not None and n > ss_idx:
K[n] = K[ss_idx]
else:
P = A*P*A.T + W
K[n] = (P*H.T)*np.linalg.pinv(H*P*H.T + Q);
P -= K[n]*H*P;
if n > 0 and np.linalg.norm(K[n] - K[n-1]) < tol:
ss_idx = n
if verbose:
print(("breaking after %d iterations" % n))
return K, ss_idx
def get_kf_system_mats(self, T):
"""
KF system matrices
x_{t+1} = F_t*x_t + K_t*y_t
Parameters
----------
T : int
Number of system iterations to calculate (F_t, K_t)
Returns
-------
tuple of lists
Each element of the tuple is (F_t, K_t) for a given 't'
"""
F = [None]*T
K, ss_idx = self.get_kalman_gain_seq(N=T, verbose=False)
nX = self.kf.A.shape[0]
I = np.mat(np.eye(nX))
for t in range(T):
if t > ss_idx: F[t] = F[ss_idx]
else: F[t] = (I - K[t]*self.kf.H)*self.kf.A
return F, K
@classmethod
def MLE_obs_model(self, hidden_state, obs, include_offset=True, drives_obs=None,
regularizer=None):
"""
Unconstrained ML estimator of {C, Q} given observations and
the corresponding hidden states
Parameters
----------
include_offset : bool, optional, default=True
A row of all 1's is added as the last row of hidden_state if one is not already present
Returns
-------
"""
assert hidden_state.shape[1] == obs.shape[1], "different numbers of time samples: %s vs %s" % (str(hidden_state.shape), str(obs.shape))
if isinstance(hidden_state, np.ma.core.MaskedArray):
mask = ~hidden_state.mask[0,:] # NOTE THE INVERTER
inds = np.nonzero([ mask[k]*mask[k+1] for k in range(len(mask)-1)])[0]
X = np.mat(hidden_state[:,mask])
T = len(np.nonzero(mask)[0])
Y = np.mat(obs[:,mask])
if include_offset:
if not np.all(X[-1,:] == 1):
X = np.vstack([ X, np.ones([1,T]) ])
else:
num_hidden_state, T = hidden_state.shape
X = np.mat(hidden_state)
if include_offset:
if not np.all(X[-1,:] == 1):
X = np.vstack([ X, np.ones([1,T]) ])
Y = np.mat(obs)
n_states = X.shape[0]
if not drives_obs is None:
X = X[drives_obs, :]
# ML estimate of C and Q
if regularizer is None:
C = np.mat(np.linalg.lstsq(X.T, Y.T)[0].T)
else:
x = X.T
y = Y.T
XtX_lamb = x.T.dot(x) + regularizer * np.eye(x.shape[1])
XtY = x.T.dot(y)
C = np.linalg.solve(XtX_lamb, XtY).T
Q = np.cov(Y - C*X, bias=1)
if np.ndim(Q) == 0:
# if "obs" only has 1 feature, Q might get collapsed to a scalar
Q = np.mat(Q.reshape(1,1))
if not drives_obs is None:
n_obs = C.shape[0]
C_tmp = np.zeros([n_obs, n_states])
C_tmp[:,drives_obs] = C
C = C_tmp
return (C, Q)
@classmethod
def MLE_state_space_model(self, hidden_state, include_offset=True):
'''
Train state space model for KF from fully observed hidden state
Parameters
----------
hidden_state : np.ndarray of shape (N, T)
N = dimensionality of state vector, T = number of observations
include_offset : boolean, optional, default=False
if True, append a "1" to each state vector to add an offset term into the
regression
Returns
-------
A : np.ndarray of shape (N, N)
W : np.ndarray of shape (N, N)
'''
X = hidden_state
T = hidden_state.shape[1]
if include_offset:
X = np.vstack([ X, np.ones([1,T]) ])
X1 = X[:,:-1]
X2 = X[:,1:]
A = np.linalg.lstsq(X1.T, X2.T)[0].T
W = np.cov(X2 - np.dot(A, X1), bias=1)
return A, W
def set_steady_state_pred_cov(self):
'''
Calculate the steady-state prediction covariance and set the current state prediction covariance to the steady-state value
'''
A, W, C, Q = np.mat(self.A), np.mat(self.W), np.mat(self.C), np.mat(self.Q)
D = self.C_xpose_Q_inv_C
nS = A.shape[0]
P = np.mat(np.zeros([nS, nS]))
I = np.mat(np.eye(nS))
last_K = np.mat(np.ones(C.T.shape))*np.inf
K = np.mat(np.ones(C.T.shape))*0
iter_idx = 0
for iter_idx in range(40):
P = A*P*A.T + W
last_K = K
KC = P*(I - D*P*(I + D*P).I)*D
P -= KC*P;
# TODO fix
P[0:3, 0:3] = 0
F, K = self.get_sskf()
F = (I - KC)*A
self._init_state(init_state=self.state.mean, init_cov=P)
def get_K_null(self):
'''
$$y_{null} = K_{null} * y_t$$ gives the "null" component of the spike inputs, i.e. $$K_t*y_{null} = 0_{N\times 1}$$
Parameters
----------
Returns
-------
'''
F, K = self.get_sskf()
K = np.mat(K)
n_neurons = K.shape[1]
K_null = np.eye(n_neurons) - np.linalg.pinv(K) * K
return K_null
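# ----------------------------------------------------------------------
# Editorial sketches (not part of the original source; data are made up).
# The first verifies, with plain numpy, that the "alternate" Kalman gain used
# above, K = P*(I - D*P*(I + D*P)^-1)*C'Q^-1 with D = C'Q^-1*C, matches the
# textbook form K = P*C'*(C*P*C' + Q)^-1 when P and Q are invertible. The second
# fits the observation and state-transition models from a fully observed
# trajectory using the MLE classmethods of KalmanFilter.
def _demo_alternate_gain_identity():
    import numpy as np
    rng = np.random.RandomState(0)
    nX, nY = 3, 5
    P = np.cov(rng.normal(size=(nX, 50)))      # a full-rank prediction covariance
    C = rng.normal(size=(nY, nX))
    Q = np.cov(rng.normal(size=(nY, 100)))     # observation noise covariance
    D = C.T @ np.linalg.pinv(Q) @ C
    L = C.T @ np.linalg.pinv(Q)
    eye = np.eye(nX)
    K_alt = P @ (eye - D @ P @ np.linalg.inv(eye + D @ P)) @ L
    K_std = P @ C.T @ np.linalg.inv(C @ P @ C.T + Q)
    assert np.allclose(K_alt, K_std)
    return K_alt

def _demo_mle_fits():
    import numpy as np
    rng = np.random.RandomState(0)
    T = 200
    x = np.cumsum(rng.normal(size=(2, T)), axis=1)              # random-walk hidden state
    C_true = rng.normal(size=(4, 2))
    y = C_true.dot(x) + rng.normal(scale=0.1, size=(4, T))   # 4 observed channels
    C_hat, Q_hat = KalmanFilter.MLE_obs_model(x, y, include_offset=False)
    A_hat, W_hat = KalmanFilter.MLE_state_space_model(x, include_offset=False)
    return C_hat, Q_hat, A_hat, W_hat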
class KalmanFilterDriftCorrection(KalmanFilter):
attrs_to_pickle = ['A', 'W', 'C', 'Q', 'C_xpose_Q_inv',
'C_xpose_Q_inv_C', 'R', 'S', 'T', 'ESS', 'drift_corr','prev_drift_corr']
noise_threshold = 96.*3.5
def _init_state(self):
if hasattr(self, 'prev_drift_corr'):
self.drift_corr = self.prev_drift_corr.copy()
print(('prev drift corr', np.mean(self.prev_drift_corr)))
else:
self.drift_corr = np.mat(np.zeros(( self.A.shape[0], 1)))
self.prev_drift_corr = np.mat(np.zeros(( self.A.shape[0], 1)))
if hasattr(self, 'noise_rej'):
if self.noise_rej:
print(('noise rej thresh: ', self.noise_rej_cutoff))
else:
self.noise_rej = False
self.noise_cnt = 0
super(KalmanFilterDriftCorrection, self)._init_state()
def _forward_infer(self, st, obs_t, Bu=None, u=None, x_target=None, F=None, obs_is_control_independent=True, **kwargs):
if self.noise_rej:
if np.sum(obs_t) > self.noise_rej_cutoff:
#print np.sum(obs_t), 'rejecting noise!'
self.noise_cnt += 1
obs_t = np.mat(self.noise_rej_mFR).T
state = super(KalmanFilterDriftCorrection, self)._forward_infer(st, obs_t, Bu=None, u=None, x_target=None, F=None,
obs_is_control_independent=True, **kwargs)
### Apply Drift Correction ###
decoded_vel = state.mean.copy()
state.mean[self.vel_ix] = decoded_vel[self.vel_ix] - self.drift_corr[self.vel_ix]
### Update Drift Correcton ###
self.drift_corr[self.vel_ix] = self.drift_corr[self.vel_ix]*self.drift_rho + decoded_vel[self.vel_ix]*float(1. - self.drift_rho)
self.prev_drift_corr = self.drift_corr.copy()
return state
class PCAKalmanFilter(KalmanFilter):
'''
A modified KalmanFilter where the Kalman gain is confined to produce outputs in a lower-dimensional linear subspace, i.e. some principal component space
'''
def _forward_infer(self, st, obs_t, Bu=None, u=None, target_state=None, obs_is_control_independent=True, **kwargs):
'''
See KalmanFilter._forward_infer for docs
'''
using_control_input = (Bu is not None) or (u is not None) or (target_state is not None)
pred_state = self._ssm_pred(st, target_state=target_state, Bu=Bu, u=u)
C, Q = self.C, self.Q
P = pred_state.cov
try:
M = self.M
pca_offset = self.pca_offset
except:
print("couldn't extract PCA parameters!")
M = 1
pca_offset = 0
K = self._calc_kalman_gain(P)
I = np.mat(np.eye(self.C.shape[1]))
D = self.C_xpose_Q_inv_C
KC = K*C
F = (I - KC)*self.A
post_state = pred_state
if obs_is_control_independent and using_control_input:
post_state.mean += -KC*self.A*st.mean + M*K*obs_t + pca_offset
else:
post_state.mean += -KC*pred_state.mean + M*K*obs_t + pca_offset
post_state.cov = (I - KC) * P
return post_state
def __getstate__(self):
'''
See KalmanFilter.__getstate__ for docs
'''
data = super(PCAKalmanFilter, self).__getstate__()
data['M'] = self.M
data['pca_offset'] = self.pca_offset
return data
def __setstate__(self, state):
'''
See KalmanFilter.__setstate__ for docs
'''
super(PCAKalmanFilter, self).__setstate__(state)
self.M = state['M']
self.pca_offset = state['pca_offset']
class FAKalmanFilter(KalmanFilter):
def _forward_infer(self, st, obs_t, Bu=None, u=None, target_state=None, obs_is_control_independent=True, **kwargs):
input_dict = {}
if hasattr(self, 'FA_kwargs'):
input_type = self.FA_input + '_input'
input_dict['all_input'] = obs_t.copy()
dmn = obs_t - self.FA_kwargs['fa_mu']
shar = (self.FA_kwargs['fa_sharL'] * dmn)
priv = (dmn - shar)
main_shar = (self.FA_kwargs['fa_main_shared'] * dmn)
main_priv = (dmn - main_shar)
FA = self.FA_kwargs['FA_model']
inp = obs_t.copy()
if inp.shape[1] == 1:
inp = inp.T # want 1 x neurons
z = FA.transform(dmn.T)
z = z.T #Transform to fact x 1
z = z[:self.FA_kwargs['fa_main_shar_n_dim'], :] #only use number in main space
input_dict['private_input'] = priv + self.FA_kwargs['fa_mu']
input_dict['shared_input'] = shar + self.FA_kwargs['fa_mu']
input_dict['private_scaled_input'] = np.multiply(priv, self.FA_kwargs['fa_priv_var_sc']) + self.FA_kwargs['fa_mu']
input_dict['shared_scaled_input'] = np.multiply(shar, self.FA_kwargs['fa_shar_var_sc']) + self.FA_kwargs['fa_mu']
input_dict['all_scaled_by_shar_input'] = np.multiply(dmn, self.FA_kwargs['fa_shar_var_sc']) + self.FA_kwargs['fa_mu']
input_dict['sc_shared+unsc_priv_input'] = input_dict['shared_scaled_input'] + input_dict['private_input'] - self.FA_kwargs['fa_mu']
input_dict['sc_shared+sc_priv_input'] = input_dict['shared_scaled_input'] + input_dict['private_scaled_input']- self.FA_kwargs['fa_mu']
input_dict['main_shared_input'] = main_shar + self.FA_kwargs['fa_mu']
input_dict['main_sc_shared_input'] = np.multiply(main_shar, self.FA_kwargs['fa_main_shared_sc']) + self.FA_kwargs['fa_mu']
input_dict['main_sc_shar+unsc_priv_input'] = input_dict['main_sc_shared_input'] + input_dict['private_input'] - self.FA_kwargs['fa_mu']
input_dict['main_sc_shar+sc_priv_input'] = input_dict['main_sc_shared_input'] + input_dict['private_scaled_input'] - self.FA_kwargs['fa_mu']
input_dict['main_sc_private_input'] = np.multiply(main_priv, self.FA_kwargs['fa_main_private_sc']) + self.FA_kwargs['fa_mu']
#z = self.FA_kwargs['u_svd'].T*self.FA_kwargs['uut_psi_inv']*dmn
input_dict['split_input'] = np.vstack((z, main_priv))
#print input_dict['split_input'].shape
own_pc_trans = np.mat(self.FA_kwargs['own_pc_trans'])*np.mat(dmn)
input_dict['pca_input'] = own_pc_trans + self.FA_kwargs['fa_mu']
if input_type in list(input_dict.keys()):
#print input_type
obs_t_mod = input_dict[input_type]
else:
print(input_type)
raise Exception("Error in FA_KF input_type, none of the expected inputs")
else:
obs_t_mod = obs_t.copy()
input_dict['task_input'] = obs_t_mod.copy()
post_state = super(FAKalmanFilter, self)._forward_infer(st, obs_t_mod, Bu=Bu, u=u, target_state=target_state,
obs_is_control_independent=obs_is_control_independent, **kwargs)
self.FA_input_dict = input_dict
return post_state
class KFDecoder(bmi.BMI, bmi.Decoder):
'''
Wrapper for KalmanFilter specifically for the application of BMI decoding.
'''
def __init__(self, *args, **kwargs):
'''
Constructor for KFDecoder
Parameters
----------
*args, **kwargs : see riglib.bmi.bmi.Decoder for arguments
Returns
-------
KFDecoder instance
'''
super(KFDecoder, self).__init__(*args, **kwargs)
mFR = kwargs.pop('mFR', 0.)
sdFR = kwargs.pop('sdFR', 1.)
self.mFR = mFR
self.sdFR = sdFR
self.zeromeanunits = None
self.zscore = False
self.kf = self.filt
def _pickle_init(self):
super(KFDecoder, self)._pickle_init()
if not hasattr(self.filt, 'B'):
self.filt.B = np.mat(np.vstack([np.zeros([3,3]), np.eye(3)*1000*self.binlen, np.zeros(3)]))
if not hasattr(self.filt, 'F'):
self.filt.F = np.mat(np.zeros([3,7]))
def init_zscore(self, mFR_curr, sdFR_curr):
'''
Initialize parameters for zcoring observations, if that feature is enabled in the decoder object
Parameters
----------
mFR_curr : np.array of shape (N,)
Current mean estimates (as opposed to potentially old estimates already stored in the decoder)
sdFR_curr : np.array of shape (N,)
Current standard deviation estimates (as opposed to potentially old estimates already stored in the decoder)
Returns
-------
None
'''
# if interfacing with Kinarm system, may mean and sd will be shape (n, 1)
self.zeromeanunits, = np.nonzero(mFR_curr == 0) #find any units with a mean FR of zero for this session
sdFR_curr[self.zeromeanunits] = np.nan # set mean and SD of quiet units to nan to avoid divide by 0 error
mFR_curr[self.zeromeanunits] = np.nan
#self.sdFR_ratio = self.sdFR/sdFR_curr
#self.mFR_diff = mFR_curr-self.mFR
#self.mFR_curr = mFR_curr
self.mFR = mFR_curr
self.sdFR = sdFR_curr
self.zscore = True
def update_params(self, new_params, steady_state=True):
'''
Update the decoder parameters if new parameters are available (e.g., by CLDA). See Decoder.update_params
'''
super(KFDecoder, self).update_params(new_params)
# set the KF to the new steady state
if steady_state:
self.kf.set_steady_state_pred_cov()
def __setstate__(self, state):
"""
Set decoder state after un-pickling. See Decoder.__setstate__, which runs the _pickle_init function at some point during the un-pickling process
Parameters
----------
state : dict
Variables to set as attributes of the unpickled object.
Returns
-------
None
"""
if 'kf' in state and 'filt' not in state:
state['filt'] = state['kf']
super(KFDecoder, self).__setstate__(state)
def plot_K(self, **kwargs):
'''
Plot the Kalman gain weights
Parameters
----------
**kwargs : optional kwargs
These are passed to the plot function (e.g., which rows to plot)
Returns
-------
None
'''
F, K = self.kf.get_sskf()
self.plot_pds(K.T, **kwargs)
def shuffle(self, shuffle_baselines=False):
'''
Shuffle the neural model
Parameters
----------
shuffle_baselines : bool, optional, default = False
If true, shuffle the estimates of the baseline firing rates in addition to the state-dependent neural tuning parameters.
Returns
-------
None (shuffling is done on the current decoder object)
'''
# generate random permutation
import random
inds = list(range(self.filt.C.shape[0]))
random.shuffle(inds)
# shuffle rows of C, and rows+cols of Q
C_orig = self.filt.C.copy()
self.filt.C = self.filt.C[inds, :]
if not shuffle_baselines:
self.filt.C[:,-1] = C_orig[:,-1]
self.filt.Q = self.filt.Q[inds, :]
self.filt.Q = self.filt.Q[:, inds]
self.filt.C_xpose_Q_inv = self.filt.C.T * np.linalg.pinv(self.filt.Q)
# RML sufficient statistics (S and T, but not R and ESS)
# shuffle rows of S, and rows+cols of T
try:
self.filt.S = self.filt.S[inds, :]
self.filt.T = self.filt.T[inds, :]
self.filt.T = self.filt.T[:, inds]
except AttributeError:
# if this decoder never had the RML sufficient statistics
# (R, S, T, and ESS) as attributes of self.filt
pass
def change_binlen(self, new_binlen, screen_update_rate=60.0):
'''
Function to change the binlen of the KFDecoder analytically.
Parameters
----------
new_binlen : float
New bin length of the decoder, in seconds
screen_update_rate: float, optional, default = 60Hz
Rate at which the __call__ function will be called
'''
bin_gain = new_binlen / self.binlen
self.binlen = new_binlen
# Alter bminum, bmicount, # of subbins
screen_update_period = 1./screen_update_rate
if self.binlen < screen_update_period:
self.n_subbins = int(screen_update_period / self.binlen)
self.bmicount = 0
if hasattr(self, 'bminum'):
del self.bminum
else:
self.n_subbins = 1
self.bminum = int(self.binlen / screen_update_period)
self.bmicount = 0
# change C matrix
self.filt.C *= bin_gain
self.filt.Q *= bin_gain**2
self.filt.C_xpose_Q_inv *= 1./bin_gain
# change state space Model
# TODO generalize this beyond endpoint
from . import state_space_models
A, W = self.ssm.get_ssm_matrices(update_rate=new_binlen)
self.filt.A = A
self.filt.W = W
def conv_to_steady_state(self):
'''
Create an SSKFDecoder object based on KalmanFilter parameters in this KFDecoder object
'''
from . import sskfdecoder
self.filt = sskfdecoder.SteadyStateKalmanFilter(A=self.filt.A, W=self.filt.W, C=self.filt.C, Q=self.filt.Q)
def subselect_units(self, units):
'''
Prune units from the KFDecoder, e.g., due to loss of recordings for a particular cell
Parameters
----------
units : string or np.ndarray of shape (N,2)
The units which should be KEPT in the decoder
Returns
-------
KFDecoder
New KFDecoder object using only a subset of the cells of the original KFDecoder
'''
# Parse units into list of indices to keep
inds_to_keep = self._proc_units(units, 'keep')
dec_new = self._return_proc_units_decoder(inds_to_keep)
return dec_new
#self._save_new_dec(dec_new, '_subset')
def project_Q(C_v, Q_hat):
"""
Deprecated! See clda.KFRML_IVC
"""
print("projecting!")
from scipy.optimize import fmin_bfgs, fmin_ncg
C_v = np.mat(C_v)
Q_hat = np.mat(Q_hat)
Q_hat_inv = Q_hat.I
c_1 = C_v[:,0]
c_2 = C_v[:,1]
A_1 = c_1*c_1.T - c_2*c_2.T
A_2 = c_2*c_1.T
A_3 = c_1*c_2.T
A = [A_1, A_2, A_3]
if 1:
U = np.hstack([c_1 - c_2, c_2, c_1])
V = np.vstack([(c_1 + c_2).T, c_1.T, c_2.T])
C_inv_fn = lambda nu: np.mat(np.diag([1./nu[0], 1./(nu[0] + nu[1]), 1./(nu[2] - nu[0]) ]))
C_fn = lambda nu: np.mat(np.diag([nu[0], (nu[0] + nu[1]), (nu[2] - nu[0]) ]))
nu_0 = np.zeros(3)
c_scalars = np.ones(3)
else:
u_1, s_1, v_1 = np.linalg.svd(A_1)
c_scalars = np.hstack([s_1[0:2], 1, 1])
U = np.hstack([u_1[:,0:2], c_2, c_1])
V = np.vstack([v_1[0:2, :], c_1.T, c_2.T])
C_fn = lambda nu: np.mat(np.diag(nu * c_scalars))
nu_0 = np.zeros(4)
def cost_fn_gen(nu, return_type='cost'):
C = C_fn(nu)
S_star_inv = Q_hat + U*C_fn(nu)*V
#if return_type == 'cost':
# print C_v.T * S_star_inv * C_v
if np.any(np.diag(C) == 0):
S_star = S_star_inv.I
else:
C_inv = C.I
S_star = Q_hat_inv - Q_hat_inv * U * (C_inv + V*Q_hat_inv*U).I*V * Q_hat_inv;
# log-determinant using LU decomposition, required if Q is large, i.e. lots of simultaneous observations
cost = -np.log(np.linalg.det(S_star_inv))
#cost = -np.prod(np.linalg.slogdet(S_star_inv))
# TODO gradient dimension needs to be the same as nu
#grad = -np.array([np.trace(S_star*U[:,0] * c_scalars[0] * V[0,:]) for k in range(len(nu))])
#grad = -1e-4*np.array([np.trace(S_star*A[0]), np.trace(S_star*A[1]), np.trace(S_star*A[2])])
#print c_2.T*S_star*c_2
grad = -1e-4*np.array(np.hstack([c_1.T*S_star*c_1 - c_2.T*S_star*c_2, c_1.T*S_star*c_2, c_2.T*S_star*c_1])).ravel()
S = S_star
hess = np.mat([[np.trace(S*A_1*S*A_1), np.trace(S*A_2*S*A_1), np.trace(S*A_3*S*A_1)],
[np.trace(S*A_1*S*A_2), np.trace(S*A_2*S*A_2), np.trace(S*A_3*S*A_2)],
[np.trace(S*A_1*S*A_3), np.trace(S*A_2*S*A_3), np.trace(S*A_3*S*A_3)]])
#grad = hess*np.mat(grad.reshape(-1,1))
#log = logging.getLogger()
#print "nu = %s, cost = %g, grad=%s" % (nu, cost, grad)
#log.warning("nu = %s, cost = %g, grad=%s" % (nu, cost, grad))
if return_type == 'cost':
return cost
elif return_type == 'grad':
return grad
elif return_type == 'hess':
return hess
elif return_type == 'opt_val':
return S_star
else:
raise ValueError("Cost function doesn't know how to return this: %s" % return_type)
cost_fn = lambda nu: cost_fn_gen(nu, return_type = 'cost')
grad = lambda nu: cost_fn_gen(nu, return_type = 'grad')
hess = lambda nu: cost_fn_gen(nu, return_type = 'hess')
arg_opt = lambda nu: cost_fn_gen(nu, return_type = 'opt_val')
# Call optimization routine
#v_star = fmin_ncg(cost_fn, nu_0, fprime=grad, fhess=hess, maxiter=10000)
#print v_star
#v_star = fmin_bfgs(cost_fn, nu_0, maxiter=10000, gtol=1e-15)
v_star = fmin_bfgs(cost_fn, nu_0, fprime=grad, maxiter=10000, gtol=1e-15)
print(v_star)
Q_inv = arg_opt(v_star)
Q = Q_inv.I
Q = Q_hat + U * C_fn(v_star) * V
# TODO print out (log) a more useful measure of success
#print C_v.T * Q_inv * C_v
#print C_v.T * Q.I * C_v
#print v_star
return Q
|
[
"numpy.trace",
"numpy.sum",
"random.shuffle",
"numpy.ones",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.mean",
"numpy.diag",
"numpy.linalg.pinv",
"numpy.linalg.solve",
"numpy.mat",
"numpy.multiply",
"numpy.ndim",
"numpy.linalg.det",
"numpy.cov",
"numpy.hstack",
"scipy.optimize.fmin_bfgs",
"numpy.dot",
"numpy.vstack",
"numpy.all",
"numpy.linalg.lstsq",
"numpy.zeros",
"numpy.nonzero",
"numpy.array",
"numpy.eye"
] |
[((28104, 28115), 'numpy.mat', 'np.mat', (['C_v'], {}), '(C_v)\n', (28110, 28115), True, 'import numpy as np\n'), ((28128, 28141), 'numpy.mat', 'np.mat', (['Q_hat'], {}), '(Q_hat)\n', (28134, 28141), True, 'import numpy as np\n'), ((31330, 31394), 'scipy.optimize.fmin_bfgs', 'fmin_bfgs', (['cost_fn', 'nu_0'], {'fprime': 'grad', 'maxiter': '(10000)', 'gtol': '(1e-15)'}), '(cost_fn, nu_0, fprime=grad, maxiter=10000, gtol=1e-15)\n', (31339, 31394), False, 'from scipy.optimize import fmin_bfgs, fmin_ncg\n'), ((2140, 2152), 'numpy.zeros', 'np.zeros', (['nS'], {}), '(nS)\n', (2148, 2152), True, 'import numpy as np\n'), ((11134, 11159), 'numpy.cov', 'np.cov', (['(Y - C * X)'], {'bias': '(1)'}), '(Y - C * X, bias=1)\n', (11140, 11159), True, 'import numpy as np\n'), ((13575, 13584), 'numpy.mat', 'np.mat', (['K'], {}), '(K)\n', (13581, 13584), True, 'import numpy as np\n'), ((22470, 22495), 'numpy.nonzero', 'np.nonzero', (['(mFR_curr == 0)'], {}), '(mFR_curr == 0)\n', (22480, 22495), True, 'import numpy as np\n'), ((24795, 24815), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (24809, 24815), False, 'import random\n'), ((28323, 28355), 'numpy.hstack', 'np.hstack', (['[c_1 - c_2, c_2, c_1]'], {}), '([c_1 - c_2, c_2, c_1])\n', (28332, 28355), True, 'import numpy as np\n'), ((28368, 28408), 'numpy.vstack', 'np.vstack', (['[(c_1 + c_2).T, c_1.T, c_2.T]'], {}), '([(c_1 + c_2).T, c_1.T, c_2.T])\n', (28377, 28408), True, 'import numpy as np\n'), ((28609, 28620), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (28617, 28620), True, 'import numpy as np\n'), ((28641, 28651), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (28648, 28651), True, 'import numpy as np\n'), ((28686, 28704), 'numpy.linalg.svd', 'np.linalg.svd', (['A_1'], {}), '(A_1)\n', (28699, 28704), True, 'import numpy as np\n'), ((28725, 28752), 'numpy.hstack', 'np.hstack', (['[s_1[0:2], 1, 1]'], {}), '([s_1[0:2], 1, 1])\n', (28734, 28752), True, 'import numpy as np\n'), ((28765, 28799), 'numpy.hstack', 'np.hstack', (['[u_1[:, 0:2], c_2, c_1]'], {}), '([u_1[:, 0:2], c_2, c_1])\n', (28774, 28799), True, 'import numpy as np\n'), ((28811, 28849), 'numpy.vstack', 'np.vstack', (['[v_1[0:2, :], c_1.T, c_2.T]'], {}), '([v_1[0:2, :], c_1.T, c_2.T])\n', (28820, 28849), True, 'import numpy as np\n'), ((28923, 28934), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (28931, 28934), True, 'import numpy as np\n'), ((1505, 1514), 'numpy.mat', 'np.mat', (['A'], {}), '(A)\n', (1511, 1514), True, 'import numpy as np\n'), ((1536, 1545), 'numpy.mat', 'np.mat', (['W'], {}), '(W)\n', (1542, 1545), True, 'import numpy as np\n'), ((1567, 1576), 'numpy.mat', 'np.mat', (['C'], {}), '(C)\n', (1573, 1576), True, 'import numpy as np\n'), ((1598, 1607), 'numpy.mat', 'np.mat', (['Q'], {}), '(Q)\n', (1604, 1607), True, 'import numpy as np\n'), ((4341, 4364), 'numpy.eye', 'np.eye', (['self.C.shape[1]'], {}), '(self.C.shape[1])\n', (4347, 4364), True, 'import numpy as np\n'), ((5634, 5644), 'numpy.eye', 'np.eye', (['nX'], {}), '(nX)\n', (5640, 5644), True, 'import numpy as np\n'), ((6139, 6153), 'numpy.mat', 'np.mat', (['self.A'], {}), '(self.A)\n', (6145, 6153), True, 'import numpy as np\n'), ((6155, 6169), 'numpy.mat', 'np.mat', (['self.W'], {}), '(self.W)\n', (6161, 6169), True, 'import numpy as np\n'), ((6171, 6185), 'numpy.mat', 'np.mat', (['self.C'], {}), '(self.C)\n', (6177, 6185), True, 'import numpy as np\n'), ((6187, 6201), 'numpy.mat', 'np.mat', (['self.Q'], {}), '(self.Q)\n', (6193, 6201), True, 'import numpy as np\n'), ((6246, 6264), 
'numpy.zeros', 'np.zeros', (['[nS, nS]'], {}), '([nS, nS])\n', (6254, 6264), True, 'import numpy as np\n'), ((6285, 6295), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (6291, 6295), True, 'import numpy as np\n'), ((7927, 7944), 'numpy.mat', 'np.mat', (['self.kf.A'], {}), '(self.kf.A)\n', (7933, 7944), True, 'import numpy as np\n'), ((7946, 7963), 'numpy.mat', 'np.mat', (['self.kf.W'], {}), '(self.kf.W)\n', (7952, 7963), True, 'import numpy as np\n'), ((7965, 7982), 'numpy.mat', 'np.mat', (['self.kf.H'], {}), '(self.kf.H)\n', (7971, 7982), True, 'import numpy as np\n'), ((7984, 8001), 'numpy.mat', 'np.mat', (['self.kf.Q'], {}), '(self.kf.Q)\n', (7990, 8001), True, 'import numpy as np\n'), ((8022, 8039), 'numpy.zeros', 'np.zeros', (['A.shape'], {}), '(A.shape)\n', (8030, 8039), True, 'import numpy as np\n'), ((9125, 9135), 'numpy.eye', 'np.eye', (['nX'], {}), '(nX)\n', (9131, 9135), True, 'import numpy as np\n'), ((10176, 10205), 'numpy.mat', 'np.mat', (['hidden_state[:, mask]'], {}), '(hidden_state[:, mask])\n', (10182, 10205), True, 'import numpy as np\n'), ((10267, 10287), 'numpy.mat', 'np.mat', (['obs[:, mask]'], {}), '(obs[:, mask])\n', (10273, 10287), True, 'import numpy as np\n'), ((10503, 10523), 'numpy.mat', 'np.mat', (['hidden_state'], {}), '(hidden_state)\n', (10509, 10523), True, 'import numpy as np\n'), ((10673, 10684), 'numpy.mat', 'np.mat', (['obs'], {}), '(obs)\n', (10679, 10684), True, 'import numpy as np\n'), ((11170, 11180), 'numpy.ndim', 'np.ndim', (['Q'], {}), '(Q)\n', (11177, 11180), True, 'import numpy as np\n'), ((11390, 11417), 'numpy.zeros', 'np.zeros', (['[n_obs, n_states]'], {}), '([n_obs, n_states])\n', (11398, 11417), True, 'import numpy as np\n'), ((12657, 12671), 'numpy.mat', 'np.mat', (['self.A'], {}), '(self.A)\n', (12663, 12671), True, 'import numpy as np\n'), ((12673, 12687), 'numpy.mat', 'np.mat', (['self.W'], {}), '(self.W)\n', (12679, 12687), True, 'import numpy as np\n'), ((12689, 12703), 'numpy.mat', 'np.mat', (['self.C'], {}), '(self.C)\n', (12695, 12703), True, 'import numpy as np\n'), ((12705, 12719), 'numpy.mat', 'np.mat', (['self.Q'], {}), '(self.Q)\n', (12711, 12719), True, 'import numpy as np\n'), ((12797, 12815), 'numpy.zeros', 'np.zeros', (['[nS, nS]'], {}), '([nS, nS])\n', (12805, 12815), True, 'import numpy as np\n'), ((12836, 12846), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (12842, 12846), True, 'import numpy as np\n'), ((13633, 13650), 'numpy.eye', 'np.eye', (['n_neurons'], {}), '(n_neurons)\n', (13639, 13650), True, 'import numpy as np\n'), ((16436, 16459), 'numpy.eye', 'np.eye', (['self.C.shape[1]'], {}), '(self.C.shape[1])\n', (16442, 16459), True, 'import numpy as np\n'), ((19868, 19893), 'numpy.vstack', 'np.vstack', (['(z, main_priv)'], {}), '((z, main_priv))\n', (19877, 19893), True, 'import numpy as np\n'), ((25160, 25189), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.filt.Q.I'], {}), '(self.filt.Q.I)\n', (25174, 25189), True, 'import numpy as np\n'), ((1727, 1756), 'numpy.ones', 'np.ones', (['n_states'], {'dtype': 'bool'}), '(n_states, dtype=bool)\n', (1734, 1756), True, 'import numpy as np\n'), ((2225, 2241), 'numpy.array', 'np.array', (['self.A'], {}), '(self.A)\n', (2233, 2241), True, 'import numpy as np\n'), ((2509, 2526), 'numpy.linalg.pinv', 'np.linalg.pinv', (['Q'], {}), '(Q)\n', (2523, 2526), True, 'import numpy as np\n'), ((2723, 2752), 'numpy.ones', 'np.ones', (['n_states'], {'dtype': 'bool'}), '(n_states, dtype=bool)\n', (2730, 2752), True, 'import numpy as np\n'), ((5031, 5054), 'numpy.eye', 'np.eye', 
(['self.C.shape[1]'], {}), '(self.C.shape[1])\n', (5037, 5054), True, 'import numpy as np\n'), ((6357, 6375), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (6364, 6375), True, 'import numpy as np\n'), ((6403, 6421), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (6410, 6421), True, 'import numpy as np\n'), ((6504, 6530), 'numpy.linalg.norm', 'np.linalg.norm', (['(K - last_K)'], {}), '(K - last_K)\n', (6518, 6530), True, 'import numpy as np\n'), ((11089, 11119), 'numpy.linalg.solve', 'np.linalg.solve', (['XtX_lamb', 'XtY'], {}), '(XtX_lamb, XtY)\n', (11104, 11119), True, 'import numpy as np\n'), ((12338, 12365), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X1.T', 'X2.T'], {}), '(X1.T, X2.T)\n', (12353, 12365), True, 'import numpy as np\n'), ((12395, 12408), 'numpy.dot', 'np.dot', (['A', 'X1'], {}), '(A, X1)\n', (12401, 12408), True, 'import numpy as np\n'), ((12873, 12891), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (12880, 12891), True, 'import numpy as np\n'), ((12919, 12937), 'numpy.ones', 'np.ones', (['C.T.shape'], {}), '(C.T.shape)\n', (12926, 12937), True, 'import numpy as np\n'), ((13653, 13670), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K'], {}), '(K)\n', (13667, 13670), True, 'import numpy as np\n'), ((14170, 14200), 'numpy.zeros', 'np.zeros', (['(self.A.shape[0], 1)'], {}), '((self.A.shape[0], 1))\n', (14178, 14200), True, 'import numpy as np\n'), ((14245, 14275), 'numpy.zeros', 'np.zeros', (['(self.A.shape[0], 1)'], {}), '((self.A.shape[0], 1))\n', (14253, 14275), True, 'import numpy as np\n'), ((14742, 14755), 'numpy.sum', 'np.sum', (['obs_t'], {}), '(obs_t)\n', (14748, 14755), True, 'import numpy as np\n'), ((18465, 18516), 'numpy.multiply', 'np.multiply', (['priv', "self.FA_kwargs['fa_priv_var_sc']"], {}), "(priv, self.FA_kwargs['fa_priv_var_sc'])\n", (18476, 18516), True, 'import numpy as np\n'), ((18591, 18642), 'numpy.multiply', 'np.multiply', (['shar', "self.FA_kwargs['fa_shar_var_sc']"], {}), "(shar, self.FA_kwargs['fa_shar_var_sc'])\n", (18602, 18642), True, 'import numpy as np\n'), ((18723, 18773), 'numpy.multiply', 'np.multiply', (['dmn', "self.FA_kwargs['fa_shar_var_sc']"], {}), "(dmn, self.FA_kwargs['fa_shar_var_sc'])\n", (18734, 18773), True, 'import numpy as np\n'), ((19225, 19284), 'numpy.multiply', 'np.multiply', (['main_shar', "self.FA_kwargs['fa_main_shared_sc']"], {}), "(main_shar, self.FA_kwargs['fa_main_shared_sc'])\n", (19236, 19284), True, 'import numpy as np\n'), ((19663, 19723), 'numpy.multiply', 'np.multiply', (['main_priv', "self.FA_kwargs['fa_main_private_sc']"], {}), "(main_priv, self.FA_kwargs['fa_main_private_sc'])\n", (19674, 19723), True, 'import numpy as np\n'), ((19985, 20023), 'numpy.mat', 'np.mat', (["self.FA_kwargs['own_pc_trans']"], {}), "(self.FA_kwargs['own_pc_trans'])\n", (19991, 20023), True, 'import numpy as np\n'), ((20024, 20035), 'numpy.mat', 'np.mat', (['dmn'], {}), '(dmn)\n', (20030, 20035), True, 'import numpy as np\n'), ((21748, 21764), 'numpy.zeros', 'np.zeros', (['[3, 7]'], {}), '([3, 7])\n', (21756, 21764), True, 'import numpy as np\n'), ((28446, 28514), 'numpy.diag', 'np.diag', (['[1.0 / nu[0], 1.0 / (nu[0] + nu[1]), 1.0 / (nu[2] - nu[0])]'], {}), '([1.0 / nu[0], 1.0 / (nu[0] + nu[1]), 1.0 / (nu[2] - nu[0])])\n', (28453, 28514), True, 'import numpy as np\n'), ((28541, 28587), 'numpy.diag', 'np.diag', (['[nu[0], nu[0] + nu[1], nu[2] - nu[0]]'], {}), '([nu[0], nu[0] + nu[1], nu[2] - nu[0]])\n', (28548, 28587), True, 'import numpy as np\n'), ((28883, 28906), 'numpy.diag', 
'np.diag', (['(nu * c_scalars)'], {}), '(nu * c_scalars)\n', (28890, 28906), True, 'import numpy as np\n'), ((29146, 29156), 'numpy.diag', 'np.diag', (['C'], {}), '(C)\n', (29153, 29156), True, 'import numpy as np\n'), ((29471, 29496), 'numpy.linalg.det', 'np.linalg.det', (['S_star_inv'], {}), '(S_star_inv)\n', (29484, 29496), True, 'import numpy as np\n'), ((2568, 2585), 'numpy.linalg.pinv', 'np.linalg.pinv', (['Q'], {}), '(Q)\n', (2582, 2585), True, 'import numpy as np\n'), ((6990, 7024), 'numpy.eye', 'np.eye', (['n_state_vars', 'n_state_vars'], {}), '(n_state_vars, n_state_vars)\n', (6996, 7024), True, 'import numpy as np\n'), ((10225, 10241), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (10235, 10241), True, 'import numpy as np\n'), ((10341, 10362), 'numpy.all', 'np.all', (['(X[-1, :] == 1)'], {}), '(X[-1, :] == 1)\n', (10347, 10362), True, 'import numpy as np\n'), ((10578, 10599), 'numpy.all', 'np.all', (['(X[-1, :] == 1)'], {}), '(X[-1, :] == 1)\n', (10584, 10599), True, 'import numpy as np\n'), ((11025, 11043), 'numpy.eye', 'np.eye', (['x.shape[1]'], {}), '(x.shape[1])\n', (11031, 11043), True, 'import numpy as np\n'), ((12257, 12272), 'numpy.ones', 'np.ones', (['[1, T]'], {}), '([1, T])\n', (12264, 12272), True, 'import numpy as np\n'), ((14087, 14116), 'numpy.mean', 'np.mean', (['self.prev_drift_corr'], {}), '(self.prev_drift_corr)\n', (14094, 14116), True, 'import numpy as np\n'), ((14899, 14925), 'numpy.mat', 'np.mat', (['self.noise_rej_mFR'], {}), '(self.noise_rej_mFR)\n', (14905, 14925), True, 'import numpy as np\n'), ((30026, 30053), 'numpy.trace', 'np.trace', (['(S * A_1 * S * A_1)'], {}), '(S * A_1 * S * A_1)\n', (30034, 30053), True, 'import numpy as np\n'), ((30049, 30076), 'numpy.trace', 'np.trace', (['(S * A_2 * S * A_1)'], {}), '(S * A_2 * S * A_1)\n', (30057, 30076), True, 'import numpy as np\n'), ((30072, 30099), 'numpy.trace', 'np.trace', (['(S * A_3 * S * A_1)'], {}), '(S * A_3 * S * A_1)\n', (30080, 30099), True, 'import numpy as np\n'), ((30120, 30147), 'numpy.trace', 'np.trace', (['(S * A_1 * S * A_2)'], {}), '(S * A_1 * S * A_2)\n', (30128, 30147), True, 'import numpy as np\n'), ((30143, 30170), 'numpy.trace', 'np.trace', (['(S * A_2 * S * A_2)'], {}), '(S * A_2 * S * A_2)\n', (30151, 30170), True, 'import numpy as np\n'), ((30166, 30193), 'numpy.trace', 'np.trace', (['(S * A_3 * S * A_2)'], {}), '(S * A_3 * S * A_2)\n', (30174, 30193), True, 'import numpy as np\n'), ((30214, 30241), 'numpy.trace', 'np.trace', (['(S * A_1 * S * A_3)'], {}), '(S * A_1 * S * A_3)\n', (30222, 30241), True, 'import numpy as np\n'), ((30237, 30264), 'numpy.trace', 'np.trace', (['(S * A_2 * S * A_3)'], {}), '(S * A_2 * S * A_3)\n', (30245, 30264), True, 'import numpy as np\n'), ((30260, 30287), 'numpy.trace', 'np.trace', (['(S * A_3 * S * A_3)'], {}), '(S * A_3 * S * A_3)\n', (30268, 30287), True, 'import numpy as np\n'), ((6891, 6917), 'numpy.linalg.norm', 'np.linalg.norm', (['(K - last_K)'], {}), '(K - last_K)\n', (6905, 6917), True, 'import numpy as np\n'), ((8420, 8451), 'numpy.linalg.norm', 'np.linalg.norm', (['(K[n] - K[n - 1])'], {}), '(K[n] - K[n - 1])\n', (8434, 8451), True, 'import numpy as np\n'), ((10889, 10914), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X.T', 'Y.T'], {}), '(X.T, Y.T)\n', (10904, 10914), True, 'import numpy as np\n'), ((21614, 21630), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (21622, 21630), True, 'import numpy as np\n'), ((21659, 21670), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (21667, 21670), True, 'import 
numpy as np\n'), ((29889, 29993), 'numpy.hstack', 'np.hstack', (['[c_1.T * S_star * c_1 - c_2.T * S_star * c_2, c_1.T * S_star * c_2, c_2.T *\n S_star * c_1]'], {}), '([c_1.T * S_star * c_1 - c_2.T * S_star * c_2, c_1.T * S_star *\n c_2, c_2.T * S_star * c_1])\n', (29898, 29993), True, 'import numpy as np\n'), ((10402, 10417), 'numpy.ones', 'np.ones', (['[1, T]'], {}), '([1, T])\n', (10409, 10417), True, 'import numpy as np\n'), ((10639, 10654), 'numpy.ones', 'np.ones', (['[1, T]'], {}), '([1, T])\n', (10646, 10654), True, 'import numpy as np\n'), ((21631, 21640), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (21637, 21640), True, 'import numpy as np\n')]
|
import numpy as np
from .real_ffts import _RealFFTBasis
class LegendreBasis(_RealFFTBasis):
r"""
Discretize a continuous field into `deg` local states using a
Legendre polynomial basis such that,
.. math::
\frac{1}{\Delta x} \int_s m(h, x) dx =
\sum_{l=0}^{L-1} m[l, s] P_l(h)
where the :math:`P_l` are Legendre polynomials and the local state space
:math:`H` is mapped into the orthogonal domain of the Legendre polynomials
.. math::
-1 \le H \le 1
The mapping of :math:`H` into the domain is done automatically in PyMKS by
using the `domain` keyword argument.
>>> n_states = 3
>>> X = np.array([[0.25, 0.1],
... [0.5, 0.25]])
>>> def P(x):
... x = 4 * x - 1
... polys = np.array((np.ones_like(x), x, (3.*x**2 - 1.) / 2.))
... tmp = (2. * np.arange(3)[:, None, None] + 1.) / 2. * polys
... return np.rollaxis(tmp, 0, 3)
>>> basis = LegendreBasis(n_states, [0., 0.5])
>>> assert(np.allclose(basis.discretize(X), P(X)))
If the microstructure local state values fall outside of the specified
domain, they will no longer be mapped into the orthogonal domain of the
Legendre polynomials.
>>> n_states = 2
>>> X = np.array([-1, 1])
>>> leg_basis = LegendreBasis(n_states, domain=[0, 1])
>>> leg_basis.discretize(X)
Traceback (most recent call last):
...
RuntimeError: X must be within the specified domain
"""
def discretize(self, X):
"""
Discretize `X`.
Args:
X (ND array): The microstructure, an `(n_samples, n_x, ...)`
shaped array where `n_samples` is the number of samples and
`n_x` is the spatial discretization.
Returns:
Float-valued field of Legendre polynomial coefficients.
>>> X = np.array([[-1, 1],
... [0, -1]])
>>> leg_basis = LegendreBasis(3, [-1, 1])
>>> def p(x):
... polys = np.array((np.ones_like(x), x, (3.*x**2 - 1.) / 2.))
... tmp = (2. * np.arange(3)[:, None, None] + 1.) / 2. * polys
... return np.rollaxis(tmp, 0, 3)
>>> assert(np.allclose(leg_basis.discretize(X), p(X)))
"""
self.check(X)
self._select_axes(X)
leg = np.polynomial.legendre
X_scaled = (2. * X - self.domain[0] - self.domain[1]) /\
(self.domain[1] - self.domain[0])
norm = (2. * np.array(self.n_states) + 1) / 2.
X_Legendre = (leg.legval(X_scaled, np.eye(len(self.n_states)) * norm))
return np.rollaxis(X_Legendre, 0, len(X_Legendre.shape))
|
[
"numpy.array"
] |
[((2494, 2517), 'numpy.array', 'np.array', (['self.n_states'], {}), '(self.n_states)\n', (2502, 2517), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 10:58:30 2020
@author: <NAME>
"""
#%%importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%% read csv
dataset = pd.read_csv("svm_dataset.csv")
#%%
dataset.drop(["id","Unnamed: 32"],axis = 1, inplace = True)
# Malignant = 'M'
# Benign = 'B'
#%%
M = dataset[dataset.diagnosis == "M"]
B = dataset[dataset.diagnosis == "B"]
# Scatter Plot
plt.scatter(M.radius_mean,M.texture_mean,color = 'red', label = "Malignant", alpha = 0.3)
plt.scatter(B.radius_mean,B.texture_mean,color = 'green', label = "Benign", alpha = 0.3)
plt.xlabel("radius_mean")
plt.ylabel("texture_mean")
plt.legend()
plt.show()
#%%
dataset.diagnosis = [1 if each == "M" else 0 for each in dataset.diagnosis]
y = dataset.diagnosis.values
x_data = dataset.drop(["diagnosis"],axis = 1)
#%%
#Normalization
x = ((x_data - np.min(x_data)) / ((np.max(x_data)) - np.min(x_data)))
#%%
#Train-Test Split
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.3, random_state =1)
#%% SVM
from sklearn.svm import SVC
svm = SVC(random_state = 1)
svm.fit(x_train,y_train)
#%% Accuracy
print("Accuracy of SVM Algorithm :",svm.score(x_test,y_test))
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.model_selection.train_test_split",
"numpy.min",
"numpy.max",
"sklearn.svm.SVC",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((204, 234), 'pandas.read_csv', 'pd.read_csv', (['"""svm_dataset.csv"""'], {}), "('svm_dataset.csv')\n", (215, 234), True, 'import pandas as pd\n'), ((433, 522), 'matplotlib.pyplot.scatter', 'plt.scatter', (['M.radius_mean', 'M.texture_mean'], {'color': '"""red"""', 'label': '"""Malignant"""', 'alpha': '(0.3)'}), "(M.radius_mean, M.texture_mean, color='red', label='Malignant',\n alpha=0.3)\n", (444, 522), True, 'import matplotlib.pyplot as plt\n'), ((523, 611), 'matplotlib.pyplot.scatter', 'plt.scatter', (['B.radius_mean', 'B.texture_mean'], {'color': '"""green"""', 'label': '"""Benign"""', 'alpha': '(0.3)'}), "(B.radius_mean, B.texture_mean, color='green', label='Benign',\n alpha=0.3)\n", (534, 611), True, 'import matplotlib.pyplot as plt\n'), ((612, 637), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""radius_mean"""'], {}), "('radius_mean')\n", (622, 637), True, 'import matplotlib.pyplot as plt\n'), ((638, 664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""texture_mean"""'], {}), "('texture_mean')\n", (648, 664), True, 'import matplotlib.pyplot as plt\n'), ((665, 677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (675, 677), True, 'import matplotlib.pyplot as plt\n'), ((678, 688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (686, 688), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1096), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(x, y, test_size=0.3, random_state=1)\n', (1059, 1096), False, 'from sklearn.model_selection import train_test_split\n'), ((1142, 1161), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': '(1)'}), '(random_state=1)\n', (1145, 1161), False, 'from sklearn.svm import SVC\n'), ((880, 894), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (886, 894), True, 'import numpy as np\n'), ((900, 914), 'numpy.max', 'np.max', (['x_data'], {}), '(x_data)\n', (906, 914), True, 'import numpy as np\n'), ((918, 932), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (924, 932), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sunpy.image.util import to_norm, un_norm
def test_to_norm():
array_simple = np.array([10., 20., 30., 100.])
assert_allclose(to_norm(array_simple), np.array([0.1, 0.2, 0.3, 1.]))
array_simple_neg = np.array([-10., 0., 10., 90.])
assert_allclose(to_norm(array_simple_neg), np.array([0, 0.1, 0.2, 1.]))
def test_un_norm():
array_simple = np.array([10, 20, 30, 100.])
assert_allclose(un_norm(np.array([0.1, 0.2, 0.3, 1.]), array_simple), array_simple)
array_simple_neg = np.array([-10, 0, 10, 90])
assert_allclose(un_norm(np.array([0, 0.1, 0.2, 1.]), array_simple_neg), array_simple_neg)
|
[
"sunpy.image.util.to_norm",
"numpy.array"
] |
[((164, 199), 'numpy.array', 'np.array', (['[10.0, 20.0, 30.0, 100.0]'], {}), '([10.0, 20.0, 30.0, 100.0])\n', (172, 199), True, 'import numpy as np\n'), ((293, 327), 'numpy.array', 'np.array', (['[-10.0, 0.0, 10.0, 90.0]'], {}), '([-10.0, 0.0, 10.0, 90.0])\n', (301, 327), True, 'import numpy as np\n'), ((441, 470), 'numpy.array', 'np.array', (['[10, 20, 30, 100.0]'], {}), '([10, 20, 30, 100.0])\n', (449, 470), True, 'import numpy as np\n'), ((581, 607), 'numpy.array', 'np.array', (['[-10, 0, 10, 90]'], {}), '([-10, 0, 10, 90])\n', (589, 607), True, 'import numpy as np\n'), ((216, 237), 'sunpy.image.util.to_norm', 'to_norm', (['array_simple'], {}), '(array_simple)\n', (223, 237), False, 'from sunpy.image.util import to_norm, un_norm\n'), ((239, 269), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 1.0]'], {}), '([0.1, 0.2, 0.3, 1.0])\n', (247, 269), True, 'import numpy as np\n'), ((344, 369), 'sunpy.image.util.to_norm', 'to_norm', (['array_simple_neg'], {}), '(array_simple_neg)\n', (351, 369), False, 'from sunpy.image.util import to_norm, un_norm\n'), ((371, 399), 'numpy.array', 'np.array', (['[0, 0.1, 0.2, 1.0]'], {}), '([0, 0.1, 0.2, 1.0])\n', (379, 399), True, 'import numpy as np\n'), ((498, 528), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 1.0]'], {}), '([0.1, 0.2, 0.3, 1.0])\n', (506, 528), True, 'import numpy as np\n'), ((636, 664), 'numpy.array', 'np.array', (['[0, 0.1, 0.2, 1.0]'], {}), '([0, 0.1, 0.2, 1.0])\n', (644, 664), True, 'import numpy as np\n')]
|
"""
The ``risk_models`` module provides functions for estimating the covariance matrix given
historical returns. Because of the complexity of estimating covariance matrices
(and the importance of efficient computations), this module mostly provides a convenient
wrapper around the underrated `sklearn.covariance` module.
The format of the data input is the same as that in :ref:`expected-returns`.
**Currently implemented:**
- sample covariance
- semicovariance
- exponentially weighted covariance
- minimum covariance determinant
- shrunk covariance matrices:
- manual shrinkage
- Ledoit Wolf shrinkage
- Oracle Approximating shrinkage
- covariance to correlation matrix
"""
import warnings
import numpy as np
import pandas as pd
import sklearn.covariance
from .expected_returns import returns_from_prices
def sample_cov(prices, frequency=252):
"""
Calculate the annualised sample covariance matrix of (daily) asset returns.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
return daily_returns.cov() * frequency
def semicovariance(prices, benchmark=0.000079, frequency=252):
"""
Estimate the semicovariance matrix, i.e the covariance given that
the returns are less than the benchmark.
.. math:: \text{semicov} = E([\min(r_i - B, 0)] \cdot [\min(r_j - B, 0)])
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param benchmark: the benchmark return, defaults to the daily risk-free rate, i.e.
:math:`1.02^{(1/252)} -1`.
:type benchmark: float
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year). Ensure that you use the appropriate
benchmark, e.g. if ``frequency=12`` use the monthly risk-free rate.
:type frequency: int, optional
:return: semicovariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
drops = np.fmin(daily_returns - benchmark, 0)
return drops.cov() * frequency
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean()[-1]
def exp_cov(prices, span=180, frequency=252):
"""
Estimate the exponentially-weighted covariance matrix, which gives
greater weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised estimate of exponential covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
daily_returns = returns_from_prices(prices)
N = len(assets)
# Loop over matrix, filling entries with the pairwise exp cov
S = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
S[i, j] = S[j, i] = _pair_exp_cov(
daily_returns.iloc[:, i], daily_returns.iloc[:, j], span
)
return pd.DataFrame(S * frequency, columns=assets, index=assets)
def min_cov_determinant(prices, frequency=252, random_state=None):
"""
Calculate the minimum covariance determinant, an estimator of the covariance matrix
that is more robust to noise.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:param random_state: random seed to make results reproducible, defaults to None
:type random_state: int, optional
:return: annualised estimate of covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
X = prices.pct_change().dropna(how="all")
X = np.nan_to_num(X.values)
raw_cov_array = sklearn.covariance.fast_mcd(X, random_state=random_state)[1]
return pd.DataFrame(raw_cov_array, index=assets, columns=assets) * frequency
def cov_to_corr(cov_matrix):
"""
Convert a covariance matrix to a correlation matrix.
:param cov_matrix: covariance matrix
:type cov_matrix: pd.DataFrame
:return: correlation matrix
:rtype: pd.DataFrame
"""
if not isinstance(cov_matrix, pd.DataFrame):
warnings.warn("cov_matrix is not a dataframe", RuntimeWarning)
cov_matrix = pd.DataFrame(cov_matrix)
Dinv = np.diag(1 / np.sqrt(np.diag(cov_matrix)))
corr = np.dot(Dinv, np.dot(cov_matrix, Dinv))
return pd.DataFrame(corr, index=cov_matrix.index, columns=cov_matrix.index)
class CovarianceShrinkage:
"""
Provide methods for computing shrinkage estimates of the covariance matrix, using the
sample covariance matrix and choosing the structured estimator to be an identity matrix
multiplied by the average sample variance. The shrinkage constant can be input manually,
though there exist methods (notably Ledoit Wolf) to estimate the optimal value.
Instance variables:
- ``X`` (returns)
- ``S`` (sample covariance matrix)
- ``delta`` (shrinkage constant)
"""
def __init__(self, prices, frequency=252):
"""
:param prices: adjusted closing prices of the asset, each row is a date and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number of trading days in a year)
:type frequency: int, optional
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
self.frequency = frequency
self.X = prices.pct_change().dropna(how="all")
self.S = self.X.cov().values
self.delta = None # shrinkage constant
def format_and_annualise(self, raw_cov_array):
"""
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
"""
assets = self.X.columns
return (
pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency
)
def shrunk_covariance(self, delta=0.2):
"""
Shrink a sample covariance matrix to the identity matrix (scaled by the average
sample variance). This method does not estimate an optimal shrinkage parameter,
it requires manual input.
:param delta: shrinkage parameter, defaults to 0.2.
:type delta: float, optional
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
self.delta = delta
N = self.S.shape[1]
# Shrinkage target
mu = np.trace(self.S) / N
F = np.identity(N) * mu
# Shrinkage
shrunk_cov = delta * F + (1 - delta) * self.S
return self.format_and_annualise(shrunk_cov)
def ledoit_wolf(self, shrinkage_target="constant_variance"):
"""
Calculate the Ledoit-Wolf shrinkage estimate for a particular
shrinkage target.
:param shrinkage_target: choice of shrinkage target, either ``constant_variance``,
``single_factor`` or ``constant_correlation``. Defaults to
``constant_variance``.
:type shrinkage_target: str, optional
:raises NotImplementedError: if the shrinkage_target is unrecognised
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
if shrinkage_target == "constant_variance":
X = np.nan_to_num(self.X.values)
shrunk_cov, self.delta = sklearn.covariance.ledoit_wolf(X)
elif shrinkage_target == "single_factor":
shrunk_cov, self.delta = self._ledoit_wolf_single_factor()
elif shrinkage_target == "constant_correlation":
shrunk_cov, self.delta = self._ledoit_wolf_constant_correlation()
else:
raise NotImplementedError
return self.format_and_annualise(shrunk_cov)
def _ledoit_wolf_single_factor(self):
"""
Helper method to calculate the Ledoit-Wolf shrinkage estimate
with the Sharpe single-factor matrix as the shrinkage target.
See Ledoit and Wolf (2001).
:return: shrunk sample covariance matrix, shrinkage constant
:rtype: np.ndarray, float
"""
X = np.nan_to_num(self.X.values)
# De-mean returns
t, n = np.shape(X)
Xm = X - X.mean(axis=0)
xmkt = X.mean(axis=1).reshape(t, 1)
# compute sample covariance matrix
sample = np.cov(np.append(Xm, xmkt, axis=1), rowvar=False) * (t - 1) / t
betas = sample[0:n, n].reshape(n, 1)
varmkt = sample[n, n]
sample = sample[:n, :n]
F = np.dot(betas, betas.T) / varmkt
F[np.eye(n) == 1] = np.diag(sample)
# compute shrinkage parameters
c = np.linalg.norm(sample - F, "fro") ** 2
y = Xm ** 2
p = 1 / t * np.sum(np.dot(y.T, y)) - np.sum(sample ** 2)
# r is divided into diagonal
# and off-diagonal terms, and the off-diagonal term
# is itself divided into smaller terms
rdiag = 1 / t * np.sum(y ** 2) - sum(np.diag(sample) ** 2)
z = Xm * np.tile(xmkt, (n,))
v1 = 1 / t * np.dot(y.T, z) - np.tile(betas, (n,)) * sample
roff1 = (
np.sum(v1 * np.tile(betas, (n,)).T) / varmkt
- np.sum(np.diag(v1) * betas.T) / varmkt
)
v3 = 1 / t * np.dot(z.T, z) - varmkt * sample
roff3 = (
np.sum(v3 * np.dot(betas, betas.T)) / varmkt ** 2
- np.sum(np.diag(v3).reshape(-1, 1) * betas ** 2) / varmkt ** 2
)
roff = 2 * roff1 - roff3
r = rdiag + roff
# compute shrinkage constant
k = (p - r) / c
delta = max(0, min(1, k / t))
# compute the estimator
shrunk_cov = delta * F + (1 - delta) * sample
return shrunk_cov, delta
def _ledoit_wolf_constant_correlation(self):
"""
Helper method to calculate the Ledoit-Wolf shrinkage estimate
with the constant correlation matrix as the shrinkage target.
See Ledoit and Wolf (2003)
:return: shrunk sample covariance matrix, shrinkage constant
:rtype: np.ndarray, float
"""
X = np.nan_to_num(self.X.values)
t, n = np.shape(X)
S = self.S # sample cov matrix
# Constant correlation target
var = np.diag(S).reshape(-1, 1)
std = np.sqrt(var)
_var = np.tile(var, (n,))
_std = np.tile(std, (n,))
r_bar = (np.sum(S / (_std * _std.T)) - n) / (n * (n - 1))
F = r_bar * (_std * _std.T)
F[np.eye(n) == 1] = var.reshape(-1)
# Estimate pi
Xm = X - X.mean(axis=0)
y = Xm ** 2
pi_mat = np.dot(y.T, y) / t - 2 * np.dot(Xm.T, Xm) * S / t + S ** 2
pi_hat = np.sum(pi_mat)
# Theta matrix, expanded term by term
term1 = np.dot((X ** 3).T, X) / t
help_ = np.dot(X.T, X) / t
help_diag = np.diag(help_)
term2 = np.tile(help_diag, (n, 1)).T * S
term3 = help_ * _var
term4 = _var * S
theta_mat = term1 - term2 - term3 + term4
theta_mat[np.eye(n) == 1] = np.zeros(n)
rho_hat = sum(np.diag(pi_mat)) + r_bar * np.sum(
np.dot((1 / std), std.T) * theta_mat
)
# Estimate gamma
gamma_hat = np.linalg.norm(S - F, "fro") ** 2
# Compute shrinkage constant
kappa_hat = (pi_hat - rho_hat) / gamma_hat
delta = max(0.0, min(1.0, kappa_hat / t))
# Compute shrunk covariance matrix
shrunk_cov = delta * F + (1 - delta) * S
return shrunk_cov, delta
def oracle_approximating(self):
"""
Calculate the Oracle Approximating Shrinkage estimate
:return: shrunk sample covariance matrix
:rtype: np.ndarray
"""
X = np.nan_to_num(self.X.values)
shrunk_cov, self.delta = sklearn.covariance.oas(X)
return self.format_and_annualise(shrunk_cov)
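# --- Illustrative usage sketch (editorial addition, not part of the library) ---
# A minimal, hedged example of how the estimators above might be combined.
# "prices.csv" is a hypothetical file of adjusted closing prices (dates x tickers).
if __name__ == "__main__":
    demo_prices = pd.read_csv("prices.csv", parse_dates=True, index_col=0)
    S_sample = sample_cov(demo_prices)                     # annualised sample covariance
    S_lw = CovarianceShrinkage(demo_prices).ledoit_wolf()  # Ledoit-Wolf shrinkage estimate
    print(cov_to_corr(S_lw))                               # corresponding correlation matrix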
|
[
"pandas.DataFrame",
"numpy.fmin",
"numpy.trace",
"numpy.sum",
"numpy.nan_to_num",
"numpy.eye",
"numpy.zeros",
"numpy.identity",
"numpy.shape",
"numpy.append",
"numpy.linalg.norm",
"numpy.tile",
"numpy.dot",
"warnings.warn",
"numpy.diag",
"numpy.sqrt"
] |
[((2751, 2788), 'numpy.fmin', 'np.fmin', (['(daily_returns - benchmark)', '(0)'], {}), '(daily_returns - benchmark, 0)\n', (2758, 2788), True, 'import numpy as np\n'), ((4556, 4572), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (4564, 4572), True, 'import numpy as np\n'), ((4771, 4828), 'pandas.DataFrame', 'pd.DataFrame', (['(S * frequency)'], {'columns': 'assets', 'index': 'assets'}), '(S * frequency, columns=assets, index=assets)\n', (4783, 4828), True, 'import pandas as pd\n'), ((5800, 5823), 'numpy.nan_to_num', 'np.nan_to_num', (['X.values'], {}), '(X.values)\n', (5813, 5823), True, 'import numpy as np\n'), ((6505, 6573), 'pandas.DataFrame', 'pd.DataFrame', (['corr'], {'index': 'cov_matrix.index', 'columns': 'cov_matrix.index'}), '(corr, index=cov_matrix.index, columns=cov_matrix.index)\n', (6517, 6573), True, 'import pandas as pd\n'), ((1415, 1477), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (1428, 1477), False, 'import warnings\n'), ((1495, 1515), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (1507, 1515), True, 'import pandas as pd\n'), ((2590, 2652), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (2603, 2652), False, 'import warnings\n'), ((2670, 2690), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (2682, 2690), True, 'import pandas as pd\n'), ((3409, 3477), 'warnings.warn', 'warnings.warn', (['"""it is recommended to use a higher span, e.g 30 days"""'], {}), "('it is recommended to use a higher span, e.g 30 days')\n", (3422, 3477), False, 'import warnings\n'), ((4284, 4346), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (4297, 4346), False, 'import warnings\n'), ((4364, 4384), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (4376, 4384), True, 'import pandas as pd\n'), ((5617, 5679), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (5630, 5679), False, 'import warnings\n'), ((5697, 5717), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (5709, 5717), True, 'import pandas as pd\n'), ((5916, 5973), 'pandas.DataFrame', 'pd.DataFrame', (['raw_cov_array'], {'index': 'assets', 'columns': 'assets'}), '(raw_cov_array, index=assets, columns=assets)\n', (5928, 5973), True, 'import pandas as pd\n'), ((6281, 6343), 'warnings.warn', 'warnings.warn', (['"""cov_matrix is not a dataframe"""', 'RuntimeWarning'], {}), "('cov_matrix is not a dataframe', RuntimeWarning)\n", (6294, 6343), False, 'import warnings\n'), ((6365, 6389), 'pandas.DataFrame', 'pd.DataFrame', (['cov_matrix'], {}), '(cov_matrix)\n', (6377, 6389), True, 'import pandas as pd\n'), ((6468, 6492), 'numpy.dot', 'np.dot', (['cov_matrix', 'Dinv'], {}), '(cov_matrix, Dinv)\n', (6474, 6492), True, 'import numpy as np\n'), ((10590, 10618), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (10603, 10618), True, 'import numpy as np\n'), ((10661, 10672), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (10669, 10672), True, 'import numpy as np\n'), ((11053, 11068), 'numpy.diag', 'np.diag', (['sample'], {}), '(sample)\n', (11060, 11068), True, 'import numpy as np\n'), ((12563, 12591), 
'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (12576, 12591), True, 'import numpy as np\n'), ((12607, 12618), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (12615, 12618), True, 'import numpy as np\n'), ((12753, 12765), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (12760, 12765), True, 'import numpy as np\n'), ((12781, 12799), 'numpy.tile', 'np.tile', (['var', '(n,)'], {}), '(var, (n,))\n', (12788, 12799), True, 'import numpy as np\n'), ((12815, 12833), 'numpy.tile', 'np.tile', (['std', '(n,)'], {}), '(std, (n,))\n', (12822, 12833), True, 'import numpy as np\n'), ((13148, 13162), 'numpy.sum', 'np.sum', (['pi_mat'], {}), '(pi_mat)\n', (13154, 13162), True, 'import numpy as np\n'), ((13307, 13321), 'numpy.diag', 'np.diag', (['help_'], {}), '(help_)\n', (13314, 13321), True, 'import numpy as np\n'), ((13511, 13522), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13519, 13522), True, 'import numpy as np\n'), ((14196, 14224), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (14209, 14224), True, 'import numpy as np\n'), ((7536, 7598), 'warnings.warn', 'warnings.warn', (['"""prices are not in a dataframe"""', 'RuntimeWarning'], {}), "('prices are not in a dataframe', RuntimeWarning)\n", (7549, 7598), False, 'import warnings\n'), ((7620, 7640), 'pandas.DataFrame', 'pd.DataFrame', (['prices'], {}), '(prices)\n', (7632, 7640), True, 'import pandas as pd\n'), ((8263, 8320), 'pandas.DataFrame', 'pd.DataFrame', (['raw_cov_array'], {'index': 'assets', 'columns': 'assets'}), '(raw_cov_array, index=assets, columns=assets)\n', (8275, 8320), True, 'import pandas as pd\n'), ((8896, 8912), 'numpy.trace', 'np.trace', (['self.S'], {}), '(self.S)\n', (8904, 8912), True, 'import numpy as np\n'), ((8929, 8943), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (8940, 8943), True, 'import numpy as np\n'), ((9769, 9797), 'numpy.nan_to_num', 'np.nan_to_num', (['self.X.values'], {}), '(self.X.values)\n', (9782, 9797), True, 'import numpy as np\n'), ((10993, 11015), 'numpy.dot', 'np.dot', (['betas', 'betas.T'], {}), '(betas, betas.T)\n', (10999, 11015), True, 'import numpy as np\n'), ((11121, 11154), 'numpy.linalg.norm', 'np.linalg.norm', (['(sample - F)', '"""fro"""'], {}), "(sample - F, 'fro')\n", (11135, 11154), True, 'import numpy as np\n'), ((11225, 11244), 'numpy.sum', 'np.sum', (['(sample ** 2)'], {}), '(sample ** 2)\n', (11231, 11244), True, 'import numpy as np\n'), ((11474, 11493), 'numpy.tile', 'np.tile', (['xmkt', '(n,)'], {}), '(xmkt, (n,))\n', (11481, 11493), True, 'import numpy as np\n'), ((13226, 13247), 'numpy.dot', 'np.dot', (['(X ** 3).T', 'X'], {}), '((X ** 3).T, X)\n', (13232, 13247), True, 'import numpy as np\n'), ((13268, 13282), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (13274, 13282), True, 'import numpy as np\n'), ((13685, 13713), 'numpy.linalg.norm', 'np.linalg.norm', (['(S - F)', '"""fro"""'], {}), "(S - F, 'fro')\n", (13699, 13713), True, 'import numpy as np\n'), ((6422, 6441), 'numpy.diag', 'np.diag', (['cov_matrix'], {}), '(cov_matrix)\n', (6429, 6441), True, 'import numpy as np\n'), ((11035, 11044), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (11041, 11044), True, 'import numpy as np\n'), ((11414, 11428), 'numpy.sum', 'np.sum', (['(y ** 2)'], {}), '(y ** 2)\n', (11420, 11428), True, 'import numpy as np\n'), ((11515, 11529), 'numpy.dot', 'np.dot', (['y.T', 'z'], {}), '(y.T, z)\n', (11521, 11529), True, 'import numpy as np\n'), ((11532, 11552), 'numpy.tile', 'np.tile', (['betas', 
'(n,)'], {}), '(betas, (n,))\n', (11539, 11552), True, 'import numpy as np\n'), ((11721, 11735), 'numpy.dot', 'np.dot', (['z.T', 'z'], {}), '(z.T, z)\n', (11727, 11735), True, 'import numpy as np\n'), ((12713, 12723), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (12720, 12723), True, 'import numpy as np\n'), ((12851, 12878), 'numpy.sum', 'np.sum', (['(S / (_std * _std.T))'], {}), '(S / (_std * _std.T))\n', (12857, 12878), True, 'import numpy as np\n'), ((12946, 12955), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (12952, 12955), True, 'import numpy as np\n'), ((13338, 13364), 'numpy.tile', 'np.tile', (['help_diag', '(n, 1)'], {}), '(help_diag, (n, 1))\n', (13345, 13364), True, 'import numpy as np\n'), ((13493, 13502), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (13499, 13502), True, 'import numpy as np\n'), ((13545, 13560), 'numpy.diag', 'np.diag', (['pi_mat'], {}), '(pi_mat)\n', (13552, 13560), True, 'import numpy as np\n'), ((10817, 10844), 'numpy.append', 'np.append', (['Xm', 'xmkt'], {'axis': '(1)'}), '(Xm, xmkt, axis=1)\n', (10826, 10844), True, 'import numpy as np\n'), ((11207, 11221), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (11213, 11221), True, 'import numpy as np\n'), ((11435, 11450), 'numpy.diag', 'np.diag', (['sample'], {}), '(sample)\n', (11442, 11450), True, 'import numpy as np\n'), ((13072, 13086), 'numpy.dot', 'np.dot', (['y.T', 'y'], {}), '(y.T, y)\n', (13078, 13086), True, 'import numpy as np\n'), ((11658, 11669), 'numpy.diag', 'np.diag', (['v1'], {}), '(v1)\n', (11665, 11669), True, 'import numpy as np\n'), ((11796, 11818), 'numpy.dot', 'np.dot', (['betas', 'betas.T'], {}), '(betas, betas.T)\n', (11802, 11818), True, 'import numpy as np\n'), ((13592, 13614), 'numpy.dot', 'np.dot', (['(1 / std)', 'std.T'], {}), '(1 / std, std.T)\n', (13598, 13614), True, 'import numpy as np\n'), ((11604, 11624), 'numpy.tile', 'np.tile', (['betas', '(n,)'], {}), '(betas, (n,))\n', (11611, 11624), True, 'import numpy as np\n'), ((13097, 13113), 'numpy.dot', 'np.dot', (['Xm.T', 'Xm'], {}), '(Xm.T, Xm)\n', (13103, 13113), True, 'import numpy as np\n'), ((11855, 11866), 'numpy.diag', 'np.diag', (['v3'], {}), '(v3)\n', (11862, 11866), True, 'import numpy as np\n')]
|
#!/bin/python
# -*- coding: utf-8 -*-
import time
import numpy as np
import scipy.linalg as sl
import pandas as pd
from econsieve import KalmanFilter, TEnKF
from grgrlib.core import timeprint
from grgrlib.multiprocessing import serializer
from econsieve.stats import logpdf
def create_obs_cov(self, scale_obs=0.1):
self.Z = np.array(self.data)
sig_obs = np.var(self.Z, axis=0)*scale_obs**2
obs_cov = np.diagflat(sig_obs)
return obs_cov
def get_p_init_lyapunov(self, Q):
pmat = self.precalc_mat[0]
qmat = self.precalc_mat[1]
F = np.vstack((pmat[1, 0][:, :-self.neps],
qmat[1, 0][:-self.neps, :-self.neps]))
E = np.vstack((pmat[1, 0][:, -self.neps:],
qmat[1, 0][:-self.neps, -self.neps:]))
Q = E @ Q @ E.T
p4 = sl.solve_discrete_lyapunov(F[self.dimp:,:], Q[self.dimp:,self.dimp:])
return F @ p4 @ F.T + Q
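# Editorial note (hedged): sl.solve_discrete_lyapunov(A, Q) returns the fixed point P of
# P = A @ P @ A.T + Q, i.e. the stationary covariance of x_t = A x_{t-1} + e_t with
# Cov(e_t) = Q, so the function above seeds the filter with the model-implied
# unconditional covariance. Toy check (assumed values, not from any model):
# sl.solve_discrete_lyapunov(0.9 * np.eye(2), np.eye(2)) ~= np.eye(2) / (1 - 0.81)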
def get_eps_lin(self, x, xp, rcond=1e-14):
"""Get filter-implied (smoothed) shocks for linear model
"""
qmat = self.precalc_mat[1]
if self.filter.name == 'KalmanFilter':
pmat = self.precalc_mat[0]
F = self.filter.F
E = np.vstack((pmat[1, 0][:, -self.neps:],
qmat[1, 0][:-self.neps, -self.neps:]))
else:
F = qmat[1, 0][:, :-self.neps]
E = qmat[1, 0][:, -self.neps:]
return np.linalg.pinv(E, rcond) @ (x - F@xp)
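# Editorial note (hedged): for the linearised transition x = F @ xp + E @ eps, the
# least-squares shock estimate is eps = pinv(E) @ (x - F @ xp), which is exactly what
# the return statement above computes.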
def create_filter(self, R=None, N=None, ftype=None, seed=None, incl_obs=False, reduced_form=False, **fargs):
self.Z = np.array(self.data)
if ftype == 'KalmanFilter':
ftype = 'KF'
elif ftype == 'ParticleFilter':
ftype = 'PF'
elif ftype == 'AuxiliaryParticleFilter':
ftype = 'APF'
if ftype == 'KF':
f = KalmanFilter(dim_x=self.dimx, dim_z=self.nobs)
elif ftype in ('PF', 'APF'):
print(
'Warning: Particle filter is experimental and not actively maintained.')
from .pfilter import ParticleFilter
if N is None:
N = 10000
aux_bs = ftype == 'APF'
f = ParticleFilter(N=N, dim_x=self.dimx,
dim_z=self.nobs, auxiliary_bootstrap=aux_bs)
else:
ftype = 'TEnKF'
if N is None:
N = int((self.dimq-self.dimeps)**2/2) + 1
dimx = self.dimq-self.dimeps if reduced_form else self.dimx
f = TEnKF(N=N, dim_x=dimx, dim_z=self.nobs, seed=seed, **fargs)
f.reduced_form = reduced_form
if R is not None:
f.R = R
# use lyapunov equation as default. Otherwise to be defined manually via `*.filter.p`
f.init_cov = None
try:
f.Q = self.QQ(self.ppar) @ self.QQ(self.ppar)
except AttributeError:
f.Q = self.fdict['QQ'] @ self.fdict['QQ']
self.filter = f
return f
def get_ll(self, **args):
return run_filter(self, smoother=False, get_ll=True, **args)
def run_filter(self, smoother=True, get_ll=False, init_cov=None, dispatch=None, rcond=1e-14, seed=None, verbose=False):
if verbose:
st = time.time()
self.Z = np.array(self.data)
dimp = self.dimp
if init_cov is not None:
self.filter.init_cov = init_cov
# assign current transition & observation functions (of parameters)
if self.filter.name == 'KalmanFilter':
pmat = self.precalc_mat[0]
qmat = self.precalc_mat[1]
F = np.vstack((pmat[1, 0][:, :-self.neps],
qmat[1, 0][:-self.neps, :-self.neps]))
F = np.pad(F, ((0, 0), (dimp, 0)))
self.filter.F = F
self.filter.H = np.hstack((self.hx[0], self.hx[1])), self.hx[2]
if self.filter.Q.shape[0] == self.neps:
E = np.vstack((pmat[1, 0][:, -self.neps:],
qmat[1, 0][:-self.neps, -self.neps:]))
self.filter.Q = E @ self.filter.Q @ E.T
if self.filter.init_cov is None:
p4 = sl.solve_discrete_lyapunov(F[dimp:,dimp:], self.filter.Q[dimp:,dimp:])
self.filter.P = F[:,dimp:] @ p4 @ F.T[dimp:] + self.filter.Q
else:
self.filter.P = self.filter.init_cov
elif dispatch or self.filter.name == 'ParticleFilter':
from .engine import func_dispatch
t_func_jit, o_func_jit, get_eps_jit = func_dispatch(self, full=True)
self.filter.t_func = t_func_jit
self.filter.o_func = o_func_jit
self.filter.get_eps = get_eps_jit
elif self.filter.reduced_form:
self.filter.t_func = lambda *x: self.t_func(*x, get_obs=True)
self.filter.o_func = None
if self.filter.init_cov is None:
qmat = self.precalc_mat[1]
F = qmat[1, 0][:-self.neps, :-self.neps]
E = qmat[1, 0][:-self.neps, -self.neps:]
Q = E @ self.filter.Q @ E.T
self.filter.P = sl.solve_discrete_lyapunov(F, Q)
else:
self.filter.P = self.filter.init_cov
else:
self.filter.t_func = self.t_func
self.filter.o_func = self.o_func
if self.filter.init_cov is None:
self.filter.P = get_p_init_lyapunov(self, self.filter.Q)
else:
self.filter.P = self.filter.init_cov
self.filter.get_eps = self.get_eps_lin
if self.filter.name == 'KalmanFilter':
means, covs, ll = self.filter.batch_filter(self.Z)
if smoother:
means, covs, _, _ = self.filter.rts_smoother(
means, covs, inv=lambda x: np.linalg.pinv(x, rcond=rcond))
if get_ll:
res = ll
else:
means = means
res = (means, covs)
elif self.filter.name == 'ParticleFilter':
res = self.filter.batch_filter(self.Z)
if smoother:
if verbose > 0:
print('[run_filter:]'.ljust(
15, ' ')+' Filtering done after %s seconds, starting smoothing...' % np.round(time.time()-st, 3))
if isinstance(smoother, bool):
smoother = 10
res = self.filter.smoother(smoother)
else:
res = self.filter.batch_filter(
self.Z, calc_ll=get_ll, store=smoother, seed=seed, verbose=verbose > 0)
if smoother:
res = self.filter.rts_smoother(res, rcond=rcond)
if get_ll:
if np.isnan(res):
res = -np.inf
self.ll = res
else:
self.X = res
if verbose > 0:
mess = '[run_filter:]'.ljust(
15, ' ')+' Filtering done in %s.' % timeprint(time.time()-st, 3)
if get_ll:
mess += 'Likelihood is %s.' % res
print(mess)
return res
def extract(self, sample=None, nsamples=1, init_cov=None, precalc=True, seed=0, nattemps=4, accept_failure=False, verbose=True, debug=False, l_max=None, k_max=None, **npasargs):
"""Extract the timeseries of (smoothed) shocks.
Parameters
----------
sample : array, optional
Provide one or several parameter vectors for which the smoothed shocks are calculated (default is the current `self.par`)
nsamples : int, optional
Number of `npas`-draws for each element in `sample`. Defaults to 1
nattemps : int, optional
Number of attempts per sample to crunch the sample with a different seed. Defaults to 4
Returns
-------
tuple
The result(s)
"""
import tqdm
import os
from grgrlib.core import map2arr
if sample is None:
if type(self).__name__ == "DSGE_DUMMY":
sample = None
else:
sample = self.par
if np.ndim(sample) <= 1:
sample = [sample]
np.random.seed(seed)
fname = self.filter.name
verbose = max(verbose, debug)
if hasattr(self, 'pool'):
from .estimation import create_pool
create_pool(self)
if fname == 'ParticleFilter':
raise NotImplementedError
elif fname == 'KalmanFilter':
if nsamples > 1:
print('[extract:]'.ljust(
15, ' ')+' Setting `nsamples` to 1 as the linear filter does not rely on sampling.')
nsamples = 1
debug = not hasattr(self, 'debug') or self.debug
self.debug = True
else:
if self.filter.reduced_form:
self.create_filter(
R=self.filter.R, N=self.filter.N, reduced_form=False)
print('[extract:]'.ljust(
15, ' ')+' Extraction requires filter in non-reduced form. Recreating filter instance.')
npas = serializer(self.filter.npas)
self.debug |= debug
if sample[0] is not None:
set_par = serializer(self.set_par)
run_filter = serializer(self.run_filter)
t_func = serializer(self.t_func)
edim = len(self.shocks)
xdim = len(self.vv)
odim = len(self.observables)
obs_func = serializer(self.obs)
filter_get_eps = serializer(self.get_eps_lin)
dimeps = self.dimeps
dimp = self.dimp
seeds = np.random.randint(2**31, size=nsamples) # win explodes with 2**32
sample = [(x, y) for x in sample for y in seeds]
def runner(arg):
par, seed_loc = arg
if par is not None:
set_par(par, l_max=l_max, k_max=k_max)
res = run_filter(verbose=verbose > 2, seed=seed_loc, init_cov=init_cov)
if fname == 'KalmanFilter':
means, covs = res
res = means.copy()
resid = np.empty((means.shape[0]-1, dimeps))
for t, x in enumerate(means[1:]):
resid[t] = filter_get_eps(x, res[t])
res[t+1] = t_func(res[t], resid[t], linear=True)[0]
return par, res[0], resid, 0
np.random.shuffle(res)
sample = np.dstack((obs_func(res), res[..., dimp:]))
inits = res[:, 0, :]
def t_func_loc(states, eps):
(q, pobs), flag = t_func(states, eps, get_obs=True)
return np.hstack((pobs, q)), flag
for natt in range(nattemps):
try:
init, resid, flags = npas(func=t_func_loc, X=sample, init_states=inits, verbose=max(
len(sample) == 1, verbose-1), seed=seed_loc, nsamples=1, **npasargs)
return par, init, resid[0], flags
except Exception as e:
raised_error = e
if accept_failure:
print('[extract:]'.ljust(
15, ' ') + "got an error: '%s' (after %s unsuccessful attemps)." % (raised_error, natt+1))
return None
else:
import sys
raise type(raised_error)(str(raised_error) + ' (after %s unsuccessful attempts).' %
(natt+1)).with_traceback(sys.exc_info()[2])
wrap = tqdm.tqdm if (verbose and len(sample) >
1) else (lambda x, **kwarg: x)
res = wrap(self.mapper(runner, sample), unit=' sample(s)',
total=len(sample), dynamic_ncols=True)
pars, init, resid, flags = map2arr(res)
if hasattr(self, 'pool') and self.pool:
self.pool.close()
if fname == 'KalmanFilter':
self.debug = debug
if resid.shape[0] == 1:
resid[0] = pd.DataFrame(
resid[0], index=self.data.index[:-1], columns=self.shocks)
edict = {'pars': pars,
'init': init,
'resid': resid,
'flags': flags}
return edict
|
[
"econsieve.KalmanFilter",
"numpy.random.seed",
"numpy.diagflat",
"numpy.empty",
"numpy.isnan",
"numpy.random.randint",
"sys.exc_info",
"numpy.linalg.pinv",
"econsieve.TEnKF",
"numpy.pad",
"pandas.DataFrame",
"numpy.ndim",
"grgrlib.multiprocessing.serializer",
"numpy.var",
"numpy.random.shuffle",
"numpy.hstack",
"scipy.linalg.solve_discrete_lyapunov",
"numpy.vstack",
"time.time",
"numpy.array",
"grgrlib.core.map2arr"
] |
[((332, 351), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (340, 351), True, 'import numpy as np\n'), ((416, 436), 'numpy.diagflat', 'np.diagflat', (['sig_obs'], {}), '(sig_obs)\n', (427, 436), True, 'import numpy as np\n'), ((565, 642), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps])'], {}), '((pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps]))\n', (574, 642), True, 'import numpy as np\n'), ((671, 748), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:])'], {}), '((pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:]))\n', (680, 748), True, 'import numpy as np\n'), ((797, 868), 'scipy.linalg.solve_discrete_lyapunov', 'sl.solve_discrete_lyapunov', (['F[self.dimp:, :]', 'Q[self.dimp:, self.dimp:]'], {}), '(F[self.dimp:, :], Q[self.dimp:, self.dimp:])\n', (823, 868), True, 'import scipy.linalg as sl\n'), ((1524, 1543), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (1532, 1543), True, 'import numpy as np\n'), ((3080, 3099), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (3088, 3099), True, 'import numpy as np\n'), ((7601, 7621), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7615, 7621), True, 'import numpy as np\n'), ((8615, 8642), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.run_filter'], {}), '(self.run_filter)\n', (8625, 8642), False, 'from grgrlib.multiprocessing import serializer\n'), ((8656, 8679), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.t_func'], {}), '(self.t_func)\n', (8666, 8679), False, 'from grgrlib.multiprocessing import serializer\n'), ((8781, 8801), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.obs'], {}), '(self.obs)\n', (8791, 8801), False, 'from grgrlib.multiprocessing import serializer\n'), ((8823, 8851), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.get_eps_lin'], {}), '(self.get_eps_lin)\n', (8833, 8851), False, 'from grgrlib.multiprocessing import serializer\n'), ((8912, 8953), 'numpy.random.randint', 'np.random.randint', (['(2 ** 31)'], {'size': 'nsamples'}), '(2 ** 31, size=nsamples)\n', (8929, 8953), True, 'import numpy as np\n'), ((10912, 10924), 'grgrlib.core.map2arr', 'map2arr', (['res'], {}), '(res)\n', (10919, 10924), False, 'from grgrlib.core import map2arr\n'), ((366, 388), 'numpy.var', 'np.var', (['self.Z'], {'axis': '(0)'}), '(self.Z, axis=0)\n', (372, 388), True, 'import numpy as np\n'), ((1159, 1236), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:])'], {}), '((pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:]))\n', (1168, 1236), True, 'import numpy as np\n'), ((1361, 1385), 'numpy.linalg.pinv', 'np.linalg.pinv', (['E', 'rcond'], {}), '(E, rcond)\n', (1375, 1385), True, 'import numpy as np\n'), ((1758, 1804), 'econsieve.KalmanFilter', 'KalmanFilter', ([], {'dim_x': 'self.dimx', 'dim_z': 'self.nobs'}), '(dim_x=self.dimx, dim_z=self.nobs)\n', (1770, 1804), False, 'from econsieve import KalmanFilter, TEnKF\n'), ((3054, 3065), 'time.time', 'time.time', ([], {}), '()\n', (3063, 3065), False, 'import time\n'), ((3392, 3469), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps])'], {}), '((pmat[1, 0][:, :-self.neps], qmat[1, 0][:-self.neps, :-self.neps]))\n', (3401, 3469), True, 'import numpy as np\n'), ((3505, 3535), 'numpy.pad', 'np.pad', (['F', '((0, 0), (dimp, 0))'], {}), '(F, ((0, 0), 
(dimp, 0)))\n', (3511, 3535), True, 'import numpy as np\n'), ((6279, 6292), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (6287, 6292), True, 'import numpy as np\n'), ((7548, 7563), 'numpy.ndim', 'np.ndim', (['sample'], {}), '(sample)\n', (7555, 7563), True, 'import numpy as np\n'), ((8572, 8596), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.set_par'], {}), '(self.set_par)\n', (8582, 8596), False, 'from grgrlib.multiprocessing import serializer\n'), ((9618, 9640), 'numpy.random.shuffle', 'np.random.shuffle', (['res'], {}), '(res)\n', (9635, 9640), True, 'import numpy as np\n'), ((11104, 11175), 'pandas.DataFrame', 'pd.DataFrame', (['resid[0]'], {'index': 'self.data.index[:-1]', 'columns': 'self.shocks'}), '(resid[0], index=self.data.index[:-1], columns=self.shocks)\n', (11116, 11175), True, 'import pandas as pd\n'), ((2384, 2443), 'econsieve.TEnKF', 'TEnKF', ([], {'N': 'N', 'dim_x': 'dimx', 'dim_z': 'self.nobs', 'seed': 'seed'}), '(N=N, dim_x=dimx, dim_z=self.nobs, seed=seed, **fargs)\n', (2389, 2443), False, 'from econsieve import KalmanFilter, TEnKF\n'), ((3587, 3622), 'numpy.hstack', 'np.hstack', (['(self.hx[0], self.hx[1])'], {}), '((self.hx[0], self.hx[1]))\n', (3596, 3622), True, 'import numpy as np\n'), ((3700, 3777), 'numpy.vstack', 'np.vstack', (['(pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:])'], {}), '((pmat[1, 0][:, -self.neps:], qmat[1, 0][:-self.neps, -self.neps:]))\n', (3709, 3777), True, 'import numpy as np\n'), ((3915, 3987), 'scipy.linalg.solve_discrete_lyapunov', 'sl.solve_discrete_lyapunov', (['F[dimp:, dimp:]', 'self.filter.Q[dimp:, dimp:]'], {}), '(F[dimp:, dimp:], self.filter.Q[dimp:, dimp:])\n', (3941, 3987), True, 'import scipy.linalg as sl\n'), ((8469, 8497), 'grgrlib.multiprocessing.serializer', 'serializer', (['self.filter.npas'], {}), '(self.filter.npas)\n', (8479, 8497), False, 'from grgrlib.multiprocessing import serializer\n'), ((9362, 9400), 'numpy.empty', 'np.empty', (['(means.shape[0] - 1, dimeps)'], {}), '((means.shape[0] - 1, dimeps))\n', (9370, 9400), True, 'import numpy as np\n'), ((9854, 9874), 'numpy.hstack', 'np.hstack', (['(pobs, q)'], {}), '((pobs, q))\n', (9863, 9874), True, 'import numpy as np\n'), ((4821, 4853), 'scipy.linalg.solve_discrete_lyapunov', 'sl.solve_discrete_lyapunov', (['F', 'Q'], {}), '(F, Q)\n', (4847, 4853), True, 'import scipy.linalg as sl\n'), ((10637, 10651), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (10649, 10651), False, 'import sys\n'), ((5456, 5486), 'numpy.linalg.pinv', 'np.linalg.pinv', (['x'], {'rcond': 'rcond'}), '(x, rcond=rcond)\n', (5470, 5486), True, 'import numpy as np\n'), ((6490, 6501), 'time.time', 'time.time', ([], {}), '()\n', (6499, 6501), False, 'import time\n'), ((5891, 5902), 'time.time', 'time.time', ([], {}), '()\n', (5900, 5902), False, 'import time\n')]
|
"""Construction of 2D data tables from ASCII files obtained from models/simulations
or other sources.
Currently supported are the files generated for the papers:
| Nuclear Physics Meets the Sources of the Ultra-High Energy Cosmic Rays
| <NAME>, <NAME>, <NAME>
| Sci.Rep. 7 (2017) 1, 4882
| e-Print: 1607.07989 [astro-ph.HE]
| DOI: 10.1038/s41598-017-05120-7
| A new view on Auger data and cosmogenic neutrinos in light of different nuclear disintegration and air-shower models
| <NAME>, <NAME>, <NAME>, <NAME>
| Astrophys.J. 873 (2019) 1, 88
| e-Print: 1901.03338 [astro-ph.HE]
| DOI: 10.3847/1538-4357/ab05ce
"""
from os import listdir
from os.path import join
import numpy as np
from six import with_metaclass
from prince_data_utils import resource_path
from prince.util import info
class CrossSectionsFromAscii(object):
"""Each class derived from this one is expected to load the
data from some form of source independently and provide at the
end definitions for the parameters:
self.energy_grid, self.mothers_daughters, self.fragment_yields,
self.inel_mothers, self.inelastic_cross_sctions.
Args:
f_root (str): The root name of the tabulated files, e.g. CRP2_TALYS_.
"""
def __init__(self, f_root= 'CRP2_TALYS_'):
self.energy_grid = None
self.inel_mothers = None
self.mothers_daughters = None
self.inelastic_cross_sctions = None
self.fragment_yields = None
self._load(f_root)
assert self.energy_grid is not None
assert self.inel_mothers is not None
assert self.mothers_daughters is not None
assert self.inelastic_cross_sctions is not None
assert self.fragment_yields is not None
self._check_consistency()
def _load(self, f_root):
"""Load cross section tables from files into memory.
        Derived classes may override this method to load from other sources."""
if not f_root.endswith('_'):
f_root += '_'
f_root = join(resource_path, 'photo-nuclear',f_root)
info(0, 'Loading files', f_root + '*')
self.energy_grid = np.loadtxt(f_root + 'egrid.dat.bz2')*1e-3 # to GeV
self._inel_cs_tables = np.loadtxt(f_root + 'nonel.dat.bz2')
self._inel_fragment_yields = np.loadtxt(f_root + 'incl_i_j.dat.bz2')
assert self.energy_grid.shape[0] == \
self._inel_cs_tables.shape[1] - 1 == \
self._inel_fragment_yields.shape[1] - 2, \
'Tables e-grids inconsistent {0} != {1} != {2}'.format(
self.energy_grid.shape[0], self._inel_cs_tables.shape[1] - 1,
self._inel_fragment_yields.shape[1] - 2)
# Chunk the tables into their contents
self.inel_mothers = self._inel_cs_tables[:,0].astype('int')
self.inelastic_cross_sctions = self._inel_cs_tables[:,1:]*1e-27 #mbarn -> cm2
self.mothers_daughters = self._inel_fragment_yields[:,0:2].astype('int')
self.fragment_yields = self._inel_fragment_yields[:,2:]*1e-27 #mbarn -> cm2
def _check_consistency(self):
"""Some cross checks for dimenstions and consistency between
inelastic cross sections and yields are performed."""
assert self.inel_mothers.shape[0] == self.inelastic_cross_sctions.shape[0]
assert self.energy_grid.shape[0] == self.inelastic_cross_sctions.shape[1]
assert self.mothers_daughters.shape[0] == self.fragment_yields.shape[0]
assert self.energy_grid.shape[0] == self.fragment_yields.shape[1]
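# A minimal sketch of the loader contract described in the CrossSectionsFromAscii
# docstring; the class, file root and array contents below are hypothetical and
# only illustrate which attributes a derived _load has to fill in:
#
#     class ToyCrossSections(CrossSectionsFromAscii):
#         def _load(self, f_root):
#             self.energy_grid = np.geomspace(1e-3, 1e3, 100)       # GeV
#             self.inel_mothers = np.array([101])                    # protons only
#             self.inelastic_cross_sctions = np.zeros((1, 100))      # cm^2
#             self.mothers_daughters = np.array([[101, 100]])
#             self.fragment_yields = np.zeros((1, 100))              # cm^2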
class PhotoMesonCSFromPickle(CrossSectionsFromAscii):
def _load(self, f_root_):
"""Load from pickled dictionaries"""
f_root = join(resource_path, 'photo-meson',f_root_)
info(0, 'Loading files', f_root + '*')
raw_csec = np.load(f_root + 'crosssec.npy',allow_pickle=True)
energy, csec_proton, csec_neutron = raw_csec
csec = np.load(f_root + 'redistribution_logbins.npy',allow_pickle=True)
energy_redist, xbins, redist_proton, redist_neutron = csec
        # SOPHIA cross sections are in microbarn; convert them to cm^2 here
csec_proton, csec_neutron = csec_proton * 1e-30, csec_neutron * 1e-30
daughters_proton = np.array(list(redist_proton.keys()))
fragments_proton = np.array(list(redist_proton.values()))
daughters_neutron = np.array(list(redist_neutron.keys()))
fragments_neutron = np.array(list(redist_neutron.values()))
assert np.all(energy == energy_redist)
assert xbins.shape[0]-1 == fragments_proton.shape[-1] == fragments_neutron.shape[-1]
self.energy_grid = energy
self.xbins = xbins
self.inel_mothers = np.array([101, 100])
self.inelastic_cross_sctions = np.stack([csec_proton, csec_neutron])
channels_proton = np.stack(
[np.full(daughters_proton.shape, 101), daughters_proton],axis=1)
channels_neutron = np.stack(
[np.full(daughters_neutron.shape, 100), daughters_neutron],axis=1)
self.mothers_daughters = np.concatenate(
[channels_proton, channels_neutron])
# Fragments in raw data are in dn/dx, but we need dsigma/dx = dn/dx * sigma
self.fragment_yields = np.concatenate([
fragments_proton * csec_proton[None,:,None],
fragments_neutron * csec_neutron[None,:,None]
])
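        # Resulting array is indexed as (channel, energy, x bin): one row per
        # (mother, daughter) pair, one column per energy-grid point and one
        # slice per x bin, in cm^2 after the rescaling above.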
|
[
"numpy.stack",
"numpy.full",
"numpy.load",
"numpy.concatenate",
"prince.util.info",
"numpy.array",
"numpy.loadtxt",
"os.path.join",
"numpy.all"
] |
[((2030, 2074), 'os.path.join', 'join', (['resource_path', '"""photo-nuclear"""', 'f_root'], {}), "(resource_path, 'photo-nuclear', f_root)\n", (2034, 2074), False, 'from os.path import join\n'), ((2082, 2120), 'prince.util.info', 'info', (['(0)', '"""Loading files"""', "(f_root + '*')"], {}), "(0, 'Loading files', f_root + '*')\n", (2086, 2120), False, 'from prince.util import info\n'), ((2230, 2266), 'numpy.loadtxt', 'np.loadtxt', (["(f_root + 'nonel.dat.bz2')"], {}), "(f_root + 'nonel.dat.bz2')\n", (2240, 2266), True, 'import numpy as np\n'), ((2304, 2343), 'numpy.loadtxt', 'np.loadtxt', (["(f_root + 'incl_i_j.dat.bz2')"], {}), "(f_root + 'incl_i_j.dat.bz2')\n", (2314, 2343), True, 'import numpy as np\n'), ((3720, 3763), 'os.path.join', 'join', (['resource_path', '"""photo-meson"""', 'f_root_'], {}), "(resource_path, 'photo-meson', f_root_)\n", (3724, 3763), False, 'from os.path import join\n'), ((3771, 3809), 'prince.util.info', 'info', (['(0)', '"""Loading files"""', "(f_root + '*')"], {}), "(0, 'Loading files', f_root + '*')\n", (3775, 3809), False, 'from prince.util import info\n'), ((3829, 3880), 'numpy.load', 'np.load', (["(f_root + 'crosssec.npy')"], {'allow_pickle': '(True)'}), "(f_root + 'crosssec.npy', allow_pickle=True)\n", (3836, 3880), True, 'import numpy as np\n'), ((3948, 4013), 'numpy.load', 'np.load', (["(f_root + 'redistribution_logbins.npy')"], {'allow_pickle': '(True)'}), "(f_root + 'redistribution_logbins.npy', allow_pickle=True)\n", (3955, 4013), True, 'import numpy as np\n'), ((4506, 4537), 'numpy.all', 'np.all', (['(energy == energy_redist)'], {}), '(energy == energy_redist)\n', (4512, 4537), True, 'import numpy as np\n'), ((4721, 4741), 'numpy.array', 'np.array', (['[101, 100]'], {}), '([101, 100])\n', (4729, 4741), True, 'import numpy as np\n'), ((4781, 4818), 'numpy.stack', 'np.stack', (['[csec_proton, csec_neutron]'], {}), '([csec_proton, csec_neutron])\n', (4789, 4818), True, 'import numpy as np\n'), ((5081, 5132), 'numpy.concatenate', 'np.concatenate', (['[channels_proton, channels_neutron]'], {}), '([channels_proton, channels_neutron])\n', (5095, 5132), True, 'import numpy as np\n'), ((5261, 5378), 'numpy.concatenate', 'np.concatenate', (['[fragments_proton * csec_proton[None, :, None], fragments_neutron *\n csec_neutron[None, :, None]]'], {}), '([fragments_proton * csec_proton[None, :, None], \n fragments_neutron * csec_neutron[None, :, None]])\n', (5275, 5378), True, 'import numpy as np\n'), ((2148, 2184), 'numpy.loadtxt', 'np.loadtxt', (["(f_root + 'egrid.dat.bz2')"], {}), "(f_root + 'egrid.dat.bz2')\n", (2158, 2184), True, 'import numpy as np\n'), ((4868, 4904), 'numpy.full', 'np.full', (['daughters_proton.shape', '(101)'], {}), '(daughters_proton.shape, 101)\n', (4875, 4904), True, 'import numpy as np\n'), ((4982, 5019), 'numpy.full', 'np.full', (['daughters_neutron.shape', '(100)'], {}), '(daughters_neutron.shape, 100)\n', (4989, 5019), True, 'import numpy as np\n')]
|
import os
from mushroom_rl.utils.preprocessors import MinMaxPreprocessor
from mushroom_rl.utils.callbacks import PlotDataset
import numpy as np
from mushroom_rl.algorithms.policy_search import REINFORCE
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.approximators.regressor import Regressor
from mushroom_rl.core import Core, Logger
from mushroom_rl.environments import LQR
from mushroom_rl.policy import StateStdGaussianPolicy
from mushroom_rl.utils.dataset import compute_J
from mushroom_rl.utils.optimizers import AdaptiveOptimizer
from tqdm import tqdm
"""
This script shows how to use preprocessors and plot callback.
"""
tqdm.monitor_interval = 0
def experiment(n_epochs, n_iterations, ep_per_run, save_states_to_disk):
np.random.seed()
logger = Logger('plot_and_norm_example', results_dir=None)
logger.strong_line()
logger.info('Plotting and normalization example')
# MDP
mdp = LQR.generate(dimensions=2, max_pos=10., max_action=5., episodic=True)
approximator = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape)
sigma = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape)
sigma_weights = 2 * np.ones(sigma.weights_size)
sigma.set_weights(sigma_weights)
policy = StateStdGaussianPolicy(approximator, sigma)
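    # Both the mean and the state-dependent standard deviation of the Gaussian
    # policy are linear in the state, each backed by its own regressor above.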
# Agent
optimizer = AdaptiveOptimizer(eps=.01)
algorithm_params = dict(optimizer=optimizer)
agent = REINFORCE(mdp.info, policy, **algorithm_params)
# normalization callback
prepro = MinMaxPreprocessor(mdp_info=mdp.info)
# plotting callback
plotter = PlotDataset(mdp.info, obs_normalized=True)
# Train
core = Core(agent, mdp, callback_step=plotter, preprocessors=[prepro])
# training loop
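    # Each epoch collects n_iterations * ep_per_run episodes for learning
    # (fitting after every ep_per_run episodes) and then evaluates the current
    # policy on ep_per_run fresh episodes; J is the mean discounted return.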
for n in range(n_epochs):
core.learn(n_episodes=n_iterations * ep_per_run,
n_episodes_per_fit=ep_per_run)
dataset = core.evaluate(n_episodes=ep_per_run, render=False)
J = np.mean(compute_J(dataset,mdp.info.gamma))
logger.epoch_info(n+1, J=J)
if save_states_to_disk:
# save normalization / plot states to disk path
logger.info('Saving plotting and normalization data')
os.makedirs("./logs/plot_and_norm", exist_ok=True)
prepro.save("./logs/plot_and_norm/preprocessor.msh")
plotter.save_state("./logs/plot_and_norm/plotting_state")
# load states from disk path
logger.info('Loading preprocessor and plotter')
        prepro = MinMaxPreprocessor.load("./logs/plot_and_norm/preprocessor.msh")
plotter.load_state("./logs/plot_and_norm/plotting_state")
if __name__ == '__main__':
experiment(n_epochs=10, n_iterations=10, ep_per_run=100,
save_states_to_disk=False)
|
[
"mushroom_rl.utils.dataset.compute_J",
"mushroom_rl.core.Logger",
"numpy.random.seed",
"mushroom_rl.environments.LQR.generate",
"mushroom_rl.algorithms.policy_search.REINFORCE",
"os.makedirs",
"mushroom_rl.utils.preprocessors.MinMaxPreprocessor.load",
"numpy.ones",
"mushroom_rl.utils.preprocessors.MinMaxPreprocessor",
"mushroom_rl.core.Core",
"mushroom_rl.utils.callbacks.PlotDataset",
"mushroom_rl.policy.StateStdGaussianPolicy",
"mushroom_rl.approximators.regressor.Regressor",
"mushroom_rl.utils.optimizers.AdaptiveOptimizer"
] |
[((778, 794), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (792, 794), True, 'import numpy as np\n'), ((809, 858), 'mushroom_rl.core.Logger', 'Logger', (['"""plot_and_norm_example"""'], {'results_dir': 'None'}), "('plot_and_norm_example', results_dir=None)\n", (815, 858), False, 'from mushroom_rl.core import Core, Logger\n'), ((959, 1030), 'mushroom_rl.environments.LQR.generate', 'LQR.generate', ([], {'dimensions': '(2)', 'max_pos': '(10.0)', 'max_action': '(5.0)', 'episodic': '(True)'}), '(dimensions=2, max_pos=10.0, max_action=5.0, episodic=True)\n', (971, 1030), False, 'from mushroom_rl.environments import LQR\n'), ((1049, 1170), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape)\n', (1058, 1170), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((1238, 1359), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape)\n', (1247, 1359), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((1504, 1547), 'mushroom_rl.policy.StateStdGaussianPolicy', 'StateStdGaussianPolicy', (['approximator', 'sigma'], {}), '(approximator, sigma)\n', (1526, 1547), False, 'from mushroom_rl.policy import StateStdGaussianPolicy\n'), ((1577, 1604), 'mushroom_rl.utils.optimizers.AdaptiveOptimizer', 'AdaptiveOptimizer', ([], {'eps': '(0.01)'}), '(eps=0.01)\n', (1594, 1604), False, 'from mushroom_rl.utils.optimizers import AdaptiveOptimizer\n'), ((1665, 1712), 'mushroom_rl.algorithms.policy_search.REINFORCE', 'REINFORCE', (['mdp.info', 'policy'], {}), '(mdp.info, policy, **algorithm_params)\n', (1674, 1712), False, 'from mushroom_rl.algorithms.policy_search import REINFORCE\n'), ((1756, 1793), 'mushroom_rl.utils.preprocessors.MinMaxPreprocessor', 'MinMaxPreprocessor', ([], {'mdp_info': 'mdp.info'}), '(mdp_info=mdp.info)\n', (1774, 1793), False, 'from mushroom_rl.utils.preprocessors import MinMaxPreprocessor\n'), ((1833, 1875), 'mushroom_rl.utils.callbacks.PlotDataset', 'PlotDataset', (['mdp.info'], {'obs_normalized': '(True)'}), '(mdp.info, obs_normalized=True)\n', (1844, 1875), False, 'from mushroom_rl.utils.callbacks import PlotDataset\n'), ((1900, 1963), 'mushroom_rl.core.Core', 'Core', (['agent', 'mdp'], {'callback_step': 'plotter', 'preprocessors': '[prepro]'}), '(agent, mdp, callback_step=plotter, preprocessors=[prepro])\n', (1904, 1963), False, 'from mushroom_rl.core import Core, Logger\n'), ((1425, 1452), 'numpy.ones', 'np.ones', (['sigma.weights_size'], {}), '(sigma.weights_size)\n', (1432, 1452), True, 'import numpy as np\n'), ((2437, 2487), 'os.makedirs', 'os.makedirs', (['"""./logs/plot_and_norm"""'], {'exist_ok': '(True)'}), "('./logs/plot_and_norm', exist_ok=True)\n", (2448, 2487), False, 'import os\n'), ((2726, 2790), 'mushroom_rl.utils.preprocessors.MinMaxPreprocessor.load', 'MinMaxPreprocessor.load', (['"""./logs/plot_and_norm/preprocessor.msh"""'], {}), "('./logs/plot_and_norm/preprocessor.msh')\n", (2749, 2790), False, 'from mushroom_rl.utils.preprocessors import MinMaxPreprocessor\n'), ((2211, 2245), 'mushroom_rl.utils.dataset.compute_J', 'compute_J', 
(['dataset', 'mdp.info.gamma'], {}), '(dataset, mdp.info.gamma)\n', (2220, 2245), False, 'from mushroom_rl.utils.dataset import compute_J\n')]
|
"""
Authors: <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
import numpy as np
import pytest
import sys
import os
# Athos DIR
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import Config, TFConfig, Compiler, assert_almost_equal
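# Every test below follows the same pattern: build a small tf.compat.v1 graph,
# run it in a Session to get the expected plaintext output, then compile the
# same graph for the selected backend with Compiler and check that the MPC
# output matches the TensorFlow output to 2 decimal places.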
@pytest.mark.parametrize(
"a_shape, out_shape",
[
([2, 3], [6]),
([6], [2, 3]),
([2, 3], [3, 2]),
([2, 3], [-1]), # Flatten 1-D,
([1], []), # convert to scalar,
([3, 2, 3], [2, -1]), # infer -1 as 9,
([3, 2, 3], [-1, 9]), # infer -1 as 2
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_reshape(test_dir, backend, a_shape, out_shape, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.reshape(a, out_shape, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
assert expected_output is not None
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, perm",
[([2, 3], [1, 0]), ([2, 4, 3], [0, 2, 1])], # normal transpose, with perm
)
@pytest.mark.parametrize("dtype", [np.single])
def test_transpose(test_dir, backend, a_shape, perm, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.transpose(a, perm, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, num_or_size_splits, axis",
[
([2, 10], 5, 1),
pytest.param(
[5, 7],
[1, 4, 2],
1,
marks=pytest.mark.skip(
reason="[split] don't support split into specific sizes (SplitV)"
),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_split(test_dir, backend, a_shape, num_or_size_splits, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.split(a, num_or_size_splits, axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
if type(output) == list:
tf_output = output[-1]
tf_expected_output = expected_output[-1]
else:
tf_output = output
tf_expected_output = expected_output
config = TFConfig(backend).add_input(a).add_output(tf_output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=tf_expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Squeeze
@pytest.mark.parametrize(
"a_shape, axis",
[
pytest.param(
[1, 2, 1, 3, 1, 1],
None,
marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
),
pytest.param(
[1, 2, 1, 3, 1, 1],
[2, 4],
marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_squeeze(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.squeeze(a, axis=axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, begin, size",
[
([3, 2, 3], [1, 0, 0], [1, 1, 3]),
([3, 2, 3], [1, 0, 0], [1, 2, 3]),
([3, 2, 3], [1, 0, 0], [2, 1, 3]),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_slice(test_dir, backend, a_shape, begin, size, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.slice(a, begin, size, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, b_shape, axis",
[
([2, 3], [3, 3], 0),
([2, 3, 2, 1], [2, 6, 2, 1], 1),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_concat(test_dir, backend, a_shape, b_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
b_inp = dtype(np.random.randn(*b_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
b = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=b_inp.shape, name="b")
output = tf.concat([a, b], axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp, b: b_inp})
config = TFConfig(backend).add_input(a).add_input(b).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp, b_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# ExpandDims
@pytest.mark.parametrize(
"a_shape, axis",
[
pytest.param(
[3, 2, 3], 1, marks=pytest.mark.skip(reason="[expand_dims] not supported")
),
pytest.param(
[2, 5], 0, marks=pytest.mark.skip(reason="[expand_dims] not supported")
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_expand_dims(test_dir, backend, a_shape, axis, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
output = tf.expand_dims(a, axis, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Pad
@pytest.mark.parametrize(
"a_shape, paddings, mode, constant_values",
[
([1, 2, 2, 1], [[1, 1], [1, 2], [1, 1], [1, 3]], "CONSTANT", 0),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"REFLECT",
0,
marks=pytest.mark.skip(reason="[pad] REFLECT not supported"),
),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"SYMMETRIC",
0,
marks=pytest.mark.skip(reason="[pad] SYMMETRIC not supported"),
),
pytest.param(
[2, 3],
[
[1, 1],
[2, 2],
],
"CONSTANT",
0,
marks=pytest.mark.skip(reason="[pad] Generic pad not supported"),
),
pytest.param(
[1, 2, 2, 1],
[[1, 1], [1, 2], [1, 1], [1, 3]],
"CONSTANT",
1.2,
marks=pytest.mark.skip(reason="[pad] non-zero padding not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
pad = tf.constant(paddings, name="paddings")
output = tf.pad(
a, pad, mode=mode, constant_values=constant_values, name="output"
)
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
# Tile
@pytest.mark.parametrize(
"a_shape, multiples", [([2, 3], [1, 2]), ([2, 3], [2, 1]), ([2, 3], [2, 2])]
)
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.skip(reason="[tile] Not supported")
def test_tile(test_dir, backend, a_shape, multiples, dtype):
graph = tf.Graph()
a_inp = dtype(np.random.randn(*a_shape))
with graph.as_default():
a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
mults = tf.constant(multiples, name="multiples")
output = tf.tile(a, mults, name="output")
with tf.compat.v1.Session(graph=graph) as sess:
expected_output = sess.run(output, feed_dict={a: a_inp})
config = TFConfig(backend).add_input(a).add_output(output)
compiler = Compiler(graph, config, test_dir)
mpc_output = compiler.compile_and_run([a_inp])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
|
[
"tensorflow.reshape",
"pytest.mark.parametrize",
"tests.utils.Compiler",
"tensorflow.split",
"pytest.mark.skip",
"numpy.random.randn",
"os.path.dirname",
"tensorflow.pad",
"tensorflow.concat",
"tests.utils.assert_almost_equal",
"tensorflow.compat.v1.Session",
"tensorflow.squeeze",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.tile",
"tensorflow.Graph",
"tensorflow.expand_dims",
"tensorflow.as_dtype",
"tensorflow.slice"
] |
[((1331, 1505), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, out_shape"""', '[([2, 3], [6]), ([6], [2, 3]), ([2, 3], [3, 2]), ([2, 3], [-1]), ([1], []),\n ([3, 2, 3], [2, -1]), ([3, 2, 3], [-1, 9])]'], {}), "('a_shape, out_shape', [([2, 3], [6]), ([6], [2, 3]),\n ([2, 3], [3, 2]), ([2, 3], [-1]), ([1], []), ([3, 2, 3], [2, -1]), ([3,\n 2, 3], [-1, 9])])\n", (1354, 1505), False, 'import pytest\n'), ((1646, 1691), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (1669, 1691), False, 'import pytest\n'), ((2433, 2521), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, perm"""', '[([2, 3], [1, 0]), ([2, 4, 3], [0, 2, 1])]'], {}), "('a_shape, perm', [([2, 3], [1, 0]), ([2, 4, 3], [0,\n 2, 1])])\n", (2456, 2521), False, 'import pytest\n'), ((2561, 2606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (2584, 2606), False, 'import pytest\n'), ((3635, 3680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (3658, 3680), False, 'import pytest\n'), ((5025, 5070), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (5048, 5070), False, 'import pytest\n'), ((5769, 5931), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, begin, size"""', '[([3, 2, 3], [1, 0, 0], [1, 1, 3]), ([3, 2, 3], [1, 0, 0], [1, 2, 3]), ([3,\n 2, 3], [1, 0, 0], [2, 1, 3])]'], {}), "('a_shape, begin, size', [([3, 2, 3], [1, 0, 0], [1,\n 1, 3]), ([3, 2, 3], [1, 0, 0], [1, 2, 3]), ([3, 2, 3], [1, 0, 0], [2, 1,\n 3])])\n", (5792, 5931), False, 'import pytest\n'), ((5967, 6012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (5990, 6012), False, 'import pytest\n'), ((6716, 6825), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, b_shape, axis"""', '[([2, 3], [3, 3], 0), ([2, 3, 2, 1], [2, 6, 2, 1], 1)]'], {}), "('a_shape, b_shape, axis', [([2, 3], [3, 3], 0), ([2,\n 3, 2, 1], [2, 6, 2, 1], 1)])\n", (6739, 6825), False, 'import pytest\n'), ((6857, 6902), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (6880, 6902), False, 'import pytest\n'), ((8081, 8126), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (8104, 8126), False, 'import pytest\n'), ((9910, 9955), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (9933, 9955), False, 'import pytest\n'), ((10793, 10899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a_shape, multiples"""', '[([2, 3], [1, 2]), ([2, 3], [2, 1]), ([2, 3], [2, 2])]'], {}), "('a_shape, multiples', [([2, 3], [1, 2]), ([2, 3], [\n 2, 1]), ([2, 3], [2, 2])])\n", (10816, 10899), False, 'import pytest\n'), ((10902, 10947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.single]'], {}), "('dtype', [np.single])\n", (10925, 10947), False, 'import pytest\n'), ((10949, 10996), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[tile] Not supported"""'}), "(reason='[tile] Not supported')\n", (10965, 10996), False, 'import pytest\n'), ((1768, 1778), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1776, 1778), True, 'import tensorflow as 
tf\n'), ((2230, 2263), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (2238, 2263), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((2319, 2408), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (2338, 2408), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((2680, 2690), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2688, 2690), True, 'import tensorflow as tf\n'), ((3101, 3134), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (3109, 3134), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((3190, 3279), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (3209, 3279), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((3770, 3780), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3778, 3780), True, 'import tensorflow as tf\n'), ((4401, 4434), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (4409, 4434), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((4490, 4582), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'tf_expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=tf_expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (4509, 4582), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((5142, 5152), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5150, 5152), True, 'import tensorflow as tf\n'), ((5566, 5599), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (5574, 5599), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((5655, 5744), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (5674, 5744), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((6089, 6099), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6097, 6099), True, 'import tensorflow as tf\n'), ((6513, 6546), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (6521, 6546), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((6602, 6691), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (6621, 6691), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((6982, 6992), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6990, 6992), True, 'import tensorflow as tf\n'), ((7559, 7592), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (7567, 7592), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((7655, 7744), 'tests.utils.assert_almost_equal', 
'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (7674, 7744), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((8202, 8212), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8210, 8212), True, 'import tensorflow as tf\n'), ((8625, 8658), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (8633, 8658), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((8714, 8803), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (8733, 8803), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((10050, 10060), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10058, 10060), True, 'import tensorflow as tf\n'), ((10583, 10616), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (10591, 10616), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((10672, 10761), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (10691, 10761), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((11070, 11080), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11078, 11080), True, 'import tensorflow as tf\n'), ((11544, 11577), 'tests.utils.Compiler', 'Compiler', (['graph', 'config', 'test_dir'], {}), '(graph, config, test_dir)\n', (11552, 11577), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((11633, 11722), 'tests.utils.assert_almost_equal', 'assert_almost_equal', ([], {'model_output': 'expected_output', 'mpc_tensor': 'mpc_output', 'precision': '(2)'}), '(model_output=expected_output, mpc_tensor=mpc_output,\n precision=2)\n', (11652, 11722), False, 'from tests.utils import Config, Compiler, assert_almost_equal\n'), ((1220, 1245), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1235, 1245), False, 'import os\n'), ((1797, 1822), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (1812, 1822), True, 'import numpy as np\n'), ((1956, 1995), 'tensorflow.reshape', 'tf.reshape', (['a', 'out_shape'], {'name': '"""output"""'}), "(a, out_shape, name='output')\n", (1966, 1995), True, 'import tensorflow as tf\n'), ((2005, 2038), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (2025, 2038), True, 'import tensorflow as tf\n'), ((2709, 2734), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (2724, 2734), True, 'import numpy as np\n'), ((2868, 2904), 'tensorflow.transpose', 'tf.transpose', (['a', 'perm'], {'name': '"""output"""'}), "(a, perm, name='output')\n", (2880, 2904), True, 'import tensorflow as tf\n'), ((2914, 2947), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (2934, 2947), True, 'import tensorflow as tf\n'), ((3799, 3824), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (3814, 3824), True, 'import numpy as np\n'), ((3958, 4010), 'tensorflow.split', 'tf.split', (['a', 
'num_or_size_splits', 'axis'], {'name': '"""output"""'}), "(a, num_or_size_splits, axis, name='output')\n", (3966, 4010), True, 'import tensorflow as tf\n'), ((4020, 4053), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (4040, 4053), True, 'import tensorflow as tf\n'), ((5171, 5196), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (5186, 5196), True, 'import numpy as np\n'), ((5330, 5369), 'tensorflow.squeeze', 'tf.squeeze', (['a'], {'axis': 'axis', 'name': '"""output"""'}), "(a, axis=axis, name='output')\n", (5340, 5369), True, 'import tensorflow as tf\n'), ((5379, 5412), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5399, 5412), True, 'import tensorflow as tf\n'), ((6118, 6143), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (6133, 6143), True, 'import numpy as np\n'), ((6277, 6316), 'tensorflow.slice', 'tf.slice', (['a', 'begin', 'size'], {'name': '"""output"""'}), "(a, begin, size, name='output')\n", (6285, 6316), True, 'import tensorflow as tf\n'), ((6326, 6359), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (6346, 6359), True, 'import tensorflow as tf\n'), ((7011, 7036), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (7026, 7036), True, 'import numpy as np\n'), ((7056, 7081), 'numpy.random.randn', 'np.random.randn', (['*b_shape'], {}), '(*b_shape)\n', (7071, 7081), True, 'import numpy as np\n'), ((7301, 7339), 'tensorflow.concat', 'tf.concat', (['[a, b]', 'axis'], {'name': '"""output"""'}), "([a, b], axis, name='output')\n", (7310, 7339), True, 'import tensorflow as tf\n'), ((7349, 7382), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (7369, 7382), True, 'import tensorflow as tf\n'), ((8231, 8256), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (8246, 8256), True, 'import numpy as np\n'), ((8390, 8428), 'tensorflow.expand_dims', 'tf.expand_dims', (['a', 'axis'], {'name': '"""output"""'}), "(a, axis, name='output')\n", (8404, 8428), True, 'import tensorflow as tf\n'), ((8438, 8471), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (8458, 8471), True, 'import tensorflow as tf\n'), ((10079, 10104), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (10094, 10104), True, 'import numpy as np\n'), ((10235, 10273), 'tensorflow.constant', 'tf.constant', (['paddings'], {'name': '"""paddings"""'}), "(paddings, name='paddings')\n", (10246, 10273), True, 'import tensorflow as tf\n'), ((10291, 10364), 'tensorflow.pad', 'tf.pad', (['a', 'pad'], {'mode': 'mode', 'constant_values': 'constant_values', 'name': '"""output"""'}), "(a, pad, mode=mode, constant_values=constant_values, name='output')\n", (10297, 10364), True, 'import tensorflow as tf\n'), ((10396, 10429), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10416, 10429), True, 'import tensorflow as tf\n'), ((11099, 11124), 'numpy.random.randn', 'np.random.randn', (['*a_shape'], {}), '(*a_shape)\n', (11114, 11124), True, 'import numpy as np\n'), ((11257, 11297), 'tensorflow.constant', 'tf.constant', (['multiples'], {'name': '"""multiples"""'}), "(multiples, name='multiples')\n", (11268, 11297), True, 'import tensorflow as tf\n'), ((11315, 11347), 'tensorflow.tile', 'tf.tile', (['a', 
'mults'], {'name': '"""output"""'}), "(a, mults, name='output')\n", (11322, 11347), True, 'import tensorflow as tf\n'), ((11357, 11390), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (11377, 11390), True, 'import tensorflow as tf\n'), ((1890, 1908), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (1901, 1908), True, 'import tensorflow as tf\n'), ((2802, 2820), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (2813, 2820), True, 'import tensorflow as tf\n'), ((3892, 3910), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (3903, 3910), True, 'import tensorflow as tf\n'), ((5264, 5282), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (5275, 5282), True, 'import tensorflow as tf\n'), ((6211, 6229), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (6222, 6229), True, 'import tensorflow as tf\n'), ((7149, 7167), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (7160, 7167), True, 'import tensorflow as tf\n'), ((7235, 7253), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (7246, 7253), True, 'import tensorflow as tf\n'), ((8324, 8342), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (8335, 8342), True, 'import tensorflow as tf\n'), ((10172, 10190), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (10183, 10190), True, 'import tensorflow as tf\n'), ((11192, 11210), 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), '(dtype)\n', (11203, 11210), True, 'import tensorflow as tf\n'), ((3499, 3587), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[split] don\'t support split into specific sizes (SplitV)"""'}), '(reason=\n "[split] don\'t support split into specific sizes (SplitV)")\n', (3515, 3587), False, 'import pytest\n'), ((4759, 4828), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[squeeze] Parametric squeeze not supported"""'}), "(reason='[squeeze] Parametric squeeze not supported')\n", (4775, 4828), False, 'import pytest\n'), ((4933, 5002), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[squeeze] Parametric squeeze not supported"""'}), "(reason='[squeeze] Parametric squeeze not supported')\n", (4949, 5002), False, 'import pytest\n'), ((7888, 7942), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[expand_dims] not supported"""'}), "(reason='[expand_dims] not supported')\n", (7904, 7942), False, 'import pytest\n'), ((8005, 8059), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[expand_dims] not supported"""'}), "(reason='[expand_dims] not supported')\n", (8021, 8059), False, 'import pytest\n'), ((9136, 9190), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] REFLECT not supported"""'}), "(reason='[pad] REFLECT not supported')\n", (9152, 9190), False, 'import pytest\n'), ((9355, 9411), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] SYMMETRIC not supported"""'}), "(reason='[pad] SYMMETRIC not supported')\n", (9371, 9411), False, 'import pytest\n'), ((9600, 9658), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] Generic pad not supported"""'}), "(reason='[pad] Generic pad not supported')\n", (9616, 9658), False, 'import pytest\n'), ((9824, 9887), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""[pad] non-zero padding not supported"""'}), "(reason='[pad] non-zero padding not supported')\n", (9840, 9887), False, 'import pytest\n')]
|
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.base.datetools import dates_from_str
import datetime
import time
from sklearn import svm
import json
feature_file_cases = '/Users/edwardgent/Downloads/NYT_US_COVID19.csv'
feature_file_tests = '/Users/edwardgent/Downloads/CT_US_COVID_TESTS.csv'
results_file = '/Users/edwardgent/Downloads/COVID-19_Hospital_Capacity_Metrics.csv'
test_threshold = 0.7
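# Fraction of the time series used to fit the SVMs; the remaining rows are held
# back in encode_results and used for prediction in __main__.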
dataf = pd.DataFrame(columns=['New Cases', 'New Deaths', 'New Tests', 'Ventilators', 'ICU'])
chicago_cases = []
chicago_deaths = []
chicago_dates = []
chicago_ventilators = []
chicago_icu = []
chicago_tests = []
with open(feature_file_cases, newline='') as fh:
spamreader = csv.reader(fh, delimiter=',', quotechar='|')
for item in spamreader:
if item[2] == 'Illinois':
chicago_cases.append(item[8])
chicago_deaths.append(item[9])
chicago_dates.append(item[0])
fh.close()
chicago_dates = dates_from_str(chicago_dates)
dataf['New Cases'] = chicago_cases
dataf['New Deaths'] = chicago_deaths
dataf.index = pd.DatetimeIndex(chicago_dates)
temp_dates = []
with open(feature_file_tests, newline='') as fh:
spamreader = csv.reader(fh, delimiter=',', quotechar='|')
for item in spamreader:
if item[1] == 'Illinois':
chicago_tests.append(item[14])
temp_dates.append(item[2].split(' ')[0])
fh.close()
temp_dates = dates_from_str(temp_dates)
temp_df = pd.DataFrame(columns=['New Tests'])
temp_df['New Tests'] = chicago_tests
temp_df.index = pd.DatetimeIndex(temp_dates)
dataf = dataf.join(temp_df, lsuffix='_caller', rsuffix='_other')
dataf = dataf[~dataf.index.duplicated(keep='first')]
temp_dates = []
with open(results_file, newline='') as fh:
spamreader = csv.reader(fh, delimiter=',', quotechar='|')
for item in spamreader:
if item[0] == 'Date':
pass
else:
chicago_icu.append(item[14])
chicago_ventilators.append(item[1])
temp_dates.append(item[0].replace('/','-'))
fh.close()
temp_dates = dates_from_str(temp_dates)
temp_df = pd.DataFrame(columns=['Ventilators', 'ICU'])
temp_df['Ventilators'] = chicago_ventilators
temp_df['ICU'] = chicago_icu
temp_df.index = pd.DatetimeIndex(temp_dates)
dataf = dataf.join(temp_df, lsuffix='_caller', rsuffix='_other')
dataf = dataf[~dataf.index.duplicated(keep='first')]
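# After the joins, dataf holds the Illinois daily case/death/test counts next to
# the ventilator and ICU capacity columns, indexed by date with duplicate dates
# dropped (keeping the first occurrence).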
def generate_model(dataf, train_col, train_features):
model = svm.SVC(probability=False, decision_function_shape='ovr', kernel='linear')
target_length = len(dataf['New Cases'])
target_length = int(test_threshold * target_length)
'''
cols_cases = dataf['New Cases']
cols_cases = cols_cases.tolist()[0:target_length]
cols_deaths = dataf['New Deaths']
cols_deaths = cols_deaths.tolist()[0:target_length]
cols_tests = dataf['New Tests_other']
cols_tests = cols_tests.tolist()[0:target_length]
feature_set = zip(cols_cases,cols_deaths,cols_tests)
feature_set = np.array(list(feature_set))
feature_set.reshape(133,3)
train_res = train_res.reshape(133,211)
train_res = train_res.ravel()
train_res_new = np.array_split(train_res, 133)
'''
#train_features = train_data[:,[3,4]].toarray()
#train_res = train_data[:,[1,2]].toarray()
#train_res = np.array(np.split(train_res,133))
'''
train_res_list = []
for item in train_res:
s = item[0]
train_res_list.append(''.join(str(s)))
print(train_res_list)
train_res = np.array(train_res_list)
'''
#train_res = train_res.reshape(133,211,1)
print(train_col)
#train_features = train_features.reshape(-1,1)
#train_res = train_res.reshape(-1,1)
print(train_features)
model.fit(train_features, np.array(train_col))
return model
def encode_results(dataf):
target_length = len(dataf['Ventilators_other'])
target_length = int(test_threshold*target_length)
cols_vent = dataf['Ventilators_other']
cols_vent = cols_vent.tolist()[0:target_length]
cols_icu = dataf['ICU_other']
cols_icu = cols_icu.tolist()[0:target_length]
backup_cols_vent = dataf['Ventilators_other'][target_length+1:-1]
backup_cols_icu = dataf['ICU_other'][target_length+1:-1]
dataf.drop(columns=['Ventilators_other', 'ICU_other'], inplace=True)
cols_cases = dataf['New Cases'][0:target_length]
cols_tests = dataf['New Tests_other'][0:target_length]
cols_deaths = dataf['New Deaths'][0:target_length]
backup_cols_cases = dataf['New Cases'][target_length+1:-1]
backup_cols_deaths = dataf['New Deaths'][target_length+1:-1]
backup_features = zip(backup_cols_deaths, backup_cols_cases)
backup_features = np.array(list(backup_features))
backup_features = backup_features.astype('int32')
train_icu = cols_icu
train_vent = cols_vent
train_features = zip(cols_deaths, cols_cases)
data_backup = zip(backup_cols_deaths,backup_cols_cases)
data_backup = np.array(list(data_backup))
data_backup = data_backup.astype('int32')
train_features = np.array(list(train_features)).astype('int32')
n_data=[]
for i in train_features:
n_data.append(list([int(j) for j in i]))
train_features = n_data #np.array(n_data)
for item in train_features:
for val in item:
item[item.index(val)] = int(val)
for item in train_vent:
train_vent[train_vent.index(item)] = int(item)
for item in train_icu:
train_icu[train_icu.index(item)] = int(item)
#my_list = []
#for ex in train_res:
# my_list.append(ex.tolist())
#print(train_res).
#train_res = np.array(dates_from_str())
return train_icu, train_vent, data_backup, backup_features, train_features
if __name__ == '__main__':
train_icu, train_vent, data_backup, backup_features, train_features = encode_results(dataf)
trained_model_icu = generate_model(dataf, train_icu, train_features)
trained_model_vent = generate_model(dataf, train_vent, train_features)
predictions_icu = trained_model_icu.predict(backup_features)
predictions_vent = trained_model_vent.predict(backup_features)
new_df = pd.DataFrame(columns=['Ventilators', 'ICU', 'New Deaths', 'New Cases'])
new_df['Ventilators'] = predictions_vent
new_df['ICU'] = predictions_icu
new_df['New Deaths'] = backup_features[:,0]
new_df['New Cases'] = backup_features[:,1]
new_df.reset_index(drop=True, inplace=True)
new_df = new_df.to_json(orient='records')
new_df = json.loads(new_df)
print(json.dumps(new_df, indent=4))
|
[
"pandas.DataFrame",
"csv.reader",
"json.loads",
"statsmodels.tsa.base.datetools.dates_from_str",
"json.dumps",
"pandas.DatetimeIndex",
"numpy.array",
"sklearn.svm.SVC"
] |
[((464, 552), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['New Cases', 'New Deaths', 'New Tests', 'Ventilators', 'ICU']"}), "(columns=['New Cases', 'New Deaths', 'New Tests', 'Ventilators',\n 'ICU'])\n", (476, 552), True, 'import pandas as pd\n'), ((997, 1026), 'statsmodels.tsa.base.datetools.dates_from_str', 'dates_from_str', (['chicago_dates'], {}), '(chicago_dates)\n', (1011, 1026), False, 'from statsmodels.tsa.base.datetools import dates_from_str\n'), ((1113, 1144), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['chicago_dates'], {}), '(chicago_dates)\n', (1129, 1144), True, 'import pandas as pd\n'), ((1456, 1482), 'statsmodels.tsa.base.datetools.dates_from_str', 'dates_from_str', (['temp_dates'], {}), '(temp_dates)\n', (1470, 1482), False, 'from statsmodels.tsa.base.datetools import dates_from_str\n'), ((1494, 1529), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['New Tests']"}), "(columns=['New Tests'])\n", (1506, 1529), True, 'import pandas as pd\n'), ((1584, 1612), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['temp_dates'], {}), '(temp_dates)\n', (1600, 1612), True, 'import pandas as pd\n'), ((2114, 2140), 'statsmodels.tsa.base.datetools.dates_from_str', 'dates_from_str', (['temp_dates'], {}), '(temp_dates)\n', (2128, 2140), False, 'from statsmodels.tsa.base.datetools import dates_from_str\n'), ((2152, 2196), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Ventilators', 'ICU']"}), "(columns=['Ventilators', 'ICU'])\n", (2164, 2196), True, 'import pandas as pd\n'), ((2288, 2316), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['temp_dates'], {}), '(temp_dates)\n', (2304, 2316), True, 'import pandas as pd\n'), ((736, 780), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(fh, delimiter=',', quotechar='|')\n", (746, 780), False, 'import csv\n'), ((1228, 1272), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(fh, delimiter=',', quotechar='|')\n", (1238, 1272), False, 'import csv\n'), ((1809, 1853), 'csv.reader', 'csv.reader', (['fh'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(fh, delimiter=',', quotechar='|')\n", (1819, 1853), False, 'import csv\n'), ((2503, 2577), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(False)', 'decision_function_shape': '"""ovr"""', 'kernel': '"""linear"""'}), "(probability=False, decision_function_shape='ovr', kernel='linear')\n", (2510, 2577), False, 'from sklearn import svm\n'), ((6199, 6270), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Ventilators', 'ICU', 'New Deaths', 'New Cases']"}), "(columns=['Ventilators', 'ICU', 'New Deaths', 'New Cases'])\n", (6211, 6270), True, 'import pandas as pd\n'), ((6555, 6573), 'json.loads', 'json.loads', (['new_df'], {}), '(new_df)\n', (6565, 6573), False, 'import json\n'), ((3802, 3821), 'numpy.array', 'np.array', (['train_col'], {}), '(train_col)\n', (3810, 3821), True, 'import numpy as np\n'), ((6584, 6612), 'json.dumps', 'json.dumps', (['new_df'], {'indent': '(4)'}), '(new_df, indent=4)\n', (6594, 6612), False, 'import json\n')]
|
# coding: utf-8
import datetime
import pytest
import numpy as np
from ...models.transition.linear import ConstantVelocity
from ...predictor.information import InformationKalmanPredictor
from ...predictor.kalman import KalmanPredictor
from ...types.state import InformationState, GaussianState
from ...types.array import StateVector, CovarianceMatrix
@pytest.mark.parametrize(
"PredictorClass, transition_model, prior_mean, prior_covar",
[
( # Standard Kalman
InformationKalmanPredictor,
ConstantVelocity(noise_diff_coeff=0.1),
StateVector([-6.45, 0.7]),
CovarianceMatrix([[4.1123, 0.0013],
[0.0013, 0.0365]])
)
],
ids=["standard"]
)
def test_information(PredictorClass, transition_model,
prior_mean, prior_covar):
# Define time related variables
timestamp = datetime.datetime.now()
timediff = 2 # 2sec
new_timestamp = timestamp + datetime.timedelta(seconds=timediff)
# First do prediction in standard way
test_state = GaussianState(prior_mean, prior_covar, timestamp=timestamp)
test_predictor = KalmanPredictor(transition_model)
test_prediction = test_predictor.predict(test_state, timestamp=new_timestamp)
# define the precision matrix and information state
precision_matrix = np.linalg.inv(prior_covar)
info_state_mean = precision_matrix @ prior_mean
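    # Information form stores Y = P^-1 and y = Y @ x, so the prediction made in
    # information space should map back onto the same Gaussian prediction above.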
# Define prior information state
prior = InformationState(info_state_mean, precision_matrix, timestamp=timestamp)
# Initialise a Information filter predictor
predictor = PredictorClass(transition_model=transition_model)
# Perform and assert state prediction
prediction = predictor.predict(prior=prior,
timestamp=new_timestamp)
# reconstruct the state vector and covariance matrix
pred_covar = np.linalg.inv(prediction.precision)
pred_mean = pred_covar @ prediction.state_vector
# And do the tests
assert(np.allclose(predictor._transition_function(prior,
time_interval=new_timestamp-timestamp),
test_prediction.state_vector, 0, atol=1e-14))
assert(np.allclose(pred_mean,
test_prediction.state_vector, 0, atol=1.e-14))
assert(np.allclose(pred_covar,
test_prediction.covar, 0, atol=1.e-14))
assert(prediction.timestamp == new_timestamp)
# test that we can get to the inverse matrix
class ConstantVelocitywithInverse(ConstantVelocity):
def inverse_matrix(self, **kwargs):
return np.linalg.inv(self.matrix(**kwargs))
transition_model_winv = ConstantVelocitywithInverse(noise_diff_coeff=0.1)
predictor_winv = PredictorClass(transition_model_winv)
# Test this still works
prediction_from_inv = predictor_winv.predict(prior=prior, timestamp=new_timestamp)
assert (np.allclose(prediction.state_vector, prediction_from_inv.state_vector, 0, atol=1.e-14))
# TODO: Test with Control Model
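# Added editorial note (not part of the original test file): the assertions above
# rely on the information-form / Gaussian-form correspondence, i.e. for a predicted
# Gaussian N(x_pred, P_pred) the information state satisfies
#     Y_pred = inv(P_pred),   y_pred = Y_pred @ x_pred,
# which is exactly how pred_covar and pred_mean are reconstructed. To run only this
# test, something like the following should work (the exact path is an assumption
# about the package layout):
#     pytest stonesoup/predictor/tests/test_information.py -k standard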
|
[
"numpy.allclose",
"numpy.linalg.inv",
"datetime.datetime.now",
"datetime.timedelta"
] |
[((903, 926), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (924, 926), False, 'import datetime\n'), ((1358, 1384), 'numpy.linalg.inv', 'np.linalg.inv', (['prior_covar'], {}), '(prior_covar)\n', (1371, 1384), True, 'import numpy as np\n'), ((1901, 1936), 'numpy.linalg.inv', 'np.linalg.inv', (['prediction.precision'], {}), '(prediction.precision)\n', (1914, 1936), True, 'import numpy as np\n'), ((2249, 2316), 'numpy.allclose', 'np.allclose', (['pred_mean', 'test_prediction.state_vector', '(0)'], {'atol': '(1e-14)'}), '(pred_mean, test_prediction.state_vector, 0, atol=1e-14)\n', (2260, 2316), True, 'import numpy as np\n'), ((2353, 2414), 'numpy.allclose', 'np.allclose', (['pred_covar', 'test_prediction.covar', '(0)'], {'atol': '(1e-14)'}), '(pred_covar, test_prediction.covar, 0, atol=1e-14)\n', (2364, 2414), True, 'import numpy as np\n'), ((2965, 3054), 'numpy.allclose', 'np.allclose', (['prediction.state_vector', 'prediction_from_inv.state_vector', '(0)'], {'atol': '(1e-14)'}), '(prediction.state_vector, prediction_from_inv.state_vector, 0,\n atol=1e-14)\n', (2976, 3054), True, 'import numpy as np\n'), ((984, 1020), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'timediff'}), '(seconds=timediff)\n', (1002, 1020), False, 'import datetime\n')]
|
'''Set of functions to construct a graph as a combination of
smaller subgraphs (of a particular shape, defined in the shapes.py file)
'''
import graphwave
import math
import networkx as nx
import numpy as np
from shapes import *
from utils.utils import *
from shapes.shapes import *
def build_structure(width_basis, basis_type, list_shapes, start=0,
rdm_basis_plugins =False, add_random_edges=0,
plot=False, savefig=False):
'''This function creates a basis (torus, string, or cycle)
and attaches elements of the type in the list randomly along the basis.
Possibility to add random edges afterwards.
INPUT:
--------------------------------------------------------------------------------------
width_basis : width (in terms of number of nodes) of the basis
basis_type : (torus, string, or cycle)
shapes : list of shape list (1st arg: type of shape,
                            next args: args for building the shape,
except for the start)
start : initial nb for the first node
rdm_basis_plugins: boolean. Should the shapes be randomly placed
along the basis (True) or regularly (False)?
add_random_edges : nb of edges to randomly add on the structure
plot,savefig : plotting and saving parameters
OUTPUT:
--------------------------------------------------------------------------------------
basis : a nx graph with the particular shape
colors : labels for each role
'''
basis, role_id = eval(basis_type)(start, width_basis)
attrs = {}
for node in basis.nodes:
attrs[node] = {"attr": np.array([10, 10, 10, 10, 10, basis.degree[node]])}
nx.set_node_attributes(basis, attrs)
n_basis, n_shapes = nx.number_of_nodes(basis), len(list_shapes)
start += n_basis # indicator of the id of the next node
    # Sample (without replacement) where to attach the new motifs
if rdm_basis_plugins is True:
plugins = np.random.choice(n_basis, n_shapes, replace=False)
else:
spacing = math.floor(width_basis / n_shapes)
plugins = [int(k * spacing) for k in range(n_shapes)]
communities = [0] * n_basis
seen_shapes = {'basis': [0, n_basis]}
for p in plugins:
role_id[p] += 1
for shape_id, shape in enumerate(list_shapes):
shape_type = shape[0]
args = [start]
if len(shape)>1:
args += shape[1:]
args += [0]
args += [shape_id * 5]
print("args", *args)
print(shape_type)
graph_s, roles_graph_s = eval(shape_type)(*args)
n_s = nx.number_of_nodes(graph_s)
try:
col_start = seen_shapes[shape_type][0]
except:
col_start = np.max(role_id) + 1
seen_shapes[shape_type] = [col_start, n_s]
# Attach the shape to the basis
basis.add_nodes_from(graph_s.nodes(data=True))
basis.add_edges_from(graph_s.edges())
basis.add_edges_from([(start, plugins[shape_id])])
role_id[plugins[shape_id]] += (-2 - 10 * seen_shapes[shape_type][0])
communities += [shape_id] * n_s
temp_labels = [r + col_start for r in roles_graph_s]
temp_labels[0] += 100 * seen_shapes[shape_type][0]
role_id += temp_labels
start += n_s
if add_random_edges > 0:
# add random edges between nodes:
for p in range(add_random_edges):
src, dest = np.random.choice(nx.number_of_nodes(basis),
2, replace=False)
print (src, dest)
basis.add_edges_from([(src, dest)])
if plot is True: plot_networkx(basis, role_id)
return basis, communities, plugins, role_id
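# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes the shapes module provides a "cycle" basis and a "house" motif; those
# names are assumptions about shapes.py rather than guaranteed identifiers:
#
#     basis, communities, plugins, role_id = build_structure(
#         width_basis=300, basis_type="cycle",
#         list_shapes=[["house"]] * 80, start=0,
#         rdm_basis_plugins=False, add_random_edges=0, plot=False)
#
# communities gives, per node, the motif it belongs to, role_id its structural
# role, and plugins the basis nodes to which the motifs were attached.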
def build_lego_structure(list_shapes, start=0, plot=False, savefig=False,
bkbone_graph_type='nx.connected_watts_strogatz_graph',
bkbone_graph_args=[4, 0.4], save2text='', add_node=10):
'''This function creates a graph from a list of building blocks on top
of a backbone graph
INPUT:
---------------------------------------------------------------------------------
list_shapes : list of shape list (1st arg: type of shape,
next args: args for building the shape, except
for the start)
bkbone_graph_type : which type of backbone graph
(default= 'nx.connected_watts_strogatz_graph')
    add_node              : number of "empty nodes" to add to the graph structure, i.e.,
nodes in the graph that do not belong to a
particular clique
bkbone_graph_args : arguments for generating the backbone graph
                            (except for the nb of nodes, which
                            is automatically computed)
start : initial node nb
plot, savefig,save2txt: plotting and saving parameters
OUTPUT:
---------------------------------------------------------------------------------
graph : a nx graph (association of cliques/motifs
planted along a backbone structure)
communities : motif Id
role_labels : role id
label_shape : label/class of the motif. This induces
different levels of similarities among motifs
'''
graph = nx.Graph()
shape_id = [] # labels for the different shapes
role_labels = [] # labels for the different shapes
communities = [] # roles in the network
seen_shapes = {}
start = graph.number_of_nodes()
for nb_shape, shape in enumerate(list_shapes):
shape_type = shape[0]
try:
role_start, shape_id_start = seen_shapes[shape_type]
except:
if len(role_labels) > 0:
seen_shapes[shape_type] = [np.max(role_labels) + 1, np.max(shape_id) + 1]
role_start, shape_id_start = seen_shapes[shape_type]
else:
seen_shapes[shape_type] = [0, 0]
role_start, shape_id_start = 0, 0
args = [start]
args += shape[1:]
args += [role_start]
graph_s, roles = eval(shape_type)(*args)
# Attach the shape to the basis
graph.add_nodes_from(graph_s.nodes())
graph.add_edges_from(graph_s.edges())
communities += [nb_shape] * nx.number_of_nodes(graph_s)
role_labels += roles
shape_id += [shape_id_start] * nx.number_of_nodes(graph_s)
start += graph_s.number_of_nodes()
    # Now we link the different shapes by attaching them to the underlying
# graph structure:
n_nodes, n_shapes = graph.number_of_nodes(), len(list_shapes)
graph.add_nodes_from(range(n_nodes, n_nodes + add_node))
role_labels += [n_shapes + 1] * add_node
communities += range(n_shapes, n_shapes + add_node)
shape_id += [-1] * add_node
# generate back_bone Graph
bkbone_graph_args.insert(0, n_shapes + add_node)
bkbone_graph = eval(bkbone_graph_type)(*bkbone_graph_args)
for e in bkbone_graph.edges():
ii = np.random.choice(np.where(np.array(communities) == e[0])[0], 1)[0]
jj = np.random.choice(np.where(np.array(communities) == e[1])[0], 1)[0]
graph.add_edges_from([(ii, jj)])
if plot is True: plot_networkx(graph, role_labels)
if len(save2text) > 0: saveNet2txt(graph, colors=role_labels, name='net', path=save2text)
return graph, communities, role_labels, shape_id
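# Added note: bkbone_graph_args is prepended with the total number of building
# blocks before the backbone graph is created, so the defaults expand to
#     nx.connected_watts_strogatz_graph(n_shapes + add_node, 4, 0.4)
# and each backbone edge is then realised as an edge between a randomly chosen
# node of the two corresponding blocks.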
def create_bigger_network(nb_cells, width_cell, list_cell_shapes,
rdm_basis_plugins=True, cell_type="cycle"):
''' Automatically creates a big network by linking several instances of a
graph created by build_structure(width_basis, basis_type, list_shapes,..)
'''
width_basis, basis_type = width_cell[0]
list_shapes = list_cell_shapes[0]
    graph, _, plugins, roles = build_structure(width_basis, basis_type,
                                               list_shapes, start=0,
                                               rdm_basis_plugins=rdm_basis_plugins,
                                               add_random_edges=0, plot=False)
    plugins = list(plugins)  # may be a numpy array; use a list so it can be extended below
start = graph.number_of_nodes()
for i in range(1, nb_cells):
width_basis, basis_type = width_cell[i]
list_shapes = list_cell_shapes[i]
        graph_i, _, plugins_i, roles_i = build_structure(width_basis,
                                                         basis_type,
                                                         list_shapes,
                                                         start=start,
                                                         add_random_edges=0,
                                                         plot=False)
graph.add_nodes_from(graph_i.nodes())
graph.add_edges_from(graph_i.edges())
graph.add_edges_from([(start, start + 1)])
start += graph_i.number_of_nodes()
roles += roles_i
plugins += plugins_i
return graph, roles, plugins
|
[
"networkx.set_node_attributes",
"math.floor",
"numpy.max",
"networkx.Graph",
"numpy.array",
"numpy.random.choice",
"networkx.number_of_nodes"
] |
[((1827, 1863), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['basis', 'attrs'], {}), '(basis, attrs)\n', (1849, 1863), True, 'import networkx as nx\n'), ((5620, 5630), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5628, 5630), True, 'import networkx as nx\n'), ((1888, 1913), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['basis'], {}), '(basis)\n', (1906, 1913), True, 'import networkx as nx\n'), ((2116, 2166), 'numpy.random.choice', 'np.random.choice', (['n_basis', 'n_shapes'], {'replace': '(False)'}), '(n_basis, n_shapes, replace=False)\n', (2132, 2166), True, 'import numpy as np\n'), ((2195, 2229), 'math.floor', 'math.floor', (['(width_basis / n_shapes)'], {}), '(width_basis / n_shapes)\n', (2205, 2229), False, 'import math\n'), ((2749, 2776), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph_s'], {}), '(graph_s)\n', (2767, 2776), True, 'import networkx as nx\n'), ((1771, 1821), 'numpy.array', 'np.array', (['[10, 10, 10, 10, 10, basis.degree[node]]'], {}), '([10, 10, 10, 10, 10, basis.degree[node]])\n', (1779, 1821), True, 'import numpy as np\n'), ((6651, 6678), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph_s'], {}), '(graph_s)\n', (6669, 6678), True, 'import networkx as nx\n'), ((6747, 6774), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph_s'], {}), '(graph_s)\n', (6765, 6774), True, 'import networkx as nx\n'), ((3600, 3625), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['basis'], {}), '(basis)\n', (3618, 3625), True, 'import networkx as nx\n'), ((2881, 2896), 'numpy.max', 'np.max', (['role_id'], {}), '(role_id)\n', (2887, 2896), True, 'import numpy as np\n'), ((6122, 6141), 'numpy.max', 'np.max', (['role_labels'], {}), '(role_labels)\n', (6128, 6141), True, 'import numpy as np\n'), ((6147, 6163), 'numpy.max', 'np.max', (['shape_id'], {}), '(shape_id)\n', (6153, 6163), True, 'import numpy as np\n'), ((7397, 7418), 'numpy.array', 'np.array', (['communities'], {}), '(communities)\n', (7405, 7418), True, 'import numpy as np\n'), ((7477, 7498), 'numpy.array', 'np.array', (['communities'], {}), '(communities)\n', (7485, 7498), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
MultiDatasetLoader class is used by the DatasetLoader class to load multiple
datasets and iterate over them at a more granular, per-batch level.
"""
import logging
import warnings
import numpy as np
from mmf.common.registry import registry
from mmf.utils.build import build_dataloader_and_sampler, build_dataset
from mmf.utils.distributed import broadcast_scalar, is_dist_initialized, is_master
from mmf.utils.general import get_batch_size
logger = logging.getLogger(__name__)
class MultiDatasetLoader:
"""
MultiDatasetLoader class that is used for training on multiple datasets together.
"""
def __init__(self, dataset_type="train"):
self._dataset_type = dataset_type
self._is_master = is_master()
self._datasets = []
self._loaders = []
self._samplers = []
self._iterators = []
self._total_length = 0
self._per_dataset_lengths = []
self._num_datasets = 0
self._finished_iterators = {}
self._used_once = {}
@property
def dataset_type(self):
return self._dataset_type
@property
def current_dataset_name(self):
return self.current_dataset.name
@property
def num_datasets(self):
return self._num_datasets
@property
def datasets(self):
return self._datasets
@property
def loaders(self):
return self._loaders
@property
def samplers(self):
return self._samplers
@property
def iterators(self):
return self._iterators
@iterators.setter
def iterators(self, iterators):
self._iterators = iterators
@property
def current_dataset(self):
return self._chosen_dataset
    # Setter only for properties which users should also be able to set
@current_dataset.setter
def current_dataset(self, dataset):
self._chosen_dataset = dataset
@property
def current_loader(self):
return self._chosen_loader
@current_loader.setter
def current_loader(self, loader):
self._chosen_loader = loader
@property
def current_index(self):
return self._loader_index
@current_index.setter
def current_index(self, index: int):
self._loader_index = index
def get_datasets(self):
return self.datasets
@property
def first_loader(self):
return self.loaders[0]
def _process_datasets(self):
if "datasets" not in self.config:
logger.warning("No datasets attribute present. Setting default to vqa2.")
datasets = "vqa2"
else:
datasets = self.config.datasets
if type(datasets) == str:
datasets = list(map(lambda x: x.strip(), datasets.split(",")))
self._given_datasets = datasets
def load(self, config):
self.build_datasets(config)
self.build_dataloaders()
def build_datasets(self, config):
self.config = config
self._process_datasets()
for dataset in self._given_datasets:
if dataset in self.config.dataset_config:
dataset_config = self.config.dataset_config[dataset]
else:
raise RuntimeError(
f"Dataset {dataset} is missing from " "dataset_config in config."
)
dataset_instance = build_dataset(dataset, dataset_config, self.dataset_type)
if dataset_instance is None:
continue
self.datasets.append(dataset_instance)
if hasattr(dataset_instance, "__len__"):
self._per_dataset_lengths.append(len(dataset_instance))
self._total_length += len(dataset_instance)
self._num_datasets = len(self.datasets)
self.current_index = 0
self.current_dataset = self.datasets[self.current_index]
self._infer_dataset_probabilities()
def build_dataloaders(self):
assert len(self._datasets) > 0, "Call build_datasets first"
for dataset_instance in self.datasets:
loader_instance, sampler_instance = build_dataloader_and_sampler(
dataset_instance, self.config.training
)
self.loaders.append(loader_instance)
self.samplers.append(sampler_instance)
self.current_loader = self.loaders[self.current_index]
def _infer_dataset_probabilities(self):
self._dataset_probabilities = [
1 / self._num_datasets for _ in range(self.num_datasets)
]
training = self.config.get("training", {})
self._proportional_sampling = training.get(
"dataset_size_proportional_sampling", True
)
if self._dataset_type != "train":
            # If it is val or test, all datasets need to be fully iterated,
            # as metrics will be calculated in eval mode over the complete
            # datasets
self._proportional_sampling = True
if self._proportional_sampling is True and len(self._per_dataset_lengths) > 0:
self._dataset_probabilities = self._per_dataset_lengths[:]
self._dataset_probabilities = [
prob / self._total_length for prob in self._dataset_probabilities
]
def __len__(self):
        # Since this is an iterator, we need to return total length == number of batches
return self._total_length // get_batch_size()
def __iter__(self):
if self._num_datasets == 1:
return iter(self.loaders[0])
# Clear off old iterators
self.iterators = []
for loader in self.loaders:
self.iterators.append(iter(loader))
self._chosen_iterator = self.iterators[self.current_index]
return self
def __next__(self):
try:
next_batch = next(self._chosen_iterator)
except StopIteration:
if (
self._proportional_sampling is True
or len(self._used_once) != self.num_datasets
):
self._finished_iterators[self.current_index] = 1
if len(self._finished_iterators) == self.num_datasets:
raise
else:
self.change_dataloader()
next_batch = next(self._chosen_iterator)
else:
raise
self._used_once[self.current_index] = 1
return next_batch
def change_dataloader(self):
if self.num_datasets <= 1:
return
choice = 0
if self._is_master:
choice = np.random.choice(
self.num_datasets, 1, p=self._dataset_probabilities
)[0]
while choice in self._finished_iterators:
choice = np.random.choice(
self.num_datasets, 1, p=self._dataset_probabilities
)[0]
choice = broadcast_scalar(choice, 0, device=registry.get("current_device"))
self.current_index = choice
self.current_dataset = self.datasets[self.current_index]
self.current_loader = self.loaders[self.current_index]
self._chosen_iterator = self.iterators[self.current_index]
def verbose_dump(self, *args, **kwargs):
self._chosen_dataset.verbose_dump(*args, **kwargs)
def prepare_batch(self, batch):
if not hasattr(self._chosen_dataset, "prepare_batch"):
warnings.warn(
f"{self._chosen_dataset.dataset_name} doesn't define 'prepare_batch' "
+ "method. You are expected to prepare and move your batch to "
+ "CUDA device yourself."
)
else:
batch = self._chosen_dataset.prepare_batch(batch)
self.change_dataloader()
return batch
def seed_sampler(self, epoch):
if is_dist_initialized():
for sampler in self._samplers:
if sampler is not None and hasattr(sampler, "set_epoch"):
sampler.set_epoch(epoch)
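# Added note: with dataset_size_proportional_sampling enabled, the sampling
# probabilities are simply the per-dataset lengths divided by the total length.
# For example (hypothetical sizes), two datasets with 100 and 300 samples give
# probabilities [0.25, 0.75], so change_dataloader() picks the larger dataset
# three times as often on average.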
|
[
"numpy.random.choice",
"mmf.utils.distributed.is_master",
"logging.getLogger",
"mmf.utils.general.get_batch_size",
"mmf.common.registry.registry.get",
"mmf.utils.build.build_dataset",
"mmf.utils.build.build_dataloader_and_sampler",
"warnings.warn",
"mmf.utils.distributed.is_dist_initialized"
] |
[((462, 489), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (479, 489), False, 'import logging\n'), ((735, 746), 'mmf.utils.distributed.is_master', 'is_master', ([], {}), '()\n', (744, 746), False, 'from mmf.utils.distributed import broadcast_scalar, is_dist_initialized, is_master\n'), ((7852, 7873), 'mmf.utils.distributed.is_dist_initialized', 'is_dist_initialized', ([], {}), '()\n', (7871, 7873), False, 'from mmf.utils.distributed import broadcast_scalar, is_dist_initialized, is_master\n'), ((3362, 3419), 'mmf.utils.build.build_dataset', 'build_dataset', (['dataset', 'dataset_config', 'self.dataset_type'], {}), '(dataset, dataset_config, self.dataset_type)\n', (3375, 3419), False, 'from mmf.utils.build import build_dataloader_and_sampler, build_dataset\n'), ((4111, 4179), 'mmf.utils.build.build_dataloader_and_sampler', 'build_dataloader_and_sampler', (['dataset_instance', 'self.config.training'], {}), '(dataset_instance, self.config.training)\n', (4139, 4179), False, 'from mmf.utils.build import build_dataloader_and_sampler, build_dataset\n'), ((5429, 5445), 'mmf.utils.general.get_batch_size', 'get_batch_size', ([], {}), '()\n', (5443, 5445), False, 'from mmf.utils.general import get_batch_size\n'), ((7436, 7624), 'warnings.warn', 'warnings.warn', (['(f"{self._chosen_dataset.dataset_name} doesn\'t define \'prepare_batch\' " +\n \'method. You are expected to prepare and move your batch to \' +\n \'CUDA device yourself.\')'], {}), '(\n f"{self._chosen_dataset.dataset_name} doesn\'t define \'prepare_batch\' " +\n \'method. You are expected to prepare and move your batch to \' +\n \'CUDA device yourself.\')\n', (7449, 7624), False, 'import warnings\n'), ((6609, 6678), 'numpy.random.choice', 'np.random.choice', (['self.num_datasets', '(1)'], {'p': 'self._dataset_probabilities'}), '(self.num_datasets, 1, p=self._dataset_probabilities)\n', (6625, 6678), True, 'import numpy as np\n'), ((6956, 6986), 'mmf.common.registry.registry.get', 'registry.get', (['"""current_device"""'], {}), "('current_device')\n", (6968, 6986), False, 'from mmf.common.registry import registry\n'), ((6792, 6861), 'numpy.random.choice', 'np.random.choice', (['self.num_datasets', '(1)'], {'p': 'self._dataset_probabilities'}), '(self.num_datasets, 1, p=self._dataset_probabilities)\n', (6808, 6861), True, 'import numpy as np\n')]
|
import os
import pandas as pd
from sklearn import ensemble
from sklearn import preprocessing
from sklearn import metrics
import joblib
import numpy as np
from . import dispatcher
def predict(test_data_path, model_type, model_path):
df = pd.read_csv(test_data_path)
test_idx = df["id"].values
predictions = None
for FOLD in range(5):
df = pd.read_csv(test_data_path)
encoders = joblib.load(os.path.join(model_path, f"{model_type}_{FOLD}_label_encoder.pkl"))
cols = joblib.load(os.path.join(model_path, f"{model_type}_{FOLD}_columns.pkl"))
for c in encoders:
lbl = encoders[c]
df.loc[:, c] = df.loc[:, c].astype(str).fillna("NONE")
df.loc[:, c] = lbl.transform(df[c].values.tolist())
clf = joblib.load(os.path.join(model_path, f"{model_type}_{FOLD}.pkl"))
df = df[cols]
preds = clf.predict_proba(df)[:, 1]
if FOLD == 0:
predictions = preds
else:
predictions += preds
predictions /= 5
sub = pd.DataFrame(np.column_stack((test_idx, predictions)), columns=["id", "target"])
return sub
if __name__ == "__main__":
submission = predict(test_data_path="input/test_cat.csv",
model_type="randomforest",
model_path="models/")
submission.loc[:, "id"] = submission.loc[:, "id"].astype(int)
submission.to_csv(f"models/rf_submission.csv", index=False)
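# Added note: predict() builds a simple uniform ensemble over the five
# cross-validation folds; the positive-class probabilities of the per-fold
# models are summed and divided by 5 before being written as the submission.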
|
[
"pandas.read_csv",
"os.path.join",
"numpy.column_stack"
] |
[((244, 271), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (255, 271), True, 'import pandas as pd\n'), ((366, 393), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (377, 393), True, 'import pandas as pd\n'), ((1086, 1126), 'numpy.column_stack', 'np.column_stack', (['(test_idx, predictions)'], {}), '((test_idx, predictions))\n', (1101, 1126), True, 'import numpy as np\n'), ((425, 491), 'os.path.join', 'os.path.join', (['model_path', 'f"""{model_type}_{FOLD}_label_encoder.pkl"""'], {}), "(model_path, f'{model_type}_{FOLD}_label_encoder.pkl')\n", (437, 491), False, 'import os\n'), ((520, 580), 'os.path.join', 'os.path.join', (['model_path', 'f"""{model_type}_{FOLD}_columns.pkl"""'], {}), "(model_path, f'{model_type}_{FOLD}_columns.pkl')\n", (532, 580), False, 'import os\n'), ((805, 857), 'os.path.join', 'os.path.join', (['model_path', 'f"""{model_type}_{FOLD}.pkl"""'], {}), "(model_path, f'{model_type}_{FOLD}.pkl')\n", (817, 857), False, 'import os\n')]
|
import numpy as np
import servo2
import math
import time
savedir="Camera_Data/"
map_A = np.load(savedir+'servoA.npy')
map_B = np.load(savedir+'servoB.npy')
steps = len(map_A)
wait = 0.3
def vect_to_deg(x, y):
servo_A, servo_B = 0, 0
try:
A = list(map(lambda k: k >= y, map_A)).index(True)-1
if map_A[A] == y:
servo_A = A*180/(steps-1)
else:
servo_A = A*180/(steps-1) + (y-map_A[A])*(180/(steps-1))/(map_A[A+1]-map_A[A])
except:
servo_A = 180
y = map_A[steps-1]
try:
B = list(map(lambda k: k >= x, map_B)).index(True)-1
if map_B[B] == x:
servo_B = B*180/(steps-1)
else:
servo_B = B*180/(steps-1) + (x-map_B[B])*(180/(steps-1))/(map_B[B+1]-map_B[B])
except:
servo_B = 180
x = map_B[steps-1]
return servo_A, servo_B, x, y
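# Added note: vect_to_deg does a piecewise-linear lookup in the calibration
# arrays: it finds the first calibration entry >= the requested coordinate and
# interpolates the servo angle between the two neighbouring calibration steps.
# If the request lies outside the calibrated range, the angle is clamped to 180
# degrees and the coordinate to the last calibrated value.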
servo2.home()
cX, cY = 0.0, 0.0
print("maps : ", map_A, map_B, "\n")
# Main
while(True):
y, x = [float(j) for j in input("Location y x :").split()]
a, b, x, y = vect_to_deg(x, y)
print("angle A: ", round(a,1), " angle B: ", round(b,1))
_ = servo2.A(a, wait)
_ = servo2.B(b, wait)
time.sleep(wait)
# Calculate Displacement
s = math.sqrt((cX-x)**2+(cY-y)**2)
cX, cY = x, y
print("Servo Set. Displacement : ", s)
|
[
"servo2.A",
"numpy.load",
"math.sqrt",
"time.sleep",
"servo2.B",
"servo2.home"
] |
[((90, 121), 'numpy.load', 'np.load', (["(savedir + 'servoA.npy')"], {}), "(savedir + 'servoA.npy')\n", (97, 121), True, 'import numpy as np\n'), ((128, 159), 'numpy.load', 'np.load', (["(savedir + 'servoB.npy')"], {}), "(savedir + 'servoB.npy')\n", (135, 159), True, 'import numpy as np\n'), ((772, 785), 'servo2.home', 'servo2.home', ([], {}), '()\n', (783, 785), False, 'import servo2\n'), ((1019, 1036), 'servo2.A', 'servo2.A', (['a', 'wait'], {}), '(a, wait)\n', (1027, 1036), False, 'import servo2\n'), ((1042, 1059), 'servo2.B', 'servo2.B', (['b', 'wait'], {}), '(b, wait)\n', (1050, 1059), False, 'import servo2\n'), ((1061, 1077), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (1071, 1077), False, 'import time\n'), ((1110, 1150), 'math.sqrt', 'math.sqrt', (['((cX - x) ** 2 + (cY - y) ** 2)'], {}), '((cX - x) ** 2 + (cY - y) ** 2)\n', (1119, 1150), False, 'import math\n')]
|
'''
Model definition
'''
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from settings import *
from data_prep import calc_iou
def SSDHook(feature_map, hook_id):
"""
	Takes an input feature map and outputs the prediction tensors
	hook_id is a unique string ID for the variable_scope
"""
with tf.variable_scope('ssd_hook_' + hook_id):
# Note we have linear activation (i.e. no activation function)
net_conf = slim.conv2d(feature_map, NUM_PRED_CONF, [3, 3], activation_fn=None, scope='conv_conf')
net_conf = tf.contrib.layers.flatten(net_conf)
net_loc = slim.conv2d(feature_map, NUM_PRED_LOC, [3, 3], activation_fn=None, scope='conv_loc')
net_loc = tf.contrib.layers.flatten(net_loc)
return net_conf, net_loc
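# Added note: for a feature map of spatial size (H, W), each hook emits class
# predictions of shape (batch, H * W * NUM_PRED_CONF) and localization
# predictions of shape (batch, H * W * NUM_PRED_LOC) after flattening. For the
# reshapes in ModelHelper below to be consistent, NUM_PRED_CONF is expected to
# equal NUM_DEFAULT_BOXES * NUM_CLASSES and NUM_PRED_LOC to equal
# NUM_DEFAULT_BOXES * 4 (all of these constants come from settings).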
def ModelHelper(y_pred_conf, y_pred_loc):
"""
Define loss function, optimizer, predictions, and accuracy metric
Loss includes confidence loss and localization loss
conf_loss_mask is created at batch generation time, to mask the confidence losses
It has 1 at locations w/ positives, and 1 at select negative locations
such that negative-to-positive ratio of NEG_POS_RATIO is satisfied
Arguments:
* y_pred_conf: Class predictions from model,
		a tensor of shape [batch_size, num_feature_map_cells * num_default_boxes * num_classes]
* y_pred_loc: Localization predictions from model,
		a tensor of shape [batch_size, num_feature_map_cells * num_default_boxes * 4]
Returns relevant tensor references
"""
num_total_preds = 0
for fm_size in FM_SIZES:
num_total_preds += fm_size[0] * fm_size[1] * NUM_DEFAULT_BOXES
num_total_preds_conf = num_total_preds * NUM_CLASSES
num_total_preds_loc = num_total_preds * 4
# Input tensors
y_true_conf = tf.placeholder(tf.int32, [None, num_total_preds], name='y_true_conf') # classification ground-truth labels
y_true_loc = tf.placeholder(tf.float32, [None, num_total_preds_loc], name='y_true_loc') # localization ground-truth labels
conf_loss_mask = tf.placeholder(tf.float32, [None, num_total_preds], name='conf_loss_mask') # 1 mask "bit" per def. box
# Confidence loss
logits = tf.reshape(y_pred_conf, [-1, num_total_preds, NUM_CLASSES])
conf_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = y_true_conf)
conf_loss = conf_loss_mask * conf_loss # "zero-out" the loss for don't-care negatives
conf_loss = tf.reduce_sum(conf_loss)
# Localization loss (smooth L1 loss)
	# loc_loss_mask is analogous to conf_loss_mask, except 4 times the size
diff = y_true_loc - y_pred_loc
loc_loss_l2 = 0.5 * (diff**2.0)
loc_loss_l1 = tf.abs(diff) - 0.5
smooth_l1_condition = tf.less(tf.abs(diff), 1.0)
loc_loss = tf.where(smooth_l1_condition, loc_loss_l2, loc_loss_l1)
loc_loss_mask = tf.minimum(y_true_conf, 1) # have non-zero localization loss only where we have matching ground-truth box
loc_loss_mask = tf.to_float(loc_loss_mask)
loc_loss_mask = tf.stack([loc_loss_mask] * 4, axis=2) # [0, 1, 1] -> [[[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]], ...]
loc_loss_mask = tf.reshape(loc_loss_mask, [-1, num_total_preds_loc]) # removing the inner-most dimension of above
loc_loss = loc_loss_mask * loc_loss
loc_loss = tf.reduce_sum(loc_loss)
# Weighted average of confidence loss and localization loss
# Also add regularization loss
loss = conf_loss + LOC_LOSS_WEIGHT * loc_loss + tf.reduce_sum(slim.losses.get_regularization_losses())
optimizer = OPT.minimize(loss)
#reported_loss = loss #tf.reduce_sum(loss, 1) # DEBUG
# Class probabilities and predictions
probs_all = tf.nn.softmax(logits)
probs, preds_conf = tf.nn.top_k(probs_all) # take top-1 probability, and the index is the predicted class
probs = tf.reshape(probs, [-1, num_total_preds])
preds_conf = tf.reshape(preds_conf, [-1, num_total_preds])
# Return a dictionary of {tensor_name: tensor_reference}
ret_dict = {
'y_true_conf': y_true_conf,
'y_true_loc': y_true_loc,
'conf_loss_mask': conf_loss_mask,
'optimizer': optimizer,
'conf_loss': conf_loss,
'loc_loss': loc_loss,
'loss': loss,
'probs': probs,
'preds_conf': preds_conf,
'preds_loc': y_pred_loc,
}
return ret_dict
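# Added note: the localization term above is the standard smooth L1 loss,
#     smooth_l1(d) = 0.5 * d**2   if |d| < 1
#                  = |d| - 0.5    otherwise
# e.g. a coordinate error of d = 0.5 contributes 0.125, while d = 2.0
# contributes 1.5, so large errors only grow linearly.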
def AlexNet():
"""
AlexNet
"""
# Image batch tensor and dropout keep prob placeholders
x = tf.placeholder(tf.float32, [None, IMG_H, IMG_W, NUM_CHANNELS], name='x')
is_training = tf.placeholder(tf.bool, name='is_training')
# Classification and localization predictions
preds_conf = [] # conf -> classification b/c confidence loss -> classification loss
preds_loc = []
# Use batch normalization for all convolution layers
# FIXME: Not sure why setting is_training is not working well
#with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm, normalizer_params={'is_training': is_training}):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm, normalizer_params={'is_training': True},\
weights_regularizer=slim.l2_regularizer(scale=REG_SCALE)):
net = slim.conv2d(x, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
net = slim.conv2d(net, 192, [5, 5], scope='conv2')
net_conf, net_loc = SSDHook(net, 'conv2')
preds_conf.append(net_conf)
preds_loc.append(net_loc)
net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
net = slim.conv2d(net, 384, [3, 3], scope='conv3')
net = slim.conv2d(net, 384, [3, 3], scope='conv4')
net = slim.conv2d(net, 256, [3, 3], scope='conv5')
# The following layers added for SSD
net = slim.conv2d(net, 1024, [3, 3], scope='conv6')
net = slim.conv2d(net, 1024, [1, 1], scope='conv7')
net_conf, net_loc = SSDHook(net, 'conv7')
preds_conf.append(net_conf)
preds_loc.append(net_loc)
net = slim.conv2d(net, 256, [1, 1], scope='conv8')
net = slim.conv2d(net, 512, [3, 3], 2, scope='conv8_2')
net_conf, net_loc = SSDHook(net, 'conv8_2')
preds_conf.append(net_conf)
preds_loc.append(net_loc)
net = slim.conv2d(net, 128, [1, 1], scope='conv9')
net = slim.conv2d(net, 256, [3, 3], 2, scope='conv9_2')
net_conf, net_loc = SSDHook(net, 'conv9_2')
preds_conf.append(net_conf)
preds_loc.append(net_loc)
# Concatenate all preds together into 1 vector, for both classification and localization predictions
final_pred_conf = tf.concat(preds_conf,1)
final_pred_loc = tf.concat(preds_loc, 1)
# Return a dictionary of {tensor_name: tensor_reference}
ret_dict = {
'x': x,
'y_pred_conf': final_pred_conf,
'y_pred_loc': final_pred_loc,
'is_training': is_training,
}
return ret_dict
def SSDModel():
"""
Wrapper around the model and model helper
Returns dict of relevant tensor references
"""
if MODEL == 'AlexNet':
model = AlexNet()
else:
raise NotImplementedError('Model %s not supported' % MODEL)
model_helper = ModelHelper(model['y_pred_conf'], model['y_pred_loc'])
ssd_model = {}
for k in model.keys():
ssd_model[k] = model[k]
for k in model_helper.keys():
ssd_model[k] = model_helper[k]
return ssd_model
def nms(y_pred_conf, y_pred_loc, prob):
"""
Non-Maximum Suppression (NMS)
Performs NMS on all boxes of each class where predicted probability > CONF_THRES
For all boxes exceeding IOU threshold, select the box with highest confidence
	Returns a list of box coordinates post-NMS
Arguments:
	* y_pred_conf: Class predictions, numpy array of shape (num_feature_map_cells * num_default_boxes,)
	* y_pred_loc: Bounding box coordinates, numpy array of shape (num_feature_map_cells * num_default_boxes * 4,)
		These coordinates are normalized coordinates relative to center of feature map cell
	* prob: Class probabilities, numpy array of shape (num_feature_map_cells * num_default_boxes,)
Returns:
* boxes: Numpy array of boxes, with shape (num_boxes, 6). shape[0] is interpreted as:
[x1, y1, x2, y2, class, probability], where x1/y1/x2/y2 are the coordinates of the
upper-left and lower-right corners. Box coordinates assume the image size is IMG_W x IMG_H.
Remember to rescale box coordinates if your target image has different dimensions.
"""
# Keep track of boxes for each class
class_boxes = {} # class -> [(x1, y1, x2, y2, prob), (...), ...]
with open('signnames.csv', 'r') as f:
for line in f:
cls, _ = line.split(',')
class_boxes[float(cls)] = []
# Go through all possible boxes and perform class-based greedy NMS (greedy based on class prediction confidence)
y_idx = 0
for fm_size in FM_SIZES:
fm_h, fm_w = fm_size # feature map height and width
for row in range(fm_h):
for col in range(fm_w):
for db in DEFAULT_BOXES:
# Only perform calculations if class confidence > CONF_THRESH and not background class
if prob[y_idx] > CONF_THRESH and y_pred_conf[y_idx] > 0.:
# Calculate absolute coordinates of predicted bounding box
xc, yc = col + 0.5, row + 0.5 # center of current feature map cell
center_coords = np.array([xc, yc, xc, yc])
abs_box_coords = center_coords + y_pred_loc[y_idx*4 : y_idx*4 + 4] # predictions are offsets to center of fm cell
# Calculate predicted box coordinates in actual image
scale = np.array([IMG_W/fm_w, IMG_H/fm_h, IMG_W/fm_w, IMG_H/fm_h])
box_coords = abs_box_coords * scale
box_coords = [int(round(x)) for x in box_coords]
# Compare this box to all previous boxes of this class
cls = y_pred_conf[y_idx]
cls_prob = prob[y_idx]
box = (*box_coords, cls, cls_prob)
if len(class_boxes[cls]) == 0:
class_boxes[cls].append(box)
else:
suppressed = False # did this box suppress other box(es)?
overlapped = False # did this box overlap with other box(es)?
for other_box in class_boxes[cls]:
iou = calc_iou(box[:4], other_box[:4])
if iou > NMS_IOU_THRESH:
overlapped = True
# If current box has higher confidence than other box
if box[5] > other_box[5]:
class_boxes[cls].remove(other_box)
suppressed = True
if suppressed or not overlapped:
class_boxes[cls].append(box)
y_idx += 1
# Gather all the pruned boxes and return them
boxes = []
for cls in class_boxes.keys():
for class_box in class_boxes[cls]:
boxes.append(class_box)
boxes = np.array(boxes)
return boxes
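# Added note: the suppression rule above is greedy and class-wise. If two boxes
# of the same class overlap with IoU > NMS_IOU_THRESH, only the one with the
# higher class probability is kept; boxes of different classes never suppress
# each other, and boxes with probability below CONF_THRESH are discarded before
# NMS is even considered.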
|
[
"tensorflow.reduce_sum",
"tensorflow.contrib.layers.flatten",
"tensorflow.reshape",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.nn.softmax",
"tensorflow.contrib.slim.conv2d",
"tensorflow.abs",
"tensorflow.nn.top_k",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.minimum",
"tensorflow.stack",
"tensorflow.placeholder",
"data_prep.calc_iou",
"tensorflow.to_float",
"tensorflow.where",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.contrib.slim.losses.get_regularization_losses",
"numpy.array",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
] |
[((1706, 1775), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, num_total_preds]'], {'name': '"""y_true_conf"""'}), "(tf.int32, [None, num_total_preds], name='y_true_conf')\n", (1720, 1775), True, 'import tensorflow as tf\n'), ((1829, 1903), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, num_total_preds_loc]'], {'name': '"""y_true_loc"""'}), "(tf.float32, [None, num_total_preds_loc], name='y_true_loc')\n", (1843, 1903), True, 'import tensorflow as tf\n'), ((1958, 2032), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, num_total_preds]'], {'name': '"""conf_loss_mask"""'}), "(tf.float32, [None, num_total_preds], name='conf_loss_mask')\n", (1972, 2032), True, 'import tensorflow as tf\n'), ((2092, 2151), 'tensorflow.reshape', 'tf.reshape', (['y_pred_conf', '[-1, num_total_preds, NUM_CLASSES]'], {}), '(y_pred_conf, [-1, num_total_preds, NUM_CLASSES])\n', (2102, 2151), True, 'import tensorflow as tf\n'), ((2165, 2251), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'y_true_conf'}), '(logits=logits, labels=\n y_true_conf)\n', (2211, 2251), True, 'import tensorflow as tf\n'), ((2352, 2376), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['conf_loss'], {}), '(conf_loss)\n', (2365, 2376), True, 'import tensorflow as tf\n'), ((2652, 2707), 'tensorflow.where', 'tf.where', (['smooth_l1_condition', 'loc_loss_l2', 'loc_loss_l1'], {}), '(smooth_l1_condition, loc_loss_l2, loc_loss_l1)\n', (2660, 2707), True, 'import tensorflow as tf\n'), ((2727, 2753), 'tensorflow.minimum', 'tf.minimum', (['y_true_conf', '(1)'], {}), '(y_true_conf, 1)\n', (2737, 2753), True, 'import tensorflow as tf\n'), ((2851, 2877), 'tensorflow.to_float', 'tf.to_float', (['loc_loss_mask'], {}), '(loc_loss_mask)\n', (2862, 2877), True, 'import tensorflow as tf\n'), ((2895, 2932), 'tensorflow.stack', 'tf.stack', (['([loc_loss_mask] * 4)'], {'axis': '(2)'}), '([loc_loss_mask] * 4, axis=2)\n', (2903, 2932), True, 'import tensorflow as tf\n'), ((3016, 3068), 'tensorflow.reshape', 'tf.reshape', (['loc_loss_mask', '[-1, num_total_preds_loc]'], {}), '(loc_loss_mask, [-1, num_total_preds_loc])\n', (3026, 3068), True, 'import tensorflow as tf\n'), ((3164, 3187), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loc_loss'], {}), '(loc_loss)\n', (3177, 3187), True, 'import tensorflow as tf\n'), ((3528, 3549), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (3541, 3549), True, 'import tensorflow as tf\n'), ((3571, 3593), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['probs_all'], {}), '(probs_all)\n', (3582, 3593), True, 'import tensorflow as tf\n'), ((3667, 3707), 'tensorflow.reshape', 'tf.reshape', (['probs', '[-1, num_total_preds]'], {}), '(probs, [-1, num_total_preds])\n', (3677, 3707), True, 'import tensorflow as tf\n'), ((3722, 3767), 'tensorflow.reshape', 'tf.reshape', (['preds_conf', '[-1, num_total_preds]'], {}), '(preds_conf, [-1, num_total_preds])\n', (3732, 3767), True, 'import tensorflow as tf\n'), ((4218, 4290), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, IMG_H, IMG_W, NUM_CHANNELS]'], {'name': '"""x"""'}), "(tf.float32, [None, IMG_H, IMG_W, NUM_CHANNELS], name='x')\n", (4232, 4290), True, 'import tensorflow as tf\n'), ((4306, 4349), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (4320, 4349), True, 'import tensorflow as tf\n'), ((6211, 6235), 'tensorflow.concat', 
'tf.concat', (['preds_conf', '(1)'], {}), '(preds_conf, 1)\n', (6220, 6235), True, 'import tensorflow as tf\n'), ((6253, 6276), 'tensorflow.concat', 'tf.concat', (['preds_loc', '(1)'], {}), '(preds_loc, 1)\n', (6262, 6276), True, 'import tensorflow as tf\n'), ((10156, 10171), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (10164, 10171), True, 'import numpy as np\n'), ((317, 357), 'tensorflow.variable_scope', 'tf.variable_scope', (["('ssd_hook_' + hook_id)"], {}), "('ssd_hook_' + hook_id)\n", (334, 357), True, 'import tensorflow as tf\n'), ((437, 528), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['feature_map', 'NUM_PRED_CONF', '[3, 3]'], {'activation_fn': 'None', 'scope': '"""conv_conf"""'}), "(feature_map, NUM_PRED_CONF, [3, 3], activation_fn=None, scope=\n 'conv_conf')\n", (448, 528), True, 'import tensorflow.contrib.slim as slim\n'), ((537, 572), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['net_conf'], {}), '(net_conf)\n', (562, 572), True, 'import tensorflow as tf\n'), ((586, 675), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['feature_map', 'NUM_PRED_LOC', '[3, 3]'], {'activation_fn': 'None', 'scope': '"""conv_loc"""'}), "(feature_map, NUM_PRED_LOC, [3, 3], activation_fn=None, scope=\n 'conv_loc')\n", (597, 675), True, 'import tensorflow.contrib.slim as slim\n'), ((683, 717), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['net_loc'], {}), '(net_loc)\n', (708, 717), True, 'import tensorflow as tf\n'), ((2571, 2583), 'tensorflow.abs', 'tf.abs', (['diff'], {}), '(diff)\n', (2577, 2583), True, 'import tensorflow as tf\n'), ((2621, 2633), 'tensorflow.abs', 'tf.abs', (['diff'], {}), '(diff)\n', (2627, 2633), True, 'import tensorflow as tf\n'), ((4914, 4977), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['x', '(64)', '[11, 11]', '(4)'], {'padding': '"""VALID"""', 'scope': '"""conv1"""'}), "(x, 64, [11, 11], 4, padding='VALID', scope='conv1')\n", (4925, 4977), True, 'import tensorflow.contrib.slim as slim\n'), ((4986, 5032), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['net', '[3, 3]', '(2)'], {'scope': '"""pool1"""'}), "(net, [3, 3], 2, scope='pool1')\n", (5001, 5032), True, 'import tensorflow.contrib.slim as slim\n'), ((5041, 5085), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(192)', '[5, 5]'], {'scope': '"""conv2"""'}), "(net, 192, [5, 5], scope='conv2')\n", (5052, 5085), True, 'import tensorflow.contrib.slim as slim\n'), ((5198, 5244), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['net', '[3, 3]', '(2)'], {'scope': '"""pool2"""'}), "(net, [3, 3], 2, scope='pool2')\n", (5213, 5244), True, 'import tensorflow.contrib.slim as slim\n'), ((5253, 5297), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(384)', '[3, 3]'], {'scope': '"""conv3"""'}), "(net, 384, [3, 3], scope='conv3')\n", (5264, 5297), True, 'import tensorflow.contrib.slim as slim\n'), ((5306, 5350), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(384)', '[3, 3]'], {'scope': '"""conv4"""'}), "(net, 384, [3, 3], scope='conv4')\n", (5317, 5350), True, 'import tensorflow.contrib.slim as slim\n'), ((5359, 5403), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(256)', '[3, 3]'], {'scope': '"""conv5"""'}), "(net, 256, [3, 3], scope='conv5')\n", (5370, 5403), True, 'import tensorflow.contrib.slim as slim\n'), ((5452, 5497), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(1024)', '[3, 3]'], {'scope': '"""conv6"""'}), "(net, 1024, [3, 3], scope='conv6')\n", (5463, 5497), True, 'import 
tensorflow.contrib.slim as slim\n'), ((5506, 5551), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(1024)', '[1, 1]'], {'scope': '"""conv7"""'}), "(net, 1024, [1, 1], scope='conv7')\n", (5517, 5551), True, 'import tensorflow.contrib.slim as slim\n'), ((5664, 5708), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(256)', '[1, 1]'], {'scope': '"""conv8"""'}), "(net, 256, [1, 1], scope='conv8')\n", (5675, 5708), True, 'import tensorflow.contrib.slim as slim\n'), ((5717, 5766), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(512)', '[3, 3]', '(2)'], {'scope': '"""conv8_2"""'}), "(net, 512, [3, 3], 2, scope='conv8_2')\n", (5728, 5766), True, 'import tensorflow.contrib.slim as slim\n'), ((5881, 5925), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(128)', '[1, 1]'], {'scope': '"""conv9"""'}), "(net, 128, [1, 1], scope='conv9')\n", (5892, 5925), True, 'import tensorflow.contrib.slim as slim\n'), ((5934, 5983), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(256)', '[3, 3]', '(2)'], {'scope': '"""conv9_2"""'}), "(net, 256, [3, 3], 2, scope='conv9_2')\n", (5945, 5983), True, 'import tensorflow.contrib.slim as slim\n'), ((3345, 3384), 'tensorflow.contrib.slim.losses.get_regularization_losses', 'slim.losses.get_regularization_losses', ([], {}), '()\n', (3382, 3384), True, 'import tensorflow.contrib.slim as slim\n'), ((4867, 4903), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', ([], {'scale': 'REG_SCALE'}), '(scale=REG_SCALE)\n', (4886, 4903), True, 'import tensorflow.contrib.slim as slim\n'), ((8814, 8840), 'numpy.array', 'np.array', (['[xc, yc, xc, yc]'], {}), '([xc, yc, xc, yc])\n', (8822, 8840), True, 'import numpy as np\n'), ((9037, 9103), 'numpy.array', 'np.array', (['[IMG_W / fm_w, IMG_H / fm_h, IMG_W / fm_w, IMG_H / fm_h]'], {}), '([IMG_W / fm_w, IMG_H / fm_h, IMG_W / fm_w, IMG_H / fm_h])\n', (9045, 9103), True, 'import numpy as np\n'), ((9633, 9665), 'data_prep.calc_iou', 'calc_iou', (['box[:4]', 'other_box[:4]'], {}), '(box[:4], other_box[:4])\n', (9641, 9665), False, 'from data_prep import calc_iou\n')]
|
"""
====================================================================
ConvSCCS cross validation on simulated longitudinal features example
====================================================================
In this example we simulate longitudinal data with preset relative incidence
for each feature. We then perform a cross validation of the ConvSCCS model
and compare the estimated coefficients to the relative incidences used for
the simulation.
"""
from time import time
import numpy as np
from scipy.sparse import csr_matrix, hstack
from matplotlib import cm
import matplotlib.pylab as plt
from tick.survival.simu_sccs import CustomEffects
from tick.survival import SimuSCCS, ConvSCCS
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Simulation parameters
seed = 0
lags = 49
n_samples = 2000
n_intervals = 750
n_corr = 3
# Relative incidence functions used for the simulation
ce = CustomEffects(lags + 1)
null_effect = [ce.constant_effect(1)] * 2
intermediate_effect = ce.bell_shaped_effect(2, 30, 15, 15)
late_effects = ce.increasing_effect(2, curvature_type=4)
sim_effects = [*null_effect, intermediate_effect, late_effects]
n_features = len(sim_effects)
n_lags = np.repeat(lags, n_features).astype('uint64')
coeffs = [np.log(c) for c in sim_effects]
# Time drift (age effect) used for the simulations.
time_drift = lambda t: np.log(8 * np.sin(.01 * t) + 9)
# Simulation of the features.
sim = SimuSCCS(n_samples, n_intervals, n_features, n_lags,
time_drift=time_drift, coeffs=coeffs, seed=seed,
n_correlations=n_corr, verbose=False)
features, censored_features, labels, censoring, coeffs = sim.simulate()
# Plot the Hawkes kernel matrix used to generate the features.
fig, ax = plt.subplots(figsize=(7, 6))
heatmap = ax.pcolor(sim.hawkes_exp_kernels.adjacency, cmap=cm.Blues)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.5)
fig.colorbar(heatmap, cax=cax)
ax.set_title('Hawkes adjacency matrix used for the simulation');
plt.show()
## Add age_groups features to feature matrices.
agegrps = [0, 125, 250, 375, 500, 625, 750]
n_agegrps = len(agegrps) - 1
feat_agegrp = np.zeros((n_intervals, n_agegrps))
for i in range(n_agegrps):
feat_agegrp[agegrps[i]:agegrps[i + 1], i] = 1
feat_agegrp = csr_matrix(feat_agegrp)
features = [hstack([f, feat_agegrp]).tocsr() for f in features]
censored_features = [hstack([f, feat_agegrp]).tocsr() for f in
censored_features]
n_lags = np.hstack([n_lags, np.zeros(n_agegrps)])
# Learning
# Example code for cross validation
# start = time()
# learner = ConvSCCS(n_lags=n_lags.astype('uint64'),
# penalized_features=np.arange(n_features),
# random_state=42)
# C_TV_range = (1, 4)
# C_L1_range = (2, 5)
# _, cv_track = learner.fit_kfold_cv(features, labels, censoring,
# C_TV_range, C_L1_range,
# confidence_intervals=True,
# n_samples_bootstrap=20, n_cv_iter=50)
# elapsed_time = time() - start
# print("Elapsed time (model training): %.2f seconds \n" % elapsed_time)
# print("Best model hyper parameters: \n")
# print("C_tv : %f \n" % cv_track.best_model['C_tv'])
# print("C_group_l1 : %f \n" % cv_track.best_model['C_group_l1'])
# cv_track.plot_cv_report(35, 45)
# plt.show()
# confidence_intervals = cv_track.best_model['confidence_intervals']
# using the parameters resulting from cross-validation
learner = ConvSCCS(n_lags=n_lags.astype('uint64'),
penalized_features=np.arange(n_features),
random_state=42, C_tv=270.2722840570933,
C_group_l1=5216.472772625124)
_, confidence_intervals = learner.fit(features, labels, censoring,
confidence_intervals=True,
n_samples_bootstrap=20)
# Plot estimated parameters
# get bootstrap confidence intervals
refitted_coeffs = confidence_intervals['refit_coeffs']
lower_bound = confidence_intervals['lower_bound']
upper_bound = confidence_intervals['upper_bound']
n_rows = int(np.ceil(n_features / 2))
remove_last_plot = (n_features % 2 != 0)
fig, axarr = plt.subplots(n_rows, 2, sharex=True, sharey=True, figsize=(10, 6))
y = confidence_intervals['refit_coeffs']
lb = confidence_intervals['lower_bound']
ub = confidence_intervals['upper_bound']
for i, c in enumerate(y[:-6]):
ax = axarr[i // 2][i % 2]
l = n_lags[i]
ax.plot(np.exp(coeffs[i]), label="True RI")
ax.step(np.arange(l + 1), np.exp(c), label="Estimated RI")
ax.fill_between(np.arange(l + 1), np.exp(lb[i]), np.exp(ub[i]), alpha=.5,
                    color='orange', step='pre', label="95% bootstrap CI")
plt.suptitle('Estimated relative risks with 95% confidence bands')
axarr[0][1].legend(loc='best')
[ax[0].set_ylabel('Relative incidence') for ax in axarr]
[ax.set_xlabel('Time after exposure start') for ax in axarr[-1]]
if remove_last_plot:
fig.delaxes(axarr[-1][-1])
plt.show()
normalize = lambda x: x / np.sum(x)
m = np.repeat(np.hstack(refitted_coeffs[-6:]), 125)
lb = np.repeat(np.hstack(lower_bound[-6:]), 125)
ub = np.repeat(np.hstack(upper_bound[-6:]), 125)
plt.figure()
plt.plot(np.arange(n_intervals),
normalize(np.exp(time_drift(np.arange(n_intervals)))))
plt.step(np.arange(n_intervals), normalize(np.exp(m)))
plt.fill_between(np.arange(n_intervals), np.exp(lb) / np.exp(m).sum(),
np.exp(ub) / np.exp(m).sum(), alpha=.5, color='orange',
step='pre')
plt.xlabel('Age')
plt.ylabel('Normalized Age Relative Incidence')
plt.title("Normalized age effect with 95% confidence bands");
plt.show()
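# Added note: the fitted coefficients live on the log scale (the simulation sets
# coeffs = [np.log(c) for c in sim_effects]), which is why every plot above
# applies np.exp before comparing against the simulated relative incidences;
# exp(coefficient) is read as the relative incidence associated with the
# corresponding lagged exposure or age group.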
|
[
"numpy.sum",
"numpy.sin",
"matplotlib.pylab.suptitle",
"numpy.arange",
"numpy.exp",
"matplotlib.pylab.title",
"matplotlib.pylab.subplots",
"matplotlib.pylab.figure",
"matplotlib.pylab.show",
"tick.survival.SimuSCCS",
"numpy.repeat",
"numpy.ceil",
"numpy.hstack",
"scipy.sparse.csr_matrix",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.xlabel",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.log",
"numpy.zeros",
"tick.survival.simu_sccs.CustomEffects",
"scipy.sparse.hstack"
] |
[((903, 926), 'tick.survival.simu_sccs.CustomEffects', 'CustomEffects', (['(lags + 1)'], {}), '(lags + 1)\n', (916, 926), False, 'from tick.survival.simu_sccs import CustomEffects\n'), ((1422, 1565), 'tick.survival.SimuSCCS', 'SimuSCCS', (['n_samples', 'n_intervals', 'n_features', 'n_lags'], {'time_drift': 'time_drift', 'coeffs': 'coeffs', 'seed': 'seed', 'n_correlations': 'n_corr', 'verbose': '(False)'}), '(n_samples, n_intervals, n_features, n_lags, time_drift=time_drift,\n coeffs=coeffs, seed=seed, n_correlations=n_corr, verbose=False)\n', (1430, 1565), False, 'from tick.survival import SimuSCCS, ConvSCCS\n'), ((1738, 1766), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (1750, 1766), True, 'import matplotlib.pylab as plt\n'), ((1846, 1869), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1865, 1869), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2021, 2031), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2029, 2031), True, 'import matplotlib.pylab as plt\n'), ((2169, 2203), 'numpy.zeros', 'np.zeros', (['(n_intervals, n_agegrps)'], {}), '((n_intervals, n_agegrps))\n', (2177, 2203), True, 'import numpy as np\n'), ((2296, 2319), 'scipy.sparse.csr_matrix', 'csr_matrix', (['feat_agegrp'], {}), '(feat_agegrp)\n', (2306, 2319), False, 'from scipy.sparse import csr_matrix, hstack\n'), ((4237, 4303), 'matplotlib.pylab.subplots', 'plt.subplots', (['n_rows', '(2)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(10, 6)'}), '(n_rows, 2, sharex=True, sharey=True, figsize=(10, 6))\n', (4249, 4303), True, 'import matplotlib.pylab as plt\n'), ((4768, 4834), 'matplotlib.pylab.suptitle', 'plt.suptitle', (['"""Estimated relative risks with 95% confidence bands"""'], {}), "('Estimated relative risks with 95% confidence bands')\n", (4780, 4834), True, 'import matplotlib.pylab as plt\n'), ((5040, 5050), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5048, 5050), True, 'import matplotlib.pylab as plt\n'), ((5238, 5250), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (5248, 5250), True, 'import matplotlib.pylab as plt\n'), ((5576, 5593), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (5586, 5593), True, 'import matplotlib.pylab as plt\n'), ((5594, 5641), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Normalized Age Relative Incidence"""'], {}), "('Normalized Age Relative Incidence')\n", (5604, 5641), True, 'import matplotlib.pylab as plt\n'), ((5642, 5702), 'matplotlib.pylab.title', 'plt.title', (['"""Normalized age effect with 95% confidence bands"""'], {}), "('Normalized age effect with 95% confidence bands')\n", (5651, 5702), True, 'import matplotlib.pylab as plt\n'), ((5704, 5714), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5712, 5714), True, 'import matplotlib.pylab as plt\n'), ((1246, 1255), 'numpy.log', 'np.log', (['c'], {}), '(c)\n', (1252, 1255), True, 'import numpy as np\n'), ((4157, 4180), 'numpy.ceil', 'np.ceil', (['(n_features / 2)'], {}), '(n_features / 2)\n', (4164, 4180), True, 'import numpy as np\n'), ((5102, 5133), 'numpy.hstack', 'np.hstack', (['refitted_coeffs[-6:]'], {}), '(refitted_coeffs[-6:])\n', (5111, 5133), True, 'import numpy as np\n'), ((5155, 5182), 'numpy.hstack', 'np.hstack', (['lower_bound[-6:]'], {}), '(lower_bound[-6:])\n', (5164, 5182), True, 'import numpy as np\n'), ((5204, 5231), 'numpy.hstack', 'np.hstack', (['upper_bound[-6:]'], {}), '(upper_bound[-6:])\n', (5213, 5231), 
True, 'import numpy as np\n'), ((5260, 5282), 'numpy.arange', 'np.arange', (['n_intervals'], {}), '(n_intervals)\n', (5269, 5282), True, 'import numpy as np\n'), ((5357, 5379), 'numpy.arange', 'np.arange', (['n_intervals'], {}), '(n_intervals)\n', (5366, 5379), True, 'import numpy as np\n'), ((5420, 5442), 'numpy.arange', 'np.arange', (['n_intervals'], {}), '(n_intervals)\n', (5429, 5442), True, 'import numpy as np\n'), ((1190, 1217), 'numpy.repeat', 'np.repeat', (['lags', 'n_features'], {}), '(lags, n_features)\n', (1199, 1217), True, 'import numpy as np\n'), ((2515, 2534), 'numpy.zeros', 'np.zeros', (['n_agegrps'], {}), '(n_agegrps)\n', (2523, 2534), True, 'import numpy as np\n'), ((3595, 3616), 'numpy.arange', 'np.arange', (['n_features'], {}), '(n_features)\n', (3604, 3616), True, 'import numpy as np\n'), ((4518, 4535), 'numpy.exp', 'np.exp', (['coeffs[i]'], {}), '(coeffs[i])\n', (4524, 4535), True, 'import numpy as np\n'), ((4566, 4582), 'numpy.arange', 'np.arange', (['(l + 1)'], {}), '(l + 1)\n', (4575, 4582), True, 'import numpy as np\n'), ((4584, 4593), 'numpy.exp', 'np.exp', (['c'], {}), '(c)\n', (4590, 4593), True, 'import numpy as np\n'), ((4637, 4653), 'numpy.arange', 'np.arange', (['(l + 1)'], {}), '(l + 1)\n', (4646, 4653), True, 'import numpy as np\n'), ((4655, 4668), 'numpy.exp', 'np.exp', (['lb[i]'], {}), '(lb[i])\n', (4661, 4668), True, 'import numpy as np\n'), ((4670, 4683), 'numpy.exp', 'np.exp', (['ub[i]'], {}), '(ub[i])\n', (4676, 4683), True, 'import numpy as np\n'), ((5078, 5087), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (5084, 5087), True, 'import numpy as np\n'), ((5391, 5400), 'numpy.exp', 'np.exp', (['m'], {}), '(m)\n', (5397, 5400), True, 'import numpy as np\n'), ((5444, 5454), 'numpy.exp', 'np.exp', (['lb'], {}), '(lb)\n', (5450, 5454), True, 'import numpy as np\n'), ((5491, 5501), 'numpy.exp', 'np.exp', (['ub'], {}), '(ub)\n', (5497, 5501), True, 'import numpy as np\n'), ((2332, 2356), 'scipy.sparse.hstack', 'hstack', (['[f, feat_agegrp]'], {}), '([f, feat_agegrp])\n', (2338, 2356), False, 'from scipy.sparse import csr_matrix, hstack\n'), ((2405, 2429), 'scipy.sparse.hstack', 'hstack', (['[f, feat_agegrp]'], {}), '([f, feat_agegrp])\n', (2411, 2429), False, 'from scipy.sparse import csr_matrix, hstack\n'), ((1365, 1381), 'numpy.sin', 'np.sin', (['(0.01 * t)'], {}), '(0.01 * t)\n', (1371, 1381), True, 'import numpy as np\n'), ((5321, 5343), 'numpy.arange', 'np.arange', (['n_intervals'], {}), '(n_intervals)\n', (5330, 5343), True, 'import numpy as np\n'), ((5457, 5466), 'numpy.exp', 'np.exp', (['m'], {}), '(m)\n', (5463, 5466), True, 'import numpy as np\n'), ((5504, 5513), 'numpy.exp', 'np.exp', (['m'], {}), '(m)\n', (5510, 5513), True, 'import numpy as np\n')]
|
import numpy, math
from params_geo import *
import nanopores.py4gmsh.basic
import nanopores.py4gmsh.extra
from nanopores.py4gmsh import *
from warnings import warn
def get_geo(x0 = None, crosssections = True, exit_i = None, **params):
"""
    writes a 3d geo file for an extruded axisymmetric geometry
for Howorka 'Self-Assembled aHem that spans lipid bilayers'
_________
| |
| _ |
| | |____|
| | |____|
| |_| |
| |
|________| *rotated around y-axis *
"""
reload(nanopores.py4gmsh.basic)
reload(nanopores.py4gmsh.extra)
globals().update(params)
params["synonymes"] = synonymes
X_aHem = numpy.array([[2.16, 0.0, 0.0],
[2.77, 0.0, -0.19],
[3.24, 0.0, -0.1 ],
[3.59, 0.0, -0.1 ],
[3.83, 0.0, -0.35],
[3.84, 0.0, -0.8 ],
[3.67, 0.0, -1.34],
[3.73, 0.0, -1.96],
[3.93, 0.0, -2.31],
[4.23, 0.0, -2.67],
[4.44, 0.0, -2.81],
[4.33, 0.0, -3.25],
[4.01, 0.0, -3.5 ],
[3.99, 0.0, -3.67],
[4.11, 0.0, -3.94],
[4.39, 0.0, -4.12],
[4.44, 0.0, -4.52],
[4.73, 0.0, -4.86],
[4.96, 0.0, -5.41],
[4.89, 0.0, -5.87],
[4.63, 0.0, -6.44],
[4.43, 0.0, -6.96],
[4.07, 0.0, -7.32],
[3.71, 0.0, -7.51],
[3.46, 0.0, -7.36],
[3.41, 0.0, -7.1 ],
[3.31, 0.0, -6.9 ],
[3.04, 0.0, -6.87],
[2.73, 0.0, -6.73],
[2.41, 0.0, -6.6 ],
[2.17, 0.0, -6.41],
[1.97, 0.0, -6.23],
[1.84, 0.0, -6.03],
[1.76, 0.0, -5.87],
[1.54, 0.0, -5.87],
[1.4 , 0.0, -5.96],
[1.31, 0.0, -6.16],
[1.39, 0.0, -6.57],
[1.6 , 0.0, -6.81],
[1.71, 0.0, -7.09],
[1.76, 0.0, -7.32],
[1.67, 0.0, -7.65],
[1.44, 0.0, -7.81],
[1.49, 0.0, -8.06],
[1.56, 0.0, -8.36],
[1.44, 0.0, -8.61],
[1.43, 0.0, -8.79],
[1.44, 0.0, -9.1 ],
[1.6 , 0.0, -9.48],
[1.74, 0.0, -9.84],
[1.63, 0.0, -10.0],
[1.47, 0.0, -10.19],
[1.26, 0.0, -10.21],
[1.07, 0.0, -10.05],
[1.03, 0.0, -9.76],
[1.09, 0.0, -9.44],
[1.07, 0.0, -9.02],
[0.86, 0.0, -8.79],
[0.64, 0.0, -8.68],
[0.63, 0.0, -8.36],
[0.8 , 0.0, -8.22],
[0.81, 0.0, -7.93],
[0.89, 0.0, -7.71],
[1.04, 0.0, -7.51],
[1.1 , 0.0, -7.25],
[0.91, 0.0, -7.02],
[0.91, 0.0, -6.76],
[0.91, 0.0, -6.48],
[0.69, 0.0, -6.25],
[0.69, 0.0, -6. ],
[0.66, 0.0, -5.68],
[0.59, 0.0, -5.36],
[0.53, 0.0, -5.12],
[0.54, 0.0, -4.92],
[0.79, 0.0, -4.84],
[1.03, 0.0, -4.89],
[1.21, 0.0, -4.7 ],
[1.36, 0.0, -4.42],
[1.49, 0.0, -4.16],
[1.66, 0.0, -3.92],
[1.66, 0.0, -3.7 ],
[1.8 , 0.0, -3.41],
[2. , 0.0, -3.22],
[1.91, 0.0, -2.93],
[1.8 , 0.0, -2.71],
[1.56, 0.0, -2.55],
[1.46, 0.0, -2.38],
[1.3 , 0.0, -2.19],
[1.21, 0.0, -1.93],
[1.09, 0.0, -1.64],
[0.9 , 0.0, -1.45],
[0.8 , 0.0, -1.28],
[0.84, 0.0, -1. ],
[1. , 0.0, -0.8 ],
[1.26, 0.0, -0.64],
[1.7 , 0.0, -0.31]])
    #Anchor Points on aHem for the membrane (index)
ap1 = 18
ap2 = 49
apdiff=ap2-ap1
#Anchor Points in aHem for CrossS (index)
ac1 = 52
ac2 = 68
ac3 = 82
ac4 = 0
zcross = sorted([X_aHem[i][2] for i in [ac1, ac2, ac3, ac4]])
params["lbtm"] = -zcross[0] + zcross[1]
params["lctr"] = -zcross[1] + zcross[2]
params["ltop"] = -zcross[2] + zcross[3]
params["zporetop"] = zcross[3]
params["zporebtm"] = zcross[0]
params["ztop"] = params["zporetop"] + l3
params["zbtm"] = params["zporebtm"] - l4
r0=max([X_aHem[index][0] for index in [ac1, ac2, ac3, ac4]])+rMolecule
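    # Worked example (values follow from X_aHem above): the anchor z-coordinates are
    # 0.0 (ac4), -3.22 (ac3), -6.25 (ac2) and -10.21 (ac1), giving ltop = 3.22,
    # lctr = 3.03, lbtm = 3.96, zporetop = 0.0 and zporebtm = -10.21.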
X_Fluid_ext = numpy.array([[0.0, 0.0, l3],
[R, 0.0, l3],
[R, 0.0, X_aHem[ap1][2]],
[R, 0.0, X_aHem[ap2][2]],
[R, 0.0, -l0-l1-l4],
[0.0, 0.0, -l0-l1-l4]])
X_Fluid_ctr = numpy.array([[0.0, 0.0, X_aHem[ac1][2]],
[0.0, 0.0, X_aHem[ac2][2]],
[0.0, 0.0, X_aHem[ac3][2]],
[0.0, 0.0, X_aHem[ac4][2]]])
p_Fluid = [Point(x, lcOuter) for x in X_Fluid_ext]
p_Fluid.extend([Point(y, lcCenter) for y in X_Fluid_ctr])
p_aHem = [Point(x, lcCenter) for x in X_aHem]
#Create Line Loops from the points sitting on the line
Comment(' Connect all Fluid points ')
e_Fluid = [Line(p_Fluid[k], p_Fluid[k+1]) for k in range(len(p_Fluid)-1)]
e_Fluid.append(Line(p_Fluid[-1], p_Fluid[0]))
Comment(' Connect all aHem points ')
e_aHem = [Line(p_aHem[k], p_aHem[k+1]) for k in range(len(p_aHem)-1)]
e_aHem.append(Line(p_aHem[-1], p_aHem[0]))
# e_Membrane = [Line(p_aHem[ap1],p_Fluid[2]), Line(p_Fluid[3], p_aHem[ap2])]
e_Membrane = [Line(Point(X_aHem[ap1],lcMembrane), Point(numpy.array([R,0.,X_aHem[ap1][2]]),lcMembrane)),\
Line(Point(numpy.array([R,0.,X_aHem[ap2][2]]),lcMembrane),Point(X_aHem[ap2],lcMembrane))]
edges_to_rot = [e_Fluid[0:5], e_aHem, e_Membrane]
geo_cs_str = "no crosssectional surface"
if crosssections:
Comment(' integrate crosssectional lines in fluid and check if molecule intersects lines')
e_CrossS = [Line(p_aHem[ac1], p_Fluid[6]), Line(p_aHem[ac2], p_Fluid[7]), Line(p_aHem[ac3], p_Fluid[8]), Line(p_aHem[ac4], p_Fluid[9])]
cs_pop_i = None
# check if molecule is near pore
if x0 is not None and (x0[0]**2 + x0[1]**2 <= r0**2):
# check z coordinate of molecule
if abs(x0[2] - X_aHem[ac4][2]) < rMolecule:
geo_cs_str = "top crosssection"
cs_pop_i = -1
elif abs(x0[2] - X_aHem[ac3][2]) < rMolecule:
geo_cs_str = "center top crosssection"
cs_pop_i = 2
elif abs(x0[2] - X_aHem[ac2][2]) < rMolecule:
geo_cs_str = "center bottom crosssection"
cs_pop_i = 1
elif abs(x0[2] - X_aHem[ac1][2]) < rMolecule:
geo_cs_str = "bottom crosssection"
cs_pop_i = 0
if cs_pop_i is not None:
e_CrossS.pop(cs_pop_i)
if cs_pop_i == 0:
top_acdiff = len(X_aHem)-ap1
bottom_end = ac2
elif cs_pop_i == 1:
top_acdiff = len(X_aHem)-ap1
bottom_end = ac3
elif cs_pop_i == 2:
top_acdiff = ac2-ap1
bottom_end = ac1
elif cs_pop_i == -1:
top_acdiff = ac3-ap1
bottom_end = ac1
        edges_to_rot.append(e_CrossS)
        if cs_pop_i is None:
            # defaults when the molecule does not intersect any crosssection
            top_acdiff = len(X_aHem)-ap1
            bottom_end = ac1
rot_axis = [0.0, 0.0, 1.0]
point_on_rot_axis = [0.0, 0.0, 0.0]
    # Extrude all edges 4 times by Pi/2 around the z-axis
surfs = []
angle = 'Pi/2'
n_e = len(edges_to_rot)
n_e_i = [len(edges_to_rot[i]) for i in range(n_e)]
for i in range(n_e):
surfs_i = []
Comment('Extrude in 4 steps around z-axis.')
previous = edges_to_rot[i]
for j in range(4):
Comment('Step %s' % (j+1))
for k in range(len(previous)):
name = Extrude('Line{%s}' % previous[k],
rotation_axis=rot_axis,
point_on_axis=point_on_rot_axis,
angle=angle
)
surfs_i.append(name + '[1]')
previous[k] = name + '[0]'
surfs.append(surfs_i)
# surfs:
# [0] --> outer cylinder <=> e_Fluid
# [1] --> ahem <=> e_aHem
# [2] --> membrane-fluid <=> e_Membrane
# [3] --> crosssections
# TODO: make this all less confusing
surfs_Fluid = surfs[0][:] # [:] is important for a shallow copy (-> del nextline)
surfs_Fluid_bulk_top = surfs[0][:] # prepare for Fluid_bulk
surfs_Fluid_bulk_bottom = surfs[0][:]
del surfs_Fluid[2::n_e_i[0]] # deletes outer membrane boundary (<=> e_Fluid[2])
for index in range(3):
del surfs_Fluid_bulk_top[2::n_e_i[0]-index] # delete equivalent of 2,3,4
del surfs_Fluid_bulk_bottom[0::n_e_i[0]-index] # delete equivalent of 0,1,2
#PhysicalSurface(surfs_Fluid,'fluidb') #Physical Surface Fluid
surfs_boundary_top = [surfs[0][s*5] for s in range(4)]
surfs_boundary_side_top = [surfs[0][1+s*5] for s in range(4)]
surfs_boundary_side_bottom = [surfs[0][3+s*5] for s in range(4)]
surfs_boundary_bottom = [surfs[0][4+s*5] for s in range(4)]
surfs_Fluid_aHem = surfs[1][:]
surfs_Fluid_aHem_add_top = surfs[1][:] # additional aHem surfs for surfs_Fluid_bulk
surfs_Fluid_aHem_add_bottom = surfs[1][:]
for index in range(apdiff):
del surfs_Fluid_aHem[ap1::n_e_i[1]-index] # deletes membrane
[surfs_Fluid.append(s) for s in surfs_Fluid_aHem]
[surfs_Fluid.append(s) for s in surfs[2]] # <=> surfs_Fluid += surfs[2] TODO
[surfs_Fluid_bulk_top.append(s) for s in [surfs[2][2*s] for s in range(4)]]
[surfs_Fluid_bulk_bottom.append(s) for s in [surfs[2][2*s+1] for s in range(4)]]
for index in range(top_acdiff):
del surfs_Fluid_aHem_add_top[ap1::n_e_i[1]-index]
for index in range(ap2):
del surfs_Fluid_aHem_add_bottom[0::n_e_i[1]-index]
for index in range(len(X_aHem)-bottom_end):
del surfs_Fluid_aHem_add_bottom[ac1-ap2::n_e_i[1]-ap2-index]
[surfs_Fluid_bulk_top.append(s) for s in surfs_Fluid_aHem_add_top]
[surfs_Fluid_bulk_bottom.append(s) for s in surfs_Fluid_aHem_add_bottom]
if cs_pop_i is not None:
if cs_pop_i == 0:
surfs_CrossS_bulk_top = [surfs[3][3+4*s] for s in range (4)]
surfs_CrossS_bulk_bottom = [surfs[3][1+4*s] for s in range (4)]
elif cs_pop_i == 1:
surfs_CrossS_bulk_top = [surfs[3][3+4*s] for s in range (4)]
surfs_CrossS_bulk_bottom = [surfs[3][2+4*s] for s in range (4)]
elif cs_pop_i == 2:
surfs_CrossS_bulk_top = [surfs[3][1+4*s] for s in range (4)]
surfs_CrossS_bulk_bottom = [surfs[3][4*s] for s in range (4)]
elif cs_pop_i == -1:
surfs_CrossS_bulk_top = [surfs[3][2+4*s] for s in range (4)]
surfs_CrossS_bulk_bottom = [surfs[3][4*s] for s in range (4)]
else: # no intersect with any crossS -> remove 2nd and 3rd crossS
surfs_CrossS_bulk_top = [surfs[3][3+4*s] for s in range (4)]
surfs_CrossS_bulk_bottom = [surfs[3][4*s] for s in range (4)]
# exit surface for exit time problem
# exit_i = 0,...,3, None <--> exit surface = pore btm,...,pore top, lowerb
if exit_i is not None and (cs_pop_i is None or cs_pop_i %4 != exit_i):
surfs_exit = [surfs[3][exit_i+4*s] for s in range(4)]
# surfs_exit = list of surfaces
PhysicalSurface(surfs_exit, "poreexit")
exittimeDomain = {"fluid_bulk_top"}
for i in range(3 - exit_i):
exittimeDomain.add(["poretop", "porecenter", "porebottom"][i])
params["synonymes"]["exittime"] = exittimeDomain
else:
params["synonymes"]["poreexit"] = {"lowerbulkb"}
[surfs_Fluid_bulk_top.append(s) for s in surfs_CrossS_bulk_top]
[surfs_Fluid_bulk_bottom.append(s) for s in surfs_CrossS_bulk_bottom]
sl_Fluid = SurfaceLoop(surfs_Fluid)
sl_Fluid_bulk_top = SurfaceLoop(surfs_Fluid_bulk_top)
sl_Fluid_bulk_bottom = SurfaceLoop(surfs_Fluid_bulk_bottom)
surfs_Fluid_bottom = surfs[1][:] # create Fluid_bottom/center/top - the aHem side
for index in range(ac1):
del surfs_Fluid_bottom[0::n_e_i[1]-index]
surfs_Fluid_center = surfs_Fluid_bottom[:]
surfs_Fluid_top = surfs_Fluid_bottom[:]
for index in range(n_e_i[1]-ac2):
del surfs_Fluid_bottom[ac2-ac1::n_e_i[1]-ac1-index]
for index in range(ac2-ac1):
del surfs_Fluid_center[0::n_e_i[1]-ac1-index]
for index in range(n_e_i[1]-ac3):
del surfs_Fluid_center[ac3-ac2::n_e_i[1]-ac2-index]
for index in range(ac3-ac1):
del surfs_Fluid_top[0::n_e_i[1]-ac1-index]
surfs_CrossS_bottom = surfs[3][:] # create Fluid_bottom/center/top - the CrossS side
surfs_CrossS_center = surfs[3][:]
surfs_CrossS_top = surfs[3][:]
del surfs_CrossS_bottom[2::4]
del surfs_CrossS_bottom[2::3]
del surfs_CrossS_center[0::4]
del surfs_CrossS_center[2::3]
del surfs_CrossS_top[0::4]
del surfs_CrossS_top[0::3]
[surfs_Fluid_bottom.append(s) for s in surfs_CrossS_bottom] # combine aHem side and CrossS side for surfs_Fluid_bottom/center/top
[surfs_Fluid_center.append(s) for s in surfs_CrossS_center]
[surfs_Fluid_top.append(s) for s in surfs_CrossS_top]
sl_Fluid_bottom = SurfaceLoop(surfs_Fluid_bottom)
sl_Fluid_center = SurfaceLoop(surfs_Fluid_center)
sl_Fluid_top = SurfaceLoop(surfs_Fluid_top)
PhysicalSurface(surfs_Fluid_aHem,'ahemb') #Physical Surface aHem
PhysicalSurface(surfs_boundary_top,'upperb') # Physical surfaces fluid bottom, side (without membran), top
PhysicalSurface(surfs_boundary_side_top,'uppersideb')
PhysicalSurface(surfs_boundary_side_bottom,'lowersideb')
PhysicalSurface(surfs_boundary_bottom,'lowerb')
sl_aHem = SurfaceLoop(surfs[1])
vol_aHem = Volume(sl_aHem)
surfs_Membrane = surfs[0][2::n_e_i[0]]
for index in range(apdiff):
[surfs_Membrane.append(s) for s in surfs[1][ap1+index::n_e_i[1]]]
[surfs_Membrane.append(s) for s in surfs[2]]
sl_Membrane = SurfaceLoop(surfs_Membrane)
vol_Membrane = Volume(sl_Membrane)
surfs_Membrane_ps = surfs[2]
x0_in_pore = None
if x0 is not None and (x0[0]**2 + x0[1]**2 <= r0**2) and cs_pop_i is None:
# check z coordinate of molecule
        if x0[2]<X_aHem[ac4][2] and x0[2]>X_aHem[ac3][2]:
            x0_in_pore = 2 # Molecule is in surfs_Fluid_top
        elif x0[2]<X_aHem[ac3][2] and x0[2]>X_aHem[ac2][2]:
            x0_in_pore = 1 # Molecule is in surfs_Fluid_center
        elif x0[2]<X_aHem[ac2][2] and x0[2]>X_aHem[ac1][2]:
            x0_in_pore = 0 # Molecule is in surfs_Fluid_bottom
pv_fluid_top, pv_fluid_center, pv_fluid_bottom = True, True, True
if x0 is None:
vol_Fluid = Volume(sl_Fluid)
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top)
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom)
vol_Fluid_top = Volume(sl_Fluid_top)
vol_Fluid_center = Volume(sl_Fluid_center)
vol_Fluid_bottom = Volume(sl_Fluid_bottom)
NoPhysicalVolume("molecule")
NoPhysicalSurface("moleculeb")
else:
Comment('Add molecule ball')
Molecule = add_ball(numpy.asarray(x0), rMolecule, lcMolecule,
with_volume=True, holes=None, label=None
)
sl_Fluid_Molecule = Array([sl_Fluid] + [Molecule[1]])
vol_Fluid = Volume(sl_Fluid_Molecule)
# Molecule[0]->Volume, Molecule[1]->surface loop, Molecule[2]->surfs
vol_Molecule = Molecule[0]
PhysicalVolume(vol_Molecule, "molecule")
PhysicalSurface(Molecule[2], "moleculeb")
if x0_in_pore is not None: # NO CrossS and Molecule is in fluid_top/center/bottom
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top)
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom)
if x0_in_pore == 2:
sl_Fluid_top_Molecule = Array([sl_Fluid_top] + [Molecule[1]])
vol_Fluid_top = Volume(sl_Fluid_top_Molecule)
vol_Fluid_center = Volume(sl_Fluid_center)
vol_Fluid_bottom = Volume(sl_Fluid_bottom)
elif x0_in_pore == 1:
sl_Fluid_center_Molecule = Array([sl_Fluid_center] + [Molecule[1]])
vol_Fluid_center = Volume(sl_Fluid_center_Molecule)
vol_Fluid_top = Volume(sl_Fluid_top)
vol_Fluid_bottom = Volume(sl_Fluid_bottom)
elif x0_in_pore == 0:
sl_Fluid_bottom_Molecule = Array([sl_Fluid_bottom] + [Molecule[1]])
vol_Fluid_bottom = Volume(sl_Fluid_bottom_Molecule)
vol_Fluid_top = Volume(sl_Fluid_top)
vol_Fluid_center = Volume(sl_Fluid_center)
else:
if cs_pop_i is None: # Molecule is in fluid_bulk
if x0[2]>=X_aHem[ap1][2]:
sl_Fluid_bulk_top_Molecule = Array([sl_Fluid_bulk_top] + [Molecule[1]])
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top_Molecule)
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom)
elif x0[2]<=X_aHem[ap2][2]:
sl_Fluid_bulk_bottom_Molecule = Array([sl_Fluid_bulk_bottom] + [Molecule[1]])
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom_Molecule)
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top)
vol_Fluid_top = Volume(sl_Fluid_top)
vol_Fluid_center = Volume(sl_Fluid_center)
vol_Fluid_bottom = Volume(sl_Fluid_bottom)
else: # Molecule is in CrossS -> one or two of fluid_top/center/bottom are not going to be defined
if cs_pop_i == -1:
pv_fluid_top = False # fluid_top isn't defined
vol_Fluid_center = Volume(sl_Fluid_center)
vol_Fluid_bottom = Volume(sl_Fluid_bottom)
sl_Fluid_bulk_top_Molecule = Array([sl_Fluid_bulk_top] + [Molecule[1]])
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top_Molecule)
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom)
elif cs_pop_i == 2:
pv_fluid_top, pv_fluid_center = False, False # fluid_top/center isn't defined
vol_Fluid_bottom = Volume(sl_Fluid_bottom)
sl_Fluid_bulk_top_Molecule = Array([sl_Fluid_bulk_top] + [Molecule[1]])
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top_Molecule)
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom)
elif cs_pop_i == 1:
pv_fluid_center, pv_fluid_bottom = False, False # fluid_center/bottom isn't defined
vol_Fluid_top = Volume(sl_Fluid_top)
sl_Fluid_bulk_bottom_Molecule = Array([sl_Fluid_bulk_bottom] + [Molecule[1]])
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom_Molecule)
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top)
elif cs_pop_i == 0:
pv_fluid_bottom = False # fluid_bottom isn't defined
vol_Fluid_top = Volume(sl_Fluid_top)
vol_Fluid_center = Volume(sl_Fluid_center)
sl_Fluid_bulk_bottom_Molecule = Array([sl_Fluid_bulk_bottom] + [Molecule[1]])
vol_Fluid_bulk_bottom = Volume(sl_Fluid_bulk_bottom_Molecule)
vol_Fluid_bulk_top = Volume(sl_Fluid_bulk_top)
PhysicalVolume(vol_Fluid_bulk_top, 'fluid_bulk_top')
PhysicalVolume(vol_Fluid_bulk_bottom, 'fluid_bulk_bottom')
if pv_fluid_top:
PhysicalVolume(vol_Fluid_top, 'poretop')
if pv_fluid_center:
PhysicalVolume(vol_Fluid_center, 'porecenter')
if pv_fluid_bottom:
PhysicalVolume(vol_Fluid_bottom, 'porebottom')
#PhysicalVolume(vol_Fluid, 'fluid')
PhysicalVolume(vol_Membrane, 'membrane')
PhysicalVolume(vol_aHem, "ahem")
PhysicalSurface(surfs_Membrane_ps,'membraneb') #Physical Surface Membrane
if crosssections:
surfs_CrossS = surfs[3]
raw_code(['Surface{%s} In Volume{%s};' \
%(surfs_CrossS[k], vol_Fluid) for k in range(len(surfs_CrossS))])
if membraneblayer == True:
warn('Currently no membrane boundary layers implemented for this geometry')
membraneblayer_list = []
if moleculeblayer and x0 is not None:
moleculeblayer_list = Molecule[2]
else:
moleculeblayer_list = []
blayer_list = moleculeblayer_list + membraneblayer_list
if blayer_list:
blayer = BoundaryLayer(
edges_list=None, faces_list=blayer_list,
hfar=lcOuter, hwall_n=lcCenter*0.1, hwall_t=lcCenter*0.5,
thickness=1, ratio=2)
field_list = [blayer,]
raw_code(['bfield = newf;'])
raw_code(['Field[bfield] = Min;'])
raw_code(['Field[bfield].FieldsList = {%s};' %(','.join(field_list))])
# Uncomment for mesh size field
raw_code(['Background Field = bfield;'])
# to disable question dialogs
raw_code(['General.ExpertMode = 1;'])
# Meshing Algorithm: 2= ?, 5 = frontal (netgen)
#raw_code(['Mesh.Algorithm3D = 5;'])
meta = get_meta()
meta.update(params)
meta["x0"] = x0
geo_dict = {"gmsh mesh generating sript": __name__,
"xMolecule": x0,
#"crosssections": crosssections,
#"Number of crosssections": len(e_CrossS),
#"Total number of crossections": 4,
#"molecule crosses": geo_cs_str,
#"popped crossection index": cs_pop_i,
#"cs_pop_i": cs_pop_i,
"Typical length scale on aHem": lcCenter,
"geo_code": get_code(),
"meta": meta,
}
return geo_dict
# -----
if __name__ == '__main__':
from nanopores.geo2xml import geofile2geo
from dolfin import plot
gdict = get_geo()
geo = geofile2geo(gdict["geo_code"], gdict["meta"], name="alphahem", clscale=8.)
plot(geo.boundaries, interactive=True)
|
[
"numpy.asarray",
"dolfin.plot",
"numpy.array",
"nanopores.geo2xml.geofile2geo",
"warnings.warn"
] |
[((678, 2705), 'numpy.array', 'numpy.array', (['[[2.16, 0.0, 0.0], [2.77, 0.0, -0.19], [3.24, 0.0, -0.1], [3.59, 0.0, -0.1],\n [3.83, 0.0, -0.35], [3.84, 0.0, -0.8], [3.67, 0.0, -1.34], [3.73, 0.0, \n -1.96], [3.93, 0.0, -2.31], [4.23, 0.0, -2.67], [4.44, 0.0, -2.81], [\n 4.33, 0.0, -3.25], [4.01, 0.0, -3.5], [3.99, 0.0, -3.67], [4.11, 0.0, -\n 3.94], [4.39, 0.0, -4.12], [4.44, 0.0, -4.52], [4.73, 0.0, -4.86], [\n 4.96, 0.0, -5.41], [4.89, 0.0, -5.87], [4.63, 0.0, -6.44], [4.43, 0.0, \n -6.96], [4.07, 0.0, -7.32], [3.71, 0.0, -7.51], [3.46, 0.0, -7.36], [\n 3.41, 0.0, -7.1], [3.31, 0.0, -6.9], [3.04, 0.0, -6.87], [2.73, 0.0, -\n 6.73], [2.41, 0.0, -6.6], [2.17, 0.0, -6.41], [1.97, 0.0, -6.23], [1.84,\n 0.0, -6.03], [1.76, 0.0, -5.87], [1.54, 0.0, -5.87], [1.4, 0.0, -5.96],\n [1.31, 0.0, -6.16], [1.39, 0.0, -6.57], [1.6, 0.0, -6.81], [1.71, 0.0, \n -7.09], [1.76, 0.0, -7.32], [1.67, 0.0, -7.65], [1.44, 0.0, -7.81], [\n 1.49, 0.0, -8.06], [1.56, 0.0, -8.36], [1.44, 0.0, -8.61], [1.43, 0.0, \n -8.79], [1.44, 0.0, -9.1], [1.6, 0.0, -9.48], [1.74, 0.0, -9.84], [1.63,\n 0.0, -10.0], [1.47, 0.0, -10.19], [1.26, 0.0, -10.21], [1.07, 0.0, -\n 10.05], [1.03, 0.0, -9.76], [1.09, 0.0, -9.44], [1.07, 0.0, -9.02], [\n 0.86, 0.0, -8.79], [0.64, 0.0, -8.68], [0.63, 0.0, -8.36], [0.8, 0.0, -\n 8.22], [0.81, 0.0, -7.93], [0.89, 0.0, -7.71], [1.04, 0.0, -7.51], [1.1,\n 0.0, -7.25], [0.91, 0.0, -7.02], [0.91, 0.0, -6.76], [0.91, 0.0, -6.48],\n [0.69, 0.0, -6.25], [0.69, 0.0, -6.0], [0.66, 0.0, -5.68], [0.59, 0.0, \n -5.36], [0.53, 0.0, -5.12], [0.54, 0.0, -4.92], [0.79, 0.0, -4.84], [\n 1.03, 0.0, -4.89], [1.21, 0.0, -4.7], [1.36, 0.0, -4.42], [1.49, 0.0, -\n 4.16], [1.66, 0.0, -3.92], [1.66, 0.0, -3.7], [1.8, 0.0, -3.41], [2.0, \n 0.0, -3.22], [1.91, 0.0, -2.93], [1.8, 0.0, -2.71], [1.56, 0.0, -2.55],\n [1.46, 0.0, -2.38], [1.3, 0.0, -2.19], [1.21, 0.0, -1.93], [1.09, 0.0, \n -1.64], [0.9, 0.0, -1.45], [0.8, 0.0, -1.28], [0.84, 0.0, -1.0], [1.0, \n 0.0, -0.8], [1.26, 0.0, -0.64], [1.7, 0.0, -0.31]]'], {}), '([[2.16, 0.0, 0.0], [2.77, 0.0, -0.19], [3.24, 0.0, -0.1], [3.59,\n 0.0, -0.1], [3.83, 0.0, -0.35], [3.84, 0.0, -0.8], [3.67, 0.0, -1.34],\n [3.73, 0.0, -1.96], [3.93, 0.0, -2.31], [4.23, 0.0, -2.67], [4.44, 0.0,\n -2.81], [4.33, 0.0, -3.25], [4.01, 0.0, -3.5], [3.99, 0.0, -3.67], [\n 4.11, 0.0, -3.94], [4.39, 0.0, -4.12], [4.44, 0.0, -4.52], [4.73, 0.0, \n -4.86], [4.96, 0.0, -5.41], [4.89, 0.0, -5.87], [4.63, 0.0, -6.44], [\n 4.43, 0.0, -6.96], [4.07, 0.0, -7.32], [3.71, 0.0, -7.51], [3.46, 0.0, \n -7.36], [3.41, 0.0, -7.1], [3.31, 0.0, -6.9], [3.04, 0.0, -6.87], [2.73,\n 0.0, -6.73], [2.41, 0.0, -6.6], [2.17, 0.0, -6.41], [1.97, 0.0, -6.23],\n [1.84, 0.0, -6.03], [1.76, 0.0, -5.87], [1.54, 0.0, -5.87], [1.4, 0.0, \n -5.96], [1.31, 0.0, -6.16], [1.39, 0.0, -6.57], [1.6, 0.0, -6.81], [\n 1.71, 0.0, -7.09], [1.76, 0.0, -7.32], [1.67, 0.0, -7.65], [1.44, 0.0, \n -7.81], [1.49, 0.0, -8.06], [1.56, 0.0, -8.36], [1.44, 0.0, -8.61], [\n 1.43, 0.0, -8.79], [1.44, 0.0, -9.1], [1.6, 0.0, -9.48], [1.74, 0.0, -\n 9.84], [1.63, 0.0, -10.0], [1.47, 0.0, -10.19], [1.26, 0.0, -10.21], [\n 1.07, 0.0, -10.05], [1.03, 0.0, -9.76], [1.09, 0.0, -9.44], [1.07, 0.0,\n -9.02], [0.86, 0.0, -8.79], [0.64, 0.0, -8.68], [0.63, 0.0, -8.36], [\n 0.8, 0.0, -8.22], [0.81, 0.0, -7.93], [0.89, 0.0, -7.71], [1.04, 0.0, -\n 7.51], [1.1, 0.0, -7.25], [0.91, 0.0, -7.02], [0.91, 0.0, -6.76], [0.91,\n 0.0, -6.48], [0.69, 0.0, -6.25], [0.69, 0.0, -6.0], [0.66, 0.0, -5.68],\n [0.59, 0.0, -5.36], [0.53, 0.0, -5.12], [0.54, 0.0, -4.92], [0.79, 
0.0,\n -4.84], [1.03, 0.0, -4.89], [1.21, 0.0, -4.7], [1.36, 0.0, -4.42], [\n 1.49, 0.0, -4.16], [1.66, 0.0, -3.92], [1.66, 0.0, -3.7], [1.8, 0.0, -\n 3.41], [2.0, 0.0, -3.22], [1.91, 0.0, -2.93], [1.8, 0.0, -2.71], [1.56,\n 0.0, -2.55], [1.46, 0.0, -2.38], [1.3, 0.0, -2.19], [1.21, 0.0, -1.93],\n [1.09, 0.0, -1.64], [0.9, 0.0, -1.45], [0.8, 0.0, -1.28], [0.84, 0.0, -\n 1.0], [1.0, 0.0, -0.8], [1.26, 0.0, -0.64], [1.7, 0.0, -0.31]])\n', (689, 2705), False, 'import numpy, math\n'), ((5728, 5880), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, l3], [R, 0.0, l3], [R, 0.0, X_aHem[ap1][2]], [R, 0.0, X_aHem[\n ap2][2]], [R, 0.0, -l0 - l1 - l4], [0.0, 0.0, -l0 - l1 - l4]]'], {}), '([[0.0, 0.0, l3], [R, 0.0, l3], [R, 0.0, X_aHem[ap1][2]], [R, \n 0.0, X_aHem[ap2][2]], [R, 0.0, -l0 - l1 - l4], [0.0, 0.0, -l0 - l1 - l4]])\n', (5739, 5880), False, 'import numpy, math\n'), ((6041, 6171), 'numpy.array', 'numpy.array', (['[[0.0, 0.0, X_aHem[ac1][2]], [0.0, 0.0, X_aHem[ac2][2]], [0.0, 0.0, X_aHem[\n ac3][2]], [0.0, 0.0, X_aHem[ac4][2]]]'], {}), '([[0.0, 0.0, X_aHem[ac1][2]], [0.0, 0.0, X_aHem[ac2][2]], [0.0, \n 0.0, X_aHem[ac3][2]], [0.0, 0.0, X_aHem[ac4][2]]])\n', (6052, 6171), False, 'import numpy, math\n'), ((23498, 23573), 'nanopores.geo2xml.geofile2geo', 'geofile2geo', (["gdict['geo_code']", "gdict['meta']"], {'name': '"""alphahem"""', 'clscale': '(8.0)'}), "(gdict['geo_code'], gdict['meta'], name='alphahem', clscale=8.0)\n", (23509, 23573), False, 'from nanopores.geo2xml import geofile2geo\n'), ((23577, 23615), 'dolfin.plot', 'plot', (['geo.boundaries'], {'interactive': '(True)'}), '(geo.boundaries, interactive=True)\n', (23581, 23615), False, 'from dolfin import plot\n'), ((21778, 21853), 'warnings.warn', 'warn', (['"""Currently no membrane boundary layers implemented for this geometry"""'], {}), "('Currently no membrane boundary layers implemented for this geometry')\n", (21782, 21853), False, 'from warnings import warn\n'), ((16679, 16696), 'numpy.asarray', 'numpy.asarray', (['x0'], {}), '(x0)\n', (16692, 16696), False, 'import numpy, math\n'), ((6963, 7000), 'numpy.array', 'numpy.array', (['[R, 0.0, X_aHem[ap1][2]]'], {}), '([R, 0.0, X_aHem[ap1][2]])\n', (6974, 7000), False, 'import numpy, math\n'), ((7042, 7079), 'numpy.array', 'numpy.array', (['[R, 0.0, X_aHem[ap2][2]]'], {}), '([R, 0.0, X_aHem[ap2][2]])\n', (7053, 7079), False, 'import numpy, math\n')]
|
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.agents import VIP, Bodyguard, StreetBystander
from multiagent.scenario import VIPScenario
import copy
class Scenario(VIPScenario):
def __init__(self, num_bodyguards=4, num_bystanders=10, communication=True, env_range=1.0, comm_dim=4, seed=1):
super().__init__(num_bodyguards, num_bystanders, communication, env_range, comm_dim, seed)
def make_world(self):
""" Creates the world, the agents, the landmarks, the communication channels etc. These are for the time being all undifferentiated
"""
world = World()
self.world = world
if self.communication:
world.dim_c = self.comm_dim
### create the landmarks, among them the start and goal of VIP ###
world.landmarks = self.create_landmarks(world, 22)
### create the agents ###
for i in range(self.num_agents):
if i == 0:
agent = VIP(self)
elif i <= self.num_bodyguards:
agent = Bodyguard(self, self.communication, alpha=2.5, beta=2)
agent.name = 'bodyguard %d' % (i)
else:
agent = StreetBystander(self)
agent.name = 'bystander %d' % (i - self.num_bodyguards)
agent.accel = 3.0
agent.max_speed = 1.0
world.agents.append(agent)
self.reset_world(world)
return world
def reset_world(self, world):
""" Resets the world and agents. Chooses a new goal position for the VIP and
arranges the bodyguards accordingly
"""
""" Resets the world and agents. Chooses a new goal position for the VIP and
arranges the bodyguards accordingly
"""
self.world = world
for agent in world.agents:
agent.reset()
# set the initial state of the VIP
goal, start = copy.deepcopy(world.landmarks[:2])
self.vip_agent.state.p_pos = start.state.p_pos + start.size
self.vip_agent.goal_a = goal
# set the initial states of the bodyguards
temp_angle = 360/self.num_bodyguards
for i, agent in enumerate(self.bodyguards):
agent_angle = (temp_angle)* np.pi / 180.
agent.state.p_pos = world.agents[0].state.p_pos + np.array([np.cos(agent_angle), np.sin(agent_angle)])*agent.allowed_distance
temp_angle += 360/self.num_bodyguards
# set position of the bystanders behind the landmarks
np.random.seed(seed=None)
seed=np.random.randint(1, 10)
np.random.seed(seed)
bystander_theta = np.random.uniform(-np.pi, np.pi, self.num_bystanders)
np.random.seed(seed=None)
seed=np.random.randint(1, 10)
np.random.seed(seed)
bystander_noise = np.random.rand(self.num_bystanders)
x = np.array([-0.6, .6])
y = np.arange(-.9, 1.0, 0.4)
bystander_p_pos=np.transpose([np.repeat(x, len(y)), np.tile(y, len(x))])
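        # The two x-values combined with the five y-values of arange(-.9, 1.0, 0.4)
        # (-0.9, -0.5, -0.1, 0.3, 0.7) give 2*5 = 10 grid positions, matching the
        # default num_bystanders=10.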
for i, agent in enumerate(self.bystanders):
agent.state.p_pos = bystander_p_pos[i]
agent.theta = bystander_theta[i]
agent.noise = bystander_noise[i]
# selecting the attacker from bystanders
attacker = np.random.choice(self.bystanders)
attacker.goal_a = self.vip_agent
def create_landmarks(self, world, number_of_landmarks):
world_landmarks = []
for i in range(number_of_landmarks):
landmark = Landmark()
landmark.name = 'landmark %d' % i
landmark.color = np.array([0.75,0.75,0.75])
landmark.state.p_vel = np.zeros(world.dim_p)
landmark.collide = True
landmark.movable = False
landmark.size = 0.050
world_landmarks.append(landmark)
x = np.array([0])
y = np.array([-0.9, 0.9])
landmark_p_pos = np.transpose([np.repeat(x, len(y)), np.tile(y, len(x))])
for i, landmark in enumerate(world_landmarks[:2]):
landmark.state.p_pos = landmark_p_pos[i]
x = np.array([-0.9, 0.9])
y = np.arange(-1, 1.5, 0.25)
landmark_p_pos = np.transpose([np.repeat(x, len(y)), np.tile(y, len(x))])
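        # Two x-values times the ten y-values of arange(-1, 1.5, 0.25) give 20 grid
        # positions for the remaining landmarks (22 created minus the 2 start/goal
        # landmarks placed above).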
for i, landmark in enumerate(world_landmarks[2:]):
landmark.state.p_pos = landmark_p_pos[i]
world_landmarks[0].color = np.array([0.15, 0.65, 0.15])
return world_landmarks
|
[
"numpy.random.uniform",
"copy.deepcopy",
"numpy.random.choice",
"numpy.random.seed",
"multiagent.agents.StreetBystander",
"numpy.zeros",
"multiagent.core.Landmark",
"multiagent.agents.VIP",
"multiagent.agents.Bodyguard",
"numpy.random.randint",
"numpy.array",
"numpy.arange",
"numpy.cos",
"multiagent.core.World",
"numpy.random.rand",
"numpy.sin"
] |
[((628, 635), 'multiagent.core.World', 'World', ([], {}), '()\n', (633, 635), False, 'from multiagent.core import World, Agent, Landmark\n'), ((1945, 1979), 'copy.deepcopy', 'copy.deepcopy', (['world.landmarks[:2]'], {}), '(world.landmarks[:2])\n', (1958, 1979), False, 'import copy\n'), ((2546, 2571), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'None'}), '(seed=None)\n', (2560, 2571), True, 'import numpy as np\n'), ((2585, 2609), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2602, 2609), True, 'import numpy as np\n'), ((2618, 2638), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2632, 2638), True, 'import numpy as np\n'), ((2665, 2718), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', 'self.num_bystanders'], {}), '(-np.pi, np.pi, self.num_bystanders)\n', (2682, 2718), True, 'import numpy as np\n'), ((2728, 2753), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'None'}), '(seed=None)\n', (2742, 2753), True, 'import numpy as np\n'), ((2767, 2791), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2784, 2791), True, 'import numpy as np\n'), ((2800, 2820), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2814, 2820), True, 'import numpy as np\n'), ((2847, 2882), 'numpy.random.rand', 'np.random.rand', (['self.num_bystanders'], {}), '(self.num_bystanders)\n', (2861, 2882), True, 'import numpy as np\n'), ((2896, 2917), 'numpy.array', 'np.array', (['[-0.6, 0.6]'], {}), '([-0.6, 0.6])\n', (2904, 2917), True, 'import numpy as np\n'), ((2929, 2954), 'numpy.arange', 'np.arange', (['(-0.9)', '(1.0)', '(0.4)'], {}), '(-0.9, 1.0, 0.4)\n', (2938, 2954), True, 'import numpy as np\n'), ((3298, 3331), 'numpy.random.choice', 'np.random.choice', (['self.bystanders'], {}), '(self.bystanders)\n', (3314, 3331), True, 'import numpy as np\n'), ((3866, 3879), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3874, 3879), True, 'import numpy as np\n'), ((3892, 3913), 'numpy.array', 'np.array', (['[-0.9, 0.9]'], {}), '([-0.9, 0.9])\n', (3900, 3913), True, 'import numpy as np\n'), ((4122, 4143), 'numpy.array', 'np.array', (['[-0.9, 0.9]'], {}), '([-0.9, 0.9])\n', (4130, 4143), True, 'import numpy as np\n'), ((4157, 4181), 'numpy.arange', 'np.arange', (['(-1)', '(1.5)', '(0.25)'], {}), '(-1, 1.5, 0.25)\n', (4166, 4181), True, 'import numpy as np\n'), ((4412, 4440), 'numpy.array', 'np.array', (['[0.15, 0.65, 0.15]'], {}), '([0.15, 0.65, 0.15])\n', (4420, 4440), True, 'import numpy as np\n'), ((3531, 3541), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (3539, 3541), False, 'from multiagent.core import World, Agent, Landmark\n'), ((3617, 3645), 'numpy.array', 'np.array', (['[0.75, 0.75, 0.75]'], {}), '([0.75, 0.75, 0.75])\n', (3625, 3645), True, 'import numpy as np\n'), ((3679, 3700), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (3687, 3700), True, 'import numpy as np\n'), ((992, 1001), 'multiagent.agents.VIP', 'VIP', (['self'], {}), '(self)\n', (995, 1001), False, 'from multiagent.agents import VIP, Bodyguard, StreetBystander\n'), ((1069, 1123), 'multiagent.agents.Bodyguard', 'Bodyguard', (['self', 'self.communication'], {'alpha': '(2.5)', 'beta': '(2)'}), '(self, self.communication, alpha=2.5, beta=2)\n', (1078, 1123), False, 'from multiagent.agents import VIP, Bodyguard, StreetBystander\n'), ((1216, 1237), 'multiagent.agents.StreetBystander', 'StreetBystander', (['self'], {}), '(self)\n', (1231, 1237), False, 'from multiagent.agents import 
VIP, Bodyguard, StreetBystander\n'), ((2359, 2378), 'numpy.cos', 'np.cos', (['agent_angle'], {}), '(agent_angle)\n', (2365, 2378), True, 'import numpy as np\n'), ((2380, 2399), 'numpy.sin', 'np.sin', (['agent_angle'], {}), '(agent_angle)\n', (2386, 2399), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 17 15:42:13 2018
amortization_table.py
@author: david
# source for most of Mortgage class' basic functions
# https://github.com/jbmohler/mortgage/blob/master/mortgage.py
"""
import argparse
import decimal
import os
import pandas as pd
import numpy as np
import datetime as dt
class Mortgage:
"""Contains properties of a mortgage given user inputs
Args:
_amount (float): Loan amount
_price(float): Price of house
_rate (float): Interest rate as a decimal i.e. 0.05
_term (int): Length of the loan in years
_taxes (float): Annual tax bill
_insurance (float): Annual insurance bill
_additional (float): Extra payment in each month that goes toward principal
"""
def __init__(self, amount, price, rate, term, taxes, insurance, additional=0):
"""init function for Mortgage class"""
self._amount = amount
self._price = price
self._rate = rate
self._term = term
self._taxes = taxes
self._insurance = insurance
self._add_pmt = additional
self._total_combined_payments = float(0)
self._payment_months = float(0)
self._inflation = float(0.03)
self._pv_payments = float(0)
self._pv_combined_payments = float(0)
self._first_payment = '1/1/2018' # future update
self._pay_freq = 'Monthly' # only option for now
self._compound_freq = 'Monthly' # only option for now
self._pay_type = 'End of Period' # only option for now
self.MONTHS_IN_YEAR = 12
self.DOLLAR_QUANTIZE = decimal.Decimal('.01')
def dollar(self, f, round=decimal.ROUND_CEILING):
"""Returns the passed float rounded to two decimal places"""
if not isinstance(f, decimal.Decimal):
f = decimal.Decimal(str(f)) # force f into decimal
return f.quantize(self.DOLLAR_QUANTIZE, rounding=round)
def rate(self):
"""Returns the interest rate of the loan"""
return self._rate
def monthly_growth(self):
"""Returns the monthly interest accrual of the loan"""
return 1.0 + self._rate / self.MONTHS_IN_YEAR
def loan_years(self):
"""Returns the term, in years, of the loan"""
return self._term
def loan_months(self):
"""Returns the term, in months, of the loan"""
return self._term * self.MONTHS_IN_YEAR
def price(self):
"""Returns the house price"""
return self._price
def amount(self):
"""Returns the amount of the loan"""
return self._amount
def additional_pmt(self):
"""Returns the additional monthly principal payment"""
return self._add_pmt
def taxes(self):
"""Returns the annual taxes due"""
return self._taxes
def monthly_taxes(self):
"""Returns the monthly taxes due"""
return self._taxes / self.MONTHS_IN_YEAR
def insurance(self):
"""Returns the annual insurance amount due"""
return self._insurance * self.price()
def monthly_insurance(self):
"""Returns the monthly insurance due"""
return self.insurance() / self.MONTHS_IN_YEAR
def monthly_payment(self):
"""Returns the monthly payment for the loan"""
pmt = (self.amount() * self.rate()) / (self.MONTHS_IN_YEAR * (1.0-(1.0/self.monthly_growth()) ** self.loan_months()))
return pmt
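    # Illustrative figures (not from the original source): with the argparse defaults
    # below (amount=200000, rate=5%/year, term=30 years) this evaluates to
    # 200000*0.05 / (12 * (1 - (1/(1 + 0.05/12))**360)), roughly 1073.64 per month.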
def annual_payment(self):
"""Returns the total payments during the year for the loan"""
return self.monthly_payment() * self.MONTHS_IN_YEAR
def total_payment(self):
"""Returns the total cost of the loan"""
return self.monthly_payment() * self.loan_months()
def piti(self):
"""Returns the monthly PITI"""
return self.monthly_payment() + self.monthly_taxes() + self.monthly_insurance()
def monthly_payment_schedule(self):
"""Yields amortization schedule for the given loan"""
monthly = float(self.dollar(self.monthly_payment()))
additional = float(self.dollar(self.additional_pmt()))
balance = float(self.dollar(self.amount()))
end_balance = float(self.dollar(balance))
rate = float(decimal.Decimal(str(self.rate())).quantize(decimal.Decimal('.000001')))
while True:
interest_unrounded = balance * rate * float(decimal.Decimal(1) / self.MONTHS_IN_YEAR)
interest = float(self.dollar(interest_unrounded, round=decimal.ROUND_HALF_UP))
if monthly >= balance + interest: # check if payment exceeds remaining due
# last pmt
additional = 0.0
principal = float(self.dollar(end_balance))
end_balance -= float(self.dollar(principal + additional))
yield float(self.dollar(balance)), float(self.dollar((principal + interest))), additional, interest, principal, float(self.dollar(end_balance))
break
elif (monthly + additional) >= balance + interest: # check if pmt + add exceeds remaining due
principal = float(self.dollar(monthly - interest))
additional = (balance + interest) - monthly
end_balance -= float(self.dollar(principal + additional))
yield float(self.dollar(balance)), float(self.dollar((principal + interest))), additional, interest, principal, float(self.dollar(end_balance))
break
principal = float(self.dollar(monthly - interest))
end_balance -= (principal + additional)
yield float(self.dollar(balance)), monthly, additional, interest, principal, float(self.dollar(end_balance))
balance = end_balance
def print_monthly_payment_schedule(self):
"""Prints out the monthly payment schedule"""
for index, payment in enumerate(self.monthly_payment_schedule()):
print(index + 1, payment[0], payment[1], payment[2], payment[3], payment[4], payment[5])
def amortization_dict(self):
"""Returns a dictionary with the payment schedule"""
amort_dict = {}
for index, payment in enumerate(self.monthly_payment_schedule()):
amort_dict[index + 1] = [payment[0], payment[1], payment[2], payment[3], payment[4], payment[5]]
return amort_dict
def amortization_table(self):
"""Returns a dataframe with the amortization table in it"""
names = ['Beg. Balance', 'Monthly Payment', 'Additional Payment',
'Interest', 'Principal', 'End Balance']
df = pd.DataFrame.from_dict(self.amortization_dict(), orient='index')
df.columns = names
monthly_inflation = self._inflation / 12
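        # each payment i months out is discounted by (1 + monthly_inflation)**i to
        # express it in present-value terms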
if sum(df['Additional Payment'].values) != 0: #check if there are additional payments
df['Total Payment'] = df['Monthly Payment'] + df['Additional Payment']
self._total_combined_payments = sum(df['Total Payment'].values)
self._payment_months = df.shape[0]
# calc PV of original terms
arr_months = np.array(range(self.loan_years() * 12))
arr_m_payment = np.array(self.monthly_payment())
list_inflation = []
for month in arr_months:
list_inflation.append((1 + monthly_inflation) ** month)
arr_inflation = np.array(list_inflation)
arr_pv_payments = np.divide(arr_m_payment, arr_inflation)
self._pv_payments = sum(arr_pv_payments)
# add combined PV factor
arr_c_months = np.array(range(self._payment_months))
list_c_inflation = []
for month in arr_c_months:
list_c_inflation.append((1 + monthly_inflation) ** month)
arr_c_inflation = np.array(list_c_inflation)
df['PV of Combined Payment'] = (df['Monthly Payment'] + df['Additional Payment']) / arr_c_inflation
self._pv_combined_payments = sum(df['PV of Combined Payment'].values)
return df
else:
# add PV factor
arr_months = np.array(range(self.loan_months()))
list_inflation = []
for month in arr_months:
list_inflation.append((1 + monthly_inflation) ** month)
arr_inflation = np.array(list_inflation)
df['PV of Payment'] = df['Monthly Payment'] / arr_inflation
self._pv_payments = sum(df['PV of Payment'].values)
return df
def amort_table_to_csv(self):
"""Outputs the amortization table to a .csv file"""
now = dt.datetime.today()
date = str(now.year) + str(now.month) + str(now.day) + '_' + str(now.hour) + str(now.minute)
self.amortization_table().to_csv('/home/david/git_repos/mortgage/output/' + date + '.csv')
def print_summary(self):
"""Prints out a summary of the given mortgage"""
print('Mortgage Summary')
print('-' * 75)
print('{0:>30s}: ${1:>11,.0f}'.format('House Price', self.price()))
print('')
print('{0:>30s}: ${1:>11,.0f}'.format('Loan Amount', self.amount()))
print('{0:>30s}: {1:>12.0f}'.format('Term (years)', self.loan_years()))
print('{0:>30s}: {1:>12.2f}%'.format('Rate', self.rate()*100))
print('{0:>30s}: ${1:>11,.0f}'.format('Monthly Mortgage Payment', self.monthly_payment()))
print('{0:>30s}: ${1:>11,.0f}'.format('Annual Mortgage Payment', self.annual_payment()))
print('{0:>30s}: ${1:>11,.0f}'.format('Total Mortgage Payment', self.total_payment()))
print('{0:>30s}: ${1:>11,.0f}'.format('Total PV of Payments', self._pv_payments))
print('')
print('{0:>30s}: ${1:>11,.0f}'.format('Annual Taxes', self.taxes()))
print('{0:>30s}: ${1:>11,.0f}'.format('Annual Insurance', self.insurance()))
print('')
print('{0:>30s}: ${1:>11,.0f}'.format('Monthly PITI', self.piti()))
print('-' * 75)
if self._total_combined_payments != 0:
new_monthly = self._total_combined_payments / self._payment_months
new_annual = self._total_combined_payments / self._payment_months * 12
change_months = self._payment_months - self.loan_months()
change_monthly = new_monthly - self.monthly_payment()
change_annual = new_annual - self.annual_payment()
change_total = self._total_combined_payments - self.total_payment()
change_pv = self._pv_combined_payments - self._pv_payments
print('Effect of paying an additional ${0:,.0f} each month:'.format(self.additional_pmt()))
print("")
print('{0:>30s}: {1:>12.1f} {2:>10.1f} years'.format('Term (years)', self._payment_months/12.0, change_months/12.0))
print('{0:>30s}: ${1:>11,.0f} ${2:>10,.0f}'.format('Monthly Mortgage Payment', new_monthly, change_monthly))
print('{0:>30s}: ${1:>11,.0f} ${2:>10,.0f}'.format('Annual Mortgage Payment', new_annual, change_annual))
print('{0:>30s}: ${1:>11,.0f} ${2:>10,.0f}'.format('Total Mortgage Payment', self._total_combined_payments, change_total))
print('{0:>30s}: ${1:>11,.0f} ${2:>10,.0f}'.format('PV of Combined Payments', self._pv_combined_payments, change_pv))
print('')
print('{0:>30s}: ${1:>11,.0f}'.format('Annual Taxes', self.taxes()))
print('{0:>30s}: ${1:>11,.0f}'.format('Annual Insurance', self.insurance()))
print('')
print('{0:>30s}: ${1:>11,.0f}'.format('Monthly PITI', new_monthly + self.monthly_taxes() + self.monthly_insurance()))
print('-' * 75)
# re-reference totals to include additional payments (new function needed)
# pv of payments
def main(self, csv=False):
"""Generates an amortization table and prints the summary"""
self.amortization_table() # print [0] for the table # need to run to get summary stats
        if csv:
self.amort_table_to_csv() #optional, use if want to export
self.print_summary()
def main():
parser = argparse.ArgumentParser(description='Mortgage Tools')
parser.add_argument('-r', '--interest', default=5, dest='interest')
parser.add_argument('-y', '--loan-years', default=30, dest='years')
parser.add_argument('-p', '--price', default=250000, dest='price')
parser.add_argument('-a', '--amount', default=200000, dest='amount')
parser.add_argument('-t', '--taxes', default=7000, dest ='taxes')
parser.add_argument('-i', '--insurance', default=0.0035, dest='insurance')
    parser.add_argument('-e', '--extra-payment', default=None, dest='extra')
args = parser.parse_args()
if args.extra:
m = Mortgage(float(args.amount), float(args.price), float(args.interest) / 100.0, int(args.years), float(args.taxes), float(args.insurance), float(args.extra))
else:
m = Mortgage(float(args.amount), float(args.price), float(args.interest) / 100.0, int(args.years), float(args.taxes), float(args.insurance))
m.main()
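# Example invocation (illustrative; flags as defined above):
#   python amortization_table.py -a 200000 -p 250000 -r 5 -y 30 -t 7000 -i 0.0035 -e 100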
if __name__ == '__main__':
main()
|
[
"numpy.divide",
"datetime.datetime.today",
"argparse.ArgumentParser",
"decimal.Decimal",
"numpy.array"
] |
[((12232, 12285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Mortgage Tools"""'}), "(description='Mortgage Tools')\n", (12255, 12285), False, 'import argparse\n'), ((1697, 1719), 'decimal.Decimal', 'decimal.Decimal', (['""".01"""'], {}), "('.01')\n", (1712, 1719), False, 'import decimal\n'), ((8705, 8724), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (8722, 8724), True, 'import datetime as dt\n'), ((7470, 7494), 'numpy.array', 'np.array', (['list_inflation'], {}), '(list_inflation)\n', (7478, 7494), True, 'import numpy as np\n'), ((7525, 7564), 'numpy.divide', 'np.divide', (['arr_m_payment', 'arr_inflation'], {}), '(arr_m_payment, arr_inflation)\n', (7534, 7564), True, 'import numpy as np\n'), ((7898, 7924), 'numpy.array', 'np.array', (['list_c_inflation'], {}), '(list_c_inflation)\n', (7906, 7924), True, 'import numpy as np\n'), ((8413, 8437), 'numpy.array', 'np.array', (['list_inflation'], {}), '(list_inflation)\n', (8421, 8437), True, 'import numpy as np\n'), ((4380, 4406), 'decimal.Decimal', 'decimal.Decimal', (['""".000001"""'], {}), "('.000001')\n", (4395, 4406), False, 'import decimal\n'), ((4485, 4503), 'decimal.Decimal', 'decimal.Decimal', (['(1)'], {}), '(1)\n', (4500, 4503), False, 'import decimal\n')]
|
"""
Descriptors
============
The descriptors module offers all kinds of acoustics related descriptors.
.. toctree::
:maxdepth: 2
Descriptors from ISO/TR 25417:2007
**********************************
Descriptors from :mod:`acoustics.standards.iso_tr_25417_2007`.
.. autoattribute:: acoustics.descriptors.REFERENCE_PRESSURE
.. autofunction:: acoustics.descriptors.sound_pressure_level
.. autofunction:: acoustics.descriptors.equivalent_sound_pressure_level
.. autofunction:: acoustics.descriptors.peak_sound_pressure
.. autofunction:: acoustics.descriptors.peak_sound_pressure_level
.. autoattribute:: acoustics.descriptors.REFERENCE_SOUND_EXPOSURE
.. autofunction:: acoustics.descriptors.sound_exposure
.. autofunction:: acoustics.descriptors.sound_exposure_level
.. autoattribute:: acoustics.descriptors.REFERENCE_POWER
.. autofunction:: acoustics.descriptors.sound_power_level
.. autofunction:: acoustics.descriptors.sound_energy
.. autofunction:: acoustics.descriptors.sound_energy_level
.. autoattribute:: acoustics.descriptors.REFERENCE_ENERGY
.. autofunction:: acoustics.descriptors.sound_intensity
.. autofunction:: acoustics.descriptors.time_averaged_sound_intensity
.. autoattribute:: acoustics.descriptors.REFERENCE_INTENSITY
.. autofunction:: acoustics.descriptors.time_averaged_sound_intensity_level
.. autofunction:: acoustics.descriptors.normal_time_averaged_sound_intensity
.. autofunction:: acoustics.descriptors.normal_time_averaged_sound_intensity_level
Other descriptors
*****************
"""
from __future__ import division
import numpy as np
from acoustics.standards.iso_tr_25417_2007 import (REFERENCE_PRESSURE,
sound_pressure_level,
equivalent_sound_pressure_level,
peak_sound_pressure,
peak_sound_pressure_level,
REFERENCE_SOUND_EXPOSURE,
sound_exposure,
sound_exposure_level,
REFERENCE_POWER,
sound_power_level,
sound_energy,
REFERENCE_ENERGY,
sound_energy_level,
sound_intensity,
time_averaged_sound_intensity,
REFERENCE_INTENSITY,
time_averaged_sound_intensity_level,
normal_time_averaged_sound_intensity,
normal_time_averaged_sound_intensity_level,
)
def _leq(levels, time):
if type(levels) is list:
levels = np.array(levels)
return 10.0 * np.log10((1.0/time) * np.sum(10.0**(levels/10.0)))
def leq(levels, int_time=1.0):
"""
Equivalent level :math:`L_{eq}`.
:param levels: Levels as function of time.
:param int_time: Integration time. Default value is 1.0 second.
:returns: Equivalent level L_{eq}.
Sum of levels in dB.
"""
if type(levels) is list:
levels = np.array(levels)
time = levels.size * int_time
return _leq(levels, time)
def sel(levels):
"""
Sound Exposure Level from ``levels`` (NumPy array).
"""
if type(levels) is list:
levels = np.array(levels)
return _leq(levels, 1.0)
def lw(W, Wref=1.0e-12):
"""
Sound power level :math:`L_{w}` for sound power :math:`W` and reference power :math:`W_{ref}`.
:param W: Sound power :math:`W`.
    :param Wref: Reference power :math:`W_{ref}`. Default value is :math:`10^{-12}` watt.
"""
if type(W) is list:
W = np.array(W)
return 10.0 * np.log10(W/Wref)
def lden(lday, levening, lnight):
"""
Calculate :math:`L_{den}` from :math:`L_{day}`, :math:`L_{evening}` and :math:`L_{night}`.
:param lday: Equivalent level during day period :math:`L_{day}`.
:param levening: Equivalent level during evening period :math:`L_{evening}`.
:param lnight: Equivalent level during night period :math:`L_{night}`.
:returns: :math:`L_{den}`
"""
if type(lday) is list:
lday = np.array(lday)
if type(levening) is list:
levening = np.array(levening)
if type(lnight) is list:
lnight = np.array(lnight)
day = 12.0 * 10.0**(lday/10.0)
evening = 4.0 * 10.0**((levening+5.0) / 10.0)
night = 8.0 * 10.0**((lnight+10.0) / 10.0)
return 10.0 * np.log10((day + evening + night) / 24.0)
def ldn(lday, lnight):
"""
Calculate :math:`L_{dn}` from :math:`L_{day}` and :math:`L_{night}`.
:param lday: Equivalent level during day period :math:`L_{day}`.
:param lnight: Equivalent level during night period :math:`L_{night}`.
:returns: :math:`L_{dn}`
"""
if type(lday) is list:
lday = np.array(lday)
if type(lnight) is list:
lnight = np.array(lnight)
day = 15.0 * 10.0**(lday/10.0)
night = 9.0 * 10.0**((lnight+10.0) / 10.0)
return 10.0 * np.log10((day + night) / 24.0)
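

# A minimal usage sketch (not part of the original module); the round-number cases
# below follow directly from the formulas above, e.g. equal day/evening/night
# contributions collapse to a single 60 dB level.
def _descriptors_example():
    """Illustrative only: a few checks with exact round-number results."""
    assert abs(leq([60.0, 60.0, 60.0]) - 60.0) < 1e-9  # three equal 1-second levels
    assert abs(lden(60.0, 55.0, 50.0) - 60.0) < 1e-9   # 12h/4h/8h weighting balances out
    assert abs(ldn(60.0, 50.0) - 60.0) < 1e-9          # 15h/9h day/night weighting
    assert abs(lw(1.0) - 120.0) < 1e-9                 # 1 W source -> 120 dB re 1 pW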
|
[
"numpy.log10",
"numpy.array",
"numpy.sum"
] |
[((3138, 3154), 'numpy.array', 'np.array', (['levels'], {}), '(levels)\n', (3146, 3154), True, 'import numpy as np\n'), ((3545, 3561), 'numpy.array', 'np.array', (['levels'], {}), '(levels)\n', (3553, 3561), True, 'import numpy as np\n'), ((3763, 3779), 'numpy.array', 'np.array', (['levels'], {}), '(levels)\n', (3771, 3779), True, 'import numpy as np\n'), ((4118, 4129), 'numpy.array', 'np.array', (['W'], {}), '(W)\n', (4126, 4129), True, 'import numpy as np\n'), ((4148, 4166), 'numpy.log10', 'np.log10', (['(W / Wref)'], {}), '(W / Wref)\n', (4156, 4166), True, 'import numpy as np\n'), ((4614, 4628), 'numpy.array', 'np.array', (['lday'], {}), '(lday)\n', (4622, 4628), True, 'import numpy as np\n'), ((4679, 4697), 'numpy.array', 'np.array', (['levening'], {}), '(levening)\n', (4687, 4697), True, 'import numpy as np\n'), ((4744, 4760), 'numpy.array', 'np.array', (['lnight'], {}), '(lnight)\n', (4752, 4760), True, 'import numpy as np\n'), ((4911, 4951), 'numpy.log10', 'np.log10', (['((day + evening + night) / 24.0)'], {}), '((day + evening + night) / 24.0)\n', (4919, 4951), True, 'import numpy as np\n'), ((5291, 5305), 'numpy.array', 'np.array', (['lday'], {}), '(lday)\n', (5299, 5305), True, 'import numpy as np\n'), ((5352, 5368), 'numpy.array', 'np.array', (['lnight'], {}), '(lnight)\n', (5360, 5368), True, 'import numpy as np\n'), ((5469, 5499), 'numpy.log10', 'np.log10', (['((day + night) / 24.0)'], {}), '((day + night) / 24.0)\n', (5477, 5499), True, 'import numpy as np\n'), ((3195, 3226), 'numpy.sum', 'np.sum', (['(10.0 ** (levels / 10.0))'], {}), '(10.0 ** (levels / 10.0))\n', (3201, 3226), True, 'import numpy as np\n')]
|
"""playground music_beats
"""
import numpy as np
import matplotlib.pylab as plt
# v1: 2 patterns
# v2: 16 probs for event
t = np.linspace(0, 16-1, 16)
print("t", t)
p1 = np.zeros((16, 1))
p2 = np.zeros((16, 1))
p1[[0, 4, 8, 12],0] = 1.
p2[[0, 6, 8, 14],0] = 1.
plt.subplot(211)
plt.bar(t, p1) # , "ko")
plt.subplot(212)
plt.bar(t, p2) #, "ko")
# plt.gcf().subplots_adjust()
plt.show()
|
[
"matplotlib.pylab.subplot",
"numpy.zeros",
"matplotlib.pylab.bar",
"numpy.linspace",
"matplotlib.pylab.show"
] |
[((129, 155), 'numpy.linspace', 'np.linspace', (['(0)', '(16 - 1)', '(16)'], {}), '(0, 16 - 1, 16)\n', (140, 155), True, 'import numpy as np\n'), ((175, 192), 'numpy.zeros', 'np.zeros', (['(16, 1)'], {}), '((16, 1))\n', (183, 192), True, 'import numpy as np\n'), ((198, 215), 'numpy.zeros', 'np.zeros', (['(16, 1)'], {}), '((16, 1))\n', (206, 215), True, 'import numpy as np\n'), ((269, 285), 'matplotlib.pylab.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (280, 285), True, 'import matplotlib.pylab as plt\n'), ((286, 300), 'matplotlib.pylab.bar', 'plt.bar', (['t', 'p1'], {}), '(t, p1)\n', (293, 300), True, 'import matplotlib.pylab as plt\n'), ((311, 327), 'matplotlib.pylab.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (322, 327), True, 'import matplotlib.pylab as plt\n'), ((328, 342), 'matplotlib.pylab.bar', 'plt.bar', (['t', 'p2'], {}), '(t, p2)\n', (335, 342), True, 'import matplotlib.pylab as plt\n'), ((382, 392), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (390, 392), True, 'import matplotlib.pylab as plt\n')]
|
from multiprocessing import Pool
from functools import partial
from pathlib import Path
import pandas as pd
import numpy as np
from numpy import array
import matplotlib.pyplot as plt
import tqdm
from fire import Fire
from bifacial_radiance.main import RadianceObj,AnalysisObj, _popen
Path.ls = lambda x: sorted(list(x.iterdir()))
metadata = {'Name': 'Chamberytown',
'latitude': 45.637,
'longitude': 5.881,
'altitude': 235.0,
'State':'NZ',
'USAF':1,
'TZ':0}
def get_time_interval(model, date):
sl = model.input_meteo.index.get_loc(date)
if type(sl) is slice:
return sl.start, sl.stop
else: return (sl, sl+1)
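# model.input_meteo is expected to carry a DatetimeIndex: index.get_loc on a date string
# returns either a slice (several timestamps on that date) or a single integer position,
# and get_time_interval normalises both cases to a (start, stop) pair.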
def _convert_meteo(df):
return df[['ghi', 'dni', 'dhi']].rename(columns=str.upper)
def define_meteo(model, df):
model.input_meteo = _convert_meteo(df)
return model.readInesMeteoFile(model.input_meteo, metadata)
def define_scene(model, monitor=5,rack ='rackC3',withGroupF=False):
height = 0.77
originy = -0.1
sceneObjs = []
if rack == 'rackC3':
d = 0
m1_npanels = 2
height_m1 = height
full = True
if rack == 'rackC2':
d = 0.736
m1_npanels = 1
height_m1 = 1.63
full = True
if rack == 'rackC1':
d = 0.736
m1_npanels = 1
height_m1 = 1.63
full = False
mod1 = 'mod1'
model.makeModule(name=mod1,x=0.99,y=1.65,numpanels = m1_npanels ,xgap=0.04,ygap=0.05)
mod2 = 'mod2'
model.makeModule(name=mod2, x=0.99,y=1.65,numpanels = 2,xgap=0.04,ygap=0.05)
scDict_smGC = {'tilt':30,'pitch': 9.5,'clearance_height':height_m1,'azimuth':180, 'nMods': 1, 'nRows': 2, 'appendRadfile':True,'originx': -3.09, 'originy': originy + d}
sceneObjs += [model.makeScene(moduletype=mod1,sceneDict=scDict_smGC, hpc=True)] # sm = single module G: group
scDict_GC = {'tilt':30,'pitch': 9.5,'clearance_height':height,'azimuth':180, 'nMods': 5, 'nRows': 2, 'appendRadfile':True,'originx': 0, 'originy': originy }
sceneObjs += [model.makeScene(moduletype=mod2,sceneDict=scDict_GC, hpc=True)] #makeScene creates a .rad file
scDict_smGD = {'tilt':30,'pitch': 9.5,'clearance_height':height_m1,'azimuth':180, 'nMods': 1, 'nRows': 2, 'appendRadfile':True,'originx': 3.09, 'originy': originy + d}
sceneObjs += [model.makeScene(moduletype=mod1,sceneDict=scDict_smGD, hpc=True)]
scDict_GD = {'tilt':30,'pitch': 9.5,'clearance_height':height,'azimuth':180, 'nMods': 5, 'nRows': 2, 'appendRadfile':True,'originx': 6.17, 'originy': originy }
sceneObjs += [model.makeScene(moduletype=mod2,sceneDict=scDict_GD, hpc=True)]
scDict_GB = {'tilt':30,'pitch': 9.5,'clearance_height':height,'azimuth':180, 'nMods': 5, 'nRows': 2, 'appendRadfile':True,'originx': -6.17, 'originy': originy }
sceneObjs += [model.makeScene(moduletype=mod2,sceneDict=scDict_GB, hpc=True)]
scDict_GA = {'tilt':30,'pitch': 9.5,'clearance_height':height,'azimuth':180, 'nMods': 5, 'nRows': 2, 'appendRadfile':True,'originx': -12.36, 'originy': originy }
sceneObjs += [model.makeScene(moduletype=mod2,sceneDict=scDict_GA, hpc=True)]
scDict_GE = {'tilt':30,'pitch': 9.5,'clearance_height':height,'azimuth':180, 'nMods': 5, 'nRows': 2, 'appendRadfile':True,'originx': 12.36, 'originy': originy }
sceneObjs += [model.makeScene(moduletype=mod2,sceneDict=scDict_GE, hpc=True)]
if withGroupF:
scDict_GF = {'tilt':30,'pitch': 9.5,'clearance_height':height,'azimuth':180, 'nMods': 5, 'nRows': 2, 'appendRadfile':True,'originx': 18.54, 'originy': originy }
sceneObjs += [model.makeScene(moduletype=mod2,sceneDict=scDict_GF, hpc=True)]
if full == True:
scDict_smGE = {'tilt':30,'pitch': 9.5,'clearance_height':height_m1,'azimuth':180, 'nMods': 1, 'nRows': 2, 'appendRadfile':True,'originx': 9.26, 'originy': originy + d }
sceneObjs += [model.makeScene(moduletype=mod1,sceneDict=scDict_smGE, hpc=True)]
scDict_smGB = {'tilt':30,'pitch': 9.5,'clearance_height':height_m1,'azimuth':180, 'nMods': 1, 'nRows': 2, 'appendRadfile':True,'originx': -9.26, 'originy': originy + d }
sceneObjs += [model.makeScene(moduletype=mod1,sceneDict=scDict_smGB, hpc=True)]
scDict_smGA = {'tilt':30,'pitch': 9.5,'clearance_height':height_m1,'azimuth':180, 'nMods': 1, 'nRows': 2, 'appendRadfile':True,'originx': -15.45, 'originy': originy + d }
sceneObjs += [model.makeScene(moduletype=mod1,sceneDict=scDict_smGA, hpc=True)]
if withGroupF:
scDict_smGF = {'tilt':30,'pitch': 9.5,'clearance_height':height_m1,'azimuth':180, 'nMods': 1, 'nRows': 2, 'appendRadfile':True,'originx': 15.45, 'originy': originy + d }
sceneObjs += [model.makeScene(moduletype=mod1,sceneDict=scDict_smGF, hpc=True)]
model.module6 = sceneObjs[monitor]
return model.module6
STRUCT_HEIGHT = 0.60
def genbox(model,
name,
scene_name='cScene.rad',
material='Metal_Aluminum_Anodized',
dim=(1.0,1.0,1.0),
r=(0,0,0),
t=(0.0,0.0,0.0),
hpc=True):
genbox_cmd = f'!genbox {material} {name} {dim[0]} {dim[1]} {dim[2]} '
xform_cmd = f'| xform -rx {r[0]} -ry {r[1]} -rz {r[2]} -t {t[0]} {t[1]} {t[2]}'
cmd = genbox_cmd + xform_cmd
box = model.makeCustomObject(name, cmd)
model.appendtoScene(scene_name, box, hpc=hpc)
return
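# Example of the assembled command (illustrative, default material and rotation):
#   genbox(model, 'vert_post1', dim=(0.12, 0.24, 0.6), t=(-15.965, -1.45, 0)) appends
#   "!genbox Metal_Aluminum_Anodized vert_post1 0.12 0.24 0.6 | xform -rx 0 -ry 0 -rz 0 -t -15.965 -1.45 0"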
def add_vert_posts(model,
scene_name='cScene.rad',
material='Metal_Aluminum_Anodized',
rowoffset=0,
hpc=True):
height = STRUCT_HEIGHT
genbox(model,'vert_post1', scene_name, material, dim=(0.12, 0.24, height), t=(-15.965, -1.45 + rowoffset, 0), hpc=hpc)
genbox(model,'vert_post2', scene_name, material, dim=(0.12, 0.24, height), t=(-12.8750, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post3', scene_name, material, dim=(0.12, 0.24, height), t=(-9.785, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post4', scene_name, material, dim=(0.12, 0.24, height), t=(-6.685, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post5', scene_name, material, dim=(0.12, 0.24, height), t=(-3.595, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post6', scene_name, material, dim=(0.12, 0.24, height), t=(-0.505, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post7', scene_name, material, dim=(0.12, 0.24, height), t=(2.585, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post8', scene_name, material, dim=(0.12, 0.24, height), t=(5.655, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post9', scene_name, material, dim=(0.12, 0.24, height), t=(8.745, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post10', scene_name, material, dim=(0.12, 0.24, height), t=(11.835, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post11', scene_name, material, dim=(0.12, 0.24, height), t=(14.925, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post12', scene_name, material, dim=(0.12, 0.24, height), t=(18.015, -1.45+ rowoffset, 0), hpc=hpc)
genbox(model,'vert_post13', scene_name, material, dim=(0.12, 0.24, height), t=(21.105, -1.45+ rowoffset, 0), hpc=hpc)
#soil rack
genbox(model,'rack_cables', scene_name, material, dim=(24.7, 0.24, 0.1), t=(-15.965, -1.45+0.24+ rowoffset, 0), hpc=hpc)
return
def pivoting_structure(model, material='Metal_Aluminum_Anodized', angle=30,rowoffset=0, hpc=True):
def _t(alpha, h, l,rowoffset):
'disgusting geometry'
n = np.sqrt(h**2 + l**2)
alpha = np.deg2rad(alpha)
beta = np.arctan(h/l)
gamma = beta-alpha
y = -l + n*np.cos(gamma)
z = h - n*np.sin(gamma)
print(f'alpha, beta, gamma: {alpha, beta, gamma}')
print(f'n: {n}')
return (0, y + rowoffset, z)
add_diag_posts(model, 'pivoting_struct.rad', material)
add_hor_posts(model, 'pivoting_struct.rad', material)
add_diag_posts_intra(model, 'pivoting_struct.rad', material)
t = _t(angle, STRUCT_HEIGHT, 1.45,rowoffset)
print(f'moving struct to {t}')
cmd = f'!xform -rx {angle} -t {t[0]} {t[1]} {t[2]} '
print(cmd)
model.radfiles.pop() #remove non pivoted scene
model.appendtoScene(f'pivoting_struct_{angle}.rad', 'objects/pivoting_struct.rad', cmd, hpc=hpc)
return
def add_diag_posts(model,
scene_name='cScene.rad',
material='Metal_Aluminum_Anodized',
hpc=True):
length = 3.5
zheight = 0.24
height = STRUCT_HEIGHT - zheight
genbox(model,'diag_post1', scene_name, material, dim=(0.12, length, 0.24), t=(-15.965, -1.45, height), hpc=hpc)
genbox(model,'diag_post2', scene_name, material, dim=(0.12, length, 0.24), t=(-12.8750, -1.45, height), hpc=hpc)
genbox(model,'diag_post3', scene_name, material, dim=(0.12, length, 0.24), t=(-9.785, -1.45, height), hpc=hpc)
genbox(model,'diag_post4', scene_name, material, dim=(0.12, length, 0.24), t=(-6.685, -1.45, height), hpc=hpc)
genbox(model,'diag_post5', scene_name, material, dim=(0.12, length, 0.24), t=(-3.595, -1.45, height), hpc=hpc)
genbox(model,'diag_post6', scene_name, material, dim=(0.12, length, 0.24), t=(-0.505, -1.45, height), hpc=hpc)
genbox(model,'diag_post7', scene_name, material, dim=(0.12, length, 0.24), t=(2.585, -1.45, height), hpc=hpc)
genbox(model,'diag_post8', scene_name, material, dim=(0.12, length, 0.24), t=(5.655, -1.45, height), hpc=hpc)
genbox(model,'diag_post9', scene_name, material, dim=(0.12, length, 0.24), t=(8.745, -1.45, height), hpc=hpc)
genbox(model,'diag_post10', scene_name, material, dim=(0.12, length, 0.24), t=(11.835, -1.45, height), hpc=hpc)
genbox(model,'diag_post11', scene_name, material, dim=(0.12, length, 0.24), t=(14.925, -1.45, height), hpc=hpc)
genbox(model,'diag_post12', scene_name, material, dim=(0.12, length, 0.24), t=(18.015, -1.45, height), hpc=hpc)
genbox(model,'diag_post13', scene_name, material, dim=(0.12, length, 0.24), t=(21.105, -1.45, height), hpc=hpc)
return
def add_hor_posts(model,
scene_name='cScene.rad',
material='Metal_Aluminum_Anodized',
hpc=True):
size = 0.09
height = STRUCT_HEIGHT
length = 3.5 - size
bottom_left = array([-15.965, -1.45, height])
top_left = array([-15.965, -1.45+length, height])
# midde_left = (top_left + bottom_left)/2
genbox(model,'hor_post_bottom', scene_name, material, dim=(37.08, size, size), t=bottom_left, hpc=hpc)
genbox(model,'hor_post_top', scene_name, material, dim=(37.08, size, size), t=top_left, hpc=hpc)
# genbox(model,'hor_post_middle', scene_name, material, dim=(24.7, size, size), t=midde_left, hpc=hpc)
return
def add_diag_posts_intra(model,
scene_name='cScene.rad',
material='Metal_Aluminum_Anodized',
hpc=True):
zsize = 0.09
xsize = 0.045
height = STRUCT_HEIGHT
length = 3.5
z_struct=0.09
modulex = 0.99 + xsize/2
t = array([-15.965, -1.45, height+z_struct])
for i in range(24):
genbox(model,f'diag_post_intra1.{i}', scene_name, material,
dim=(xsize, length, zsize), t=t + i*array([modulex,0,0]), hpc=hpc)
return
def add_box(model,
scene_name='cScene.rad',
material='beigeroof',
hpc=True):
genbox(model,'Boite_electrique', scene_name, material, dim=(0.12, 0.20, 0.24), t=(-12.875, 0.75, 1.36), hpc=hpc)
# genbox(model,'cables', scene_name, material, dim=(0.04, 0.1, 0.07), t=(-12.83, 0.75, 1.24), hpc=hpc)
return
def add_really_big_box(model,
scene_name='cScene.rad',
material='beigeroof',
hpc=True):
genbox(model,'building', scene_name, material, dim=(67, 10, 12), r=(0, 0, 15), t=(-25.875, 58, 0), hpc=hpc)
return
def add_ref_cell(model,group_ref_cell='A2',rowoffset=0):
if group_ref_cell == 'A2':
moduletype_refCell = 'ref_cell'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': -12.815, 'originy': 0.55+ rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == 'B2':
moduletype_refCell = 'ref_cell'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': -6.625, 'originy': 0.55+ rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == 'C2':
moduletype_refCell = 'ref_cell'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': -0.505, 'originy': 0.55+ rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == 'D2':
moduletype_refCell = 'ref_cell'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': 5.68, 'originy': 0.55+ rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == 'E2':
moduletype_refCell = 'ref_cell'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx':11.86, 'originy': 0.55+ rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == 'F2':
moduletype_refCell = 'ref_cell'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': 18.04, 'originy': 0.55+ rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == '2_B':
moduletype_refCell = 'ref_cell_2_b'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':2.0,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': -0.44, 'originy': 1.63 + rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == '2_A':
moduletype_refCell = 'ref_cell_2_a'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':1.3,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': -0.44, 'originy': 0.55 + rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
if group_ref_cell == '2_C':
moduletype_refCell = 'ref_cell_2_c'
model.makeModule(name=moduletype_refCell,x=0.12,y=0.12,numpanels = 1)
sceneRef_rCell = {'tilt':30,'pitch': 9.5,'clearance_height':0.46,'azimuth':180,
'nMods': 1, 'nRows': 1, 'appendRadfile':True,'originx': -0.44, 'originy': -0.93 + rowoffset}
sceneObj_rCell = model.makeScene(moduletype=moduletype_refCell, sceneDict=sceneRef_rCell, hpc=True)
return sceneObj_rCell
def delete_oct_files(project_path):
for f in project_path.glob('*.oct'):
f.unlink()
print(f'Deleted .oct files')
return
def delete_rad_files(project_path):
for f in (project_path).glob('*/*.rad'):
f.unlink()
print(f'Deleted .rad files')
return
def view(file_list, view='front', program='rvu'):
'Renders a view of the file'
views = {'diag': '-vp -17 3 1 -vd 2 -1 -0.3 -vu 0 0 1 -av 0.2 0.2 0.2',
'side': '-vp -14.3 0.2 1.5 -vd 1 0 0 -vu 0 0 1 -av 0.2 0.2 0.2',
'side2': '-vp -17 -6 3.5 -vd 0.2 1 -0.3 -vu 0 0 1 -av 10 10 10 -ab 2',
'back': '-vp -13.215 2 1 -vd 0.3 -1 0 -vu 0 0 1 -av 0.2 0.2 0.2',
'front': '-vp 2.5 -40 25 -vd 0 1 -0.5 -vu 0 0 1 -av 0.2 0.2 0.2',
'top': '-vp -17.5 1.6 2.7 -vd 1 0 -0.1 -vu 0 0 1',
'bottom': '-vp -17.5 -1.55 1.0 -vd 1 0.05 -0.1 -vu 0 0 1',
'front_low': '-vp -17 -5 2 -vd 0.5 1 -0.05 -vu 0 0 1 -av 0.2 0.2 0.2'}
    # accept either a list of file paths or a single path string
    first = file_list[0] if isinstance(file_list, list) else file_list
    program = 'objview' if first.endswith('rad') else program
    if isinstance(file_list, list):
        files = ' '.join([file_list[0]]+[s[s.find('objects'):] for s in file_list if ('objects' in s)])
    elif isinstance(file_list, str):
        files = file_list
vp = views[view] if view in views else view
cmd = _cmd(program, vp, files)
return _popen(cmd, None)
def _cmd(program, vp, filename):
vp = ' '+vp+' ' if program =='rvu' else f' -v "{vp}" '
cmd = program + vp + filename
print(f' cmd: {cmd}')
return cmd
def run_simulation(date='18 July 2017',
outfile='new_results',
cores=10,
albedo=0.4,
rack = 'rackc3',
rowoffset = 9.5,
add_struct=True,
ref_cell=True,
group_ref_cell='A2',
withGroupF=False,
project_name = 'INCAFixed',
lspv_path = Path.home()/Path("Documents/lspv_analyseSoft/'RayTracing_simulations/dev_nbs_fc'"),
ines_meteo_file = Path.home()/'DATA/INCA/chic_bi3p/tmy_INCA_bifi_5T.hdf'):
project_path = lspv_path/project_name
if not project_path.exists(): project_path.mkdir()
delete_oct_files(project_path)
delete_rad_files(project_path)
sim_name = 'inca'
sensorsy = 9
inca = RadianceObj(sim_name, str(project_path.absolute())) # Radiance object named model_first_test
inca.setGround(albedo)
#define the scene with all the modules and scene dicts
module6 = define_scene(inca, 5,rack,withGroupF) #unused if ref_cell present
#add strcutures
if add_struct:
        pivoting_structure(inca, rowoffset=rowoffset)
add_box(inca)
    # `add_really_big_box` here is the function defined above (run_simulation has
    # no matching flag), so this check is always true and the neighbouring
    # building is always added to the scene.
    if add_really_big_box:
        add_really_big_box(inca)
inca.monitored_obj = add_ref_cell(inca,group_ref_cell) if ref_cell else module6
#append the ines meteo file
meteo_df = pd.read_hdf(ines_meteo_file).loc[date,:]
define_meteo(inca, meteo_df)
#chose the date of your sim, must be in the input meteo file
results_file = date.replace(' ', '') + 'w_'+rack+'_sf_'+group_ref_cell+'.hdf'
if withGroupF:
results_file = date.replace(' ', '') + 'w_'+rack+'_'+group_ref_cell+'.hdf'
#ti, tf = get_time_interval(inca, date)
#print(f'Timeindexes : {ti}, {tf}')
ti = 528
tf = 529
import tqdm
pool = Pool(1)
res_list = []
f = partial(compute_radiance, model=inca, sim_name=sim_name, sensorsy=sensorsy)
for x in tqdm.tqdm(pool.imap(f, range(ti,tf)), total=tf-ti):
res_list.append(x)
pass
pool.close()
pool.join()
#pool = Pool(cores)
#res_list = []
#print(f'Launching simulation with {cores} processes')
#f = partial(compute_radiance, model=inca, sim_name=sim_name, sensorsy=sensorsy)
#if cores <2:
# res_list = list(map(f, range(ti, tf)))
# res_list = [f(x) for x in range(ti, tf)]
#else:
#for x in tqdm.tqdm(pool.imap(f, range(ti,tf)), total=tf-ti):
# res_list.append(x)
# pass
#pool.close()
#pool.join()
#print(f'res_list: {res_list}')
results = pd.DataFrame(data=res_list,
index = inca.input_meteo[date].index,
columns = [f'g_{i}' for i in range(sensorsy)]+[f'gb_{i}' for i in range(sensorsy)])
print(f'Results file: {results_file}')
results.to_hdf(project_path/('results/'+results_file), key='df')
#results.to_csv(project_path/('results/'+results_file+'.csv'))
    print(project_path/('results/'+results_file))
return inca
if __name__ == '__main__':
Fire(run_simulation)
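
# Illustrative note (not part of the original script): Fire maps run_simulation's
# keyword arguments onto command-line flags, so an invocation would look roughly
# like the following (the script name and values below are placeholders):
#
#   python run_inca.py --date='18 July 2017' --rack=rackc3 \
#       --group_ref_cell=A2 --albedo=0.4 --add_struct=True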
|
[
"functools.partial",
"fire.Fire",
"pathlib.Path.home",
"numpy.deg2rad",
"pandas.read_hdf",
"bifacial_radiance.main._popen",
"pathlib.Path",
"numpy.sin",
"numpy.array",
"numpy.cos",
"multiprocessing.Pool",
"numpy.arctan",
"numpy.sqrt"
] |
[((10446, 10477), 'numpy.array', 'array', (['[-15.965, -1.45, height]'], {}), '([-15.965, -1.45, height])\n', (10451, 10477), False, 'from numpy import array\n'), ((10493, 10533), 'numpy.array', 'array', (['[-15.965, -1.45 + length, height]'], {}), '([-15.965, -1.45 + length, height])\n', (10498, 10533), False, 'from numpy import array\n'), ((11208, 11250), 'numpy.array', 'array', (['[-15.965, -1.45, height + z_struct]'], {}), '([-15.965, -1.45, height + z_struct])\n', (11213, 11250), False, 'from numpy import array\n'), ((17743, 17760), 'bifacial_radiance.main._popen', '_popen', (['cmd', 'None'], {}), '(cmd, None)\n', (17749, 17760), False, 'from bifacial_radiance.main import RadianceObj, AnalysisObj, _popen\n'), ((19815, 19822), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (19819, 19822), False, 'from multiprocessing import Pool\n'), ((19849, 19924), 'functools.partial', 'partial', (['compute_radiance'], {'model': 'inca', 'sim_name': 'sim_name', 'sensorsy': 'sensorsy'}), '(compute_radiance, model=inca, sim_name=sim_name, sensorsy=sensorsy)\n', (19856, 19924), False, 'from functools import partial\n'), ((21066, 21086), 'fire.Fire', 'Fire', (['run_simulation'], {}), '(run_simulation)\n', (21070, 21086), False, 'from fire import Fire\n'), ((7648, 7672), 'numpy.sqrt', 'np.sqrt', (['(h ** 2 + l ** 2)'], {}), '(h ** 2 + l ** 2)\n', (7655, 7672), True, 'import numpy as np\n'), ((7685, 7702), 'numpy.deg2rad', 'np.deg2rad', (['alpha'], {}), '(alpha)\n', (7695, 7702), True, 'import numpy as np\n'), ((7718, 7734), 'numpy.arctan', 'np.arctan', (['(h / l)'], {}), '(h / l)\n', (7727, 7734), True, 'import numpy as np\n'), ((18377, 18388), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (18386, 18388), False, 'from pathlib import Path\n'), ((18389, 18459), 'pathlib.Path', 'Path', (['"""Documents/lspv_analyseSoft/\'RayTracing_simulations/dev_nbs_fc\'"""'], {}), '("Documents/lspv_analyseSoft/\'RayTracing_simulations/dev_nbs_fc\'")\n', (18393, 18459), False, 'from pathlib import Path\n'), ((18498, 18509), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (18507, 18509), False, 'from pathlib import Path\n'), ((19345, 19373), 'pandas.read_hdf', 'pd.read_hdf', (['ines_meteo_file'], {}), '(ines_meteo_file)\n', (19356, 19373), True, 'import pandas as pd\n'), ((7779, 7792), 'numpy.cos', 'np.cos', (['gamma'], {}), '(gamma)\n', (7785, 7792), True, 'import numpy as np\n'), ((7811, 7824), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (7817, 7824), True, 'import numpy as np\n'), ((11393, 11415), 'numpy.array', 'array', (['[modulex, 0, 0]'], {}), '([modulex, 0, 0])\n', (11398, 11415), False, 'from numpy import array\n')]
|
"""Handling of skew surge data.
Two skew surge datasets are implemented: the dataset for Brest created
by Reinert et al. (2021), which can be received from the authors upon
request, and the GESLA-2 surge dataset of Marcos & Woodworth (2017).
See the file ‘tools_GESLA’ for further information on the latter.
Written by <NAME>, April 2020, May 2021.
"""
import time
import calendar
from enum import Enum
import numpy as np
from tools_GESLA import read_GESLA_surge_file, get_GESLA_surge_filename
# Path to the skew surge data file of Reinert et al. (2021)
SKEW_SURGE_FILE = "data/skew_surge_Brest.npy"
class Timeseries(Enum):
SKEW_SURGE = "skew surge (Reinert et al. 2021)"
SKEW_SURGE_GESLA = "skew surge (GESLA-2)"
def load_data(city: str, dataset: Timeseries, year_start=None, year_end=None, include_low_tide_surge=False):
"""Get ‘dataset’ for ‘city’ from ‘year_start’ to ‘year_end’.
The returned object is a dictionary with the following keys:
- city (string, the same as the input value)
- name (string, short description of the dataset)
- full_name (string, long description of the dataset)
- year_start (int, first valid year of the time series)
- year_end (int, last valid year of the time series)
- t (array, time values in s)
- h (array, height values in cm)
The returned dataset can be limited to contain only data from
‘year_start’ to ‘year_end’. Each of these two arguments is optional
and inclusive if given. They work even if no data for the given
years exists, in which case the returned values of ‘year_start’ and
‘year_end’ will be set to the actual first and last year for which
there is data within the given period.
The surge dataset by Reinert et al. (2021) is only available for
Brest. It contains both high and low tide skew surge levels, but by
default, only high tide surge levels are used. To also include low
tide surge, set the corresponding parameter to True. Surge levels
by Reinert et al. (2021) are relative to the annual mean sea level.
For the GESLA-2 skew surge dataset by <NAME> (2017),
several cities are available. This dataset contains only high tide
surge levels, so include_low_tide_surge must be False. Surge levels
in the GESLA-2 dataset contain variations in the mean sea level, in
particular the mean sea level rise.
"""
# Initialize the dictionary that is returned
data = {
"city": city,
"name": "",
"full_name": "",
"year_start": None,
"year_end": None,
"t": None,
"h": None,
}
if dataset == Timeseries.SKEW_SURGE:
if city != "Brest":
raise ValueError(
"only city Brest is available for SKEW_SURGE dataset; "
"did you want to use SKEW_SURGE_GESLA dataset?"
)
data["name"] = "surge (Reinert et al. 2021)"
data["full_name"] = "skew surge relative to annual mean sea level (Reinert et al. 2021)"
print("Reading", data["full_name"], "for", data["city"])
data["t"], data["h"], high_tides = np.load(SKEW_SURGE_FILE)
high_tides = np.array(high_tides, dtype=bool)
if include_low_tide_surge:
print("Using both high and low tide surge levels")
else:
data["name"] = "high tide " + data["name"]
data["full_name"] = "high tide " + data["full_name"]
data["t"] = data["t"][high_tides]
data["h"] = data["h"][high_tides]
elif dataset == Timeseries.SKEW_SURGE_GESLA:
if include_low_tide_surge:
raise ValueError("GESLA-2 surge dataset does not contain low tide surge levels.")
data["name"] = "surge (GESLA-2)"
data["full_name"] = "skew surge of GESLA-2 (Marcos & Woodworth 2017)"
filename = get_GESLA_surge_filename(city)
print("Reading", data["full_name"], "for", data["city"])
data["t"], data["h"] = read_GESLA_surge_file(filename)
data["t"] = np.array(data["t"])
data["h"] = np.array(data["h"])
else:
raise ValueError("unknown dataset {} requested".format(dataset))
# Limit the data to the given range
if year_start is not None:
print("Removing data before {}".format(year_start))
starttime = calendar.timegm((year_start, 1, 1, 0, 0, 0))
data["h"] = data["h"][data["t"] >= starttime]
data["t"] = data["t"][data["t"] >= starttime]
if year_end is not None:
print("Removing data after {}".format(year_end))
endtime = calendar.timegm((year_end + 1, 1, 1, 0, 0, 0))
data["h"] = data["h"][data["t"] < endtime]
data["t"] = data["t"][data["t"] < endtime]
# Convert from mm to cm
data["h"] = data["h"] / 10
# Get first and last year
data["year_start"] = time.gmtime(min(data["t"])).tm_year
data["year_end"] = time.gmtime(max(data["t"])).tm_year
print("{:9_d} records".format(len(data["t"])))
return data
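
if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): a typical call for
    # the Brest dataset described in the docstring above.  It assumes the data
    # file referenced by SKEW_SURGE_FILE is present; the year range is arbitrary.
    brest = load_data("Brest", Timeseries.SKEW_SURGE, year_start=1950, year_end=2000)
    print(brest["name"], brest["year_start"], brest["year_end"], len(brest["t"]))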
|
[
"numpy.load",
"calendar.timegm",
"tools_GESLA.get_GESLA_surge_filename",
"numpy.array",
"tools_GESLA.read_GESLA_surge_file"
] |
[((3139, 3163), 'numpy.load', 'np.load', (['SKEW_SURGE_FILE'], {}), '(SKEW_SURGE_FILE)\n', (3146, 3163), True, 'import numpy as np\n'), ((3185, 3217), 'numpy.array', 'np.array', (['high_tides'], {'dtype': 'bool'}), '(high_tides, dtype=bool)\n', (3193, 3217), True, 'import numpy as np\n'), ((4331, 4375), 'calendar.timegm', 'calendar.timegm', (['(year_start, 1, 1, 0, 0, 0)'], {}), '((year_start, 1, 1, 0, 0, 0))\n', (4346, 4375), False, 'import calendar\n'), ((4588, 4634), 'calendar.timegm', 'calendar.timegm', (['(year_end + 1, 1, 1, 0, 0, 0)'], {}), '((year_end + 1, 1, 1, 0, 0, 0))\n', (4603, 4634), False, 'import calendar\n'), ((3858, 3888), 'tools_GESLA.get_GESLA_surge_filename', 'get_GESLA_surge_filename', (['city'], {}), '(city)\n', (3882, 3888), False, 'from tools_GESLA import read_GESLA_surge_file, get_GESLA_surge_filename\n'), ((3985, 4016), 'tools_GESLA.read_GESLA_surge_file', 'read_GESLA_surge_file', (['filename'], {}), '(filename)\n', (4006, 4016), False, 'from tools_GESLA import read_GESLA_surge_file, get_GESLA_surge_filename\n'), ((4037, 4056), 'numpy.array', 'np.array', (["data['t']"], {}), "(data['t'])\n", (4045, 4056), True, 'import numpy as np\n'), ((4077, 4096), 'numpy.array', 'np.array', (["data['h']"], {}), "(data['h'])\n", (4085, 4096), True, 'import numpy as np\n')]
|
import os
import sys
import datetime
from pathlib import Path
from logging import getLogger, Formatter, StreamHandler, INFO, WARNING
from logging import FileHandler
import importlib
import pandas as pd
import numpy as np
from easydict import EasyDict as edict
def is_devmode():
    return os.uname().nodename == 'resona'
def prefix_path():
if not is_devmode():
return '/data'
else:
return 'data'
LOGDIR = '/wdata/working/sp5r2/models/logs/{modelname:s}'
def load_config(config_path):
    # str.rstrip strips characters, not a suffix, so remove a trailing '.py'
    # explicitly before converting the file path into a module path.
    module_path = config_path[:-3] if config_path.endswith('.py') else config_path
    mod = importlib.import_module(module_path.replace('/', '.'))
    return edict(mod.CONFIG)
def set_filehandler(conf, prefix='train'):
logformat = '%(asctime)s %(levelname)s %(message)s'
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')[2:]
logfile_path = str(
Path(LOGDIR.format(modelname=conf.modelname)) /
f'{prefix:s}_{timestamp:s}.log')
Path(logfile_path).parent.mkdir(parents=True, exist_ok=True)
handler = FileHandler(logfile_path)
handler.setFormatter(Formatter(logformat))
logger = getLogger('aa')
logger.addHandler(handler)
def set_logger():
logger = getLogger('aa')
logformat = '%(asctime)s %(levelname)s %(message)s'
handler_out = StreamHandler(sys.stdout)
handler_out.setLevel(INFO)
handler_out.setFormatter(Formatter(logformat))
logger.setLevel(INFO)
logger.addHandler(handler_out)
def get_csv_folds(path, d, use_all=False):
    df = pd.read_csv(path, index_col=0)
    if use_all:
        train = [range(len(df))]
        test = [[]]
    else:
        m = df.max()[0] + 1
        train = [[] for i in range(m)]
        test = [[] for i in range(m)]

        # map each fold id to the positions in `d` whose labels fall in that fold
        folds = {}
        for i in range(m):
            fold_ids = list(df[df['fold'].isin([i])].index)
            folds.update({i: [n for n, l in enumerate(d) if l in fold_ids]})

        for k, v in folds.items():
            for i in range(m):
                if i != k:
                    train[i].extend(v)
            test[k] = v
    return list(zip(np.array(train), np.array(test)))
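
if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): get_csv_folds expects
    # a CSV whose index holds sample identifiers and whose single 'fold' column
    # holds integer fold ids.  The toy data below is made up for illustration.
    toy = pd.DataFrame({"fold": [0, 0, 1, 1, 2, 2]},
                       index=[f"img{i}" for i in range(6)])
    toy.to_csv("folds_example.csv")
    sample_ids = list(toy.index)
    for fold, (train_idx, test_idx) in enumerate(
            get_csv_folds("folds_example.csv", sample_ids)):
        print(fold, train_idx, test_idx)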
|
[
"logging.FileHandler",
"pandas.read_csv",
"logging.StreamHandler",
"os.uname",
"logging.Formatter",
"pathlib.Path",
"numpy.array",
"easydict.EasyDict",
"datetime.datetime.now",
"logging.getLogger"
] |
[((627, 644), 'easydict.EasyDict', 'edict', (['mod.CONFIG'], {}), '(mod.CONFIG)\n', (632, 644), True, 'from easydict import EasyDict as edict\n'), ((1018, 1043), 'logging.FileHandler', 'FileHandler', (['logfile_path'], {}), '(logfile_path)\n', (1029, 1043), False, 'from logging import FileHandler\n'), ((1105, 1120), 'logging.getLogger', 'getLogger', (['"""aa"""'], {}), "('aa')\n", (1114, 1120), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, WARNING\n'), ((1185, 1200), 'logging.getLogger', 'getLogger', (['"""aa"""'], {}), "('aa')\n", (1194, 1200), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, WARNING\n'), ((1276, 1301), 'logging.StreamHandler', 'StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1289, 1301), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, WARNING\n'), ((1500, 1530), 'pandas.read_csv', 'pd.read_csv', (['path'], {'index_col': '(0)'}), '(path, index_col=0)\n', (1511, 1530), True, 'import pandas as pd\n'), ((1069, 1089), 'logging.Formatter', 'Formatter', (['logformat'], {}), '(logformat)\n', (1078, 1089), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, WARNING\n'), ((1362, 1382), 'logging.Formatter', 'Formatter', (['logformat'], {}), '(logformat)\n', (1371, 1382), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, WARNING\n'), ((2077, 2092), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (2085, 2092), True, 'import numpy as np\n'), ((2094, 2108), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (2102, 2108), True, 'import numpy as np\n'), ((301, 311), 'os.uname', 'os.uname', ([], {}), '()\n', (309, 311), False, 'import os\n'), ((763, 786), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (784, 786), False, 'import datetime\n'), ((942, 960), 'pathlib.Path', 'Path', (['logfile_path'], {}), '(logfile_path)\n', (946, 960), False, 'from pathlib import Path\n')]
|
""" Functions and Operations for analysing a cluster's tidal tails
"""
__author__ = "<NAME>"
__all__ = [
"to_tail",
"tail_path",
"tail_path_match",
]
try:
from galpy.util import conversion
except:
import galpy.util.bovy_conversion as conversion
from galpy.util import _rotate_to_arbitrary_vector
from galpy import potential
from galpy.potential import MWPotential2014
import numpy as np
import matplotlib.pyplot as plt
from .orbit import orbital_path, orbital_path_match
from .operations import *
from ..util.recipes import binmaker,nbinmaker,roaming_binmaker,roaming_nbinmaker
from ..util.coordinates import cart_to_sky
from ..util.plots import starplot,skyplot,_plot,_lplot,_scatter
def to_tail(cluster):
"""Calculate positions and velocities of stars when rotated such that clusters velocity vector
points along x-axis
- no change to coordinates in StarCluster
Parameters
----------
cluster : class
StarCluster
Returns
-------
x_tail,y_tail,z_tail,vx_tail,vy_tail,vz_tail : float
        rotated coordinates with the cluster's velocity vector pointing along the x-axis
History:
-------
2018 - Written - Webb (UofT)
"""
units0, origin0, rorder0, rorder_origin0 = save_cluster(cluster)
if origin0 != 'cluster' and origin0 != 'centre':
cluster.to_centre(sortstars=False)
v_vec = np.array([cluster.vxgc, cluster.vygc, cluster.vzgc])
new_v_vec = np.array([1.0, 0.0, 0.0])
rot = _rotate_to_arbitrary_vector(
np.atleast_2d(v_vec), new_v_vec, inv=False, _dontcutsmall=False
)
x_tail = (
cluster.x * rot[:, 0, 0] + cluster.y * rot[:, 1, 0] + cluster.z * rot[:, 2, 0]
)
y_tail = (
cluster.x * rot[:, 0, 1] + cluster.y * rot[:, 1, 1] + cluster.z * rot[:, 2, 1]
)
z_tail = (
cluster.x * rot[:, 0, 2] + cluster.y * rot[:, 1, 2] + cluster.z * rot[:, 2, 2]
)
vx_tail = (
cluster.vx * rot[:, 0, 0] + cluster.vy * rot[:, 1, 0] + cluster.vz * rot[:, 2, 0]
)
vy_tail = (
cluster.vx * rot[:, 0, 1] + cluster.vy * rot[:, 1, 1] + cluster.vz * rot[:, 2, 1]
)
vz_tail = (
cluster.vx * rot[:, 0, 2] + cluster.vy * rot[:, 1, 2] + cluster.vz * rot[:, 2, 2]
)
return_cluster(cluster, units0, origin0, rorder0, rorder_origin0)
return x_tail,y_tail,z_tail,vx_tail,vy_tail,vz_tail
def tail_path(
cluster, dt=0.1, no=1000, nt=100, ntail=100, pot=MWPotential2014, dmax=None, bintype = 'fix', from_centre=False, skypath=False,
to_path=False,
do_full=False,
ro=8.0, vo=220.0,
plot=False,projected=False,
**kwargs,
):
"""Calculate tail path +/- dt Gyr around the cluster
Parameters
----------
cluster : class
StarCluster
dt : float
timestep that StarCluster is to be moved to
no : int
number of timesteps for orbit integration (default:1000)
nt : int
number of points along the tail to set the tail spacing (default: 100)
ntail : int
        number of points along the tail with roaming average (default: 100)
pot : class
galpy Potential that orbit is to be integrate in (default: MWPotential2014)
dmax : float
maximum distance (assumed to be same units as cluster) from orbital path to be included in generating tail path (default: None)
bintype : str
type of binning for tail stars (default : 'fix')
from_centre : bool
        generate orbit from cluster's exact centre instead of its assigned galactocentric coordinates (default: False)
skypath : bool
return sky coordinates instead of cartesian coordinates (default: False)
to_path : bool
measure distance to the path itself instead of distance to central point along the path (default: False)
do_full : bool
calculate dpath all at once in a single numpy array (can be memory intensive) (default:False)
ro :float
galpy distance scale (Default: 8.)
vo : float
galpy velocity scale (Default: 220.)
plot : bool
        plot a snapshot of the cluster in galactocentric coordinates with the orbital path (default: False)
projected : bool
match to projected orbital path, which means matching just x and y coordinates or Ra and Dec coordinates (not z, or dist) (default:False)
Returns
-------
t : float
times for which path is provided
x,y,z : float
tail path positions
vx,vy,vz : float
tail path velocities
History
-------
2018 - Written - Webb (UofT)
2019 - Implemented numpy array preallocation to minimize runtime - <NAME> (UofT)
"""
units0, origin0, rorder0, rorder_origin0 = save_cluster(cluster)
cluster.to_galaxy(sortstars=False)
cluster.to_kpckms()
#dmax is assumed to have same units as cluster
if dmax is not None:
if units0=='nbody':
dmax*=cluster.rbar/1000.0
elif units0=='pckms':
dmax/=1000.
elif units0=='galpy':
dmax*=ro
elif units0=='radec' and not skypath:
dist=np.sqrt(cluster.xgc**2.+cluster.ygc**2.+cluster.zgc**2.)
dmax=dist*np.tan(dmax)
elif units0=='kpckms' and skypath:
dist=np.sqrt(cluster.xgc**2.+cluster.ygc**2.+cluster.zgc**2.)
dmax=np.arctan(dmax/dist)
to, xo, yo, zo, vxo, vyo, vzo = orbital_path(
cluster,
dt=dt,
nt=no,
pot=pot,
from_centre=from_centre,
skypath=skypath,
initialize=False,
ro=ro,
vo=vo,
)
path=(to, xo, yo, zo, vxo, vyo, vzo)
if bintype=='fix':
if ntail > nt:
t_lower, t_mid, t_upper, t_hist = roaming_binmaker(to, nbin=nt,ntot=ntail)
else:
t_lower, t_mid, t_upper, t_hist = binmaker(to, nbin=nt)
elif bintype=='num':
if ntail>nt:
t_lower, t_mid, t_upper, t_hist = roaming_nbinmaker(to, nbin=nt,ntot=ntail)
else:
t_lower, t_mid, t_upper, t_hist = nbinmaker(to, nbin=nt)
tstar, dprog, dpath = orbital_path_match(
cluster=cluster, dt=dt, nt=no, pot=pot, path=path, from_centre=from_centre, skypath=skypath, to_path=to_path,do_full=do_full, ro=ro, vo=vo, projected=projected
)
if dmax is None:
dindx=np.ones(len(tstar),dtype=bool)
else:
dindx = (np.fabs(dpath) <= dmax)
ttail = np.array([])
xtail = np.array([])
ytail = np.array([])
ztail = np.array([])
vxtail = np.array([])
vytail = np.array([])
vztail = np.array([])
for i in range(0, len(t_mid)):
indx = (tstar >= t_lower[i]) * (tstar <= t_upper[i]) * dindx
if np.sum(indx) > 0:
ttail = np.append(ttail, t_mid[i])
xtail = np.append(xtail, np.mean(cluster.x[indx]))
ytail = np.append(ytail, np.mean(cluster.y[indx]))
ztail = np.append(ztail, np.mean(cluster.z[indx]))
vxtail = np.append(vxtail, np.mean(cluster.vx[indx]))
vytail = np.append(vytail, np.mean(cluster.vy[indx]))
vztail = np.append(vztail, np.mean(cluster.vz[indx]))
if skypath:
ratail,dectail,disttail,pmratail,pmdectail,vlostail=cart_to_sky(xtail, ytail, ztail, vxtail, vytail, vztail)
if plot:
filename = kwargs.pop("filename", None)
overplot = kwargs.pop("overplot", False)
if skypath:
skyplot(cluster,coords='radec',overplot=overplot)
_lplot(ratail,dectail,overplot=True)
else:
starplot(cluster,coords='xy',overplot=overplot)
_lplot(xtail,ytail,overplot=True)
        if filename is not None:
plt.savefig(filename)
return_cluster(cluster, units0, origin0, rorder0, rorder_origin0)
if skypath:
return ttail,ratail,dectail,disttail,pmratail,pmdectail,vlostail
else:
return ttail, xtail, ytail, ztail, vxtail, vytail, vztail
def tail_path_match(
cluster,
dt=0.1,
no=1000,
nt=100,
ntail=100,
pot=MWPotential2014,
path=None,
from_centre=False,
skypath=False,
to_path=False,
do_full=False,
ro=8.0,
vo=220.0,
plot=False,
projected=False,
**kwargs,
):
"""Match stars to a position along the tail path of the cluster
Parameters
----------
cluster : class
StarCluster
dt : float
timestep that StarCluster is to be moved to
no : int
number of timesteps for orbit integration (default:1000)
nt : int
number of points along the tail to set the tail spacing (default: 100)
ntail : int
        number of points along the tail with roaming average (default: 100)
pot : class
galpy Potential that orbit is to be integrate in (default: MWPotential2014)
path : array
array of (t,x,y,x,vx,vy,vz) corresponding to the tail path. If none path is calculated (default: None)
from_centre : bool
        generate orbit from cluster's exact centre instead of its assigned galactocentric coordinates (default: False)
skypath : bool
return sky coordinates instead of cartesian coordinates (default: False)
if True, projected is set to True
to_path : bool
measure distance to the path itself instead of distance to central point along the path (default: False)
do_full : bool
calculate dpath all at once in a single numpy array (can be memory intensive) (default:False)
ro :float
galpy distance scale (Default: 8.)
vo : float
galpy velocity scale (Default: 220.)
plot : bool
        plot a snapshot of the cluster in galactocentric coordinates with the orbital path (default: False)
projected : bool
match to projected orbital path, which means matching just x and y coordinates or Ra and Dec coordinates (not z, or dist) (default:False)
Returns
-------
tstar : float
orbital time associated with star
dprog : float
distance along the path to the progenitor
dpath :
distance to centre of the tail path bin (default) or the tail path (to_path = True)
History
-------
2018 - Written - Webb (UofT)
"""
if path is None:
path = tail_path(
cluster, dt=dt, no=no, nt=nt, ntail=ntail, pot=pot, from_centre=from_centre, skypath=skypath, ro=ro, vo=vo
)
return orbital_path_match(cluster=cluster,dt=dt,nt=no,pot=pot,path=path,from_centre=from_centre,
skypath=skypath,to_path=to_path,do_full=do_full,ro=ro,vo=vo,plot=plot,projected=projected,**kwargs)
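
if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the rotation used by
    # `to_tail`, applied directly to a single velocity vector.  Per the to_tail
    # docstring, the rotated vector should point along the x-axis.  Requires
    # galpy; the velocity components below are arbitrary.
    v = np.array([110.0, -45.0, 20.0])
    rot = _rotate_to_arbitrary_vector(
        np.atleast_2d(v), np.array([1.0, 0.0, 0.0]), inv=False, _dontcutsmall=False
    )
    v_rot = np.array(
        [np.sum(v * rot[0, :, 0]), np.sum(v * rot[0, :, 1]), np.sum(v * rot[0, :, 2])]
    )
    print("rotated velocity vector:", v_rot)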
|
[
"numpy.sum",
"numpy.append",
"numpy.fabs",
"numpy.array",
"numpy.mean",
"numpy.tan",
"numpy.arctan",
"numpy.sqrt",
"matplotlib.pyplot.savefig",
"numpy.atleast_2d"
] |
[((1386, 1438), 'numpy.array', 'np.array', (['[cluster.vxgc, cluster.vygc, cluster.vzgc]'], {}), '([cluster.vxgc, cluster.vygc, cluster.vzgc])\n', (1394, 1438), True, 'import numpy as np\n'), ((1455, 1480), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1463, 1480), True, 'import numpy as np\n'), ((6422, 6434), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6430, 6434), True, 'import numpy as np\n'), ((6447, 6459), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6455, 6459), True, 'import numpy as np\n'), ((6472, 6484), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6480, 6484), True, 'import numpy as np\n'), ((6497, 6509), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6505, 6509), True, 'import numpy as np\n'), ((6523, 6535), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6531, 6535), True, 'import numpy as np\n'), ((6549, 6561), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6557, 6561), True, 'import numpy as np\n'), ((6575, 6587), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6583, 6587), True, 'import numpy as np\n'), ((1529, 1549), 'numpy.atleast_2d', 'np.atleast_2d', (['v_vec'], {}), '(v_vec)\n', (1542, 1549), True, 'import numpy as np\n'), ((6385, 6399), 'numpy.fabs', 'np.fabs', (['dpath'], {}), '(dpath)\n', (6392, 6399), True, 'import numpy as np\n'), ((6704, 6716), 'numpy.sum', 'np.sum', (['indx'], {}), '(indx)\n', (6710, 6716), True, 'import numpy as np\n'), ((6742, 6768), 'numpy.append', 'np.append', (['ttail', 't_mid[i]'], {}), '(ttail, t_mid[i])\n', (6751, 6768), True, 'import numpy as np\n'), ((7695, 7716), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (7706, 7716), True, 'import matplotlib.pyplot as plt\n'), ((6806, 6830), 'numpy.mean', 'np.mean', (['cluster.x[indx]'], {}), '(cluster.x[indx])\n', (6813, 6830), True, 'import numpy as np\n'), ((6869, 6893), 'numpy.mean', 'np.mean', (['cluster.y[indx]'], {}), '(cluster.y[indx])\n', (6876, 6893), True, 'import numpy as np\n'), ((6932, 6956), 'numpy.mean', 'np.mean', (['cluster.z[indx]'], {}), '(cluster.z[indx])\n', (6939, 6956), True, 'import numpy as np\n'), ((6997, 7022), 'numpy.mean', 'np.mean', (['cluster.vx[indx]'], {}), '(cluster.vx[indx])\n', (7004, 7022), True, 'import numpy as np\n'), ((7063, 7088), 'numpy.mean', 'np.mean', (['cluster.vy[indx]'], {}), '(cluster.vy[indx])\n', (7070, 7088), True, 'import numpy as np\n'), ((7129, 7154), 'numpy.mean', 'np.mean', (['cluster.vz[indx]'], {}), '(cluster.vz[indx])\n', (7136, 7154), True, 'import numpy as np\n'), ((5111, 5180), 'numpy.sqrt', 'np.sqrt', (['(cluster.xgc ** 2.0 + cluster.ygc ** 2.0 + cluster.zgc ** 2.0)'], {}), '(cluster.xgc ** 2.0 + cluster.ygc ** 2.0 + cluster.zgc ** 2.0)\n', (5118, 5180), True, 'import numpy as np\n'), ((5190, 5202), 'numpy.tan', 'np.tan', (['dmax'], {}), '(dmax)\n', (5196, 5202), True, 'import numpy as np\n'), ((5263, 5332), 'numpy.sqrt', 'np.sqrt', (['(cluster.xgc ** 2.0 + cluster.ygc ** 2.0 + cluster.zgc ** 2.0)'], {}), '(cluster.xgc ** 2.0 + cluster.ygc ** 2.0 + cluster.zgc ** 2.0)\n', (5270, 5332), True, 'import numpy as np\n'), ((5337, 5359), 'numpy.arctan', 'np.arctan', (['(dmax / dist)'], {}), '(dmax / dist)\n', (5346, 5359), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import optuna
import mlflow
import os
import numpy as np
from numpyencoder import NumpyEncoder
from argparse import Namespace
from typing import List, Dict, Tuple, Optional
from pathlib import Path
import tempfile
import json
#from utils import *
#from config import *
#from data import *
#from models import *
from recsys import utils, config, data, models, eval
class Trainer(object):
def __init__(self,
model,
device: torch.device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
loss_fn=None,
optimizer=None,
scheduler= None,
trial=None):
# Set params
self.model = model
self.device = device
self.loss_fn = loss_fn
self.optimizer = optimizer
self.scheduler = scheduler
self.trial = trial
    def train_step(self, dataloader):
        self.model.train()
        total_loss = 0.0
        for batch, (user, item, label) in enumerate(dataloader):
            user = user.to(self.device)
            item = item.to(self.device)
            label = label.to(self.device)
            # forward
            self.optimizer.zero_grad()
            prediction = self.model.predict(user, item)
            # backward
            loss = self.loss_fn(prediction, label)
            loss.backward()
            self.optimizer.step()
            # cumulative moving average of the batch losses
            total_loss += (loss.item() - total_loss) / (batch + 1)
        return total_loss
    def eval_step(self, dataloader):
        """Validation or test step"""
        # Set model to eval mode
        self.model.eval()
        loss = 0.0
        predictions, labels = [], []
        with torch.no_grad():
            for batch, (user, item, label) in enumerate(dataloader):
                user = user.to(self.device)
                item = item.to(self.device)
                label = label.to(self.device)
                prediction = self.model.predict(user, item)
                J = self.loss_fn(prediction, label).item()
                loss += (J - loss)/(batch + 1)
                # store outputs (move back to CPU before converting to numpy)
                predictions.extend(prediction.cpu().numpy())
                labels.extend(label.cpu().numpy())
        return loss, np.vstack(labels), np.vstack(predictions)
    def predict_step(self, dataloader):
        """ Prediction step (inference)
        Loss is not calculated for this loop.
        """
        self.model.eval()
        predictions, labels = [], []
        # Iterate over batches
        with torch.no_grad():
            for batch, (user, item, label) in enumerate(dataloader):
                # Forward pass w/ inputs
                user = user.to(self.device)
                item = item.to(self.device)
                prediction = self.model.predict(user, item)
                predictions.extend(prediction.cpu().numpy())
                labels.extend(label.numpy())
        return np.vstack(labels), np.vstack(predictions)
def train(self, num_epochs, patience, train_dataloader, val_dataloader):
best_val_loss = np.inf
for epoch in range(num_epochs):
# Step
train_loss = self.train_step(dataloader=train_dataloader)
val_loss, _, _ = self.eval_step(dataloader=val_dataloader)
self.scheduler.step(val_loss)
# Early stopping
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = self.model
_patience = patience # reset _patience
else:
_patience -= 1
if not _patience: # 0
print('Stopping early')
break
# Pruning based on the intermediate value
if self.trial:
self.trial.report(val_loss, epoch)
if self.trial.should_prune():
raise optuna.TrialPruned()
# Tracking
#mlflow.log_metrics(
# {'train_loss':train_loss, 'val_loss':val_loss}, step=epoch
#)
# Logging
print(
f"Epoch: {epoch + 1} |"
f"train_loss: {train_loss:.5f},"
f"val_loss: {val_loss:.5f},"
f"lr: {self.optimizer.param_groups[0]['lr']:.2E},"
f"patience: {_patience}"
)
return best_val_loss, best_model
def train(
params_fp: Path=Path(config.config_dir, "params.json"),
#train_dataloader: torch.utils.data.DataLoader,
#val_dataloader: torch.utils.data.DataLoader,
device: torch.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu"),
trial: optuna.trial._trial.Trial = None)->Tuple:
params = Namespace(**utils.load_dict(params_fp))
dataset = utils.get_data()
n_users = dataset['user_id'].nunique() + 1
n_items = dataset['item_id'].nunique() + 1
# left one out validation
dataloader = data.RCDataloader(params, dataset)
train_dataloader = dataloader.get_train_set()
test_dataloader = dataloader.get_test_set()
model = models.initialize_model(
n_users=n_users,
n_items=n_items,
params_fp=params_fp,
device=device
)
loss_fn = nn.MSELoss()
# Define optimizer & scheduler
optimizer = torch.optim.SGD(model.parameters(), lr=params.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode = "min", factor=0.05, patience=params.patience
)
trainer = Trainer(
model=model,
device=device,
loss_fn=loss_fn,
optimizer=optimizer,
scheduler=scheduler,
trial=trial
)
best_val_loss, best_model = trainer.train(
params.n_epochs, params.patience, train_dataloader, test_dataloader
)
return params, best_model, best_val_loss
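
if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): one way the optional
    # `trial` argument can be exercised.  Trainer.train reports val_loss to the
    # trial each epoch and raises optuna.TrialPruned, so wrapping train() in an
    # Optuna objective enables pruning.  The study settings below are arbitrary
    # and the run still needs the project's data and params.json in place.
    def objective(trial):
        _, _, best_val_loss = train(trial=trial)
        return best_val_loss

    study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner())
    study.optimize(objective, n_trials=20)
    print("best value:", study.best_value)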
|
[
"torch.nn.MSELoss",
"recsys.utils.get_data",
"recsys.models.initialize_model",
"recsys.data.RCDataloader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"optuna.TrialPruned",
"pathlib.Path",
"recsys.utils.load_dict",
"torch.cuda.is_available",
"torch.no_grad",
"numpy.vstack"
] |
[((4369, 4407), 'pathlib.Path', 'Path', (['config.config_dir', '"""params.json"""'], {}), "(config.config_dir, 'params.json')\n", (4373, 4407), False, 'from pathlib import Path\n'), ((4724, 4740), 'recsys.utils.get_data', 'utils.get_data', ([], {}), '()\n', (4738, 4740), False, 'from recsys import utils, config, data, models, eval\n'), ((4883, 4917), 'recsys.data.RCDataloader', 'data.RCDataloader', (['params', 'dataset'], {}), '(params, dataset)\n', (4900, 4917), False, 'from recsys import utils, config, data, models, eval\n'), ((5029, 5127), 'recsys.models.initialize_model', 'models.initialize_model', ([], {'n_users': 'n_users', 'n_items': 'n_items', 'params_fp': 'params_fp', 'device': 'device'}), '(n_users=n_users, n_items=n_items, params_fp=\n params_fp, device=device)\n', (5052, 5127), False, 'from recsys import utils, config, data, models, eval\n'), ((5178, 5190), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5188, 5190), True, 'import torch.nn as nn\n'), ((5309, 5418), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.05)', 'patience': 'params.patience'}), "(optimizer, mode='min', factor=\n 0.05, patience=params.patience)\n", (5351, 5418), False, 'import torch\n'), ((1753, 1768), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1766, 1768), False, 'import torch\n'), ((2219, 2236), 'numpy.vstack', 'np.vstack', (['labels'], {}), '(labels)\n', (2228, 2236), True, 'import numpy as np\n'), ((2238, 2260), 'numpy.vstack', 'np.vstack', (['predictions'], {}), '(predictions)\n', (2247, 2260), True, 'import numpy as np\n'), ((2522, 2537), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2535, 2537), False, 'import torch\n'), ((2866, 2883), 'numpy.vstack', 'np.vstack', (['labels'], {}), '(labels)\n', (2875, 2883), True, 'import numpy as np\n'), ((2885, 2907), 'numpy.vstack', 'np.vstack', (['predictions'], {}), '(predictions)\n', (2894, 2907), True, 'import numpy as np\n'), ((4563, 4588), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4586, 4588), False, 'import torch\n'), ((4681, 4707), 'recsys.utils.load_dict', 'utils.load_dict', (['params_fp'], {}), '(params_fp)\n', (4696, 4707), False, 'from recsys import utils, config, data, models, eval\n'), ((580, 605), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (603, 605), False, 'import torch\n'), ((3828, 3848), 'optuna.TrialPruned', 'optuna.TrialPruned', ([], {}), '()\n', (3846, 3848), False, 'import optuna\n')]
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import cv2
from PIL import Image
import os
def label_to_name(labels, names):
"""
Utility function to map label to corresponding name
"""
arr_map = []
for i in range(0, labels.shape[0]):
label = labels[i]
name = names[names["ClassId"] == label]["SignName"].values[0]
arr_map.append({"id":i, "label":label, "name":name})
return pd.DataFrame(arr_map)
def label_count(mappings):
"""
Utility function to count labels in different classes
"""
return pd.pivot_table(mappings, index = ["label", "name"], values = ["id"], aggfunc = "count")
def show_random_dataset_images(group_label, imgs, to_show=7):
"""
This function takes a DataFrame of items group by labels as well as a set of images and randomly selects to_show images to display
"""
for (lid, lbl), group in group_label:
#print("[{0}] : {1}".format(lid, lbl))
rand_idx = np.random.randint(0, high=group['id'].size, size=to_show, dtype='int')
selected_rows = group.iloc[rand_idx]
selected_img = list(map(lambda id: imgs[id], selected_rows['id']))
selected_labels = list(map(lambda label: label, selected_rows['label']))
show_image_list(selected_img, selected_labels, "{0}: {1}".format(lid, lbl), cols=to_show, fig_size=(9, 9), show_ticks=False)
def show_image_list(img_list, img_labels, title, cols=2, fig_size=(15, 15), show_ticks=True):
"""
Utility function to show us a list of traffic sign images
"""
img_count = len(img_list)
rows = img_count // cols
cmap = None
fig, axes = plt.subplots(rows, cols, figsize=fig_size)
for i in range(0, img_count):
img_name = img_labels[i]
img = img_list[i]
if len(img.shape) < 3 or img.shape[-1] < 3:
cmap = "gray"
img = np.reshape(img, (img.shape[0], img.shape[1]))
if not show_ticks:
axes[i].axis("off")
axes[i].imshow(img, cmap=cmap)
fig.suptitle(title, fontsize=12, fontweight='bold', y = 0.6)
fig.tight_layout()
plt.show()
def grayscale(imgs):
"""
Converts an image in RGB format to grayscale
"""
return cv2.cvtColor(imgs, cv2.COLOR_RGB2GRAY)
def standard_normalization(imgs, dist):
"""
Nornalise the supplied images from data in dist
"""
std = np.std(dist)
mean = np.mean(dist)
return (imgs - mean) / std
def plot_model_results(metrics, axes, lbs, xlb, ylb, titles, fig_title, fig_size=(7, 5), epochs_interval=10):
"""
Nifty utility function to plot results of the execution of our model
"""
fig, axs = plt.subplots(nrows=1, ncols=len(axes), figsize=fig_size)
print("Length of axis: {0}".format(axs.shape))
total_epochs = metrics[0].shape[0]
x_values = np.linspace(1, total_epochs, num=total_epochs, dtype=np.int32)
for m, l in zip(metrics, lbs):
for i in range(0, len(axes)):
ax = axs[i]
axis = axes[i]
ax.plot(x_values, m[:, axis], linewidth=2, label=l)
ax.set(xlabel=xlb[i], ylabel=ylb[i], title=titles[i])
ax.xaxis.set_ticks(np.linspace(1, total_epochs, num=int(total_epochs/epochs_interval), dtype=np.int32))
ax.legend(loc='center right')
plt.suptitle(fig_title, fontsize=14, fontweight='bold')
plt.show()
def load_images(path, size=(32, 32), grayscale=False):
"""
Returns a list of images from a folder as a numpy array
"""
img_list = [os.path.join(path,f) for f in os.listdir(path) if f.endswith(".jpg") or f.endswith(".png")]
imgs = None
if grayscale:
imgs = np.empty([len(img_list), size[0], size[1]], dtype=np.uint8)
else:
imgs = np.empty([len(img_list), size[0], size[1], 3], dtype=np.uint8)
for i, img_path in enumerate(img_list):
img = Image.open(img_path).convert('RGB')
img = img.resize(size)
        # the `grayscale` flag shadows the module-level grayscale() helper, so
        # convert with PIL directly here
        im = np.array(img.convert("L")) if grayscale else np.array(img)
imgs[i] = im
return imgs
def class_to_name(class_ids, sign_names):
return list(map(lambda class_id: sign_names[sign_names["ClassId"] == class_id] ["SignName"].values[0], class_ids))
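
if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): how label_to_name,
    # label_count and show_random_dataset_images chain together, on synthetic
    # data.  The real pipeline would use the traffic-sign images and the
    # signnames CSV instead; the names below are made up.
    fake_imgs = np.random.randint(0, 255, size=(30, 32, 32, 3), dtype=np.uint8)
    fake_labels = np.random.randint(0, 3, size=30)
    fake_names = pd.DataFrame({"ClassId": [0, 1, 2],
                               "SignName": ["Stop", "Yield", "Speed limit"]})
    mappings = label_to_name(fake_labels, fake_names)
    print(label_count(mappings))
    show_random_dataset_images(mappings.groupby(["label", "name"]), fake_imgs, to_show=5)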
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"pandas.pivot_table",
"os.path.join",
"cv2.cvtColor",
"numpy.std",
"matplotlib.pyplot.suptitle",
"PIL.Image.open",
"numpy.mean",
"numpy.random.randint",
"numpy.reshape",
"numpy.linspace",
"numpy.array",
"matplotlib.pyplot.subplots",
"os.listdir"
] |
[((416, 437), 'pandas.DataFrame', 'pd.DataFrame', (['arr_map'], {}), '(arr_map)\n', (428, 437), True, 'import pandas as pd\n'), ((540, 626), 'pandas.pivot_table', 'pd.pivot_table', (['mappings'], {'index': "['label', 'name']", 'values': "['id']", 'aggfunc': '"""count"""'}), "(mappings, index=['label', 'name'], values=['id'], aggfunc=\n 'count')\n", (554, 626), True, 'import pandas as pd\n'), ((1627, 1669), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': 'fig_size'}), '(rows, cols, figsize=fig_size)\n', (1639, 1669), True, 'from matplotlib import pyplot as plt\n'), ((2144, 2154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2152, 2154), True, 'from matplotlib import pyplot as plt\n'), ((2254, 2292), 'cv2.cvtColor', 'cv2.cvtColor', (['imgs', 'cv2.COLOR_RGB2GRAY'], {}), '(imgs, cv2.COLOR_RGB2GRAY)\n', (2266, 2292), False, 'import cv2\n'), ((2413, 2425), 'numpy.std', 'np.std', (['dist'], {}), '(dist)\n', (2419, 2425), True, 'import numpy as np\n'), ((2437, 2450), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (2444, 2450), True, 'import numpy as np\n'), ((2865, 2927), 'numpy.linspace', 'np.linspace', (['(1)', 'total_epochs'], {'num': 'total_epochs', 'dtype': 'np.int32'}), '(1, total_epochs, num=total_epochs, dtype=np.int32)\n', (2876, 2927), True, 'import numpy as np\n'), ((3354, 3409), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['fig_title'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "(fig_title, fontsize=14, fontweight='bold')\n", (3366, 3409), True, 'from matplotlib import pyplot as plt\n'), ((3414, 3424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3422, 3424), True, 'from matplotlib import pyplot as plt\n'), ((955, 1025), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': "group['id'].size", 'size': 'to_show', 'dtype': '"""int"""'}), "(0, high=group['id'].size, size=to_show, dtype='int')\n", (972, 1025), True, 'import numpy as np\n'), ((3576, 3597), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (3588, 3597), False, 'import os\n'), ((1869, 1914), 'numpy.reshape', 'np.reshape', (['img', '(img.shape[0], img.shape[1])'], {}), '(img, (img.shape[0], img.shape[1]))\n', (1879, 1914), True, 'import numpy as np\n'), ((3606, 3622), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3616, 3622), False, 'import os\n'), ((4053, 4066), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4061, 4066), True, 'import numpy as np\n'), ((3927, 3947), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3937, 3947), False, 'from PIL import Image\n')]
|
import textmining
import numpy
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
m=[]
p=0
with open ("ess1.txt", "r") as myfile:
doc1=myfile.read()
#doc1=myfile.read().replace('\n', '')
with open ("ess2.txt", "r") as myfile:
doc2=myfile.read()
#doc2=myfile.read().replace('\n', '')
# Initialize class to create term-document matrix
tdm = textmining.TermDocumentMatrix()
# Add the documents
tdm.add_doc(doc1)
tdm.add_doc(doc2)
# Write out the matrix to a csv file. Note that setting cutoff=1 means
# that words which appear in 1 or more documents will be included in
# the output (i.e. every word will appear in the output). The default
# for cutoff is 2, since we usually aren't interested in words which
# appear in a single document. For this example we want to see all
# words however, hence cutoff=1.
tdm.write_csv('matrix.csv', cutoff=1)
# Instead of writing out the matrix you can also access its rows directly.
# Let's print them to the screen.
# The first row from tdm.rows() holds the terms themselves; the remaining rows
# hold the per-document counts and are stacked into a numeric matrix.
for row in tdm.rows(cutoff=1):
    m.append(row)
    if p == 1:
        arr = row
    elif p > 1:
        arr = numpy.vstack((arr, row))
    p = p + 1
p = 0
print("")
print("TDM:")
print(m)
print("")
print("")
print("")
print("TDM Sparse Matrix:")
print(arr)
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(arr)
a = tfidf.toarray()
numpy.savetxt("foo.csv", a, delimiter=",")
print(tfidf.toarray())
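
# Illustrative note (not part of the original script): the TfidfVectorizer
# imported above can build a TF-IDF matrix directly from the raw documents,
# without assembling the term-document matrix by hand.  Its tokenisation
# differs slightly from textmining's, so the numbers need not match exactly.
vectorizer = TfidfVectorizer()
tfidf_direct = vectorizer.fit_transform([doc1, doc2])
print(tfidf_direct.toarray())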
|
[
"textmining.TermDocumentMatrix",
"numpy.savetxt",
"sklearn.feature_extraction.text.TfidfTransformer",
"numpy.vstack"
] |
[((460, 491), 'textmining.TermDocumentMatrix', 'textmining.TermDocumentMatrix', ([], {}), '()\n', (489, 491), False, 'import textmining\n'), ((1803, 1821), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (1819, 1821), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((1884, 1926), 'numpy.savetxt', 'numpy.savetxt', (['"""foo.csv"""', 'a'], {'delimiter': '""","""'}), "('foo.csv', a, delimiter=',')\n", (1897, 1926), False, 'import numpy\n'), ((1306, 1330), 'numpy.vstack', 'numpy.vstack', (['(arr, row)'], {}), '((arr, row))\n', (1318, 1330), False, 'import numpy\n')]
|
import logging
import numpy as np
from typing import List, Tuple, Union
from autoarray import exc
from autoarray.mask import abstract_mask, mask_2d
from autoarray.structures.grids.one_d import grid_1d
from autoarray.structures.grids.one_d import grid_1d_util
from autoarray.structures.arrays.one_d import array_1d_util
logging.basicConfig()
logger = logging.getLogger(__name__)
class AbstractMask1d(abstract_mask.AbstractMask):
def __new__(
cls,
mask: np.ndarray,
pixel_scales: Tuple[float,],
sub_size: int = 1,
origin: Tuple[float,] = (0.0,),
):
"""
A 1D mask, representing 1D data on a uniform line of pixels with equal spacing.
When applied to 1D data it extracts or masks the unmasked image pixels corresponding to mask entries that
        are `False` (or 0).
The mask also defines the geometry of the 1D data structure it is paired to, for example how every pixel
coordinate on the 1D line of data converts to physical units via the `pixel_scales` and `origin`
parameters and a sub-grid which is used for performing calculations via super-sampling.
Parameters
----------
mask
The ndarray of shape [total_pixels] containing the bool's representing the mask, where `False`
signifies an entry is unmasked and used in calculations.
pixel_scales
The scaled units to pixel units conversion factor of each pixel.
origin
The x origin of the mask's coordinate system in scaled units.
"""
# noinspection PyArgumentList
return abstract_mask.AbstractMask.__new__(
cls=cls,
mask=mask,
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
def __array_finalize__(self, obj):
super().__array_finalize__(obj=obj)
if isinstance(obj, Mask1D):
pass
else:
self.origin = (0.0,)
@property
def shape_native(self) -> Tuple[int]:
return self.shape
@property
def sub_shape_native(self) -> Tuple[int]:
return (self.shape[0] * self.sub_size,)
@property
def mask_sub_1(self) -> "Mask1D":
"""
Returns the mask on the same scaled coordinate system but with a sub-grid of `sub_size`.
"""
return Mask1D(
mask=self, sub_size=1, pixel_scales=self.pixel_scales, origin=self.origin
)
@property
def unmasked_mask(self) -> "Mask1D":
return Mask1D.unmasked(
shape_slim=self.shape_slim,
sub_size=self.sub_size,
pixel_scales=self.pixel_scales,
origin=self.origin,
)
@property
def unmasked_grid_sub_1(self) -> grid_1d.Grid1D:
"""
        The scaled grid of x coordinates of every pixel.
        This is defined from the left edge, such that the first pixel at location [0] will have a negative x value
        in scaled units.
"""
grid_slim = grid_1d_util.grid_1d_slim_via_mask_from(
mask_1d=self, pixel_scales=self.pixel_scales, sub_size=1, origin=self.origin
)
return grid_1d.Grid1D(grid=grid_slim, mask=self.unmasked_mask.mask_sub_1)
@property
def to_mask_2d(self) -> mask_2d.Mask2D:
"""
Map the Mask1D to a Mask2D of shape [total_mask_1d_pixel, 1].
The change in shape and dimensions of the mask is necessary for mapping results from 1D data structures to 2D.
Returns
-------
mask_2d.Mask2D
The 1D mask mapped to a 2D mask of shape [total_mask_1d_pixel, 1].
"""
return mask_2d.Mask2D.manual(
[self],
pixel_scales=(self.pixel_scale, self.pixel_scale),
sub_size=self.sub_size,
origin=(0.0, 0.0),
)
def output_to_fits(self, file_path: str, overwrite: bool = False):
"""
Write the 1D mask to a .fits file.
Parameters
----------
file_path
The full path of the file that is output, including the file name and .fits extension.
overwrite
If `True` and a file already exists with the input file_path the .fits file is overwritten. If `False`,
an error is raised.
Returns
-------
None
Examples
--------
        mask = Mask1D(mask=np.full(shape=(5,), fill_value=False), pixel_scales=1.0)
mask.output_to_fits(file_path='/path/to/file/filename.fits', overwrite=True)
"""
array_1d_util.numpy_array_1d_to_fits(
array_1d=self.astype("float"), file_path=file_path, overwrite=overwrite
)
class Mask1D(AbstractMask1d):
@classmethod
def manual(
cls,
mask: Union[List, np.ndarray],
pixel_scales: Union[float, Tuple[float]],
sub_size: int = 1,
origin: Tuple[float] = (0.0,),
invert: bool = False,
) -> "Mask1D":
if type(mask) is list:
mask = np.asarray(mask).astype("bool")
if invert:
mask = np.invert(mask)
if type(pixel_scales) is float:
pixel_scales = (pixel_scales,)
if len(mask.shape) != 1:
raise exc.MaskException("The input mask is not a one dimensional array")
return Mask1D(
mask=mask, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
@classmethod
def unmasked(
cls,
shape_slim,
pixel_scales: Union[float, Tuple[float]],
sub_size: int = 1,
origin: Tuple[float] = (0.0,),
invert: bool = False,
) -> "Mask1D":
"""
Setup a 1D mask where all pixels are unmasked.
Parameters
----------
        shape_slim
            The 1D shape of the mask in units of pixels.
pixel_scales
The scaled units to pixel units conversion factor of each pixel.
"""
return cls.manual(
mask=np.full(shape=shape_slim, fill_value=False),
pixel_scales=pixel_scales,
origin=origin,
sub_size=sub_size,
invert=invert,
)
@classmethod
def from_fits(
cls,
file_path: str,
pixel_scales: Union[float, Tuple[float]],
sub_size: int = 1,
hdu: int = 0,
origin: Tuple[float] = (0.0,),
) -> "Mask1D":
"""
Loads the 1D mask from a .fits file.
Parameters
----------
file_path
The full path of the fits file.
hdu
            The HDU number in the fits file containing the mask.
pixel_scales
The scaled units to pixel units conversion factor of each pixel.
"""
return cls.manual(
array_1d_util.numpy_array_1d_from_fits(file_path=file_path, hdu=hdu),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
def output_to_fits(self, file_path: str, overwrite: bool = False):
array_1d_util.numpy_array_1d_to_fits(
array_1d=self.astype("float"), file_path=file_path, overwrite=overwrite
)
@property
def pixels_in_mask(self) -> int:
return int(np.size(self) - np.sum(self))
@property
def is_all_false(self) -> bool:
return self.pixels_in_mask == self.shape_slim[0]
@property
def shape_slim(self) -> Tuple[int]:
return self.shape
@property
def shape_slim_scaled(self) -> Tuple[float]:
return (float(self.pixel_scales[0] * self.shape_slim[0]),)
@property
def scaled_maxima(self) -> Tuple[float]:
return (float(self.shape_slim_scaled[0] / 2.0 + self.origin[0]),)
@property
def scaled_minima(self) -> Tuple[float]:
return (-float(self.shape_slim_scaled[0] / 2.0) + self.origin[0],)
@property
def extent(self):
return np.array([self.scaled_minima[0], self.scaled_maxima[0]])
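# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of the Mask1D API defined above. It assumes the
# autoarray package and its dependencies are importable; the mask values and
# pixel scale below are made-up placeholders.
if __name__ == "__main__":
    example_mask = Mask1D.manual(
        mask=[False, False, True, False, True],  # `True` entries are masked out
        pixel_scales=0.1,
    )
    print(example_mask.pixels_in_mask)      # 3 unmasked pixels
    print(example_mask.shape_slim_scaled)   # (0.5,) -- length of the line in scaled units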
|
[
"numpy.full",
"numpy.size",
"numpy.sum",
"logging.basicConfig",
"numpy.invert",
"autoarray.structures.arrays.one_d.array_1d_util.numpy_array_1d_from_fits",
"autoarray.mask.abstract_mask.AbstractMask.__new__",
"autoarray.mask.mask_2d.Mask2D.manual",
"numpy.asarray",
"autoarray.structures.grids.one_d.grid_1d_util.grid_1d_slim_via_mask_from",
"autoarray.structures.grids.one_d.grid_1d.Grid1D",
"numpy.array",
"autoarray.exc.MaskException",
"logging.getLogger"
] |
[((333, 354), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (352, 354), False, 'import logging\n'), ((365, 392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (382, 392), False, 'import logging\n'), ((1685, 1805), 'autoarray.mask.abstract_mask.AbstractMask.__new__', 'abstract_mask.AbstractMask.__new__', ([], {'cls': 'cls', 'mask': 'mask', 'pixel_scales': 'pixel_scales', 'sub_size': 'sub_size', 'origin': 'origin'}), '(cls=cls, mask=mask, pixel_scales=\n pixel_scales, sub_size=sub_size, origin=origin)\n', (1719, 1805), False, 'from autoarray.mask import abstract_mask, mask_2d\n'), ((3177, 3299), 'autoarray.structures.grids.one_d.grid_1d_util.grid_1d_slim_via_mask_from', 'grid_1d_util.grid_1d_slim_via_mask_from', ([], {'mask_1d': 'self', 'pixel_scales': 'self.pixel_scales', 'sub_size': '(1)', 'origin': 'self.origin'}), '(mask_1d=self, pixel_scales=self.\n pixel_scales, sub_size=1, origin=self.origin)\n', (3216, 3299), False, 'from autoarray.structures.grids.one_d import grid_1d_util\n'), ((3337, 3403), 'autoarray.structures.grids.one_d.grid_1d.Grid1D', 'grid_1d.Grid1D', ([], {'grid': 'grid_slim', 'mask': 'self.unmasked_mask.mask_sub_1'}), '(grid=grid_slim, mask=self.unmasked_mask.mask_sub_1)\n', (3351, 3403), False, 'from autoarray.structures.grids.one_d import grid_1d\n'), ((3843, 3971), 'autoarray.mask.mask_2d.Mask2D.manual', 'mask_2d.Mask2D.manual', (['[self]'], {'pixel_scales': '(self.pixel_scale, self.pixel_scale)', 'sub_size': 'self.sub_size', 'origin': '(0.0, 0.0)'}), '([self], pixel_scales=(self.pixel_scale, self.\n pixel_scale), sub_size=self.sub_size, origin=(0.0, 0.0))\n', (3864, 3971), False, 'from autoarray.mask import abstract_mask, mask_2d\n'), ((8249, 8305), 'numpy.array', 'np.array', (['[self.scaled_minima[0], self.scaled_maxima[0]]'], {}), '([self.scaled_minima[0], self.scaled_maxima[0]])\n', (8257, 8305), True, 'import numpy as np\n'), ((5311, 5326), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (5320, 5326), True, 'import numpy as np\n'), ((5469, 5535), 'autoarray.exc.MaskException', 'exc.MaskException', (['"""The input mask is not a one dimensional array"""'], {}), "('The input mask is not a one dimensional array')\n", (5486, 5535), False, 'from autoarray import exc\n'), ((7079, 7147), 'autoarray.structures.arrays.one_d.array_1d_util.numpy_array_1d_from_fits', 'array_1d_util.numpy_array_1d_from_fits', ([], {'file_path': 'file_path', 'hdu': 'hdu'}), '(file_path=file_path, hdu=hdu)\n', (7117, 7147), False, 'from autoarray.structures.arrays.one_d import array_1d_util\n'), ((6247, 6290), 'numpy.full', 'np.full', ([], {'shape': 'shape_slim', 'fill_value': '(False)'}), '(shape=shape_slim, fill_value=False)\n', (6254, 6290), True, 'import numpy as np\n'), ((7554, 7567), 'numpy.size', 'np.size', (['self'], {}), '(self)\n', (7561, 7567), True, 'import numpy as np\n'), ((7570, 7582), 'numpy.sum', 'np.sum', (['self'], {}), '(self)\n', (7576, 7582), True, 'import numpy as np\n'), ((5237, 5253), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (5247, 5253), True, 'import numpy as np\n')]
|
import numpy as np
import random
class Tensor (object):
def __init__(self, data, autograd=False, creators=None, creation_op=None, id=None):
self.data = np.array(data)
self.creation_op = creation_op
self.creators = creators
self.grad = None
self.autograd = autograd
self.children = {}
if (id is None):
id = random.randint(0, 100000)
self.id = id
### Keep Track of how many children a tensor has
if (creators is not None):
for c in creators:
if self.id not in c.children:
c.children[self.id] = 1
else:
c.children[self.id] += 1
### Check whether a tensor has received the correct number of gradients from each child
def all_children_grads_accounted_for(self):
for _,cnt in self.children.items():
if(cnt != 0):
return False
return True
    ### Back Propagation
def backward(self, grad=None, grad_origin=None):
if(self.autograd):
if(grad_origin is not None):
if(self.children[grad_origin.id] == 0):
raise Exception("cannot backprop more than once")
else:
self.children[grad_origin.id] -= 1
if(self.grad is None):
self.grad = grad
else:
self.grad += grad
if(self.creators is not None and (self.all_children_grads_accounted_for() or grad_origin is None)):
### Addition
if(self.creation_op == "add"):
self.creators[0].backward(self.grad, self)
self.creators[1].backward(self.grad, self)
### Negation
if(self.creation_op == "neg"):
self.creators[0].backward(self.grad.__neg__())
                ### Subtraction
                if(self.creation_op == "sub"):
                    self.creators[0].backward(Tensor(self.grad.data), self)
                    self.creators[1].backward(Tensor(self.grad.__neg__().data), self)
                ### Multiplication
                if(self.creation_op == "mul"):
                    self.creators[0].backward(self.grad * self.creators[1], self)
                    self.creators[1].backward(self.grad * self.creators[0], self)
                ### Matrix Multiplication
                if(self.creation_op == "mm"):
                    act = self.creators[0]      # activations
                    weights = self.creators[1]  # weights
                    act.backward(self.grad.mm(weights.transpose()))
                    weights.backward(self.grad.transpose().mm(act).transpose())
                ### Transpose
                if(self.creation_op == "transpose"):
                    self.creators[0].backward(self.grad.transpose())
                ### Summation
                if("sum" in self.creation_op):
                    dim = int(self.creation_op.split("_")[1])
                    self.creators[0].backward(self.grad.expand(dim, self.creators[0].data.shape[dim]))
                ### Expansion
                if("expand" in self.creation_op):
                    dim = int(self.creation_op.split("_")[1])
                    self.creators[0].backward(self.grad.sum(dim))
def __add__(self, other):
if(self.autograd and other.autograd):
return Tensor(self.data + other.data, autograd=True, creators=[self,other], creation_op="add")
return Tensor(self.data + other.data, creators = [self, other], creation_op="add")
def __sub__(self, other):
if(self.autograd and other.autograd):
return Tensor(self.data - other.data, autograd=True, creators=[self, other], creation_op="sub")
return Tensor(self.data - other.data)
def __mul__(self, other):
if(self.autograd and other.autograd):
return Tensor(self.data * other.data, autograd=True, creators=[self, other], creation_op="mul")
return Tensor(self.data * other.data)
def sum(self, dim):
if(self.autograd):
return Tensor(self.data.sum(dim), autograd=True, creators=[self], creation_op="sum_"+str(dim))
return Tensor(self.data.sum(dim))
def expand(self, dim, copies):
trans_cmd = list(range(0, len(self.data.shape)))
trans_cmd.insert(dim, len(self.data.shape))
new_shape = list(self.data.shape) + [copies]
new_data = self.data.repeat(copies).reshape(new_shape)
new_data = new_data.transpose(trans_cmd)
if (self.autograd):
return Tensor(new_data, autograd=True, creators=[self], creation_op="expand_"+str(dim))
return Tensor(new_data)
def transpose(self):
if(self.autograd):
return Tensor(self.data.transpose(), autograd=True, creators=[self], creation_op="transpose")
return Tensor(self.data.transpose())
def mm(self, x):
if(self.autograd):
return Tensor(self.data.dot(x.data), autograd=True, creators=[self,x], creation_op="mm")
return Tensor(self.data.dot(x.data))
def __neg__(self):
if(self.autograd):
return Tensor(self.data * -1, autograd=True, creators=[self], creation_op="neg")
return Tensor(self.data * -1)
def __repr__(self):
return str(self.data.__repr__())
def __str__(self):
return str(self.data.__str__())
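# --- Usage sketch (added for illustration; not part of the original file) ---
# A small, hedged demo of the autograd mechanics implemented above: build two
# tensors, add them, and backpropagate a gradient of ones through the graph.
if __name__ == "__main__":
    a = Tensor([1.0, 2.0, 3.0], autograd=True)
    b = Tensor([4.0, 5.0, 6.0], autograd=True)
    c = a + b
    c.backward(Tensor(np.ones_like(c.data)))
    print(a.grad)  # the ones flow back unchanged to both creators of the sum
    print(b.grad)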
|
[
"numpy.array",
"random.randint"
] |
[((166, 180), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (174, 180), True, 'import numpy as np\n'), ((380, 405), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (394, 405), False, 'import random\n')]
|
# THIS IS A PROGRAM FOR CLUSTERING.
from clustering import clustering # import the self-made module (clustering.py) and its clustering class
import pandas as pd
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import filedialog, image_names, ttk, messagebox
import os
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import numpy as np
# Create the plot_clustering class, a subclass of clustering, for plotting clustering results
class plot_clustering(clustering):
    # --- Function for plotting the original data -------------------------------------------------------------
    def data_plot(file_name):
        fig = plt.Figure() # create a Figure instance
print (file_name)
        csv_input = pd.read_csv(filepath_or_buffer= file_name, encoding= "utf8", sep= ",") # read the csv specified by file_name
ax = fig.add_subplot(111)
ax.set_aspect('equal') # aspect ratio is equal
ax.set_xlim([-500,1500]) # set x axis value limit
ax.set_ylim([-500,1500]) # set y axis value limit
ax.set_xlabel('x') # name x label "x"
ax.set_ylabel('y') # name y label "y"
ax.scatter(csv_input["X"], csv_input["Y"], c='red')
ax.grid()
ax.plot()
fig.savefig("original_" + file_name + ".png")
        original_data_label = tk.Label(text= "original") # create the label for the original data
original_data_label.grid(row= 7, column= 0, sticky= 'news', padx= 5, pady= 5)
canvas_original = FigureCanvasTkAgg(fig, display)
canvas_original.get_tk_widget().grid(row= 8, column= 0, sticky= 'news', padx= 5, pady= 5)
# -----------------------------------------------------------------------------------------------------------
    # --- Function for plotting the clustered data ---------------------------------------------------------------
    def plot_cluster_list(file_name, cluster_list, color, method_name):
        # create a Figure instance
fig = plt.Figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.set_xlim([-500,1500])
ax.set_ylim([-500,1500])
        for i in range(0, len(cluster_list)): # loop over every cluster contained in cluster_list
            cluster = cluster_list[i] # take the clusters out one at a time
            c = color[i % len(color)] # color of the plot
            csv_input = pd.DataFrame(np.array(cluster)) # prepare the data for plotting
ax.scatter(csv_input[0], csv_input[1], c=c)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.grid()
ax.plot()
fig.savefig(method_name + "_" + file_name + ".png")
return fig
# -----------------------------------------------------------------------------------------------------------
    # --- Single-linkage (shortest distance) method -----------------------------------------------------------------------------------------------------------
    def shortest_method(num_of_cluster, file_name):
        csv_input = pd.read_csv(filepath_or_buffer= file_name, encoding= "utf8", sep= ",") # read the csv specified by file_name
cluster_list = clustering.make_1elem_cluster(csv_input)
while len(cluster_list) > num_of_cluster:
cluster_list = clustering.shortest(cluster_list)
return cluster_list
# ---------------------------------------------------------------------------------------------------------------------------------------------
    # --- Complete-linkage (longest distance) method ----------------------------------------------------------------------------------------------------------
    def longest_method(num_of_cluster, file_name):
        csv_input = pd.read_csv(filepath_or_buffer= file_name, encoding= "utf8", sep= ",") # read the csv specified by file_name
cluster_list = clustering.make_1elem_cluster(csv_input)
while len(cluster_list) > num_of_cluster:
cluster_list = clustering.longest(cluster_list)
return cluster_list
# ---------------------------------------------------------------------------------------------------------------------------------------------
    # --- Centroid method -----------------------------------------------------------------------------------------------------------------------
    def balance_method(num_of_cluster, file_name):
        csv_input = pd.read_csv(filepath_or_buffer= file_name, encoding= "utf8", sep= ",") # read the csv specified by file_name
cluster_list = clustering.make_1elem_cluster(csv_input)
while len(cluster_list) > num_of_cluster:
cluster_list = clustering.balance(cluster_list)
return cluster_list
# ---------------------------------------------------------------------------------------------------------------------------------------------
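    # --- Usage sketch (added for illustration; not part of the original script) -------------------------------------
    # A hedged example of calling the three linkage methods directly, outside the GUI. The csv file name below is one
    # of the files mentioned in select_mode() and must contain "X" and "Y" columns as expected by the methods above:
    #
    #   clusters = plot_clustering.shortest_method(num_of_cluster=3, file_name="m2sd50.csv")
    #   fig = plot_clustering.plot_cluster_list("m2sd50.csv", clusters, ['blue', 'red', 'green'], "shortest")
    #   (plot_cluster_list also saves the figure as "shortest_m2sd50.csv.png")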
# Function definitions
# --- Function for choosing which data file to use ----------------------------------------------------------------------------------------------------------------------------------------------------
def select_mode(): # This function is unused; files are selected with the browse button via the file_select function.
print("1. m2sd50.csv\n2. m2sd200.csv\n3. m3sd50.csv\n4. m3sd200.csv\n5. m4sd50.csv\n6. m4sd200.csv\n7. m5sd50.csv\n8. m5sd200.csv\n9. m9sd50.csv\n10. m9sd200.csv \n")
mode = input("mode = ")
if (mode == "1"):
csv_input = pd.read_csv(filepath_or_buffer="m2sd50.csv", encoding="utf8", sep=",")
print("データファイル : m2sd50.csv")
elif (mode == "2"):
csv_input = pd.read_csv(filepath_or_buffer="m2sd200.csv", encoding="utf8", sep=",")
print("データファイル : m2sd200.csv")
elif (mode == "3"):
csv_input = pd.read_csv(filepath_or_buffer="m3sd50.csv", encoding="utf8", sep=",")
print("データファイル : m3sd50.csv")
elif (mode == "4"):
csv_input = pd.read_csv(filepath_or_buffer="m3sd200.csv", encoding="utf8", sep=",")
print("データファイル : m3sd200.csv")
elif (mode == "5"):
csv_input = pd.read_csv(filepath_or_buffer="m4sd50.csv", encoding="utf8", sep=",")
print("データファイル : m4sd50.csv")
elif (mode == "6"):
csv_input = pd.read_csv(filepath_or_buffer="m4sd200.csv", encoding="utf8", sep=",")
print("データファイル : m4sd200.csv")
elif (mode == "7"):
csv_input = pd.read_csv(filepath_or_buffer="m5sd50.csv", encoding="utf8", sep=",")
print("データファイル : m5sd50.csv")
elif (mode == "8"):
csv_input = pd.read_csv(filepath_or_buffer="m5sd200.csv", encoding="utf8", sep=",")
print("データファイル : m5sd200.csv")
elif (mode == "9"):
csv_input = pd.read_csv(filepath_or_buffer="m9sd50.csv", encoding="utf8", sep=",")
print("データファイル : m9sd50.csv")
elif (mode == "10"):
csv_input = pd.read_csv(filepath_or_buffer="m9sd200.csv", encoding="utf8", sep=",")
print("データファイル : m9sd200.csv")
return csv_input
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# === Functions for GUI action =============================================================
# Function executed when the file browse button is pressed
def file_select():
    dir = '/Users/kosei/yama/4EJ/4年 情報工学実験/10 クラスタリング/program4clustering' # initial directory for the file dialog
extension = [("すべて", "*"), ("CSV","*.csv")] # select file extension
file_path = tk.filedialog.askopenfilename(filetypes= extension, initialdir= dir) # get file path
file_name = os.path.basename(file_path) # get file name
    ref_box.delete(0, tk.END) # clear the entry box
    ref_box.insert(tk.END, file_name) # show the file name in the entry box
os.chdir(os.path.dirname(os.path.abspath(file_path)))
print(file_path) # print
print(file_name) # print
plot_clustering.data_plot(file_name)
# Function that displays the plots when the output button is pressed
def draw():
file_name = ref_box.get() # get file name by getting ref_box value
scale_value = scale.get() # get scale value
num_of_cluster = scale_value
cluster_list_shortest_method = plot_clustering.shortest_method(num_of_cluster, file_name)
cluster_list_longest_method = plot_clustering.longest_method(num_of_cluster, file_name)
cluster_list_balance_method = plot_clustering.balance_method(num_of_cluster, file_name)
fig_shortest = plot_clustering.plot_cluster_list(file_name, cluster_list_shortest_method, color, "shortest")
fig_longest = plot_clustering.plot_cluster_list(file_name, cluster_list_longest_method, color, "longest")
fig_balance = plot_clustering.plot_cluster_list(file_name, cluster_list_balance_method, color, "balance")
    shortest_method_name_label = tk.Label(text= 'shortest') # create the shortest_method_name label
shortest_method_name_label.grid(row= 7, column= 1, sticky= 'news', padx= 5, pady= 5)
canvas_shortest = FigureCanvasTkAgg(fig_shortest, display)
canvas_shortest.get_tk_widget().grid(row= 8, column= 1, sticky= 'news', padx= 5, pady= 5)
    longest_method_name_label = tk.Label(text= 'longest') # create the longest_method_name label
longest_method_name_label.grid(row= 9, column= 0, sticky= 'news', padx= 5, pady= 5)
canvas_longest = FigureCanvasTkAgg(fig_longest, display)
canvas_longest.get_tk_widget().grid(row= 10, column= 0, sticky= 'news', padx= 5, pady= 5)
    balance_method_name_label = tk.Label(text= 'balance') # create the balance_method_name label
balance_method_name_label.grid(row= 9, column= 1, sticky= 'news', padx= 5, pady= 5)
canvas_balance = FigureCanvasTkAgg(fig_balance, display)
canvas_balance.get_tk_widget().grid(row= 10, column= 1, sticky= 'news', padx= 5, pady= 5)
# ==============================================================================================
# === main ========================================================================================================================
# GUI setting
display = tk.Tk() # create instance (create main window)
display.geometry('1000x1200') # set window size
display.title('10 クラスタリング') # set title
color = ['blue', 'red', 'green', 'orange', 'blueviolet', 'gray', 'magenta'] # colors used in the plots
# File selection section
# create the info label
select_file_label = tk.Label(text= "1. ファイル選択") # create label for selecting file
select_file_label.grid(row= 0, column= 0, sticky= 'nws', padx= 5, pady= 5) # position detail setting
# create the entry box for the selected file name
ref_box = tk.Entry(width= 30)
ref_box.grid(row= 1, column= 0, sticky= 'news', padx= 5, pady= 5)
# create the browse button
ref_button = tk.Button(text= "参照", command= file_select)
ref_button.grid(row= 1, column= 1, sticky= 'nws', padx= 5, pady= 5)
# Detailed settings section (choosing the number of clusters)
# create the info label
detail_setting_label = tk.Label(text= "2. 詳細設定")
detail_setting_label.grid(row= 2, column= 0, sticky= 'nws', padx= 5, pady= 5)
# create the scale widget
scale_label = tk.Label(text= "▷ クラスタ数")
scale_label.grid(row= 3, column= 0, sticky= 'nws')
scale = tk.Scale(orient= tk.HORIZONTAL, from_= 2, to= 100)
scale.grid(row= 4, column= 0, sticky= 'news')
# Graph output section
# create the info label
graph_label = tk.Label(text= "3. グラフ")
graph_label.grid(row= 5, column= 0, sticky= 'nws', padx= 5, pady= 5)
# output button
output_button = tk.Button(text= "グラフ出力", command= draw)
output_button.grid(row= 6, column= 0, sticky= 'nws', padx= 5, pady= 5)
# even if this program is finished, the window never disappears
display.mainloop()
# ========================================================================================
|
[
"os.path.abspath",
"clustering.clustering.balance",
"os.path.basename",
"tkinter.Button",
"pandas.read_csv",
"tkinter.Entry",
"matplotlib.pyplot.Figure",
"clustering.clustering.shortest",
"tkinter.filedialog.askopenfilename",
"tkinter.Scale",
"numpy.array",
"clustering.clustering.longest",
"clustering.clustering.make_1elem_cluster",
"tkinter.Label",
"tkinter.Tk",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] |
[((9495, 9502), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (9500, 9502), True, 'import tkinter as tk\n'), ((9768, 9794), 'tkinter.Label', 'tk.Label', ([], {'text': '"""1. ファイル選択"""'}), "(text='1. ファイル選択')\n", (9776, 9794), True, 'import tkinter as tk\n'), ((9962, 9980), 'tkinter.Entry', 'tk.Entry', ([], {'width': '(30)'}), '(width=30)\n', (9970, 9980), True, 'import tkinter as tk\n'), ((10072, 10113), 'tkinter.Button', 'tk.Button', ([], {'text': '"""参照"""', 'command': 'file_select'}), "(text='参照', command=file_select)\n", (10081, 10113), True, 'import tkinter as tk\n'), ((10240, 10264), 'tkinter.Label', 'tk.Label', ([], {'text': '"""2. 詳細設定"""'}), "(text='2. 詳細設定')\n", (10248, 10264), True, 'import tkinter as tk\n'), ((10369, 10393), 'tkinter.Label', 'tk.Label', ([], {'text': '"""▷ クラスタ数"""'}), "(text='▷ クラスタ数')\n", (10377, 10393), True, 'import tkinter as tk\n'), ((10454, 10501), 'tkinter.Scale', 'tk.Scale', ([], {'orient': 'tk.HORIZONTAL', 'from_': '(2)', 'to': '(100)'}), '(orient=tk.HORIZONTAL, from_=2, to=100)\n', (10462, 10501), True, 'import tkinter as tk\n'), ((10589, 10612), 'tkinter.Label', 'tk.Label', ([], {'text': '"""3. グラフ"""'}), "(text='3. グラフ')\n", (10597, 10612), True, 'import tkinter as tk\n'), ((10707, 10744), 'tkinter.Button', 'tk.Button', ([], {'text': '"""グラフ出力"""', 'command': 'draw'}), "(text='グラフ出力', command=draw)\n", (10716, 10744), True, 'import tkinter as tk\n'), ((7001, 7067), 'tkinter.filedialog.askopenfilename', 'tk.filedialog.askopenfilename', ([], {'filetypes': 'extension', 'initialdir': 'dir'}), '(filetypes=extension, initialdir=dir)\n', (7030, 7067), True, 'import tkinter as tk\n'), ((7103, 7130), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (7119, 7130), False, 'import os\n'), ((8269, 8294), 'tkinter.Label', 'tk.Label', ([], {'text': '"""shortest"""'}), "(text='shortest')\n", (8277, 8294), True, 'import tkinter as tk\n'), ((8438, 8478), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_shortest', 'display'], {}), '(fig_shortest, display)\n', (8455, 8478), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((8606, 8630), 'tkinter.Label', 'tk.Label', ([], {'text': '"""longest"""'}), "(text='longest')\n", (8614, 8630), True, 'import tkinter as tk\n'), ((8771, 8810), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_longest', 'display'], {}), '(fig_longest, display)\n', (8788, 8810), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((8938, 8962), 'tkinter.Label', 'tk.Label', ([], {'text': '"""balance"""'}), "(text='balance')\n", (8946, 8962), True, 'import tkinter as tk\n'), ((9103, 9142), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_balance', 'display'], {}), '(fig_balance, display)\n', (9120, 9142), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((644, 656), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (654, 656), True, 'import matplotlib.pyplot as plt\n'), ((724, 791), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'file_name', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer=file_name, encoding='utf8', sep=',')\n", (735, 791), True, 'import pandas as pd\n'), ((1307, 1332), 'tkinter.Label', 'tk.Label', ([], {'text': '"""original"""'}), "(text='original')\n", (1315, 1332), True, 'import tkinter as tk\n'), ((1460, 1491), 
'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig', 'display'], {}), '(fig, display)\n', (1477, 1491), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((1915, 1927), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (1925, 1927), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2932), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'file_name', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer=file_name, encoding='utf8', sep=',')\n", (2876, 2932), True, 'import pandas as pd\n'), ((2988, 3028), 'clustering.clustering.make_1elem_cluster', 'clustering.make_1elem_cluster', (['csv_input'], {}), '(csv_input)\n', (3017, 3028), False, 'from clustering import clustering\n'), ((3513, 3580), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'file_name', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer=file_name, encoding='utf8', sep=',')\n", (3524, 3580), True, 'import pandas as pd\n'), ((3636, 3676), 'clustering.clustering.make_1elem_cluster', 'clustering.make_1elem_cluster', (['csv_input'], {}), '(csv_input)\n', (3665, 3676), False, 'from clustering import clustering\n'), ((4171, 4238), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'file_name', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer=file_name, encoding='utf8', sep=',')\n", (4182, 4238), True, 'import pandas as pd\n'), ((4294, 4334), 'clustering.clustering.make_1elem_cluster', 'clustering.make_1elem_cluster', (['csv_input'], {}), '(csv_input)\n', (4323, 4334), False, 'from clustering import clustering\n'), ((5112, 5182), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m2sd50.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m2sd50.csv', encoding='utf8', sep=',')\n", (5123, 5182), True, 'import pandas as pd\n'), ((3107, 3140), 'clustering.clustering.shortest', 'clustering.shortest', (['cluster_list'], {}), '(cluster_list)\n', (3126, 3140), False, 'from clustering import clustering\n'), ((3755, 3787), 'clustering.clustering.longest', 'clustering.longest', (['cluster_list'], {}), '(cluster_list)\n', (3773, 3787), False, 'from clustering import clustering\n'), ((4413, 4445), 'clustering.clustering.balance', 'clustering.balance', (['cluster_list'], {}), '(cluster_list)\n', (4431, 4445), False, 'from clustering import clustering\n'), ((5251, 5322), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m2sd200.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m2sd200.csv', encoding='utf8', sep=',')\n", (5262, 5322), True, 'import pandas as pd\n'), ((7301, 7327), 'os.path.abspath', 'os.path.abspath', (['file_path'], {}), '(file_path)\n', (7316, 7327), False, 'import os\n'), ((2293, 2310), 'numpy.array', 'np.array', (['cluster'], {}), '(cluster)\n', (2301, 2310), True, 'import numpy as np\n'), ((5392, 5462), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m3sd50.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m3sd50.csv', encoding='utf8', sep=',')\n", (5403, 5462), True, 'import pandas as pd\n'), ((5531, 5602), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m3sd200.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m3sd200.csv', encoding='utf8', sep=',')\n", (5542, 5602), True, 'import pandas as pd\n'), ((5672, 5742), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 
'"""m4sd50.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m4sd50.csv', encoding='utf8', sep=',')\n", (5683, 5742), True, 'import pandas as pd\n'), ((5811, 5882), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m4sd200.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m4sd200.csv', encoding='utf8', sep=',')\n", (5822, 5882), True, 'import pandas as pd\n'), ((5952, 6022), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m5sd50.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m5sd50.csv', encoding='utf8', sep=',')\n", (5963, 6022), True, 'import pandas as pd\n'), ((6091, 6162), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m5sd200.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m5sd200.csv', encoding='utf8', sep=',')\n", (6102, 6162), True, 'import pandas as pd\n'), ((6232, 6302), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m9sd50.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m9sd50.csv', encoding='utf8', sep=',')\n", (6243, 6302), True, 'import pandas as pd\n'), ((6372, 6443), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""m9sd200.csv"""', 'encoding': '"""utf8"""', 'sep': '""","""'}), "(filepath_or_buffer='m9sd200.csv', encoding='utf8', sep=',')\n", (6383, 6443), True, 'import pandas as pd\n')]
|
"""
.. module:: foreground
:platform: Unix
:synopsis: functions describing behaviour of foreground spectra.
.. moduleauthor: <NAME> <<EMAIL>>
"""
import numpy as np
def BB_scaling(nu, nu_0, T):
"""Blackbody scaling factor from frequency nu, to frequency nu_0.
Parameters
----------
nu : `float`
Frequency to be scaled to.
nu_0 : `float`
Reference frequency to be scaled from.
T : `float`
Temperature of blackbody.
Returns
-------
`float`
Ratio of BB radiance between frequencies `nu` and `nu_0`.
"""
h = 6.63e-34
kb = 1.38e-23
a = np.exp(h * nu_0 * 1.e9 / (kb * T)) - 1.
b = np.exp(h * nu * 1.e9 / (kb * T)) - 1.
return (a / b) ** 2
def synch_cl(nu, ell, A_S, alpha_S, beta_S, nu_S_0, ell_S_0):
"""Model for the synchrotron power spectrum.
Parameters
----------
nu : float
Frequency at which to evaluate the spectrum.
ell : array_like(`int`, ndim=1)
Multipole range over which to evaluate spectrum.
A_S : `float`
        Amplitude of the spectrum at reference multipole `ell_S_0`.
alpha_S : `float`
Index of the frequency dependence.
beta_S : `float`
Index of the multipole dependence.
    nu_S_0 : `float`
Reference frequency.
ell_S_0 : `int`
Reference multipole.
Returns
-------
array_like(`float`, ndim=1)
The synchrotron spectrum at frequency `nu`.
"""
s = (nu / nu_S_0) ** (2. * alpha_S) * (ell / ell_S_0) ** beta_S
return A_S * s
def dust_cl(nu, ell, p, T, A_D, alpha_D, beta_D, nu_D_0, ell_D_0):
"""Model for the dust power spectrum.
Parameters
----------
nu : `float`
Frequency at which to evaluate the spectrum.
ell : array_like(int, ndim=1)
Multipole range over which to evaluate spectrum.
p : `float`
Polarization fraction of the dust.
T : `float`
Temperature of the dust.
A_D : `float`
Amplitude of dust spectrum at reference multipole `ell_D_0`.
alpha_D : `float`
Index of the frequency dependence of spectrum.
beta_D : `float`
Index of multipole dependence of spectrum.
nu_D_0 : `float`
Reference frequency.
ell_D_0 : `int`
Reference multipole.
Returns
-------
array_like(`float`, ndim=1)
Dust spectrum at frequency `nu`.
"""
s = (nu / nu_D_0) ** (2. * alpha_D) * (ell / ell_D_0) ** beta_D
bb = BB_scaling(nu, nu_D_0, T)
return p ** 2 * A_D * s * bb
def fg_res_sys(nu, nu_S_ref, alpha_S, nu_D_ref, alpha_D, N_chan, n_l):
"""Systematics introduced in CMB channels by foreground removal.
Parameters
----------
nu : `float`
Frequency at which to evaluate.
nu_S_ref : `float`
Reference frequency of synchrotron spectrum.
alpha_S : `float`
Index of frequency dependence of synchrotron spectrum.
nu_D_ref : `float`
Reference frequency of dust spectrum.
alpha_D : `float`
Index of frequency dependence of dust spectrum.
N_chan : `int`
        Number of channels used for foreground removal.
n_l: list(array_like(float, ndim=1))
List of the instrumental noise in foreground channels.
Returns
-------
array_like(float, ndim=1)
Total noise spectrum at frequency nu due to foreground channel
systematics.
"""
f = (nu / nu_S_ref) ** (2 * alpha_S) + (nu / nu_D_ref) ** (2 * alpha_D)
summation = 1. / sum([1. / n for n in n_l])
a = 4. / (N_chan * (N_chan - 1.))
return a * summation * f
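# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of evaluating the foreground models defined above; every
# number here (frequencies, amplitudes, spectral indices) is a made-up placeholder.
if __name__ == "__main__":
    ell = np.arange(2, 101)
    c_synch = synch_cl(nu=95.0, ell=ell, A_S=1e-3, alpha_S=-3.0, beta_S=-2.6,
                       nu_S_0=30.0, ell_S_0=80)
    c_dust = dust_cl(nu=95.0, ell=ell, p=0.05, T=19.6, A_D=1e-2, alpha_D=2.2,
                     beta_D=-2.5, nu_D_0=353.0, ell_D_0=80)
    print(c_synch[:3], c_dust[:3])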
|
[
"numpy.exp"
] |
[((626, 668), 'numpy.exp', 'np.exp', (['(h * nu_0 * 1000000000.0 / (kb * T))'], {}), '(h * nu_0 * 1000000000.0 / (kb * T))\n', (632, 668), True, 'import numpy as np\n'), ((674, 714), 'numpy.exp', 'np.exp', (['(h * nu * 1000000000.0 / (kb * T))'], {}), '(h * nu * 1000000000.0 / (kb * T))\n', (680, 714), True, 'import numpy as np\n')]
|
#################################################################
# Code written by <NAME> (<EMAIL>)
# For bug report, please contact author using the email address
#################################################################
import sys, random, time, argparse
from collections import OrderedDict
import pickle
import numpy as np
import theano
import theano.tensor as T
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
_TEST_RATIO = 0.15
_VALIDATION_RATIO = 0.1
def unzip(zipped):
new_params = OrderedDict()
for key, value in zipped.items():
new_params[key] = value.get_value()
return new_params
def numpy_floatX(data):
return np.asarray(data, dtype=config.floatX)
def get_random_weight(dim1, dim2, left=-0.1, right=0.1):
return np.random.uniform(left, right, (dim1, dim2)).astype(config.floatX)
def load_embedding(options):
m = np.load(options['embFile'])
w = (m['w'] + m['w_tilde']) / 2.0
return w
def init_params(options):
params = OrderedDict()
np.random.seed(0)
inputDimSize = options['inputDimSize']
numAncestors = options['numAncestors']
embDimSize = options['embDimSize']
hiddenDimSize = options['hiddenDimSize'] #hidden layer does not need an extra space
attentionDimSize = options['attentionDimSize']
numClass = options['numClass']
params['W_emb'] = get_random_weight(inputDimSize+numAncestors, embDimSize)
if len(options['embFile']) > 0:
params['W_emb'] = load_embedding(options)
options['embDimSize'] = params['W_emb'].shape[1]
embDimSize = options['embDimSize']
params['W_attention'] = get_random_weight(embDimSize*2, attentionDimSize)
params['b_attention'] = np.zeros(attentionDimSize).astype(config.floatX)
params['v_attention'] = np.random.uniform(-0.1, 0.1, attentionDimSize).astype(config.floatX)
params['W_gru'] = get_random_weight(embDimSize, 3*hiddenDimSize)
params['U_gru'] = get_random_weight(hiddenDimSize, 3*hiddenDimSize)
params['b_gru'] = np.zeros(3 * hiddenDimSize).astype(config.floatX)
params['W_output'] = get_random_weight(hiddenDimSize, numClass)
params['b_output'] = np.zeros(numClass).astype(config.floatX)
return params
def init_tparams(params):
tparams = OrderedDict()
for key, value in params.items():
tparams[key] = theano.shared(value, name=key)
return tparams
def dropout_layer(state_before, use_noise, trng, prob):
proj = T.switch(use_noise, (state_before * trng.binomial(state_before.shape, p=prob, n=1, dtype=state_before.dtype)), state_before * 0.5)
return proj
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def gru_layer(tparams, emb, options):
hiddenDimSize = options['hiddenDimSize']
timesteps = emb.shape[0]
if emb.ndim == 3: n_samples = emb.shape[1]
else: n_samples = 1
def stepFn(wx, h, U_gru):
uh = T.dot(h, U_gru)
r = T.nnet.sigmoid(_slice(wx, 0, hiddenDimSize) + _slice(uh, 0, hiddenDimSize))
z = T.nnet.sigmoid(_slice(wx, 1, hiddenDimSize) + _slice(uh, 1, hiddenDimSize))
h_tilde = T.tanh(_slice(wx, 2, hiddenDimSize) + r * _slice(uh, 2, hiddenDimSize))
h_new = z * h + ((1. - z) * h_tilde)
return h_new
Wx = T.dot(emb, tparams['W_gru']) + tparams['b_gru']
results, updates = theano.scan(fn=stepFn, sequences=[Wx], outputs_info=T.alloc(numpy_floatX(0.0), n_samples, hiddenDimSize), non_sequences=[tparams['U_gru']], name='gru_layer', n_steps=timesteps)
return results
# Compute the attention weights
def generate_attention(tparams, leaves, ancestors):
# f(ei,ej)
attentionInput = T.concatenate([tparams['W_emb'][leaves], tparams['W_emb'][ancestors]], axis=2)
mlpOutput = T.tanh(T.dot(attentionInput, tparams['W_attention']) + tparams['b_attention'])
preAttention = T.dot(mlpOutput, tparams['v_attention'])
# softmax f(ei,ej)
attention = T.nnet.softmax(preAttention)
return attention
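# (Added note, not part of the original code.) generate_attention implements GRAM-style
# attention over the ontology: for every (leaf, ancestor) pair it computes
#   score_ij = v_attention . tanh(W_attention [e_i ; e_j] + b_attention)
# and normalises the scores with a softmax over each leaf's set of ancestors.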
def softmax_layer(tparams, emb):
nom = T.exp(T.dot(emb, tparams['W_output']) + tparams['b_output'])
denom = nom.sum(axis=2, keepdims=True)
output = nom / denom
return output
def build_model(tparams, leavesList, ancestorsList, options):
dropoutRate = options['dropoutRate']
trng = RandomStreams(123)
use_noise = theano.shared(numpy_floatX(0.))
x = T.tensor3('x', dtype=config.floatX)
y = T.tensor3('y', dtype=config.floatX)
mask = T.matrix('mask', dtype=config.floatX)
lengths = T.vector('lengths', dtype=config.floatX)
n_timesteps = x.shape[0]
n_samples = x.shape[1]
    # compute the final representation g_i = sum_j(a_{ij} * e_j)
embList = []
for leaves, ancestors in zip(leavesList, ancestorsList):
tempAttention = generate_attention(tparams, leaves, ancestors)
tempEmb = (tparams['W_emb'][ancestors] * tempAttention[:,:,None]).sum(axis=1)
embList.append(tempEmb)
emb = T.concatenate(embList, axis=0)
    # prediction model
x_emb = T.tanh(T.dot(x, emb))
hidden = gru_layer(tparams, x_emb, options)
hidden = dropout_layer(hidden, use_noise, trng, dropoutRate)
y_hat = softmax_layer(tparams, hidden) * mask[:,:,None]
    # compute the loss
logEps = 1e-8
cross_entropy = -(y * T.log(y_hat + logEps) + (1. - y) * T.log(1. - y_hat + logEps))
output_loglikelihood = cross_entropy.sum(axis=2).sum(axis=0) / lengths
cost_noreg = T.mean(output_loglikelihood)
    cost = cost_noreg
    if options['L2'] > 0.:
cost = cost_noreg + options['L2'] * ((tparams['W_output']**2).sum() + (tparams['W_attention']**2).sum() + (tparams['v_attention']**2).sum())
return use_noise, x, y, mask, lengths, cost, cost_noreg, y_hat
def load_data(seqFile, labelFile, timeFile=''):
sequences = np.array(pickle.load(open(seqFile, 'rb')))
labels = np.array(pickle.load(open(labelFile, 'rb')))
if len(timeFile) > 0:
times = np.array(pickle.load(open(timeFile, 'rb')))
np.random.seed(0)
dataSize = len(labels)
ind = np.random.permutation(dataSize)
nTest = int(_TEST_RATIO * dataSize)
nValid = int(_VALIDATION_RATIO * dataSize)
test_indices = ind[:nTest]
valid_indices = ind[nTest:nTest+nValid]
train_indices = ind[nTest+nValid:]
train_set_x = sequences[train_indices]
train_set_y = labels[train_indices]
test_set_x = sequences[test_indices]
test_set_y = labels[test_indices]
valid_set_x = sequences[valid_indices]
valid_set_y = labels[valid_indices]
train_set_t = None
test_set_t = None
valid_set_t = None
if len(timeFile) > 0:
train_set_t = times[train_indices]
test_set_t = times[test_indices]
valid_set_t = times[valid_indices]
def len_argsort(seq):
return sorted(range(len(seq)), key=lambda x: len(seq[x]))
train_sorted_index = len_argsort(train_set_x)
train_set_x = [train_set_x[i] for i in train_sorted_index]
train_set_y = [train_set_y[i] for i in train_sorted_index]
valid_sorted_index = len_argsort(valid_set_x)
valid_set_x = [valid_set_x[i] for i in valid_sorted_index]
valid_set_y = [valid_set_y[i] for i in valid_sorted_index]
test_sorted_index = len_argsort(test_set_x)
test_set_x = [test_set_x[i] for i in test_sorted_index]
test_set_y = [test_set_y[i] for i in test_sorted_index]
if len(timeFile) > 0:
train_set_t = [train_set_t[i] for i in train_sorted_index]
valid_set_t = [valid_set_t[i] for i in valid_sorted_index]
test_set_t = [test_set_t[i] for i in test_sorted_index]
train_set = (train_set_x, train_set_y, train_set_t)
valid_set = (valid_set_x, valid_set_y, valid_set_t)
test_set = (test_set_x, test_set_y, test_set_t)
return train_set, valid_set, test_set
def adadelta(tparams, grads, x, y, mask, lengths, cost):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_grad' % k) for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rup2' % k) for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rgrad2' % k) for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, y, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([], [], updates=ru2up + param_up, on_unused_input='ignore', name='adadelta_f_update')
return f_grad_shared, f_update
# visit V_{n-1} is paired with label_{n}
def padMatrix(seqs, labels, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
x = np.zeros((maxlen, n_samples, options['inputDimSize'])).astype(config.floatX)
y = np.zeros((maxlen, n_samples, options['numClass'])).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq, lseq) in enumerate(zip(seqs,labels)):
        for xvec, subseq in zip(x[:,idx,:], seq[:-1]): # zip pairs up corresponding elements; the result is as long as the shortest input
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], lseq[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
lengths = np.array(lengths, dtype=config.floatX)
return x, y, mask, lengths
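# (Added worked example, not part of the original code.) padMatrix turns a ragged batch of
# visit sequences into dense arrays. For a toy options dict {'inputDimSize': 4, 'numClass': 4}
# and a single patient seqs = [[[0, 1], [2], [3]]] with labels = seqs, the inputs x multi-hot
# encode the first two visits ([0, 1] and [2]), the targets y encode the *next* visits ([2]
# and [3]), mask marks the two valid timesteps, and lengths == [2.].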
def calculate_cost(test_model, dataset, options):
batchSize = options['batchSize']
n_batches = int(np.ceil(float(len(dataset[0])) / float(batchSize)))
costSum = 0.0
dataCount = 0
for index in range(n_batches):
batchX = dataset[0][index*batchSize:(index+1)*batchSize]
batchY = dataset[1][index*batchSize:(index+1)*batchSize]
x, y, mask, lengths = padMatrix(batchX, batchY, options)
cost = test_model(x, y, mask, lengths)
costSum += cost * len(batchX)
dataCount += len(batchX)
return costSum / dataCount
def calculate_pred(test_model, dataset, options):
x = dataset[0]
y = dataset[1]
batchSize = options['batchSize']
x, y, mask, lengths = padMatrix(x, y, options)
prediction = test_model(x, mask)
return prediction
def print2file(buf, outFile):
outfd = open(outFile, 'a')
outfd.write(buf + '\n')
outfd.close()
# Pair each leaf node with all of the nodes on its path to the root (its ancestors)
def build_tree(treeFile):
treeMap = pickle.load(open(treeFile, 'rb'))
ancestors = np.array(list(treeMap.values())).astype('int32')
ancSize = ancestors.shape[1]
leaves = []
for k in treeMap.keys():
leaves.append([k] * ancSize)
leaves = np.array(leaves).astype('int32')
return leaves, ancestors
"""
treeMap example:
{0: [0, 1670, 942, 943, 945, 951], 1: [1, 1670, 942, 957, 960, 963]}
ancestors = array([[0, 1670, 942, 943, 945, 951], [1, 1670, 942, 957, 960, 963]], dtype=int32)
ancSize = 6
leaves = array([[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]], dtype=int32)
theano.shared(): https://www.tutorialspoint.com/theano/theano_shared_variables.htm
"""
def train_GRAM(
seqFile = 'seqFile.txt',
labelFile = 'labelFile.txt',
treeFile='tree.txt',
embFile='embFile.txt',
outFile='out.txt',
inputDimSize= 100,
numAncestors=100,
embDimSize= 100,
hiddenDimSize=200,
attentionDimSize=200,
max_epochs=100,
L2=0.,
numClass=26679,
batchSize=100,
dropoutRate=0.5,
logEps=1e-8,
verbose=False
):
options = locals().copy()
leavesList = []
ancestorsList = []
    for i in range(5, 0, -1): # CCS codes have at most five ancestors (including the root)
leaves, ancestors = build_tree(treeFile+'.level'+str(i)+'.pk')
sharedLeaves = theano.shared(leaves, name='leaves'+str(i))
sharedAncestors = theano.shared(ancestors, name='ancestors'+str(i))
leavesList.append(sharedLeaves)
ancestorsList.append(sharedAncestors)
    print('Building the model')
params = init_params(options)
tparams = init_tparams(params)
use_noise, x, y, mask, lengths, cost, cost_noreg, y_hat = build_model(tparams, leavesList, ancestorsList, options)
get_cost = theano.function(inputs=[x, y, mask, lengths], outputs=cost_noreg, name='get_cost')
get_prediction = theano.function(inputs=[x, mask], outputs=y_hat, name='get_prediction')
    print('Constructing the optimizer')
grads = T.grad(cost, wrt=list(tparams.values()))
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost)
    print('Loading data')
trainSet, validSet, testSet = load_data(seqFile, labelFile)
n_batches = int(np.ceil(float(len(trainSet[0])) / float(batchSize)))
pickle.dump(trainSet, open('trainSet', 'wb'), -1)
pickle.dump(testSet, open('testSet', 'wb'), -1)
print('Optimization')
bestTrainCost = 0.0
bestValidCost = 100000.0
bestTestCost = 0.0
epochDuration = 0.0
bestEpoch = 0
logFile = outFile + '.log'
for epoch in range(max_epochs):
iteration = 0
costVec = []
startTime = time.time()
for index in random.sample(range(n_batches), n_batches):
use_noise.set_value(1.)
batchX = trainSet[0][index*batchSize:(index+1)*batchSize]
batchY = trainSet[1][index*batchSize:(index+1)*batchSize]
x, y, mask, lengths = padMatrix(batchX, batchY, options)
costValue = f_grad_shared(x, y, mask, lengths)
f_update()
costVec.append(costValue)
if iteration % 100 == 0 and verbose:
buf = 'Epoch:%d, Iteration:%d/%d, Train_Cost:%f' % (epoch, iteration, n_batches, costValue)
print(buf)
iteration += 1
duration = time.time() - startTime
use_noise.set_value(0.)
trainCost = np.mean(costVec)
validCost = calculate_cost(get_cost, validSet, options)
testCost = calculate_cost(get_cost, testSet, options)
buf = 'Epoch:%d, Duration:%f, Train_Cost:%f, Valid_Cost:%f, Test_Cost:%f' % (epoch, duration, trainCost, validCost, testCost)
print(buf)
print2file(buf, logFile)
epochDuration += duration
if validCost < bestValidCost:
bestValidCost = validCost
bestTestCost = testCost
bestTrainCost = trainCost
bestEpoch = epoch
bestParams = tparams
tempParams = unzip(tparams)
np.savez_compressed(outFile + '.' + str(epoch), **tempParams)
buf = 'Best Epoch:%d, Avg_Duration:%f, Train_Cost:%f, Valid_Cost:%f, Test_Cost:%f' % (bestEpoch, epochDuration/max_epochs, bestTrainCost, bestValidCost, bestTestCost)
print(buf)
print2file(buf, logFile)
print('Making predictions')
tparams = bestParams
prediction = calculate_pred(get_prediction, testSet, options)
print(prediction)
pickle.dump(prediction, open('testPred', 'wb'), -1)
def parse_arguments(parser):
parser.add_argument('seq_file', type=str, metavar='<visit_file>', help='The path to the Pickled file containing visit information of patients')
parser.add_argument('label_file', type=str, metavar='<label_file>', help='The path to the Pickled file containing label information of patients')
parser.add_argument('tree_file', type=str, metavar='<tree_file>', help='The path to the Pickled files containing the ancestor information of the input medical codes. Only use the prefix and exclude ".level#.pk".')
parser.add_argument('out_file', metavar='<out_file>', help='The path to the output models. The models will be saved after every epoch')
parser.add_argument('--embed_file', type=str, default='', help='The path to the Pickled file containing the representation vectors of medical codes. If you are not using medical code representations, do not use this option')
parser.add_argument('--embed_size', type=int, default=128, help='The dimension size of the visit embedding. If you are providing your own medical code vectors, this value will be automatically decided. (default value: 128)')
parser.add_argument('--rnn_size', type=int, default=128, help='The dimension size of the hidden layer of the GRU (default value: 128)')
parser.add_argument('--attention_size', type=int, default=128, help='The dimension size of hidden layer of the MLP that generates the attention weights (default value: 128)')
parser.add_argument('--batch_size', type=int, default=100, help='The size of a single mini-batch (default value: 100)')
parser.add_argument('--n_epochs', type=int, default=100, help='The number of training epochs (default value: 100)')
parser.add_argument('--L2', type=float, default=0.001, help='L2 regularization coefficient for all weights except RNN (default value: 0.001)')
parser.add_argument('--dropout_rate', type=float, default=0.5, help='Dropout rate used for the hidden layer of RNN (default value: 0.5)')
parser.add_argument('--log_eps', type=float, default=1e-8, help='A small value to prevent log(0) (default value: 1e-8)')
parser.add_argument('--verbose', action='store_true', help='Print output after every 100 mini-batches (default false)')
args = parser.parse_args()
return args
def calculate_dimSize(seqFile):
seqs = pickle.load(open(seqFile, 'rb'))
codeSet = set()
for patient in seqs:
for visit in patient:
for code in visit:
codeSet.add(code)
return max(codeSet) + 1
def get_rootCode(treeFile):
tree = pickle.load(open(treeFile, 'rb'))
rootCode = list(tree.values())[0][1]
return rootCode
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
inputDimSize = calculate_dimSize(args.seq_file)
numClass = calculate_dimSize(args.label_file)
numAncestors = get_rootCode(args.tree_file+'.level2.pk') - inputDimSize + 1
train_GRAM(
seqFile=args.seq_file,
inputDimSize=inputDimSize,
treeFile=args.tree_file,
numAncestors=numAncestors,
labelFile=args.label_file,
numClass=numClass,
outFile=args.out_file,
embFile=args.embed_file,
embDimSize=args.embed_size,
hiddenDimSize=args.rnn_size,
attentionDimSize=args.attention_size,
batchSize=args.batch_size,
max_epochs=args.n_epochs,
L2=args.L2,
dropoutRate=args.dropout_rate,
logEps=args.log_eps,
verbose=args.verbose
)
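# --- Usage sketch (added for illustration; not part of the original code) ---------------------
# A hedged example of how this script is typically invoked; the script and data file names are
# placeholders, only the positional and optional arguments come from parse_arguments() above:
#
#   python gram.py visits.seqs visits.labels visits.tree output/gram \
#       --embed_size 128 --rnn_size 128 --attention_size 128 --batch_size 100 --n_epochs 100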
|
[
"numpy.load",
"theano.tensor.tensor3",
"numpy.random.seed",
"argparse.ArgumentParser",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"numpy.mean",
"theano.tensor.sqrt",
"theano.tensor.log",
"theano.tensor.concatenate",
"theano.tensor.mean",
"theano.tensor.nnet.softmax",
"numpy.max",
"theano.shared",
"theano.tensor.dot",
"numpy.asarray",
"numpy.random.permutation",
"theano.tensor.matrix",
"numpy.random.uniform",
"theano.function",
"numpy.zeros",
"time.time",
"numpy.array",
"theano.tensor.vector",
"collections.OrderedDict"
] |
[((554, 567), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (565, 567), False, 'from collections import OrderedDict\n'), ((708, 745), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'config.floatX'}), '(data, dtype=config.floatX)\n', (718, 745), True, 'import numpy as np\n'), ((920, 947), 'numpy.load', 'np.load', (["options['embFile']"], {}), "(options['embFile'])\n", (927, 947), True, 'import numpy as np\n'), ((1039, 1052), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1050, 1052), False, 'from collections import OrderedDict\n'), ((1058, 1075), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1072, 1075), True, 'import numpy as np\n'), ((2303, 2316), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2314, 2316), False, 'from collections import OrderedDict\n'), ((3717, 3795), 'theano.tensor.concatenate', 'T.concatenate', (["[tparams['W_emb'][leaves], tparams['W_emb'][ancestors]]"], {'axis': '(2)'}), "([tparams['W_emb'][leaves], tparams['W_emb'][ancestors]], axis=2)\n", (3730, 3795), True, 'import theano.tensor as T\n'), ((3911, 3951), 'theano.tensor.dot', 'T.dot', (['mlpOutput', "tparams['v_attention']"], {}), "(mlpOutput, tparams['v_attention'])\n", (3916, 3951), True, 'import theano.tensor as T\n'), ((3991, 4019), 'theano.tensor.nnet.softmax', 'T.nnet.softmax', (['preAttention'], {}), '(preAttention)\n', (4005, 4019), True, 'import theano.tensor as T\n'), ((4355, 4373), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', (['(123)'], {}), '(123)\n', (4368, 4373), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((4431, 4466), 'theano.tensor.tensor3', 'T.tensor3', (['"""x"""'], {'dtype': 'config.floatX'}), "('x', dtype=config.floatX)\n", (4440, 4466), True, 'import theano.tensor as T\n'), ((4475, 4510), 'theano.tensor.tensor3', 'T.tensor3', (['"""y"""'], {'dtype': 'config.floatX'}), "('y', dtype=config.floatX)\n", (4484, 4510), True, 'import theano.tensor as T\n'), ((4522, 4559), 'theano.tensor.matrix', 'T.matrix', (['"""mask"""'], {'dtype': 'config.floatX'}), "('mask', dtype=config.floatX)\n", (4530, 4559), True, 'import theano.tensor as T\n'), ((4574, 4614), 'theano.tensor.vector', 'T.vector', (['"""lengths"""'], {'dtype': 'config.floatX'}), "('lengths', dtype=config.floatX)\n", (4582, 4614), True, 'import theano.tensor as T\n'), ((4988, 5018), 'theano.tensor.concatenate', 'T.concatenate', (['embList'], {'axis': '(0)'}), '(embList, axis=0)\n', (5001, 5018), True, 'import theano.tensor as T\n'), ((5449, 5477), 'theano.tensor.mean', 'T.mean', (['output_loglikelihood'], {}), '(output_loglikelihood)\n', (5455, 5477), True, 'import theano.tensor as T\n'), ((5980, 5997), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5994, 5997), True, 'import numpy as np\n'), ((6035, 6066), 'numpy.random.permutation', 'np.random.permutation', (['dataSize'], {}), '(dataSize)\n', (6056, 6066), True, 'import numpy as np\n'), ((8370, 8472), 'theano.function', 'theano.function', (['[x, y, mask, lengths]', 'cost'], {'updates': '(zgup + rg2up)', 'name': '"""adadelta_f_grad_shared"""'}), "([x, y, mask, lengths], cost, updates=zgup + rg2up, name=\n 'adadelta_f_grad_shared')\n", (8385, 8472), False, 'import theano\n'), ((8773, 8878), 'theano.function', 'theano.function', (['[]', '[]'], {'updates': '(ru2up + param_up)', 'on_unused_input': '"""ignore"""', 'name': '"""adadelta_f_update"""'}), "([], [], updates=ru2up + param_up, on_unused_input='ignore',\n name='adadelta_f_update')\n", (8788, 8878), False, 
'import theano\n'), ((9066, 9081), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (9072, 9081), True, 'import numpy as np\n'), ((9636, 9674), 'numpy.array', 'np.array', (['lengths'], {'dtype': 'config.floatX'}), '(lengths, dtype=config.floatX)\n', (9644, 9674), True, 'import numpy as np\n'), ((12442, 12529), 'theano.function', 'theano.function', ([], {'inputs': '[x, y, mask, lengths]', 'outputs': 'cost_noreg', 'name': '"""get_cost"""'}), "(inputs=[x, y, mask, lengths], outputs=cost_noreg, name=\n 'get_cost')\n", (12457, 12529), False, 'import theano\n'), ((12546, 12617), 'theano.function', 'theano.function', ([], {'inputs': '[x, mask]', 'outputs': 'y_hat', 'name': '"""get_prediction"""'}), "(inputs=[x, mask], outputs=y_hat, name='get_prediction')\n", (12561, 12617), False, 'import theano\n'), ((17930, 17955), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17953, 17955), False, 'import sys, random, time, argparse\n'), ((2378, 2408), 'theano.shared', 'theano.shared', (['value'], {'name': 'key'}), '(value, name=key)\n', (2391, 2408), False, 'import theano\n'), ((2992, 3007), 'theano.tensor.dot', 'T.dot', (['h', 'U_gru'], {}), '(h, U_gru)\n', (2997, 3007), True, 'import theano.tensor as T\n'), ((3350, 3378), 'theano.tensor.dot', 'T.dot', (['emb', "tparams['W_gru']"], {}), "(emb, tparams['W_gru'])\n", (3355, 3378), True, 'import theano.tensor as T\n'), ((5050, 5063), 'theano.tensor.dot', 'T.dot', (['x', 'emb'], {}), '(x, emb)\n', (5055, 5063), True, 'import theano.tensor as T\n'), ((13349, 13360), 'time.time', 'time.time', ([], {}), '()\n', (13358, 13360), False, 'import sys, random, time, argparse\n'), ((14110, 14126), 'numpy.mean', 'np.mean', (['costVec'], {}), '(costVec)\n', (14117, 14126), True, 'import numpy as np\n'), ((815, 859), 'numpy.random.uniform', 'np.random.uniform', (['left', 'right', '(dim1, dim2)'], {}), '(left, right, (dim1, dim2))\n', (832, 859), True, 'import numpy as np\n'), ((1748, 1774), 'numpy.zeros', 'np.zeros', (['attentionDimSize'], {}), '(attentionDimSize)\n', (1756, 1774), True, 'import numpy as np\n'), ((1825, 1871), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', 'attentionDimSize'], {}), '(-0.1, 0.1, attentionDimSize)\n', (1842, 1871), True, 'import numpy as np\n'), ((2058, 2085), 'numpy.zeros', 'np.zeros', (['(3 * hiddenDimSize)'], {}), '(3 * hiddenDimSize)\n', (2066, 2085), True, 'import numpy as np\n'), ((2202, 2220), 'numpy.zeros', 'np.zeros', (['numClass'], {}), '(numClass)\n', (2210, 2220), True, 'import numpy as np\n'), ((3819, 3864), 'theano.tensor.dot', 'T.dot', (['attentionInput', "tparams['W_attention']"], {}), "(attentionInput, tparams['W_attention'])\n", (3824, 3864), True, 'import theano.tensor as T\n'), ((4095, 4126), 'theano.tensor.dot', 'T.dot', (['emb', "tparams['W_output']"], {}), "(emb, tparams['W_output'])\n", (4100, 4126), True, 'import theano.tensor as T\n'), ((9091, 9145), 'numpy.zeros', 'np.zeros', (["(maxlen, n_samples, options['inputDimSize'])"], {}), "((maxlen, n_samples, options['inputDimSize']))\n", (9099, 9145), True, 'import numpy as np\n'), ((9177, 9227), 'numpy.zeros', 'np.zeros', (["(maxlen, n_samples, options['numClass'])"], {}), "((maxlen, n_samples, options['numClass']))\n", (9185, 9227), True, 'import numpy as np\n'), ((9262, 9291), 'numpy.zeros', 'np.zeros', (['(maxlen, n_samples)'], {}), '((maxlen, n_samples))\n', (9270, 9291), True, 'import numpy as np\n'), ((10914, 10930), 'numpy.array', 'np.array', (['leaves'], {}), '(leaves)\n', (10922, 10930), True, 'import numpy 
as np\n'), ((14034, 14045), 'time.time', 'time.time', ([], {}), '()\n', (14043, 14045), False, 'import sys, random, time, argparse\n'), ((5294, 5315), 'theano.tensor.log', 'T.log', (['(y_hat + logEps)'], {}), '(y_hat + logEps)\n', (5299, 5315), True, 'import theano.tensor as T\n'), ((5329, 5356), 'theano.tensor.log', 'T.log', (['(1.0 - y_hat + logEps)'], {}), '(1.0 - y_hat + logEps)\n', (5334, 5356), True, 'import theano.tensor as T\n'), ((8504, 8523), 'theano.tensor.sqrt', 'T.sqrt', (['(rg2 + 1e-06)'], {}), '(rg2 + 1e-06)\n', (8510, 8523), True, 'import theano.tensor as T\n'), ((8483, 8502), 'theano.tensor.sqrt', 'T.sqrt', (['(ru2 + 1e-06)'], {}), '(ru2 + 1e-06)\n', (8489, 8502), True, 'import theano.tensor as T\n')]
|
# Wrapper class around hnswlib
import hnswlib
import numpy as np
import threading
import pickle
from .typings import VectorList
class Hnswlib():
def __init__(self, space, dim):
self.index = hnswlib.Index(space, dim)
self.lock = threading.Lock()
self.dict_labels = {}
self.cur_ind = 0
def init_index(self, max_elements: int, ef_construction = 200, M = 16):
self.index.init_index(max_elements = max_elements, ef_construction = ef_construction, M = M)
def get_max_elements(self):
return self.index.get_max_elements()
def get_current_count(self):
return self.index.get_current_count()
def add_items(self, data: VectorList, ids=None):
if ids is not None:
assert len(data) == len(ids)
num_added = len(data)
with self.lock:
start = self.cur_ind
self.cur_ind += num_added
int_labels = []
if ids is not None:
for dl in ids:
int_labels.append(start)
self.dict_labels[start] = dl
start += 1
else:
for _ in range(len(data)):
int_labels.append(start)
self.dict_labels[start] = start
start += 1
self.index.add_items(data=data, ids=np.asarray(int_labels))
def set_ef(self, ef: int):
self.index.set_ef(ef)
def load_index(self, path: str, max_elements=0):
self.index.load_index(path, max_elements=max_elements)
with open(path + ".pkl", "rb") as f:
self.cur_ind, self.dict_labels = pickle.load(f)
def save_index(self, path: str):
self.index.save_index(path)
with open(path + ".pkl", "wb") as f:
pickle.dump((self.cur_ind, self.dict_labels), f)
def set_num_threads(self, num_threads:int):
self.index.set_num_threads(num_threads)
def knn_query(self, data: VectorList, k=1):
labels_int, distances = self.index.knn_query(data=data, k=k)
labels = []
for li in labels_int:
line = []
for l in li:
line.append(self.dict_labels[l])
labels.append(line)
return labels, distances
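# Hedged usage sketch (not part of the original wrapper): builds a small cosine
# index over random vectors keyed by string ids and queries it. The space name,
# sizes and id format are illustrative assumptions; running this directly also
# assumes the package-relative import of VectorList resolves.
if __name__ == "__main__":
    dim = 16
    index = Hnswlib(space="cosine", dim=dim)
    index.init_index(max_elements=100, ef_construction=200, M=16)
    vectors = np.float32(np.random.random((100, dim)))
    index.add_items(vectors, ids=["doc-%d" % i for i in range(100)])
    index.set_ef(50)
    labels, distances = index.knn_query(vectors[:3], k=2)
    print(labels, distances)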
|
[
"pickle.dump",
"numpy.asarray",
"threading.Lock",
"pickle.load",
"hnswlib.Index"
] |
[((203, 228), 'hnswlib.Index', 'hnswlib.Index', (['space', 'dim'], {}), '(space, dim)\n', (216, 228), False, 'import hnswlib\n'), ((249, 265), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (263, 265), False, 'import threading\n'), ((1604, 1618), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1615, 1618), False, 'import pickle\n'), ((1750, 1798), 'pickle.dump', 'pickle.dump', (['(self.cur_ind, self.dict_labels)', 'f'], {}), '((self.cur_ind, self.dict_labels), f)\n', (1761, 1798), False, 'import pickle\n'), ((1311, 1333), 'numpy.asarray', 'np.asarray', (['int_labels'], {}), '(int_labels)\n', (1321, 1333), True, 'import numpy as np\n')]
|
import argparse
import os
import yaml
import sys
import numpy as np
import time
import json
from .eval_np import PanopticEval
from .config import global_cfg
need_nuscenes_remap = False
if global_cfg.DATA_CONFIG.DATASET_NAME == 'SemanticKitti':
DATA = yaml.safe_load(open('semantic-kitti.yaml', 'r'))
# get number of interest classes, and the label mappings
class_remap = DATA["learning_map"]
class_inv_remap = DATA["learning_map_inv"]
class_ignore = DATA["learning_ignore"]
nr_classes = len(class_inv_remap)
class_strings = DATA["labels"]
# make lookup table for mapping
maxkey = max(class_remap.keys())
# +100 hack making lut bigger just in case there are unknown labels
class_lut = np.zeros((maxkey + 100), dtype=np.int32)
class_lut[list(class_remap.keys())] = list(class_remap.values())
ignore_class = [cl for cl, ignored in class_ignore.items() if ignored]
class_inv_lut = np.zeros((20), dtype=np.int32)
class_inv_lut[list(class_inv_remap.keys())] = list(class_inv_remap.values())
things = ['car', 'truck', 'bicycle', 'motorcycle', 'other-vehicle', 'person', 'bicyclist', 'motorcyclist']
stuff = [
'road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole',
'traffic-sign'
]
all_classes = things + stuff
valid_xentropy_ids = [1, 4, 2, 3, 5, 6, 7, 8]
else:
raise NotImplementedError
def init_eval(min_points = 50):
print("New evaluator with min_points of {}".format(min_points))
class_evaluator = PanopticEval(nr_classes, None, ignore_class, min_points = min_points)
return class_evaluator
def eval_one_scan(class_evaluator, gt_sem, gt_ins, pred_sem, pred_ins):
class_evaluator.addBatch(pred_sem, pred_ins, gt_sem, gt_ins)
def eval_one_scan_w_fname(class_evaluator, gt_sem, gt_ins, pred_sem, pred_ins, fname):
class_evaluator.addBatch_w_fname(pred_sem, pred_ins, gt_sem, gt_ins, fname)
def printResults(class_evaluator, logger=None, sem_only=False):
class_PQ, class_SQ, class_RQ, class_all_PQ, class_all_SQ, class_all_RQ = class_evaluator.getPQ()
class_IoU, class_all_IoU = class_evaluator.getSemIoU()
# now make a nice dictionary
output_dict = {}
# make python variables
class_PQ = class_PQ.item()
class_SQ = class_SQ.item()
class_RQ = class_RQ.item()
class_all_PQ = class_all_PQ.flatten().tolist()
class_all_SQ = class_all_SQ.flatten().tolist()
class_all_RQ = class_all_RQ.flatten().tolist()
class_IoU = class_IoU.item()
class_all_IoU = class_all_IoU.flatten().tolist()
output_dict["all"] = {}
output_dict["all"]["PQ"] = class_PQ
output_dict["all"]["SQ"] = class_SQ
output_dict["all"]["RQ"] = class_RQ
output_dict["all"]["IoU"] = class_IoU
classwise_tables = {}
for idx, (pq, rq, sq, iou) in enumerate(zip(class_all_PQ, class_all_RQ, class_all_SQ, class_all_IoU)):
class_str = class_strings[class_inv_remap[idx]]
output_dict[class_str] = {}
output_dict[class_str]["PQ"] = pq
output_dict[class_str]["SQ"] = sq
output_dict[class_str]["RQ"] = rq
output_dict[class_str]["IoU"] = iou
PQ_all = np.mean([float(output_dict[c]["PQ"]) for c in all_classes])
PQ_dagger = np.mean([float(output_dict[c]["PQ"]) for c in things] + [float(output_dict[c]["IoU"]) for c in stuff])
RQ_all = np.mean([float(output_dict[c]["RQ"]) for c in all_classes])
SQ_all = np.mean([float(output_dict[c]["SQ"]) for c in all_classes])
PQ_things = np.mean([float(output_dict[c]["PQ"]) for c in things])
RQ_things = np.mean([float(output_dict[c]["RQ"]) for c in things])
SQ_things = np.mean([float(output_dict[c]["SQ"]) for c in things])
PQ_stuff = np.mean([float(output_dict[c]["PQ"]) for c in stuff])
RQ_stuff = np.mean([float(output_dict[c]["RQ"]) for c in stuff])
SQ_stuff = np.mean([float(output_dict[c]["SQ"]) for c in stuff])
mIoU = output_dict["all"]["IoU"]
codalab_output = {}
codalab_output["pq_mean"] = float(PQ_all)
codalab_output["pq_dagger"] = float(PQ_dagger)
codalab_output["sq_mean"] = float(SQ_all)
codalab_output["rq_mean"] = float(RQ_all)
codalab_output["iou_mean"] = float(mIoU)
codalab_output["pq_stuff"] = float(PQ_stuff)
codalab_output["rq_stuff"] = float(RQ_stuff)
codalab_output["sq_stuff"] = float(SQ_stuff)
codalab_output["pq_things"] = float(PQ_things)
codalab_output["rq_things"] = float(RQ_things)
codalab_output["sq_things"] = float(SQ_things)
key_list = [
"pq_mean",
"pq_dagger",
"sq_mean",
"rq_mean",
"iou_mean",
"pq_stuff",
"rq_stuff",
"sq_stuff",
"pq_things",
"rq_things",
"sq_things"
]
    if sem_only and logger is not None:
evaluated_fnames = class_evaluator.evaluated_fnames
logger.info('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
logger.info('| | IoU | PQ | RQ | SQ |')
for k, v in output_dict.items():
logger.info('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['IoU'], v['PQ'], v['RQ'], v['SQ']
))
return codalab_output
if sem_only and logger is None:
evaluated_fnames = class_evaluator.evaluated_fnames
print('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
print('| | IoU | PQ | RQ | SQ |')
for k, v in output_dict.items():
print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['IoU'], v['PQ'], v['RQ'], v['SQ']
))
return codalab_output
    if logger is not None:
evaluated_fnames = class_evaluator.evaluated_fnames
logger.info('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
logger.info('| | PQ | RQ | SQ | IoU |')
for k, v in output_dict.items():
logger.info('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['PQ'], v['RQ'], v['SQ'], v['IoU']
))
logger.info('True Positive: ')
logger.info('\t|\t'.join([str(x) for x in class_evaluator.pan_tp]))
logger.info('False Positive: ')
logger.info('\t|\t'.join([str(x) for x in class_evaluator.pan_fp]))
logger.info('False Negative: ')
logger.info('\t|\t'.join([str(x) for x in class_evaluator.pan_fn]))
if logger is None:
evaluated_fnames = class_evaluator.evaluated_fnames
print('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames), len(evaluated_fnames) - len(set(evaluated_fnames))))
print('| | PQ | RQ | SQ | IoU |')
for k, v in output_dict.items():
print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(
k.ljust(8)[-8:], v['PQ'], v['RQ'], v['SQ'], v['IoU']
))
print('True Positive: ')
print('\t|\t'.join([str(x) for x in class_evaluator.pan_tp]))
print('False Positive: ')
print('\t|\t'.join([str(x) for x in class_evaluator.pan_fp]))
print('False Negative: ')
print('\t|\t'.join([str(x) for x in class_evaluator.pan_fn]))
for key in key_list:
        if logger is not None:
logger.info("{}:\t{}".format(key, codalab_output[key]))
else:
print("{}:\t{}".format(key, codalab_output[key]))
return codalab_output
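# Hedged usage sketch (not part of the original module): one evaluator is
# created, fed ground-truth and predicted semantic/instance labels per scan,
# then asked for the aggregated PQ/SQ/RQ/IoU table. The random arrays are
# placeholders for per-point SemanticKITTI labels; running this directly also
# assumes semantic-kitti.yaml sits next to the script so the module imports.
if __name__ == "__main__":
    evaluator = init_eval(min_points=50)
    gt_sem = np.random.randint(0, nr_classes, 10000)
    gt_ins = np.random.randint(0, 20, 10000)
    eval_one_scan(evaluator, gt_sem, gt_ins, gt_sem.copy(), gt_ins.copy())
    results = printResults(evaluator)
    print(results["pq_mean"], results["iou_mean"])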
|
[
"numpy.zeros"
] |
[((729, 767), 'numpy.zeros', 'np.zeros', (['(maxkey + 100)'], {'dtype': 'np.int32'}), '(maxkey + 100, dtype=np.int32)\n', (737, 767), True, 'import numpy as np\n'), ((935, 963), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.int32'}), '(20, dtype=np.int32)\n', (943, 963), True, 'import numpy as np\n')]
|
import argparse
import os
import pickle
import time
# import warnings
import numpy as np
from power_planner.utils.utils import get_distance_surface
from csv import writer
import warnings
import matplotlib.pyplot as plt
# utils imports
from power_planner.utils.utils_ksp import KspUtils
from power_planner.utils.utils_costs import CostUtils
from power_planner.evaluate_path import save_path_cost_csv
from power_planner import graphs
def logging(
ID, graph, path, path_costs, cfg, N_EDGES, time_pipeline, comp_path=None
):
if comp_path is None:
max_eucl = 0
mean_eucl = 0
else:
# compute path distances and multiply with resolution to get meters
max_eucl = (
KspUtils.path_distance(path, comp_path, mode="eucl_max") *
cfg.scale * 10
)
mean_eucl = (
KspUtils.path_distance(path, comp_path, mode="eucl_mean") *
cfg.scale * 10
)
# SAVE timing test
angle_cost = round(np.sum(CostUtils.compute_angle_costs(path)), 2)
n_categories = len(cfg.class_weights)
path_costs = np.asarray(path_costs)
summed_costs = np.around(np.sum(path_costs[:, -n_categories:], axis=0), 2)
weighted_sum = round(np.dot(summed_costs, cfg.class_weights), 2)
n_pixels = np.sum(belgium_inst_corr > 0)
# csv_header = ["ID", "instance", "resolution", "graph", "number pixels"
# "space edges", "overall time",
# "time vertex adding", "time edge adding", "time shortest path",
# "angle cost", "category costs", "sum of costs"]
logs = [
ID, INST, SCALE_PARAM * 10, n_pixels, graphtype, graph.n_nodes,
N_EDGES, time_pipeline, graph.time_logs["add_nodes"],
graph.time_logs["add_all_edges"], graph.time_logs["shortest_path"],
cfg.angle_weight, angle_cost, summed_costs, weighted_sum, mean_eucl,
max_eucl
]
with open(cfg.csv_times, 'a+', newline='') as write_obj:
# Create a writer object from csv module
csv_writer = writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(logs)
parser = argparse.ArgumentParser()
parser.add_argument('-cluster', action='store_true')
parser.add_argument('-i', '--instance', type=str, default="ch")
parser.add_argument('-s', '--scale', help="resolution", type=int, default=1)
args = parser.parse_args()
# define out save name
# ID = "results_" + args.instance # str(round(time.time() / 60))[-5:]
OUT_DIR = os.path.join("..", "outputs")
SCALE_PARAM = args.scale
SCENARIO = 1
INST = args.instance
height_resistance_path = None # "../data/Instance_CH.nosync/dtm_10m.tif"
PIPELINE = [(1, 0)]
USE_KSP = 0
GRAPH_TYPE = graphs.ImplicitLG
# LineGraph, WeightedGraph, RandomWeightedGraph, RandomLineGraph, ImplicitLG
# ImplicitLgKSP, WeightedKSP
print("graph type:", GRAPH_TYPE)
# summarize: mean/max/min, remove: all/surrounding, sample: simple/watershed
NOTES = "None" # "mean-all-simple"
# define IO paths
PATH_FILES = "data"
# PIPE = [(MAX_EDGES, D1), (MAX_EDGES, D2), (MAX_EDGES, 0)]
PIPELINES = [[1], [2, 1], [4, 2, 1], [3, 1]]
print("PIPELINES:", PIPELINES)
mult_factor = 13
random = 0
graph_names = ["Normal graph", "Implicit line graph", "Line graph"]
for INST, SCALE_PARAM in zip(["belgium", "de", "ch"], [1, 2, 2]):
print("")
print("---------------------------------------------------")
print(INST, SCALE_PARAM)
# LOAD DATA
IOPATH = os.path.join(
PATH_FILES, f"{INST}_data_{SCENARIO}_{SCALE_PARAM}.dat"
)
with open(IOPATH, "rb") as infile:
data = pickle.load(infile)
(
belgium_inst, belgium_edge_inst, belgium_inst_corr, belgium_config
) = data
cfg = belgium_config.graph
start_inds = belgium_config.graph.start_inds
dest_inds = belgium_config.graph.dest_inds
# iterate over pipelines
ground_truth_paths = [[], []]
for pipe_kind, PIPE in enumerate(PIPELINES):
ID = str(PIPE)
print("------------- NEW PIPELINE ", PIPE, "-----------------------")
for g, GRAPH in enumerate([graphs.WeightedGraph, graphs.ImplicitLG]):
print("")
print(GRAPH)
print("")
graphtype = graph_names[g]
graph = GRAPH(belgium_inst, belgium_inst_corr, verbose=False)
corridor = np.ones(belgium_inst_corr.shape) * 0.5
tic = time.time()
actual_pipe = []
edge_numbers = []
for pipe_step, factor in enumerate(PIPE):
if random:
factor = 1 - (1 / factor**2)
graph.set_corridor(
corridor,
cfg.start_inds,
cfg.dest_inds,
factor_or_n_edges=factor,
sample_method="simple"
)
# main path computation
path_gt, path_costs_gt, cost_sum_wg = graph.single_sp(
**vars(cfg)
)
edge_numbers.append(graph.n_edges)
if factor == 1 or factor == 0:
actual_pipe.append((1, 0))
break
corridor = get_distance_surface(
graph.hard_constraints.shape,
[path_gt],
mode="dilation",
n_dilate=10 # dist
)
# estimated edges are pixels times neighbors
# divided by resolution squared
estimated_edges_10 = len(np.where(corridor > 0)[0]) * len(
graph.shifts
) / ((PIPE[pipe_step + 1])**2)
now_dist = (mult_factor * graph.n_edges) / estimated_edges_10
# print("reduce corridor:", dist)
corridor = get_distance_surface(
graph.hard_constraints.shape, [path_gt],
mode="dilation",
n_dilate=int(np.ceil(now_dist))
)
# print(
# "estimated with distance ", int(np.ceil(now_dist)),
# len(np.where(corridor > 0)[0]) * len(graph.shifts) /
# ((PIPE[pipe_step + 1])**2)
# )
actual_pipe.append([factor, int(np.ceil(now_dist))])
graph.remove_vertices(corridor)
time_pipeline = time.time() - tic
print("OVERALL TIME:", time_pipeline)
nr_edges = np.max(edge_numbers)
if pipe_kind == 0:
ground_truth_paths[g] = path_gt
path_bl = ground_truth_paths[g]
logging(
ID,
graph,
path_gt,
path_costs_gt,
cfg,
nr_edges,
time_pipeline,
comp_path=path_bl
)
|
[
"numpy.sum",
"argparse.ArgumentParser",
"csv.writer",
"numpy.ceil",
"numpy.asarray",
"power_planner.utils.utils_costs.CostUtils.compute_angle_costs",
"numpy.ones",
"power_planner.utils.utils_ksp.KspUtils.path_distance",
"time.time",
"numpy.max",
"pickle.load",
"numpy.where",
"numpy.dot",
"power_planner.utils.utils.get_distance_surface",
"os.path.join"
] |
[((2127, 2152), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2150, 2152), False, 'import argparse\n'), ((2479, 2508), 'os.path.join', 'os.path.join', (['""".."""', '"""outputs"""'], {}), "('..', 'outputs')\n", (2491, 2508), False, 'import os\n'), ((1095, 1117), 'numpy.asarray', 'np.asarray', (['path_costs'], {}), '(path_costs)\n', (1105, 1117), True, 'import numpy as np\n'), ((1281, 1310), 'numpy.sum', 'np.sum', (['(belgium_inst_corr > 0)'], {}), '(belgium_inst_corr > 0)\n', (1287, 1310), True, 'import numpy as np\n'), ((3436, 3505), 'os.path.join', 'os.path.join', (['PATH_FILES', 'f"""{INST}_data_{SCENARIO}_{SCALE_PARAM}.dat"""'], {}), "(PATH_FILES, f'{INST}_data_{SCENARIO}_{SCALE_PARAM}.dat')\n", (3448, 3505), False, 'import os\n'), ((1147, 1192), 'numpy.sum', 'np.sum', (['path_costs[:, -n_categories:]'], {'axis': '(0)'}), '(path_costs[:, -n_categories:], axis=0)\n', (1153, 1192), True, 'import numpy as np\n'), ((1222, 1261), 'numpy.dot', 'np.dot', (['summed_costs', 'cfg.class_weights'], {}), '(summed_costs, cfg.class_weights)\n', (1228, 1261), True, 'import numpy as np\n'), ((2005, 2022), 'csv.writer', 'writer', (['write_obj'], {}), '(write_obj)\n', (2011, 2022), False, 'from csv import writer\n'), ((3574, 3593), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (3585, 3593), False, 'import pickle\n'), ((995, 1030), 'power_planner.utils.utils_costs.CostUtils.compute_angle_costs', 'CostUtils.compute_angle_costs', (['path'], {}), '(path)\n', (1024, 1030), False, 'from power_planner.utils.utils_costs import CostUtils\n'), ((4396, 4407), 'time.time', 'time.time', ([], {}), '()\n', (4405, 4407), False, 'import time\n'), ((6491, 6511), 'numpy.max', 'np.max', (['edge_numbers'], {}), '(edge_numbers)\n', (6497, 6511), True, 'import numpy as np\n'), ((715, 771), 'power_planner.utils.utils_ksp.KspUtils.path_distance', 'KspUtils.path_distance', (['path', 'comp_path'], {'mode': '"""eucl_max"""'}), "(path, comp_path, mode='eucl_max')\n", (737, 771), False, 'from power_planner.utils.utils_ksp import KspUtils\n'), ((845, 902), 'power_planner.utils.utils_ksp.KspUtils.path_distance', 'KspUtils.path_distance', (['path', 'comp_path'], {'mode': '"""eucl_mean"""'}), "(path, comp_path, mode='eucl_mean')\n", (867, 902), False, 'from power_planner.utils.utils_ksp import KspUtils\n'), ((4338, 4370), 'numpy.ones', 'np.ones', (['belgium_inst_corr.shape'], {}), '(belgium_inst_corr.shape)\n', (4345, 4370), True, 'import numpy as np\n'), ((5203, 5299), 'power_planner.utils.utils.get_distance_surface', 'get_distance_surface', (['graph.hard_constraints.shape', '[path_gt]'], {'mode': '"""dilation"""', 'n_dilate': '(10)'}), "(graph.hard_constraints.shape, [path_gt], mode=\n 'dilation', n_dilate=10)\n", (5223, 5299), False, 'from power_planner.utils.utils import get_distance_surface\n'), ((6399, 6410), 'time.time', 'time.time', ([], {}), '()\n', (6408, 6410), False, 'import time\n'), ((5973, 5990), 'numpy.ceil', 'np.ceil', (['now_dist'], {}), '(now_dist)\n', (5980, 5990), True, 'import numpy as np\n'), ((6301, 6318), 'numpy.ceil', 'np.ceil', (['now_dist'], {}), '(now_dist)\n', (6308, 6318), True, 'import numpy as np\n'), ((5551, 5573), 'numpy.where', 'np.where', (['(corridor > 0)'], {}), '(corridor > 0)\n', (5559, 5573), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import logging
import aiohttp
import asyncio
from tqdm.asyncio import tqdm_asyncio
from tqdm.contrib.logging import logging_redirect_tqdm
import pandas as pd
import numpy as np
import time
import datetime as dt
from typing import Collection, Dict, List, Optional, Tuple, Union
from yahoo_finance import _download_single_ticker_chart_data, download_ticker_sector_industry
logger = logging.getLogger(__name__)
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
async def download_tickers_sector_industry(tickers: List[str]) -> pd.DataFrame:
async with aiohttp.ClientSession(headers=HEADERS) as session:
print("\nDownloading stock industry and sector")
with logging_redirect_tqdm():
tickers_info = await tqdm_asyncio.gather(
*[download_ticker_sector_industry(session, ticker) for ticker in tickers]
)
if None in tickers_info:
errored_tickers = [ticker for ticker, ticker_info in zip(tickers, tickers_info) if ticker_info is None]
tickers_info = [ticker_info for ticker_info in tickers_info if ticker_info is not None]
print(f"Out of {len(tickers)} tickers missing info, we could get {len(tickers_info)}")
print(f"Couldn't get info for the following {len(errored_tickers)}: {', '.join(errored_tickers)}")
return pd.DataFrame(tickers_info, columns=["SYMBOL", "SECTOR", "INDUSTRY"])
async def download_tickers_quotes(
tickers: List[str], start_date: int, end_date: int, interval: str
) -> Tuple[pd.DataFrame, Dict]:
"""Download quotes and their currencies for all the specified tickers in the specified time window.
Parameters
----------
tickers : List[str]
The list of tickers to download data for
start_date : int
The start date in POSIX format.
end_date : int
The end date in POSIX format.
interval : str
The interval between each data point (e.g. "1d")
Returns
-------
    Tuple[pd.DataFrame, Dict]
        A tuple containing the quotes DataFrame and a dict mapping each ticker to its currency.
"""
async with aiohttp.ClientSession(headers=HEADERS) as session:
print("\nDownloading stock quotes")
with logging_redirect_tqdm():
tickers_chart_data = await tqdm_asyncio.gather(
*[
_download_single_ticker_chart_data(session, ticker, start_date, end_date, interval)
for ticker in tickers
]
)
if None in tickers_chart_data:
errored_tickers = [ticker for ticker, ticker_info in zip(tickers, tickers_chart_data) if ticker_info is None]
tickers_chart_data = [t for t in tickers_chart_data if t is not None]
print(f"Out of {len(tickers)} tickers, we could get quotes for {len(tickers_chart_data)}")
print(f"Couldn't get quotes for: {', '.join(errored_tickers)}")
quotes = {ticker_dict["ticker"]: ticker_dict["quotes"] for ticker_dict in tickers_chart_data}
currencies = {ticker_dict["ticker"]: ticker_dict["currency"] for ticker_dict in tickers_chart_data}
return pd.concat(quotes, axis="columns", sort=True), currencies
def extract_ticker_list(tickers: Union[Collection[str], str]) -> List[str]:
if isinstance(tickers, (list, set, tuple)):
pass
elif isinstance(tickers, str):
# Replacing commas by spaces helps removing excess spaces between commas if any
tickers = tickers.replace(",", " ").split()
else:
raise ValueError("tickers must be a str consisting of a comma separated list of tickers or a list of tickers")
return list(set([ticker.upper() for ticker in tickers]))
def parse_start_end_date(
start_date: Optional[str] = None, end_date: Optional[str] = None, default_start_days_ago=365
) -> Tuple[int, int]:
end_date = int(time.time()) if end_date is None else int(dt.datetime.strptime(end_date, "%Y-%m-%d").timestamp())
start_date = (
        int((dt.datetime.today() - dt.timedelta(default_start_days_ago)).timestamp())
if start_date is None
else int(dt.datetime.strptime(start_date, "%Y-%m-%d").timestamp())
)
return start_date, end_date
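# Hedged worked note (not from the original module): parse_start_end_date maps
# "%Y-%m-%d" strings to POSIX seconds in the local timezone, so e.g.
# parse_start_end_date("2022-01-03", "2022-01-04") returns the timestamps of
# local midnight on those two dates; with both arguments left as None the
# window covers roughly the trailing year up to "now".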
def download_tickers_info(
tickers: list, start_date: Optional[str] = None, end_date: Optional[str] = None, interval: str = "1d"
) -> dict:
"""
Download historical data for tickers in the list.
Parameters
----------
tickers: list
Tickers for which to download historical information.
start: str or int
Start download data from this date.
end: str or int
End download data at this date.
interval: str
Frequency between data.
Returns
-------
data: dict
Dictionary including the following keys:
- tickers: list of tickers
        - dates: DatetimeIndex of the quote dates;
        - price: array of adjusted closing prices, shape=(num stocks, length period);
        - volume: array of volumes, shape=(num stocks, length period);
        - currencies: list with the currency of each stock;
        - exchange_rates: dictionary of exchange-rate series to the default currency;
        - default_currency: the most common currency among the stocks;
        - sectors: dictionary of stock sector for each ticker;
        - industries: dictionary of stock industry for each ticker.
"""
logger.info(f"Downloading data for {len(tickers)} tickers")
tickers = extract_ticker_list(tickers)
stock_info_filename = "stock_info.csv"
try:
stock_info_df = pd.read_csv(stock_info_filename)
logger.info(f"Reading stock info found in file '{stock_info_filename}'")
except FileNotFoundError:
# Creating an empty dataframe
stock_info_columns = ["SYMBOL", "CURRENCY", "SECTOR", "INDUSTRY"]
stock_info_df = pd.DataFrame(columns=stock_info_columns)
# Downloading stock quotes and currencies
start_date, end_date = parse_start_end_date(start_date, end_date)
stocks_quotes_df, currencies = asyncio.run(download_tickers_quotes(tickers, start_date, end_date, interval))
# Remove tickers with excess null values
stocks_quotes_df = stocks_quotes_df.loc[:, (stocks_quotes_df.isnull().mean() < 0.33)]
assert stocks_quotes_df.shape[0] > 0, Exception("No symbol with full information is available.")
# Fill in null values
stocks_quotes_df = stocks_quotes_df.fillna(method="bfill").fillna(method="ffill").drop_duplicates()
final_list_tickers = stocks_quotes_df.columns.get_level_values(0).unique()
failed_to_get_tickers_quotes = [ticker for ticker in tickers if ticker not in final_list_tickers]
if len(failed_to_get_tickers_quotes) > 0:
print(
f"\nRemoving {failed_to_get_tickers_quotes} from list of symbols because we could not collect complete quotes."
)
# Downloading missing stocks info
tickers_already_fetched_info = stock_info_df["SYMBOL"].values
tickers_missing_info = [ticker for ticker in tickers if ticker not in tickers_already_fetched_info]
if len(tickers_missing_info) > 0:
missing_tickers_info_df = asyncio.run(download_tickers_sector_industry(tickers_missing_info))
missing_tickers_info_df["CURRENCY"] = missing_tickers_info_df["SYMBOL"].apply(currencies.get)
stock_info_df = pd.concat([stock_info_df, missing_tickers_info_df])
stock_info_df.to_csv(stock_info_filename, index=False)
# Taking the quote currency as the one that appears the most in the data
default_currency = stock_info_df["CURRENCY"].mode()[0]
# Downloading the exchange rate between the default currency and all the others in the data
currencies = stock_info_df["CURRENCY"].to_list()
exchange_rates = get_exchange_rates(
from_currencies=stock_info_df["CURRENCY"].dropna().to_list(),
to_currency=default_currency,
dates_index=stocks_quotes_df.index,
start_date=start_date,
end_date=end_date,
interval=interval,
)
return dict(
tickers=final_list_tickers,
dates=pd.to_datetime(stocks_quotes_df.index),
price=stocks_quotes_df.xs("Adj Close", level=1, axis="columns").to_numpy().T,
volume=stocks_quotes_df.xs("Volume", level=1, axis="columns").to_numpy().T,
currencies=currencies,
exchange_rates=exchange_rates,
default_currency=default_currency,
sectors={ticker: sector for ticker, sector in zip(stock_info_df["SYMBOL"], stock_info_df["SECTOR"])},
industries={ticker: industry for ticker, industry in zip(stock_info_df["SYMBOL"], stock_info_df["INDUSTRY"])},
)
def get_exchange_rates(
from_currencies: list,
to_currency: str,
dates_index: pd.DatetimeIndex,
start_date: int,
end_date: int,
interval: str = "1d",
) -> dict:
"""
It finds the most common currency and set it as default one. For any other currency, it downloads exchange rate
closing prices to the default currency and return them as data frame.
Parameters
----------
from_currencies: list
A list of currencies to convert.
to_currency: str
Currency to convert to.
    dates_index: pd.DatetimeIndex
        Dates for which exchange rates should be available.
    start_date: int
        Start download data from this POSIX timestamp date.
    end_date: int
        End download data at this POSIX timestamp date.
interval: str
Frequency between data.
Returns
-------
xrates: dict
A dictionary with currencies as keys and list of exchange rates at desired dates as values.
"""
from_currencies = [currency for currency in np.unique(from_currencies) if currency != to_currency]
if len(from_currencies) == 0:
return {}
xrates = asyncio.run(async_get_exchange_rates(from_currencies, to_currency, start_date, end_date, interval))
    xrates = xrates.reindex(dates_index)
xrates = xrates.fillna(method="bfill").fillna(method="ffill")
return xrates.to_dict(orient="list")
async def async_get_exchange_rates(
from_currencies: list,
to_currency: str,
start_date: int,
end_date: int,
interval: str,
):
async with aiohttp.ClientSession(headers=HEADERS) as session:
currencies_chart_data = await asyncio.gather(
*[
_download_single_ticker_chart_data(
session, from_currency + to_currency + "=x", start_date, end_date, interval
)
for from_currency in from_currencies
]
)
quotes = [chart_data["quotes"]["Adj Close"] for chart_data in currencies_chart_data]
return pd.concat(quotes, keys=from_currencies, axis="columns", sort=True)
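# Hedged usage sketch (not part of the original module): fetches one year of
# daily quotes for two illustrative tickers and prints the shape of what comes
# back. Needs network access to Yahoo Finance plus the local yahoo_finance
# helpers; the ticker symbols and dates are assumptions, not defaults.
if __name__ == "__main__":
    data = download_tickers_info(["AAPL", "MSFT"], start_date="2022-01-03", end_date="2022-12-30")
    print(data["tickers"], data["default_currency"])
    print(data["price"].shape, data["volume"].shape)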
|
[
"pandas.DataFrame",
"datetime.datetime.today",
"pandas.read_csv",
"numpy.unique",
"tqdm.contrib.logging.logging_redirect_tqdm",
"time.time",
"aiohttp.ClientSession",
"datetime.datetime.strptime",
"pandas.to_datetime",
"datetime.timedelta",
"yahoo_finance.download_ticker_sector_industry",
"yahoo_finance._download_single_ticker_chart_data",
"pandas.concat",
"logging.getLogger"
] |
[((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((1442, 1510), 'pandas.DataFrame', 'pd.DataFrame', (['tickers_info'], {'columns': "['SYMBOL', 'SECTOR', 'INDUSTRY']"}), "(tickers_info, columns=['SYMBOL', 'SECTOR', 'INDUSTRY'])\n", (1454, 1510), True, 'import pandas as pd\n'), ((10455, 10521), 'pandas.concat', 'pd.concat', (['quotes'], {'keys': 'from_currencies', 'axis': '"""columns"""', 'sort': '(True)'}), "(quotes, keys=from_currencies, axis='columns', sort=True)\n", (10464, 10521), True, 'import pandas as pd\n'), ((686, 724), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'HEADERS'}), '(headers=HEADERS)\n', (707, 724), False, 'import aiohttp\n'), ((2207, 2245), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'HEADERS'}), '(headers=HEADERS)\n', (2228, 2245), False, 'import aiohttp\n'), ((3215, 3259), 'pandas.concat', 'pd.concat', (['quotes'], {'axis': '"""columns"""', 'sort': '(True)'}), "(quotes, axis='columns', sort=True)\n", (3224, 3259), True, 'import pandas as pd\n'), ((5375, 5407), 'pandas.read_csv', 'pd.read_csv', (['stock_info_filename'], {}), '(stock_info_filename)\n', (5386, 5407), True, 'import pandas as pd\n'), ((7146, 7197), 'pandas.concat', 'pd.concat', (['[stock_info_df, missing_tickers_info_df]'], {}), '([stock_info_df, missing_tickers_info_df])\n', (7155, 7197), True, 'import pandas as pd\n'), ((9991, 10029), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'HEADERS'}), '(headers=HEADERS)\n', (10012, 10029), False, 'import aiohttp\n'), ((807, 830), 'tqdm.contrib.logging.logging_redirect_tqdm', 'logging_redirect_tqdm', ([], {}), '()\n', (828, 830), False, 'from tqdm.contrib.logging import logging_redirect_tqdm\n'), ((2315, 2338), 'tqdm.contrib.logging.logging_redirect_tqdm', 'logging_redirect_tqdm', ([], {}), '()\n', (2336, 2338), False, 'from tqdm.contrib.logging import logging_redirect_tqdm\n'), ((3943, 3954), 'time.time', 'time.time', ([], {}), '()\n', (3952, 3954), False, 'import time\n'), ((5655, 5695), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'stock_info_columns'}), '(columns=stock_info_columns)\n', (5667, 5695), True, 'import pandas as pd\n'), ((7899, 7937), 'pandas.to_datetime', 'pd.to_datetime', (['stocks_quotes_df.index'], {}), '(stocks_quotes_df.index)\n', (7913, 7937), True, 'import pandas as pd\n'), ((9464, 9490), 'numpy.unique', 'np.unique', (['from_currencies'], {}), '(from_currencies)\n', (9473, 9490), True, 'import numpy as np\n'), ((3985, 4027), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['end_date', '"""%Y-%m-%d"""'], {}), "(end_date, '%Y-%m-%d')\n", (4005, 4027), True, 'import datetime as dt\n'), ((4174, 4218), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['start_date', '"""%Y-%m-%d"""'], {}), "(start_date, '%Y-%m-%d')\n", (4194, 4218), True, 'import datetime as dt\n'), ((4073, 4092), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (4090, 4092), True, 'import datetime as dt\n'), ((4095, 4112), 'datetime.timedelta', 'dt.timedelta', (['(365)'], {}), '(365)\n', (4107, 4112), True, 'import datetime as dt\n'), ((10127, 10242), 'yahoo_finance._download_single_ticker_chart_data', '_download_single_ticker_chart_data', (['session', "(from_currency + to_currency + '=x')", 'start_date', 'end_date', 'interval'], {}), "(session, from_currency + to_currency +\n '=x', start_date, end_date, interval)\n", (10161, 10242), False, 'from yahoo_finance import 
_download_single_ticker_chart_data, download_ticker_sector_industry\n'), ((904, 952), 'yahoo_finance.download_ticker_sector_industry', 'download_ticker_sector_industry', (['session', 'ticker'], {}), '(session, ticker)\n', (935, 952), False, 'from yahoo_finance import _download_single_ticker_chart_data, download_ticker_sector_industry\n'), ((2439, 2526), 'yahoo_finance._download_single_ticker_chart_data', '_download_single_ticker_chart_data', (['session', 'ticker', 'start_date', 'end_date', 'interval'], {}), '(session, ticker, start_date, end_date,\n interval)\n', (2473, 2526), False, 'from yahoo_finance import _download_single_ticker_chart_data, download_ticker_sector_industry\n')]
|
from diffgram.brain.inference import Inference
import tempfile
# shutil and errno are used by Brain.clean() below
import shutil
import errno
# TODO import these only if local prediction is needed
import cv2
try:
import tensorflow as tf
except:
print("Could not import tensorflow")
import numpy as np
import requests
import scipy.misc
import diffgram.utils.visualization_utils as vis_util
class Brain():
def __init__(
self,
client,
name=None,
id=None,
local=False,
use_temp_storage=True
):
"""
client, project client object
name, string, exact match for Project AI name
local, bool, run model locally
if local is true will perform additional setup work local_setup()
"""
self.client = client
if self.client.project_string_id is None:
raise Exception("\n No project string id in client.")
self.name = name
self.id = id
self.status = None
self.local = local
self.method = None
self.sub_method = None
self.min_score_thresh = .5
self.build_complete = None
self.model_path = None
self.image_to_run = None
self.use_temp_storage = use_temp_storage
self.local_model_storage_path = None
if self.local is True:
# These are only needed for local operations
self.temp = tempfile.mkdtemp()
self.local_setup()
def inference_from_response(
self,
dict):
# Assumes object detection
# TODO condition on method
inference = Inference(
method = "object_detection",
id = dict['id'],
status = dict['status'],
box_list = dict['box_list'],
score_list = dict['score_list'],
label_list = dict['label_list']
)
return inference
def predict_from_url(
self,
url):
"""
url, string, web end point to get file
"""
if self.local is True:
raise Exception("Not supported for local models yet.")
request = {}
request['url'] = url
request['ai_name'] = self.name
endpoint = "/api/walrus/v1/project/" + self.client.project_string_id + \
"/inference/from_url"
response = self.client.session.post(
self.client.host + endpoint,
json = request)
self.client.handle_errors(response)
data = response.json()
self.client.handle_errors(response)
inference = self.inference_from_response(data['inference'])
return inference
def predict_from_local(
self,
path):
"""
Make a prediction from a local file.
Creates a Diffgram file object and runs prediction.
This is roughly equal to running file.from_local() and predict()
but in one request (instead of two).
path, string, file path
"""
if self.local is True:
self.image_to_run = open(path, "rb")
# WIP
# TODO clean up, declare options for different types of expected inputs
# this is for model that expects numpy array as input
#self.image_np = scipy.misc.imread(path)
#self.image_np = self.resize(self.image_np)
# moved this here, was part of other thing prior
self.image_to_run = self.image_to_run.read()
self.run()
inference = self.inference_from_local()
return inference
if self.local is False:
files = {'file': open(path, 'rb')}
options = { 'immediate_mode' : 'True',
'ai_name' : self.name}
endpoint = "/api/walrus/v1/project/" + self.client.project_string_id \
+ "/inference/from_local"
response = self.client.session.post(
self.client.host + endpoint,
files = files,
data = options)
self.client.handle_errors(response)
data = response.json()
inference = self.inference_from_response(data['inference'])
return inference
# TODO handle creation of Inference and Instance objects
def run(
self,
image = None):
if self.build_complete is False:
return False
if image:
self.image_to_run = image
with self.graph.as_default():
# MUST HAVE compat.as_bytes for tf slim
# https://www.tensorflow.org/api_docs/python/tf/compat/as_bytes
# https://stackoverflow.com/questions/46687348/decoding-tfrecord-with-tfslim
self.image_to_run_expanded = tf.compat.as_bytes(self.image_to_run)
self.image_to_run_expanded = np.expand_dims(self.image_to_run_expanded, axis=0)
self.method = "object_detection"
if self.sub_method == "default" or self.sub_method is None:
self.run_object_detection()
inference = self.inference_from_local()
return inference
def run_object_detection(self):
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes,
self.detection_scores,
self.detection_classes,
self.num_detections],
feed_dict = {
self.image_tensor: self.image_to_run_expanded } )
self.boxes = np.squeeze(boxes)
self.scores = np.squeeze(scores)
self.classes = np.squeeze(classes).astype(np.int32)
#print(self.boxes, self.scores, self.classes)
def nearest_iou(self, alpha, bravo):
_best_iou_hyper = .2
for i in range(len(alpha.box_list)):
best_iou = 0
best_index = None
# Find best IoU
for j in range(len(bravo.box_list)):
iou = Brain.calc_iou(alpha.box_list[i], bravo.box_list[j])
if iou >= best_iou:
best_iou = iou
best_index = j
if best_index is None:
continue
# handle large boxes, is the threat entirely inside the box?
alpha_box = alpha.box_list[i]
bravo_box = bravo.box_list[best_index]
if best_iou > _best_iou_hyper or best_iou > .01 and \
alpha_box[1] < bravo_box[1] and \
alpha_box[3] > bravo_box[3] and \
alpha_box[0] < bravo_box[0] and \
alpha_box[2] > bravo_box[2]:
# Assumes boxes have been thresholded already,
# This way threshold applies to nearest search too
class_id = bravo.label_list[best_index]
nearest_alpha_box = bravo.box_list[best_index]
# for stats
#self.average_iou = ( (best_iou + self.average_iou ) / 2)
# Where best_index is which bravo one
# is "in" which i index
print("alpha is in bravo", i, "in", best_index)
@staticmethod
def calc_iou(box_a, box_b):
# Calculate intersection, i.e. area of overlap between the 2 boxes (could be 0)
# http://math.stackexchange.com/a/99576
x_overlap = max(0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
y_overlap = max(0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
intersection = x_overlap * y_overlap
# Calculate union
area_box_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
area_box_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
union = area_box_a + area_box_b - intersection
if union == 0:
return 0
iou = intersection / union
return iou
def resize(self, image):
if image.shape[0] > 600 or image.shape[1] > 600:
ratio = min((300 / image.shape[0]),
(300 / image.shape[1]))
shape_x = int(round(image.shape[0] * ratio))
shape_y = int(round(image.shape[1] * ratio))
image = scipy.misc.imresize(image,
(shape_x, shape_y))
#print(image.shape)
return image
def predict_from_file(
self,
file_id):
"""
file_id, int, diffgram file id
Assumes singular file for now
"""
if self.local is True:
raise Exception("Not supported for local models yet.")
request = {}
request['file_list'] = [{'id' : file_id}]
request['ai_name'] = self.name
request['wait_for_inference'] = True
endpoint = "/api/walrus/project/" + self.client.project_string_id + \
"/inference/add"
response = self.client.session.post(
self.client.host + endpoint,
json = request)
self.client.handle_errors(response)
data = response.json()
inference = self.inference_from_response(data['inference'])
return inference
def local_setup(self):
"""
Intial setup for local prediction
"""
self.get_checkpoint_and_label_map()
self.build()
def get_checkpoint_and_label_map(self):
"""
Get download links
Download checkpoint file for AI name
"""
request = {}
request['ai_name'] = self.name
endpoint = "/api/walrus/project/" + self.client.project_string_id + \
"/brain/local_info"
response = self.client.session.post(
self.client.host + endpoint,
json = request)
self.client.handle_errors(response)
data = response.json()
ai = data['ai']
self.id = ai['id']
# TODO continue to try and clarify label map crazinesss
self.file_id_to_model_id = ai['label_dict']
#print("Label map", self.file_id_to_model_id)
self.model_id_to_file_id = {v: k for k, v in self.file_id_to_model_id.items()}
self.file_id_to_name = {v: k for k, v in self.client.name_to_file_id.items()}
self.build_model_id_to_name()
# Data has url for models and label map
# TODO clarify difference between local path and url to download model
if self.use_temp_storage is True:
self.model_path = self.temp + "/" + str(self.id) + ".pb"
if self.use_temp_storage is False:
self.model_path = self.local_model_storage_path
self.url_model = ai['url_model']
self.download_file(
url = self.url_model,
path = self.model_path)
def build_model_id_to_name(self):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
self.model_id_to_name = {}
for file_id, label_name in self.file_id_to_name.items():
model_id = self.file_id_to_model_id.get(str(file_id), None)
if model_id:
self.model_id_to_name[model_id] = {'name' : label_name}
#print(self.model_id_to_name)
def download_file(
self,
url,
path
):
retry = 0
while retry < 3:
if url[0 : 4] != "http":
return False
response = requests.get(url, stream=True)
if response.status_code != 200:
retry += 1
content_type = response.headers.get('content-type', None)
with open(path, 'wb') as file:
file.write(response.content)
return True
return False
def check_status(
self):
"""
"""
request = {}
request['ai_name'] = self.name
endpoint = "/api/walrus/v1/project/" + self.client.project_string_id + \
"/brain/status"
response = self.client.session.post(
self.client.host + endpoint,
json = request)
self.client.handle_errors(response)
data = response.json()
self.status = data['ai']['status']
def clean(self):
try:
shutil.rmtree(self.temp) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def build(self):
"""
Build graph for local prediction
Assumes it has the checkpoint ready to go
"""
self.graph = tf.Graph()
with self.graph.as_default():
#with tf.device('/cpu:0'): # for local cpu testing
graph_def = tf.GraphDef()
with tf.gfile.GFile(self.model_path, 'rb') as fid:
serialized_graph = fid.read()
graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(graph_def, name='')
self.sess = tf.Session(graph=self.graph)
# TODO make this more flexible to work with different tensor types
self.image_tensor = self.graph.get_tensor_by_name('encoded_image_string_tensor:0')
self.detection_boxes = self.graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = self.graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.graph.get_tensor_by_name('num_detections:0')
self.build_complete = True
return True
def inference_from_local(
self,):
box_list = []
score_list = []
label_list = []
for i in range(self.boxes.shape[0]):
if self.scores[i] is None:
pass
if self.scores[i] > self.min_score_thresh:
#print("Detection")
box_list.append(self.boxes[i].tolist())
label_list.append(self.classes[i].tolist())
score_list.append(self.scores[i].tolist())
inference = Inference(
method = self.method,
id = None,
status = None,
box_list = box_list,
score_list = score_list,
label_list = label_list
)
return inference
def visual(self,
image = None
):
if image is None:
image = self.image_backup
# WIP
#if self.sub_method == "default" or self.sub_method is None:
#print("ran visual")
vis_util.visualize_boxes_and_labels_on_image_array(
image,
self.boxes,
self.classes,
self.scores,
self.model_id_to_name,
use_normalized_coordinates=True,
line_thickness=3,
min_score_thresh=self.min_score_thresh)
return image
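# Hedged usage sketch (not part of the original module): exercises the pure
# static IoU helper, which reads each box as [x_min, y_min, x_max, y_max].
# For the two boxes below the overlap area is 1 and the union is 4 + 4 - 1 = 7,
# so the expected IoU is 1/7 ~= 0.1429. Running this directly assumes the heavy
# imports at the top of the file (diffgram, cv2, tensorflow, scipy) resolve.
if __name__ == "__main__":
    iou = Brain.calc_iou([0, 0, 2, 2], [1, 1, 3, 3])
    print(round(iou, 4))  # 0.1429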
|
[
"diffgram.brain.inference.Inference",
"tensorflow.Session",
"numpy.expand_dims",
"tempfile.mkdtemp",
"diffgram.utils.visualization_utils.visualize_boxes_and_labels_on_image_array",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"requests.get",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"tensorflow.compat.as_bytes"
] |
[((1340, 1512), 'diffgram.brain.inference.Inference', 'Inference', ([], {'method': '"""object_detection"""', 'id': "dict['id']", 'status': "dict['status']", 'box_list': "dict['box_list']", 'score_list': "dict['score_list']", 'label_list': "dict['label_list']"}), "(method='object_detection', id=dict['id'], status=dict['status'],\n box_list=dict['box_list'], score_list=dict['score_list'], label_list=\n dict['label_list'])\n", (1349, 1512), False, 'from diffgram.brain.inference import Inference\n'), ((4540, 4557), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (4550, 4557), True, 'import numpy as np\n'), ((4574, 4592), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (4584, 4592), True, 'import numpy as np\n'), ((10700, 10710), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10708, 10710), True, 'import tensorflow as tf\n'), ((11975, 12095), 'diffgram.brain.inference.Inference', 'Inference', ([], {'method': 'self.method', 'id': 'None', 'status': 'None', 'box_list': 'box_list', 'score_list': 'score_list', 'label_list': 'label_list'}), '(method=self.method, id=None, status=None, box_list=box_list,\n score_list=score_list, label_list=label_list)\n', (11984, 12095), False, 'from diffgram.brain.inference import Inference\n'), ((12340, 12560), 'diffgram.utils.visualization_utils.visualize_boxes_and_labels_on_image_array', 'vis_util.visualize_boxes_and_labels_on_image_array', (['image', 'self.boxes', 'self.classes', 'self.scores', 'self.model_id_to_name'], {'use_normalized_coordinates': '(True)', 'line_thickness': '(3)', 'min_score_thresh': 'self.min_score_thresh'}), '(image, self.boxes, self.\n classes, self.scores, self.model_id_to_name, use_normalized_coordinates\n =True, line_thickness=3, min_score_thresh=self.min_score_thresh)\n', (12390, 12560), True, 'import diffgram.utils.visualization_utils as vis_util\n'), ((1172, 1190), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1188, 1190), False, 'import tempfile\n'), ((3934, 3971), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['self.image_to_run'], {}), '(self.image_to_run)\n', (3952, 3971), True, 'import tensorflow as tf\n'), ((4005, 4055), 'numpy.expand_dims', 'np.expand_dims', (['self.image_to_run_expanded'], {'axis': '(0)'}), '(self.image_to_run_expanded, axis=0)\n', (4019, 4055), True, 'import numpy as np\n'), ((9730, 9760), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (9742, 9760), False, 'import requests\n'), ((10813, 10826), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (10824, 10826), True, 'import tensorflow as tf\n'), ((11025, 11053), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (11035, 11053), True, 'import tensorflow as tf\n'), ((4610, 4629), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (4620, 4629), True, 'import numpy as np\n'), ((10836, 10873), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.model_path', '"""rb"""'], {}), "(self.model_path, 'rb')\n", (10850, 10873), True, 'import tensorflow as tf\n'), ((10969, 11008), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (10988, 11008), True, 'import tensorflow as tf\n')]
|
""" Environment with a distribution of mazes (one new maze is drawn at each episode)
Author: <NAME>
"""
import numpy as np
from deer.base_classes import Environment
#import matplotlib
#matplotlib.use('qt5agg')
#from mpl_toolkits.axes_grid1 import host_subplot
#import mpl_toolkits.axisartist as AA
#import matplotlib.pyplot as plt
import copy
import a_star_path_finding as pf
class MyEnv(Environment):
VALIDATION_MODE = 0
def __init__(self, rng, **kwargs):
self._random_state = rng
self._mode = -1
self._mode_score = 0.0
self._mode_episode_count = 0
self._episode_steps = 0
self._actions = [0,1,2,3]
self._size_maze = 8
self._higher_dim_obs=kwargs.get('higher_dim_obs',False)
self._reverse=kwargs.get('reverse',False)
self._n_walls = int((self._size_maze-2)**2/3.)#int((self._size_maze)**2/3.)
self._n_rewards = 3
self.create_map()
self.intern_dim=3
def create_map(self):
valid_map=False
while valid_map==False:
# Agent
self._pos_agent=[1,1]
# Walls
self._pos_walls=[]
for i in range(self._size_maze):
self._pos_walls.append([i,0])
self._pos_walls.append([i,self._size_maze-1])
for j in range(self._size_maze-2):
self._pos_walls.append([0,j+1])
self._pos_walls.append([self._size_maze-1,j+1])
n=0
while n < self._n_walls:
potential_wall=[self._random_state.randint(1,self._size_maze-2),self._random_state.randint(1,self._size_maze-2)]
if(potential_wall not in self._pos_walls and potential_wall!=self._pos_agent):
self._pos_walls.append(potential_wall)
n+=1
# Rewards
#self._pos_rewards=[[self._size_maze-2,self._size_maze-2]]
self._pos_rewards=[]
n=0
while n < self._n_rewards:
potential_reward=[self._random_state.randint(1,self._size_maze-1),self._random_state.randint(1,self._size_maze-1)]
if(potential_reward not in self._pos_rewards and potential_reward not in self._pos_walls and potential_reward!=self._pos_agent):
self._pos_rewards.append(potential_reward)
n+=1
valid_map=self.is_valid_map(self._pos_agent,self._pos_walls,self._pos_rewards)
def is_valid_map(self,pos_agent,pos_walls,pos_rewards):
a = pf.AStar()
walls = [tuple(w) for w in pos_walls]
start=tuple(pos_agent)
for r in pos_rewards:
end=tuple(r)
a.init_grid(self._size_maze, self._size_maze, walls, start, end)
maze=a
optimal_path=maze.solve()
if(optimal_path==None):
return False
return True
def reset(self, mode):
self._episode_steps = 0
self._mode=mode
self.create_map()
if mode == MyEnv.VALIDATION_MODE:
if self._mode != MyEnv.VALIDATION_MODE:
self._mode = MyEnv.VALIDATION_MODE
self._mode_score = 0.0
self._mode_episode_count = 0
else:
self._mode_episode_count += 1
return [1 * [self._size_maze * [self._size_maze * [0]]]]
def act(self, action):
self._episode_steps += 1
action = self._actions[action]
reward = -0.1
if(action==0):
if([self._pos_agent[0]+1,self._pos_agent[1]] not in self._pos_walls):
self._pos_agent[0]=self._pos_agent[0]+1
elif(action==1):
if([self._pos_agent[0],self._pos_agent[1]+1] not in self._pos_walls):
self._pos_agent[1]=self._pos_agent[1]+1
elif(action==2):
if([self._pos_agent[0]-1,self._pos_agent[1]] not in self._pos_walls):
self._pos_agent[0]=self._pos_agent[0]-1
elif(action==3):
if([self._pos_agent[0],self._pos_agent[1]-1] not in self._pos_walls):
self._pos_agent[1]=self._pos_agent[1]-1
if (self._pos_agent in self._pos_rewards):
reward = 1
self._pos_rewards.remove(self._pos_agent)
self._mode_score += reward
return reward
def summarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
print ("test_data_set.observations.shape")
print (test_data_set.observations()[0][0:1])
print ("self._mode_score:"+str(self._mode_score)+".")
def inputDimensions(self):
if(self._higher_dim_obs==True):
return [(1,self._size_maze*6,self._size_maze*6)]
else:
return [(1,self._size_maze,self._size_maze)]
def observationType(self, subject):
return np.float32
def nActions(self):
return len(self._actions)
def observe(self):
self._map=np.zeros((self._size_maze,self._size_maze))
for coord_wall in self._pos_walls:
self._map[coord_wall[0],coord_wall[1]]=1
for coord_reward in self._pos_rewards:
self._map[coord_reward[0],coord_reward[1]]=2
self._map[self._pos_agent[0],self._pos_agent[1]]=0.5
if(self._higher_dim_obs==True):
indices_reward=np.argwhere(self._map == 2)
indices_agent=np.argwhere(self._map == 0.5)
self._map=self._map/1.
self._map=np.repeat(np.repeat(self._map, 6, axis=0),6, axis=1)
# agent repr
agent_obs=np.zeros((6,6))
agent_obs[0,2]=0.8
agent_obs[1,0:5]=0.9
agent_obs[2,1:4]=0.9
agent_obs[3,1:4]=0.9
agent_obs[4,1]=0.9
agent_obs[4,3]=0.9
agent_obs[5,0:2]=0.9
agent_obs[5,3:5]=0.9
# reward repr
reward_obs=np.zeros((6,6))
reward_obs[:,1]=0.7
reward_obs[0,1:4]=0.6
reward_obs[1,3]=0.7
reward_obs[2,1:4]=0.6
reward_obs[4,2]=0.7
reward_obs[5,2:4]=0.7
for i in indices_reward:
self._map[i[0]*6:(i[0]+1)*6:,i[1]*6:(i[1]+1)*6]=reward_obs
for i in indices_agent:
self._map[i[0]*6:(i[0]+1)*6:,i[1]*6:(i[1]+1)*6]=agent_obs
self._map=(self._map*2)-1 #scaling
#print ("self._map higher_dim_obs")
#print (self._map)
#plt.imshow(self._map, cmap='gray_r')
#plt.show()
else:
self._map=self._map/2.
self._map[self._map == 0.5] = 0.99 # agent
self._map[self._map == 1.] = 0.5 # reward
if(self._reverse==True):
self._map=-self._map #1-self._map
return [self._map]
def inTerminalState(self):
if ( self._pos_rewards==[] or (self._mode>=0 and self._episode_steps >= 50) ):
return True
else:
return False
if __name__ == "__main__":
import hashlib
rng = np.random.RandomState(123456)
env = MyEnv(rng, higher_dim_obs=False)
maps=[]
for i in range(10000):
env.create_map()
one_laby=env.observe()[0]
# Hashing the labyrinths to be able to find duplicates in O(1)
one_laby=int(hashlib.sha1(str(one_laby).encode('utf-8')).hexdigest(), 16) % (10 ** 8)
# TESTING ADDING DUPLICATION
if i%1000==0:
env.reset(0)
if i%1000==500:
env.reset(1)
maps.append(copy.deepcopy(one_laby))
duplicate_laby=0
for i in range(10000):
env.create_map()
one_laby=env.observe()[0]
# Hashing the labyrinths to be able to find duplicates in O(1)
one_laby=int(hashlib.sha1(str(one_laby).encode('utf-8')).hexdigest(), 16) % (10 ** 8)
# TESTING ADDING DUPLICATION
#if i%1000==0:
# maps.append(one_laby)
# TESTING WITH RESETS
if i%1000==0:
env.reset(0)
if i%1000==500:
env.reset(1)
duplicate=min(maps.count(one_laby),1)
duplicate_laby+=duplicate
if i%1000==0:
print ("Number of duplicate labyrinths:"+str(duplicate_laby)+".")
|
[
"copy.deepcopy",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.argwhere",
"a_star_path_finding.AStar",
"numpy.repeat"
] |
[((7284, 7313), 'numpy.random.RandomState', 'np.random.RandomState', (['(123456)'], {}), '(123456)\n', (7305, 7313), True, 'import numpy as np\n'), ((2589, 2599), 'a_star_path_finding.AStar', 'pf.AStar', ([], {}), '()\n', (2597, 2599), True, 'import a_star_path_finding as pf\n'), ((5148, 5192), 'numpy.zeros', 'np.zeros', (['(self._size_maze, self._size_maze)'], {}), '((self._size_maze, self._size_maze))\n', (5156, 5192), True, 'import numpy as np\n'), ((5521, 5548), 'numpy.argwhere', 'np.argwhere', (['(self._map == 2)'], {}), '(self._map == 2)\n', (5532, 5548), True, 'import numpy as np\n'), ((5575, 5604), 'numpy.argwhere', 'np.argwhere', (['(self._map == 0.5)'], {}), '(self._map == 0.5)\n', (5586, 5604), True, 'import numpy as np\n'), ((5762, 5778), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (5770, 5778), True, 'import numpy as np\n'), ((6098, 6114), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (6106, 6114), True, 'import numpy as np\n'), ((7806, 7829), 'copy.deepcopy', 'copy.deepcopy', (['one_laby'], {}), '(one_laby)\n', (7819, 7829), False, 'import copy\n'), ((5672, 5703), 'numpy.repeat', 'np.repeat', (['self._map', '(6)'], {'axis': '(0)'}), '(self._map, 6, axis=0)\n', (5681, 5703), True, 'import numpy as np\n')]
|
"""
functions.py
In this work, we present PolymerXtal, a software designed to build and analyze molecular-level polymer crystal
structures. PolymerXtal provides a standardized process to generate polymer crystal structures based on monomer,
tacticity, helicity, chirality and unit cell information, and to analyze the crystallinity in polymer systems with
given atom trajectories. These features have allowed PolymerXtal to lead further investigations of semi-crystalline
polymers, where much of the important physics originates, and to promote future research endeavors in the area of
crystalline polymer simulations.
Handles the primary functions
"""
import os, sys, os.path # , math # noqa: E401
# import shutil
import numpy as np
def calc_polymer(mass, nm, polymer_type, p_fra, custom=0):
p = polymer_type.split("-")[0]
wunits = {
"PS": 104.1,
"PE": 28.0516,
"PMMA": 100.117,
"PP": 42.0804,
"POM": 30.0262,
"PTFE": 100.0156,
"PVC": 62.4987,
}
if p == "custom":
wunit = custom
else:
wunit = wunits[p]
m_polymer = (mass * p_fra) / (1 - p_fra)
dp = int(m_polymer / wunit)
nc = dp / nm
print("Degree of polymerization: ", dp, "Chains: ", nc)
return nc
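# --- Worked example (editor note; illustrative numbers, not from the original) ---
# calc_polymer() balances the coating mass against the grain mass:
#     m_polymer = mass * p_fra / (1 - p_fra)      # polymer mass required
#     dp        = int(m_polymer / wunit)          # total monomers (degree of polymerization)
#     nc        = dp / nm                         # chains of nm monomers each
# e.g. mass = 1e5, 5 wt% PE coating (wunit = 28.0516), nm = 40:
#     m_polymer ~ 5263.2, dp = 187, nc ~ 4.7 chains.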
def parameters(periodic, cell, coord, boundaries, radius):
shift = {}
if periodic == "x":
lx = cell["x"][2]
ly = (boundaries["y"][0] + radius) - (boundaries["y"][1] - radius)
lz = (boundaries["z"][0] + radius) - (boundaries["z"][1] - radius)
shift["y"] = boundaries["y"][1] - radius
shift["z"] = boundaries["z"][1] - radius
dx = 1
dy = 0
dz = 0
# cx = -lx
# cy = coord["y"] - shift["y"]
# cz = coord["z"] - shift["z"]
lc = lx
length = [lx, 4 * radius, 4 * radius]
cylinder_p = [-100, 2 * radius, 2 * radius]
elif periodic == "y":
lx = (boundaries["x"][0] + radius) - (boundaries["x"][1] - radius)
ly = cell["y"][2]
lz = (boundaries["z"][0] + radius) - (boundaries["z"][1] - radius)
shift["x"] = boundaries["x"][1] - radius
shift["z"] = boundaries["z"][1] - radius
dx = 0
dy = 1
dz = 0
print(coord, shift)
# cx = coord["x"] - shift["x"]
# cy = -ly
# cz = coord["z"] - shift["z"]
lc = ly
length = [4 * radius, ly, 4 * radius]
cylinder_p = [2 * radius, -100, 2 * radius]
else:
lx = (boundaries["x"][0] + radius) - (boundaries["x"][1] - radius)
ly = (boundaries["y"][0] + radius) - (boundaries["y"][1] - radius)
lz = cell["z"][2]
shift["x"] = boundaries["x"][1] - radius
shift["y"] = boundaries["y"][1] - radius
dx = 0
dy = 0
dz = 1
# cx = coord["x"] - shift["x"]
# cy = coord["y"] - shift["y"]
# cz = -lz
lc = lz
length = [4 * radius, 4 * radius, lz]
cylinder_p = [2 * radius, 2 * radius, -100]
cylinder_d = [dx, dy, dz]
print("Polymod cell: ", length) # , 'Shift cell: ', shift)
return shift, length, cylinder_p, cylinder_d, lc
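# --- Editor note on the geometry above ---
# parameters() sizes the Polymod build cell along the grain's periodic axis: the cell
# spans the full periodic length (lc) along that axis and 4*radius in the two
# transverse directions, while the cylinder is centred at (2*radius, 2*radius) in the
# transverse plane with its start pushed to -100 along the axis so it runs through
# the whole cell.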
def run_data4lammps(charge, ffname, cell):
directory = "./data4lammps/"
path_r = os.path.join(directory, "main.py")
path_2 = os.path.join(directory, "doAtomTyping.py")
new_typing_command = "python3.6 {0} {1}".format(path_2, ffname)
data4lammps_command = "python {0} {1} {2} {3} {4} {5} {6} {7}".format(
path_r, 0, cell[0], 0, cell[1], 0, cell[2], charge
)
return new_typing_command, data4lammps_command
def read_dump(ifile, multiframe=0):
    """Parse a LAMMPS dump file into a nested dict keyed by timestep."""
Dir = {}
src = open(ifile)
current = ""
timestep = 0
for line in src.readlines():
ln = line.split()
if ln[0] == "ITEM:":
if ln[1] == "TIMESTEP" or ln[1] == "BOX" or ln[1] == "ATOMS":
current = ln[1]
if ln[1] == "BOX":
Dir[timestep][current] = []
if ln[1] == "ATOMS":
Dir[timestep][current] = {}
Dir[timestep][current]["id"] = 0
dump = {}
for i, j in enumerate(ln[2:]):
dump[i] = j
if ln[1:] == "NUMBER OF ATOMS".split():
current = "NUMBER OF ATOMS"
continue
if current == "TIMESTEP":
timestep = eval(ln[0])
if timestep not in Dir:
Dir[timestep] = {}
if current == "NUMBER OF ATOMS":
Dir[timestep][current] = eval(ln[0])
if current == "BOX":
Dir[timestep][current] += [eval(i) for i in ln]
if current == "ATOMS":
cid = Dir[timestep][current]["id"]
Dir[timestep][current][cid] = {}
for i, j in enumerate(
                [(eval(k) if k[0].isdigit() or k[0] == "-" else k) for k in ln]
):
Dir[timestep][current][cid][dump[i]] = j
Dir[timestep][current]["id"] += 1
return Dir
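# --- Usage sketch (editor note; column names depend on the dump being read) ---
# read_dump() returns a nested dict keyed by timestep; for an atom-style dump a
# per-timestep entry typically looks like
#     Dir[0]["NUMBER OF ATOMS"] -> 1523
#     Dir[0]["BOX"]             -> [xlo, xhi, ylo, yhi, zlo, zhi]
#     Dir[0]["ATOMS"][0]        -> {"id": 1, "type": 1, "xs": 0.1, "ys": 0.2, "zs": 0.3}
# with Dir[t]["ATOMS"]["id"] holding the running count of atom lines parsed so far.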
# def dump2data(datafile, dumpfile, ofile):
# Dir = read_data(datafile)
# if "Velocities" not in Dir:
# Dir["Velocities"] = {}
# src = open(dumpfile)
# box = "x"
# for line in src.readlines():
# ln = line.split()
# if len(ln) == 2 and ln[0] != "ITEM:":
# Dir[box + "lo"] = eval(ln[0])
# Dir[box + "hi"] = eval(ln[1])
# if box == "x":
# box = "y"
# continue
# if box == "y":
# box = "z"
# continue
# continue
# if len(ln) == 8:
# Dir["Atoms"][eval(ln[0])][3] = eval(ln[2])
# Dir["Atoms"][eval(ln[0])][4] = eval(ln[3])
# Dir["Atoms"][eval(ln[0])][5] = eval(ln[4])
# Dir["Atoms"][eval(ln[0])][6] = 0
# Dir["Atoms"][eval(ln[0])][7] = 0
# Dir["Atoms"][eval(ln[0])][8] = 0
# if eval(ln[0]) not in Dir['Velocities']:
# Dir['Velocities'][eval(ln[0])]=[]
# Dir['Velocities'][eval(ln[0])].append(eval(ln[5]))
# Dir['Velocities'][eval(ln[0])].append(eval(ln[6]))
# Dir['Velocities'][eval(ln[0])].append(eval(ln[7]))
# Dir['Velocities'][eval(ln[0])][0]=eval(ln[5])
# Dir['Velocities'][eval(ln[0])][1]=eval(ln[6])
# Dir['Velocities'][eval(ln[0])][2]=eval(ln[7])
# if len(ln)==11:
# Dir['Atoms'][eval(ln[0])][3]=eval(ln[2])
# Dir['Atoms'][eval(ln[0])][4]=eval(ln[3])
# Dir['Atoms'][eval(ln[0])][5]=eval(ln[4])
# Dir['Atoms'][eval(ln[0])][6]=eval(ln[8])
# Dir['Atoms'][eval(ln[0])][7]=eval(ln[9])
# Dir['Atoms'][eval(ln[0])][8]=eval(ln[10])
# if eval(ln[0]) not in Dir['Velocities']:
# Dir['Velocities'][eval(ln[0])]=[]
# Dir['Velocities'][eval(ln[0])].append(eval(ln[5]))
# Dir['Velocities'][eval(ln[0])].append(eval(ln[6]))
# Dir['Velocities'][eval(ln[0])].append(eval(ln[7]))
# Dir['Velocities'][eval(ln[0])][0]=eval(ln[5])
# Dir['Velocities'][eval(ln[0])][1]=eval(ln[6])
# Dir['Velocities'][eval(ln[0])][2]=eval(ln[7])
# src.close()
# write_data(Dir, ofile)
def write_data(Dir, ofile, v=1, a=0):
des = open(ofile, "w")
des.write("LAMMPS data file via Tongtong\n")
des.write("\n")
if a:
ilist = ["atom"]
List = ["Masses", "Atoms", "Velocities"]
else:
ilist = ["atom", "bond", "angle", "dihedral", "improper"]
List = [
"Masses",
"Pair Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
for i in ilist:
if (i + "s") in Dir:
des.write("%d %s\n" % (Dir[i + "s"], (i + "s")))
des.write("%d %s\n" % (Dir[i + " types"], (i + " types")))
des.write("\n")
for i in ["x", "y", "z"]:
des.write(
"%f %f %s %s\n" % (Dir[i + "lo"], Dir[i + "hi"], (i + "lo"), (i + "hi"))
)
des.write("\n")
if v == 0:
List.remove("Velocities")
for key in List:
if key in Dir and len(Dir[key]) > 0:
des.write(key + "\n")
des.write("\n")
for i in Dir[key]:
des.write(str(i) + " " + " ".join(str(j) for j in Dir[key][i]) + "\n")
des.write("\n")
des.close()
def input_coat4(na, cpos, idata, odata):
des = open("run_coat4.in", "w")
des.write("# General parameters\n")
des.write("units real\n")
des.write("atom_style full\n")
des.write("boundary p p p\n")
des.write("special_bonds lj/coul 0.0 0.0 1.0 dihedral yes\n")
des.write("dielectric 1.0\n")
des.write("pair_style lj/cut/coul/long 12.0\n")
des.write("bond_style harmonic\n")
des.write("angle_style harmonic\n")
des.write("dihedral_style harmonic\n")
des.write("improper_style harmonic\n")
des.write("kspace_style pppm 1.0e-6\n")
des.write("\n")
des.write("read_data %s\n" % idata)
des.write("\n")
des.write("thermo_style custom temp vol density pxx pyy pzz lx ly lz\n")
des.write("thermo 100\n")
des.write("thermo_modify flush yes line multi\n")
des.write("\n")
des.write("dump 1 all atom 500 coat2.dump\n")
des.write("\n")
des.write("label loopa\n")
des.write("variable i loop %d\n" % na)
des.write(" variable k equal $i\n")
des.write(" variable h equal x[$k]\n")
des.write(" variable p equal y[$k]\n")
des.write(' print "x= $h"\n')
des.write(' print "y= $p"\n')
des.write(" variable dx equal $h-%f\n" % cpos[0])
des.write(" variable dy equal $p-%f\n" % cpos[1])
des.write(" variable dr equal sqrt(${dx}*${dx}+${dy}*${dy})\n")
des.write(" variable nvx equal -${dx}/${dr}*0.01\n")
des.write(" variable nvy equal -${dy}/${dr}*0.01\n")
des.write(" set atom $k vx ${nvx} vy ${nvy} vz 0.0\n")
des.write("next i\n")
des.write("jump SELF loopa\n")
des.write("\n")
des.write("write_data %s\n" % odata)
des.write("\n")
des.close()
# with open('coat4.in', 'r') as file:
# filedata = file.read()
# Replace the target string
# filedata = filedata.replace('na', str(na))
# filedata = filedata.replace('ccx', str(cpos[0]))
# filedata = filedata.replace('ccy', str(cpos[1]))
# filedata = filedata.replace('idata', idata)
# filedata = filedata.replace('odata', odata)
# Write the file out again
# with open('run_coat4.in', 'w') as file:
# file.write(filedata)
def input_coat5(cpos, radius, oradius, idata, odata, tstep, X6paircoeffs, mctemp):
des = open("run_coat5.in", "w")
des.write("# General parameters\n")
des.write("units real\n")
des.write("atom_style full\n")
des.write("boundary p p p\n")
des.write("special_bonds lj/coul 0.0 0.0 1.0 dihedral yes\n")
des.write("dielectric 1.0\n")
des.write("#pair_style lj/cut/coul/long 12.0\n")
des.write("pair_style buck/coul/cut 10.0\n")
des.write("bond_style harmonic\n")
des.write("angle_style harmonic\n")
des.write("dihedral_style harmonic\n")
des.write("improper_style harmonic\n")
des.write("\n")
des.write("read_data %s\n" % idata)
des.write("\n")
src = open(X6paircoeffs)
for line in src.readlines():
des.write(line)
src.close()
des.write("\n")
des.write("thermo_style custom temp vol density pxx pyy pzz lx ly lz\n")
des.write("thermo 100\n")
des.write("thermo_modify flush yes line one\n")
des.write("\n")
des.write(
"region inner cylinder z %f %f %f INF INF units box side out\n"
% (cpos[0], cpos[1], radius - 2)
)
des.write(
"region outter cylinder z %f %f %f INF INF units box side in\n"
% (cpos[0], cpos[1], oradius + 3.5)
)
des.write("\n")
des.write("comm_style tiled\n")
des.write("fix LB all balance 1000 1.1 rcb\n")
des.write("\n")
des.write("fix 1 all wall/region inner lj126 5.0 3.5 12.0\n")
des.write("fix 8 all wall/region outter lj126 5.0 3.5 3.5\n")
des.write("\n")
des.write("fix 2 all nvt temp %f %f 100 \n" % (mctemp, mctemp))
des.write("\n")
des.write("reset_timestep %d\n" % tstep)
des.write("\n")
des.write("dump 1 all atom 500 coat5.*.dump\n")
des.write("\n")
des.write("run 5000\n")
des.write("write_data %s\n" % odata)
des.write("\n")
des.close()
# with open('coat5.in', 'r') as file:
# filedata = file.read()
# Replace the target string
# filedata = filedata.replace('ccx', str(cpos[0]))
# filedata = filedata.replace('ccy', str(cpos[1]))
# filedata = filedata.replace('radius', str(radius-2))
# filedata = filedata.replace('oadius', str(oradius+3.5))
# filedata = filedata.replace('idata', idata)
# filedata = filedata.replace('odata', odata)
# filedata = filedata.replace('tstep', str(tstep))
# Write the file out again
# with open('run_coat5.in', 'w') as file:
# file.write(filedata)
def input_coat6(na, idata, odata):
des = open("run_coat6.in", "w")
des.write("# General parameters\n")
des.write("units real\n")
des.write("atom_style full\n")
des.write("boundary p p p\n")
des.write("#special_bonds lj/coul 0.0 0.0 1.0 dihedral yes\n")
des.write("#dielectric 1.0\n")
des.write("#pair_style lj/cut/coul/long 12.0\n")
des.write("#bond_style harmonic\n")
des.write("#angle_style harmonic\n")
des.write("#improper_style harmonic\n")
des.write("#kspace_style pppm 1.0e-6\n")
des.write("\n")
des.write("read_data %s\n" % idata)
des.write("\n")
des.write("thermo_style custom temp vol density pxx pyy pzz lx ly lz\n")
des.write("thermo 100\n")
des.write("thermo_modify flush yes line multi\n")
des.write("\n")
des.write("dump 1 all atom 500 coat2.dump\n")
des.write("\n")
des.write("label loopa\n")
des.write("variable i loop %d\n" % na)
des.write(" variable k equal $i\n")
des.write(" variable h equal x[$k]\n")
des.write(" variable p equal y[$k]\n")
des.write(' print "x= $h"\n')
des.write(' print "y= $p"\n')
des.write(" variable dx equal $h \n")
des.write(" variable dy equal $p \n")
des.write(" variable dr equal sqrt(${dx}*${dx}+${dy}*${dy})\n")
des.write(" variable nvx equal -${dx}/${dr}*0.001\n")
des.write(" variable nvy equal -${dy}/${dr}*0.001\n")
des.write(" set atom $k vx ${nvx} vy ${nvy} vz 0.0\n")
des.write("next i\n")
des.write("jump SELF loopa\n")
des.write("\n")
des.write("write_data %s\n" % odata)
des.write("\n")
des.close()
# with open('coat6.in', 'r') as file:
# filedata = file.read()
# Replace the target string
# filedata = filedata.replace('na', str(na))
# filedata = filedata.replace('idata', idata)
# filedata = filedata.replace('odata', odata)
# Write the file out again
# with open('run_coat6.in', 'w') as file:
# file.write(filedata)
def input_coat7(oradius, idata, odata, tstep, polymer_type, HE_type, mctemp):
des = open("run_coat7.in", "w")
des.write("newton on\n")
des.write("boundary p p p\n")
des.write("units real\n")
des.write("box tilt large\n")
des.write("\n")
des.write("include ../potential_head.mod\n")
des.write("read_data %s\n" % idata)
des.write("\n")
des.write("group polymer type 1:%d\n" % polymer_type)
des.write("group HE type %d:%d\n" % (polymer_type + 1, HE_type + polymer_type))
des.write("\n")
des.write("include potential.mod\n")
des.write("\n")
des.write(" compute stress all stress/atom NULL\n")
des.write(" compute PEbond all pe/atom bond\n")
des.write(" compute PEangle all pe/atom angle\n")
des.write(
" compute PEdihed all pe/atom dihedral\n"
)
des.write(" compute PEimp all pe/atom improper\n")
des.write(" compute PEinter all pe/atom pair\n")
des.write("\n")
des.write(
"thermo_style custom step time etotal pe ke temp press pxx pyy pzz pxy pxz pyz density evdwl ecoul epair \
ebond eangle edihed eimp lx ly evdwl\n"
)
des.write("thermo 5\n")
des.write("\n")
des.write("#-------------------------------------------------------------\n")
des.write("# SIMULATION SETUP\n")
des.write("#-------------------------------------------------------------\n")
des.write("#\n")
des.write("\n")
des.write("comm_style tiled\n")
des.write("fix 2 all balance 1000 1.1 rcb\n")
des.write("\n")
des.write("run_style verlet\n")
des.write("\n")
des.write(
"region outter cylinder z 0.0 0.0 %f INF INF units box side in\n"
% (oradius + 3.5)
)
des.write("\n")
des.write(
"fix 6 HE rigid group 1 HE force * off off off torque * off off off\n"
)
des.write("fix 7 polymer nvt temp %f %f 50\n" % (mctemp, mctemp))
des.write("fix 8 all wall/region outter lj126 5.0 3.5 3.5\n")
des.write("dump 1 all atom 500 coat7.*.dump\n")
des.write("\n")
des.write("reset_timestep %d\n" % tstep)
des.write("\n")
des.write("run 5000\n")
des.write("unfix 6\n")
des.write("unfix 7\n")
des.write("\n")
des.write("write_data %s\n" % odata)
des.write("\n")
des.close()
# with open('coat7.in', 'r') as file:
# filedata = file.read()
# Replace the target string
# filedata = filedata.replace('oadius', str(oradius+3.5))
# filedata = filedata.replace('idata', idata)
# filedata = filedata.replace('odata', odata)
# filedata = filedata.replace('tstep', str(tstep))
# filedata = filedata.replace('polymer_type', ('1:%d' %polymer_type))
# filedata = filedata.replace('HE_type', ('%d:%d' %(polymer_type+1,HE_type+polymer_type)))
# Write the file out again
# with open('run_coat7.in', 'w') as file:
# file.write(filedata)
def Get_Mass_Radius(Dir, center, dimension=3, plane="xy"):
    """Return the largest atomic distance from center (3D, or within plane if dimension != 3)."""
D = 0
if dimension == 3:
for a_id in Dir["Atoms"]:
d = 0
for i in range(3):
d += (Dir["Atoms"][a_id][3 + i] - center[i]) ** 2
if D < d:
D = d
else:
i_range = []
index = {"x": 0, "y": 1, "z": 2}
for xyz in ["x", "y", "z"]:
if xyz in plane:
i_range.append(index[xyz])
for a_id in Dir["Atoms"]:
d = 0
for i in i_range:
d += (Dir["Atoms"][a_id][3 + i] - center[i]) ** 2
if D < d:
D = d
return np.sqrt(D), center
def Get_Center_of_Mass(Dir_ori):
M = 0
Mx = 0
My = 0
Mz = 0
for a_id in Dir_ori["Atoms"]:
m = Dir_ori["Masses"][Dir_ori["Atoms"][a_id][1]][0]
M += m
Mx += m * Dir_ori["Atoms"][a_id][3]
My += m * Dir_ori["Atoms"][a_id][4]
Mz += m * Dir_ori["Atoms"][a_id][5]
return [Mx / M, My / M, Mz / M]
def data_Translation(Dir, vector, box=0):
for a_id in Dir["Atoms"]:
Dir["Atoms"][a_id][3] += vector[0]
Dir["Atoms"][a_id][4] += vector[1]
Dir["Atoms"][a_id][5] += vector[2]
if box:
for i, xyz in enumerate(["x", "y", "z"]):
for lh in ["lo", "hi"]:
Dir[xyz + lh] += vector[i]
return Dir
def add_data(Dir_1, Dir_2, add="append"):
    """Merge two LAMMPS data dicts, offsetting the ids and types of the second system."""
Dir_data = {}
for i in ["atom", "bond", "angle", "dihedral", "improper"]:
if (i + "s") in Dir_1:
Dir_data[i + "s"] = Dir_1[i + "s"]
Dir_data[i + " types"] = Dir_1[i + " types"]
if (i + "s") in Dir_2:
if (i + "s") in Dir_data:
Dir_data[i + "s"] += Dir_2[i + "s"]
Dir_data[i + " types"] += Dir_2[i + " types"]
else:
Dir_data[i + "s"] = Dir_2[i + "s"]
Dir_data[i + " types"] = Dir_2[i + " types"]
if ("extra " + i + " per atom") in Dir_1:
Dir_data["extra " + i + " per atom"] = Dir_1["extra " + i + " per atom"]
if ("extra " + i + " per atom") in Dir_2:
if ("extra " + i + " per atom") in Dir_data:
Dir_data["extra " + i + " per atom"] = max(
Dir_1["extra " + i + " per atom"], Dir_2["extra " + i + " per atom"]
)
else:
Dir_data["extra " + i + " per atom"] = Dir_2["extra " + i + " per atom"]
for i in ["x", "y", "z"]:
if (i + "lo") in Dir_1:
Dir_data[i + "lo"] = Dir_1[i + "lo"]
Dir_data[i + "hi"] = Dir_1[i + "hi"]
if (i + "lo") in Dir_2:
if (i + "lo") not in Dir_data:
Dir_data[i + "lo"] = Dir_2[i + "lo"]
Dir_data[i + "hi"] = Dir_2[i + "hi"]
List = [
"Masses",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]
for key in List:
if key in Dir_1 and len(Dir_1[key]) > 0:
Dir_data[key] = Dir_1[key]
if key in Dir_2 and len(Dir_2[key]) > 0:
if key in Dir_data:
if key in [
"Masses",
"Atoms",
"Velocities",
"Bonds",
"Angles",
"Dihedrals",
"Impropers",
]:
if key == "Atoms":
a_ids = {}
inimol = max([Dir_data[key][i][0] for i in Dir_data[key]])
ini = max([i for i in Dir_data[key]])
for a_id in Dir_2[key]:
if key == "Atoms":
a_ids[a_id] = ini + a_id
Dir_data[key][ini + a_id] = Dir_2[key][a_id]
if key == "Atoms":
Dir_data[key][ini + a_id][0] = Dir_2[key][a_id][0] + inimol
Dir_data[key][ini + a_id][1] = (
Dir_2[key][a_id][1] + Dir_1["atom types"]
)
if key == "Bonds":
Dir_data[key][ini + a_id][0] = (
Dir_2[key][a_id][0] + Dir_1["bond types"]
)
if key == "Angles":
Dir_data[key][ini + a_id][0] = (
Dir_2[key][a_id][0] + Dir_1["angle types"]
)
if key == "Dihedrals":
Dir_data[key][ini + a_id][0] = (
Dir_2[key][a_id][0] + Dir_1["dihedral types"]
)
if key == "Impropers":
Dir_data[key][ini + a_id][0] = (
Dir_2[key][a_id][0] + Dir_1["improper types"]
)
if key in ["Bonds", "Angles", "Dihedrals", "Impropers"]:
for i in range(1, len(Dir_2[key][a_id])):
Dir_data[key][ini + a_id][i] = a_ids[
Dir_data[key][ini + a_id][i]
]
else:
Dir_data[key] = Dir_2[key]
return Dir_data
# def grabprocessors(ifile):
# src = open(ifile)
# for line in src.readlines():
# ln = line.split()
# if ln and ln[0] == "read_data":
# datafile = ln[1]
# break
# src.close()
# Dir = read_data(datafile)
# atoms = Dir["atoms"]
# if "bonds" in Dir:
# return atoms / 2000 + 1
# else:
# return atoms / 1000 + 1
def correctppn(ppn):
    """Round a requested core count to whole 20-core nodes; return (nodes, ppn, np)."""
    # Integer division keeps the node/processor counts integral (the original
    # expressions relied on Python 2 "/" semantics).
    nodes = ppn // 20 + 1 if (ppn // 20 and ppn % 20) else ppn // 20 if ppn // 20 else 1
    if not ppn // 20:
ppn = (
20
if ppn > 10
else 10
if ppn > 8
else 8
if ppn > 4
else 4
if ppn > 2
else 2
if ppn > 1
else 1
)
    return nodes, 20 if ppn // 20 else ppn, nodes * 20 if ppn // 20 else ppn
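# --- Examples (editor note; assumes the 20-core nodes and integer division used above) ---
#     correctppn(7)  -> (1, 8, 8)    # below one node: round the core count up to 8
#     correctppn(25) -> (2, 20, 40)  # above one node: allocate whole 20-core nodes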
def shiftpotential(ifile1, ifile2, ofile, Dir_polymer):
src = open(ifile1)
des = open(ofile, "w")
for line in src.readlines():
ln = line.split("#")[0].split()
if ln:
if ln[0] == "pair_coeff":
des.write(" ".join(ln[0:3]) + " buck " + " ".join(ln[3:]) + "\n")
src.close()
src = open(ifile2)
for line in src.readlines():
ln = line.split("#")[0].split()
if ln:
if ln[0] == "pair_coeff":
if ln[1] != "*":
ln[1] = str(eval(ln[1]) + Dir_polymer["atom types"])
if ln[2] != "*":
ln[2] = str(eval(ln[2]) + Dir_polymer["atom types"])
if ln[0] == "bond_coeff":
ln[1] = str(eval(ln[1]) + Dir_polymer["bond types"])
if ln[0] == "angle_coeff":
ln[1] = str(eval(ln[1]) + Dir_polymer["angle types"])
if ln[0] == "dihedral_coeff":
ln[1] = str(eval(ln[1]) + Dir_polymer["dihedral types"])
if ln[0] == "improper_coeff":
ln[1] = str(eval(ln[1]) + Dir_polymer["improper types"])
des.write(" ".join(ln) + "\n")
src.close()
for key in ["Bond", "Angle", "Dihedral", "Improper"]:
if (key + " Coeffs") in Dir_polymer:
for i in Dir_polymer[key + " Coeffs"]:
if key == "Bond":
des.write(
"bond_coeff %i harmonic %s\n"
% (
i,
" ".join([str(k) for k in Dir_polymer[key + " Coeffs"][i]]),
)
)
elif key == "Angle":
des.write(
"angle_coeff %i %s\n"
% (
i,
" ".join([str(k) for k in Dir_polymer[key + " Coeffs"][i]]),
)
)
elif key == "Dihedral":
des.write(
"dihedral_coeff %i %s\n"
% (
i,
" ".join([str(k) for k in Dir_polymer[key + " Coeffs"][i]]),
)
)
elif key == "Improper":
des.write(
"improper_coeff %i %s\n"
% (
i,
" ".join([str(k) for k in Dir_polymer[key + " Coeffs"][i]]),
)
)
des.close()
def addatomtype(ifile, ofile, elementlist):
src = open(ifile)
des = open(ofile, "w")
a = 1
for line in src.readlines():
ln = line.split()
if ln:
des.write(" ".join(ln) + "\n")
a += 1
src.close()
for e in elementlist:
if e == "C":
des.write("%d C_3\n" % a)
else:
des.write("%d %s\n" % (a, e))
a += 1
des.close()
# def writeghostdata(ifile):
# Dir = read_data(ifile)
# write_data(Dir, "ghost_grain.data", v=0, a=1)
def writepolymodfile(polymer_type_custom, ofile1):
mw = {}
des = open(ofile1, "w")
des.write("#\n")
des.write("# PolymerBuilder input file: order of inputs is critical\n")
des.write("#\n")
des.write("\n")
des.write("# Temperature (K)\n")
des.write("mctemp # Temperature (K)\n")
des.write("\n")
des.write("# Elements: atomic type indices for all monomers\n")
des.write(
"%d # Number of atom types\n"
% polymer_type_custom["atom_type"]
)
atom_type = {}
a_id = 0
for i in range(polymer_type_custom["nm"]):
for j in range(len(polymer_type_custom["monomer"][i + 1]["info"])):
if polymer_type_custom["monomer"][i + 1]["info"][j][0] not in atom_type:
a_id += 1
atom_type[a_id] = polymer_type_custom["monomer"][i + 1]["info"][j][0]
atom_type[polymer_type_custom["monomer"][i + 1]["info"][j][0]] = a_id
ofile = open("str2lammps/types/custom_atom_type.dat", "w")
for i in range(polymer_type_custom["atom_type"]):
des.write(
"%s # Type %d element\n"
% (atom_type[i + 1].split("_")[0], (i + 1))
)
ofile.write("%d %s\n" % ((i + 1), atom_type[i + 1]))
ofile.close()
des.write("\n")
des.write("# Number of monomers\n")
des.write("%d # Number of monomer types\n" % polymer_type_custom["nm"])
des.write("\n")
for i in range(polymer_type_custom["nm"]):
des.write("# Monomer: z-matrix\n")
des.write(
"%d # Number of atoms in z-matrix (lengths in A, angles in degrees)\n"
% len(polymer_type_custom["monomer"][i + 1]["info"])
)
for j in range(len(polymer_type_custom["monomer"][i + 1]["info"])):
des.write(
str(atom_type[polymer_type_custom["monomer"][i + 1]["info"][j][0]])
+ " "
+ " ".join(polymer_type_custom["monomer"][i + 1]["info"][j][1:])
+ "\n"
)
des.write(
"%d # Number of backbone atoms, z-matrix top\n"
% polymer_type_custom["monomer"][i + 1]["torsion"]["len"]
)
des.write("\n")
des.write("# Monomer: backbone torsion angle probabilities\n")
des.write(
"# Starting with backbone atom 3, specify whether the torsion should change, \n"
)
des.write("# and, if so, how.\n")
des.write("# Values for specification:\n")
des.write("# 0: no change\n")
des.write("# 1: uniform probability for all angles\n")
des.write("# 2: energy levels associated with specific angles\n")
des.write("# 3: probability associated with specific angles\n")
des.write("\n")
if "all" in polymer_type_custom["monomer"][i + 1]["torsion"]:
for j in range(polymer_type_custom["monomer"][i + 1]["torsion"]["len"] - 2):
tor_type = int(
polymer_type_custom["monomer"][i + 1]["torsion"]["all"][0]
)
des.write(
"%d # Torsion %d specification\n"
% (tor_type, (j + 3))
)
if tor_type == 2 or tor_type == 3:
src = open(
"../"
+ polymer_type_custom["monomer"][i + 1]["torsion"]["all"][1]
)
for line in src.readlines():
des.write(line)
src.close()
des.write("\n")
else:
for j in range(polymer_type_custom["monomer"][i + 1]["torsion"]["len"] - 2):
if str(j + 3) in polymer_type_custom["monomer"][i + 1]["torsion"]:
tor_type = int(
polymer_type_custom["monomer"][i + 1]["torsion"][str(j + 3)][0]
)
des.write(
"%d # Torsion %d specification\n"
% (tor_type, (j + 3))
)
if tor_type == 2 or tor_type == 3:
src = open(
"../"
+ polymer_type_custom["monomer"][i + 1]["torsion"][
str(j + 3)
][1]
)
for line in src.readlines():
des.write(line)
src.close()
des.write("\n")
else:
print(
"Backbone torsion angle %d probabilities type for monomer %d not specified, use default with \
no change"
% ((j + 3), (i + 1))
)
tor_type = 0
des.write(
"%d # Torsion %d specification: no change\n"
% (tor_type, (j + 3))
)
des.write("\n")
des.write(
"3.0 # Torsion delta: change in torsions is +/- this value\n"
)
des.write(
"10 # Number of torsion delta steps to minimize torsion energy\n"
)
des.write("\n")
des.write("# Backbone bond length between all monomers (A)\n")
des.write("1.53\n")
des.write("\n")
des.write("# Monomer arrangements\n")
des.write(
"%d # Number of monomer arrangements\n"
% polymer_type_custom["nc"]
)
for i in range(polymer_type_custom["nc"]):
mw[i] = 0
if polymer_type_custom["chain"][i + 1]["arrangement"]["type"]:
des.write(
"1 # Arrangement: 0 = pattern, 1 = probability\n"
)
des.write(
"%s # Probability of monomer(s)\n"
% (
" ".join(
polymer_type_custom["chain"][i + 1]["arrangement"]["sequence"]
)
)
)
length = len(polymer_type_custom["chain"][i + 1]["arrangement"]["sequence"])
for j in range(length):
mw[i] += polymer_type_custom["monomer"][j + 1]["mass"] * float(
polymer_type_custom["chain"][i + 1]["arrangement"]["sequence"][j]
)
else:
des.write(
"0 # Arrangement: 0 = pattern, 1 = probability\n"
)
des.write(
"%d # Number of monomers in first pattern\n"
% polymer_type_custom["chain"][i + 1]["arrangement"]["len"]
)
des.write(
"%s # Repeat...\n"
% (
" ".join(
polymer_type_custom["chain"][i + 1]["arrangement"]["sequence"]
)
)
)
length = polymer_type_custom["chain"][i + 1]["arrangement"]["len"]
for j in polymer_type_custom["chain"][i + 1]["arrangement"]["sequence"]:
mw[i] += polymer_type_custom["monomer"][int(j)]["mass"] / length
r_mw = 0
for i in range(polymer_type_custom["nc"]):
des.write("%f " % polymer_type_custom["chain"][i + 1]["probability"])
r_mw += mw[i] * polymer_type_custom["chain"][i + 1]["probability"]
des.write(" # Probabilities of each monomer arrangement\n")
des.write("\n")
des.write("# System\n")
des.write("nc # Number of chains to build\n")
des.write("nm # Number of monomers per chain\n")
des.write(
"lcx lcy lcz # Dimensions of cell (A); not used if density > 0.0\n"
)
des.write("0.0 # Density in g/cm^3\n")
des.write("\n")
des.write("# Excluded cylinders -- define volumes in which no polymer exists\n")
des.write("1 # Number of excluded cylinders\n")
des.write(
"ccx ccy ccz # Cylinder 1: start position x y z -- cylinder end center \n"
)
des.write(
"dx dy dz # Cylinder 1: direction from start position -- axis\n"
)
des.write("crad # Cylinder 1: radius, extension from axis\n")
des.write("clc # Cylinder 1: length\n")
des.write("\n")
des.write("1 # Included cylinders -- define volumes in which polymers exist\n")
des.write(
"ccx ccy ccz # Cylinder 1 : start position x y z -- cylinder end center\n"
)
des.write("dx dy dz # Cylinder 1 : direction from start position -- axis\n")
des.write("irad # Cylinder 1 : radius, extension from axis\n")
des.write("clc # Cylinder 1 : length\n")
des.write("\n")
des.write("# Excluded slabs -- define volumes in which no polymer exists\n")
des.write("0 # Number of excluded slabs\n")
des.write("\n")
des.write("# Configurations and interactions\n")
des.write("40 # Max number of configurations to test\n")
des.write(
"1.0 # Keep chains which are this fraction of desired length\n"
)
des.write("1 # 1: Self-avoiding\n")
des.write("1 # Self-avoiding cutoff (A)\n")
des.write("1 # 1: Long range interactions\n")
des.write("5 # Interaction cutoff (A)\n")
des.write("5.0 # Bin size (A)\n")
des.write("4 # Bond cutoff\n")
des.write("\n")
des.write("# Output\n")
des.write(
"1 # 1: Write (unwrapped) PDB file of constructed chains\n"
)
des.write(
"1 # 1: Write wrapped PDB file of constructed chains\n"
)
des.write("0 # 1: Write PDB files of monomer rotation\n")
des.write(
"0 # 1: Write output file of chain length vs. # monomers\n"
)
des.write("0 # 1: Write output file of chain length histogram\n")
des.write(
"0 # 1: Write torsion angle probabilities, selection histogram\n"
)
des.write("0 # 1: Write z-matrices of all chains\n")
des.write("0 # 1: Write final system energy\n")
des.write("\n")
des.write("#\n")
des.write("# Status and messages\n")
des.write("#\n")
des.write("1 # 1: Write messages to stdout; 0: Write to file\n")
des.write(
"# If previous line is 1, nothing more is needed; if it is 0, file name follows\n"
)
des.write("#/path/to/log.build\n")
des.write("1 # 1: Write status to stdout; 0: Write to file\n")
des.write(
"# XXX Only one of status.build or bar/Rappture flag should be uncommented!!\n"
)
des.write("# If writing status to file, file name follows\n")
des.write("#/path/to/status.build\n")
des.write(
"# ELSE if writing status to stdout, 1: terminal bar, 0: Rappture status lines\n"
)
des.write("1\n")
des.write("\n")
des.write("# RNG seed; use time if 0\n")
des.write("0 \n")
des.write("\n")
des.write(
"# Scale factor used to compare atom distances in monomer to equilibrium bond\n"
)
des.write(
"# distances when searching for bonds not specified in monomer z-matrix\n"
)
des.write("1.1\n")
des.write("\n")
des.close()
return r_mw
def write_minimize(ofile, X6file, afile):
des = open(ofile, "w")
des.write("# General parameters\n")
des.write("units real\n")
des.write("atom_style full\n")
des.write("boundary p p p\n")
des.write("special_bonds lj/coul 0.0 0.0 1.0 dihedral yes\n")
des.write("dielectric 1.0\n")
des.write("pair_style lj/cut 12.0\n")
des.write("bond_style harmonic\n")
des.write("angle_style harmonic\n")
des.write("dihedral_style harmonic\n")
des.write("improper_style harmonic\n")
des.write("read_data step0.data # polymer_relax.data\n")
des.write("neighbor 0.3 bin\n")
des.write(
"thermo_style custom step etotal ke temp pe ebond eangle edihed eimp evdwl ecoul elong press pxx pyy pzz \
pxy pxz pyz lx ly lz vol density\n"
)
des.write("thermo 10\n")
des.write("thermo_modify flush yes\n")
des.write("fix LB all balance 1000 1.05 shift xy 10 1.05\n")
des.write("# Minimization parameters\n")
des.write("min_style cg # hftn\n")
des.write("min_modify dmax 0.02\n")
des.write("min_modify line quadratic # backtrack\n")
des.write("neigh_modify every 1 delay 0\n")
LJ_params = {
"H": [3.195, 0.0152],
"C": [3.8983, 0.0951],
"N": [3.6621, 0.0774],
"O": [3.4046, 0.0957],
"F": [3.4720, 0.0725],
"S": [4.0300, 0.3440],
"Cl": [3.9503, 0.2833],
"Si": [4.27, 0.31],
}
d0_to_epsilon = 1.0
r0_to_sigma = 1.0 / (2.0 ** (1.0 / 6.0))
lj_D0 = {}
lj_R0 = {}
src = open(afile)
el_types = {}
for line in src.readlines():
ln = line.split()
if ln:
el_types[int(ln[0])] = ln[1].split("_")[0]
ntypes = len(el_types)
for i in range(ntypes):
lj_R0[i] = {}
lj_D0[i] = {}
param_i = LJ_params[el_types[i + 1]]
for j in range(i, ntypes):
param_j = LJ_params[el_types[j + 1]]
lj_R0[i][j] = np.sqrt(param_i[0] * param_j[0])
lj_D0[i][j] = np.sqrt(param_i[1] * param_j[1])
lammps_min_steps = 5000
lammps_min_levels = 3
lammps_min_init = 0.5
dp = (1.0 - lammps_min_init) / (lammps_min_levels - 1)
for level in range(lammps_min_levels):
des.write("# Minimization %d\n" % (i + 1))
for i in range(ntypes):
for j in range(i, ntypes):
des.write("pair_coeff %d %d " % (i + 1, j + 1))
d0 = lj_D0[i][j] * (lammps_min_init + dp * level)
r0 = lj_R0[i][j] * (lammps_min_init + dp * level)
des.write("%f %f\n" % (d0 * d0_to_epsilon, r0 * r0_to_sigma))
des.write("minimize 1.0e-9 1.0e-9 %d 100000\n" % lammps_min_steps)
des.write("# Dump minimized system\n")
des.write("dump 1 all atom 1 min.dump\n")
des.write("dump_modify 1 image yes scale no\n")
des.write("run 0\n")
des.write("undump 1\n")
des.write("# MD parameters\n")
des.write("neigh_modify every 1 delay 5\n")
des.write("pair_style buck/coul/long 12.0 12.0\n")
des.write("kspace_style pppm 1e-4\n")
src = open(X6file)
for line in src.readlines():
des.write(line)
des.write("minimize 1.0e-9 1.0e-9 %d 100000\n" % lammps_min_steps)
des.write("write_data step1.data\n")
des.write("\n")
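# --- Editor note on the soft-start minimization written above ---
# The lj/cut pair coefficients are ramped from lammps_min_init (50%) to 100% of the
# tabulated LJ_params over lammps_min_levels stages, with geometric-mean mixing
#     R0_ij = sqrt(R0_i * R0_j),   D0_ij = sqrt(D0_i * D0_j).
# Since lj/cut expects (epsilon, sigma) rather than (D0, R0), R0 is rescaled by
# 2**(-1/6) -- the 12-6 minimum sits at r = 2**(1/6) * sigma.  For a C-H pair this
# gives R0 ~ sqrt(3.8983 * 3.195) ~ 3.53 -> sigma ~ 3.14, and
# D0 ~ sqrt(0.0951 * 0.0152) ~ 0.0380 (= epsilon, in the file's "units real").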
# def main(args):
# args = complete_args(args)
# os.system("cp polymer_types/" + polymer_type + "/* .")
# Get number of chains, molecules
# p_fra = 0.05
# if "p_fra" in args:
# if len(args["p_fra"]) == 1:
# p_fra = eval(args["p_fra"][0])
# if p_fra < 0 or p_fra > 1:
# print("Please input a valid number between 0 to 1")
# return False
# else:
# print(
# "Molecular weight fraction of polymer coated: "
# + str(p_fra * 100)
# + "%"
# )
# else:
# print("Please specify molecular weight fraction of polymer coated")
# return False
# else:
# print("Default 5% molecular weight fraction of polymer coated")
# nm = 40
# if "nm" in args:
# if len(args["nm"]) == 1:
# nm = eval(args["nm"][0])
# if nm < 1:
# print("Please input a valid number equal or larger than 1")
# return False
# else:
# print(str(int(nm)) + " monomers per chain")
# else:
# print("Please specify number of monomers per chain")
# return False
# else:
# print("Default 40 monomers per chain")
# mctemp = 600
# if "mctemp" in args:
# if len(args["mctemp"]) == 1:
# mctemp = eval(args["mctemp"][0])
# if mctemp < 0:
# print("Please input a valid number equal or larger than 0")
# return False
# else:
# print("Monte Carlo temperature: " + str(mctemp) + " K")
# else:
# print("Please specify Monte Carlo temperature")
# return False
# else:
# print("Default Monte Carlo temperature: 600 K")
# np_flag = 0
# rnp_flag = 0
# if "parallel" in args:
# np_flag = 1
# print("LAMMPS will run in parallel mode")
# if len(args["parallel"]) > 0:
# if args["parallel"] == ["np"]:
# if len(args["parallel"]) > 1:
# np = int(eval(args["parallel"][1]))
# if np <= 0:
# print("Please input a valid number larger than 0")
# return False
# else:
# print("%d of processors will be in use" % np)
# else:
# print("Please input a number of processors you want to use")
# return False
# else:
# rnp_flag = 1
# print(
# "Will calculating recommended number of processors after initial polymer configuration generated"
# )
# else:
# rnp_flag = 1
# print(
# "Will calculating recommended number of processors after initial polymer configuration generated"
# )
# print("Running Lammps, get information about grain")
# Run Lammps, get information about grain
# Center of cylinder from upper and lower coordinates
# input_file(datafile)
# os.chdir('..')
# writeinlammps(datafile, potential_headfile, potentialfile)
# run_lammps()
# os.system('mv inr.lammps code')
# os.system('mv log.lammps code')
# os.system('mv tmp.out code')
# os.chdir('code')
# cell_sizes, delta_cell = read_cell_sizes("../" + datafile)
# periodic = min(delta_cell.items(), key=lambda x: x[1])[0]
# mass, com, boundaries, coord, radius = get_boundaries(periodic)
# dimension = 2
# plane = "xy"
# writeghostdata("../" + datafile)
# Dir_seed = read_data("ghost_grain.data")
# center = Get_Center_of_Mass(Dir_seed)
# D, center = Get_Mass_Radius(Dir_seed, center, dimension, plane)
# radius = math.ceil(D) + 5
# print("Grain Radius:", D) #'Coordinates cylinder: ', coord,
# GD = D
# nc = calc_polymer(mass, nm, polymer_type, p_fra, custom=mw)
# Get new cell for polymod
# Run polymod
# shift, cell_polymod, cpos, caxis, clength = parameters(
# periodic, cell_sizes, coord, boundaries, radius
# )
# input_polymod(nc, nm, cell_polymod, cpos, caxis, radius, clength, mctemp)
# p_flag = 1
# while p_flag:
# print("Running PolymerModeler, generate initial configuration of polymers")
# run_polymod()
# shutil.copy("atoms.dat", "./str2lammps/types")
# shutil.copy("bonds.dat", "./str2lammps/types")
# p = polymer_type.split("-")[0]
# shutil.copy(
# "./str2lammps/types/%s_atom_type.dat" % p,
# "./str2lammps/types/atom_type.dat",
# )
# shutil.copy("./str2lammps/types/%s_atom_type.dat" % p, "%s_atom_type.dat" % p)
# shutil.copy("bond_type.dat", "./str2lammps/types")
# addatomtype("%s_atom_type.dat" % p, "atom_type.dat", elementlist)
# Str2Lammps
# os.chdir("./str2lammps/")
# print os.getcwd()
# new_typing, data4lammps = run_data4lammps("Gasteiger", "Dreiding", cell_polymod)
# subprocess.Popen(new_typing, shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# print(data4lammps)
# print("Generating initial polymer datafile")
# os.system(data4lammps)
# return_code = subprocess.Popen(data4lammps, shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# os.chdir("../unwrap/")
# print("Unwrap polymers")
# os.system("../lmp_mpi < uw.in")
# return_code = subprocess.Popen('../lmp_mpi < uw.in', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# dump2data("expanded.data", "unwrap.dump", "step0.data")
# os.chdir("..")
# os.system("cp unwrap/step0.data .")
# os.system("cp unwrap/unwrap.dump .")
# Dir_dump = read_dump("unwrap.dump")
# na = 0
# for t in Dir_dump:
# na = Dir_dump[t]["NUMBER OF ATOMS"]
# break
# write_minimize(
# "minimize.in",
# "str2lammps/X6paircoeffs.txt",
# "str2lammps/types/%s_atom_type.dat" % p,
# )
# if rnp_flag:
# print("Calculating recommended number of processors")
# ppn = grabprocessors("minimize.in")
# print(ppn)
# nodes, ppn, np = correctppn(ppn)
# print("%d of processors will be in use" % np)
# print("Running LAMMPS, minimize the intial configuration")
# if np_flag:
# os.system("mpiexec -np %d ./lmp_mpi < minimize.in" % np)
# else:
# os.system("./lmp_mpi < minimize.in")
# return_code = subprocess.Popen('./lmp_mpi < minimize.in', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# os.system("cp log.lammps log.minimize")
# print("Runing 10 steps for coating")
# center = [cpos[0], cpos[1], 0]
# for i in range(10):
# print("Step %d" % (i + 1))
# input_coat4(
# na, cpos, "step%d.data" % (i * 3 + 1), "step%d.data" % (i * 3 + 2)
# )
# print("Running LAMMPS, assign velocities")
# os.system("./lmp_mpi < run_coat4.in")
# return_code = subprocess.Popen('./lmp_mpi < run_coat4.in', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# os.system("cp log.lammps log.run_coat4_%d" % i)
# Dir_seed = read_data("step%d.data" % (i * 3 + 2))
# oradius, center = Get_Mass_Radius(Dir_seed, center, dimension, plane)
# print("Current outter radius:", oradius)
# Dir = read_data("step%d.data" % (i * 3 + 2))
# if "Pair Coeffs" in Dir:
# del Dir["Pair Coeffs"]
# write_data(Dir, "step%d.data" % (i * 3 + 3))
# input_coat5(
# cpos,
# radius - 5,
# oradius,
# "step%d.data" % (i * 3 + 3),
# "step%d.data" % (i * 3 + 4),
# i * 5000,
# "str2lammps/X6paircoeffs.txt",
# mctemp,
# )
# print("Running LAMMPS, coat polymers")
# if np_flag:
# os.system("mpiexec -np %d ./lmp_mpi < run_coat5.in" % np)
# else:
# os.system("./lmp_mpi < run_coat5.in")
# return_code = subprocess.Popen('./lmp_mpi < run_coat5.in', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# os.system("cp log.lammps log.run_coat5_%d" % i)
# if not os.path.exists("step%d.data" % (i * 3 + 4)):
# break
# if os.path.exists("step31.data"):
# os.system("mv step31.data ../polymer.data")
# p_flag = 0
# else:
# print("Failed. Generating another configuration")
# print("Polymer datafile polymer.data is ready, now combining with grain datafile")
# Dir_polymer = read_data("../polymer.data")
# vector = [-cpos[0], -cpos[1], 0]
# Dir_polymer = data_Translation(Dir_polymer, vector, box=1)
# vector = [-coord["x"], -coord["y"], 0]
# Dir_RDX = data_Translation(Dir_RDX, vector)
# Dir_data = add_data(Dir_polymer, Dir_RDX)
# write_data(Dir_data, "../initial_polymer_grain.data", v=0)
# print("Initial combined datafile initial_polymer_grain.data is ready to use")
# print("Center Coordinates: x, ", 0, "y, ", 0)
# print("Calculating initial polymer thickness")
# Dir_seed = read_data("../initial_polymer_grain.data")
# center = [0, 0, 0]
# D, center = Get_Mass_Radius(Dir_seed, center, dimension, plane)
# PT = D - GD
# print("Initial polymer thickness:", PT)
# des=open('p_thickness.dat','w')
# des.write(str(PT))
# des.close()
# shutil.copy("atom_type.dat", "./str2lammps/types")
# os.chdir("./str2lammps/")
# new_typing, data4lammps = run_data4lammps("Gasteiger", "Dreiding", cell_polymod)
# print("Generating new potentialfile")
# os.system(data4lammps)
# os.chdir("..")
# shutil.copy("./str2lammps/X6paircoeffs.txt", "X6paircoeffs.txt")
# shiftpotential("X6paircoeffs.txt", "../potential.mod", "potential.mod", Dir_polymer)
# print("Runing 10 steps for coating with grain")
# center = [0, 0, 0]
# input_coat6(na, "../initial_polymer_grain.data", "step32.data")
# if rnp_flag:
# print("Calculating recommended number of processors")
# ppn = grabprocessors("run_coat6.in")
# print(ppn)
# nodes, ppn, np = correctppn(ppn)
# print("%d of processors will be in use" % np)
# for i in range(10):
# print("Step %d" % (i + 1))
# if i:
# input_coat6(na, "step%d.data" % (i * 3 + 31), "step%d.data" % (i * 3 + 32))
# print("Running LAMMPS, assign velocities")
# os.system("./lmp_mpi < run_coat6.in")
# return_code = subprocess.Popen('./lmp_mpi < run_coat4.in', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# os.system("cp log.lammps log.run_coat6_%d" % i)
# Dir_seed = read_data("step%d.data" % (i * 3 + 32))
# oradius, center = Get_Mass_Radius(Dir_seed, center, dimension, plane)
# print("Current outter radius:", oradius)
# input_coat7(
# oradius,
# "step%d.data" % (i * 3 + 32),
# "step%d.data" % (i * 3 + 33),
# i * 5000,
# Dir_polymer["atom types"],
# len(elementlist),
# mctemp,
# )
# print("Running LAMMPS, coat polymers")
# if np_flag:
# os.system("mpiexec -np %d ./lmp_mpi < run_coat7.in" % np)
# else:
# os.system("./lmp_mpi < run_coat7.in")
# return_code = subprocess.Popen('./lmp_mpi < run_coat5.in', shell=True,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# stdout,stderr = return_code.communicate()
# os.system("cp log.lammps log.run_coat7_%d" % i)
# Dir = read_data("step%d.data" % (i * 3 + 33))
# if "Pair Coeffs" in Dir:
# del Dir["Pair Coeffs"]
# if "Bond Coeffs" in Dir:
# del Dir["Bond Coeffs"]
# if "Angle Coeffs" in Dir:
# del Dir["Angle Coeffs"]
# if "Dihedral Coeffs" in Dir:
# del Dir["Dihedral Coeffs"]
# if "Improper Coeffs" in Dir:
# del Dir["Improper Coeffs"]
# write_data(Dir, "step%d.data" % (i * 3 + 34))
# os.system("mv step61.data ../%s" % output)
# print("Datafile %s is ready to use" % output)
# print("Center Coordinates: x, ", 0, "y, ", 0)
# print("Calculating initial polymer thickness")
# Dir_seed = read_data("../%s" % output)
# center = [0, 0, 0]
# D, center = Get_Mass_Radius(Dir_seed, center, dimension, plane)
# PT = D - GD
# print("Polymer thickness:", PT)
# des = open("p_thickness.dat", "w")
# des.write(str(PT))
# des.close()
# return True
def zen(with_attribution=True):
quote = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
if with_attribution:
quote += "\n\t<NAME>"
return quote
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from <NAME>"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
infile = sys.argv[1]
# args = polymerxtal.read_input(infile)
# main(arg)
|
[
"os.path.join",
"numpy.sqrt"
] |
[((3267, 3301), 'os.path.join', 'os.path.join', (['directory', '"""main.py"""'], {}), "(directory, 'main.py')\n", (3279, 3301), False, 'import os, sys, os.path\n'), ((3315, 3357), 'os.path.join', 'os.path.join', (['directory', '"""doAtomTyping.py"""'], {}), "(directory, 'doAtomTyping.py')\n", (3327, 3357), False, 'import os, sys, os.path\n'), ((19477, 19487), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (19484, 19487), True, 'import numpy as np\n'), ((42059, 42091), 'numpy.sqrt', 'np.sqrt', (['(param_i[0] * param_j[0])'], {}), '(param_i[0] * param_j[0])\n', (42066, 42091), True, 'import numpy as np\n'), ((42118, 42150), 'numpy.sqrt', 'np.sqrt', (['(param_i[1] * param_j[1])'], {}), '(param_i[1] * param_j[1])\n', (42125, 42150), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import tools
import tiles
import interp
import computational as cpt
import matplotlib.pyplot as plt
import gsw
from scipy import interpolate
from scipy import integrate
import os
# plt.ion()
time_flag = 'annual' # 'DJF' # 'annual'
typestat = 'zmean'
seasons = ['DJF', 'MAM', 'JJA', 'SON']
var_stats = ["W", "CT", "SA", "CT_STD", "BVF2_STD", "RSTAR", "CF"]
attributes = {"W": ("weight (dimensionless)", "1", [0., 10000.]),
"CT": ("conservative temperature", "degC", [-3., 50.]),
"SA": ("absolute salinity", "g kg-1", [0., 50.]),
"CT_STD": ("CT standard deviation", "degC", [0., 20.]),
"BVF2_STD": ("square of Brunt Vaisala Frequency standard deviation", "s-1", [0., 1e-2]),
"RSTAR": ("compensated density", "kg m-3", [1000., 1032.]),
"CF": ("compressibility factor (dimensionless)", "1", [0.99, 1.])}
global_attributes = {
"title": "World Ocean Climatology of mean temperature, salinity and compensated density"
}
tiles_dir = "%s/%g/stats" % (tiles.tiles_dir, tiles.reso)
var_dir = {v: tiles_dir+"/%s" % v for v in var_stats}
tiles_file = "%s/stats_%s.pkl" # % (var_dir[var], tile)
zref = tools.zref
threshold = 5e-2
def create_folders():
for d in [tiles_dir]+list(var_dir.values()):
if os.path.exists(d):
pass
else:
os.makedirs(d)
def compute_avg(j, i, lonr, latr, latdeg, LONr, LATr, resor, data):
    """Weighted mean/std of CT, SA and BVF2 at grid node (j, i), plus RSTAR and CF."""
w = cpt.compute_weight(lonr[i], latr[j], LONr, LATr, resor)
clim = pd.DataFrame(0., columns=var_stats, index=zref)
profiles_to_use = (w > threshold)
tags = profiles_to_use.index
w = w[tags]
totalweight = w.sum()
print(" weight=%2.1f" % (totalweight), end="")
if totalweight < 2:
clim[:] = np.nan
else:
CT = data["CT"].loc[tags, :]
SA = data["SA"].loc[tags, :]
BVF2 = data["BVF2"].loc[tags, :]
bad = CT.isna() | (CT < -3) | (CT > 50) | (SA < 0) | (SA > 50)
nz = len(zref)
#weight = w[:, np.newaxis]*np.ones((nz,))
weight = CT.copy()
weight.iloc[:, :] = w[:, np.newaxis]
weight[bad] = 0.
CT[bad] = 0.
SA[bad] = 0.
W = np.sum(weight, axis=0)
clim.W[:] = W
z0 = np.sum(CT * weight, axis=0)
z2 = np.sum(CT*CT * weight, axis=0)
clim.CT[:] = z0/W
sigma = np.sqrt((z2-z0*z0/W)/(W-1))
clim.CT_STD[:] = sigma
z0 = np.sum(SA * weight, axis=0)
clim.SA[:] = z0/W
z0 = np.sum(BVF2 * weight, axis=0)
z2 = np.sum(BVF2*BVF2 * weight, axis=0)
sigma = np.sqrt((z2-z0*z0/W)/(W-1))
clim.BVF2_STD[:] = sigma
if True:
rhostar, compf = comp_rhostar(clim.SA, clim.CT, latdeg)
clim.RSTAR[:] = rhostar
clim.CF[:] = compf
clim[clim == 0.] = np.nan
return clim, tags
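# --- Formula sketch (editor note) ---
# compute_avg() forms weighted climatological statistics over the profiles kept by
# the weight threshold.  With per-profile weights w_p (zeroed where a level is
# flagged bad) and W = sum_p w_p at each depth,
#     mean(x) = sum_p w_p x_p / W
#     std(x)  = sqrt((sum_p w_p x_p**2 - (sum_p w_p x_p)**2 / W) / (W - 1))
# i.e. the usual one-pass weighted standard deviation (the W - 1 denominator treats
# the weights as effective counts).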
def comp_rhostar(Si, Ti, lat):
    """Return (rho*, E): in-situ density compensated by the compressibility factor E."""
pi = gsw.p_from_z(-zref, lat)
cs = gsw.sound_speed(Si, Ti, pi)
Ri = gsw.rho(Si, Ti, pi)
g = gsw.grav(lat, pi[0])
E = np.zeros((len(zref),))
#plt.plot(Ri, -zref)
f = interpolate.interp1d(zref, cs)
def e(x): return -g/f(x)**2
if True:
for k, z in enumerate(zref):
if k == 0:
r, E[k] = 0., 1.
else:
#r1,p = integrate.quad(e,zref[k-1],z,epsrel=1e-1)
x = np.linspace(zref[k-1], z, 10)
dx = x[1]-x[0]
r1 = integrate.trapz(e(x), dx=dx)
r += r1
E[k] = np.exp(r)
return Ri*E, E
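# --- Derivation note (editor addition) ---
# comp_rhostar() returns the compensated density rho* = rho(SA, CT, p) * E(z) together
# with the compressibility factor
#     E(z) = exp(-int_0^z g / c(z')**2 dz'),
# where c is the sound speed.  The integral is accumulated level by level with a
# 10-point trapezoidal rule between successive zref depths, so E(0) = 1 and E
# decreases slowly with depth.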
def get_grid_on_box(b):
reso = tiles.reso
lonmin = np.ceil(b["LONMIN"]/reso)*reso
lonmax = np.floor(b["LONMAX"]/reso)*reso
latmin = np.ceil(b["LATMIN"]/reso)*reso
latmax = np.floor(b["LATMAX"]/reso)*reso
latmin = max(latmin, -80) # TODO: replace with min(latglo)
latmax = min(latmax, 80) # TODO
lon = np.arange(lonmin, lonmax+reso, reso)
lat = np.arange(latmin, latmax+reso, reso)
return lat, lon
def compute_stats(bb, tile):
# read more than just one tile => to cope with the halo
tile_list, rect = tiles.tiles_with_halo(bb, tile)
argo = tiles.read_argo_tile(tile_list)
data = interp.read_profiles(tile_list)
#argo = tiles.extract_in_tile(argo, rect)
argo = argo[argo.STATUS == "D"]
for var in data.keys():
data[var] = data[var].loc[argo.index, :]
reso = tiles.reso
zref = tools.zref
CT = data['CT']
SA = data['SA']
# patch TO REMOVE LATER
CT.iloc[:, 1] = 0.5*(CT.iloc[:, 0]+CT.iloc[:, 2])
SA.iloc[:, 1] = 0.5*(SA.iloc[:, 0]+SA.iloc[:, 2])
LON = argo['LONGITUDE']
LAT = argo['LATITUDE']
lat, lon = get_grid_on_box(bb[tile])
LONr = np.deg2rad(LON)
LATr = np.deg2rad(LAT)
lonr = np.deg2rad(lon)
latr = np.deg2rad(lat)
resor = np.deg2rad(reso)
nlon, nlat, nz = len(lon), len(lat), len(zref)
CTbar = np.zeros((nlat, nlon, nz))
SAbar = np.zeros((nlat, nlon, nz))
CTstd = np.zeros((nlat, nlon, nz))
BVF2std = np.zeros((nlat, nlon, nz))
RHOSTAR = np.zeros((nlat, nlon, nz))
CF = np.zeros((nlat, nlon, nz))
W = np.zeros((nlat, nlon, nz))
monitor_file = "monitor_%s.txt" % tile
with open(monitor_file, "w") as fid:
fid.write("MEANSTATE / #profiles: %i / nlat x nlon: %i" % (len(argo), nlat*nlon))
#fig = plt.figure()
for j in range(nlat):
for i in range(nlon):
print("\r j=%2i/%i-%2i/%i" % (j, nlat, i, nlon), end="")
clim, tags = compute_avg(
j, i, lonr, latr, lat[j], LONr, LATr, resor, data)
CTbar[j, i, :] = clim["CT"]
SAbar[j, i, :] = clim["SA"]
CTstd[j, i, :] = clim["CT_STD"]
BVF2std[j, i, :] = clim["BVF2_STD"]
RHOSTAR[j, i, :] = clim["RSTAR"]
CF[j, i, :] = clim["CF"]
W[j, i, :] = clim["W"]
# fig.canvas.draw()
mapvar = {"CT": CTbar, "SA": SAbar, "CT_STD": CTstd,
"BVF2_STD": BVF2std,
"RSTAR": RHOSTAR, "CF": CF, "W": W}
print()
for var in var_stats:
v = mapvar[var]
d = var_dir[var]
f = tiles_file % (d, tile)
print("write %s" % f)
pd.to_pickle(v, f)
os.system("rm %s" % monitor_file)
def read(tile, var, transpose=True):
d = var_dir[var]
f = tiles_file % (d, tile)
print(f)
if os.path.exists(f):
data = pd.read_pickle(f)
if transpose:
data = np.transpose(data, (2, 0, 1))
else:
data = None
return data
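# --- Usage sketch (editor note; tile ids follow the tiles module conventions) ---
#     ct = read(tile, "CT")                   # (nz, nlat, nlon) after the transpose
#     w  = read(tile, "W", transpose=False)   # original (nlat, nlon, nz) layout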
|
[
"numpy.sum",
"numpy.floor",
"numpy.arange",
"numpy.exp",
"pandas.to_pickle",
"scipy.interpolate.interp1d",
"gsw.p_from_z",
"pandas.DataFrame",
"os.path.exists",
"numpy.transpose",
"tiles.tiles_with_halo",
"numpy.linspace",
"gsw.rho",
"computational.compute_weight",
"numpy.ceil",
"os.system",
"interp.read_profiles",
"gsw.grav",
"os.makedirs",
"numpy.deg2rad",
"numpy.zeros",
"tiles.read_argo_tile",
"gsw.sound_speed",
"pandas.read_pickle",
"numpy.sqrt"
] |
[((1512, 1567), 'computational.compute_weight', 'cpt.compute_weight', (['lonr[i]', 'latr[j]', 'LONr', 'LATr', 'resor'], {}), '(lonr[i], latr[j], LONr, LATr, resor)\n', (1530, 1567), True, 'import computational as cpt\n'), ((1579, 1627), 'pandas.DataFrame', 'pd.DataFrame', (['(0.0)'], {'columns': 'var_stats', 'index': 'zref'}), '(0.0, columns=var_stats, index=zref)\n', (1591, 1627), True, 'import pandas as pd\n'), ((2995, 3019), 'gsw.p_from_z', 'gsw.p_from_z', (['(-zref)', 'lat'], {}), '(-zref, lat)\n', (3007, 3019), False, 'import gsw\n'), ((3029, 3056), 'gsw.sound_speed', 'gsw.sound_speed', (['Si', 'Ti', 'pi'], {}), '(Si, Ti, pi)\n', (3044, 3056), False, 'import gsw\n'), ((3067, 3086), 'gsw.rho', 'gsw.rho', (['Si', 'Ti', 'pi'], {}), '(Si, Ti, pi)\n', (3074, 3086), False, 'import gsw\n'), ((3095, 3115), 'gsw.grav', 'gsw.grav', (['lat', 'pi[0]'], {}), '(lat, pi[0])\n', (3103, 3115), False, 'import gsw\n'), ((3180, 3210), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['zref', 'cs'], {}), '(zref, cs)\n', (3200, 3210), False, 'from scipy import interpolate\n'), ((3977, 4015), 'numpy.arange', 'np.arange', (['lonmin', '(lonmax + reso)', 'reso'], {}), '(lonmin, lonmax + reso, reso)\n', (3986, 4015), True, 'import numpy as np\n'), ((4024, 4062), 'numpy.arange', 'np.arange', (['latmin', '(latmax + reso)', 'reso'], {}), '(latmin, latmax + reso, reso)\n', (4033, 4062), True, 'import numpy as np\n'), ((4195, 4226), 'tiles.tiles_with_halo', 'tiles.tiles_with_halo', (['bb', 'tile'], {}), '(bb, tile)\n', (4216, 4226), False, 'import tiles\n'), ((4239, 4270), 'tiles.read_argo_tile', 'tiles.read_argo_tile', (['tile_list'], {}), '(tile_list)\n', (4259, 4270), False, 'import tiles\n'), ((4282, 4313), 'interp.read_profiles', 'interp.read_profiles', (['tile_list'], {}), '(tile_list)\n', (4302, 4313), False, 'import interp\n'), ((4807, 4822), 'numpy.deg2rad', 'np.deg2rad', (['LON'], {}), '(LON)\n', (4817, 4822), True, 'import numpy as np\n'), ((4834, 4849), 'numpy.deg2rad', 'np.deg2rad', (['LAT'], {}), '(LAT)\n', (4844, 4849), True, 'import numpy as np\n'), ((4861, 4876), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (4871, 4876), True, 'import numpy as np\n'), ((4888, 4903), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (4898, 4903), True, 'import numpy as np\n'), ((4916, 4932), 'numpy.deg2rad', 'np.deg2rad', (['reso'], {}), '(reso)\n', (4926, 4932), True, 'import numpy as np\n'), ((4998, 5024), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5006, 5024), True, 'import numpy as np\n'), ((5037, 5063), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5045, 5063), True, 'import numpy as np\n'), ((5076, 5102), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5084, 5102), True, 'import numpy as np\n'), ((5117, 5143), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5125, 5143), True, 'import numpy as np\n'), ((5158, 5184), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5166, 5184), True, 'import numpy as np\n'), ((5194, 5220), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5202, 5220), True, 'import numpy as np\n'), ((5229, 5255), 'numpy.zeros', 'np.zeros', (['(nlat, nlon, nz)'], {}), '((nlat, nlon, nz))\n', (5237, 5255), True, 'import numpy as np\n'), ((6343, 6376), 'os.system', 'os.system', (["('rm %s' % monitor_file)"], {}), "('rm %s' % monitor_file)\n", (6352, 6376), False, 'import os\n'), 
((6488, 6505), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (6502, 6505), False, 'import os\n'), ((1339, 1356), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (1353, 1356), False, 'import os\n'), ((2265, 2287), 'numpy.sum', 'np.sum', (['weight'], {'axis': '(0)'}), '(weight, axis=0)\n', (2271, 2287), True, 'import numpy as np\n'), ((2323, 2350), 'numpy.sum', 'np.sum', (['(CT * weight)'], {'axis': '(0)'}), '(CT * weight, axis=0)\n', (2329, 2350), True, 'import numpy as np\n'), ((2364, 2396), 'numpy.sum', 'np.sum', (['(CT * CT * weight)'], {'axis': '(0)'}), '(CT * CT * weight, axis=0)\n', (2370, 2396), True, 'import numpy as np\n'), ((2438, 2475), 'numpy.sqrt', 'np.sqrt', (['((z2 - z0 * z0 / W) / (W - 1))'], {}), '((z2 - z0 * z0 / W) / (W - 1))\n', (2445, 2475), True, 'import numpy as np\n'), ((2511, 2538), 'numpy.sum', 'np.sum', (['(SA * weight)'], {'axis': '(0)'}), '(SA * weight, axis=0)\n', (2517, 2538), True, 'import numpy as np\n'), ((2579, 2608), 'numpy.sum', 'np.sum', (['(BVF2 * weight)'], {'axis': '(0)'}), '(BVF2 * weight, axis=0)\n', (2585, 2608), True, 'import numpy as np\n'), ((2622, 2658), 'numpy.sum', 'np.sum', (['(BVF2 * BVF2 * weight)'], {'axis': '(0)'}), '(BVF2 * BVF2 * weight, axis=0)\n', (2628, 2658), True, 'import numpy as np\n'), ((2673, 2710), 'numpy.sqrt', 'np.sqrt', (['((z2 - z0 * z0 / W) / (W - 1))'], {}), '((z2 - z0 * z0 / W) / (W - 1))\n', (2680, 2710), True, 'import numpy as np\n'), ((3701, 3728), 'numpy.ceil', 'np.ceil', (["(b['LONMIN'] / reso)"], {}), "(b['LONMIN'] / reso)\n", (3708, 3728), True, 'import numpy as np\n'), ((3745, 3773), 'numpy.floor', 'np.floor', (["(b['LONMAX'] / reso)"], {}), "(b['LONMAX'] / reso)\n", (3753, 3773), True, 'import numpy as np\n'), ((3790, 3817), 'numpy.ceil', 'np.ceil', (["(b['LATMIN'] / reso)"], {}), "(b['LATMIN'] / reso)\n", (3797, 3817), True, 'import numpy as np\n'), ((3834, 3862), 'numpy.floor', 'np.floor', (["(b['LATMAX'] / reso)"], {}), "(b['LATMAX'] / reso)\n", (3842, 3862), True, 'import numpy as np\n'), ((6319, 6337), 'pandas.to_pickle', 'pd.to_pickle', (['v', 'f'], {}), '(v, f)\n', (6331, 6337), True, 'import pandas as pd\n'), ((6522, 6539), 'pandas.read_pickle', 'pd.read_pickle', (['f'], {}), '(f)\n', (6536, 6539), True, 'import pandas as pd\n'), ((1401, 1415), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (1412, 1415), False, 'import os\n'), ((6581, 6610), 'numpy.transpose', 'np.transpose', (['data', '(2, 0, 1)'], {}), '(data, (2, 0, 1))\n', (6593, 6610), True, 'import numpy as np\n'), ((3453, 3484), 'numpy.linspace', 'np.linspace', (['zref[k - 1]', 'z', '(10)'], {}), '(zref[k - 1], z, 10)\n', (3464, 3484), True, 'import numpy as np\n'), ((3611, 3620), 'numpy.exp', 'np.exp', (['r'], {}), '(r)\n', (3617, 3620), True, 'import numpy as np\n')]
|
# Copyright (c) 2019 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import numpy as np
from phylanx import Phylanx
@Phylanx
def in_top_k(predictions, targets, k):
top_k = np.argsort(-predictions)[:, :k]
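    # indices of the k largest predictions in each row (argsort of the negated scores)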
target = reshape(targets, [-1, 1]) # noqa
return np.any(target == top_k, -1)
|
[
"numpy.any",
"numpy.argsort"
] |
[((384, 411), 'numpy.any', 'np.any', (['(target == top_k)', '(-1)'], {}), '(target == top_k, -1)\n', (390, 411), True, 'import numpy as np\n'), ((294, 318), 'numpy.argsort', 'np.argsort', (['(-predictions)'], {}), '(-predictions)\n', (304, 318), True, 'import numpy as np\n')]
|
import numpy as np
import pylab as pl
from gls import sinefitm
from multiplot import dofig, doaxes
fac1 = 100
fac2 = 1
fac3 = 1
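# display scaling factors: fac1 converts relative flux to percent; fac2 and fac3 leave velocities in m/s (inferred from the axis labels below)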
ls = ['-','--',':','-.']
mrk = ['.',',','+','x']
col = ['k','c','m','y']
def plotTS(time, y1, y2, y3 = None, figno = 1, discrete = True, \
savefile = None, period = None, xper = False):
'''Plot light and RV curve(s)'''
M, N = np.shape(y1)
if discrete == True:
m1 = np.copy(mrk)
else:
m1 = np.copy(ls)
if (xper == True) * (not(period is None)):
tt = time / period - 0.5
xr = [-0.5,0.5]
xttl = 'phase'
else:
tt = time
xr = np.nanmin(time), np.nanmax(time)
xttl = 'time (days)'
if y3 is None:
ny = 2
else:
ny = 3
ee = dofig(figno, 1, ny, aspect = 1)
ax1 = doaxes(ee, 1, ny, 0, 0)
for i in np.arange(M):
pl.plot(tt, y1[i,:] * fac1, m1[i], c = col[i])
pl.ylabel(r"$\Delta F$ (\%)")
ymin = np.nanmin(y1) * fac1
ymax = np.nanmax(y1) * fac1
yr = ymax - ymin
pl.ylim(ymin - 0.1 * yr, ymax + 0.1 * yr)
ax2 = doaxes(ee, 1, ny, 0, 1, sharex = ax1)
for i in np.arange(M):
pl.plot(tt, y2[i,:] * fac2, m1[i], c = col[i])
pl.ylabel(r"$\Delta V$ (m/s)")
ymin = np.nanmin(y2) * fac2
ymax = np.nanmax(y2) * fac2
yr = ymax - ymin
pl.ylim(ymin - 0.1 * yr, ymax + 0.1 * yr)
if not(y3 is None):
ax3 = doaxes(ee, 1, ny, 0, 2, sharex = ax1)
for i in np.arange(M):
pl.plot(tt, y3[i,:] * fac2, m1[i], c = col[i])
pl.ylabel(r"$V_{\rm{bis}}$ (m/s)")
ymin = np.nanmin(y3) * fac2
ymax = np.nanmax(y3) * fac2
yr = ymax - ymin
pl.ylim(ymin - 0.1 * yr, ymax + 0.1 * yr)
pl.xlabel(xttl)
pl.xlim(xr[0], xr[1])
if savefile: pl.savefig(savefile)
return
def plotPer(time, y1, y2, y3 = None, figno = 2, \
savefile = None, period = None, fmp = 8):
'''Plot light curve and RV amplitude spectra'''
M, N = np.shape(y1)
pmax = 2* (np.nanmax(time) - np.nanmin(time))
if period is None:
dt = np.median(time[1:]-time[:N-1])
pmin = dt * 2.
else:
pmin = period / fmp
nper = 1000
if period is None:
fac = 1.0
else:
fac = period
if y3 is None:
ny = 2
else:
ny = 3
y = np.zeros((M*ny, N))
y[:M,:] = y1
y[M:2*M,:] = y2
if not (y3 is None):
y[2*M:,:] = y3
res = sinefitm(time, y, fmin = 1./pmax, fmax = 1./pmin, \
nfreq = nper)
freq, amps, ampc = res[0], res[2], res[3]
pers = 1.0 / freq
amp = np.sqrt(amps**2 + ampc**2)
amp1 = amp[:M,:]
amp2 = amp[M:2*M,:]
if not (y3 is None):
amp3 = amp[2*M:,:]
ee = dofig(figno, 1, ny, aspect = 1)
ax1 = doaxes(ee, 1, ny, 0, 0)
pl.setp(ax1.get_xticklabels(), visible = False)
pl.ylabel(r"$A_F$ (\%)")
for i in np.arange(M):
pl.plot(fac / pers, amp1[i,:] * fac1, ls[i], c = col[i])
pl.ylim(0, 1.1 * np.nanmax(amp1) * fac1)
ax2 = doaxes(ee, 1, ny, 0, 1, sharex = ax1)
pl.ylabel(r"$A_V$ (m/s)")
for i in np.arange(M):
pl.plot(fac / pers, amp2[i,:] * fac2, ls[i], c = col[i])
pl.ylim(0, 1.1 * np.nanmax(amp2) * fac2)
if not(y3 is None):
ax3 = doaxes(ee, 1, ny, 0, 2, sharex = ax1)
pl.ylabel(r"$A_{\mathrm{bis}}$ (m/s)")
for i in np.arange(M):
pl.plot(fac / pers, amp3[i,:] * fac3, ls[i], c = col[i])
pl.ylim(0, 1.1 * np.nanmax(amp3) * fac3)
if period is None:
pl.xlabel(r"Frequency (cycles/day)")
else:
pl.xlabel(r"Frequency (cycles/$P_{\mathrm{rot}}^{-1}$)")
if savefile:
pl.savefig(savefile)
return
def plotTSPer(time, y1, y2, y3 = None, figno = [1,2], savefile = [None, None], \
discrete = False, period = None, xper = False, \
fmp = 8):
'''Plot both time series and amplitude spectra for light and RV'''
plotTS(time, y1, y2, y3 = y3, figno = figno[0], discrete = discrete, \
savefile = savefile[0], period = period, xper = xper)
plotPer(time, y1, y2, y3 = y3, figno = figno[1], savefile = savefile[1], \
period = period, fmp = fmp)
return
|
[
"gls.sinefitm",
"numpy.copy",
"numpy.median",
"pylab.ylabel",
"numpy.zeros",
"multiplot.doaxes",
"pylab.plot",
"numpy.nanmin",
"numpy.shape",
"pylab.savefig",
"numpy.arange",
"pylab.ylim",
"pylab.xlabel",
"pylab.xlim",
"multiplot.dofig",
"numpy.nanmax",
"numpy.sqrt"
] |
[((382, 394), 'numpy.shape', 'np.shape', (['y1'], {}), '(y1)\n', (390, 394), True, 'import numpy as np\n'), ((779, 808), 'multiplot.dofig', 'dofig', (['figno', '(1)', 'ny'], {'aspect': '(1)'}), '(figno, 1, ny, aspect=1)\n', (784, 808), False, 'from multiplot import dofig, doaxes\n'), ((821, 844), 'multiplot.doaxes', 'doaxes', (['ee', '(1)', 'ny', '(0)', '(0)'], {}), '(ee, 1, ny, 0, 0)\n', (827, 844), False, 'from multiplot import dofig, doaxes\n'), ((858, 870), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (867, 870), True, 'import numpy as np\n'), ((931, 961), 'pylab.ylabel', 'pl.ylabel', (['"""$\\\\Delta F$ (\\\\%)"""'], {}), "('$\\\\Delta F$ (\\\\%)')\n", (940, 961), True, 'import pylab as pl\n'), ((1050, 1091), 'pylab.ylim', 'pl.ylim', (['(ymin - 0.1 * yr)', '(ymax + 0.1 * yr)'], {}), '(ymin - 0.1 * yr, ymax + 0.1 * yr)\n', (1057, 1091), True, 'import pylab as pl\n'), ((1102, 1137), 'multiplot.doaxes', 'doaxes', (['ee', '(1)', 'ny', '(0)', '(1)'], {'sharex': 'ax1'}), '(ee, 1, ny, 0, 1, sharex=ax1)\n', (1108, 1137), False, 'from multiplot import dofig, doaxes\n'), ((1153, 1165), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (1162, 1165), True, 'import numpy as np\n'), ((1226, 1256), 'pylab.ylabel', 'pl.ylabel', (['"""$\\\\Delta V$ (m/s)"""'], {}), "('$\\\\Delta V$ (m/s)')\n", (1235, 1256), True, 'import pylab as pl\n'), ((1346, 1387), 'pylab.ylim', 'pl.ylim', (['(ymin - 0.1 * yr)', '(ymax + 0.1 * yr)'], {}), '(ymin - 0.1 * yr, ymax + 0.1 * yr)\n', (1353, 1387), True, 'import pylab as pl\n'), ((1748, 1763), 'pylab.xlabel', 'pl.xlabel', (['xttl'], {}), '(xttl)\n', (1757, 1763), True, 'import pylab as pl\n'), ((1768, 1789), 'pylab.xlim', 'pl.xlim', (['xr[0]', 'xr[1]'], {}), '(xr[0], xr[1])\n', (1775, 1789), True, 'import pylab as pl\n'), ((2007, 2019), 'numpy.shape', 'np.shape', (['y1'], {}), '(y1)\n', (2015, 2019), True, 'import numpy as np\n'), ((2353, 2374), 'numpy.zeros', 'np.zeros', (['(M * ny, N)'], {}), '((M * ny, N))\n', (2361, 2374), True, 'import numpy as np\n'), ((2468, 2531), 'gls.sinefitm', 'sinefitm', (['time', 'y'], {'fmin': '(1.0 / pmax)', 'fmax': '(1.0 / pmin)', 'nfreq': 'nper'}), '(time, y, fmin=1.0 / pmax, fmax=1.0 / pmin, nfreq=nper)\n', (2476, 2531), False, 'from gls import sinefitm\n'), ((2631, 2661), 'numpy.sqrt', 'np.sqrt', (['(amps ** 2 + ampc ** 2)'], {}), '(amps ** 2 + ampc ** 2)\n', (2638, 2661), True, 'import numpy as np\n'), ((2769, 2798), 'multiplot.dofig', 'dofig', (['figno', '(1)', 'ny'], {'aspect': '(1)'}), '(figno, 1, ny, aspect=1)\n', (2774, 2798), False, 'from multiplot import dofig, doaxes\n'), ((2811, 2834), 'multiplot.doaxes', 'doaxes', (['ee', '(1)', 'ny', '(0)', '(0)'], {}), '(ee, 1, ny, 0, 0)\n', (2817, 2834), False, 'from multiplot import dofig, doaxes\n'), ((2891, 2915), 'pylab.ylabel', 'pl.ylabel', (['"""$A_F$ (\\\\%)"""'], {}), "('$A_F$ (\\\\%)')\n", (2900, 2915), True, 'import pylab as pl\n'), ((2929, 2941), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (2938, 2941), True, 'import numpy as np\n'), ((3067, 3102), 'multiplot.doaxes', 'doaxes', (['ee', '(1)', 'ny', '(0)', '(1)'], {'sharex': 'ax1'}), '(ee, 1, ny, 0, 1, sharex=ax1)\n', (3073, 3102), False, 'from multiplot import dofig, doaxes\n'), ((3109, 3133), 'pylab.ylabel', 'pl.ylabel', (['"""$A_V$ (m/s)"""'], {}), "('$A_V$ (m/s)')\n", (3118, 3133), True, 'import pylab as pl\n'), ((3148, 3160), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (3157, 3160), True, 'import numpy as np\n'), ((433, 445), 'numpy.copy', 'np.copy', (['mrk'], {}), '(mrk)\n', (440, 445), True, 'import 
numpy as np\n'), ((469, 480), 'numpy.copy', 'np.copy', (['ls'], {}), '(ls)\n', (476, 480), True, 'import numpy as np\n'), ((880, 925), 'pylab.plot', 'pl.plot', (['tt', '(y1[i, :] * fac1)', 'm1[i]'], {'c': 'col[i]'}), '(tt, y1[i, :] * fac1, m1[i], c=col[i])\n', (887, 925), True, 'import pylab as pl\n'), ((972, 985), 'numpy.nanmin', 'np.nanmin', (['y1'], {}), '(y1)\n', (981, 985), True, 'import numpy as np\n'), ((1004, 1017), 'numpy.nanmax', 'np.nanmax', (['y1'], {}), '(y1)\n', (1013, 1017), True, 'import numpy as np\n'), ((1175, 1220), 'pylab.plot', 'pl.plot', (['tt', '(y2[i, :] * fac2)', 'm1[i]'], {'c': 'col[i]'}), '(tt, y2[i, :] * fac2, m1[i], c=col[i])\n', (1182, 1220), True, 'import pylab as pl\n'), ((1268, 1281), 'numpy.nanmin', 'np.nanmin', (['y2'], {}), '(y2)\n', (1277, 1281), True, 'import numpy as np\n'), ((1300, 1313), 'numpy.nanmax', 'np.nanmax', (['y2'], {}), '(y2)\n', (1309, 1313), True, 'import numpy as np\n'), ((1426, 1461), 'multiplot.doaxes', 'doaxes', (['ee', '(1)', 'ny', '(0)', '(2)'], {'sharex': 'ax1'}), '(ee, 1, ny, 0, 2, sharex=ax1)\n', (1432, 1461), False, 'from multiplot import dofig, doaxes\n'), ((1481, 1493), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (1490, 1493), True, 'import numpy as np\n'), ((1562, 1596), 'pylab.ylabel', 'pl.ylabel', (['"""$V_{\\\\rm{bis}}$ (m/s)"""'], {}), "('$V_{\\\\rm{bis}}$ (m/s)')\n", (1571, 1596), True, 'import pylab as pl\n'), ((1702, 1743), 'pylab.ylim', 'pl.ylim', (['(ymin - 0.1 * yr)', '(ymax + 0.1 * yr)'], {}), '(ymin - 0.1 * yr, ymax + 0.1 * yr)\n', (1709, 1743), True, 'import pylab as pl\n'), ((1807, 1827), 'pylab.savefig', 'pl.savefig', (['savefile'], {}), '(savefile)\n', (1817, 1827), True, 'import pylab as pl\n'), ((2106, 2140), 'numpy.median', 'np.median', (['(time[1:] - time[:N - 1])'], {}), '(time[1:] - time[:N - 1])\n', (2115, 2140), True, 'import numpy as np\n'), ((2951, 3006), 'pylab.plot', 'pl.plot', (['(fac / pers)', '(amp1[i, :] * fac1)', 'ls[i]'], {'c': 'col[i]'}), '(fac / pers, amp1[i, :] * fac1, ls[i], c=col[i])\n', (2958, 3006), True, 'import pylab as pl\n'), ((3170, 3225), 'pylab.plot', 'pl.plot', (['(fac / pers)', '(amp2[i, :] * fac2)', 'ls[i]'], {'c': 'col[i]'}), '(fac / pers, amp2[i, :] * fac2, ls[i], c=col[i])\n', (3177, 3225), True, 'import pylab as pl\n'), ((3314, 3349), 'multiplot.doaxes', 'doaxes', (['ee', '(1)', 'ny', '(0)', '(2)'], {'sharex': 'ax1'}), '(ee, 1, ny, 0, 2, sharex=ax1)\n', (3320, 3349), False, 'from multiplot import dofig, doaxes\n'), ((3360, 3398), 'pylab.ylabel', 'pl.ylabel', (['"""$A_{\\\\mathrm{bis}}$ (m/s)"""'], {}), "('$A_{\\\\mathrm{bis}}$ (m/s)')\n", (3369, 3398), True, 'import pylab as pl\n'), ((3416, 3428), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (3425, 3428), True, 'import numpy as np\n'), ((3583, 3618), 'pylab.xlabel', 'pl.xlabel', (['"""Frequency (cycles/day)"""'], {}), "('Frequency (cycles/day)')\n", (3592, 3618), True, 'import pylab as pl\n'), ((3638, 3694), 'pylab.xlabel', 'pl.xlabel', (['"""Frequency (cycles/$P_{\\\\mathrm{rot}}^{-1}$)"""'], {}), "('Frequency (cycles/$P_{\\\\mathrm{rot}}^{-1}$)')\n", (3647, 3694), True, 'import pylab as pl\n'), ((3720, 3740), 'pylab.savefig', 'pl.savefig', (['savefile'], {}), '(savefile)\n', (3730, 3740), True, 'import pylab as pl\n'), ((649, 664), 'numpy.nanmin', 'np.nanmin', (['time'], {}), '(time)\n', (658, 664), True, 'import numpy as np\n'), ((666, 681), 'numpy.nanmax', 'np.nanmax', (['time'], {}), '(time)\n', (675, 681), True, 'import numpy as np\n'), ((1507, 1552), 'pylab.plot', 'pl.plot', (['tt', '(y3[i, :] * 
fac2)', 'm1[i]'], {'c': 'col[i]'}), '(tt, y3[i, :] * fac2, m1[i], c=col[i])\n', (1514, 1552), True, 'import pylab as pl\n'), ((1612, 1625), 'numpy.nanmin', 'np.nanmin', (['y3'], {}), '(y3)\n', (1621, 1625), True, 'import numpy as np\n'), ((1648, 1661), 'numpy.nanmax', 'np.nanmax', (['y3'], {}), '(y3)\n', (1657, 1661), True, 'import numpy as np\n'), ((2035, 2050), 'numpy.nanmax', 'np.nanmax', (['time'], {}), '(time)\n', (2044, 2050), True, 'import numpy as np\n'), ((2053, 2068), 'numpy.nanmin', 'np.nanmin', (['time'], {}), '(time)\n', (2062, 2068), True, 'import numpy as np\n'), ((3442, 3497), 'pylab.plot', 'pl.plot', (['(fac / pers)', '(amp3[i, :] * fac3)', 'ls[i]'], {'c': 'col[i]'}), '(fac / pers, amp3[i, :] * fac3, ls[i], c=col[i])\n', (3449, 3497), True, 'import pylab as pl\n'), ((3029, 3044), 'numpy.nanmax', 'np.nanmax', (['amp1'], {}), '(amp1)\n', (3038, 3044), True, 'import numpy as np\n'), ((3248, 3263), 'numpy.nanmax', 'np.nanmax', (['amp2'], {}), '(amp2)\n', (3257, 3263), True, 'import numpy as np\n'), ((3524, 3539), 'numpy.nanmax', 'np.nanmax', (['amp3'], {}), '(amp3)\n', (3533, 3539), True, 'import numpy as np\n')]
|
import numpy as np
import imutils
import time
import cv2
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_BUFFERSIZE, 2)
while True:
ret, frame = video.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
all_chans = []
for chan in frame[:, :]:
_, binary = cv2.threshold(chan, 70, 255, cv2.THRESH_BINARY)
# binary = cv2.adaptiveThreshold(chan, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 1.9)
all_chans.append(binary)
all_chans = np.array(all_chans)
"Mean color value and post conversion, not in mean!"
chan_mean = all_chans.mean(axis=2).astype("uint8")
kernel = np.ones((3, 3))
kernel = kernel / kernel.sum()
dil = cv2.dilate(chan_mean, kernel=kernel, iterations=1)
ero = cv2.erode(chan_mean, kernel=kernel, iterations=2)
# cv2.imshow("Frame", frame)
# cv2.imshow("all_chans", all_chans)
cv2.imshow("chan_mean", chan_mean)
# cv2.imshow("dil", dil)
# cv2.imshow("ero", ero)
key = cv2.waitKey(100)
if key == ord("q"):
break
elif key == 32:
cv2.imwrite("capture.png", frame)
print("Frame captured")
elif key > 0:
print("Pressed:", key)
|
[
"cv2.dilate",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"cv2.imwrite",
"numpy.ones",
"cv2.VideoCapture",
"numpy.array",
"cv2.erode",
"cv2.imshow"
] |
[((66, 85), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (82, 85), False, 'import cv2\n'), ((178, 217), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (190, 217), False, 'import cv2\n'), ((498, 517), 'numpy.array', 'np.array', (['all_chans'], {}), '(all_chans)\n', (506, 517), True, 'import numpy as np\n'), ((645, 660), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (652, 660), True, 'import numpy as np\n'), ((706, 756), 'cv2.dilate', 'cv2.dilate', (['chan_mean'], {'kernel': 'kernel', 'iterations': '(1)'}), '(chan_mean, kernel=kernel, iterations=1)\n', (716, 756), False, 'import cv2\n'), ((767, 816), 'cv2.erode', 'cv2.erode', (['chan_mean'], {'kernel': 'kernel', 'iterations': '(2)'}), '(chan_mean, kernel=kernel, iterations=2)\n', (776, 816), False, 'import cv2\n'), ((896, 930), 'cv2.imshow', 'cv2.imshow', (['"""chan_mean"""', 'chan_mean'], {}), "('chan_mean', chan_mean)\n", (906, 930), False, 'import cv2\n'), ((1000, 1016), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (1011, 1016), False, 'import cv2\n'), ((287, 334), 'cv2.threshold', 'cv2.threshold', (['chan', '(70)', '(255)', 'cv2.THRESH_BINARY'], {}), '(chan, 70, 255, cv2.THRESH_BINARY)\n', (300, 334), False, 'import cv2\n'), ((1083, 1116), 'cv2.imwrite', 'cv2.imwrite', (['"""capture.png"""', 'frame'], {}), "('capture.png', frame)\n", (1094, 1116), False, 'import cv2\n')]
|
import os
import sys
import csv
import cv2
import math
import time
import numbers
import numpy as np
from multiprocessing import Process
# local imported codes
import parameters as parm
from object_tracking_util import Camera, scalar_to_rgb, setup_system_objects, \
single_cam_detector, multi_cam_detector
class SingleCameraDetector(Process):
"""
Process for single camera detection
"""
def __init__(self, index, queue, FPS):
super().__init__()
self.queue = queue
self.index = index
self.realtime = isinstance(self.index, numbers.Number)
self.fps = FPS
self.frame_h = None
self.frame_w = None
self.scale_factor = None
self.aspect_ratio = None
self.cap = None
self.fgbg = None
self.detector = None
self.video_ends_indicator = 0
self.frame_count = 0
self.frame = None
self.good_tracks = None
self.origin = np.array([0, 0])
self.tracks = []
self.next_id = 0
def run(self):
self.cap = cv2.VideoCapture(self.index)
self.frame_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.frame_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.scale_factor = math.sqrt(self.frame_w ** 2 + self.frame_h ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
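        # scale factor of the frame diagonal relative to an 848x480 reference resolution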
self.aspect_ratio = self.frame_w / self.frame_h
downsample = False
if self.frame_w * self.frame_h > 1920 * 1080:
downsample = True
self.frame_w = 1920
            self.frame_h = int(1920 / self.aspect_ratio)
self.scale_factor = math.sqrt(self.frame_w ** 2 + self.frame_h ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
self.fgbg, self.detector = setup_system_objects(self.scale_factor)
# check if video capturing is successful
ret, self.frame = self.cap.read()
if ret:
if self.realtime:
print(f"Video Capture {self.index}: PASS")
else:
print(f"File Read \"{self.index}\": PASS")
else:
if self.realtime:
print(f"Video Capture {self.index}: FAIL")
else:
print(f"File Read \"{self.index}\": FAIL")
self.cap.release()
while self.cap.isOpened():
ret, self.frame = self.cap.read()
if ret:
self.frame = cv2.resize(self.frame, (self.frame_w, self.frame_h))
self.good_tracks, self.tracks, self.next_id, self.frame = single_cam_detector(
self.tracks, self.next_id, self.index, self.fgbg, self.detector, self.fps,
self.frame_w, self.frame_h, self.scale_factor, self.origin, self.frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
self.video_ends_indicator = 1
break
self.queue.put((self.good_tracks, self.frame_count, self.frame))
self.frame_count += 1
if self.video_ends_indicator == 1:
break
self.cap.release()
cv2.destroyAllWindows()
class MultiCameraDetector(Process):
"""
Process for multi camera detection
"""
def __init__(self, filenames, queue, FPS):
super().__init__()
self.filenames = filenames
self.queue = queue
self.realtime = isinstance(self.filenames[0], numbers.Number)
self.cameras = []
self.fps = FPS
self.video_ends_indicator = 0
self.frame_count = 0
self.good_tracks = None
self.start_timer = None
self.end_timer = None
def run(self):
for filename in self.filenames:
camera = Camera(filename, self.fps)
ret, self.frame = camera.cap.read()
if ret:
self.cameras.append(camera)
if self.realtime:
print(f"Video Capture {filename}: PASS")
else:
print(f"File Read \"{filename}\": PASS")
else:
if self.realtime:
print(f"Video Capture {filename}: FAIL")
else:
print(f"File Read \"{filename}\": FAIL")
camera.cap.release()
while True:
self.start_timer = time.time()
sendList = []
for index, camera in enumerate(self.cameras):
ret, frame = camera.cap.read()
if ret:
frame = cv2.resize(frame, (camera.frame_w, camera.frame_h))
self.good_tracks, frame = multi_cam_detector(camera, frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
self.video_ends_indicator = 1
break
else:
self.video_ends_indicator = 1
break
sendList.append((self.good_tracks, frame, camera.dead_tracks))
# sendList: [(good_tracks_0, frame_0, dead_tracks_0), (good_tracks_1, frame_1, dead_tracks_1), frame_count]
sendList.append((self.frame_count))
self.queue.put(sendList)
self.frame_count += 1
if self.video_ends_indicator == 1:
break
self.end_timer = time.time()
print(f"Detection process took: {self.end_timer - self.start_timer}")
cv2.destroyAllWindows()
for index, camera in enumerate(self.cameras):
camera.cap.release()
with open(f"data_out_{index}.csv", 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in camera.output_log:
writer.writerow(row)
|
[
"object_tracking_util.setup_system_objects",
"object_tracking_util.Camera",
"math.sqrt",
"csv.writer",
"cv2.waitKey",
"time.time",
"cv2.VideoCapture",
"numpy.array",
"object_tracking_util.multi_cam_detector",
"object_tracking_util.single_cam_detector",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((995, 1011), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1003, 1011), True, 'import numpy as np\n'), ((1101, 1129), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.index'], {}), '(self.index)\n', (1117, 1129), False, 'import cv2\n'), ((1785, 1824), 'object_tracking_util.setup_system_objects', 'setup_system_objects', (['self.scale_factor'], {}), '(self.scale_factor)\n', (1805, 1824), False, 'from object_tracking_util import Camera, scalar_to_rgb, setup_system_objects, single_cam_detector, multi_cam_detector\n'), ((3163, 3186), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3184, 3186), False, 'import cv2\n'), ((5472, 5495), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5493, 5495), False, 'import cv2\n'), ((1293, 1341), 'math.sqrt', 'math.sqrt', (['(self.frame_w ** 2 + self.frame_h ** 2)'], {}), '(self.frame_w ** 2 + self.frame_h ** 2)\n', (1302, 1341), False, 'import math\n'), ((1344, 1374), 'math.sqrt', 'math.sqrt', (['(848 ** 2 + 480 ** 2)'], {}), '(848 ** 2 + 480 ** 2)\n', (1353, 1374), False, 'import math\n'), ((3777, 3803), 'object_tracking_util.Camera', 'Camera', (['filename', 'self.fps'], {}), '(filename, self.fps)\n', (3783, 3803), False, 'from object_tracking_util import Camera, scalar_to_rgb, setup_system_objects, single_cam_detector, multi_cam_detector\n'), ((4379, 4390), 'time.time', 'time.time', ([], {}), '()\n', (4388, 4390), False, 'import time\n'), ((5369, 5380), 'time.time', 'time.time', ([], {}), '()\n', (5378, 5380), False, 'import time\n'), ((1667, 1715), 'math.sqrt', 'math.sqrt', (['(self.frame_w ** 2 + self.frame_h ** 2)'], {}), '(self.frame_w ** 2 + self.frame_h ** 2)\n', (1676, 1715), False, 'import math\n'), ((1718, 1748), 'math.sqrt', 'math.sqrt', (['(848 ** 2 + 480 ** 2)'], {}), '(848 ** 2 + 480 ** 2)\n', (1727, 1748), False, 'import math\n'), ((2441, 2493), 'cv2.resize', 'cv2.resize', (['self.frame', '(self.frame_w, self.frame_h)'], {}), '(self.frame, (self.frame_w, self.frame_h))\n', (2451, 2493), False, 'import cv2\n'), ((2569, 2745), 'object_tracking_util.single_cam_detector', 'single_cam_detector', (['self.tracks', 'self.next_id', 'self.index', 'self.fgbg', 'self.detector', 'self.fps', 'self.frame_w', 'self.frame_h', 'self.scale_factor', 'self.origin', 'self.frame'], {}), '(self.tracks, self.next_id, self.index, self.fgbg, self.\n detector, self.fps, self.frame_w, self.frame_h, self.scale_factor, self\n .origin, self.frame)\n', (2588, 2745), False, 'from object_tracking_util import Camera, scalar_to_rgb, setup_system_objects, single_cam_detector, multi_cam_detector\n'), ((5685, 5704), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (5695, 5704), False, 'import csv\n'), ((4575, 4626), 'cv2.resize', 'cv2.resize', (['frame', '(camera.frame_w, camera.frame_h)'], {}), '(frame, (camera.frame_w, camera.frame_h))\n', (4585, 4626), False, 'import cv2\n'), ((4673, 4706), 'object_tracking_util.multi_cam_detector', 'multi_cam_detector', (['camera', 'frame'], {}), '(camera, frame)\n', (4691, 4706), False, 'from object_tracking_util import Camera, scalar_to_rgb, setup_system_objects, single_cam_detector, multi_cam_detector\n'), ((2797, 2811), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2808, 2811), False, 'import cv2\n'), ((4731, 4745), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4742, 4745), False, 'import cv2\n')]
|
import re
from functools import reduce
from itertools import chain
from typing import Union, Dict, List
import pandas as pd
import numpy as np
from .common import *
DataFrameType = Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Dict[str, pd.DataFrame]]]
# Serialization helper functions
# -------------------------------
def _serializer(series) -> pd.DataFrame:
df = pd.DataFrame(series.get('values', []), columns=series['columns'])
if 'time' not in df.columns:
return df
df: pd.DataFrame = df.set_index(pd.to_datetime(df['time'])).drop('time', axis=1)
df.index = df.index.tz_localize('UTC')
df.index.name = None
if 'tags' in series:
for k, v in series['tags'].items():
df[k] = v
if 'name' in series:
df.name = series['name']
return df
def _get_name(series):
tags = [f'{k}={v}' for k, v in series.get('tags', {}).items()]
return ','.join(filter(None, [series.get('name'), *tags])) or None
def _drop_zero_index(df):
if isinstance(df.index, pd.DatetimeIndex):
if all(i.value == 0 for i in df.index):
return df.reset_index(drop=True)
return df
def parse(resp) -> DataFrameType:
"""Makes a dictionary of DataFrames from a response object"""
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements # list
# Parsing helper functions
# -------------------------
def _itertuples(df):
"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
"""
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols)
def _replace(df):
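    # build regex substitutions that strip NaN-valued fields from the generated line protocol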
obj_cols = {k for k, v in dict(df.dtypes).items() if v is np.dtype('O')}
other_cols = set(df.columns) - obj_cols
obj_nans = (f'{k}="nan"' for k in obj_cols)
other_nans = (f'{k}=nani?' for k in other_cols)
replacements = [
('|'.join(chain(obj_nans, other_nans)), ''),
(',{2,}', ','),
('|'.join([', ,', ', ', ' ,']), ' '),
]
return replacements
def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
"""Converts a Pandas DataFrame into line protocol format"""
# Pre-processing
if measurement is None:
raise ValueError("Missing 'measurement'")
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('DataFrame index is not DatetimeIndex')
tag_columns = set(tag_columns or [])
isnull = df.isnull().any(axis=1)
# Make parser function
tags = []
fields = []
for k, v in extra_tags.items():
tags.append(f"{k}={escape(v, key_escape)}")
for i, (k, v) in enumerate(df.dtypes.items()):
k = k.translate(key_escape)
if k in tag_columns:
tags.append(f"{k}={{p[{i+1}]}}")
elif issubclass(v.type, np.integer):
fields.append(f"{k}={{p[{i+1}]}}i")
elif issubclass(v.type, (np.float, np.bool_, np.floating)):
fields.append(f"{k}={{p[{i+1}]}}")
else:
# String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
fields.append(f"{k}=\"{{p[{i+1}]}}\"")
fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
f = eval("lambda p: f'{}'".format(''.join(fmt)))
# Map/concat
if isnull.any():
lp = map(f, _itertuples(df[~isnull]))
rep = _replace(df)
lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
for p in _itertuples(df[isnull]))
return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
else:
return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
|
[
"numpy.dtype",
"pandas.to_datetime",
"itertools.chain",
"re.sub"
] |
[((533, 559), 'pandas.to_datetime', 'pd.to_datetime', (["df['time']"], {}), "(df['time'])\n", (547, 559), True, 'import pandas as pd\n'), ((2106, 2119), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (2114, 2119), True, 'import numpy as np\n'), ((2304, 2331), 'itertools.chain', 'chain', (['obj_nans', 'other_nans'], {}), '(obj_nans, other_nans)\n', (2309, 2331), False, 'from itertools import chain\n'), ((4044, 4057), 're.sub', 're.sub', (['*b', 'a'], {}), '(*b, a)\n', (4050, 4057), False, 'import re\n'), ((4147, 4164), 'itertools.chain', 'chain', (['lp', 'lp_nan'], {}), '(lp, lp_nan)\n', (4152, 4164), False, 'from itertools import chain\n')]
|
import cv2
import numpy as np
from plantcv.plantcv import gaussian_blur
def test_gaussian_blur(test_data):
"""Test for PlantCV."""
# Read in test data
img = cv2.imread(test_data.small_rgb_img)
gaussian_img = gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
assert np.average(img) != np.average(gaussian_img)
def test_gaussian_blur_grayscale(test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
gaussian_img = gaussian_blur(img=gray_img, ksize=(51, 51), sigma_x=0, sigma_y=None)
assert np.average(gray_img) != np.average(gaussian_img)
|
[
"cv2.imread",
"numpy.average",
"plantcv.plantcv.gaussian_blur"
] |
[((171, 206), 'cv2.imread', 'cv2.imread', (['test_data.small_rgb_img'], {}), '(test_data.small_rgb_img)\n', (181, 206), False, 'import cv2\n'), ((226, 289), 'plantcv.plantcv.gaussian_blur', 'gaussian_blur', ([], {'img': 'img', 'ksize': '(51, 51)', 'sigma_x': '(0)', 'sigma_y': 'None'}), '(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)\n', (239, 289), False, 'from plantcv.plantcv import gaussian_blur\n'), ((459, 499), 'cv2.imread', 'cv2.imread', (['test_data.small_gray_img', '(-1)'], {}), '(test_data.small_gray_img, -1)\n', (469, 499), False, 'import cv2\n'), ((519, 587), 'plantcv.plantcv.gaussian_blur', 'gaussian_blur', ([], {'img': 'gray_img', 'ksize': '(51, 51)', 'sigma_x': '(0)', 'sigma_y': 'None'}), '(img=gray_img, ksize=(51, 51), sigma_x=0, sigma_y=None)\n', (532, 587), False, 'from plantcv.plantcv import gaussian_blur\n'), ((301, 316), 'numpy.average', 'np.average', (['img'], {}), '(img)\n', (311, 316), True, 'import numpy as np\n'), ((320, 344), 'numpy.average', 'np.average', (['gaussian_img'], {}), '(gaussian_img)\n', (330, 344), True, 'import numpy as np\n'), ((599, 619), 'numpy.average', 'np.average', (['gray_img'], {}), '(gray_img)\n', (609, 619), True, 'import numpy as np\n'), ((623, 647), 'numpy.average', 'np.average', (['gaussian_img'], {}), '(gaussian_img)\n', (633, 647), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import numpy as np
import sys
scalarTypes = (complex, float, int, np.number)
if sys.version_info < (3,):
scalarTypes += (long, )
def isScalar(f):
if isinstance(f, scalarTypes):
return True
elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], scalarTypes):
return True
return False
def asArray_N_x_Dim(pts, dim):
if type(pts) == list:
pts = np.array(pts)
assert isinstance(pts, np.ndarray), "pts must be a numpy array"
if dim > 1:
pts = np.atleast_2d(pts)
elif len(pts.shape) == 1:
pts = pts[:, np.newaxis]
assert pts.shape[1] == dim, "pts must be a column vector of shape (nPts, {0:d}) not ({1:d}, {2:d})".format(*((dim,)+pts.shape))
return pts
def requires(modules):
"""Decorator to wrap functions with soft dependencies.
This function was inspired by the `requires` function of pysal,
which is released under the 'BSD 3-Clause "New" or "Revised" License'.
https://github.com/pysal/pysal/blob/master/pysal/lib/common.py
Parameters
----------
modules : dict
Dictionary containing soft dependencies, e.g.,
{'matplotlib': matplotlib}.
Returns
-------
decorated_function : function
Original function if all soft dependencies are met, otherwise
it returns an empty function which prints why it is not running.
"""
# Check the required modules, add missing ones in the list `missing`.
missing = []
for key, item in modules.items():
if item is False:
missing.append(key)
def decorated_function(function):
"""Wrap function."""
if not missing:
return function
else:
def passer(*args, **kwargs):
print(('Missing dependencies: {d}.'.format(d=missing)))
print(('Not running `{}`.'.format(function.__name__)))
return passer
return decorated_function
|
[
"numpy.array",
"numpy.atleast_2d"
] |
[((460, 473), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (468, 473), True, 'import numpy as np\n'), ((585, 603), 'numpy.atleast_2d', 'np.atleast_2d', (['pts'], {}), '(pts)\n', (598, 603), True, 'import numpy as np\n')]
|
import copy
import warnings
import numpy as np
import pandas as pd
import colorcet
import bokeh.models
import bokeh.plotting
from . import utils
def strip(
data=None,
q=None,
cats=None,
q_axis="x",
palette=None,
order=None,
p=None,
show_legend=False,
color_column=None,
parcoord_column=None,
tooltips=None,
marker="circle",
jitter=False,
marker_kwargs=None,
jitter_kwargs=None,
parcoord_kwargs=None,
horizontal=None,
val=None,
**kwargs,
):
"""
Make a strip plot.
Parameters
----------
data : Pandas DataFrame, 1D Numpy array, or xarray
DataFrame containing tidy data for plotting. If a Numpy array,
a single category is assumed and a strip plot generated from
data.
q : hashable
Name of column to use as quantitative variable if `data` is a
Pandas DataFrame. Otherwise, `q` is used as the quantitative
axis label.
cats : hashable or list of hashables
Name of column(s) to use as categorical variable(s).
q_axis : str, either 'x' or 'y', default 'x'
Axis along which the quantitative value varies.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
order : list or None
If not None, must be a list of unique group names when the input
data frame is grouped by `cats`. The order of the list specifies
the ordering of the categorical variables on the categorical
axis and legend. If None, the categories appear in the order in
which they appeared in the inputted data frame.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
show_legend : bool, default False
If True, display legend.
color_column : hashable, default None
Column of `data` to use in determining color of glyphs. If None,
then `cats` is used.
parcoord_column : hashable, default None
Column of `data` to use to construct a parallel coordinate plot.
Data points with like entries in the parcoord_column are
connected with lines.
tooltips : list of 2-tuples
Specification for tooltips as per Bokeh specifications. For
example, if we want `col1` and `col2` tooltips, we can use
        `tooltips=[('label 1', '@col1'), ('label 2', '@col2')]`.
marker : str, default 'circle'
        Name of marker to be used in the plot. Must be one of
        ['asterisk', 'circle', 'circle_cross',
'circle_x', 'cross', 'dash', 'diamond', 'diamond_cross', 'hex',
'inverted_triangle', 'square', 'square_cross', 'square_x',
'triangle', 'x']
jitter : bool, default False
If True, apply a jitter transform to the glyphs.
marker_kwargs : dict
Keyword arguments to pass when adding markers to the plot.
["x", "y", "source", "cat", "legend"] are note allowed because
they are determined by other inputs.
jitter_kwargs : dict
Keyword arguments to be passed to `bokeh.transform.jitter()`. If
not specified, default is
`{'distribution': 'normal', 'width': 0.1}`. If the user
specifies `{'distribution': 'uniform'}`, the `'width'` entry is
adjusted to 0.4.
horizontal : bool or None, default None
Deprecated. Use `q_axis`.
val : hashable
Deprecated, use `q`.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()` when
instantiating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with a strip plot.
"""
# Protect against mutability of dicts
jitter_kwargs = copy.copy(jitter_kwargs)
marker_kwargs = copy.copy(marker_kwargs)
q = utils._parse_deprecations(q, q_axis, val, horizontal, "x")
if palette is None:
palette = colorcet.b_glasbey_category10
data, q, cats, show_legend = utils._data_cats(data, q, cats, show_legend, None)
cats, cols = utils._check_cat_input(
data, cats, q, color_column, parcoord_column, tooltips, palette, order, kwargs
)
grouped = data.groupby(cats, sort=False)
if p is None:
p, factors, color_factors = _cat_figure(
data, grouped, q, order, color_column, q_axis, kwargs
)
else:
if type(p.x_range) == bokeh.models.ranges.FactorRange and q_axis == "x":
raise RuntimeError("`q_axis` is 'x', but `p` has a categorical x-axis.")
elif type(p.y_range) == bokeh.models.ranges.FactorRange and q_axis == "y":
raise RuntimeError("`q_axis` is 'y', but `p` has a categorical y-axis.")
_, factors, color_factors = _get_cat_range(
data, grouped, order, color_column, q_axis
)
if tooltips is not None:
p.add_tools(bokeh.models.HoverTool(tooltips=tooltips))
if jitter_kwargs is None:
jitter_kwargs = dict(width=0.1, mean=0, distribution="normal")
elif type(jitter_kwargs) != dict:
raise RuntimeError("`jitter_kwargs` must be a dict.")
elif "width" not in jitter_kwargs:
if (
"distribution" not in jitter_kwargs
or jitter_kwargs["distribution"] == "uniform"
):
jitter_kwargs["width"] = 0.4
else:
jitter_kwargs["width"] = 0.1
if marker_kwargs is None:
marker_kwargs = {}
elif type(marker_kwargs) != dict:
raise RuntimeError("`marker_kwargs` must be a dict.")
if "color" not in marker_kwargs:
if color_column is None:
color_column = "cat"
marker_kwargs["color"] = bokeh.transform.factor_cmap(
color_column, palette=palette, factors=color_factors
)
if marker == "tick":
marker = "dash"
marker_fun = utils._get_marker(p, marker)
if marker == "dash":
if "angle" not in marker_kwargs and q_axis == "x":
marker_kwargs["angle"] = np.pi / 2
if "size" not in marker_kwargs:
if q_axis == "x":
marker_kwargs["size"] = p.plot_height * 0.25 / len(grouped)
else:
marker_kwargs["size"] = p.plot_width * 0.25 / len(grouped)
source = _cat_source(data, cats, cols, color_column)
if show_legend and "legend_field" not in marker_kwargs:
marker_kwargs["legend_field"] = "__label"
if q_axis == "x":
x = q
if jitter:
jitter_kwargs["range"] = p.y_range
y = bokeh.transform.jitter("cat", **jitter_kwargs)
else:
y = "cat"
p.ygrid.grid_line_color = None
else:
y = q
if jitter:
jitter_kwargs["range"] = p.x_range
x = bokeh.transform.jitter("cat", **jitter_kwargs)
else:
x = "cat"
p.xgrid.grid_line_color = None
if parcoord_column is not None:
source_pc = _parcoord_source(data, q, cats, q_axis, parcoord_column, factors)
if parcoord_kwargs is None:
line_color = "gray"
parcoord_kwargs = {}
elif type(parcoord_kwargs) != dict:
raise RuntimeError("`parcoord_kwargs` must be a dict.")
if "color" in parcoord_kwargs and "line_color" not in parcoord_kwargs:
line_color = parcoord_kwargs.pop("color")
else:
line_color = parcoord_kwargs.pop("line_color", "gray")
p.multi_line(
source=source_pc, xs="xs", ys="ys", line_color=line_color, **parcoord_kwargs
)
marker_fun(source=source, x=x, y=y, **marker_kwargs)
return p
def box(
data=None,
q=None,
cats=None,
q_axis="x",
palette=None,
order=None,
p=None,
whisker_caps=False,
display_points=True,
outlier_marker="circle",
min_data=5,
box_kwargs=None,
median_kwargs=None,
whisker_kwargs=None,
outlier_kwargs=None,
display_outliers=None,
horizontal=None,
val=None,
**kwargs,
):
"""
Make a box-and-whisker plot.
Parameters
----------
data : Pandas DataFrame, 1D Numpy array, or xarray
DataFrame containing tidy data for plotting. If a Numpy array,
a single category is assumed and a box plot with a single box is
generated from data.
q : hashable
Name of column to use as quantitative variable if `data` is a
Pandas DataFrame. Otherwise, `q` is used as the quantitative
axis label.
cats : hashable or list of hashables
Name of column(s) to use as categorical variable(s).
q_axis : str, either 'x' or 'y', default 'x'
Axis along which the quantitative value varies.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
order : list or None
If not None, must be a list of unique group names when the input
data frame is grouped by `cats`. The order of the list specifies
the ordering of the categorical variables on the categorical
axis and legend. If None, the categories appear in the order in
which they appeared in the inputted data frame.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
whisker_caps : bool, default False
If True, put caps on whiskers. If False, omit caps.
display_points : bool, default True
If True, display outliers and any other points that arise from
categories with fewer than `min_data` data points; otherwise
suppress them. This should only be False when using the boxes
as annotation on another plot.
outlier_marker : str, default 'circle'
        Name of marker to be used in the plot. Must be one of
        ['asterisk', 'circle', 'circle_cross',
'circle_x', 'cross', 'dash', 'diamond', 'diamond_cross', 'hex',
'inverted_triangle', 'square', 'square_cross', 'square_x',
'triangle', 'x']
min_data : int, default 5
Minimum number of data points in a given category in order to
make a box and whisker. Otherwise, individual data points are
plotted as in a strip plot.
box_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.hbar()` or
`p.vbar()` when constructing the boxes for the box plot.
median_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.hbar()` or
`p.vbar()` when constructing the median line for the box plot.
whisker_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.segment()`
when constructing the whiskers for the box plot.
outlier_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.circle()`
when constructing the outliers for the box plot.
horizontal : bool or None, default None
Deprecated. Use `q_axis`.
val : hashable
Deprecated, use `q`.
kwargs
        Kwargs that are passed to bokeh.plotting.figure() in constructing
the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with box-and-whisker plot.
Notes
-----
Uses the Tukey convention for box plots. The top and bottom of
the box are respectively the 75th and 25th percentiles of the
data. The line in the middle of the box is the median. The top
whisker extends to the maximum of the set of data points that are
less than 1.5 times the IQR beyond the top of the box, with an
analogous definition for the lower whisker. Data points not
between the ends of the whiskers are considered outliers and are
plotted as individual points.
"""
# Protect against mutability of dicts
box_kwargs = copy.copy(box_kwargs)
median_kwargs = copy.copy(median_kwargs)
whisker_kwargs = copy.copy(whisker_kwargs)
outlier_kwargs = copy.copy(outlier_kwargs)
q = utils._parse_deprecations(q, q_axis, val, horizontal, "x")
if display_outliers is not None:
warnings.warn(
f"`display_outliers` is deprecated. Use `display_points`. Using `display_points={display_outliers}.",
DeprecationWarning,
)
display_points = display_outliers
if palette is None:
palette = colorcet.b_glasbey_category10
data, q, cats, _ = utils._data_cats(data, q, cats, False, None)
cats, cols = utils._check_cat_input(
data, cats, q, None, None, None, palette, order, box_kwargs
)
if outlier_kwargs is None:
outlier_kwargs = dict()
elif type(outlier_kwargs) != dict:
raise RuntimeError("`outlier_kwargs` must be a dict.")
if box_kwargs is None:
box_kwargs = {"line_color": None}
box_width = 0.4
elif type(box_kwargs) != dict:
raise RuntimeError("`box_kwargs` must be a dict.")
else:
box_width = box_kwargs.pop("width", 0.4)
if "line_color" not in box_kwargs:
box_kwargs["line_color"] = None
if whisker_kwargs is None:
if "fill_color" in box_kwargs:
whisker_kwargs = {"line_color": box_kwargs["fill_color"]}
else:
whisker_kwargs = {"line_color": "black"}
elif type(whisker_kwargs) != dict:
raise RuntimeError("`whisker_kwargs` must be a dict.")
if median_kwargs is None:
median_kwargs = {"line_color": "white"}
elif type(median_kwargs) != dict:
raise RuntimeError("`median_kwargs` must be a dict.")
elif "line_color" not in median_kwargs:
median_kwargs["line_color"] = white
if q_axis == "x":
if "height" in box_kwargs:
warnings.warn("'height' entry in `box_kwargs` ignored; using `box_width`.")
del box_kwargs["height"]
else:
if "width" in box_kwargs:
warnings.warn("'width' entry in `box_kwargs` ignored; using `box_width`.")
del box_kwargs["width"]
grouped = data.groupby(cats, sort=False)
if p is None:
p, factors, color_factors = _cat_figure(
data, grouped, q, order, None, q_axis, kwargs
)
else:
_, factors, color_factors = _get_cat_range(data, grouped, order, None, q_axis)
marker_fun = utils._get_marker(p, outlier_marker)
source_box, source_outliers = _box_source(data, cats, q, cols, min_data)
if "color" in outlier_kwargs:
if "line_color" in outlier_kwargs or "fill_color" in outlier_kwargs:
raise RuntimeError(
"If `color` is in `outlier_kwargs`, `line_color` and `fill_color` cannot be."
)
else:
if "fill_color" in box_kwargs:
if "fill_color" not in outlier_kwargs:
outlier_kwargs["fill_color"] = box_kwargs["fill_color"]
if "line_color" not in outlier_kwargs:
outlier_kwargs["line_color"] = box_kwargs["fill_color"]
else:
if "fill_color" not in outlier_kwargs:
outlier_kwargs["fill_color"] = bokeh.transform.factor_cmap(
"cat", palette=palette, factors=factors
)
if "line_color" not in outlier_kwargs:
outlier_kwargs["line_color"] = bokeh.transform.factor_cmap(
"cat", palette=palette, factors=factors
)
if "fill_color" not in box_kwargs:
box_kwargs["fill_color"] = bokeh.transform.factor_cmap(
"cat", palette=palette, factors=factors
)
if q_axis == "x":
p.segment(
source=source_box,
y0="cat",
y1="cat",
x0="top",
x1="top_whisker",
**whisker_kwargs,
)
p.segment(
source=source_box,
y0="cat",
y1="cat",
x0="bottom",
x1="bottom_whisker",
**whisker_kwargs,
)
if whisker_caps:
p.hbar(
source=source_box,
y="cat",
left="top_whisker",
right="top_whisker",
height=box_width / 4,
**whisker_kwargs,
)
p.hbar(
source=source_box,
y="cat",
left="bottom_whisker",
right="bottom_whisker",
height=box_width / 4,
**whisker_kwargs,
)
p.hbar(
source=source_box,
y="cat",
left="bottom",
right="top",
height=box_width,
**box_kwargs,
)
p.hbar(
source=source_box,
y="cat",
left="middle",
right="middle",
height=box_width,
**median_kwargs,
)
if display_points:
marker_fun(source=source_outliers, y="cat", x=q, **outlier_kwargs)
p.ygrid.grid_line_color = None
else:
p.segment(
source=source_box,
x0="cat",
x1="cat",
y0="top",
y1="top_whisker",
**whisker_kwargs,
)
p.segment(
source=source_box,
x0="cat",
x1="cat",
y0="bottom",
y1="bottom_whisker",
**whisker_kwargs,
)
if whisker_caps:
p.vbar(
source=source_box,
x="cat",
bottom="top_whisker",
top="top_whisker",
width=box_width / 4,
**whisker_kwargs,
)
p.vbar(
source=source_box,
x="cat",
bottom="bottom_whisker",
top="bottom_whisker",
width=box_width / 4,
**whisker_kwargs,
)
p.vbar(
source=source_box,
x="cat",
bottom="bottom",
top="top",
width=box_width,
**box_kwargs,
)
p.vbar(
source=source_box,
x="cat",
bottom="middle",
top="middle",
width=box_width,
**median_kwargs,
)
if display_points:
marker_fun(source=source_outliers, x="cat", y=q, **outlier_kwargs)
p.xgrid.grid_line_color = None
return p
def stripbox(
data=None,
q=None,
cats=None,
q_axis="x",
palette=None,
order=None,
p=None,
show_legend=False,
top_level="strip",
color_column=None,
parcoord_column=None,
tooltips=None,
marker="circle",
jitter=False,
marker_kwargs=None,
jitter_kwargs=None,
parcoord_kwargs=None,
whisker_caps=True,
display_points=True,
min_data=5,
box_kwargs=None,
median_kwargs=None,
whisker_kwargs=None,
horizontal=None,
val=None,
**kwargs,
):
"""
Make a strip plot with a box plot as annotation.
Parameters
----------
data : Pandas DataFrame, 1D Numpy array, or xarray
DataFrame containing tidy data for plotting. If a Numpy array,
a single category is assumed and a strip plot generated from
data.
q : hashable
Name of column to use as quantitative variable if `data` is a
Pandas DataFrame. Otherwise, `q` is used as the quantitative
axis label.
cats : hashable or list of hashables
Name of column(s) to use as categorical variable(s).
q_axis : str, either 'x' or 'y', default 'x'
Axis along which the quantitative value varies.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
order : list or None
If not None, must be a list of unique group names when the input
data frame is grouped by `cats`. The order of the list specifies
the ordering of the categorical variables on the categorical
axis and legend. If None, the categories appear in the order in
which they appeared in the inputted data frame.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
top_level : str, default 'strip'
If 'box', the box plot is overlaid. If 'strip', the strip plot
is overlaid.
show_legend : bool, default False
If True, display legend.
color_column : hashable, default None
Column of `data` to use in determining color of glyphs. If None,
then `cats` is used.
parcoord_column : hashable, default None
Column of `data` to use to construct a parallel coordinate plot.
Data points with like entries in the parcoord_column are
connected with lines in the strip plot.
tooltips : list of 2-tuples
Specification for tooltips as per Bokeh specifications. For
example, if we want `col1` and `col2` tooltips, we can use
        `tooltips=[('label 1', '@col1'), ('label 2', '@col2')]`.
marker : str, default 'circle'
        Name of marker to be used in the plot. Must be one of
        ['asterisk', 'circle', 'circle_cross',
'circle_x', 'cross', 'dash', 'diamond', 'diamond_cross', 'hex',
'inverted_triangle', 'square', 'square_cross', 'square_x',
'triangle', 'x']
jitter : bool, default False
If True, apply a jitter transform to the glyphs.
marker_kwargs : dict
Keyword arguments to pass when adding markers to the plot.
["x", "y", "source", "cat", "legend"] are note allowed because
they are determined by other inputs.
jitter_kwargs : dict
Keyword arguments to be passed to `bokeh.transform.jitter()`. If
not specified, default is
`{'distribution': 'normal', 'width': 0.1}`. If the user
specifies `{'distribution': 'uniform'}`, the `'width'` entry is
adjusted to 0.4.
whisker_caps : bool, default True
If True, put caps on whiskers. If False, omit caps.
min_data : int, default 5
Minimum number of data points in a given category in order to
make a box and whisker. Otherwise, individual data points are
plotted as in a strip plot.
box_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.hbar()` or
`p.vbar()` when constructing the boxes for the box plot.
median_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.hbar()` or
`p.vbar()` when constructing the median line for the box plot.
whisker_kwargs : dict, default None
A dictionary of kwargs to be passed into `p.segment()`
when constructing the whiskers for the box plot.
horizontal : bool or None, default None
Deprecated. Use `q_axis`.
val : hashable
Deprecated, use `q`.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()` when
instantiating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with a strip plot.
"""
# Protect against mutability of dicts
box_kwargs = copy.copy(box_kwargs)
median_kwargs = copy.copy(median_kwargs)
whisker_kwargs = copy.copy(whisker_kwargs)
jitter_kwargs = copy.copy(jitter_kwargs)
marker_kwargs = copy.copy(marker_kwargs)
parcoord_kwargs = copy.copy(parcoord_kwargs)
# Set defaults
if box_kwargs is None:
box_kwargs = dict(line_color="gray", fill_alpha=0)
if "color" not in box_kwargs and "line_color" not in box_kwargs:
box_kwargs["line_color"] = "gray"
if "fill_alpha" not in box_kwargs:
box_kwargs["fill_alpha"] = 0
if median_kwargs is None:
median_kwargs = dict(line_color="gray")
if "color" not in box_kwargs and "line_color" not in median_kwargs:
median_kwargs["line_color"] = "gray"
if whisker_kwargs is None:
whisker_kwargs = dict(line_color="gray")
if "color" not in box_kwargs and "line_color" not in whisker_kwargs:
whisker_kwargs["line_color"] = "gray"
if top_level == "box":
p = strip(
data=data,
q=q,
cats=cats,
q_axis=q_axis,
palette=palette,
order=order,
p=p,
show_legend=show_legend,
color_column=color_column,
parcoord_column=parcoord_column,
tooltips=tooltips,
marker=marker,
jitter=jitter,
marker_kwargs=marker_kwargs,
jitter_kwargs=jitter_kwargs,
parcoord_kwargs=parcoord_kwargs,
horizontal=horizontal,
val=val,
**kwargs,
)
p = box(
data=data,
q=q,
cats=cats,
q_axis=q_axis,
palette=palette,
order=order,
p=p,
display_points=False,
whisker_caps=whisker_caps,
min_data=min_data,
box_kwargs=box_kwargs,
median_kwargs=median_kwargs,
whisker_kwargs=whisker_kwargs,
horizontal=horizontal,
val=val,
)
elif top_level == "strip":
p = box(
data=data,
q=q,
cats=cats,
q_axis=q_axis,
palette=palette,
order=order,
p=p,
display_points=False,
whisker_caps=whisker_caps,
min_data=min_data,
box_kwargs=box_kwargs,
median_kwargs=median_kwargs,
whisker_kwargs=whisker_kwargs,
horizontal=horizontal,
val=val,
**kwargs,
)
p = strip(
data=data,
q=q,
cats=cats,
q_axis=q_axis,
palette=palette,
order=order,
p=p,
show_legend=show_legend,
color_column=color_column,
parcoord_column=parcoord_column,
tooltips=tooltips,
marker=marker,
jitter=jitter,
marker_kwargs=marker_kwargs,
jitter_kwargs=jitter_kwargs,
parcoord_kwargs=parcoord_kwargs,
horizontal=horizontal,
val=val,
)
else:
raise RuntimeError("Invalid `top_level`. Allowed values are 'box' and 'strip'.")
return p
def _get_cat_range(df, grouped, order, color_column, q_axis):
if order is None:
if isinstance(list(grouped.groups.keys())[0], tuple):
factors = tuple(
[tuple([str(k) for k in key]) for key in grouped.groups.keys()]
)
else:
factors = tuple([str(key) for key in grouped.groups.keys()])
else:
if type(order[0]) in [list, tuple]:
factors = tuple([tuple([str(k) for k in key]) for key in order])
else:
factors = tuple([str(entry) for entry in order])
if q_axis == "x":
cat_range = bokeh.models.FactorRange(*(factors[::-1]))
elif q_axis == "y":
cat_range = bokeh.models.FactorRange(*factors)
if color_column is None:
color_factors = factors
else:
color_factors = tuple(sorted(list(df[color_column].unique().astype(str))))
return cat_range, factors, color_factors
def _cat_figure(df, grouped, q, order, color_column, q_axis, kwargs):
cat_range, factors, color_factors = _get_cat_range(
df, grouped, order, color_column, q_axis
)
kwargs = utils._fig_dimensions(kwargs)
if q_axis == "x":
if "x_axis_label" not in kwargs:
kwargs["x_axis_label"] = q
if "y_axis_type" in kwargs:
warnings.warn("`y_axis_type` specified for categorical axis. Ignoring.")
del kwargs["y_axis_type"]
kwargs["y_range"] = cat_range
elif q_axis == "y":
if "y_axis_label" not in kwargs:
kwargs["y_axis_label"] = q
if "x_axis_type" in kwargs:
warnings.warn("`x_axis_type` specified for categorical axis. Ignoring.")
del kwargs["x_axis_type"]
kwargs["x_range"] = cat_range
return bokeh.plotting.figure(**kwargs), factors, color_factors
def _cat_source(df, cats, cols, color_column):
cat_source, labels = utils._source_and_labels_from_cats(df, cats)
if type(cols) in [list, tuple, pd.core.indexes.base.Index]:
source_dict = {col: list(df[col].values) for col in cols}
else:
source_dict = {cols: list(df[cols].values)}
source_dict["cat"] = cat_source
if color_column in [None, "cat"]:
source_dict["__label"] = labels
else:
source_dict["__label"] = list(df[color_column].astype(str).values)
source_dict[color_column] = list(df[color_column].astype(str).values)
return bokeh.models.ColumnDataSource(source_dict)
def _parcoord_source(data, q, cats, q_axis, parcoord_column, factors):
if type(cats) not in [list, tuple]:
cats = [cats]
tuple_factors = False
else:
tuple_factors = True
grouped_parcoord = data.groupby(parcoord_column)
xs = []
ys = []
for t, g in grouped_parcoord:
xy = []
for _, r in g.iterrows():
if tuple_factors:
xy.append([tuple([r[cat] for cat in cats]), r[q]])
else:
xy.append([r[cats[0]], r[q]])
if len(xy) > 1:
xy.sort(key=lambda a: factors.index(a[0]))
xs_pc = []
ys_pc = []
for pair in xy:
xs_pc.append(pair[0])
ys_pc.append(pair[1])
if q_axis == "y":
xs.append(xs_pc)
ys.append(ys_pc)
else:
xs.append(ys_pc)
ys.append(xs_pc)
return bokeh.models.ColumnDataSource(dict(xs=xs, ys=ys))
def _outliers(data, min_data):
if len(data) >= min_data:
bottom, middle, top = np.percentile(data, [25, 50, 75])
iqr = top - bottom
outliers = data[(data > top + 1.5 * iqr) | (data < bottom - 1.5 * iqr)]
return outliers
else:
return data
def _box_and_whisker(data, min_data):
if len(data) >= min_data:
middle = data.median()
bottom = data.quantile(0.25)
top = data.quantile(0.75)
iqr = top - bottom
top_whisker = max(data[data <= top + 1.5 * iqr].max(), top)
bottom_whisker = min(data[data >= bottom - 1.5 * iqr].min(), bottom)
return pd.Series(
{
"middle": middle,
"bottom": bottom,
"top": top,
"top_whisker": top_whisker,
"bottom_whisker": bottom_whisker,
}
)
else:
return pd.Series(
{
"middle": np.nan,
"bottom": np.nan,
"top": np.nan,
"top_whisker": np.nan,
"bottom_whisker": np.nan,
}
)
def _box_source(df, cats, q, cols, min_data):
"""Construct a data frame for making box plot."""
# Need to reset index for use in slicing outliers
df_source = df.reset_index(drop=True)
if cats is None:
grouped = df_source
else:
grouped = df_source.groupby(cats, sort=False)
# Data frame for boxes and whiskers
df_box = grouped[q].apply(_box_and_whisker, min_data).unstack().reset_index()
df_box = df_box.dropna()
source_box = _cat_source(
df_box, cats, ["middle", "bottom", "top", "top_whisker", "bottom_whisker"], None
)
# Data frame for outliers
s_outliers = grouped[q].apply(_outliers, min_data)
# If no cat has enough data, just use everything as an "outlier"
if len(s_outliers) == len(df_source):
df_outliers = df_source.copy()
inds = df_source.index
else:
df_outliers = s_outliers.reset_index()
inds = s_outliers.index.get_level_values(-1)
df_outliers.index = inds
df_outliers[cols] = df_source.loc[inds, cols]
source_outliers = _cat_source(df_outliers, cats, cols, None)
return source_box, source_outliers
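# Usage sketch (hypothetical data; assumes the module's usual pandas import as `pd`):
# build the box and outlier ColumnDataSources for a tiny tidy DataFrame with one
# categorical column "group" and one quantitative column "signal". The 40.0 reading in
# group "a" is expected to be the only outlier.
def _example_box_source():
    df = pd.DataFrame(
        {
            "group": ["a"] * 6 + ["b"] * 6,
            "signal": [1.0, 2.0, 2.5, 3.0, 3.5, 40.0, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5],
        }
    )
    # min_data=5: both groups have enough points for box-and-whisker statistics.
    return _box_source(df, cats="group", q="signal", cols=["group", "signal"], min_data=5)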
|
[
"numpy.percentile",
"warnings.warn",
"copy.copy",
"pandas.Series"
] |
[((3977, 4001), 'copy.copy', 'copy.copy', (['jitter_kwargs'], {}), '(jitter_kwargs)\n', (3986, 4001), False, 'import copy\n'), ((4022, 4046), 'copy.copy', 'copy.copy', (['marker_kwargs'], {}), '(marker_kwargs)\n', (4031, 4046), False, 'import copy\n'), ((12267, 12288), 'copy.copy', 'copy.copy', (['box_kwargs'], {}), '(box_kwargs)\n', (12276, 12288), False, 'import copy\n'), ((12309, 12333), 'copy.copy', 'copy.copy', (['median_kwargs'], {}), '(median_kwargs)\n', (12318, 12333), False, 'import copy\n'), ((12355, 12380), 'copy.copy', 'copy.copy', (['whisker_kwargs'], {}), '(whisker_kwargs)\n', (12364, 12380), False, 'import copy\n'), ((12402, 12427), 'copy.copy', 'copy.copy', (['outlier_kwargs'], {}), '(outlier_kwargs)\n', (12411, 12427), False, 'import copy\n'), ((23815, 23836), 'copy.copy', 'copy.copy', (['box_kwargs'], {}), '(box_kwargs)\n', (23824, 23836), False, 'import copy\n'), ((23857, 23881), 'copy.copy', 'copy.copy', (['median_kwargs'], {}), '(median_kwargs)\n', (23866, 23881), False, 'import copy\n'), ((23903, 23928), 'copy.copy', 'copy.copy', (['whisker_kwargs'], {}), '(whisker_kwargs)\n', (23912, 23928), False, 'import copy\n'), ((23949, 23973), 'copy.copy', 'copy.copy', (['jitter_kwargs'], {}), '(jitter_kwargs)\n', (23958, 23973), False, 'import copy\n'), ((23994, 24018), 'copy.copy', 'copy.copy', (['marker_kwargs'], {}), '(marker_kwargs)\n', (24003, 24018), False, 'import copy\n'), ((24041, 24067), 'copy.copy', 'copy.copy', (['parcoord_kwargs'], {}), '(parcoord_kwargs)\n', (24050, 24067), False, 'import copy\n'), ((12542, 12687), 'warnings.warn', 'warnings.warn', (['f"""`display_outliers` is deprecated. Use `display_points`. Using `display_points={display_outliers}."""', 'DeprecationWarning'], {}), "(\n f'`display_outliers` is deprecated. Use `display_points`. Using `display_points={display_outliers}.'\n , DeprecationWarning)\n", (12555, 12687), False, 'import warnings\n'), ((30635, 30668), 'numpy.percentile', 'np.percentile', (['data', '[25, 50, 75]'], {}), '(data, [25, 50, 75])\n', (30648, 30668), True, 'import numpy as np\n'), ((31189, 31314), 'pandas.Series', 'pd.Series', (["{'middle': middle, 'bottom': bottom, 'top': top, 'top_whisker': top_whisker,\n 'bottom_whisker': bottom_whisker}"], {}), "({'middle': middle, 'bottom': bottom, 'top': top, 'top_whisker':\n top_whisker, 'bottom_whisker': bottom_whisker})\n", (31198, 31314), True, 'import pandas as pd\n'), ((31453, 31568), 'pandas.Series', 'pd.Series', (["{'middle': np.nan, 'bottom': np.nan, 'top': np.nan, 'top_whisker': np.nan,\n 'bottom_whisker': np.nan}"], {}), "({'middle': np.nan, 'bottom': np.nan, 'top': np.nan, 'top_whisker':\n np.nan, 'bottom_whisker': np.nan})\n", (31462, 31568), True, 'import pandas as pd\n'), ((14160, 14235), 'warnings.warn', 'warnings.warn', (['"""\'height\' entry in `box_kwargs` ignored; using `box_width`."""'], {}), '("\'height\' entry in `box_kwargs` ignored; using `box_width`.")\n', (14173, 14235), False, 'import warnings\n'), ((14329, 14403), 'warnings.warn', 'warnings.warn', (['"""\'width\' entry in `box_kwargs` ignored; using `box_width`."""'], {}), '("\'width\' entry in `box_kwargs` ignored; using `box_width`.")\n', (14342, 14403), False, 'import warnings\n'), ((28376, 28448), 'warnings.warn', 'warnings.warn', (['"""`y_axis_type` specified for categorical axis. Ignoring."""'], {}), "('`y_axis_type` specified for categorical axis. 
Ignoring.')\n", (28389, 28448), False, 'import warnings\n'), ((28679, 28751), 'warnings.warn', 'warnings.warn', (['"""`x_axis_type` specified for categorical axis. Ignoring."""'], {}), "('`x_axis_type` specified for categorical axis. Ignoring.')\n", (28692, 28751), False, 'import warnings\n')]
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import random
from functools import lru_cache
from subprocess import check_output
from typing import List, Optional, Sequence, Set, Tuple
import numpy as np
import trimesh
import trimesh.scene
from cached_property import cached_property
from shapely.geometry import Point as shPoint
from shapely.geometry import Polygon
from shapely.ops import nearest_points, snap
from trimesh.exchange import gltf
from smarts.sstudio.types import MapSpec
from .coordinates import BoundingBox, Heading, Point, Pose, RefLinePoint
from .lanepoints import LanePoints, LinkedLanePoint
from .road_map import RoadMap, Waypoint
from .utils.geometry import buffered_shape, generate_mesh_from_polygons
from .utils.math import inplace_unwrap, radians_to_vec, vec_2d
from smarts.core.utils.sumo import sumolib # isort:skip
from sumolib.net.edge import Edge # isort:skip
def _convert_camera(camera):
result = {
"name": camera.name,
"type": "perspective",
"perspective": {
"aspectRatio": camera.fov[0] / camera.fov[1],
"yfov": np.radians(camera.fov[1]),
"znear": float(camera.z_near),
# HACK: The trimesh gltf export doesn't include a zfar which Panda3D GLB
# loader expects. Here we override to make loading possible.
"zfar": float(camera.z_near + 100),
},
}
return result
gltf._convert_camera = _convert_camera
class _GLBData:
def __init__(self, bytes_):
self._bytes = bytes_
def write_glb(self, output_path):
"""Generate a `.glb` geometry file."""
with open(output_path, "wb") as f:
f.write(self._bytes)
class SumoRoadNetwork(RoadMap):
"""A road network for a SUMO source."""
DEFAULT_LANE_WIDTH = 3.2
"""3.2 is the default Sumo road network lane width if it's not specified
explicitly in Sumo's NetEdit or the map.net.xml file.
This corresponds on a 1:1 scale to lanes 3.2m wide, which is typical
in North America (although US highway lanes are wider at ~3.7m)."""
def __init__(self, graph, net_file: str, map_spec: MapSpec):
self._log = logging.getLogger(self.__class__.__name__)
self._graph = graph
self._net_file = net_file
self._map_spec = map_spec
self._default_lane_width = SumoRoadNetwork._spec_lane_width(map_spec)
self._surfaces = {}
self._lanes = {}
self._roads = {}
self._waypoints_cache = SumoRoadNetwork._WaypointsCache()
self._lanepoints = None
if map_spec.lanepoint_spacing is not None:
assert map_spec.lanepoint_spacing > 0
# XXX: this should be last here since LanePoints() calls road_network methods immediately
self._lanepoints = LanePoints.from_sumo(
self, spacing=map_spec.lanepoint_spacing
)
@staticmethod
def _check_net_origin(bbox):
assert len(bbox) == 4
return bbox[0] <= 0.0 and bbox[1] <= 0.0 and bbox[2] >= 0.0 and bbox[3] >= 0.0
shifted_net_file_name = "shifted_map-AUTOGEN.net.xml"
@classmethod
def shifted_net_file_path(cls, net_file_path):
"""The path of the modified map file after coordinate normalization."""
net_file_folder = os.path.dirname(net_file_path)
return os.path.join(net_file_folder, cls.shifted_net_file_name)
@classmethod
@lru_cache(maxsize=1)
def _shift_coordinates(cls, net_file_path, shifted_path):
assert shifted_path != net_file_path
logger = logging.getLogger(cls.__name__)
logger.info(f"normalizing net coordinates into {shifted_path}...")
## Translate the map's origin to remove huge (imprecise) offsets.
## See https://sumo.dlr.de/docs/netconvert.html#usage_description
## for netconvert options description.
try:
stdout = check_output(
[
"netconvert",
"--offset.disable-normalization=FALSE",
"-s",
net_file_path,
"-o",
shifted_path,
]
)
logger.debug(f"netconvert output: {stdout}")
return True
except Exception as e:
logger.warning(
f"unable to use netconvert tool to normalize coordinates: {e}"
)
return False
@classmethod
def from_spec(cls, map_spec: MapSpec):
"""Generate a road network from the given map specification."""
net_file = SumoRoadNetwork._map_path(map_spec)
# Connections to internal lanes are implicit. If `withInternal=True` is
# set, internal junctions and the connections from internal lanes are
# loaded into the network graph.
G = sumolib.net.readNet(net_file, withInternal=True)
if not cls._check_net_origin(G.getBoundary()):
shifted_net_file = cls.shifted_net_file_path(net_file)
if os.path.isfile(shifted_net_file) or (
map_spec.shift_to_origin
and cls._shift_coordinates(net_file, shifted_net_file)
):
G = sumolib.net.readNet(shifted_net_file, withInternal=True)
assert cls._check_net_origin(G.getBoundary())
net_file = shifted_net_file
# keep track of having shifted the graph by
# injecting state into the network graph.
# this is needed because some maps have been pre-shifted,
# and will already have a locationOffset, but for those
# the offset should not be used (because all their other
# coordinates are relative to the origin).
G._shifted_by_smarts = True
return cls(G, net_file, map_spec)
@property
def source(self) -> str:
"""This is the net.xml file that corresponds with our possibly-offset coordinates."""
return self._net_file
@staticmethod
def _spec_lane_width(map_spec: MapSpec) -> float:
return (
map_spec.default_lane_width
if map_spec.default_lane_width is not None
else SumoRoadNetwork.DEFAULT_LANE_WIDTH
)
@staticmethod
def _map_path(map_spec: MapSpec) -> str:
if os.path.isdir(map_spec.source):
# map.net.xml is the default Sumo map name; try that:
return os.path.join(map_spec.source, "map.net.xml")
return map_spec.source
def is_same_map(self, map_spec: MapSpec) -> bool:
"""Test if the road network is identical to the given map specification."""
return (
(
map_spec.source == self._map_spec.source
or SumoRoadNetwork._map_path(map_spec)
== SumoRoadNetwork._map_path(self._map_spec)
)
and map_spec.lanepoint_spacing == self._map_spec.lanepoint_spacing
and (
map_spec.default_lane_width == self._map_spec.default_lane_width
or SumoRoadNetwork._spec_lane_width(map_spec)
== SumoRoadNetwork._spec_lane_width(self._map_spec)
)
and (
map_spec.shift_to_origin == self._map_spec.shift_to_origin
or (not map_spec.shift_to_origin and not self._graph._shifted_by_smarts)
)
)
@cached_property
def bounding_box(self) -> BoundingBox:
"""Get the minimal axis aligned bounding box that contains all map geometry."""
# maps are assumed to start at the origin
bb = self._graph.getBoundary() # 2D bbox in format (xmin, ymin, xmax, ymax)
return BoundingBox(
min_pt=Point(x=bb[0], y=bb[1]), max_pt=Point(x=bb[2], y=bb[3])
)
@property
def scale_factor(self) -> float:
"""Get the scale factor between the default lane width and the default lane width."""
# map units per meter
return self._default_lane_width / SumoRoadNetwork.DEFAULT_LANE_WIDTH
def to_glb(self, at_path):
"""Build a glb file for camera rendering and envision"""
polys = self._compute_road_polygons()
glb = self._make_glb_from_polys(polys)
glb.write_glb(at_path)
class Surface(RoadMap.Surface):
"""Describes a surface."""
def __init__(self, surface_id: str, road_map):
self._surface_id = surface_id
self._map = road_map
@property
def surface_id(self) -> str:
"""The identifier for this surface."""
return self._surface_id
@property
def is_drivable(self) -> bool:
"""If it is possible to drive on this surface."""
# all surfaces on Sumo road networks are drivable
return True
def surface_by_id(self, surface_id: str) -> RoadMap.Surface:
"""Find a surface by its identifier."""
return self._surfaces.get(surface_id)
class Lane(RoadMap.Lane, Surface):
"""Describes a lane."""
def __init__(self, lane_id: str, sumo_lane, road_map):
super().__init__(lane_id, road_map)
self._lane_id = lane_id
self._sumo_lane = sumo_lane
self._road = road_map.road_by_id(sumo_lane.getEdge().getID())
assert self._road
@property
def lane_id(self) -> str:
return self._lane_id
@property
def road(self) -> RoadMap.Road:
return self._road
@cached_property
def speed_limit(self) -> float:
return self._sumo_lane.getSpeed()
@cached_property
def length(self) -> float:
return self._sumo_lane.getLength()
@cached_property
def _width(self) -> float:
return self._sumo_lane.getWidth()
@property
def in_junction(self) -> bool:
"""If this lane is part of a junction/intersection."""
return self._road.is_junction
@cached_property
def index(self) -> int:
"""The index of this lane within the road it is part of."""
return self._sumo_lane.getIndex()
@cached_property
def lanes_in_same_direction(self) -> List[RoadMap.Lane]:
"""Find nearby lanes heading in the same direction as this lane."""
if not self.in_junction:
# When not in an intersection, all SUMO Lanes for an Edge go in the same direction.
return [l for l in self.road.lanes if l != self]
result = []
in_roads = set(il.road for il in self.incoming_lanes)
out_roads = set(il.road for il in self.outgoing_lanes)
for lane in self.road.lanes:
if self == lane:
continue
other_in_roads = set(il.road for il in lane.incoming_lanes)
if in_roads & other_in_roads:
other_out_roads = set(il.road for il in lane.outgoing_lanes)
if out_roads & other_out_roads:
result.append(lane)
return result
@cached_property
def lane_to_left(self) -> Tuple[RoadMap.Lane, bool]:
"""Get the lane to the left of this lane assuming right hand driving."""
result = None
for other in self.lanes_in_same_direction:
if other.index > self.index and (
not result or other.index < result.index
):
result = other
return result, True
@cached_property
def lane_to_right(self) -> Tuple[RoadMap.Lane, bool]:
"""Get the lane to the right of this lane assuming right hand driving."""
result = None
for other in self.lanes_in_same_direction:
if other.index < self.index and (
not result or other.index > result.index
):
result = other
return result, True
@cached_property
def incoming_lanes(self) -> List[RoadMap.Lane]:
"""Lanes leading into this lane."""
return [
self._map.lane_by_id(incoming.getID())
for incoming in self._sumo_lane.getIncoming()
]
@cached_property
def outgoing_lanes(self) -> List[RoadMap.Lane]:
"""Lanes leading out of this lane."""
return [
self._map.lane_by_id(
outgoing.getViaLaneID() or outgoing.getToLane().getID()
)
for outgoing in self._sumo_lane.getOutgoing()
]
@cached_property
def entry_surfaces(self) -> List[RoadMap.Surface]:
"""All surfaces leading into this lane."""
return self.incoming_lanes
@cached_property
def exit_surfaces(self) -> List[RoadMap.Surface]:
"""All surfaces leading out of this lane."""
return self.outgoing_lanes
@lru_cache(maxsize=16)
def oncoming_lanes_at_offset(self, offset: float) -> List[RoadMap.Lane]:
"""Adjacent lanes travelling in the opposite direction to this lane."""
result = []
radius = 1.1 * self.width_at_offset(offset)
pt = self.from_lane_coord(RefLinePoint(offset))
nearby_lanes = self._map.nearest_lanes(pt, radius=radius)
if not nearby_lanes:
return result
my_vect = self.vector_at_offset(offset)
my_norm = np.linalg.norm(my_vect)
if my_norm == 0:
return result
threshold = -0.995562 # cos(175*pi/180)
for lane, _ in nearby_lanes:
if lane == self:
continue
lane_refline_pt = lane.to_lane_coord(pt)
lv = lane.vector_at_offset(lane_refline_pt.s)
lv_norm = np.linalg.norm(lv)
if lv_norm == 0:
continue
lane_angle = np.dot(my_vect, lv) / (my_norm * lv_norm)
if lane_angle < threshold:
result.append(lane)
return result
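# Worked example of the heading test above (illustrative, not in the original source):
# direction vectors v1 = (1, 0) and v2 = (-1, 0) give
# cos(angle) = dot(v1, v2) / (|v1| * |v2|) = -1.0, which is below the -0.995562
# threshold used above (~cos 175 deg), so the second lane counts as oncoming;
# lanes whose headings differ by less than ~175 degrees do not.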
@cached_property
def foes(self) -> List[RoadMap.Lane]:
"""Lanes that cross over this lane (useful in junctions.)"""
# TODO: we might do better here since Sumo/Traci determines right-of-way for their connections/links. See:
# https://sumo.dlr.de/pydoc/traci._lane.html#LaneDomain-getFoes
result = [
incoming
for outgoing in self.outgoing_lanes
for incoming in outgoing.incoming_lanes
if incoming != self
]
if self.in_junction:
in_roads = set(il.road for il in self.incoming_lanes)
for foe in self.road.lanes:
foe_in_roads = set(il.road for il in foe.incoming_lanes)
if not bool(in_roads & foe_in_roads):
result.append(foe)
return list(set(result))
def waypoint_paths_for_pose(
self, pose: Pose, lookahead: int, route: RoadMap.Route = None
) -> List[List[Waypoint]]:
road_ids = [road.road_id for road in route.roads] if route else None
return self._waypoint_paths_at(pose.position, lookahead, road_ids)
def waypoint_paths_at_offset(
self, offset: float, lookahead: int = 30, route: RoadMap.Route = None
) -> List[List[Waypoint]]:
wp_start = self.from_lane_coord(RefLinePoint(offset))
road_ids = [road.road_id for road in route.roads] if route else None
return self._waypoint_paths_at(wp_start, lookahead, road_ids)
def _waypoint_paths_at(
self,
point: Sequence,
lookahead: int,
filter_road_ids: Optional[Sequence[str]] = None,
) -> List[List[Waypoint]]:
"""Waypoints on this lane leading on from the given point."""
closest_linked_lp = (
self._map._lanepoints.closest_linked_lanepoint_on_lane_to_point(
point, self._lane_id
)
)
return self._map._waypoints_starting_at_lanepoint(
closest_linked_lp,
lookahead,
tuple(filter_road_ids) if filter_road_ids else (),
tuple(point),
)
@lru_cache(maxsize=4)
def shape(
self, buffer_width: float = 0.0, default_width: Optional[float] = None
) -> Polygon:
"""The lane geometry as a shape."""
new_width = buffer_width
if default_width:
new_width += default_width
else:
new_width += self._width
assert new_width >= 0.0
if new_width > 0:
return buffered_shape(self._sumo_lane.getShape(), new_width)
line = self._sumo_lane.getShape()
bline = buffered_shape(line, 0.0)
return line if bline.is_empty else bline
@lru_cache(maxsize=8)
def contains_point(self, point: Point) -> bool:
"""If the given point is within this lane."""
# TAI: could use (cached) self._sumo_lane.getBoundingBox(...) as a quick first-pass check...
lane_point = self.to_lane_coord(point)
return (
abs(lane_point.t) <= self._width / 2 and 0 <= lane_point.s < self.length
)
@lru_cache(maxsize=8)
def offset_along_lane(self, world_point: Point) -> float:
shape = self._sumo_lane.getShape(False)
point = world_point[:2]
if point not in shape:
return sumolib.geomhelper.polygonOffsetWithMinimumDistanceToPoint(
point, shape, perpendicular=False
)
# SUMO geomhelper.polygonOffset asserts when the point is part of the shape.
# We get around the assertion with a check if the point is part of the shape.
offset = 0
for i in range(len(shape) - 1):
if shape[i] == point:
break
offset += sumolib.geomhelper.distance(shape[i], shape[i + 1])
return offset
def width_at_offset(self, offset: float) -> float:
return self._width
@lru_cache(maxsize=8)
def project_along(
self, start_offset: float, distance: float
) -> Set[Tuple[RoadMap.Lane, float]]:
return super().project_along(start_offset, distance)
@lru_cache(maxsize=8)
def from_lane_coord(self, lane_point: RefLinePoint) -> Point:
shape = self._sumo_lane.getShape(False)
x, y = sumolib.geomhelper.positionAtShapeOffset(shape, lane_point.s)
return Point(x=x, y=y)
@lru_cache(maxsize=8)
def to_lane_coord(self, world_point: Point) -> RefLinePoint:
return super().to_lane_coord(world_point)
@lru_cache(maxsize=8)
def center_at_point(self, point: Point) -> Point:
return super().center_at_point(point)
@lru_cache(8)
def edges_at_point(self, point: Point) -> Tuple[Point, Point]:
return super().edges_at_point(point)
@lru_cache(8)
def vector_at_offset(self, start_offset: float) -> np.ndarray:
return super().vector_at_offset(start_offset)
@lru_cache(maxsize=8)
def center_pose_at_point(self, point: Point) -> Pose:
return super().center_pose_at_point(point)
@lru_cache(maxsize=8)
def curvature_radius_at_offset(
self, offset: float, lookahead: int = 5
) -> float:
return super().curvature_radius_at_offset(offset, lookahead)
def lane_by_id(self, lane_id: str) -> RoadMap.Lane:
lane = self._lanes.get(lane_id)
if lane:
return lane
sumo_lane = self._graph.getLane(lane_id)
if not sumo_lane:
self._log.warning(
f"SumoRoadNetwork got request for unknown lane_id '{lane_id}'"
)
return None
lane = SumoRoadNetwork.Lane(lane_id, sumo_lane, self)
self._lanes[lane_id] = lane
assert lane_id not in self._surfaces
self._surfaces[lane_id] = lane
return lane
class Road(RoadMap.Road, Surface):
"""This is akin to a 'road segment' in real life.
Many of these might correspond to a single named road in reality."""
def __init__(self, road_id: str, sumo_edge: Edge, road_map):
super().__init__(road_id, road_map)
self._road_id = road_id
self._sumo_edge = sumo_edge
@cached_property
def is_junction(self) -> bool:
return self._sumo_edge.isSpecial()
@cached_property
def length(self) -> float:
return self._sumo_edge.getLength()
@property
def road_id(self) -> str:
return self._road_id
@cached_property
def incoming_roads(self) -> List[RoadMap.Road]:
return [
self._map.road_by_id(edge.getID())
for edge in self._sumo_edge.getIncoming().keys()
]
@cached_property
def outgoing_roads(self) -> List[RoadMap.Road]:
return [
self._map.road_by_id(edge.getID())
for edge in self._sumo_edge.getOutgoing().keys()
]
@cached_property
def entry_surfaces(self) -> List[RoadMap.Surface]:
# TAI: also include lanes here?
return self.incoming_roads
@cached_property
def exit_surfaces(self) -> List[RoadMap.Surface]:
# TAI: also include lanes here?
return self.outgoing_roads
@lru_cache(maxsize=16)
def oncoming_roads_at_point(self, point: Point) -> List[RoadMap.Road]:
result = []
for lane in self.lanes:
offset = lane.to_lane_coord(point).s
result += [
ol.road
for ol in lane.oncoming_lanes_at_offset(offset)
if ol.road != self
]
return result
@cached_property
def parallel_roads(self) -> List[RoadMap.Road]:
from_node, to_node = (
self._sumo_edge.getFromNode(),
self._sumo_edge.getToNode(),
)
return [
self._map.road_by_id(edge.getID())
for edge in from_node.getOutgoing()
if self.road_id != edge.getID()
and edge.getToNode().getID() == to_node.getID()
]
@cached_property
def lanes(self) -> List[RoadMap.Lane]:
return [
self._map.lane_by_id(sumo_lane.getID())
for sumo_lane in self._sumo_edge.getLanes()
]
def lane_at_index(self, index: int) -> RoadMap.Lane:
return self.lanes[index]
@lru_cache(maxsize=8)
def contains_point(self, point: Point) -> bool:
# TAI: could use (cached) self._sumo_edge.getBoundingBox(...) as a quick first-pass check...
for lane in self.lanes:
if lane.contains_point(point):
return True
return False
@lru_cache(maxsize=8)
def edges_at_point(self, point: Point) -> Tuple[Point, Point]:
lanes = self.lanes
_, right_edge = lanes[0].edges_at_point(point)
left_edge, _ = lanes[-1].edges_at_point(point)
return left_edge, right_edge
@lru_cache(maxsize=4)
def shape(
self, buffer_width: float = 0.0, default_width: Optional[float] = None
) -> Polygon:
new_width = buffer_width
if default_width:
new_width += default_width
assert new_width >= 0.0
if new_width > 0:
return buffered_shape(self._sumo_edge.getShape(), new_width)
line = self._sumo_edge.getShape()
bline = buffered_shape(line, 0.0)
return line if bline.is_empty else bline
def road_by_id(self, road_id: str) -> RoadMap.Road:
road = self._roads.get(road_id)
if road:
return road
sumo_edge = self._graph.getEdge(road_id)
if not sumo_edge:
self._log.warning(
f"SumoRoadNetwork got request for unknown road_id '{road_id}'"
)
return None
road = SumoRoadNetwork.Road(road_id, sumo_edge, self)
self._roads[road_id] = road
assert road_id not in self._surfaces
self._surfaces[road_id] = road
return road
@lru_cache(maxsize=16)
def nearest_lanes(
self, point: Point, radius: Optional[float] = None, include_junctions=True
) -> List[Tuple[RoadMap.Lane, float]]:
if radius is None:
radius = max(10, 2 * self._default_lane_width)
# XXX: note that this getNeighboringLanes() call is fairly heavy/expensive (as revealed by profiling)
# The includeJunctions parameter is the opposite of include_junctions because
# what it does in the Sumo query is attach the junction ("node") shape
# to the shape of the non-special lanes that connect to it. So if
# includeJunctions is True, we are more likely to hit "normal" lanes
# even when in an intersection where we want to hit "special"
# lanes when we specify include_junctions=True. Note that "special"
# lanes are always candidates to be returned, no matter what.
candidate_lanes = self._graph.getNeighboringLanes(
point[0],
point[1],
r=radius,
includeJunctions=not include_junctions,
allowFallback=False,
)
if not include_junctions:
candidate_lanes = [
lane for lane in candidate_lanes if not lane[0].getEdge().isSpecial()
]
candidate_lanes.sort(key=lambda lane_dist_tup: lane_dist_tup[1])
return [(self.lane_by_id(lane.getID()), dist) for lane, dist in candidate_lanes]
@lru_cache(maxsize=16)
def road_with_point(self, point: Point) -> RoadMap.Road:
radius = max(5, 2 * self._default_lane_width)
for nl, dist in self.nearest_lanes(point, radius):
if dist < 0.5 * nl._width + 1e-1:
return nl.road
return None
def generate_routes(
self,
start_road: RoadMap.Road,
end_road: RoadMap.Road,
via: Optional[Sequence[RoadMap.Road]] = None,
max_to_gen: int = 1,
) -> List[RoadMap.Route]:
assert max_to_gen == 1, "multiple route generation not yet supported for Sumo"
newroute = SumoRoadNetwork.Route(self)
result = [newroute]
roads = [start_road]
if via:
roads += via
if end_road != start_road:
roads.append(end_road)
edges = []
for cur_road, next_road in zip(roads, roads[1:] + [None]):
if not next_road:
edges.append(cur_road._sumo_edge)
break
sub_route = (
self._graph.getShortestPath(cur_road._sumo_edge, next_road._sumo_edge)[
0
]
or []
)
if len(sub_route) < 2:
self._log.warning(
f"Unable to find valid path between {(cur_road.road_id, next_road.road_id)}."
)
return result
# The sub route includes the boundary roads (cur_road, next_road).
# We clip the latter to prevent duplicates
edges.extend(sub_route[:-1])
if len(edges) == 1:
# route is within a single road
newroute.add_road(self.road_by_id(edges[0].getID()))
return result
used_edges = []
edge_ids = []
adjacent_edge_pairs = zip(edges, edges[1:])
for cur_edge, next_edge in adjacent_edge_pairs:
internal_routes = self._internal_routes_between(cur_edge, next_edge)
for internal_route in internal_routes:
used_edges.extend(internal_route)
edge_ids.extend([edge.getID() for edge in internal_route])
_, indices = np.unique(edge_ids, return_index=True)
for idx in sorted(indices):
newroute.add_road(self.road_by_id(used_edges[idx].getID()))
return result
def _internal_routes_between(
self, start_edge: Edge, end_edge: Edge
) -> List[List[Edge]]:
routes = []
outgoing = start_edge.getOutgoing()
assert end_edge in outgoing, (
f"{end_edge.getID()} not in {[e.getID() for e in outgoing.keys()]}. "
"Perhaps you're using a LapMission on a route that is not a closed loop?"
)
connections = outgoing[end_edge]
for connection in connections:
conn_route = [start_edge]
# This connection may have some intermediate 'via' lanes.
# we need to follow these to eventually leave the junction.
via_lane_id = connection.getViaLaneID()
while via_lane_id:
via_lane = self.lane_by_id(via_lane_id)
via_road = via_lane.road
via_edge = via_road._sumo_edge
conn_route.append(via_edge)
# Sometimes we get the same via lane id multiple times.
# We convert to a set to remove duplicates.
next_via_lane_ids = set(
conn.getViaLaneID() for conn in via_edge.getOutgoing()[end_edge]
)
assert (
len(next_via_lane_ids) == 1
), f"Expected exactly one next via lane id at {via_lane_id}, got: {next_via_lane_ids}"
via_lane_id = list(next_via_lane_ids)[0]
conn_route.append(end_edge)
routes.append(conn_route)
return routes
def random_route(self, max_route_len: int = 10) -> RoadMap.Route:
route = SumoRoadNetwork.Route(self)
next_edges = self._graph.getEdges(False)
while next_edges and len(route.roads) < max_route_len:
cur_edge = random.choice(next_edges)
route.add_road(self.road_by_id(cur_edge.getID()))
next_edges = list(cur_edge.getOutgoing().keys())
return route
def empty_route(self) -> RoadMap.Route:
return SumoRoadNetwork.Route(self)
def waypoint_paths(
self,
pose: Pose,
lookahead: int,
within_radius: float = 5,
route: RoadMap.Route = None,
) -> List[List[Waypoint]]:
if route:
if route.roads:
road_ids = [road.road_id for road in route.roads]
else:
road_ids = self._resolve_in_junction(pose)
if road_ids:
return self._waypoint_paths_along_route(
pose.position, lookahead, road_ids
)
closest_lps = self._lanepoints.closest_lanepoints(
[pose], within_radius=within_radius
)
closest_lane = closest_lps[0].lane
# TAI: the above lines could be replaced by:
# closest_lane = self.nearest_lane(pose.position, radius=within_radius)
waypoint_paths = []
for lane in closest_lane.road.lanes:
waypoint_paths += lane._waypoint_paths_at(pose.position, lookahead)
return sorted(waypoint_paths, key=lambda p: p[0].lane_index)
def _resolve_in_junction(self, pose: Pose) -> List[str]:
# This is so that the waypoints don't jump between connections
# when we don't know which lane we're on in a junction.
# We take the 10 closest lanepoints then filter down to that which has
# the closest heading. This way we get the lanepoint on our lane instead of
# a potentially closer lane that is on a different junction connection.
closest_lps = self._lanepoints.closest_lanepoints([pose], within_radius=None)
closest_lps.sort(key=lambda lp: abs(pose.heading - lp.pose.heading))
lane = closest_lps[0].lane
if not lane.in_junction:
return []
road_ids = [lane.road.road_id]
next_roads = lane.road.outgoing_roads
assert (
len(next_roads) <= 1
), "A junction is expected to have <= 1 outgoing roads"
if next_roads:
road_ids.append(next_roads[0].road_id)
return road_ids
def _waypoint_paths_along_route(
self, point, lookahead: int, route: Sequence[str]
) -> List[List[Waypoint]]:
"""finds the closest lane to vehicle's position that is on its route,
then gets waypoint paths from all lanes in its edge there."""
assert len(route) > 0, f"Expected at least 1 road in the route, got: {route}"
closest_llp_on_each_route_road = [
self._lanepoints.closest_linked_lanepoint_on_road(point, road)
for road in route
]
closest_linked_lp = min(
closest_llp_on_each_route_road,
key=lambda l_lp: np.linalg.norm(
vec_2d(l_lp.lp.pose.position) - vec_2d(point)
),
)
closest_lane = closest_linked_lp.lp.lane
waypoint_paths = []
for lane in closest_lane.road.lanes:
waypoint_paths += lane._waypoint_paths_at(point, lookahead, route)
return sorted(waypoint_paths, key=lambda p: p[0].lane_index)
class Route(RoadMap.Route):
"""Describes a route between two roads."""
def __init__(self, road_map):
self._roads = []
self._length = 0
self._map = road_map
@property
def roads(self) -> List[RoadMap.Road]:
return self._roads
@property
def road_length(self) -> float:
return self._length
def add_road(self, road: RoadMap.Road):
"""Add a road to this route."""
self._length += road.length
self._roads.append(road)
@cached_property
def geometry(self) -> Sequence[Sequence[Tuple[float, float]]]:
return [
list(
road.shape(
0.0, sum([lane._width for lane in road.lanes])
).exterior.coords
)
for road in self.roads
]
@lru_cache(maxsize=8)
def distance_between(self, start: Point, end: Point) -> Optional[float]:
for cand_start_lane, _ in self._map.nearest_lanes(start, 30.0, False):
try:
sind = self._roads.index(cand_start_lane.road)
break
except ValueError:
pass
else:
logging.warning("unable to find road on route near start point")
return None
start_road = cand_start_lane.road
for cand_end_lane, _ in self._map.nearest_lanes(end, 30.0, False):
try:
eind = self._roads.index(cand_end_lane.road)
break
except ValueError:
pass
else:
logging.warning("unable to find road on route near end point")
return None
end_road = cand_end_lane.road
d = 0
start_offset = cand_start_lane.offset_along_lane(start)
end_offset = cand_end_lane.offset_along_lane(end)
if start_road == end_road:
return end_offset - start_offset
negate = False
if sind > eind:
cand_start_lane = cand_end_lane
start_road, end_road = end_road, start_road
start_offset, end_offset = end_offset, start_offset
negate = True
for road in self._roads:
if d == 0 and road == start_road:
d += cand_start_lane.length - start_offset
elif road == end_road:
d += end_offset
break
elif d > 0:
d += road.length
return -d if negate else d
@lru_cache(maxsize=8)
def project_along(
self, start: Point, distance: float
) -> Optional[Set[Tuple[RoadMap.Lane, float]]]:
route_roads = set(self._roads)
for cand_start_lane, _ in self._map.nearest_lanes(start, 30.0, False):
if cand_start_lane.road in route_roads:
break
else:
logging.warning("unable to find road on route near start point")
return None
started = False
for road in self._roads:
if not started:
if road != cand_start_lane.road:
continue
started = True
lane_pt = cand_start_lane.to_lane_coord(start)
start_offset = lane_pt.s
else:
start_offset = 0
if distance > road.length - start_offset:
distance -= road.length - start_offset
continue
return {(lane, distance) for lane in road.lanes}
return set()
def _compute_road_polygons(self):
lane_to_poly = {}
for edge in self._graph.getEdges():
for lane in edge.getLanes():
shape = buffered_shape(lane.getShape(), lane.getWidth())
# Check if "shape" is just a point.
if len(set(shape.exterior.coords)) == 1:
logging.debug(
f"Lane:{lane.getID()} has provided non-shape values {lane.getShape()}"
)
continue
lane_to_poly[lane.getID()] = shape
# Remove holes created at tight junctions due to crude map geometry
self._snap_internal_holes(lane_to_poly)
self._snap_external_holes(lane_to_poly)
# Remove break in visible lane connections created when lane enters an intersection
self._snap_internal_edges(lane_to_poly)
polys = list(lane_to_poly.values())
for node in self._graph.getNodes():
line = node.getShape()
if len(line) <= 2 or len(set(line)) == 1:
self._log.debug(
"Skipping {}-type node with <= 2 vertices".format(node.getType())
)
continue
polys.append(Polygon(line))
return polys
def _snap_internal_edges(self, lane_to_poly, snap_threshold=2):
# HACK: When internal edges with tight curves are buffered, their ends do not
# create a tight seam with the connected lanes. This procedure attempts
# to remedy that with snapping.
for lane_id in lane_to_poly:
lane = self._graph.getLane(lane_id)
# Only do snapping for internal edge lanes
if not lane.getEdge().isSpecial():
continue
lane_shape = lane_to_poly[lane_id]
incoming = self._graph.getLane(lane_id).getIncoming()[0]
incoming_shape = lane_to_poly.get(incoming.getID())
if incoming_shape:
lane_shape = Polygon(snap(lane_shape, incoming_shape, snap_threshold))
lane_to_poly[lane_id] = lane_shape
outgoing = self._graph.getLane(lane_id).getOutgoing()[0].getToLane()
outgoing_shape = lane_to_poly.get(outgoing.getID())
if outgoing_shape:
lane_shape = Polygon(snap(lane_shape, outgoing_shape, snap_threshold))
lane_to_poly[lane_id] = lane_shape
def _snap_internal_holes(self, lane_to_poly, snap_threshold=2):
for lane_id in lane_to_poly:
lane = self._graph.getLane(lane_id)
# Only do snapping for internal edge lane holes
if not lane.getEdge().isSpecial():
continue
lane_shape = lane_to_poly[lane_id]
new_coords = []
last_added = None
for x, y in lane_shape.exterior.coords:
p = shPoint(x, y)
snapped_to = set()
moved = True
thresh = snap_threshold
while moved:
moved = False
for nl, dist in self.nearest_lanes(
Point(p.x, p.y),
include_junctions=False,
):
if not nl or nl.lane_id == lane_id or nl in snapped_to:
continue
nl_shape = lane_to_poly.get(nl.lane_id)
if nl_shape:
_, np = nearest_points(p, nl_shape)
if p.distance(np) < thresh:
p = np  # snap p to the nearest point on the neighboring lane shape (here `np` is a shapely Point, shadowing the numpy alias locally)
# allow vertices to snap to more than one thing, but
# try to avoid infinite loops and making things worse instead of better here...
# (so reduce snap dist threshold by an arbitrary amount each pass.)
moved = True
snapped_to.add(nl)
thresh *= 0.75
if p != last_added:
new_coords.append(p)
last_added = p
if new_coords:
lane_to_poly[lane_id] = Polygon(new_coords)
def _snap_external_holes(self, lane_to_poly, snap_threshold=2):
for lane_id in lane_to_poly:
lane = self._graph.getLane(lane_id)
# Only do snapping for external edge lane holes
if lane.getEdge().isSpecial():
continue
incoming = lane.getIncoming()
if incoming and incoming[0].getEdge().isSpecial():
continue
outgoing = lane.getOutgoing()
if outgoing:
outgoing_lane = outgoing[0].getToLane()
if outgoing_lane.getEdge().isSpecial():
continue
lane_shape = lane_to_poly[lane_id]
new_coords = []
last_added = None
for x, y in lane_shape.exterior.coords:
p = shPoint(x, y)
snapped_to = set()
moved = True
thresh = snap_threshold
while moved:
moved = False
for nl, dist in self.nearest_lanes(
Point(p.x, p.y),
include_junctions=False,
):
if (
not nl
or nl.in_junction
or nl.lane_id == lane_id
or nl in snapped_to
):
continue
nl_shape = lane_to_poly.get(nl.lane_id)
if nl_shape:
_, np = nearest_points(p, nl_shape)
if p.distance(np) < thresh:
p = np  # snap p to the nearest point on the neighboring lane shape (here `np` is a shapely Point, shadowing the numpy alias locally)
# allow vertices to snap to more than one thing, but
# try to avoid infinite loops and making things worse instead of better here...
# (so reduce snap dist threshold by an arbitrary amount each pass.)
moved = True
snapped_to.add(nl)
thresh *= 0.75
if p != last_added:
new_coords.append(p)
last_added = p
if new_coords:
lane_to_poly[lane_id] = Polygon(new_coords)
def _make_glb_from_polys(self, polygons):
scene = trimesh.Scene()
mesh = generate_mesh_from_polygons(polygons)
# Attach additional information for rendering as metadata in the map glb
metadata = {}
# <2D-BOUNDING_BOX>: four floats separated by ',' (<FLOAT>,<FLOAT>,<FLOAT>,<FLOAT>),
# which describe x-minimum, y-minimum, x-maximum, and y-maximum
metadata["bounding_box"] = self._graph.getBoundary()
# lane markings information
lane_dividers, edge_dividers = self._compute_traffic_dividers()
metadata["lane_dividers"] = lane_dividers
metadata["edge_dividers"] = edge_dividers
mesh.visual = trimesh.visual.TextureVisuals(
material=trimesh.visual.material.PBRMaterial()
)
scene.add_geometry(mesh)
return _GLBData(gltf.export_glb(scene, extras=metadata, include_normals=True))
def _compute_traffic_dividers(self, threshold=1):
lane_dividers = [] # divider between lanes with same traffic direction
edge_dividers = [] # divider between lanes with opposite traffic direction
edge_borders = []
for edge in self._graph.getEdges():
# Omit intersection for now
if edge.getFunction() == "internal":
continue
lanes = edge.getLanes()
for i in range(len(lanes)):
shape = lanes[i].getShape()
left_side = sumolib.geomhelper.move2side(
shape, -lanes[i].getWidth() / 2
)
right_side = sumolib.geomhelper.move2side(
shape, lanes[i].getWidth() / 2
)
if i == 0:
edge_borders.append(right_side)
if i == len(lanes) - 1:
edge_borders.append(left_side)
else:
lane_dividers.append(left_side)
# Edge borders that overlap in position form an edge divider
for i in range(len(edge_borders) - 1):
for j in range(i + 1, len(edge_borders)):
edge_border_i = np.array(
[edge_borders[i][0], edge_borders[i][-1]]
) # start and end position
edge_border_j = np.array(
[edge_borders[j][-1], edge_borders[j][0]]
) # start and end position with reverse traffic direction
# The edge borders of two lanes do not always overlap perfectly, thus relax the tolerance threshold to 1
if np.linalg.norm(edge_border_i - edge_border_j) < threshold:
edge_dividers.append(edge_borders[i])
return lane_dividers, edge_dividers
# specific to SUMO road networks
def get_edge_in_junction(
self, start_edge_id, start_lane_index, end_edge_id, end_lane_index
) -> str:
"""Returns the id of the edge between the start and end edge. Can be used for any edge but
is mainly useful for junctions.
"""
start_edge = self._graph.getEdge(start_edge_id)
start_lane = start_edge.getLane(start_lane_index)
end_edge = self._graph.getEdge(end_edge_id)
end_lane = end_edge.getLane(end_lane_index)
connection = start_lane.getConnection(end_lane)
# If there is no connection between them, fall back to the edge-level connections
if connection is None:
# The first id is good enough since we just need to determine the junction edge id
connection = start_edge.getConnections(end_edge)[0]
connection_lane_id = connection.getViaLaneID()
connection_lane = self._graph.getLane(connection_lane_id)
return connection_lane.getEdge().getID()
class _WaypointsCache:
def __init__(self):
self.lookahead = 0
self.point = (0, 0, 0)
self.filter_road_ids = ()
self._starts = {}
# XXX: all vehicles share this cache now (as opposed to before
# when it was in Plan.py and each vehicle had its own cache).
# TODO: probably need to add vehicle_id to the key somehow (or just make it bigger)
def _match(self, lookahead, point, filter_road_ids) -> bool:
return (
lookahead <= self.lookahead
and point[0] == self.point[0]
and point[1] == self.point[1]
and filter_road_ids == self.filter_road_ids
)
def update(
self,
lookahead: int,
point: Tuple[float, float, float],
filter_road_ids: tuple,
llp,
paths: List[List[Waypoint]],
):
"""Update the current cache if not already cached."""
if not self._match(lookahead, point, filter_road_ids):
self.lookahead = lookahead
self.point = point
self.filter_road_ids = filter_road_ids
self._starts = {}
self._starts[llp.lp.lane.index] = paths
def query(
self,
lookahead: int,
point: Tuple[float, float, float],
filter_road_ids: tuple,
llp,
) -> Optional[List[List[Waypoint]]]:
"""Attempt to find previously cached waypoints"""
if self._match(lookahead, point, filter_road_ids):
hit = self._starts.get(llp.lp.lane.index, None)
if hit:
# consider just returning all of them (not slicing)?
return [path[: (lookahead + 1)] for path in hit]
return None
def _waypoints_starting_at_lanepoint(
self,
lanepoint: LinkedLanePoint,
lookahead: int,
filter_road_ids: tuple,
point: Tuple[float, float, float],
) -> List[List[Waypoint]]:
"""computes equally-spaced Waypoints for all lane paths starting at lanepoint
up to lookahead waypoints ahead, constrained to filter_road_ids if specified."""
# The following acts sort of like lru_cache(1), but it allows
# for lookahead to be <= to the cached value...
cache_paths = self._waypoints_cache.query(
lookahead, point, filter_road_ids, lanepoint
)
if cache_paths:
return cache_paths
lanepoint_paths = self._lanepoints.paths_starting_at_lanepoint(
lanepoint, lookahead, filter_road_ids
)
result = [
SumoRoadNetwork._equally_spaced_path(
path, point, self._map_spec.lanepoint_spacing
)
for path in lanepoint_paths
]
self._waypoints_cache.update(
lookahead, point, filter_road_ids, lanepoint, result
)
return result
@staticmethod
def _equally_spaced_path(
path: Sequence[LinkedLanePoint],
point: Tuple[float, float, float],
lp_spacing: float,
) -> List[Waypoint]:
"""given a list of LanePoints starting near point, that may not be evenly spaced,
returns the same number of Waypoints that are evenly spaced and start at point."""
continuous_variables = [
"positions_x",
"positions_y",
"headings",
"lane_width",
"speed_limit",
]
discrete_variables = ["lane_id", "lane_index"]
ref_lanepoints_coordinates = {
parameter: [] for parameter in (continuous_variables + discrete_variables)
}
for idx, lanepoint in enumerate(path):
if lanepoint.is_inferred and 0 < idx < len(path) - 1:
continue
ref_lanepoints_coordinates["positions_x"].append(
lanepoint.lp.pose.position[0]
)
ref_lanepoints_coordinates["positions_y"].append(
lanepoint.lp.pose.position[1]
)
ref_lanepoints_coordinates["headings"].append(
lanepoint.lp.pose.heading.as_bullet
)
ref_lanepoints_coordinates["lane_id"].append(lanepoint.lp.lane.lane_id)
ref_lanepoints_coordinates["lane_index"].append(lanepoint.lp.lane.index)
ref_lanepoints_coordinates["lane_width"].append(lanepoint.lp.lane._width)
ref_lanepoints_coordinates["speed_limit"].append(
lanepoint.lp.lane.speed_limit
)
ref_lanepoints_coordinates["headings"] = inplace_unwrap(
ref_lanepoints_coordinates["headings"]
)
first_lp_heading = ref_lanepoints_coordinates["headings"][0]
lp_position = path[0].lp.pose.position[:2]
vehicle_pos = np.array(point[:2])
heading_vec = np.array(radians_to_vec(first_lp_heading))
projected_distant_lp_vehicle = np.inner(
(vehicle_pos - lp_position), heading_vec
)
ref_lanepoints_coordinates["positions_x"][0] = (
lp_position[0] + projected_distant_lp_vehicle * heading_vec[0]
)
ref_lanepoints_coordinates["positions_y"][0] = (
lp_position[1] + projected_distant_lp_vehicle * heading_vec[1]
)
# To ensure that the distances between waypoints are equal, we use an
# interpolation approach inspired by:
# https://stackoverflow.com/a/51515357
cumulative_path_dist = np.cumsum(
np.sqrt(
np.ediff1d(ref_lanepoints_coordinates["positions_x"], to_begin=0) ** 2
+ np.ediff1d(ref_lanepoints_coordinates["positions_y"], to_begin=0) ** 2
)
)
if len(cumulative_path_dist) <= lp_spacing:
lp = path[0].lp
return [
Waypoint(
pos=lp.pose.position,
heading=lp.pose.heading,
lane_width=lp.lane._width,
speed_limit=lp.lane.speed_limit,
lane_id=lp.lane.lane_id,
lane_index=lp.lane.index,
)
]
evenly_spaced_cumulative_path_dist = np.linspace(
0, cumulative_path_dist[-1], len(path)
)
evenly_spaced_coordinates = {}
for variable in continuous_variables:
evenly_spaced_coordinates[variable] = np.interp(
evenly_spaced_cumulative_path_dist,
cumulative_path_dist,
ref_lanepoints_coordinates[variable],
)
for variable in discrete_variables:
ref_coordinates = ref_lanepoints_coordinates[variable]
evenly_spaced_coordinates[variable] = []
jdx = 0
for idx in range(len(path)):
while (
jdx + 1 < len(cumulative_path_dist)
and evenly_spaced_cumulative_path_dist[idx]
> cumulative_path_dist[jdx + 1]
):
jdx += 1
evenly_spaced_coordinates[variable].append(ref_coordinates[jdx])
evenly_spaced_coordinates[variable].append(ref_coordinates[-1])
equally_spaced_path = []
for idx in range(len(path)):
equally_spaced_path.append(
Waypoint(
pos=np.array(
[
evenly_spaced_coordinates["positions_x"][idx],
evenly_spaced_coordinates["positions_y"][idx],
]
),
heading=Heading(evenly_spaced_coordinates["headings"][idx]),
lane_width=evenly_spaced_coordinates["lane_width"][idx],
speed_limit=evenly_spaced_coordinates["speed_limit"][idx],
lane_id=evenly_spaced_coordinates["lane_id"][idx],
lane_index=evenly_spaced_coordinates["lane_index"][idx],
)
)
return equally_spaced_path
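# Standalone sketch of the even-spacing trick used in _equally_spaced_path above.
# (Assumption: this helper is illustrative only and not part of the SMARTS API.)
# Given an (N, 2) polyline it resamples N points at equal arc-length intervals using
# cumulative segment lengths and np.interp, mirroring the approach cited from
# https://stackoverflow.com/a/51515357.
def _resample_polyline_evenly(points: np.ndarray) -> np.ndarray:
    # Cumulative arc length at each vertex (starts at 0).
    cumulative = np.cumsum(
        np.sqrt(
            np.ediff1d(points[:, 0], to_begin=0) ** 2
            + np.ediff1d(points[:, 1], to_begin=0) ** 2
        )
    )
    # Target arc lengths, equally spaced from start to end.
    even = np.linspace(0, cumulative[-1], len(points))
    return np.column_stack(
        [np.interp(even, cumulative, points[:, 0]), np.interp(even, cumulative, points[:, 1])]
    )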
|
[
"smarts.core.utils.sumo.sumolib.geomhelper.positionAtShapeOffset",
"os.path.isfile",
"numpy.linalg.norm",
"numpy.inner",
"trimesh.exchange.gltf.export_glb",
"numpy.interp",
"os.path.join",
"numpy.unique",
"shapely.geometry.Point",
"trimesh.Scene",
"shapely.geometry.Polygon",
"logging.warning",
"os.path.dirname",
"numpy.radians",
"subprocess.check_output",
"smarts.core.utils.sumo.sumolib.geomhelper.polygonOffsetWithMinimumDistanceToPoint",
"smarts.core.utils.sumo.sumolib.net.readNet",
"smarts.core.utils.sumo.sumolib.geomhelper.distance",
"numpy.dot",
"shapely.ops.nearest_points",
"shapely.ops.snap",
"os.path.isdir",
"random.choice",
"trimesh.visual.material.PBRMaterial",
"numpy.array",
"functools.lru_cache",
"logging.getLogger",
"numpy.ediff1d"
] |
[((4537, 4557), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (4546, 4557), False, 'from functools import lru_cache\n'), ((26149, 26170), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (26158, 26170), False, 'from functools import lru_cache\n'), ((27621, 27642), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (27630, 27642), False, 'from functools import lru_cache\n'), ((3288, 3330), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (3305, 3330), False, 'import logging\n'), ((4411, 4441), 'os.path.dirname', 'os.path.dirname', (['net_file_path'], {}), '(net_file_path)\n', (4426, 4441), False, 'import os\n'), ((4457, 4513), 'os.path.join', 'os.path.join', (['net_file_folder', 'cls.shifted_net_file_name'], {}), '(net_file_folder, cls.shifted_net_file_name)\n', (4469, 4513), False, 'import os\n'), ((4682, 4713), 'logging.getLogger', 'logging.getLogger', (['cls.__name__'], {}), '(cls.__name__)\n', (4699, 4713), False, 'import logging\n'), ((5950, 5998), 'smarts.core.utils.sumo.sumolib.net.readNet', 'sumolib.net.readNet', (['net_file'], {'withInternal': '(True)'}), '(net_file, withInternal=True)\n', (5969, 5998), False, 'from smarts.core.utils.sumo import sumolib\n'), ((7458, 7488), 'os.path.isdir', 'os.path.isdir', (['map_spec.source'], {}), '(map_spec.source)\n', (7471, 7488), False, 'import os\n'), ((14204, 14225), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (14213, 14225), False, 'from functools import lru_cache\n'), ((17696, 17716), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(4)'}), '(maxsize=4)\n', (17705, 17716), False, 'from functools import lru_cache\n'), ((18393, 18413), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (18402, 18413), False, 'from functools import lru_cache\n'), ((18819, 18839), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (18828, 18839), False, 'from functools import lru_cache\n'), ((19699, 19719), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (19708, 19719), False, 'from functools import lru_cache\n'), ((19923, 19943), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (19932, 19943), False, 'from functools import lru_cache\n'), ((20192, 20212), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (20201, 20212), False, 'from functools import lru_cache\n'), ((20346, 20366), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (20355, 20366), False, 'from functools import lru_cache\n'), ((20485, 20497), 'functools.lru_cache', 'lru_cache', (['(8)'], {}), '(8)\n', (20494, 20497), False, 'from functools import lru_cache\n'), ((20628, 20640), 'functools.lru_cache', 'lru_cache', (['(8)'], {}), '(8)\n', (20637, 20640), False, 'from functools import lru_cache\n'), ((20780, 20800), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (20789, 20800), False, 'from functools import lru_cache\n'), ((20928, 20948), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (20937, 20948), False, 'from functools import lru_cache\n'), ((23185, 23206), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (23194, 23206), False, 'from functools import lru_cache\n'), ((24412, 24432), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 
'(8)'}), '(maxsize=8)\n', (24421, 24432), False, 'from functools import lru_cache\n'), ((24745, 24765), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (24754, 24765), False, 'from functools import lru_cache\n'), ((25037, 25057), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(4)'}), '(maxsize=4)\n', (25046, 25057), False, 'from functools import lru_cache\n'), ((29803, 29841), 'numpy.unique', 'np.unique', (['edge_ids'], {'return_index': '(True)'}), '(edge_ids, return_index=True)\n', (29812, 29841), True, 'import numpy as np\n'), ((35980, 36000), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (35989, 36000), False, 'from functools import lru_cache\n'), ((37792, 37812), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (37801, 37812), False, 'from functools import lru_cache\n'), ((45593, 45608), 'trimesh.Scene', 'trimesh.Scene', ([], {}), '()\n', (45606, 45608), False, 'import trimesh\n'), ((54227, 54246), 'numpy.array', 'np.array', (['point[:2]'], {}), '(point[:2])\n', (54235, 54246), True, 'import numpy as np\n'), ((54351, 54399), 'numpy.inner', 'np.inner', (['(vehicle_pos - lp_position)', 'heading_vec'], {}), '(vehicle_pos - lp_position, heading_vec)\n', (54359, 54399), True, 'import numpy as np\n'), ((2213, 2238), 'numpy.radians', 'np.radians', (['camera.fov[1]'], {}), '(camera.fov[1])\n', (2223, 2238), True, 'import numpy as np\n'), ((5018, 5131), 'subprocess.check_output', 'check_output', (["['netconvert', '--offset.disable-normalization=FALSE', '-s', net_file_path,\n '-o', shifted_path]"], {}), "(['netconvert', '--offset.disable-normalization=FALSE', '-s',\n net_file_path, '-o', shifted_path])\n", (5030, 5131), False, 'from subprocess import check_output\n'), ((7575, 7619), 'os.path.join', 'os.path.join', (['map_spec.source', '"""map.net.xml"""'], {}), "(map_spec.source, 'map.net.xml')\n", (7587, 7619), False, 'import os\n'), ((14738, 14761), 'numpy.linalg.norm', 'np.linalg.norm', (['my_vect'], {}), '(my_vect)\n', (14752, 14761), True, 'import numpy as np\n'), ((20085, 20146), 'smarts.core.utils.sumo.sumolib.geomhelper.positionAtShapeOffset', 'sumolib.geomhelper.positionAtShapeOffset', (['shape', 'lane_point.s'], {}), '(shape, lane_point.s)\n', (20125, 20146), False, 'from smarts.core.utils.sumo import sumolib\n'), ((31756, 31781), 'random.choice', 'random.choice', (['next_edges'], {}), '(next_edges)\n', (31769, 31781), False, 'import random\n'), ((46382, 46443), 'trimesh.exchange.gltf.export_glb', 'gltf.export_glb', (['scene'], {'extras': 'metadata', 'include_normals': '(True)'}), '(scene, extras=metadata, include_normals=True)\n', (46397, 46443), False, 'from trimesh.exchange import gltf\n'), ((55834, 55943), 'numpy.interp', 'np.interp', (['evenly_spaced_cumulative_path_dist', 'cumulative_path_dist', 'ref_lanepoints_coordinates[variable]'], {}), '(evenly_spaced_cumulative_path_dist, cumulative_path_dist,\n ref_lanepoints_coordinates[variable])\n', (55843, 55943), True, 'import numpy as np\n'), ((6137, 6169), 'os.path.isfile', 'os.path.isfile', (['shifted_net_file'], {}), '(shifted_net_file)\n', (6151, 6169), False, 'import os\n'), ((6322, 6378), 'smarts.core.utils.sumo.sumolib.net.readNet', 'sumolib.net.readNet', (['shifted_net_file'], {'withInternal': '(True)'}), '(shifted_net_file, withInternal=True)\n', (6341, 6378), False, 'from smarts.core.utils.sumo import sumolib\n'), ((15122, 15140), 'numpy.linalg.norm', 'np.linalg.norm', (['lv'], {}), '(lv)\n', (15136, 15140), True, 'import 
numpy as np\n'), ((19052, 19149), 'smarts.core.utils.sumo.sumolib.geomhelper.polygonOffsetWithMinimumDistanceToPoint', 'sumolib.geomhelper.polygonOffsetWithMinimumDistanceToPoint', (['point', 'shape'], {'perpendicular': '(False)'}), '(point, shape,\n perpendicular=False)\n', (19110, 19149), False, 'from smarts.core.utils.sumo import sumolib\n'), ((19520, 19571), 'smarts.core.utils.sumo.sumolib.geomhelper.distance', 'sumolib.geomhelper.distance', (['shape[i]', 'shape[i + 1]'], {}), '(shape[i], shape[i + 1])\n', (19547, 19571), False, 'from smarts.core.utils.sumo import sumolib\n'), ((36373, 36437), 'logging.warning', 'logging.warning', (['"""unable to find road on route near start point"""'], {}), "('unable to find road on route near start point')\n", (36388, 36437), False, 'import logging\n'), ((36797, 36859), 'logging.warning', 'logging.warning', (['"""unable to find road on route near end point"""'], {}), "('unable to find road on route near end point')\n", (36812, 36859), False, 'import logging\n'), ((38186, 38250), 'logging.warning', 'logging.warning', (['"""unable to find road on route near start point"""'], {}), "('unable to find road on route near start point')\n", (38201, 38250), False, 'import logging\n'), ((40150, 40163), 'shapely.geometry.Polygon', 'Polygon', (['line'], {}), '(line)\n', (40157, 40163), False, 'from shapely.geometry import Polygon\n'), ((41817, 41830), 'shapely.geometry.Point', 'shPoint', (['x', 'y'], {}), '(x, y)\n', (41824, 41830), True, 'from shapely.geometry import Point as shPoint\n'), ((43174, 43193), 'shapely.geometry.Polygon', 'Polygon', (['new_coords'], {}), '(new_coords)\n', (43181, 43193), False, 'from shapely.geometry import Polygon\n'), ((43995, 44008), 'shapely.geometry.Point', 'shPoint', (['x', 'y'], {}), '(x, y)\n', (44002, 44008), True, 'from shapely.geometry import Point as shPoint\n'), ((45510, 45529), 'shapely.geometry.Polygon', 'Polygon', (['new_coords'], {}), '(new_coords)\n', (45517, 45529), False, 'from shapely.geometry import Polygon\n'), ((46276, 46313), 'trimesh.visual.material.PBRMaterial', 'trimesh.visual.material.PBRMaterial', ([], {}), '()\n', (46311, 46313), False, 'import trimesh\n'), ((47682, 47733), 'numpy.array', 'np.array', (['[edge_borders[i][0], edge_borders[i][-1]]'], {}), '([edge_borders[i][0], edge_borders[i][-1]])\n', (47690, 47733), True, 'import numpy as np\n'), ((47830, 47881), 'numpy.array', 'np.array', (['[edge_borders[j][-1], edge_borders[j][0]]'], {}), '([edge_borders[j][-1], edge_borders[j][0]])\n', (47838, 47881), True, 'import numpy as np\n'), ((15232, 15251), 'numpy.dot', 'np.dot', (['my_vect', 'lv'], {}), '(my_vect, lv)\n', (15238, 15251), True, 'import numpy as np\n'), ((40937, 40985), 'shapely.ops.snap', 'snap', (['lane_shape', 'incoming_shape', 'snap_threshold'], {}), '(lane_shape, incoming_shape, snap_threshold)\n', (40941, 40985), False, 'from shapely.ops import nearest_points, snap\n'), ((41252, 41300), 'shapely.ops.snap', 'snap', (['lane_shape', 'outgoing_shape', 'snap_threshold'], {}), '(lane_shape, outgoing_shape, snap_threshold)\n', (41256, 41300), False, 'from shapely.ops import nearest_points, snap\n'), ((48118, 48163), 'numpy.linalg.norm', 'np.linalg.norm', (['(edge_border_i - edge_border_j)'], {}), '(edge_border_i - edge_border_j)\n', (48132, 48163), True, 'import numpy as np\n'), ((54956, 55021), 'numpy.ediff1d', 'np.ediff1d', (["ref_lanepoints_coordinates['positions_x']"], {'to_begin': '(0)'}), "(ref_lanepoints_coordinates['positions_x'], to_begin=0)\n", (54966, 55021), True, 'import numpy as 
np\n'), ((55045, 55110), 'numpy.ediff1d', 'np.ediff1d', (["ref_lanepoints_coordinates['positions_y']"], {'to_begin': '(0)'}), "(ref_lanepoints_coordinates['positions_y'], to_begin=0)\n", (55055, 55110), True, 'import numpy as np\n'), ((56792, 56900), 'numpy.array', 'np.array', (["[evenly_spaced_coordinates['positions_x'][idx], evenly_spaced_coordinates[\n 'positions_y'][idx]]"], {}), "([evenly_spaced_coordinates['positions_x'][idx],\n evenly_spaced_coordinates['positions_y'][idx]])\n", (56800, 56900), True, 'import numpy as np\n'), ((42421, 42448), 'shapely.ops.nearest_points', 'nearest_points', (['p', 'nl_shape'], {}), '(p, nl_shape)\n', (42435, 42448), False, 'from shapely.ops import nearest_points, snap\n'), ((44757, 44784), 'shapely.ops.nearest_points', 'nearest_points', (['p', 'nl_shape'], {}), '(p, nl_shape)\n', (44771, 44784), False, 'from shapely.ops import nearest_points, snap\n')]
|
import os
import pickle
import random
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree
from config import cfg  # assumed: project-level config module exposing DATASET_FOLDER (not shown in this file)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
base_path = cfg.DATASET_FOLDER
runs_folder = "oxford/"
filename = "pointcloud_locations_20m_10overlap.csv"
pointcloud_fols = "/pointcloud_20m_10overlap/"
all_folders = sorted(os.listdir(os.path.join(BASE_DIR,base_path,runs_folder)))
folders = []
# All runs are used for training (both full and partial)
index_list = range(len(all_folders)-1)
print("Number of runs: "+str(len(index_list)))
for index in index_list:
folders.append(all_folders[index])
print(folders)
#####For training and test data split#####
x_width = 150
y_width = 150
p1 = [5735712.768124,620084.402381]
p2 = [5735611.299219,620540.270327]
p3 = [5735237.358209,620543.094379]
p4 = [5734749.303802,619932.693364]
p = [p1,p2,p3,p4]
def check_in_test_set(northing, easting, points, x_width, y_width):
in_test_set = False
for point in points:
if(point[0]-x_width < northing and northing < point[0]+x_width and point[1]-y_width < easting and easting < point[1]+y_width):
in_test_set = True
break
return in_test_set
##########################################
def construct_query_dict(df_centroids, filename):
tree = KDTree(df_centroids[['northing','easting']])
ind_nn = tree.query_radius(df_centroids[['northing','easting']],r=10)
ind_r = tree.query_radius(df_centroids[['northing','easting']], r=50)
queries = {}
for i in range(len(ind_nn)):
query = df_centroids.iloc[i]["file"]
positives = np.setdiff1d(ind_nn[i],[i]).tolist()
negatives = np.setdiff1d(
df_centroids.index.values.tolist(),ind_r[i]).tolist()
random.shuffle(negatives)
queries[i] = {"query":query,
"positives":positives,"negatives":negatives}
with open(filename, 'wb') as handle:
pickle.dump(queries, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done ", filename)
# Initialize pandas DataFrame
df_train = pd.DataFrame(columns=['file','northing','easting'])
df_test = pd.DataFrame(columns=['file','northing','easting'])
for folder in folders:
df_locations = pd.read_csv(os.path.join(
base_path,runs_folder,folder,filename),sep=',')
df_locations['timestamp'] = runs_folder+folder + \
pointcloud_fols+df_locations['timestamp'].astype(str)+'.bin'
df_locations = df_locations.rename(columns={'timestamp':'file'})
for index, row in df_locations.iterrows():
if(check_in_test_set(row['northing'], row['easting'], p, x_width, y_width)):
df_test = df_test.append(row, ignore_index=True)
else:
df_train = df_train.append(row, ignore_index=True)
print("Number of training submaps: "+str(len(df_train['file'])))
print("Number of non-disjoint test submaps: "+str(len(df_test['file'])))
construct_query_dict(df_train,"training_queries_baseline.pickle")
construct_query_dict(df_test,"test_queries_baseline.pickle")
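# --- Illustrative check (not part of the original script): reload one of the
# generated pickles and inspect a single training tuple. Each entry stores the
# anchor submap path plus index lists of positives (submaps within the 10 m
# KDTree radius) and negatives (submaps outside the 50 m radius) used above.
with open("training_queries_baseline.pickle", 'rb') as handle:
    example_queries = pickle.load(handle)
example_entry = example_queries[0]
print("anchor:", example_entry["query"])
print("num positives:", len(example_entry["positives"]))
print("num negatives:", len(example_entry["negatives"]))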
|
[
"pandas.DataFrame",
"os.path.abspath",
"pickle.dump",
"os.path.join",
"random.shuffle",
"numpy.setdiff1d",
"sklearn.neighbors.KDTree"
] |
[((2077, 2130), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['file', 'northing', 'easting']"}), "(columns=['file', 'northing', 'easting'])\n", (2089, 2130), True, 'import pandas as pd\n'), ((2139, 2192), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['file', 'northing', 'easting']"}), "(columns=['file', 'northing', 'easting'])\n", (2151, 2192), True, 'import pandas as pd\n'), ((143, 168), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (158, 168), False, 'import os\n'), ((1308, 1353), 'sklearn.neighbors.KDTree', 'KDTree', (["df_centroids[['northing', 'easting']]"], {}), "(df_centroids[['northing', 'easting']])\n", (1314, 1353), False, 'from sklearn.neighbors import KDTree\n'), ((358, 404), 'os.path.join', 'os.path.join', (['BASE_DIR', 'base_path', 'runs_folder'], {}), '(BASE_DIR, base_path, runs_folder)\n', (370, 404), False, 'import os\n'), ((1761, 1786), 'random.shuffle', 'random.shuffle', (['negatives'], {}), '(negatives)\n', (1775, 1786), False, 'import random\n'), ((1941, 2003), 'pickle.dump', 'pickle.dump', (['queries', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(queries, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (1952, 2003), False, 'import pickle\n'), ((2246, 2300), 'os.path.join', 'os.path.join', (['base_path', 'runs_folder', 'folder', 'filename'], {}), '(base_path, runs_folder, folder, filename)\n', (2258, 2300), False, 'import os\n'), ((1616, 1644), 'numpy.setdiff1d', 'np.setdiff1d', (['ind_nn[i]', '[i]'], {}), '(ind_nn[i], [i])\n', (1628, 1644), True, 'import numpy as np\n')]
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from pyquil.paulis import PauliTerm, PauliSum
import pyquil.api as api
from scipy.optimize import minimize
from grove.pyqaoa.qaoa import QAOA
CXN = api.QVMConnection()
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1):
"""
generate number partition driver and cost functions
    :param asset_list: list of numbers to partition into two equal-sum subsets
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution.
"""
cost_operators = []
ref_operators = []
for ii in range(len(asset_list)):
for jj in range(ii + 1, len(asset_list)):
cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) *
PauliTerm("Z", jj, A*asset_list[jj])]))
ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)]))
cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))]))
if minimizer_kwargs is None:
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2,
'xtol': 1.0e-2,
'disp': True}}
n_qubits = len(asset_list)
qaoa_inst = QAOA(CXN, n_qubits, steps=steps, cost_ham=cost_operators,
ref_hamiltonian=ref_operators, store_basis=True,
minimizer=minimize, minimizer_kwargs=minimizer_kwargs,
vqe_options={'disp': True})
return qaoa_inst
if __name__ == "__main__":
# Sample Run.
# result should be an even partition of nodes
inst = numpart_qaoa([1, 1, 1, 1, 1, 1], A=1.0, steps=1)
betas, gammas = inst.get_angles()
print(betas)
print(gammas)
probs = inst.probabilities(np.hstack((betas, gammas)))
for state, prob in zip(inst.states, probs):
print(state, prob)
print("Most frequent bitstring from sampling")
most_freq_string, sampling_results = inst.get_string(
betas, gammas, samples=100)
print(most_freq_string)
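    # Illustrative only (not part of the original example): map the most
    # frequent bitstring back to an explicit two-set partition of the inputs
    # (mirroring the list passed to numpart_qaoa above); bit value 0/1 decides
    # which side each element falls on.
    assets = [1, 1, 1, 1, 1, 1]
    left = [a for bit, a in zip(most_freq_string, assets) if bit == 0]
    right = [a for bit, a in zip(most_freq_string, assets) if bit == 1]
    print("partition:", left, right)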
|
[
"pyquil.api.QVMConnection",
"pyquil.paulis.PauliTerm",
"grove.pyqaoa.qaoa.QAOA",
"numpy.hstack"
] |
[((939, 958), 'pyquil.api.QVMConnection', 'api.QVMConnection', ([], {}), '()\n', (956, 958), True, 'import pyquil.api as api\n'), ((2098, 2297), 'grove.pyqaoa.qaoa.QAOA', 'QAOA', (['CXN', 'n_qubits'], {'steps': 'steps', 'cost_ham': 'cost_operators', 'ref_hamiltonian': 'ref_operators', 'store_basis': '(True)', 'minimizer': 'minimize', 'minimizer_kwargs': 'minimizer_kwargs', 'vqe_options': "{'disp': True}"}), "(CXN, n_qubits, steps=steps, cost_ham=cost_operators, ref_hamiltonian=\n ref_operators, store_basis=True, minimizer=minimize, minimizer_kwargs=\n minimizer_kwargs, vqe_options={'disp': True})\n", (2102, 2297), False, 'from grove.pyqaoa.qaoa import QAOA\n'), ((2634, 2660), 'numpy.hstack', 'np.hstack', (['(betas, gammas)'], {}), '((betas, gammas))\n', (2643, 2660), True, 'import numpy as np\n'), ((1694, 1718), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""X"""', 'ii', '(-1.0)'], {}), "('X', ii, -1.0)\n", (1703, 1718), False, 'from pyquil.paulis import PauliTerm, PauliSum\n'), ((1532, 1570), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'ii', '(2 * asset_list[ii])'], {}), "('Z', ii, 2 * asset_list[ii])\n", (1541, 1570), False, 'from pyquil.paulis import PauliTerm, PauliSum\n'), ((1615, 1653), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'jj', '(A * asset_list[jj])'], {}), "('Z', jj, A * asset_list[jj])\n", (1624, 1653), False, 'from pyquil.paulis import PauliTerm, PauliSum\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import shutil
import random
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import scipy.io as scio
import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np
from common.inputs import data_input
from common import utils
SRC = 'Pascal VOC 2010/VOCdevkit/VOC2010'
OUTPUT = '../../../data/Pascal VOC 2010'
CATEGORIES = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep']
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
# In some XML annotation files, the point values are not integers, but floats.
# So we add a float function to avoid ValueError.
return int(float(GetItem(name, root, index)))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in range(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
def image_normalize(image, arch, inverse=False):
if arch == 'InceptionV3':
if inverse:
image = (image + 1) * 127.5 / 255.
image = tf.clip_by_value(image, 0, 1)
else:
image = image / 127.5 - 1
else:
mean = [123.68, 116.779, 103.939]
if inverse:
image = (image + mean) / 255.
image = tf.clip_by_value(image, 0, 1)
else:
image -= mean
return image
def find_animals_file():
datasets = {'train': [], 'valid': []}
for category in CATEGORIES:
train_paths = category + '_train.txt'
with open(os.path.join(SRC, 'ImageSets/Main', train_paths)) as f:
for line in f.readlines():
line = line.strip().split()
if line[-1] == '1':
datasets['train'].append((line[0], category))
valid_paths = category + '_val.txt'
with open(os.path.join(SRC, 'ImageSets/Main', valid_paths)) as f:
for line in f.readlines():
line = line.strip().split()
if line[-1] == '1':
datasets['valid'].append((line[0], category))
with open(os.path.join(OUTPUT, 'animals_train.txt'), 'w') as f:
for sample in datasets['train']:
f.write(sample[0] + ' ' + sample[1])
f.write('\n')
with open(os.path.join(OUTPUT, 'animals_valid.txt'), 'w') as f:
for sample in datasets['valid']:
f.write(sample[0] + ' ' + sample[1])
f.write('\n')
for category in CATEGORIES:
os.makedirs(os.path.join(OUTPUT, 'animal_train', category))
os.makedirs(os.path.join(OUTPUT, 'animal_valid', category))
source = os.path.join(SRC, 'JPEGImages')
for sample in datasets['train']:
shutil.copy(os.path.join(source, sample[0] + '.jpg'), os.path.join(OUTPUT, 'animal_train', sample[1]))
for sample in datasets['valid']:
shutil.copy(os.path.join(source, sample[0] + '.jpg'), os.path.join(OUTPUT, 'animal_valid', sample[1]))
def test_find_multi_label():
datasets = {'train': {}, 'valid': {}}
with open(os.path.join(OUTPUT, 'animals_train.txt')) as f:
for line in f.readlines():
line = line.strip().split()
if line[0] in datasets['train']:
datasets['train'][line[0]].append(line[1])
else:
datasets['train'][line[0]] = [line[1]]
with open(os.path.join(OUTPUT, 'animals_train_mul.txt'), 'w') as f:
for sample in datasets['train']:
label = ''
for item in datasets['train'][sample]:
label += item
label += ' '
f.write(sample + ' ' + label)
f.write('\n')
with open(os.path.join(OUTPUT, 'animals_valid.txt')) as f:
for line in f.readlines():
line = line.strip().split()
if line[0] in datasets['valid']:
datasets['valid'][line[0]].append(line[1])
else:
datasets['valid'][line[0]] = [line[1]]
with open(os.path.join(OUTPUT, 'animals_valid_mul.txt'), 'w') as f:
for sample in datasets['valid']:
label = ''
for item in datasets['valid'][sample]:
label += item
label += ' '
f.write(sample + ' ' + label)
f.write('\n')
def test_generate_masks():
mask_dir = os.path.join(OUTPUT, 'animal_obj_mask')
if not os.path.exists(mask_dir):
os.makedirs(mask_dir)
write_obj_masks('animals_train_mul.txt', mask_dir)
write_obj_masks('animals_valid_mul.txt', mask_dir)
def write_obj_masks(source, mask_dir):
from scipy import misc
with open(os.path.join(OUTPUT, source)) as f:
for line in f.readlines():
line = line.strip().split()
file = os.path.join(OUTPUT, 'Annotations_Part', line[0] + '.mat')
labels = line[1:]
objects = scio.loadmat(file)['anno'][0][0][1][0]
valid_obj = []
for obj in objects:
if obj[0] in labels:
valid_obj.append(obj)
masks = []
for item in valid_obj:
masks.append(np.expand_dims(item[2], -1))
masks = np.concatenate(masks, -1)
masks = np.sum(masks, -1, keepdims=False)
masks[masks > 1] = 1
misc.imsave(os.path.join(mask_dir, line[0] + '.jpg'), masks)
def build_dataset(data_dir='data', batch_size=128, shape=(224, 224, 3), flip=True, crop=True):
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
labels = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep']
train_path = os.path.join(data_path, 'animal_train_crop')
valid_path = os.path.join(data_path, 'animal_valid_crop')
train_images = []
train_labels = []
valid_images = []
valid_labels = []
for i, l in enumerate(labels):
train_files = os.listdir(os.path.join(train_path, l))
for file in train_files:
file = os.path.join(train_path, l, file)
train_images.append(file)
train_labels.append(i)
valid_files = os.listdir(os.path.join(valid_path, l))
for file in valid_files:
file = os.path.join(valid_path, l, file)
valid_images.append(file)
valid_labels.append(i)
train_images = tf.constant(train_images)
train_labels = tf.constant(train_labels)
valid_images = tf.constant(valid_images)
valid_labels = tf.constant(valid_labels)
train = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(2816).\
map(build_parse((shape[0], shape[1]), flip), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_labels)).\
map(build_parse((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=len(labels))}),
{'train_examples': 2816,
'test_examples': 2839})
return train, test, info
def build_dataset2(data_dir='data', batch_size=128, shape=(224, 224, 3), target=None):
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC)
train_path = os.path.join(data_path, 'animal_train')
valid_path = os.path.join(data_path, 'animal_valid')
train_images = []
train_boxes = []
train_labels = []
valid_images = []
valid_boxes = []
valid_labels = []
for i, l in enumerate(CATEGORIES):
train_files = os.listdir(os.path.join(train_path, l))
for file in train_files:
obj_anno = file.split('.')[0] + '.xml'
obj_anno = os.path.join(SRC_path, 'Annotations', obj_anno)
tree = ET.parse(obj_anno)
file = os.path.join(train_path, l, file)
train_images.append(file)
train_labels.append(i)
# img = Image.open(file).size
area = 0
box = []
for obj in tree.getroot().iter('object'):
if obj.find('name').text == l:
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
new_area = (xmax-xmin)*(ymax-ymin)
if new_area > area:
area = new_area
box = [ymin, xmin, ymax, xmax]
train_boxes.append(box)
valid_files = os.listdir(os.path.join(valid_path, l))
for file in valid_files:
obj_anno = file.split('.')[0] + '.xml'
obj_anno = os.path.join(SRC_path, 'Annotations', obj_anno)
tree = ET.parse(obj_anno)
area = 0
box = []
for obj in tree.getroot().iter('object'):
if obj.find('name').text == l:
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
new_area = (xmax - xmin) * (ymax - ymin)
if new_area > area:
area = new_area
box = [ymin, xmin, ymax, xmax]
valid_boxes.append(box)
file = os.path.join(valid_path, l, file)
valid_images.append(file)
valid_labels.append(i)
train_images = tf.constant(train_images)
train_boxes = tf.constant(train_boxes, dtype=tf.float32)
train_labels = tf.constant(train_labels)
valid_images = tf.constant(valid_images)
valid_boxes = tf.constant(valid_boxes, dtype=tf.float32)
valid_labels = tf.constant(valid_labels)
train = tf.data.Dataset.from_tensor_slices((train_images, train_boxes, train_labels)).shuffle(1919).\
map(build_parse2((shape[0], shape[1]), train=True), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_boxes, valid_labels)).\
map(build_parse2((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=len(CATEGORIES))}),
{'train_examples': 1919,
'test_examples': 1914})
if target:
target = CATEGORIES.index(target)
def single_parse(image, label):
label = tf.equal(label, target)
label = tf.cast(label, tf.int32)
return image, label
train = train.map(single_parse)
test = test.map(single_parse)
return train, test, info
def get_test_set_with_landmark3(data_dir='data', category=None, batch_size=128, shape=(224, 224, 3), arch='InceptionV3'): # full image
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC)
valid_path = os.path.join(data_path, 'animals_valid_mul.txt')
valid_images = []
valid_labels = []
valid_masks = []
def get_parse(size):
def parse(path, label, mask):
image_str = tf.io.read_file(path)
image = tf.image.decode_jpeg(image_str)
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
return image, label, mask
return parse
with open(valid_path) as f:
for line in f.readlines():
line = line.strip().split()
file = os.path.join(SRC_path, 'JPEGImages', line[0] + '.jpg')
labels = line[1:]
labels_one_hot = np.zeros([6,])
if category is not None and category not in labels:
continue
for label in labels:
idx = CATEGORIES.index(label)
labels_one_hot[idx] = 1
valid_images.append(file)
valid_labels.append(labels_one_hot)
part_mask = line[0] + '.mat'
part_mask = os.path.join(data_path, 'Annotations_Part', part_mask)
valid_masks.append(part_mask)
valid_images = tf.constant(valid_images)
valid_labels = tf.constant(valid_labels)
valid_landmarks = tf.constant(valid_masks)
num = len(valid_images)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_labels, valid_landmarks)).\
map(get_parse((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=6)}),
{'test_examples': num})
return test, info
def build_dataset3(data_dir='data', batch_size=128, shape=(224, 224, 3), target=None, with_mask=False, arch='InceptionV3', multi=False, shuffle_test=False):
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC, 'JPEGImages')
mask_path = os.path.join(data_path, 'animal_obj_mask')
train_path = os.path.join(data_path, 'animals_train_mul.txt')
valid_path = os.path.join(data_path, 'animals_valid_mul.txt')
train_images = []
train_masks = []
train_labels = []
valid_images = []
valid_masks = []
valid_labels = []
with open(train_path) as f:
for line in f.readlines():
line = line.strip().split()
if multi and len(line) <= 2:
continue
file = os.path.join(SRC_path, line[0] + '.jpg')
mask_file = os.path.join(mask_path, line[0] + '.jpg')
train_images.append(file)
train_masks.append(mask_file)
labels = line[1:]
labels_one_hot = np.zeros([6,])
for label in labels:
idx = CATEGORIES.index(label)
labels_one_hot[idx] = 1
train_labels.append(labels_one_hot)
with open(valid_path) as f:
for line in f.readlines():
line = line.strip().split()
if multi and len(line) <= 2:
continue
file = os.path.join(SRC_path, line[0] + '.jpg')
mask_file = os.path.join(mask_path, line[0] + '.jpg')
valid_images.append(file)
valid_masks.append(mask_file)
labels = line[1:]
labels_one_hot = np.zeros([6,])
for label in labels:
idx = CATEGORIES.index(label)
labels_one_hot[idx] = 1
valid_labels.append(labels_one_hot)
train_num = len(train_images)
valid_num = len(valid_images)
if shuffle_test:
idx = [i for i in range(valid_num)]
np.random.shuffle(idx)
        valid_images = np.array(valid_images)[idx]
        valid_masks = np.array(valid_masks)[idx]  # keep masks aligned with the shuffled images
        valid_labels = np.array(valid_labels)[idx]
train_images = tf.constant(train_images)
train_masks = tf.constant(train_masks)
train_labels = tf.constant(train_labels)
valid_images = tf.constant(valid_images)
valid_masks = tf.constant(valid_masks)
valid_labels = tf.constant(valid_labels)
train = tf.data.Dataset.from_tensor_slices(((train_images, train_masks), train_labels)).shuffle(train_num).\
map(build_parse4((shape[0], shape[1]), train=True, with_mask=with_mask, arch=arch), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test = tf.data.Dataset.from_tensor_slices(((valid_images, valid_masks), valid_labels)).\
map(build_parse4((shape[0], shape[1]), with_mask=with_mask, arch=arch), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=6)}),
{'train_examples': train_num,
'test_examples': valid_num})
if target:
target = CATEGORIES.index(target)
def single_parse(image, label):
label = tf.equal(label, target)
label = tf.cast(label, tf.int32)
label = tf.reduce_sum(label, -1)
return image, label
train = train.map(single_parse)
test = test.map(single_parse)
return train, test, info
def multi2single(target, label):
target = CATEGORIES.index(target)
if isinstance(label, tf.Tensor):
label = label.numpy()
label = label == target
label = label.astype(int)
return label
def build_parse2(size, train=False, brightness=False, contrast=False, arch='InceptionV3'):
def parse(path, bbox, label):
image_str = tf.io.read_file(path)
image = tf.image.decode_jpeg(image_str)
if train:
float_shape = tf.cast(tf.shape(image), tf.float32)
ymin = bbox[0] / float_shape[0]
xmin = bbox[1] / float_shape[1]
ymax = bbox[2] / float_shape[0]
xmax = bbox[3] / float_shape[1]
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
# tf.image.extract_jpeg_shape(image_str),
tf.shape(image),
bounding_boxes=[[[ymin, xmin, ymax, xmax]]],
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
            # Crop the decoded image to the bounding box sampled above.
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
else:
bbox = tf.cast(bbox, tf.int32)
image = tf.image.crop_to_bounding_box(
image, bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1])
if train:
image = tf.image.random_flip_left_right(image)
# if brightness:
# image = tf.image.random_brightness(image, max_delta=63)
# if contrast:
# image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
return image, label
return parse
def build_parse3(size, train=False, brightness=False, contrast=False, with_mask=False, arch='InceptionV3'):
def parse(path, label):
image_path, mask_path = path
label = tf.cast(label, tf.float32)
image_str = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image_str)
mask_str = tf.io.read_file(mask_path)
mask = tf.image.decode_jpeg(mask_str)
if train:
if with_mask:
mask = tf.tile(mask, [1, 1, 3])
image = tf.concat([image, mask], 0)
image = tf.image.random_flip_left_right(image)
if with_mask:
image, mask = tf.split(image, 2, axis=0)
mask = tf.split(mask, 3, axis=2)[0]
if brightness:
image = tf.image.random_brightness(image, max_delta=63)
if contrast:
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
if with_mask:
mask = tf.image.resize(mask, size=size)
mask /= 255.
return (image, mask), label
else:
return image, label
return parse
def build_parse4(size, train=False, with_mask=None, arch='InceptionV3'):
def parse(path, label):
image_path, _ = path
label = tf.cast(label, tf.float32)
image_str = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image_str)
shape = tf.shape(input=image)
height, width = shape[0], shape[1]
if train:
image = tf.image.random_flip_left_right(image)
smaller_dim = tf.minimum(height, width)
image = tf.image.random_crop(image, [smaller_dim-80, smaller_dim-80,3])
image = tf.image.resize(image, size=size)
else:
new_height, new_width = utils.smallest_size_at_least(height, width, size[0])
image = tf.image.resize(image, size=(new_height+1, new_width+1))
image = utils.central_crop(image, size[0], size[1])
image = image_normalize(image, arch)
return image, label
return parse
def build_parse(size, flip=False, arch='InceptionV3'):
def parse(path, label):
image = tf.io.read_file(path)
image = tf.image.decode_jpeg(image)
image = tf.image.resize(image, size=size)
if flip:
image = tf.image.random_flip_left_right(image)
image = image_normalize(image, arch)
return image, label
return parse
def get_test_set_with_landmark(data_dir='data', category=None, batch_size=128, shape=(224, 224, 3), arch='InceptionV3'):
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC)
labels = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep']
valid_path = os.path.join(data_path, 'animal_valid')
valid_images = []
valid_boxes = []
valid_labels = []
valid_masks = []
def get_parse(size):
def parse(path, bbox, label, mask):
image_str = tf.io.read_file(path)
image = tf.image.decode_jpeg(image_str)
bbox = tf.cast(bbox, tf.int32)
image = tf.image.crop_to_bounding_box(
image, bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1])
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
return image, bbox, label, mask
return parse
for i, l in enumerate(labels):
if category is not None and category != l:
continue
valid_files = os.listdir(os.path.join(valid_path, l))
for file in valid_files:
obj_anno = file.split('.')[0] + '.xml'
obj_anno = os.path.join(SRC_path, 'Annotations', obj_anno)
tree = ET.parse(obj_anno)
boxes = get_boxes(tree, l)
if len(boxes) != 1:
continue
part_mask = file.split('.')[0] + '.mat'
part_mask = os.path.join(data_path, 'Annotations_Part', part_mask)
file = os.path.join(valid_path, l, file)
objects = scio.loadmat(part_mask)['anno'][0][0][1][0]
valid_obj = []
for obj in objects:
if obj[0] == l:
valid_obj.append(obj)
if len(valid_obj) != 1:
raise Exception('more than 1 obj!')
if len(valid_obj[0][3]) == 0:
continue
valid_boxes += boxes
valid_images.append(file)
valid_labels.append(i)
valid_masks.append(part_mask)
valid_images = tf.constant(valid_images)
valid_boxes = tf.constant(valid_boxes, dtype=tf.float32)
valid_labels = tf.constant(valid_labels)
valid_landmarks = tf.constant(valid_masks)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_boxes, valid_labels, valid_landmarks)).\
map(get_parse((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=len(labels))}),
{'test_examples': 1460})
return test, info
def parse_masks(images, files, labels, boxes):
filtered_images = []
filtered_labels = []
filtered_masks = []
shape = images.get_shape().as_list()
h, w = shape[1], shape[2]
for image, file, label, box in zip(images, files, labels, boxes):
# file = tf.constant('../../../data\\Pascal VOC 2010\\Annotations_Part\\2009_002002.mat')
mask = {}
objects = scio.loadmat(file.numpy())['anno'][0][0][1][0]
valid_obj = []
for obj in objects:
if obj[0] == label:
valid_obj.append(obj)
if len(valid_obj) != 1:
raise Exception('more than 1 obj!')
for i, item in enumerate(valid_obj[0]):
if i == 2:
item = crop_resize_mask(item, box, h, w)
mask['obj'] = item
if i == 3:
parts = {}
if len(item) == 0:
print(item)
continue
for part in item[0]:
name = part[0][0]
value = part[1]
parts[name] = crop_resize_mask(value, box, h, w)
parts = merge_parts(parts, label, h, w)
mask['parts'] = parts
if 'parts' in mask:
filtered_images.append(image)
filtered_labels.append(label)
filtered_masks.append(mask)
return filtered_images, filtered_labels, filtered_masks
def get_boxes(tree, label):
boxes = []
for obj in tree.getroot().iter('object'):
if obj.find('name').text == label:
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
box = [ymin, xmin, ymax, xmax]
boxes.append(box)
return boxes
def test_test_landmark():
batch = 16
test, info = get_test_set_with_landmark('../../../data', batch_size=batch)
# test = test.shuffle(1000)
count1 = 0
count2 = 0
for images, boxes, labels, masks in test:
count1 += images.get_shape().as_list()[0]
txt_label = [CATEGORIES[l] for l in labels]
images, labels, masks = parse_masks(images, masks, txt_label, boxes)
count2 += len(images)
parts = get_parts(masks)
viz_image_mask(images, masks)
print('count1:{}'.format(count1))
print('count2:{}'.format(count2))
def get_parts(masks):
result = []
for mask in masks:
result.append(mask['parts'])
return result
def viz_image_mask(images, masks):
for image, mask in zip(images, masks):
mask_img = [np.tile(mask['obj'], [1, 1, 3])]
for part in mask['parts']:
part_img = mask['parts'][part]
part_img = np.tile(part_img, [1, 1, 3])
mask_img.append(part_img)
show = np.concatenate([image] + mask_img, 1)
plt.imshow(show)
plt.show()
def crop_resize_mask(mask, box, h, w):
mask = mask[box[0]:box[2], box[1]:box[3], np.newaxis]
mask = tf.image.resize(mask, [h, w]).numpy()
mask[mask > 0] = 1
return mask
def merge_parts(parts, label, h, w):
results = {}
if label == 'bird':
head = []
torso = []
leg = []
tail = []
results = {'head': head, 'torso': torso, 'leg': leg, 'tail': tail}
for part in parts:
if part in ['head', 'leye', 'reye', 'beak']:
head.append(parts[part])
if part in ['torso', 'neck', 'lwing', 'rwing']:
torso.append(parts[part])
if part in ['lleg', 'rleg', 'lfoot', 'rfoot']:
leg.append(parts[part])
if part in ['tail']:
tail.append(parts[part])
if label in ['cat', 'dog', 'cow', 'sheep', 'horse']:
head = []
torso = []
bleg = []
fleg = []
tail = []
results = {'head': head, 'torso': torso, 'bleg': bleg, 'fleg': fleg}
if label in ['cat', 'dog']:
results['tail'] = tail
for part in parts:
if part in ['head', 'leye', 'reye', 'lear', 'rear', 'nose', 'muzzle', 'rhorn', 'lhorn']:
head.append(parts[part])
if part in ['torso', 'neck']:
torso.append(parts[part])
if part in ['lbleg', 'rbleg', 'lbpa', 'rbpa', 'lblleg', 'lbuleg', 'rblleg', 'rbuleg', 'rbho', 'lbho']:
bleg.append(parts[part])
if part in ['lfleg', 'rfleg', 'lfpa', 'rfpa', 'lflleg', 'lfuleg', 'rflleg', 'rfuleg', 'rfho', 'lfho']:
fleg.append(parts[part])
if part in ['tail']:
tail.append(parts[part])
final = {}
for merged in results:
if len(results[merged]) > 1:
summed = np.sum(results[merged], 0)
summed[summed > 0] = 1
final[merged] = summed
elif len(results[merged]) == 1:
summed = results[merged][0]
summed[summed > 0] = 1
final[merged] = summed
elif len(results[merged]) == 0:
final[merged] = np.zeros(shape=(h, w, 1))
return final
def test_find_shapes():
from scipy import misc
data_dir = '../../../data'
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC, 'JPEGImages')
train_path = os.path.join(data_path, 'animals_train_mul.txt')
valid_path = os.path.join(data_path, 'animals_valid_mul.txt')
hs = []
ws = []
with open(train_path) as f:
for line in f.readlines():
line = line.strip().split()
file = os.path.join(SRC_path, line[0] + '.jpg')
image = misc.imread(file)
h, w = image.shape[0], image.shape[1]
hs.append(h)
ws.append(w)
print('h min:{},h max:{}'.format(min(hs), max(hs)))
print('w min:{},w max:{}'.format(min(ws), max(ws)))
def test_read():
train, test, info = build_dataset3('../../../data', multi=True)
count = 0
for image, label in train:
count += image.shape[0]
print('train num', count)
count = 0
for image, label in test:
count += image.shape[0]
print('test num', count)
def test_view_data():
train, test, info = build_dataset3('../../../data', with_mask=False, multi=True)
for image, label in train:
# image, mask = image
# h,w =image
image = (image + 1)*127.5/255
# mask = np.tile(mask, [1, 1, 1, 3])
# image = np.concatenate([image, mask], 2)
out_image(image, label)
break
for image, label in test:
# image, mask = image
image = (image + 1)*127.5/255
out_image(image, label)
break
def out_image(images, labels, preds=None, photos=16):
fig = plt.figure()
fig.tight_layout()
plt.subplots_adjust(wspace=0.05, hspace=0.05, top=0.95, bottom=0.05, right=0.95, left=0.05)
for i in range(photos):
        plt.subplot(photos // 2, 2, i + 1)  # integer row count so the subplot spec is valid
plt.axis('off')
if preds is None:
title = str(labels[i])
else:
title = str(labels[i]) + '_' + str(preds[i])
plt.title(title)
image = images[i, :, :, :]
if image.shape[-1] == 1:
image = np.squeeze(image, -1)
plt.imshow(image, cmap='gray')
else:
plt.imshow(image)
plt.subplots_adjust(hspace=0.5)
plt.show()
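def test_usage_sketch():
    # Hypothetical usage sketch (not part of the original module): build the
    # multi-label animal dataset and pull one batch to sanity-check shapes.
    train, test, info = build_dataset3('../../../data', batch_size=8, multi=True)
    for images, labels in train.take(1):
        # expected: images (8, 224, 224, 3), labels (8, 6) one-hot over CATEGORIES
        print(images.shape, labels.shape)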
|
[
"matplotlib.pyplot.title",
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow_datasets.features.Image",
"scipy.io.loadmat",
"matplotlib.pyplot.figure",
"numpy.tile",
"tensorflow.split",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.image.random_crop",
"matplotlib.pyplot.imshow",
"tensorflow.image.random_contrast",
"tensorflow.concat",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.io.read_file",
"common.utils.central_crop",
"tensorflow.equal",
"numpy.random.shuffle",
"xml.etree.ElementTree.parse",
"matplotlib.pyplot.show",
"tensorflow.constant",
"tensorflow.image.random_flip_left_right",
"tensorflow.tile",
"matplotlib.pyplot.subplots_adjust",
"numpy.squeeze",
"tensorflow.image.random_brightness",
"numpy.concatenate",
"scipy.misc.imread",
"matplotlib.pyplot.subplot",
"tensorflow_datasets.features.ClassLabel",
"common.utils.smallest_size_at_least",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.expand_dims",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.shape",
"numpy.array",
"tensorflow.image.decode_jpeg",
"tensorflow.image.resize",
"tensorflow.unstack"
] |
[((8348, 8373), 'tensorflow.constant', 'tf.constant', (['train_images'], {}), '(train_images)\n', (8359, 8373), True, 'import tensorflow as tf\n'), ((8393, 8418), 'tensorflow.constant', 'tf.constant', (['train_labels'], {}), '(train_labels)\n', (8404, 8418), True, 'import tensorflow as tf\n'), ((8438, 8463), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (8449, 8463), True, 'import tensorflow as tf\n'), ((8483, 8508), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (8494, 8508), True, 'import tensorflow as tf\n'), ((12032, 12057), 'tensorflow.constant', 'tf.constant', (['train_images'], {}), '(train_images)\n', (12043, 12057), True, 'import tensorflow as tf\n'), ((12076, 12118), 'tensorflow.constant', 'tf.constant', (['train_boxes'], {'dtype': 'tf.float32'}), '(train_boxes, dtype=tf.float32)\n', (12087, 12118), True, 'import tensorflow as tf\n'), ((12138, 12163), 'tensorflow.constant', 'tf.constant', (['train_labels'], {}), '(train_labels)\n', (12149, 12163), True, 'import tensorflow as tf\n'), ((12183, 12208), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (12194, 12208), True, 'import tensorflow as tf\n'), ((12227, 12269), 'tensorflow.constant', 'tf.constant', (['valid_boxes'], {'dtype': 'tf.float32'}), '(valid_boxes, dtype=tf.float32)\n', (12238, 12269), True, 'import tensorflow as tf\n'), ((12289, 12314), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (12300, 12314), True, 'import tensorflow as tf\n'), ((15003, 15028), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (15014, 15028), True, 'import tensorflow as tf\n'), ((15048, 15073), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (15059, 15073), True, 'import tensorflow as tf\n'), ((15096, 15120), 'tensorflow.constant', 'tf.constant', (['valid_masks'], {}), '(valid_masks)\n', (15107, 15120), True, 'import tensorflow as tf\n'), ((17822, 17847), 'tensorflow.constant', 'tf.constant', (['train_images'], {}), '(train_images)\n', (17833, 17847), True, 'import tensorflow as tf\n'), ((17866, 17890), 'tensorflow.constant', 'tf.constant', (['train_masks'], {}), '(train_masks)\n', (17877, 17890), True, 'import tensorflow as tf\n'), ((17910, 17935), 'tensorflow.constant', 'tf.constant', (['train_labels'], {}), '(train_labels)\n', (17921, 17935), True, 'import tensorflow as tf\n'), ((17955, 17980), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (17966, 17980), True, 'import tensorflow as tf\n'), ((17999, 18023), 'tensorflow.constant', 'tf.constant', (['valid_masks'], {}), '(valid_masks)\n', (18010, 18023), True, 'import tensorflow as tf\n'), ((18043, 18068), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (18054, 18068), True, 'import tensorflow as tf\n'), ((26364, 26389), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (26375, 26389), True, 'import tensorflow as tf\n'), ((26408, 26450), 'tensorflow.constant', 'tf.constant', (['valid_boxes'], {'dtype': 'tf.float32'}), '(valid_boxes, dtype=tf.float32)\n', (26419, 26450), True, 'import tensorflow as tf\n'), ((26470, 26495), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (26481, 26495), True, 'import tensorflow as tf\n'), ((26518, 26542), 'tensorflow.constant', 'tf.constant', (['valid_masks'], {}), '(valid_masks)\n', (26529, 26542), True, 'import tensorflow as 
tf\n'), ((33982, 33994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (33992, 33994), True, 'import matplotlib.pyplot as plt\n'), ((34022, 34118), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.05)', 'hspace': '(0.05)', 'top': '(0.95)', 'bottom': '(0.05)', 'right': '(0.95)', 'left': '(0.05)'}), '(wspace=0.05, hspace=0.05, top=0.95, bottom=0.05, right=\n 0.95, left=0.05)\n', (34041, 34118), True, 'import matplotlib.pyplot as plt\n'), ((34562, 34593), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (34581, 34593), True, 'import matplotlib.pyplot as plt\n'), ((34598, 34608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34606, 34608), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1381), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (1371, 1381), True, 'import xml.etree.ElementTree as ET\n'), ((17677, 17699), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (17694, 17699), True, 'import numpy as np\n'), ((19807, 19828), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (19822, 19828), True, 'import tensorflow as tf\n'), ((19845, 19876), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (19865, 19876), True, 'import tensorflow as tf\n'), ((21550, 21583), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (21565, 21583), True, 'import tensorflow as tf\n'), ((21865, 21891), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (21872, 21891), True, 'import tensorflow as tf\n'), ((21912, 21939), 'tensorflow.io.read_file', 'tf.io.read_file', (['image_path'], {}), '(image_path)\n', (21927, 21939), True, 'import tensorflow as tf\n'), ((21956, 21987), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (21976, 21987), True, 'import tensorflow as tf\n'), ((22007, 22033), 'tensorflow.io.read_file', 'tf.io.read_file', (['mask_path'], {}), '(mask_path)\n', (22022, 22033), True, 'import tensorflow as tf\n'), ((22049, 22079), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['mask_str'], {}), '(mask_str)\n', (22069, 22079), True, 'import tensorflow as tf\n'), ((22636, 22669), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (22651, 22669), True, 'import tensorflow as tf\n'), ((23066, 23092), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (23073, 23092), True, 'import tensorflow as tf\n'), ((23113, 23140), 'tensorflow.io.read_file', 'tf.io.read_file', (['image_path'], {}), '(image_path)\n', (23128, 23140), True, 'import tensorflow as tf\n'), ((23157, 23188), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (23177, 23188), True, 'import tensorflow as tf\n'), ((23205, 23226), 'tensorflow.shape', 'tf.shape', ([], {'input': 'image'}), '(input=image)\n', (23213, 23226), True, 'import tensorflow as tf\n'), ((23972, 23993), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (23987, 23993), True, 'import tensorflow as tf\n'), ((24010, 24037), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image'], {}), '(image)\n', (24030, 24037), True, 'import tensorflow as tf\n'), ((24054, 24087), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, 
size=size)\n', (24069, 24087), True, 'import tensorflow as tf\n'), ((30039, 30076), 'numpy.concatenate', 'np.concatenate', (['([image] + mask_img)', '(1)'], {}), '([image] + mask_img, 1)\n', (30053, 30076), True, 'import numpy as np\n'), ((30085, 30101), 'matplotlib.pyplot.imshow', 'plt.imshow', (['show'], {}), '(show)\n', (30095, 30101), True, 'import matplotlib.pyplot as plt\n'), ((30110, 30120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30118, 30120), True, 'import matplotlib.pyplot as plt\n'), ((34150, 34183), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(photos / 2)', '(2)', '(i + 1)'], {}), '(photos / 2, 2, i + 1)\n', (34161, 34183), True, 'import matplotlib.pyplot as plt\n'), ((34188, 34203), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (34196, 34203), True, 'import matplotlib.pyplot as plt\n'), ((34344, 34360), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (34353, 34360), True, 'import matplotlib.pyplot as plt\n'), ((3116, 3145), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (3132, 3145), True, 'import tensorflow as tf\n'), ((3332, 3361), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (3348, 3361), True, 'import tensorflow as tf\n'), ((7237, 7262), 'numpy.concatenate', 'np.concatenate', (['masks', '(-1)'], {}), '(masks, -1)\n', (7251, 7262), True, 'import numpy as np\n'), ((7283, 7316), 'numpy.sum', 'np.sum', (['masks', '(-1)'], {'keepdims': '(False)'}), '(masks, -1, keepdims=False)\n', (7289, 7316), True, 'import numpy as np\n'), ((10146, 10164), 'xml.etree.ElementTree.parse', 'ET.parse', (['obj_anno'], {}), '(obj_anno)\n', (10154, 10164), True, 'import xml.etree.ElementTree as ET\n'), ((11215, 11233), 'xml.etree.ElementTree.parse', 'ET.parse', (['obj_anno'], {}), '(obj_anno)\n', (11223, 11233), True, 'import xml.etree.ElementTree as ET\n'), ((13369, 13392), 'tensorflow.equal', 'tf.equal', (['label', 'target'], {}), '(label, target)\n', (13377, 13392), True, 'import tensorflow as tf\n'), ((13413, 13437), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (13420, 13437), True, 'import tensorflow as tf\n'), ((14034, 14055), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (14049, 14055), True, 'import tensorflow as tf\n'), ((14076, 14107), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (14096, 14107), True, 'import tensorflow as tf\n'), ((14128, 14161), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (14143, 14161), True, 'import tensorflow as tf\n'), ((14511, 14524), 'numpy.zeros', 'np.zeros', (['[6]'], {}), '([6])\n', (14519, 14524), True, 'import numpy as np\n'), ((16733, 16746), 'numpy.zeros', 'np.zeros', (['[6]'], {}), '([6])\n', (16741, 16746), True, 'import numpy as np\n'), ((17353, 17366), 'numpy.zeros', 'np.zeros', (['[6]'], {}), '([6])\n', (17361, 17366), True, 'import numpy as np\n'), ((17723, 17745), 'numpy.array', 'np.array', (['valid_images'], {}), '(valid_images)\n', (17731, 17745), True, 'import numpy as np\n'), ((17774, 17796), 'numpy.array', 'np.array', (['valid_labels'], {}), '(valid_labels)\n', (17782, 17796), True, 'import numpy as np\n'), ((19192, 19215), 'tensorflow.equal', 'tf.equal', (['label', 'target'], {}), '(label, target)\n', (19200, 19215), True, 'import tensorflow as tf\n'), ((19236, 19260), 'tensorflow.cast', 'tf.cast', 
(['label', 'tf.int32'], {}), '(label, tf.int32)\n', (19243, 19260), True, 'import tensorflow as tf\n'), ((19281, 19305), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['label', '(-1)'], {}), '(label, -1)\n', (19294, 19305), True, 'import tensorflow as tf\n'), ((20771, 20793), 'tensorflow.unstack', 'tf.unstack', (['bbox_begin'], {}), '(bbox_begin)\n', (20781, 20793), True, 'import tensorflow as tf\n'), ((20839, 20860), 'tensorflow.unstack', 'tf.unstack', (['bbox_size'], {}), '(bbox_size)\n', (20849, 20860), True, 'import tensorflow as tf\n'), ((20972, 21061), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'offset_y', 'offset_x', 'target_height', 'target_width'], {}), '(image, offset_y, offset_x, target_height,\n target_width)\n', (21001, 21061), True, 'import tensorflow as tf\n'), ((21112, 21135), 'tensorflow.cast', 'tf.cast', (['bbox', 'tf.int32'], {}), '(bbox, tf.int32)\n', (21119, 21135), True, 'import tensorflow as tf\n'), ((21156, 21253), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'bbox[0]', 'bbox[1]', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {}), '(image, bbox[0], bbox[1], bbox[2] - bbox[0], \n bbox[3] - bbox[1])\n', (21185, 21253), True, 'import tensorflow as tf\n'), ((21301, 21339), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (21332, 21339), True, 'import tensorflow as tf\n'), ((22244, 22282), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (22275, 22282), True, 'import tensorflow as tf\n'), ((22756, 22788), 'tensorflow.image.resize', 'tf.image.resize', (['mask'], {'size': 'size'}), '(mask, size=size)\n', (22771, 22788), True, 'import tensorflow as tf\n'), ((23308, 23346), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (23339, 23346), True, 'import tensorflow as tf\n'), ((23373, 23398), 'tensorflow.minimum', 'tf.minimum', (['height', 'width'], {}), '(height, width)\n', (23383, 23398), True, 'import tensorflow as tf\n'), ((23419, 23487), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '[smaller_dim - 80, smaller_dim - 80, 3]'], {}), '(image, [smaller_dim - 80, smaller_dim - 80, 3])\n', (23439, 23487), True, 'import tensorflow as tf\n'), ((23503, 23536), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (23518, 23536), True, 'import tensorflow as tf\n'), ((23587, 23639), 'common.utils.smallest_size_at_least', 'utils.smallest_size_at_least', (['height', 'width', 'size[0]'], {}), '(height, width, size[0])\n', (23615, 23639), False, 'from common import utils\n'), ((23660, 23720), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': '(new_height + 1, new_width + 1)'}), '(image, size=(new_height + 1, new_width + 1))\n', (23675, 23720), True, 'import tensorflow as tf\n'), ((23737, 23780), 'common.utils.central_crop', 'utils.central_crop', (['image', 'size[0]', 'size[1]'], {}), '(image, size[0], size[1])\n', (23755, 23780), False, 'from common import utils\n'), ((24125, 24163), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (24156, 24163), True, 'import tensorflow as tf\n'), ((24776, 24797), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (24791, 24797), True, 'import tensorflow as tf\n'), ((24818, 24849), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', 
(['image_str'], {}), '(image_str)\n', (24838, 24849), True, 'import tensorflow as tf\n'), ((24869, 24892), 'tensorflow.cast', 'tf.cast', (['bbox', 'tf.int32'], {}), '(bbox, tf.int32)\n', (24876, 24892), True, 'import tensorflow as tf\n'), ((24913, 25010), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'bbox[0]', 'bbox[1]', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {}), '(image, bbox[0], bbox[1], bbox[2] - bbox[0], \n bbox[3] - bbox[1])\n', (24942, 25010), True, 'import tensorflow as tf\n'), ((25047, 25080), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (25062, 25080), True, 'import tensorflow as tf\n'), ((25540, 25558), 'xml.etree.ElementTree.parse', 'ET.parse', (['obj_anno'], {}), '(obj_anno)\n', (25548, 25558), True, 'import xml.etree.ElementTree as ET\n'), ((29823, 29854), 'numpy.tile', 'np.tile', (["mask['obj']", '[1, 1, 3]'], {}), "(mask['obj'], [1, 1, 3])\n", (29830, 29854), True, 'import numpy as np\n'), ((29957, 29985), 'numpy.tile', 'np.tile', (['part_img', '[1, 1, 3]'], {}), '(part_img, [1, 1, 3])\n', (29964, 29985), True, 'import numpy as np\n'), ((30231, 30260), 'tensorflow.image.resize', 'tf.image.resize', (['mask', '[h, w]'], {}), '(mask, [h, w])\n', (30246, 30260), True, 'import tensorflow as tf\n'), ((31971, 31997), 'numpy.sum', 'np.sum', (['results[merged]', '(0)'], {}), '(results[merged], 0)\n', (31977, 31997), True, 'import numpy as np\n'), ((32871, 32888), 'scipy.misc.imread', 'misc.imread', (['file'], {}), '(file)\n', (32882, 32888), False, 'from scipy import misc\n'), ((34449, 34470), 'numpy.squeeze', 'np.squeeze', (['image', '(-1)'], {}), '(image, -1)\n', (34459, 34470), True, 'import numpy as np\n'), ((34483, 34513), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (34493, 34513), True, 'import matplotlib.pyplot as plt\n'), ((34540, 34557), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (34550, 34557), True, 'import matplotlib.pyplot as plt\n'), ((9086, 9118), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (9105, 9118), True, 'import tensorflow_datasets as tfds\n'), ((12926, 12958), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (12945, 12958), True, 'import tensorflow_datasets as tfds\n'), ((15477, 15509), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (15496, 15509), True, 'import tensorflow_datasets as tfds\n'), ((15579, 15618), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(6)'}), '(num_classes=6)\n', (15603, 15618), True, 'import tensorflow_datasets as tfds\n'), ((18753, 18785), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (18772, 18785), True, 'import tensorflow_datasets as tfds\n'), ((18855, 18894), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(6)'}), '(num_classes=6)\n', (18879, 18894), True, 'import tensorflow_datasets as tfds\n'), ((19930, 19945), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (19938, 19945), True, 'import tensorflow as tf\n'), ((20293, 20308), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (20301, 20308), True, 'import tensorflow as tf\n'), ((22147, 22171), 'tensorflow.tile', 'tf.tile', (['mask', '[1, 1, 
3]'], {}), '(mask, [1, 1, 3])\n', (22154, 22171), True, 'import tensorflow as tf\n'), ((22196, 22223), 'tensorflow.concat', 'tf.concat', (['[image, mask]', '(0)'], {}), '([image, mask], 0)\n', (22205, 22223), True, 'import tensorflow as tf\n'), ((22339, 22365), 'tensorflow.split', 'tf.split', (['image', '(2)'], {'axis': '(0)'}), '(image, 2, axis=0)\n', (22347, 22365), True, 'import tensorflow as tf\n'), ((22469, 22516), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': '(63)'}), '(image, max_delta=63)\n', (22495, 22516), True, 'import tensorflow as tf\n'), ((22566, 22619), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.2)', 'upper': '(1.8)'}), '(image, lower=0.2, upper=1.8)\n', (22590, 22619), True, 'import tensorflow as tf\n'), ((26884, 26916), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (26903, 26916), True, 'import tensorflow_datasets as tfds\n'), ((7188, 7215), 'numpy.expand_dims', 'np.expand_dims', (['item[2]', '(-1)'], {}), '(item[2], -1)\n', (7202, 7215), True, 'import numpy as np\n'), ((22389, 22414), 'tensorflow.split', 'tf.split', (['mask', '(3)'], {'axis': '(2)'}), '(mask, 3, axis=2)\n', (22397, 22414), True, 'import tensorflow as tf\n'), ((32286, 32311), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w, 1)'}), '(shape=(h, w, 1))\n', (32294, 32311), True, 'import numpy as np\n'), ((8785, 8849), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_labels)'], {}), '((valid_images, valid_labels))\n', (8819, 8849), True, 'import tensorflow as tf\n'), ((12611, 12688), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_boxes, valid_labels)'], {}), '((valid_images, valid_boxes, valid_labels))\n', (12645, 12688), True, 'import tensorflow as tf\n'), ((15161, 15246), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_labels, valid_landmarks)'], {}), '((valid_images, valid_labels,\n valid_landmarks))\n', (15195, 15246), True, 'import tensorflow as tf\n'), ((18404, 18483), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['((valid_images, valid_masks), valid_labels)'], {}), '(((valid_images, valid_masks), valid_labels))\n', (18438, 18483), True, 'import tensorflow as tf\n'), ((26555, 26653), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_boxes, valid_labels, valid_landmarks)'], {}), '((valid_images, valid_boxes, valid_labels,\n valid_landmarks))\n', (26589, 26653), True, 'import tensorflow as tf\n'), ((6924, 6942), 'scipy.io.loadmat', 'scio.loadmat', (['file'], {}), '(file)\n', (6936, 6942), True, 'import scipy.io as scio\n'), ((25863, 25886), 'scipy.io.loadmat', 'scio.loadmat', (['part_mask'], {}), '(part_mask)\n', (25875, 25886), True, 'import scipy.io as scio\n'), ((8522, 8586), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_images, train_labels)'], {}), '((train_images, train_labels))\n', (8556, 8586), True, 'import tensorflow as tf\n'), ((12328, 12405), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_images, train_boxes, train_labels)'], {}), '((train_images, train_boxes, train_labels))\n', (12362, 12405), True, 'import tensorflow as tf\n'), ((18082, 18161), 
'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['((train_images, train_masks), train_labels)'], {}), '(((train_images, train_masks), train_labels))\n', (18116, 18161), True, 'import tensorflow as tf\n')]
|
import numpy as np
from ..utils import GeneticAlgorithm as GA
from ..utils import round_vars
from .lcb_merit import lcb_merit
def lcb_ga(num_pts, opt_prob, surrogate, X, fX, Xpend=None, kappa=2.0, dtol=1e-3, lcb_target=None):
"""Minimize the LCB using a genetic algorithm.
:param num_pts: Number of points to generate
:type num_pts: int
:param opt_prob: Optimization problem
:type opt_prob: object
:param surrogate: Surrogate model object
:type surrogate: object
:param X: Previously evaluated points, of size n x dim
:type X: numpy.array
:param fX: Values at previously evaluated points, of size n x 1
:type fX: numpy.array
:param Xpend: Pending evaluations
:type Xpend: numpy.array
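    :param kappa: Number of standard deviations subtracted from the predicted mean in the LCB merit
    :type kappa: float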
:param dtol: Minimum distance between evaluated and pending points
:type dtol: float
    :param lcb_target: If not None, give up and return None unless an LCB value <= lcb_target is found
:type lcb_target: float
:return: num_pts new points to evaluate
:rtype: numpy.array of size num_pts x dim
"""
if Xpend is None: # cdist can't handle None arguments
Xpend = np.empty([0, opt_prob.dim])
XX = np.vstack((X, Xpend))
new_points = np.zeros((num_pts, opt_prob.dim))
for i in range(num_pts):
def obj(Y):
"""Round integer variables and compute LCB."""
Y = round_vars(Y.copy(), opt_prob.int_var, opt_prob.lb, opt_prob.ub)
return lcb_merit(X=Y, surrogate=surrogate, fX=fX, XX=XX, dtol=dtol, kappa=kappa)
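        # The GA minimizes obj over the box [lb, ub]; lcb_merit uses XX and dtol to keep
        # new points at least dtol away from evaluated, pending, and already selected points.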
ga = GA(
function=obj,
dim=opt_prob.dim,
lb=opt_prob.lb,
ub=opt_prob.ub,
int_var=opt_prob.int_var,
pop_size=max([2 * opt_prob.dim, 100]),
num_gen=100,
)
x_best, f_min = ga.optimize()
        if lcb_target is not None and f_min > lcb_target:
return None # Give up
new_points[i, :] = x_best
XX = np.vstack((XX, x_best))
return new_points
|
[
"numpy.empty",
"numpy.zeros",
"numpy.vstack"
] |
[((1152, 1173), 'numpy.vstack', 'np.vstack', (['(X, Xpend)'], {}), '((X, Xpend))\n', (1161, 1173), True, 'import numpy as np\n'), ((1192, 1225), 'numpy.zeros', 'np.zeros', (['(num_pts, opt_prob.dim)'], {}), '((num_pts, opt_prob.dim))\n', (1200, 1225), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.empty', 'np.empty', (['[0, opt_prob.dim]'], {}), '([0, opt_prob.dim])\n', (1123, 1142), True, 'import numpy as np\n'), ((1916, 1939), 'numpy.vstack', 'np.vstack', (['(XX, x_best)'], {}), '((XX, x_best))\n', (1925, 1939), True, 'import numpy as np\n')]
|
import os
from random import random, sample
import numpy as np
from PIL import Image, ImageDraw
from skimage.segmentation import felzenszwalb
from skimage.morphology import skeletonize, remove_small_objects
from skimage.util import invert
from tqdm import tqdm
import cv2
def cv2pil(cv2_img):
if len(cv2_img.shape) == 2 or cv2_img.shape[2]==1:
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_GRAY2RGB)
else:
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
pil_img = Image.fromarray(cv2_img.astype('uint8'))
return pil_img
def pil2cv(pil_img):
pil_img = pil_img.convert('RGB')
cv2_img = np.array(pil_img)
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_RGB2BGR)
cv2_img = cv2_img[:, :, ::-1].copy()
return cv2_img
def posterize(im, n):
indices = np.arange(0,256) # List of all colors
divider = np.linspace(0,255,n+1)[1] # we get a divider
quantiz = np.int0(np.linspace(0,255,n)) # we get quantization colors
color_levels = np.clip(np.int0(indices/divider),0,n-1) # color levels 0,1,2..
    palette = quantiz[color_levels].astype('uint8') # Creating the palette (kept in uint8 for OpenCV)
im2 = palette[im] # Applying palette on image
im2 = cv2.convertScaleAbs(im2) # Converting image back to uint8
return im2
def canny(im1):
im1 = pil2cv(im1)
im2 = cv2.GaussianBlur(im1, (5, 5), 0)
im2 = cv2.Canny(im2, 100, 150)
im2 = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB)
im2 = cv2pil(im2)
return im2
def image2colorlabels(img, colors):
h, w = img.height, img.width
pixels = np.array(list(img.getdata()))
dists = np.array([np.sum(np.abs(pixels-c), axis=1) for c in colors])
    classes = np.argmin(dists, axis=0)
    return classes
def colorize_labels(img, colors):
h, w = img.height, img.width
classes = image2colorlabels(img, colors)
    img = Image.fromarray(np.uint8(classes.reshape((h, w))))  # single-channel image of label indices
return img
def quantize_colors(img, colors):
h, w = img.height, img.width
classes = image2colorlabels(img, colors)
pixels_clr = np.array([colors[p] for p in classes]).reshape((h, w, 3))
img = Image.fromarray(np.uint8(pixels_clr))
return img
def segment(img):
img = pil2cv(img)
h, w = img.shape[0:2]
img = cv2.bilateralFilter(img, 9, 100, 100)
scale = int(h * w / 1000)
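    # Scale grows with the image area, which (roughly) keeps the number of Felzenszwalb
    # segments independent of resolution.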
segments = felzenszwalb(img, scale=scale, sigma=0.5, min_size=150)
out_image = np.zeros((h, w, 3))
num_segments = len(np.unique(segments))
for s in tqdm(range(num_segments)):
label_map = segments==s
label_map3 = np.dstack([label_map] * 3)
masked_img = np.multiply(label_map3, img)
#avg_color = np.sum(np.sum(masked_img, axis=0), axis=0) / np.count_nonzero(label_map) # maybe median is better
nonzeros = [ masked_img[:, :, c].reshape((h * w)) for c in range(3) ]
median_color = [ np.median(np.take(nonzeros[c], nonzeros[c].nonzero())) for c in range(3) ]
smooth_segment = (label_map3 * median_color).astype('uint8')
out_image += smooth_segment
out_image = Image.fromarray(out_image.astype('uint8'))
return out_image
def trace(img):
img = pil2cv(img)
im2 = cv2.GaussianBlur(img, (5, 5), 0)
im3 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
ret, im4 = cv2.threshold(im3, 127, 255, 0)
ret, img = cv2.threshold(im3, 255, 255, 0)
    contours, hierarchy = cv2.findContours(im4, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]  # OpenCV 3/4 compatible
contours = [ c for c in contours if cv2.arcLength(c, True) > 8 ] #and cv2.contourArea(c) > 10]
for contour in contours:
cv2.drawContours(img, [contour], 0, (255), 2)
img = cv2pil(img)
return img
def simplify(img, hed_model_path):
import hed_processing
w, h = img.width, img.height
size_thresh = 0.001 * w * h
img = pil2cv(img)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = hed_processing.run_hed(cv2pil(img), hed_model_path)
ret, img = cv2.threshold(pil2cv(img), 50, 255, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = remove_small_objects(img.astype('bool'), size_thresh)
img = 255 * skeletonize(img).astype('uint8')
img = cv2pil(img)
return img
def upsample(img, w2, h2):
h1, w1 = img.height, img.width
r = max(float(w2)/w1, float(h2)/h1)
img = img.resize((int(r*w1), int(r*h1)), resample=Image.BICUBIC)
return img
def crop_rot_resize(img, frac, w2, h2, ang, stretch, centered):
if w2 is None:
w2 = img.width
if h2 is None:
h2 = img.height
if img.height < h2 or img.width < w2:
img = upsample(img, w2, h2)
if stretch != 0:
v = random() < 0.5
h = 1.0 if not v else (1.0 + stretch)
w = 1.0 if v else (1.0 + stretch)
img = img.resize((int(img.width * w), int(img.height * h)), resample=Image.BICUBIC)
if ang > 0:
img = img.rotate(ang, resample=Image.BICUBIC, expand=False)
ar = float(w2 / h2)
h1, w1 = img.height, img.width
if float(w1) / h1 > ar:
h1_crop = max(h2, h1 * frac)
w1_crop = h1_crop * ar
else:
w1_crop = max(w2, w1 * frac)
h1_crop = w1_crop / ar
xr, yr = (0.5, 0.5) if centered else (random(), random())
x_crop, y_crop = (w1 - w1_crop - 1) * xr, (h1 - h1_crop - 1) * yr
h1_crop, w1_crop, y_crop, x_crop = int(h1_crop), int(w1_crop), int(y_crop), int(x_crop)
img_crop = img.crop((x_crop, y_crop, x_crop+w1_crop, y_crop+h1_crop))
img_resize = img_crop.resize((w2, h2), resample=Image.BICUBIC)
return img_resize
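# Illustrative usage sketch (not part of the original module). The file names below are
# placeholders; any RGB image will do.
if __name__ == '__main__':
    src = Image.open('input.jpg').convert('RGB')  # placeholder input path
    canny(src).save('edges.png')  # Canny edge map (PIL image in, PIL image out)
    crop_rot_resize(src, 0.8, 512, 512, 5, 0.1, True).save('crop.png')  # rotated, stretched centre crop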
|
[
"cv2.GaussianBlur",
"numpy.abs",
"cv2.arcLength",
"numpy.argmin",
"cv2.bilateralFilter",
"numpy.arange",
"numpy.unique",
"numpy.multiply",
"cv2.cvtColor",
"skimage.morphology.skeletonize",
"cv2.convertScaleAbs",
"numpy.linspace",
"cv2.drawContours",
"numpy.dstack",
"cv2.Canny",
"numpy.uint8",
"numpy.int0",
"random.random",
"cv2.threshold",
"numpy.zeros",
"numpy.array",
"skimage.segmentation.felzenszwalb",
"cv2.findContours"
] |
[((628, 645), 'numpy.array', 'np.array', (['pil_img'], {}), '(pil_img)\n', (636, 645), True, 'import numpy as np\n'), ((661, 701), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img', 'cv2.COLOR_RGB2BGR'], {}), '(cv2_img, cv2.COLOR_RGB2BGR)\n', (673, 701), False, 'import cv2\n'), ((808, 825), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (817, 825), True, 'import numpy as np\n'), ((1183, 1207), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['im2'], {}), '(im2)\n', (1202, 1207), False, 'import cv2\n'), ((1306, 1338), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im1', '(5, 5)', '(0)'], {}), '(im1, (5, 5), 0)\n', (1322, 1338), False, 'import cv2\n'), ((1349, 1373), 'cv2.Canny', 'cv2.Canny', (['im2', '(100)', '(150)'], {}), '(im2, 100, 150)\n', (1358, 1373), False, 'import cv2\n'), ((1384, 1421), 'cv2.cvtColor', 'cv2.cvtColor', (['im2', 'cv2.COLOR_GRAY2RGB'], {}), '(im2, cv2.COLOR_GRAY2RGB)\n', (1396, 1421), False, 'import cv2\n'), ((1660, 1684), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(0)'}), '(dists, axis=0)\n', (1669, 1684), True, 'import numpy as np\n'), ((2216, 2253), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(9)', '(100)', '(100)'], {}), '(img, 9, 100, 100)\n', (2235, 2253), False, 'import cv2\n'), ((2299, 2354), 'skimage.segmentation.felzenszwalb', 'felzenszwalb', (['img'], {'scale': 'scale', 'sigma': '(0.5)', 'min_size': '(150)'}), '(img, scale=scale, sigma=0.5, min_size=150)\n', (2311, 2354), False, 'from skimage.segmentation import felzenszwalb\n'), ((2371, 2390), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (2379, 2390), True, 'import numpy as np\n'), ((3138, 3170), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (3154, 3170), False, 'import cv2\n'), ((3181, 3218), 'cv2.cvtColor', 'cv2.cvtColor', (['im2', 'cv2.COLOR_RGB2GRAY'], {}), '(im2, cv2.COLOR_RGB2GRAY)\n', (3193, 3218), False, 'import cv2\n'), ((3234, 3265), 'cv2.threshold', 'cv2.threshold', (['im3', '(127)', '(255)', '(0)'], {}), '(im3, 127, 255, 0)\n', (3247, 3265), False, 'import cv2\n'), ((3281, 3312), 'cv2.threshold', 'cv2.threshold', (['im3', '(255)', '(255)', '(0)'], {}), '(im3, 255, 255, 0)\n', (3294, 3312), False, 'import cv2\n'), ((3344, 3405), 'cv2.findContours', 'cv2.findContours', (['im4', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im4, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3360, 3405), False, 'import cv2\n'), ((3785, 3817), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(0)'], {}), '(img, (3, 3), 0)\n', (3801, 3817), False, 'import cv2\n'), ((3828, 3860), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(0)'], {}), '(img, (3, 3), 0)\n', (3844, 3860), False, 'import cv2\n'), ((3987, 4024), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3999, 4024), False, 'import cv2\n'), ((368, 409), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img', 'cv2.COLOR_GRAY2RGB'], {}), '(cv2_img, cv2.COLOR_GRAY2RGB)\n', (380, 409), False, 'import cv2\n'), ((438, 478), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img, cv2.COLOR_BGR2RGB)\n', (450, 478), False, 'import cv2\n'), ((863, 889), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', '(n + 1)'], {}), '(0, 255, n + 1)\n', (874, 889), True, 'import numpy as np\n'), ((930, 952), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', 'n'], {}), '(0, 255, n)\n', (941, 952), True, 'import numpy as np\n'), ((1008, 1034), 'numpy.int0', 'np.int0', (['(indices / divider)'], 
{}), '(indices / divider)\n', (1015, 1034), True, 'import numpy as np\n'), ((2101, 2121), 'numpy.uint8', 'np.uint8', (['pixels_clr'], {}), '(pixels_clr)\n', (2109, 2121), True, 'import numpy as np\n'), ((2414, 2433), 'numpy.unique', 'np.unique', (['segments'], {}), '(segments)\n', (2423, 2433), True, 'import numpy as np\n'), ((2528, 2554), 'numpy.dstack', 'np.dstack', (['([label_map] * 3)'], {}), '([label_map] * 3)\n', (2537, 2554), True, 'import numpy as np\n'), ((2576, 2604), 'numpy.multiply', 'np.multiply', (['label_map3', 'img'], {}), '(label_map3, img)\n', (2587, 2604), True, 'import numpy as np\n'), ((3542, 3585), 'cv2.drawContours', 'cv2.drawContours', (['img', '[contour]', '(0)', '(255)', '(2)'], {}), '(img, [contour], 0, 255, 2)\n', (3558, 3585), False, 'import cv2\n'), ((2017, 2055), 'numpy.array', 'np.array', (['[colors[p] for p in classes]'], {}), '([colors[p] for p in classes])\n', (2025, 2055), True, 'import numpy as np\n'), ((4648, 4656), 'random.random', 'random', ([], {}), '()\n', (4654, 4656), False, 'from random import random, sample\n'), ((5217, 5225), 'random.random', 'random', ([], {}), '()\n', (5223, 5225), False, 'from random import random, sample\n'), ((5227, 5235), 'random.random', 'random', ([], {}), '()\n', (5233, 5235), False, 'from random import random, sample\n'), ((1602, 1620), 'numpy.abs', 'np.abs', (['(pixels - c)'], {}), '(pixels - c)\n', (1608, 1620), True, 'import numpy as np\n'), ((3446, 3468), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (3459, 3468), False, 'import cv2\n'), ((4105, 4121), 'skimage.morphology.skeletonize', 'skeletonize', (['img'], {}), '(img)\n', (4116, 4121), False, 'from skimage.morphology import skeletonize, remove_small_objects\n')]
|
import glob
import numpy as np
def _word_to_bool(word):
"""convert a string to boolean according the first 2 characters."""
_accepted_bool_prefixes = ("T", ".T")
return word.upper().startswith(_accepted_bool_prefixes)
class Photons:
pass
class Wavelengths:
pass
class Physics:
pass
class Dust:
    def __init__(self):
        self.component = []
class DustComponent:
pass
class Grid:
pass
class Map:
pass
class Zone:
    def __init__(self):
        self.dust = []
class Mol:
    def __init__(self):
        self.molecule = []
class Molecule:
pass
class Star:
pass
class Simu:
version = float()
pass
class Params:
simu = Simu()
phot = Photons()
wavelengths = Wavelengths()
map = Map()
grid = Grid()
zones = []
mol = Mol()
stars = []
_minimum_version = 3.0
    def __init__(self, filename=None, **kwargs):
        self.filename = filename
        self.zones = []  # per-instance containers, so separate Params objects do not share state
        self.stars = []
        self.mol = Mol()
        self._read(**kwargs)
def _read(self):
with open(self.filename, mode="rt") as file:
f = []
# Reading file and removing comments
for line in file:
# Skipping comments and empty lines
if (not line.startswith("#")) and (len(line.strip()) > 0):
f += [line]
f = iter(f)
# -- Version of the parameter file --
line = next(f).split()
self.simu.version = float(line[0])
if self.simu.version < self._minimum_version - 1e-3:
print("Parameter file version is ", self.simu.version)
raise Exception(
'Parameter file version must be at least {ver:.2f}'.format(
ver=self._minimum_version
)
)
# -- Number of photon packages --
# to support float notations (e.g. "1.28e8" or "64000.0"),
# we read as float but convert to int
line = next(f).split()
self.phot.nphot_T = int(float(line[0]))
line = next(f).split()
self.phot.nphot_SED = int(float(line[0]))
line = next(f).split()
self.phot.nphot_image = int(float(line[0]))
# -- Wavelengths --
line = next(f).split()
self.wavelengths.n_wl = int(line[0])
self.wavelengths.wl_min = float(line[1])
self.wavelengths.wl_max = float(line[2])
line = next(f).split()
self.simu.compute_T = _word_to_bool(line[0])
self.simu.compute_SED = _word_to_bool(line[1])
self.simu.use_default_wl = _word_to_bool(line[2])
line = next(f).split()
self.wavelengths.file = line[0]
line = next(f).split()
self.simu.separate_contrib = _word_to_bool(line[0])
self.simu.separate_pola = _word_to_bool(line[1])
# -- Grid --
line = next(f).split()
self.grid.type = int(line[0])
line = next(f).split()
self.grid.n_rad = int(line[0])
self.grid.nz = int(line[1])
self.grid.n_az = int(line[2])
self.grid.n_rad_in = int(line[3])
# -- Maps --
line = next(f).split()
self.map.nx = int(line[0])
self.map.ny = int(line[1])
self.map.size = float(line[2])
line = next(f).split()
self.map.RT_imin = float(line[0])
self.map.RT_imax = float(line[1])
self.map.RT_ntheta = int(line[2])
self.map.lRT_centered = _word_to_bool(line[3])
line = next(f).split()
self.map.RT_az_min = float(line[0])
self.map.RT_az_max = float(line[1])
self.map.RT_n_az = int(line[2])
line = next(f).split()
self.map.distance = float(line[0])
line = next(f).split()
self.map.PA = float(line[0])
# -- Scattering method --
line = next(f).split()
self.simu.scattering_method = int(line[0])
line = next(f).split()
self.simu.phase_function_method = int(line[0])
# -- Symetries --
line = next(f).split()
self.simu.image_symmetry = _word_to_bool(line[0])
line = next(f).split()
self.simu.central_symmetry = _word_to_bool(line[0])
line = next(f).split()
self.simu.axial_symmetry = _word_to_bool(line[0])
# -- Disk physics --
line = next(f).split()
self.simu.dust_settling_type = int(line[0])
self.simu.dust_settling_exp = float(line[1])
self.simu.a_settling = float(line[2])
line = next(f).split()
self.simu.radial_migration = _word_to_bool(line[0])
line = next(f).split()
self.simu.dust_sublimation = _word_to_bool(line[0])
line = next(f).split()
self.simu.hydrostatic_eq = _word_to_bool(line[0])
line = next(f).split()
self.simu.viscous_heating = _word_to_bool(line[0])
self.simu.viscosity = float(line[1])
# -- Number of zones --
line = next(f).split()
n_zones = int(line[0])
self.simu.n_zones = n_zones
# -- Density structure --
            for k in range(n_zones):
                self.zones.append(Zone())  # a fresh Zone object for every zone
line = next(f).split()
self.zones[k].geometry = int(line[0])
line = next(f).split()
self.zones[k].dust_mass = float(line[0])
self.zones[k].gas_to_dust_ratio = float(line[1])
line = next(f).split()
self.zones[k].h0 = float(line[0])
self.zones[k].Rref = float(line[1])
self.zones[k].vertical_exp = float(line[2])
line = next(f).split()
self.zones[k].Rin = float(line[0])
self.zones[k].edge = float(line[1])
self.zones[k].Rout = float(line[2])
self.zones[k].Rc = float(line[3])
line = next(f).split()
self.zones[k].flaring_exp = float(line[0])
line = next(f).split()
self.zones[k].surface_density_exp = float(line[0])
self.zones[k].m_gamma_exp = float(line[1])
# -- Grain properties --
            for k in range(n_zones):
                line = next(f).split()
                n_species = int(line[0])
                self.zones[k].n_species = n_species
                for j in range(n_species):
                    self.zones[k].dust.append(Dust())  # a fresh Dust object for every species
line = next(f).split()
self.zones[k].dust[j].type = line[0]
n_components = int(line[1])
self.zones[k].dust[j].n_components = n_components
self.zones[k].dust[j].mixing_rule = int(line[2])
self.zones[k].dust[j].porosity = float(line[3])
self.zones[k].dust[j].mass_fraction = float(line[4])
self.zones[k].dust[j].DHS_Vmax = float(line[5])
                    for l in range(n_components):
                        self.zones[k].dust[j].component.append(DustComponent())  # one object per component
line = next(f).split()
self.zones[k].dust[j].component[l].file = line[0]
self.zones[k].dust[j].component[l].volume_fraction = float(line[1])
line = next(f).split()
self.zones[k].dust[j].heating_method = int(line[0])
line = next(f).split()
self.zones[k].dust[j].amin = float(line[0])
self.zones[k].dust[j].amax = float(line[1])
self.zones[k].dust[j].aexp = float(line[2])
self.zones[k].dust[j].n_grains = int(line[3])
# -- Molecular settings --
line = next(f).split()
self.mol.compute_pop = _word_to_bool(line[0])
self.mol.compute_pop_accurate = _word_to_bool(line[1])
self.mol.LTE = _word_to_bool(line[2])
self.mol.profile_width = float(line[3])
line = next(f).split()
self.mol.v_turb = float(line[0])
line = next(f).split()
n_mol = int(line[0])
self.mol.n_mol = n_mol
            for k in range(n_mol):
                self.mol.molecule.append(Molecule())  # a fresh Molecule object for every entry
line = next(f).split()
self.mol.molecule[k].file = line[0]
self.mol.molecule[k].level_max = int(line[1])
line = next(f).split()
self.mol.molecule[k].v_max = float(line[0])
self.mol.molecule[k].nv = int(line[1])
line = next(f).split()
self.mol.molecule[k].cst_abundance = _word_to_bool(line[0])
self.mol.molecule[k].abundance = line[1]
self.mol.molecule[k].abundance_file = line[2]
line = next(f).split()
self.mol.molecule[k].ray_tracing = _word_to_bool(line[0])
nTrans = int(line[1])
self.mol.molecule[k].n_trans = nTrans
line = next(f).split()
self.mol.molecule[k].transitions = list(
map(int, line[0:nTrans])
) # convert list of str to int
# -- Star properties --
line = next(f).split()
n_stars = int(line[0])
self.simu.n_stars = n_stars
            for k in range(n_stars):
                self.stars.append(Star())  # a fresh Star object for every star
line = next(f).split()
self.stars[k].Teff = float(line[0])
self.stars[k].R = float(line[1])
self.stars[k].M = float(line[2])
self.stars[k].x = float(line[3])
self.stars[k].y = float(line[4])
self.stars[k].z = float(line[5])
self.stars[k].is_bb = _word_to_bool(line[6])
line = next(f).split()
self.stars[k].file = line[0]
line = next(f).split()
self.stars[k].fUV = float(line[0])
self.stars[k].slope_UV = float(line[1])
# -- Command line options --
for line in f:
if (len(line) > 0):
line = line.split()
if (len(line) > 0): # we test again in case there were only spaces
if (line[0] == "Executed"):
self.options = " ".join(line[6:])
if (line[0] == "sha"):
self.mcfost_sha = line[2]
def __str__(self):
""" Return a formatted parameter file. Currently returns v3.0 format
"""
# -- Photon packets --
txt = f"""3.0 mcfost version\n
#-- Number of photon packages --
{self.phot.nphot_T:<10.5g} nbr_photons_eq_th : T computation
{self.phot.nphot_SED:<10.5g} nbr_photons_lambda : SED computation
{self.phot.nphot_image:<10.5g} nbr_photons_image : images computation\n\n"""
# -- Wavelengths --
txt += f"""#-- Wavelength --
{self.wavelengths.n_wl:<4d} {self.wavelengths.wl_min:<5.1f} {self.wavelengths.wl_max:<7g} n_lambda, lambda_min, lambda_max [microns]
{self.simu.compute_T} {self.simu.compute_SED} {self.simu.use_default_wl} compute temperature?, compute sed?, use default wavelength grid ?
{self.wavelengths.file} wavelength file (if previous parameter is F)
{self.simu.separate_contrib} {self.simu.separate_pola} separation of different contributions?, stokes parameters?\n\n"""
# -- Grid --
txt += f"""#-- Grid geometry and size --
{self.grid.type:>1d} 1 = cylindrical, 2 = spherical
{self.grid.n_rad} {self.grid.nz} {self.grid.n_az} {self.grid.n_rad_in} n_rad (log distribution), nz (or n_theta), n_az, n_rad_in\n\n"""
# -- Maps --
txt += f"""#-- Maps --
{self.map.nx} {self.map.ny} {self.map.size:5.1f} grid (nx,ny), size [au]
{self.map.RT_imin:<4.1f} {self.map.RT_imax:<4.1f} {self.map.RT_ntheta:>2d} {self.map.lRT_centered} RT: imin, imax, n_incl, centered ?
{self.map.RT_az_min:<4.1f} {self.map.RT_az_max:<4.1f} {self.map.RT_n_az:>2d} RT: az_min, az_max, n_az
{self.map.distance:<6.2f} distance (pc)
{self.map.PA:<6.2f} disk PA\n\n"""
# -- Scattering method --
txt += f"""#-- Scattering method --
{self.simu.scattering_method} 0=auto, 1=grain prop, 2=cell prop
{self.simu.phase_function_method} 1=Mie, 2=hg (2 implies the loss of polarizarion)\n\n"""
# -- Symetries --
txt += f"""#-- Symmetries --
{self.simu.image_symmetry} image symmetry
{self.simu.central_symmetry} central symmetry
{self.simu.axial_symmetry} axial symmetry (important only if N_phi > 1)\n\n"""
# -- Disk physics --
txt += f"""#Disk physics
{self.simu.dust_settling_type} {self.simu.dust_settling_exp:<6.2f} {self.simu.a_settling:<6.2f} dust_settling (0=no settling, 1=parametric, 2=Dubrulle, 3=Fromang), exp_strat, a_strat (for parametric settling)
{self.simu.radial_migration} dust radial migration
{self.simu.dust_sublimation} sublimate dust
{self.simu.hydrostatic_eq} hydrostatic equilibrium
{self.simu.viscous_heating} {self.simu.viscosity:4.1g} viscous heating, alpha_viscosity\n\n"""
# -- Number of zones --
txt += f"""#-- Number of zones -- 1 zone = 1 density structure + corresponding grain properties
{self.simu.n_zones}\n\n"""
# -- Density structure --
txt += f"#-- Density structure --\n"
for k in range(self.simu.n_zones):
txt += f""" {self.zones[k].geometry} zone type : 1 = disk, 2 = tapered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall
{self.zones[k].dust_mass:<10.2e} {self.zones[k].gas_to_dust_ratio:<5.1f} dust mass, gas-to-dust mass ratio
{self.zones[k].h0:<5.1f} {self.zones[k].Rref:<6.1f} {self.zones[k].vertical_exp:<6.1f} scale height, reference radius (AU), unused for envelope, vertical profile exponent (only for debris disk)
{self.zones[k].Rin:<6.1f} {self.zones[k].edge:<6.1f} {self.zones[k].Rout:<6.1f} {self.zones[k].Rc:<6.1f} Rin, edge, Rout, Rc (AU) Rc is only used for tappered-edge & debris disks (Rout set to 8*Rc if Rout==0)
{self.zones[k].flaring_exp:<8.3f} flaring exponent, unused for envelope
{self.zones[k].surface_density_exp} {self.zones[k].m_gamma_exp} surface density exponent (or -gamma for tappered-edge disk or volume density for envelope), usually < 0, -gamma_exp (or alpha_in & alpha_out for debris disk)\n\n"""
txt += f"\n"
# -- Grain properties --
txt += f"#-- Grain properties --\n"
for k in range(self.simu.n_zones):
txt += (
f" {self.zones[k].n_species} Number of species\n"
)
for j in range(self.zones[k].n_species):
txt += f" Mie {self.zones[k].dust[j].n_components} {self.zones[k].dust[j].mixing_rule} {self.zones[k].dust[j].porosity:<5.2f} {self.zones[k].dust[j].mass_fraction:<5.2f} {self.zones[k].dust[j].DHS_Vmax} Grain type (Mie or DHS), N_components, mixing rule (1 = EMT or 2 = coating), porosity, mass fraction, Vmax (for DHS)\n"
for l in range(self.zones[k].dust[j].n_components):
txt += f" {self.zones[k].dust[j].component[l].file} {self.zones[k].dust[j].component[l].volume_fraction} Optical indices file, volume fraction\n"
txt += f""" {self.zones[k].dust[j].heating_method} Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE
{self.zones[k].dust[j].amin} {self.zones[k].dust[j].amax} {self.zones[k].dust[j].aexp} {self.zones[k].dust[j].n_grains} amin, amax, aexp, nbr_grains\n\n"""
# -- Molecular settings --
txt += f"""#-- Molecular RT settings --
{self.mol.compute_pop} {self.mol.compute_pop_accurate} {self.mol.LTE} {self.mol.profile_width} lpop, laccurate_pop, LTE, profile width
{self.mol.v_turb} v_turb [km/s]
{self.mol.n_mol} nmol\n"""
for k in range(self.mol.n_mol):
txt += f""" {self.mol.molecule[k].file} {self.mol.molecule[k].level_max} molecular data filename, level_max
{self.mol.molecule[k].v_max} {self.mol.molecule[k].nv} vmax (km.s-1), n_speed
{self.mol.molecule[k].cst_abundance} {self.mol.molecule[k].abundance} {self.mol.molecule[k].abundance_file} cst molecule abundance ?, abundance, abundance file
{self.mol.molecule[k].ray_tracing} {self.mol.molecule[k].n_trans} ray tracing ?, number of lines in ray-tracing\n """
for j in range(self.mol.molecule[k].n_trans):
txt += f" {self.mol.molecule[k].transitions[j]}"
txt += f" transition numbers\n"
txt += f"\n"
# -- Star properties --
txt += f"""#-- Star properties --
{self.simu.n_stars} Number of stars\n"""
for k in range(self.simu.n_stars):
txt += f""" {self.stars[k].Teff} {self.stars[k].R} {self.stars[k].M} {self.stars[k].x} {self.stars[k].y} {self.stars[k].x} {self.stars[k].is_bb} Temp, radius (solar radius),M (solar mass),x,y,z (AU), is a blackbody?
{self.stars[k].file}
{self.stars[k].fUV} {self.stars[k].slope_UV} fUV, slope_UV\n"""
return txt
def writeto(self, outname):
""" Write an MCFOST parameter file to disk. """
with open(outname, mode="wt") as file:
file.write(str(self))
def calc_inclinations(self):
# Calculate the inclinations for the ray-traced SEDs and images
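        # Inclinations are spaced uniformly in cos(i) between RT_imin and RT_imax; with
        # lRT_centered the bins are sampled at their centres, otherwise at the bin edges.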
if self.map.RT_ntheta == 1:
return self.map.RT_imin
else:
cos_min, cos_max = np.cos(np.deg2rad([self.map.RT_imin, self.map.RT_imax]))
if self.map.lRT_centered:
return (
np.rad2deg(np.arccos(
cos_min
+ (np.arange(self.map.RT_ntheta) + 0.5)
/ self.map.RT_ntheta
* (cos_max - cos_min)
))
)
else:
return (
np.rad2deg(np.arccos(
cos_min
+ (np.arange(self.map.RT_ntheta))
/ (self.map.RT_ntheta - 1)
* (cos_max - cos_min)
))
)
def find_parameter_file(directory="./"):
    par_files = glob.glob(directory + "/*.par*")
    if len(par_files) == 1:
        return par_files[0]
    elif len(par_files) > 1:
raise ValueError("Multiple parameter files found in " + directory)
else:
raise ValueError("No parameter files found in " + directory)
|
[
"numpy.deg2rad",
"numpy.arange",
"glob.glob"
] |
[((18379, 18411), 'glob.glob', 'glob.glob', (["(directory + '/*.par*')"], {}), "(directory + '/*.par*')\n", (18388, 18411), False, 'import glob\n'), ((17628, 17676), 'numpy.deg2rad', 'np.deg2rad', (['[self.map.RT_imin, self.map.RT_imax]'], {}), '([self.map.RT_imin, self.map.RT_imax])\n', (17638, 17676), True, 'import numpy as np\n'), ((18155, 18184), 'numpy.arange', 'np.arange', (['self.map.RT_ntheta'], {}), '(self.map.RT_ntheta)\n', (18164, 18184), True, 'import numpy as np\n'), ((17842, 17871), 'numpy.arange', 'np.arange', (['self.map.RT_ntheta'], {}), '(self.map.RT_ntheta)\n', (17851, 17871), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Tuple, List, Dict, Any
import numpy as np
from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation
STAT_FIELDS = ["relabelling_score", "ambiguity", "label_correctness"]
@dataclass(frozen=True)
class SelectionType(Enum):
"""
Defines the 5 possible types of selections that can be made in an iteration
"""
MISLABELLED_CASE_SELECTED_CORRECTED = 1
MISLABELLED_CASE_SELECTED_NOT_CORRECTED = 2
AMBIGUOUS_CASE_SELECTED_CORRECTED = 3
AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED = 4
CLEAN_CASE_SELECTED = 5
def compute_selection_type_of_current_iter(sample_id: int,
true_ambiguous_cases: np.ndarray,
true_label_counts: np.ndarray,
mislabelled_ids_current: np.ndarray,
ambiguous_case_ids_current: np.ndarray,
mislabelled_ids_prev: np.ndarray,
ambiguous_case_ids_prev: np.ndarray) -> SelectionType:
"""
Compute the type of selection that occurred between the previous and current iteration.
:param sample_id: The sample id.
:param true_ambiguous_cases: The ids for the true ambiguous samples.
:param true_label_counts: The label counts for the true label distribution.
:param mislabelled_ids_current: The ids for the current iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_current: The ids for the current iteration remaining ambiguous mislabelled samples.
:param mislabelled_ids_prev: The ids for the previous iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_prev: The ids for the previous iteration remaining ambiguous mislabelled samples.
:return: An enum representing the selection type that occurred between the previous and current iteration.
"""
if sample_id in true_ambiguous_cases:
if len(set(ambiguous_case_ids_prev) - set(ambiguous_case_ids_current)) > 0:
return SelectionType.AMBIGUOUS_CASE_SELECTED_CORRECTED
else:
return SelectionType.AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED
else:
if len(set(mislabelled_ids_prev) - set(mislabelled_ids_current)) > 0:
return SelectionType.MISLABELLED_CASE_SELECTED_CORRECTED
elif len(np.unique(np.where(true_label_counts[sample_id])[0])) == 1:
return SelectionType.CLEAN_CASE_SELECTED
else:
return SelectionType.MISLABELLED_CASE_SELECTED_NOT_CORRECTED
def get_mislabelled_sample_ids(true_label_counts: np.ndarray, current_label_counts: np.ndarray) -> np.ndarray:
"""
Compute which samples are mislabelled.
:param true_label_counts: The label counts for the true label distribution.
:param current_label_counts: The label counts for the current distribution.
:return: An array with the ids of the mislabeled samples (majority voting)
"""
true_class = np.argmax(true_label_counts, axis=1)
current_class = np.argmax(current_label_counts, axis=1)
return np.where(true_class != current_class)
def get_ambiguous_sample_ids(true_label_counts: np.ndarray, threshold: float = 0.30) -> np.ndarray:
"""
Compute which samples are ambiguous
:param true_label_counts: The label counts for the true label distribution.
:param threshold: The label entropy threshold above which a sample is considered ambiguous
:return: An array with the ids of the ambiguous samples
"""
label_entropy = compute_label_entropy(true_label_counts)
return np.where(label_entropy > threshold)[0]
class SimulationStats:
"""
A class that keeps track of statistics/metrics during the simulation
"""
def __init__(self, name: str, true_label_counts: np.ndarray, initial_labels: np.ndarray):
"""
:param name: The name of the simulation
:param true_label_counts: The label counts for the true label distribution
np.ndarray [num_samples x num_classes]
:param initial_labels: The initial label counts, np.ndarray [num_samples x num_classes]
"""
self.name = name
self.initial_labels = np.copy(initial_labels)
self.true_label_counts = true_label_counts
self.true_ambiguous_cases = get_ambiguous_sample_ids(true_label_counts)
self.true_distribution = true_label_counts / np.sum(true_label_counts, axis=-1, keepdims=True)
self.selected_sample_id: List[int] = list()
self.num_fetches: List[int] = list()
self.accuracy: List[float] = list()
self.avg_total_variation: List[float] = list()
self.selection_type: List[SelectionType] = list()
self.selector_stats: Dict[str, Any] = {key: list() for key in STAT_FIELDS}
mislabelled_ids_current, ambiguous_case_ids_current = self.get_noisy_and_ambiguous_cases(initial_labels)
self.mislabelled_not_ambiguous_sample_ids = [mislabelled_ids_current]
self.mislabelled_ambiguous_sample_ids = [ambiguous_case_ids_current]
self.num_initial_mislabelled_not_ambiguous = self.mislabelled_not_ambiguous_sample_ids[0].size
self.num_initial_mislabelled_ambiguous = self.mislabelled_ambiguous_sample_ids[0].size
self.num_remaining_mislabelled_not_ambiguous: List[int] = list()
self.num_remaining_mislabelled_ambiguous: List[int] = list()
def get_noisy_and_ambiguous_cases(self, current_label_counts: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
        Compute which of the current labels are still mislabelled and split them into
        ambiguous and non-ambiguous samples.
:param current_label_counts: The label counts of the current iteration
:return: A tuple containing an array with the current mislabelled not ambiguous sample ids and an array with
the current mislabelled ambiguous sample ids.
"""
# Find the potential label noise and ambiguous cases
label_mismatch_ids_current = get_mislabelled_sample_ids(self.true_label_counts, current_label_counts)
# Split the label mismatch cases into ambiguous and clear label noise types
mislabelled_ids_current = np.setdiff1d(label_mismatch_ids_current, self.true_ambiguous_cases)
ambiguous_case_ids_current = np.array(np.intersect1d(label_mismatch_ids_current, self.true_ambiguous_cases))
return mislabelled_ids_current, ambiguous_case_ids_current
def record_selector_stats(self, selector_stats: Dict[str, Any]) -> None:
"""
"""
if len(selector_stats) == 0:
return
for key in STAT_FIELDS:
if key in selector_stats:
self.selector_stats[key].append(selector_stats[key])
def record_iteration(self, selected_sample_id: int, num_fetches: int, current_label_counts: np.ndarray) -> None:
"""
:param selected_sample_id: The sample id that was selected at this iteration
:param num_fetches: The number of fetches (relabels) it took to achieve a majority
:param current_label_counts: The labels counts for the current iteration
:return:
"""
self.selected_sample_id.append(selected_sample_id)
self.num_fetches.append(num_fetches)
self.accuracy.append(compute_accuracy(current_label_counts, self.true_label_counts))
current_distribution = current_label_counts / np.sum(current_label_counts, axis=-1, keepdims=True)
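        # Mean total-variation distance between the true and current per-sample label distributions.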
self.avg_total_variation.append(np.nanmean(total_variation(self.true_distribution, current_distribution)))
mislabelled_ids_current, ambiguous_case_ids_current = self.get_noisy_and_ambiguous_cases(current_label_counts)
mislabelled_ids_prev = self.mislabelled_not_ambiguous_sample_ids[-1]
ambiguous_case_ids_prev = self.mislabelled_ambiguous_sample_ids[-1]
selection_type = compute_selection_type_of_current_iter(selected_sample_id,
self.true_ambiguous_cases,
self.true_label_counts,
mislabelled_ids_current, ambiguous_case_ids_current,
mislabelled_ids_prev, ambiguous_case_ids_prev)
self.selection_type.append(selection_type)
self.num_remaining_mislabelled_not_ambiguous.append(len(mislabelled_ids_current))
self.num_remaining_mislabelled_ambiguous.append(len(ambiguous_case_ids_current))
self.mislabelled_not_ambiguous_sample_ids.append(mislabelled_ids_current)
self.mislabelled_ambiguous_sample_ids.append(ambiguous_case_ids_current)
def log_last_iter(self) -> None:
"""
Log the statistics of the last iteration
:return: None
"""
logging.info(f"Method: {self.name}, selected_id: {self.selected_sample_id[-1]} "
f"accuracy: {self.accuracy[-1]}")
logging.info(f"Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} "
f"and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}")
class SimulationStatsDistribution(object):
"""
A class that takes a list of simulation statistics and creates a distribution over them.
"""
def __init__(self, simulation_stats_list: List[SimulationStats]):
"""
:param simulation_stats_list: A list of SimulationStats objects
"""
self.simulation_stats = simulation_stats_list
end_point = max([np.max(np.cumsum(sim_stats.num_fetches)) for sim_stats in simulation_stats_list])
start_point = min([np.min(np.cumsum(sim_stats.num_fetches)) for sim_stats in simulation_stats_list])
self.num_initial_mislabelled_not_ambiguous = simulation_stats_list[0].num_initial_mislabelled_not_ambiguous
self.num_initial_mislabelled_ambiguous = simulation_stats_list[0].num_initial_mislabelled_ambiguous
self.name = simulation_stats_list[0].name
self.num_fetches = np.arange(start_point, end_point)
self.accuracy = self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list, 'accuracy')
self.avg_total_variation = self._interpolate_and_make_dist_array(
self.num_fetches, simulation_stats_list, 'avg_total_variation')
self.num_remaining_mislabelled_not_ambiguous =\
self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list,
'num_remaining_mislabelled_not_ambiguous')
self.num_remaining_mislabelled_ambiguous = \
self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list,
'num_remaining_mislabelled_ambiguous')
@staticmethod
def _interpolate_and_make_dist_array(num_fetches: np.ndarray,
simulation_stats_list: List[SimulationStats],
fp_attr_name: str) -> np.ndarray:
return np.array([np.interp(num_fetches, np.cumsum(sim_stats.num_fetches),
sim_stats.__getattribute__(fp_attr_name)) for sim_stats in simulation_stats_list])
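# Illustrative toy example (not part of the original module); it only needs numpy and the
# helpers above, assuming the package imports at the top of the module resolve.
if __name__ == "__main__":
    true_counts = np.array([[3, 0], [0, 3], [2, 1]])    # majority labels: 0, 1, 0
    noisy_counts = np.array([[3, 0], [3, 0], [1, 2]])   # samples 1 and 2 disagree with the truth
    print(get_mislabelled_sample_ids(true_counts, noisy_counts))  # -> (array([1, 2]),)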
|
[
"numpy.sum",
"InnerEyeDataQuality.evaluation.metrics.compute_accuracy",
"numpy.copy",
"numpy.argmax",
"InnerEyeDataQuality.evaluation.metrics.total_variation",
"numpy.setdiff1d",
"logging.info",
"InnerEyeDataQuality.evaluation.metrics.compute_label_entropy",
"numpy.where",
"numpy.arange",
"numpy.cumsum",
"numpy.intersect1d",
"dataclasses.dataclass"
] |
[((658, 680), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (667, 680), False, 'from dataclasses import dataclass\n'), ((3503, 3539), 'numpy.argmax', 'np.argmax', (['true_label_counts'], {'axis': '(1)'}), '(true_label_counts, axis=1)\n', (3512, 3539), True, 'import numpy as np\n'), ((3560, 3599), 'numpy.argmax', 'np.argmax', (['current_label_counts'], {'axis': '(1)'}), '(current_label_counts, axis=1)\n', (3569, 3599), True, 'import numpy as np\n'), ((3611, 3648), 'numpy.where', 'np.where', (['(true_class != current_class)'], {}), '(true_class != current_class)\n', (3619, 3648), True, 'import numpy as np\n'), ((4061, 4101), 'InnerEyeDataQuality.evaluation.metrics.compute_label_entropy', 'compute_label_entropy', (['true_label_counts'], {}), '(true_label_counts)\n', (4082, 4101), False, 'from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation\n'), ((4113, 4148), 'numpy.where', 'np.where', (['(label_entropy > threshold)'], {}), '(label_entropy > threshold)\n', (4121, 4148), True, 'import numpy as np\n'), ((4740, 4763), 'numpy.copy', 'np.copy', (['initial_labels'], {}), '(initial_labels)\n', (4747, 4763), True, 'import numpy as np\n'), ((6757, 6824), 'numpy.setdiff1d', 'np.setdiff1d', (['label_mismatch_ids_current', 'self.true_ambiguous_cases'], {}), '(label_mismatch_ids_current, self.true_ambiguous_cases)\n', (6769, 6824), True, 'import numpy as np\n'), ((9443, 9563), 'logging.info', 'logging.info', (['f"""Method: {self.name}, selected_id: {self.selected_sample_id[-1]} accuracy: {self.accuracy[-1]}"""'], {}), "(\n f'Method: {self.name}, selected_id: {self.selected_sample_id[-1]} accuracy: {self.accuracy[-1]}'\n )\n", (9455, 9563), False, 'import logging\n'), ((9587, 9773), 'logging.info', 'logging.info', (['f"""Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}"""'], {}), "(\n f'Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}'\n )\n", (9599, 9773), False, 'import logging\n'), ((10683, 10716), 'numpy.arange', 'np.arange', (['start_point', 'end_point'], {}), '(start_point, end_point)\n', (10692, 10716), True, 'import numpy as np\n'), ((4948, 4997), 'numpy.sum', 'np.sum', (['true_label_counts'], {'axis': '(-1)', 'keepdims': '(True)'}), '(true_label_counts, axis=-1, keepdims=True)\n', (4954, 4997), True, 'import numpy as np\n'), ((6871, 6940), 'numpy.intersect1d', 'np.intersect1d', (['label_mismatch_ids_current', 'self.true_ambiguous_cases'], {}), '(label_mismatch_ids_current, self.true_ambiguous_cases)\n', (6885, 6940), True, 'import numpy as np\n'), ((7857, 7919), 'InnerEyeDataQuality.evaluation.metrics.compute_accuracy', 'compute_accuracy', (['current_label_counts', 'self.true_label_counts'], {}), '(current_label_counts, self.true_label_counts)\n', (7873, 7919), False, 'from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation\n'), ((7975, 8027), 'numpy.sum', 'np.sum', (['current_label_counts'], {'axis': '(-1)', 'keepdims': '(True)'}), '(current_label_counts, axis=-1, keepdims=True)\n', (7981, 8027), True, 'import numpy as np\n'), ((8079, 8140), 'InnerEyeDataQuality.evaluation.metrics.total_variation', 'total_variation', (['self.true_distribution', 'current_distribution'], {}), '(self.true_distribution, current_distribution)\n', (8094, 
8140), False, 'from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation\n'), ((10198, 10230), 'numpy.cumsum', 'np.cumsum', (['sim_stats.num_fetches'], {}), '(sim_stats.num_fetches)\n', (10207, 10230), True, 'import numpy as np\n'), ((10307, 10339), 'numpy.cumsum', 'np.cumsum', (['sim_stats.num_fetches'], {}), '(sim_stats.num_fetches)\n', (10316, 10339), True, 'import numpy as np\n'), ((11751, 11783), 'numpy.cumsum', 'np.cumsum', (['sim_stats.num_fetches'], {}), '(sim_stats.num_fetches)\n', (11760, 11783), True, 'import numpy as np\n'), ((2886, 2924), 'numpy.where', 'np.where', (['true_label_counts[sample_id]'], {}), '(true_label_counts[sample_id])\n', (2894, 2924), True, 'import numpy as np\n')]
|
import copy
import logging
from itertools import chain
import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit import config
from rltoolkit.algorithms.ddpg import DDPG
from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic
logger = logging.getLogger(__name__)
class SAC(DDPG):
def __init__(
self,
alpha_lr: float = config.ALPHA_LR,
alpha: float = config.ALPHA,
tau: float = config.TAU,
pi_update_freq: int = config.PI_UPDATE_FREQ,
act_noise: float = 0,
*args,
**kwargs,
):
f"""Soft Actor-Critic implementation
Args:
alpha_lr (float, optional): Learning rate of the alpha.
Defaults to { config.ALPHA_LR }.
alpha (float, optional): Initial alpha value. Defaults to { config.ALPHA }.
pi_update_freq (int, optional): Frequency of policy updates
(in SAC updates). Defaults to { config.PI_UPDATE_FREQ }.
act_noise (float, optional): Actions noise multiplier.
Defaults to { 0 }.
actor_lr (float, optional): Learning rate of the actor.
Defaults to { config.DDPG_LR }.
critic_lr (float, optional): Learning rate of the critic.
Defaults to { config.DDPG_LR }.
tau (float, optional): Tau coefficient for polyak averaging.
Defaults to { config.TAU }.
update_batch_size (int, optional): Batch size for gradient step.
Defaults to { config.UPDATE_BATCH_SIZE }.
buffer_size (int, optional): Size of replay buffer.
Defaults to { config.BUFFER_SIZE }.
random_frames (int, optional): Number of frames with random actions at
                the beginning. Defaults to { config.RANDOM_FRAMES }.
update_freq (int, optional): Freqency of SAC updates (in frames).
Defaults to { config.UPDATE_FREQ }.
grad_steps (int, optional): Number of SAC updates for one step.
Defaults to { config.GRAD_STEPS }.
env_name (str, optional): Name of the gym environment.
Defaults to { config.ENV_NAME }.
gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.
stats_freq (int, optional): Frequency of logging the progress.
Defaults to { config.STATS_FREQ }.
steps_per_epoch (int, optional): Number of frames used for one algorithm step
(could be higher because batch collection stops when rollout ends).
Defaults to { config.STEPS_PER_EPOCH }.
iterations (int, optional): Number of algorithms iterations.
Defaults to { config.ITERATIONS }.
max_frames (int, optional): Limit of frames for training.
Defaults to { None }.
return_done (Union[int, None], optional): target return, which will stop
training if reached. Defaults to { config.RETURN_DONE }.
log_dir (str, optional): Path for basic logs which includes final model.
Defaults to { config.LOG_DIR }.
use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.
render (bool, optional): Render rollouts to tensorboard.
Defaults to { config.RENDER }.
"""
super().__init__(*args, **kwargs)
self._critic_1 = None
self.critic_1_optimizer = None
self.critic_1_targ = None
self._critic_2 = None
self.critic_2_optimizer = None
self.critic_2_targ = None
self.alpha_lr = alpha_lr
self.alpha = alpha
self.pi_update_freq = pi_update_freq
self.actor = SAC_Actor(self.ob_dim, self.ac_lim, self.ac_dim, self.discrete)
self.critic_1 = SAC_Critic(self.ob_dim, self.ac_dim, self.discrete)
self.critic_2 = SAC_Critic(self.ob_dim, self.ac_dim, self.discrete)
self.loss = {"actor": 0.0, "critic_1": 0.0, "critic_2": 0.0}
new_hparams = {
"hparams/alpha_lr": self.alpha_lr,
"hparams/alpha": self.alpha,
"hparams/pi_update_freq": self.pi_update_freq,
}
self.hparams.update(new_hparams)
self.target_entropy = -torch.prod(
torch.tensor(self.ac_dim, dtype=torch.float32)
).item()
self.log_alpha = torch.tensor(
np.log(self.alpha), requires_grad=True, device=self.device
)
self.alpha_opt = self.opt([self.log_alpha], lr=alpha_lr)
@property
def actor(self):
return self._actor
@actor.setter
def actor(self, model: torch.nn.Module):
self._actor, self.actor_optimizer = self.set_model(model, self.actor_lr)
@property
def critic_1(self):
return self._critic_1
@critic_1.setter
def critic_1(self, model: torch.nn.Module):
self._critic_1, self.critic_1_optimizer = self.set_model(model, self.critic_lr)
self.critic_1_targ = copy.deepcopy(self._critic_1)
@property
def critic_2(self):
return self._critic_2
@critic_2.setter
def critic_2(self, model: torch.nn.Module):
self._critic_2, self.critic_2_optimizer = self.set_model(model, self.critic_lr)
self.critic_2_targ = copy.deepcopy(self._critic_2)
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
sampled_next_action, sampled_next_logprob = self._actor(next_obs)
q1_target = self.critic_1_targ(next_obs, sampled_next_action)
q2_target = self.critic_2_targ(next_obs, sampled_next_action)
q_target = torch.min(q1_target, q2_target)
qfunc_target = reward + self.gamma * (1 - done) * (
q_target - self.alpha * sampled_next_logprob
)
return qfunc_target
def compute_pi_loss(
self,
obs: torch.Tensor,
sampled_action: torch.Tensor,
sampled_logprob: torch.Tensor,
):
"""Loss for the policy
Args:
obs (torch.Tensor): batch of observations
sampled_action (torch.Tensor): actions sampled from policy
sampled_logprob (torch.Tensor): log-probabilities of actions
Returns:
torch.Tensor: policy loss
"""
q1 = self._critic_1(obs, sampled_action)
q2 = self._critic_2(obs, sampled_action)
q = torch.min(q1, q2)
loss = (self.alpha * sampled_logprob - q).mean()
return loss
def update_target_q(self):
"""Update target networks with Polyak averaging
"""
with torch.no_grad():
# Polyak averaging:
critics_params = chain(
self._critic_1.parameters(), self._critic_2.parameters()
)
targets_params = chain(
self.critic_1_targ.parameters(), self.critic_2_targ.parameters()
)
for q_params, targ_params in zip(critics_params, targets_params):
targ_params.data.mul_(1 - self.tau)
targ_params.data.add_((self.tau) * q_params.data)
def compute_alpha_loss(self, sampled_logprob: torch.Tensor):
"""Compute loss for temperature update
Args:
sampled_logprob (torch.Tensor): batch of sampled log-probabilities
from the actor
Returns:
torch.Tensor: loss for temperature (alpha)
"""
# alpha_loss = (
# self.log_alpha * (-sampled_logprob.detach() - self.target_entropy)
# ).mean()
sampled_logprob = sampled_logprob.detach()
alpha_loss = self.log_alpha.exp() * (-sampled_logprob - self.target_entropy)
return alpha_loss.mean()
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
):
"""Soft Actor-Critic update:
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
"""
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-functions by one step
y_q1 = self._critic_1(obs, action)
loss_q1 = F.mse_loss(y_q1, y)
y_q2 = self._critic_2(obs, action)
loss_q2 = F.mse_loss(y_q2, y)
self.loss["critic_1"] = loss_q1.item()
self.loss["critic_2"] = loss_q2.item()
self.critic_1_optimizer.zero_grad()
loss_q1.backward()
self.critic_1_optimizer.step()
self.critic_2_optimizer.zero_grad()
loss_q2.backward()
self.critic_2_optimizer.step()
# Update policy by one step
self._critic_1.eval()
self._critic_2.eval()
sampled_action, sampled_logprob = self._actor(obs)
# if self.stats_logger.frames % (self.update_freq * self.pi_update_freq) == 0:
loss = self.compute_pi_loss(obs, sampled_action, sampled_logprob)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
# Update target networks
self.update_target_q()
self._critic_1.train()
self._critic_2.train()
# Update temperature
alpha_loss = self.compute_alpha_loss(sampled_logprob)
self.alpha_opt.zero_grad()
alpha_loss.backward()
self.alpha_opt.step()
self.alpha = self.log_alpha.exp().item()
def add_tensorboard_logs(self, *args, **kwargs):
super().add_tensorboard_logs(*args, **kwargs)
if self.debug_mode:
self.tensorboard_writer.log_sac_alpha(self.iteration, self.alpha)
def collect_params_dict(self):
params_dict = {}
params_dict["actor"] = self.actor.state_dict()
params_dict["critic_1"] = self.critic_1.state_dict()
params_dict["critic_2"] = self.critic_2.state_dict()
params_dict["obs_mean"] = self.replay_buffer.obs_mean
params_dict["obs_std"] = self.replay_buffer.obs_std
params_dict["min_obs"] = self.replay_buffer.min_obs
params_dict["max_obs"] = self.replay_buffer.max_obs
return params_dict
def apply_params_dict(self, params_dict):
self.actor.load_state_dict(params_dict["actor"])
self.critic_1.load_state_dict(params_dict["critic_1"])
self.critic_2.load_state_dict(params_dict["critic_2"])
self.obs_mean = params_dict["obs_mean"]
self.obs_std = params_dict["obs_std"]
self.min_obs = params_dict["min_obs"]
self.max_obs = params_dict["max_obs"]
self.replay_buffer.obs_mean = self.obs_mean
self.replay_buffer.obs_std = self.obs_std
self.replay_buffer.min_obs = self.min_obs
self.replay_buffer.max_obs = self.max_obs
def save_model(self, save_path=None) -> str:
if self.filename is None and save_path is None:
raise AttributeError
elif save_path is None:
save_path = str(self.log_path)
torch.save(self._actor.state_dict(), save_path + "_actor_model.pt")
torch.save(self._critic_1.state_dict(), save_path + "_critic_1_model.pt")
torch.save(self._critic_2.state_dict(), save_path + "_critic_2_model.pt")
return save_path
if __name__ == "__main__":
#with torch.cuda.device(1):
model = SAC(
env_name="HalfCheetah-v2",
iterations=200,
gamma=0.99,
steps_per_epoch=1000,
stats_freq=5,
test_episodes=2,
update_batch_size=100,
update_freq=50,
grad_steps=50,
# random_frames=10000,
use_gpu=True,
obs_norm=False,
tensorboard_dir="logs_norm",
tensorboard_comment="",
)
model.train()
|
[
"copy.deepcopy",
"torch.tensor",
"numpy.log",
"torch.nn.functional.mse_loss",
"rltoolkit.algorithms.sac.models.SAC_Actor",
"rltoolkit.algorithms.sac.models.SAC_Critic",
"torch.no_grad",
"torch.min",
"logging.getLogger"
] |
[((274, 301), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (291, 301), False, 'import logging\n'), ((4135, 4198), 'rltoolkit.algorithms.sac.models.SAC_Actor', 'SAC_Actor', (['self.ob_dim', 'self.ac_lim', 'self.ac_dim', 'self.discrete'], {}), '(self.ob_dim, self.ac_lim, self.ac_dim, self.discrete)\n', (4144, 4198), False, 'from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic\n'), ((4223, 4274), 'rltoolkit.algorithms.sac.models.SAC_Critic', 'SAC_Critic', (['self.ob_dim', 'self.ac_dim', 'self.discrete'], {}), '(self.ob_dim, self.ac_dim, self.discrete)\n', (4233, 4274), False, 'from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic\n'), ((4299, 4350), 'rltoolkit.algorithms.sac.models.SAC_Critic', 'SAC_Critic', (['self.ob_dim', 'self.ac_dim', 'self.discrete'], {}), '(self.ob_dim, self.ac_dim, self.discrete)\n', (4309, 4350), False, 'from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic\n'), ((5412, 5441), 'copy.deepcopy', 'copy.deepcopy', (['self._critic_1'], {}), '(self._critic_1)\n', (5425, 5441), False, 'import copy\n'), ((5698, 5727), 'copy.deepcopy', 'copy.deepcopy', (['self._critic_2'], {}), '(self._critic_2)\n', (5711, 5727), False, 'import copy\n'), ((7206, 7223), 'torch.min', 'torch.min', (['q1', 'q2'], {}), '(q1, q2)\n', (7215, 7223), False, 'import torch\n'), ((9203, 9222), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_q1', 'y'], {}), '(y_q1, y)\n', (9213, 9222), True, 'from torch.nn import functional as F\n'), ((9284, 9303), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_q2', 'y'], {}), '(y_q2, y)\n', (9294, 9303), True, 'from torch.nn import functional as F\n'), ((4814, 4832), 'numpy.log', 'np.log', (['self.alpha'], {}), '(self.alpha)\n', (4820, 4832), True, 'import numpy as np\n'), ((6166, 6181), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6179, 6181), False, 'import torch\n'), ((6432, 6463), 'torch.min', 'torch.min', (['q1_target', 'q2_target'], {}), '(q1_target, q2_target)\n', (6441, 6463), False, 'import torch\n'), ((7415, 7430), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7428, 7430), False, 'import torch\n'), ((4699, 4745), 'torch.tensor', 'torch.tensor', (['self.ac_dim'], {'dtype': 'torch.float32'}), '(self.ac_dim, dtype=torch.float32)\n', (4711, 4745), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Test Generic Map
"""
import os
import pytest
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import sunpy
import sunpy.map
import sunpy.coordinates
import sunpy.data.test
from sunpy.tests.helpers import figure_test
testpath = sunpy.data.test.rootdir
@pytest.fixture
def aia171_test_map():
return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))
@pytest.fixture
def heliographic_test_map():
return sunpy.map.Map(os.path.join(testpath, 'heliographic_phase_map.fits.gz'))
@pytest.fixture
def aia171_test_map_with_mask(aia171_test_map):
shape = aia171_test_map.data.shape
mask = np.zeros_like(aia171_test_map.data, dtype=bool)
mask[0:shape[0] // 2, 0:shape[1] // 2] = True
return sunpy.map.Map(np.ma.array(
aia171_test_map.data, mask=mask),
aia171_test_map.meta)
@figure_test
def test_plot_aia171(aia171_test_map):
aia171_test_map.plot()
@figure_test
def test_plot_aia171_clip(aia171_test_map):
aia171_test_map.plot(clip_interval=(5., 99.)*u.percent)
@figure_test
def test_peek_aia171(aia171_test_map):
aia171_test_map.peek()
@figure_test
def test_peek_grid_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=True)
@figure_test
def test_peek_grid_spacing_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=(5, 5) * u.deg)
@figure_test
def test_peek_limb_aia171(aia171_test_map):
aia171_test_map.peek(draw_limb=True)
@figure_test
def test_draw_grid_aia171(aia171_test_map):
aia171_test_map.plot()
aia171_test_map.draw_grid(grid_spacing=(30, 40) * u.deg)
@figure_test
def test_peek_grid_limb_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=True, draw_limb=True)
@figure_test
def test_plot_aia171_nowcsaxes(aia171_test_map):
ax = plt.gca()
aia171_test_map.plot(axes=ax)
@figure_test
def test_rectangle_aia171(aia171_test_map):
aia171_test_map.plot()
bottom_left = SkyCoord(
0 * u.arcsec, 0 * u.arcsec, frame=aia171_test_map.coordinate_frame)
w = 100 * u.arcsec
h = 100 * u.arcsec
aia171_test_map.draw_rectangle(bottom_left, w, h)
@figure_test
def test_plot_masked_aia171(aia171_test_map_with_mask):
aia171_test_map_with_mask.plot()
@figure_test
def test_plot_masked_aia171_nowcsaxes(aia171_test_map_with_mask):
ax = plt.gca()
aia171_test_map_with_mask.plot(axes=ax)
@figure_test
def test_plot_aia171_superpixel(aia171_test_map):
aia171_test_map.superpixel((9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_aia171_superpixel_nowcsaxes(aia171_test_map):
ax = plt.gca()
aia171_test_map.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_plot_masked_aia171_superpixel(aia171_test_map_with_mask):
aia171_test_map_with_mask.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_masked_aia171_superpixel_nowcsaxes(aia171_test_map_with_mask):
ax = plt.gca()
aia171_test_map_with_mask.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_draw_contours_aia(aia171_test_map):
aia171_test_map.plot()
aia171_test_map.draw_contours(u.Quantity(np.arange(1, 100, 10), 'percent'))
@figure_test
def test_heliographic_peek(heliographic_test_map):
heliographic_test_map.peek()
@figure_test
def test_heliographic_rectangle(heliographic_test_map):
heliographic_test_map.plot()
bottom = SkyCoord(
60 * u.deg, 50 * u.deg, frame=heliographic_test_map.coordinate_frame)
w = 13 * u.deg
h = 13 * u.deg
heliographic_test_map.draw_rectangle(bottom, w, h, color='cyan')
@figure_test
def test_heliographic_grid_annotations(heliographic_test_map):
heliographic_test_map.plot()
heliographic_test_map.draw_grid(annotate=False)
|
[
"numpy.zeros_like",
"os.path.join",
"numpy.ma.array",
"numpy.arange",
"matplotlib.pyplot.gca",
"astropy.coordinates.SkyCoord"
] |
[((711, 758), 'numpy.zeros_like', 'np.zeros_like', (['aia171_test_map.data'], {'dtype': 'bool'}), '(aia171_test_map.data, dtype=bool)\n', (724, 758), True, 'import numpy as np\n'), ((1876, 1885), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2100), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0 * u.arcsec)', '(0 * u.arcsec)'], {'frame': 'aia171_test_map.coordinate_frame'}), '(0 * u.arcsec, 0 * u.arcsec, frame=aia171_test_map.coordinate_frame)\n', (2032, 2100), False, 'from astropy.coordinates import SkyCoord\n'), ((2408, 2417), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2415, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2697), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2695, 2697), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3079), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3077, 3079), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3644), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(60 * u.deg)', '(50 * u.deg)'], {'frame': 'heliographic_test_map.coordinate_frame'}), '(60 * u.deg, 50 * u.deg, frame=heliographic_test_map.coordinate_frame)\n', (3574, 3644), False, 'from astropy.coordinates import SkyCoord\n'), ((418, 463), 'os.path.join', 'os.path.join', (['testpath', '"""aia_171_level1.fits"""'], {}), "(testpath, 'aia_171_level1.fits')\n", (430, 463), False, 'import os\n'), ((537, 593), 'os.path.join', 'os.path.join', (['testpath', '"""heliographic_phase_map.fits.gz"""'], {}), "(testpath, 'heliographic_phase_map.fits.gz')\n", (549, 593), False, 'import os\n'), ((834, 878), 'numpy.ma.array', 'np.ma.array', (['aia171_test_map.data'], {'mask': 'mask'}), '(aia171_test_map.data, mask=mask)\n', (845, 878), True, 'import numpy as np\n'), ((3315, 3336), 'numpy.arange', 'np.arange', (['(1)', '(100)', '(10)'], {}), '(1, 100, 10)\n', (3324, 3336), True, 'import numpy as np\n')]
|
"""
Contains methods for YOLOv3 models for object detection.
Trained on COCO dataset.
"""
import re
import numpy as np
from collections import defaultdict
from kenning.core.model import ModelWrapper
from kenning.datasets.open_images_dataset import DectObject, compute_iou, Dataset # noqa: E501
import sys
if sys.version_info.minor < 9:
from importlib_resources import path
else:
from importlib.resources import path
from kenning.resources import coco_detection
from pathlib import Path
class TVMDarknetCOCOYOLOV3(ModelWrapper):
def __init__(
self,
modelpath,
dataset,
from_file,
class_names: str = "coco"):
self.thresh = 0.2
self.iouthresh = 0.5
super().__init__(modelpath, dataset, from_file)
        # to support dataproviders, this branch handles dataset-less operation
if self.dataset is None:
self.batch_size = 1
self.classnames = []
if class_names == 'coco':
with path(coco_detection, 'cocov6.classes') as p:
with open(p, 'r') as f:
for line in f:
self.classnames.append(line.split(',')[1].strip())
else:
with Path(class_names) as p:
with open(p, 'r') as f:
for line in f:
self.classnames.append(line.split(',')[1].strip())
else:
self.batch_size = self.dataset.batch_size
self.classnames = self.dataset.classnames
self.numclasses = len(self.classnames)
@classmethod
def form_argparse(cls, no_dataset: bool = False):
parser, group = super().form_argparse(no_dataset)
if no_dataset:
group.add_argument(
'--classes',
help='File containing Open Images class IDs and class names in CSV format to use (can be generated using kenning.scenarios.open_images_classes_extractor) or class type', # noqa: E501
type=str,
default='coco'
)
return parser, group
@classmethod
def from_argparse(
cls,
dataset: Dataset,
args,
from_file: bool = True):
return cls(args.model_path, dataset, from_file, args.classes)
def load_model(self, modelpath):
self.keyparams = {}
self.perlayerparams = defaultdict(list)
keyparamsrgx = re.compile(r'(width|height|classes)=(\d+)')
perlayerrgx = re.compile(r'(mask|anchors|num)=((\d+,?)+)')
with open(self.modelpath.with_suffix('.cfg'), 'r') as config:
for line in config:
line = line.replace(' ', '')
res = keyparamsrgx.match(line)
if res:
self.keyparams[res.group(1)] = int(res.group(2))
continue
res = perlayerrgx.match(line)
if res:
self.perlayerparams[res.group(1)].append(res.group(2))
self.perlayerparams = {
k: [np.array([int(x) for x in s.split(',')]) for s in v]
for k, v in self.perlayerparams.items()
}
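    # Illustrative example (not from the original source): after spaces are
    # stripped, a cfg line such as "anchors=10,13,16,30" is matched by
    # perlayerrgx, so self.perlayerparams['anchors'] gains
    # np.array([10, 13, 16, 30]), while "classes=80" sets
    # self.keyparams['classes'] = 80 via keyparamsrgx.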
def prepare_model(self):
self.load_model(self.modelpath)
def get_input_spec(self):
return {
'data': (
1, 3, self.keyparams['width'], self.keyparams['height']
)
}, 'float32'
def preprocess_input(self, X):
return np.array(X)
def convert_to_dectobject(self, entry):
# array x, y, w, h, classid, score
x1 = entry[0] - entry[2] / 2
x2 = entry[0] + entry[2] / 2
y1 = entry[1] - entry[3] / 2
y2 = entry[1] + entry[3] / 2
return DectObject(
self.classnames[entry[4]],
x1, y1, x2, y2,
entry[5]
)
def parse_outputs(self, data):
# get all bounding boxes with objectness score over given threshold
boxdata = []
for i in range(len(data)):
ids = np.asarray(np.where(data[i][:, 4, :, :] > self.thresh))
ids = np.transpose(ids)
if ids.shape[0] > 0:
ids = np.append([[i]] * ids.shape[0], ids, axis=1)
boxdata.append(ids)
if len(boxdata) > 0:
boxdata = np.concatenate(boxdata)
# each entry in boxdata contains:
# - layer id
# - det id
# - y id
# - x id
bboxes = []
for box in boxdata:
# x and y values from network are coordinates in a chunk
# to get the actual coordinates, we need to compute
# new_coords = (chunk_coords + out_coords) / out_resolution
x = (box[3] + data[box[0]][box[1], 0, box[2], box[3]]) / data[box[0]].shape[2] # noqa: E501
y = (box[2] + data[box[0]][box[1], 1, box[2], box[3]]) / data[box[0]].shape[3] # noqa: E501
# width and height are computed using following formula:
# w = anchor_w * exp(out_w) / input_w
# h = anchor_h * exp(out_h) / input_h
# anchors are computed based on dataset analysis
maskid = self.perlayerparams['mask'][2 - box[0]][box[1]]
anchors = self.perlayerparams['anchors'][box[0]][2 * maskid:2 * maskid + 2] # noqa: E501
w = anchors[0] * np.exp(data[box[0]][box[1], 2, box[2], box[3]]) / self.keyparams['width'] # noqa: E501
h = anchors[1] * np.exp(data[box[0]][box[1], 3, box[2], box[3]]) / self.keyparams['height'] # noqa: E501
# get objectness score
objectness = data[box[0]][box[1], 4, box[2], box[3]]
# get class with highest probability
classid = np.argmax(data[box[0]][box[1], 5:, box[2], box[3]])
            # compute final class score (objectness * class probability)
score = objectness * data[box[0]][box[1], classid + 5, box[2], box[3]] # noqa: E501
# drop the bounding box if final score is below threshold
if score < self.thresh:
continue
bboxes.append([x, y, w, h, classid, score])
# sort the bboxes by score descending
bboxes.sort(key=lambda x: x[5], reverse=True)
bboxes = [self.convert_to_dectobject(b) for b in bboxes]
# group bboxes by class to perform NMS sorting
grouped_bboxes = defaultdict(list)
for item in bboxes:
grouped_bboxes[item.clsname].append(item)
# perform NMS sort to drop overlapping predictions for the same class
cleaned_bboxes = []
for clsbboxes in grouped_bboxes.values():
for i in range(len(clsbboxes)):
# if score equals 0, the bbox is dropped
if clsbboxes[i].score == 0:
continue
# add current bbox to final results
cleaned_bboxes.append(clsbboxes[i])
# look for overlapping bounding boxes with lower probability
# and IoU exceeding specified threshold
for j in range(i + 1, len(clsbboxes)):
if compute_iou(clsbboxes[i], clsbboxes[j]) > self.iouthresh: # noqa: E501
clsbboxes[j] = clsbboxes[j]._replace(score=0)
return cleaned_bboxes
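    # Worked example of the box decoding above (numbers are illustrative only):
    # with a 416-pixel input width, anchor pair (10, 13) and a raw width output
    # of 0.2, the decoded width is w = 10 * exp(0.2) / 416 ~= 0.029, i.e. about
    # 2.9% of the image width, matching w = anchor_w * exp(out_w) / input_w.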
def postprocess_outputs(self, y):
# YOLOv3 has three stages of outputs
# each one contains:
# - real output
# - masks
# - biases
# TVM-based model output provides 12 arrays
# Those are subdivided into three groups containing
# - actual YOLOv3 output
# - masks IDs
# - anchors
# - 6 integers holding number of dects per cluster, actual output
# number of channels, actual output height and width, number of
# classes and unused parameter
# iterate over each group
lastid = 0
outputs = []
for i in range(3):
# first extract the actual output
# each output layer shape follows formula:
# (BS, B * (4 + 1 + C), w / (8 * (i + 1)), h / (8 * (i + 1)))
# BS is the batch size
# w, h are width and height of the input image
# the resolution is reduced over the network, and is 8 times
# smaller in each dimension for each output
# the "pixels" in the outputs are responsible for the chunks of
# image - in the first output each pixel is responsible for 8x8
# squares of input image, the second output covers objects from
# 16x16 chunks etc.
# Each "pixel" can predict up to B bounding boxes.
# Each bounding box is described by its 4 coordinates,
# objectness prediction and per-class predictions
outshape = (
self.batch_size,
len(self.perlayerparams['mask'][i]),
4 + 1 + self.numclasses,
self.keyparams['width'] // (8 * 2 ** i),
self.keyparams['height'] // (8 * 2 ** i)
)
outputs.append(
y[lastid:(lastid + np.prod(outshape))].reshape(outshape)
)
# drop additional info provided in the TVM output
            # since they are all 4-byte values, ignore their contents
lastid += (
np.prod(outshape)
+ len(self.perlayerparams['mask'][i])
+ len(self.perlayerparams['anchors'][i])
+ 6 # layer parameters
)
# change the dimensions so the output format is
# batches layerouts dets params width height
perbatchoutputs = []
for i in range(outputs[0].shape[0]):
perbatchoutputs.append([
outputs[0][i],
outputs[1][i],
outputs[2][i]
])
result = []
# parse the combined outputs for each image in batch, and return result
for out in perbatchoutputs:
result.append(self.parse_outputs(out))
return result
def convert_input_to_bytes(self, inputdata):
return inputdata.tobytes()
def convert_output_from_bytes(self, outputdata):
return np.frombuffer(outputdata, dtype='float32')
def get_framework_and_version(self):
return ('darknet', 'alexeyab')
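# Minimal usage sketch (illustrative only, not part of the original module; it
# assumes the ModelWrapper base class stores `modelpath`, and that a matching
# darknet .cfg file sits next to the hypothetical weights path below):
#
#   wrapper = TVMDarknetCOCOYOLOV3(Path('yolov3.weights'), dataset=None,
#                                   from_file=True, class_names='coco')
#   wrapper.prepare_model()
#   input_spec, dtype = wrapper.get_input_spec()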
|
[
"kenning.datasets.open_images_dataset.DectObject",
"numpy.argmax",
"numpy.frombuffer",
"numpy.transpose",
"numpy.prod",
"collections.defaultdict",
"numpy.append",
"pathlib.Path",
"numpy.where",
"numpy.array",
"importlib.resources.path",
"numpy.exp",
"kenning.datasets.open_images_dataset.compute_iou",
"numpy.concatenate",
"re.compile"
] |
[((2457, 2474), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2468, 2474), False, 'from collections import defaultdict\n'), ((2498, 2541), 're.compile', 're.compile', (['"""(width|height|classes)=(\\\\d+)"""'], {}), "('(width|height|classes)=(\\\\d+)')\n", (2508, 2541), False, 'import re\n'), ((2564, 2608), 're.compile', 're.compile', (['"""(mask|anchors|num)=((\\\\d+,?)+)"""'], {}), "('(mask|anchors|num)=((\\\\d+,?)+)')\n", (2574, 2608), False, 'import re\n'), ((3532, 3543), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3540, 3543), True, 'import numpy as np\n'), ((3795, 3858), 'kenning.datasets.open_images_dataset.DectObject', 'DectObject', (['self.classnames[entry[4]]', 'x1', 'y1', 'x2', 'y2', 'entry[5]'], {}), '(self.classnames[entry[4]], x1, y1, x2, y2, entry[5])\n', (3805, 3858), False, 'from kenning.datasets.open_images_dataset import DectObject, compute_iou, Dataset\n'), ((6445, 6462), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6456, 6462), False, 'from collections import defaultdict\n'), ((10305, 10347), 'numpy.frombuffer', 'np.frombuffer', (['outputdata'], {'dtype': '"""float32"""'}), "(outputdata, dtype='float32')\n", (10318, 10347), True, 'import numpy as np\n'), ((4165, 4182), 'numpy.transpose', 'np.transpose', (['ids'], {}), '(ids)\n', (4177, 4182), True, 'import numpy as np\n'), ((4371, 4394), 'numpy.concatenate', 'np.concatenate', (['boxdata'], {}), '(boxdata)\n', (4385, 4394), True, 'import numpy as np\n'), ((5786, 5837), 'numpy.argmax', 'np.argmax', (['data[box[0]][box[1], 5:, box[2], box[3]]'], {}), '(data[box[0]][box[1], 5:, box[2], box[3]])\n', (5795, 5837), True, 'import numpy as np\n'), ((4102, 4145), 'numpy.where', 'np.where', (['(data[i][:, 4, :, :] > self.thresh)'], {}), '(data[i][:, 4, :, :] > self.thresh)\n', (4110, 4145), True, 'import numpy as np\n'), ((4238, 4282), 'numpy.append', 'np.append', (['([[i]] * ids.shape[0])', 'ids'], {'axis': '(1)'}), '([[i]] * ids.shape[0], ids, axis=1)\n', (4247, 4282), True, 'import numpy as np\n'), ((1033, 1071), 'importlib.resources.path', 'path', (['coco_detection', '"""cocov6.classes"""'], {}), "(coco_detection, 'cocov6.classes')\n", (1037, 1071), False, 'from importlib.resources import path\n'), ((1279, 1296), 'pathlib.Path', 'Path', (['class_names'], {}), '(class_names)\n', (1283, 1296), False, 'from pathlib import Path\n'), ((5407, 5454), 'numpy.exp', 'np.exp', (['data[box[0]][box[1], 2, box[2], box[3]]'], {}), '(data[box[0]][box[1], 2, box[2], box[3]])\n', (5413, 5454), True, 'import numpy as np\n'), ((5524, 5571), 'numpy.exp', 'np.exp', (['data[box[0]][box[1], 3, box[2], box[3]]'], {}), '(data[box[0]][box[1], 3, box[2], box[3]])\n', (5530, 5571), True, 'import numpy as np\n'), ((7192, 7231), 'kenning.datasets.open_images_dataset.compute_iou', 'compute_iou', (['clsbboxes[i]', 'clsbboxes[j]'], {}), '(clsbboxes[i], clsbboxes[j])\n', (7203, 7231), False, 'from kenning.datasets.open_images_dataset import DectObject, compute_iou, Dataset\n'), ((9430, 9447), 'numpy.prod', 'np.prod', (['outshape'], {}), '(outshape)\n', (9437, 9447), True, 'import numpy as np\n'), ((9211, 9228), 'numpy.prod', 'np.prod', (['outshape'], {}), '(outshape)\n', (9218, 9228), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
import os
import time
from zhiqiang.utils.data_parallelism import DataParallelism
# define a simple model
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.random_start = torch.tensor(np.ones([1, 3, 7, 7]), dtype=torch.float32)
self.conv1 = nn.Conv2d(3, 32, 2)
def infer(self, input_batch):
conved = self.conv1(input_batch)
return conved
# define subprocess run function
def process_function(list_data, idx, queue, settings):
"""
"""
model = settings["model"][idx]
name = settings["name"][idx]
print("subprocess id:%d,run:%s" % (os.getpid(), name))
result = model.infer(model.random_start)
print(result.size())
def merge_function(queue, settings):
pass
#
if __name__ == "__main__":
# work
simple_model = SimpleModel()
result = simple_model.infer(simple_model.random_start)
print(result.size()) # torch.Size([1, 32, 6, 6]), as expected
# work
model_0 = SimpleModel()
settings = {"model": [model_0], "name": ["model_0"]}
process_function([], 0, None, settings)
#
model_1 = SimpleModel()
model_2 = SimpleModel()
import multiprocessing as mp
mp.set_start_method("spawn")
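    # The "spawn" start method is used here because fork-based multiprocessing
    # does not reliably inherit torch state (e.g. CUDA contexts or thread
    # pools), so each worker gets a fresh interpreter instead.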
print("main process id:%d" % os.getpid())
#
settings = {"model": [model_1, model_2],
"name": ["model_1", "model_2"]}
data_paral = DataParallelism(2)
data_paral.do_processing([0, 1], process_function, merge_function, settings)
|
[
"os.getpid",
"torch.nn.Conv2d",
"multiprocessing.set_start_method",
"numpy.ones",
"zhiqiang.utils.data_parallelism.DataParallelism"
] |
[((1281, 1309), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (1300, 1309), True, 'import multiprocessing as mp\n'), ((1474, 1492), 'zhiqiang.utils.data_parallelism.DataParallelism', 'DataParallelism', (['(2)'], {}), '(2)\n', (1489, 1492), False, 'from zhiqiang.utils.data_parallelism import DataParallelism\n'), ((370, 389), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(32)', '(2)'], {}), '(3, 32, 2)\n', (379, 389), True, 'import torch.nn as nn\n'), ((305, 326), 'numpy.ones', 'np.ones', (['[1, 3, 7, 7]'], {}), '([1, 3, 7, 7])\n', (312, 326), True, 'import numpy as np\n'), ((1346, 1357), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1355, 1357), False, 'import os\n'), ((704, 715), 'os.getpid', 'os.getpid', ([], {}), '()\n', (713, 715), False, 'import os\n')]
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create TF graphs for calculating log-mel-spectral features.
NOTE: This code is very experimental and will likely change, both in interface
and what it outputs.
The single published method is build_mel_calculation_graph, which
will assemble a TF graph from a provided waveform input vector
through to a (num_frames, frame_width, num_mel_bins) tensor of log-
transformed mel spectrogram patches, suitable for feeding the input
to a typical classifier. All the mel calculation parameters
are available as options, but default to their standard values
(e.g. frame_width=96, frame_hop=10). The input waveform can have
size (None,), meaning it will be specified at run-time.
with tflite_compatible=True, the returned graph is constructed only
from tflite-compatible ops (i.e., it uses matmul for the DFT, and
explicitly unrolled framing). In this case, the input waveform tensor
must have an explicit size at graph-building time.
"""
import fractions
import math
from magenta.music import mfcc_mel
import numpy as np
import tensorflow.compat.v1 as tf
def _stft_magnitude_full_tf(waveform_input, window_length_samples,
hop_length_samples, fft_length):
"""Calculate STFT magnitude (spectrogram) using tf.signal ops."""
stft_magnitude = tf.abs(
tf.signal.stft(
waveform_input,
frame_length=window_length_samples,
frame_step=hop_length_samples,
fft_length=fft_length),
name='magnitude_spectrogram')
return stft_magnitude
def _dft_matrix(dft_length):
"""Calculate the full DFT matrix in numpy."""
omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)
# Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.
return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))
def _naive_rdft(signal_tensor, fft_length):
"""Implement real-input Fourier Transform by matmul."""
# We are right-multiplying by the DFT matrix, and we are keeping
# only the first half ("positive frequencies").
# So discard the second half of rows, but transpose the array for
# right-multiplication.
# The DFT matrix is symmetric, so we could have done it more
# directly, but this reflects our intention better.
complex_dft_matrix_kept_values = _dft_matrix(fft_length)[:(
fft_length // 2 + 1), :].transpose()
real_dft_tensor = tf.constant(
np.real(complex_dft_matrix_kept_values).astype(np.float32),
name='real_dft_matrix')
imag_dft_tensor = tf.constant(
np.imag(complex_dft_matrix_kept_values).astype(np.float32),
name='imaginary_dft_matrix')
signal_frame_length = signal_tensor.shape[-1].value
half_pad = (fft_length - signal_frame_length) // 2
pad_values = tf.concat([
tf.zeros([tf.rank(signal_tensor) - 1, 2], tf.int32),
[[half_pad, fft_length - signal_frame_length - half_pad]]
],
axis=0)
padded_signal = tf.pad(signal_tensor, pad_values)
result_real_part = tf.matmul(padded_signal, real_dft_tensor)
result_imag_part = tf.matmul(padded_signal, imag_dft_tensor)
return result_real_part, result_imag_part
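# Hedged sanity check (not part of the original file): when the incoming frame
# length already equals fft_length (so no centre padding is applied above),
# tf.complex(real, imag) built from _naive_rdft's outputs should agree with
# tf.signal.rfft on the same frames up to float32 round-off, since neither
# applies the 1/sqrt(N) scaling.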
def _fixed_frame(signal, frame_length, frame_step, first_axis=False):
"""tflite-compatible tf.signal.frame for fixed-size input.
Args:
signal: Tensor containing signal(s).
frame_length: Number of samples to put in each frame.
frame_step: Sample advance between successive frames.
first_axis: If true, framing is applied to first axis of tensor; otherwise,
it is applied to last axis.
Returns:
A new tensor where the last axis (or first, if first_axis) of input
signal has been replaced by a (num_frames, frame_length) array of individual
frames where each frame is drawn frame_step samples after the previous one.
Raises:
ValueError: if signal has an undefined axis length. This routine only
supports framing of signals whose shape is fixed at graph-build time.
"""
signal_shape = signal.shape.as_list()
if first_axis:
length_samples = signal_shape[0]
else:
length_samples = signal_shape[-1]
if length_samples <= 0:
raise ValueError('fixed framing requires predefined constant signal length')
num_frames = max(0, 1 + (length_samples - frame_length) // frame_step)
if first_axis:
inner_dimensions = signal_shape[1:]
result_shape = [num_frames, frame_length] + inner_dimensions
gather_axis = 0
else:
outer_dimensions = signal_shape[:-1]
result_shape = outer_dimensions + [num_frames, frame_length]
    # Currently tflite's gather only supports axis==0; that still works here
    # because when the tensor has a single axis, its last axis is axis 0.
gather_axis = len(outer_dimensions)
subframe_length = fractions.gcd(frame_length, frame_step) # pylint: disable=deprecated-method
subframes_per_frame = frame_length // subframe_length
subframes_per_hop = frame_step // subframe_length
num_subframes = length_samples // subframe_length
if first_axis:
trimmed_input_size = [num_subframes * subframe_length] + inner_dimensions
subframe_shape = [num_subframes, subframe_length] + inner_dimensions
else:
trimmed_input_size = outer_dimensions + [num_subframes * subframe_length]
subframe_shape = outer_dimensions + [num_subframes, subframe_length]
subframes = tf.reshape(
tf.slice(
signal,
begin=np.zeros(len(signal_shape), np.int32),
size=trimmed_input_size), subframe_shape)
# frame_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate frame in subframes. For example:
# [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]
frame_selector = np.reshape(
np.arange(num_frames) * subframes_per_hop, [num_frames, 1])
# subframe_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate subframe within a frame. For example:
# [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
subframe_selector = np.reshape(
np.arange(subframes_per_frame), [1, subframes_per_frame])
# Adding the 2 selector tensors together produces a [num_frames,
# subframes_per_frame] tensor of indices to use with tf.gather to select
# subframes from subframes. We then reshape the inner-most subframes_per_frame
# dimension to stitch the subframes together into frames. For example:
# [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
selector = frame_selector + subframe_selector
frames = tf.reshape(
tf.gather(subframes, selector.astype(np.int32), axis=gather_axis),
result_shape)
return frames
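# Worked example for _fixed_frame (illustrative): for a 1-D signal of length 10
# with frame_length=4 and frame_step=2, subframe_length = gcd(4, 2) = 2, the
# signal is reshaped into 5 subframes of 2 samples each, and the selector
# [[0, 1], [1, 2], [2, 3], [3, 4]] gathers them into 4 frames covering samples
# 0-3, 2-5, 4-7 and 6-9, matching tf.signal.frame for fixed-size input.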
def _stft_tflite(signal, frame_length, frame_step, fft_length):
"""tflite-compatible implementation of tf.signal.stft.
Compute the short-time Fourier transform of a 1D input while avoiding tf ops
that are not currently supported in tflite (Rfft, Range, SplitV).
  fft_length must be fixed. A Hann window of length frame_length is always
  applied.
Since fixed (precomputed) framing must be used, signal.shape[-1] must be a
specific value (so "?"/None is not supported).
Args:
signal: 1D tensor containing the time-domain waveform to be transformed.
frame_length: int, the number of points in each Fourier frame.
frame_step: int, the number of samples to advance between successive frames.
fft_length: int, the size of the Fourier transform to apply.
Returns:
Two (num_frames, fft_length) tensors containing the real and imaginary parts
of the short-time Fourier transform of the input signal.
"""
# Make the window be shape (1, frame_length) instead of just frame_length
# in an effort to help the tflite broadcast logic.
window = tf.reshape(
tf.constant(
(0.5 - 0.5 * np.cos(2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))
).astype(np.float32),
name='window'), [1, frame_length])
framed_signal = _fixed_frame(
signal, frame_length, frame_step, first_axis=False)
framed_signal *= window
real_spectrogram, imag_spectrogram = _naive_rdft(framed_signal, fft_length)
return real_spectrogram, imag_spectrogram
def _stft_magnitude_tflite(waveform_input, window_length_samples,
hop_length_samples, fft_length):
"""Calculate spectrogram avoiding tflite incompatible ops."""
real_stft, imag_stft = _stft_tflite(
waveform_input,
frame_length=window_length_samples,
frame_step=hop_length_samples,
fft_length=fft_length)
stft_magnitude = tf.sqrt(
tf.add(real_stft * real_stft, imag_stft * imag_stft),
name='magnitude_spectrogram')
return stft_magnitude
def build_mel_calculation_graph(waveform_input,
sample_rate=16000,
window_length_seconds=0.025,
hop_length_seconds=0.010,
num_mel_bins=64,
lower_edge_hz=125.0,
upper_edge_hz=7500.0,
frame_width=96,
frame_hop=10,
tflite_compatible=False):
"""Build a TF graph to go from waveform to mel spectrum patches.
Args:
waveform_input: 1D Tensor which will be filled with 16 kHz waveform as
tf.float32.
sample_rate: Scalar giving the sampling rate of the waveform. Only 16 kHz
is acceptable at present.
window_length_seconds: Duration of window used for each Fourier transform.
hop_length_seconds: Time shift between successive analysis time frames.
num_mel_bins: The number of mel frequency bins to calculate.
lower_edge_hz: Frequency boundary at bottom edge of mel mapping.
upper_edge_hz: Frequency boundary at top edge of mel mapping.
frame_width: The number of successive time frames to include in each patch.
frame_hop: The frame advance between successive patches.
tflite_compatible: Avoid ops not currently supported in tflite.
Returns:
Tensor holding [num_patches, frame_width, num_mel_bins] log-mel-spectrogram
patches.
"""
# `waveform_input` is a [?] vector as a tensor.
# `magnitude_spectrogram` is a [?, fft_length/2 + 1] tensor of spectrograms.
# Derive the dependent parameters.
window_length_samples = int(round(window_length_seconds * sample_rate))
hop_length_samples = int(round(hop_length_seconds * sample_rate))
fft_length = 2**int(
math.ceil(math.log(window_length_samples) / math.log(2.0)))
if tflite_compatible:
magnitude_spectrogram = _stft_magnitude_tflite(
waveform_input, window_length_samples, hop_length_samples, fft_length)
else:
magnitude_spectrogram = _stft_magnitude_full_tf(
waveform_input, window_length_samples, hop_length_samples, fft_length)
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = magnitude_spectrogram.shape[-1].value
if tflite_compatible:
linear_to_mel_weight_matrix = tf.constant(
mfcc_mel.SpectrogramToMelMatrix(num_mel_bins, num_spectrogram_bins,
sample_rate, lower_edge_hz,
upper_edge_hz).astype(np.float32),
name='linear_to_mel_matrix')
else:
# In full tf, the mel weight matrix is calculated at run time within the
# TF graph. This avoids including a matrix of 64 x 256 float values (i.e.,
# 100 kB or more, depending on the representation) in the exported graph.
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hz,
upper_edge_hz)
mel_spectrogram = tf.matmul(
magnitude_spectrogram,
linear_to_mel_weight_matrix,
name='mel_spectrogram')
log_offset = 0.001
log_mel_spectrogram = tf.log(
mel_spectrogram + log_offset, name='log_mel_spectrogram')
# log_mel_spectrogram is a [?, num_mel_bins] gram.
if tflite_compatible:
features = _fixed_frame(
log_mel_spectrogram,
frame_length=frame_width,
frame_step=frame_hop,
first_axis=True)
else:
features = tf.signal.frame(
log_mel_spectrogram,
frame_length=frame_width,
frame_step=frame_hop,
axis=0)
# features is [num_patches, frame_width, num_mel_bins].
return features
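# Minimal usage sketch (illustrative only; assumes tf.compat.v1 graph mode and
# a fixed-length 1 s / 16 kHz waveform, which tflite_compatible=True requires):
#
#   waveform = tf.placeholder(tf.float32, shape=[16000])
#   patches = build_mel_calculation_graph(waveform, tflite_compatible=True)
#   # with the default arguments, patches has shape
#   # [num_patches, frame_width=96, num_mel_bins=64]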
|
[
"tensorflow.compat.v1.pad",
"fractions.gcd",
"magenta.music.mfcc_mel.SpectrogramToMelMatrix",
"tensorflow.compat.v1.rank",
"tensorflow.compat.v1.signal.linear_to_mel_weight_matrix",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.signal.frame",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.add",
"numpy.imag",
"tensorflow.compat.v1.signal.stft",
"numpy.arange",
"numpy.real",
"math.log"
] |
[((3488, 3521), 'tensorflow.compat.v1.pad', 'tf.pad', (['signal_tensor', 'pad_values'], {}), '(signal_tensor, pad_values)\n', (3494, 3521), True, 'import tensorflow.compat.v1 as tf\n'), ((3543, 3584), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['padded_signal', 'real_dft_tensor'], {}), '(padded_signal, real_dft_tensor)\n', (3552, 3584), True, 'import tensorflow.compat.v1 as tf\n'), ((3606, 3647), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['padded_signal', 'imag_dft_tensor'], {}), '(padded_signal, imag_dft_tensor)\n', (3615, 3647), True, 'import tensorflow.compat.v1 as tf\n'), ((5271, 5310), 'fractions.gcd', 'fractions.gcd', (['frame_length', 'frame_step'], {}), '(frame_length, frame_step)\n', (5284, 5310), False, 'import fractions\n'), ((12182, 12272), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['magnitude_spectrogram', 'linear_to_mel_weight_matrix'], {'name': '"""mel_spectrogram"""'}), "(magnitude_spectrogram, linear_to_mel_weight_matrix, name=\n 'mel_spectrogram')\n", (12191, 12272), True, 'import tensorflow.compat.v1 as tf\n'), ((12332, 12396), 'tensorflow.compat.v1.log', 'tf.log', (['(mel_spectrogram + log_offset)'], {'name': '"""log_mel_spectrogram"""'}), "(mel_spectrogram + log_offset, name='log_mel_spectrogram')\n", (12338, 12396), True, 'import tensorflow.compat.v1 as tf\n'), ((1870, 1994), 'tensorflow.compat.v1.signal.stft', 'tf.signal.stft', (['waveform_input'], {'frame_length': 'window_length_samples', 'frame_step': 'hop_length_samples', 'fft_length': 'fft_length'}), '(waveform_input, frame_length=window_length_samples,\n frame_step=hop_length_samples, fft_length=fft_length)\n', (1884, 1994), True, 'import tensorflow.compat.v1 as tf\n'), ((6516, 6546), 'numpy.arange', 'np.arange', (['subframes_per_frame'], {}), '(subframes_per_frame)\n', (6525, 6546), True, 'import numpy as np\n'), ((9000, 9052), 'tensorflow.compat.v1.add', 'tf.add', (['(real_stft * real_stft)', '(imag_stft * imag_stft)'], {}), '(real_stft * real_stft, imag_stft * imag_stft)\n', (9006, 9052), True, 'import tensorflow.compat.v1 as tf\n'), ((12027, 12147), 'tensorflow.compat.v1.signal.linear_to_mel_weight_matrix', 'tf.signal.linear_to_mel_weight_matrix', (['num_mel_bins', 'num_spectrogram_bins', 'sample_rate', 'lower_edge_hz', 'upper_edge_hz'], {}), '(num_mel_bins, num_spectrogram_bins,\n sample_rate, lower_edge_hz, upper_edge_hz)\n', (12064, 12147), True, 'import tensorflow.compat.v1 as tf\n'), ((12651, 12748), 'tensorflow.compat.v1.signal.frame', 'tf.signal.frame', (['log_mel_spectrogram'], {'frame_length': 'frame_width', 'frame_step': 'frame_hop', 'axis': '(0)'}), '(log_mel_spectrogram, frame_length=frame_width, frame_step=\n frame_hop, axis=0)\n', (12666, 12748), True, 'import tensorflow.compat.v1 as tf\n'), ((6224, 6245), 'numpy.arange', 'np.arange', (['num_frames'], {}), '(num_frames)\n', (6233, 6245), True, 'import numpy as np\n'), ((2328, 2349), 'numpy.arange', 'np.arange', (['dft_length'], {}), '(dft_length)\n', (2337, 2349), True, 'import numpy as np\n'), ((2351, 2372), 'numpy.arange', 'np.arange', (['dft_length'], {}), '(dft_length)\n', (2360, 2372), True, 'import numpy as np\n'), ((2951, 2990), 'numpy.real', 'np.real', (['complex_dft_matrix_kept_values'], {}), '(complex_dft_matrix_kept_values)\n', (2958, 2990), True, 'import numpy as np\n'), ((3080, 3119), 'numpy.imag', 'np.imag', (['complex_dft_matrix_kept_values'], {}), '(complex_dft_matrix_kept_values)\n', (3087, 3119), True, 'import numpy as np\n'), ((10944, 10975), 'math.log', 'math.log', (['window_length_samples'], {}), 
'(window_length_samples)\n', (10952, 10975), False, 'import math\n'), ((10978, 10991), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (10986, 10991), False, 'import math\n'), ((11502, 11616), 'magenta.music.mfcc_mel.SpectrogramToMelMatrix', 'mfcc_mel.SpectrogramToMelMatrix', (['num_mel_bins', 'num_spectrogram_bins', 'sample_rate', 'lower_edge_hz', 'upper_edge_hz'], {}), '(num_mel_bins, num_spectrogram_bins,\n sample_rate, lower_edge_hz, upper_edge_hz)\n', (11533, 11616), False, 'from magenta.music import mfcc_mel\n'), ((3325, 3347), 'tensorflow.compat.v1.rank', 'tf.rank', (['signal_tensor'], {}), '(signal_tensor)\n', (3332, 3347), True, 'import tensorflow.compat.v1 as tf\n'), ((8251, 8288), 'numpy.arange', 'np.arange', (['(0)', '(1.0)', '(1.0 / frame_length)'], {}), '(0, 1.0, 1.0 / frame_length)\n', (8260, 8288), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 01/26/2022
@author: maxcurie
"""
import numpy as np
import csv
from mpi4py import MPI
from Dispersion import VectorFinder_auto_Extensive
from MPI_tools import task_dis
comm=MPI.COMM_WORLD
rank=comm.Get_rank()
size=comm.Get_size()
print('*******rank='+str(rank)+'*************')
if rank==0:
#**********Start of user block***************
path='.'
Output_csv=path+'/0MTM_scan.csv'
nu_list=np.arange(0.1,10.,0.5)
zeff_list=np.arange(1,2.5,0.2)
eta_list=np.arange(0.5,3.,0.2)
shat_list=np.arange(0.02,0.1,0.01)
beta_list=np.arange(0.0005,0.003,0.0003)
ky_list=np.arange(0.01,0.1,0.01)
mu_list=np.arange(0,4.,0.1)
xstar=10.
ModIndex=1 #global dispersion
#**********end of user block****************
with open(Output_csv, 'w', newline='') as csvfile: #clear all and then write a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow(['omega_omega_n','gamma_omega_n',\
'nu','zeff','eta','shat','beta','ky',\
'ModIndex','mu','xstar'])
csvfile.close()
para_list=[]
for nu in nu_list:
for zeff in zeff_list:
for eta in eta_list:
for shat in shat_list:
for beta in beta_list:
for ky in ky_list:
for mu in mu_list:
para_list.append([nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar,Output_csv])
np.random.shuffle(para_list)
task_list = task_dis(size,para_list)
for i in range(size-1):
comm.send(task_list[i],dest=i+1) #sending the data
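    # Rank 0 acts as the dispatcher: task_dis (assumed to split the shuffled
    # parameter list into one chunk per worker) provides the per-rank work
    # lists, and one chunk is sent to each of the (size - 1) worker ranks.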
elif rank!=0:
task_list_rank=comm.recv(source=0) #recieve the data
for para in task_list_rank:
[nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar,Output_csv]=para
w0=VectorFinder_auto_Extensive(nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar)
#w0=0.+0j
omega=np.real(w0)
gamma=np.imag(w0)
print(str(omega)+','+str(gamma)+','+str(nu)+','+str(zeff)+','\
+str(eta)+','+str(shat)+','+str(beta)+','+str(ky)+','\
+str(ModIndex)+','+str(mu)+','+str(xstar))
with open(Output_csv, 'a+', newline='') as csvfile: #adding a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow([ omega,gamma,nu,zeff,eta,shat,beta,ky,\
ModIndex,mu,xstar ])
csvfile.close()
print(para)
|
[
"csv.writer",
"MPI_tools.task_dis",
"numpy.imag",
"numpy.arange",
"Dispersion.VectorFinder_auto_Extensive",
"numpy.real",
"numpy.random.shuffle"
] |
[((455, 480), 'numpy.arange', 'np.arange', (['(0.1)', '(10.0)', '(0.5)'], {}), '(0.1, 10.0, 0.5)\n', (464, 480), True, 'import numpy as np\n'), ((492, 514), 'numpy.arange', 'np.arange', (['(1)', '(2.5)', '(0.2)'], {}), '(1, 2.5, 0.2)\n', (501, 514), True, 'import numpy as np\n'), ((526, 550), 'numpy.arange', 'np.arange', (['(0.5)', '(3.0)', '(0.2)'], {}), '(0.5, 3.0, 0.2)\n', (535, 550), True, 'import numpy as np\n'), ((562, 588), 'numpy.arange', 'np.arange', (['(0.02)', '(0.1)', '(0.01)'], {}), '(0.02, 0.1, 0.01)\n', (571, 588), True, 'import numpy as np\n'), ((601, 633), 'numpy.arange', 'np.arange', (['(0.0005)', '(0.003)', '(0.0003)'], {}), '(0.0005, 0.003, 0.0003)\n', (610, 633), True, 'import numpy as np\n'), ((644, 670), 'numpy.arange', 'np.arange', (['(0.01)', '(0.1)', '(0.01)'], {}), '(0.01, 0.1, 0.01)\n', (653, 670), True, 'import numpy as np\n'), ((681, 703), 'numpy.arange', 'np.arange', (['(0)', '(4.0)', '(0.1)'], {}), '(0, 4.0, 0.1)\n', (690, 703), True, 'import numpy as np\n'), ((1509, 1537), 'numpy.random.shuffle', 'np.random.shuffle', (['para_list'], {}), '(para_list)\n', (1526, 1537), True, 'import numpy as np\n'), ((1554, 1579), 'MPI_tools.task_dis', 'task_dis', (['size', 'para_list'], {}), '(size, para_list)\n', (1562, 1579), False, 'from MPI_tools import task_dis\n'), ((909, 943), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (919, 943), False, 'import csv\n'), ((1857, 1936), 'Dispersion.VectorFinder_auto_Extensive', 'VectorFinder_auto_Extensive', (['nu', 'zeff', 'eta', 'shat', 'beta', 'ky', 'ModIndex', 'mu', 'xstar'], {}), '(nu, zeff, eta, shat, beta, ky, ModIndex, mu, xstar)\n', (1884, 1936), False, 'from Dispersion import VectorFinder_auto_Extensive\n'), ((1962, 1973), 'numpy.real', 'np.real', (['w0'], {}), '(w0)\n', (1969, 1973), True, 'import numpy as np\n'), ((1988, 1999), 'numpy.imag', 'np.imag', (['w0'], {}), '(w0)\n', (1995, 1999), True, 'import numpy as np\n'), ((2306, 2340), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2316, 2340), False, 'import csv\n')]
|
import os, sys
import numpy as np
import time
import glob
import random
import math
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from PIL import Image, ImageDraw  # ImageDraw is needed by plot_bb below
import pickle
import torch as tc
from torchvision import transforms
from torchvision import datasets
#from torchvision.datasets.folder import default_loader
from torch.utils.data import DataLoader, Dataset
from data import get_aug_tforms
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
f.close()
return img
## legacy?
def shuffle_initial_data_order(ld, seed):
n_data = len(ld.dataset)
np.random.seed(seed)
idx_rnd = np.random.permutation(n_data)
##TODO: generalize
if hasattr(ld.dataset, "samples"):
ld.dataset.samples = [ld.dataset.samples[i] for i in idx_rnd]
if hasattr(ld.dataset, "targets"):
ld.dataset.targets = [ld.dataset.targets[i] for i in idx_rnd]
if hasattr(ld.dataset, "imgs"):
ld.dataset.imgs = [ld.dataset.imgs[i] for i in idx_rnd]
if hasattr(ld.dataset, "frames_pair"):
ld.dataset.frames_pair = [ld.dataset.frames_pair[i] for i in idx_rnd]
if hasattr(ld.dataset, "fn"):
ld.dataset.fn = [ld.dataset.fn[i] for i in idx_rnd]
np.random.seed(int(time.time()%2**32))
def shuffle_list(list_ori, seed):
random.seed(seed)
random.shuffle(list_ori)
return list_ori
def split_list(split_ratio, list_ori):
list_split = []
n_start = 0
for i, ratio in enumerate(split_ratio):
n = math.floor(len(list_ori)*ratio)
if i+1 == len(split_ratio):
list_split.append(list_ori[n_start:])
else:
list_split.append(list_ori[n_start:n_start+n])
n_start += n
random.seed(time.time())
return list_split
def get_split_list(split_ratio, root, ext, seed):
fns_train = glob.glob(os.path.join(root, 'train', '**', '**.'+ext)) ##TODO:
fns_val = glob.glob(os.path.join(root, 'val', '**', '**.'+ext))
fns_test = glob.glob(os.path.join(root, 'test', '**', '**.'+ext))
## shuffle list since usually it's sorted
random.seed(seed)
random.shuffle(fns_train)
random.seed(seed)
random.shuffle(fns_val)
random.seed(seed)
random.shuffle(fns_test)
## set splits
fns_split = []
for name, ratio in split_ratio.items():
if ratio is None:
vars()['split_'+name] = vars()['fns_'+name]
else:
fns_split += vars()['fns_'+name]
## random split
random.seed(seed)
random.shuffle(fns_split)
n_start = 0
for name, ratio in split_ratio.items():
if ratio is None:
continue
n = math.floor(len(fns_split)*ratio)
vars()['split_'+name] = fns_split[n_start:n_start+n]
n_start += n
random.seed(time.time())
return {'train': vars()['split_train'], 'val': vars()['split_val'], 'test': vars()['split_test']}
def split_data(split_ratio, data, seed):
## shuffle data
    np.random.seed(seed)
    np.random.shuffle(data)  # numpy shuffle so the seed above takes effect (random.shuffle would ignore it)
np.random.seed(int(time.time()))
## split data
ratio_list = [(name, ratio) for name, ratio in split_ratio.items()]
name_list = [name for name, _ in ratio_list]
n_list = [math.floor(len(data)*ratio) for _, ratio in ratio_list[:-1]]
n_list = n_list + [len(data) - np.sum(n_list)]
data_split = np.split(data, np.cumsum(n_list))[:-1]
return {n: v for n, v in zip(name_list, data_split)}
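# Illustrative example (not from the original source): with 100 rows and
# split_ratio = {'train': 0.8, 'val': 0.1, 'test': 0.1}, n_list becomes
# [80, 10, 10], so np.split returns the contiguous chunks data[:80],
# data[80:90] and data[90:100] after the shuffle.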
# def split_data(data_fns, val_ratio, test_ratio, seed):
# n_data = len(data_fn)
# n_val = round(n_data*val_ratio)
# n_test = round(n_data*test_ratio)
# n_train = n_data - n_val - n_test
# np.random.seed(seed)
# idx_rnd = np.random.permutation(n_data)
# train_fns = data_fns[idx_rnd[:n_train]]
# val_fns = data_fns[idx_rnd[n_train:n_train+n_val]]
# test_fns = data_fns[idx_rnd[n_train+n_val:n_train+n_val+n_test]]
# return train_fns, val_fns, test_fns
def init_loader(dataset_fn, split_list, classes, class_to_idx, domain_label, tforms, rnd, batch_size, num_workers):
dataset = dataset_fn(split_list, classes, class_to_idx, transform=transforms.Compose(tforms), domain_label=domain_label)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers)
return loader
def init_loader_reg(dataset_fn, data_split, tforms, tforms_y, rnd, batch_size, num_workers):
dataset = dataset_fn(data_split, transform_x=transforms.Compose(tforms), transform_y=transforms.Compose(tforms_y))
loader = DataLoader(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers)
return loader
def find_classes(root):
classes = [d.name for s in ['train', 'val', 'test'] for d in os.scandir(os.path.join(root, s)) if d.is_dir()]
classes = list(set(classes))
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def get_class_name(fn):
return fn.split('/')[-2]
def make_dataset(fn_list, class_to_idx):
instances = []
for fn in fn_list:
class_idx = class_to_idx[get_class_name(fn)]
item = fn, class_idx
instances.append(item)
return instances
class JointLoader:
def __init__(self, lds):
self.lds = lds
def __iter__(self):
self.iters = [iter(ld) for ld in self.lds]
self.iter_end = [False for ld in self.lds]
return self
def __next__(self):
x_list, y_list = [], []
for i, it in enumerate(self.iters):
try:
x, y = next(it)
except StopIteration:
self.iter_end[i] = True
if all(self.iter_end):
raise StopIteration
else:
self.iters[i] = iter(self.lds[i])
x, y = next(self.iters[i])
x_list.append(x)
y_list.append(y)
# maintain the same batch size
bs_min = min([o.shape[0] for o in x_list])
x_list = [o[:bs_min] for o in x_list]
x_list = tc.cat(x_list, 0)
y_list = [o[:bs_min] for o in y_list]
y_list = tc.cat(y_list, 0)
return x_list, y_list
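# JointLoader iterates its child loaders in lock-step: a loader that is
# exhausted early is restarted, iteration stops only once every loader has
# been exhausted at least once, and each joint batch is truncated to the
# smallest per-loader batch before the tensors are concatenated along dim 0.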
class DomainData:
def __init__(self, dsld_src, dsld_tar):
self.train = JointLoader([dsld_src.train, dsld_tar.train])
self.val = JointLoader([dsld_src.val, dsld_tar.val])
self.test = JointLoader([dsld_src.test, dsld_tar.test])
class ImageList:
def __init__(self, fn_list, classes, class_to_idx, transform=None, target_transform=None, loader=default_loader, domain_label=None):
self.loader = loader
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.samples = make_dataset(fn_list, class_to_idx)
self.domain_label = domain_label
def __getitem__(self, index):
path, target = self.samples[index]
target = target if self.domain_label is None else self.domain_label
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class ClassificationData:
def __init__(self, root, batch_size,
dataset_fn,
split_ratio,
sample_size,
domain_label,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_dft, tforms_dft_rnd,
ext,
seed,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_dft_rnd if val_rnd else tforms_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_dft_rnd if test_rnd else tforms_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
print("[tforms_train] ", tforms_train)
print("[tforms_val] ", tforms_val)
print("[tforms_test] ", tforms_test)
## splits
split_list = get_split_list(split_ratio, root, ext, seed)
classes, class_to_idx = find_classes(root)
## truncate samples
for name, value in split_list.items():
if sample_size[name] is None:
continue
split_list[name] = value[:sample_size[name]]
## create loaders
self.train = init_loader(dataset_fn, split_list['train'], classes, class_to_idx, domain_label, tforms_train, train_rnd, batch_size, num_workers)
self.val = init_loader(dataset_fn, split_list['val'], classes, class_to_idx, domain_label, tforms_val, val_rnd, batch_size, num_workers)
self.test = init_loader(dataset_fn, split_list['test'], classes, class_to_idx, domain_label, tforms_test, test_rnd, batch_size, num_workers)
class DetectionListDataset:
def __init__(self, split, transform=None, target_transform=None, loader=default_loader, domain_label=None):
self.loader = loader
self.transform = transform
self.target_transform = target_transform
self.samples = [(fn, label) for fn, label in zip(split['fn'], split['label'])]
self.domain_label = domain_label
def __getitem__(self, index):
path, target = self.samples[index]
target = target if self.domain_label is None else self.domain_label
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class DetectionData:
def __init__(self, root, batch_size,
dataset_fn,
data_split,
#split_ratio,
sample_size,
domain_label,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_dft, tforms_dft_rnd,
collate_fn=None,
#ext,
#seed,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_dft_rnd if val_rnd else tforms_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_dft_rnd if test_rnd else tforms_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
print("[tforms_train] ", tforms_train)
print("[tforms_val] ", tforms_val)
print("[tforms_test] ", tforms_test)
# ## splits
# split_list = get_split_list(split_ratio, root, ext, seed)
# classes, class_to_idx = find_classes(root)
## truncate samples
for name, value in data_split.items():
if sample_size[name] is None:
continue
data_split[name] = {k: v[:sample_size[name]] for k, v in value.items()}
## create loaders
dataset = dataset_fn(data_split['train'], transform=transforms.Compose(tforms_train), domain_label=domain_label)
self.train = DataLoader(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=num_workers, collate_fn=collate_fn)
dataset = dataset_fn(data_split['val'], transform=transforms.Compose(tforms_val), domain_label=domain_label)
self.val = DataLoader(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=num_workers, collate_fn=collate_fn)
dataset = dataset_fn(data_split['test'], transform=transforms.Compose(tforms_test), domain_label=domain_label)
self.test = DataLoader(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=num_workers, collate_fn=collate_fn)
# class Data_old:
# def __init__(self, root, batch_size,
# dataset_fn,
# train_rnd, val_rnd, test_rnd,
# train_aug, val_aug, test_aug,
# aug_types,
# num_workers,
# tforms_dft, tforms_dft_rnd,
# seed=0,
# ):
# ## data augmentation tforms
# tforms_aug = get_aug_tforms(aug_types)
# ## tforms for each data split
# tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
# tforms_train += tforms_aug if train_aug else []
# tforms_val = tforms_dft if val_rnd else tforms_dft
# tforms_val += tforms_aug if val_aug else []
# tforms_test = tforms_dft if test_rnd else tforms_dft
# tforms_test += tforms_aug if test_aug else []
# ## create loaders
# subroot = os.path.join(root, "train")
# if os.path.exists(subroot):
# dataset = dataset_fn(subroot, transform=tforms.Compose(tforms_train))
# self.train = DataLoader(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=num_workers)
# shuffle_initial_data_order(self.train, seed)
# else:
# self.train = None
# subroot = os.path.join(root, "val")
# if os.path.exists(subroot):
# dataset = dataset_fn(subroot, transform=tforms.Compose(tforms_val))
# self.val = DataLoader(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=num_workers)
# shuffle_initial_data_order(self.val, seed)
# else:
# self.val = None
# subroot = os.path.join(root, "test")
# if os.path.exists(subroot):
# dataset = dataset_fn(subroot, transform=tforms.Compose(tforms_test))
# self.test = DataLoader(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=num_workers)
# shuffle_initial_data_order(self.test, seed)
# else:
# self.test = None
class ImageDataset(datasets.ImageFolder):
def __init__(self, root, transform, domain_label=None):
super().__init__(root, transform=transform)
self.domain_label = domain_label
def __getitem__(self, index):
sample, target = super().__getitem__(index)
target = target if self.domain_label is None else self.domain_label
return sample, target
class ImageData:
def __init__(self, root, batch_size,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_dft, tforms_dft_rnd,
domain_label=None,
seed=0,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
tforms_train += tforms_aug if train_aug else []
tforms_val = tforms_dft if val_rnd else tforms_dft
tforms_val += tforms_aug if val_aug else []
tforms_test = tforms_dft if test_rnd else tforms_dft
tforms_test += tforms_aug if test_aug else []
## create loaders
#dataset = datasets.ImageFolder(os.path.join(root, "train"), transform=transforms.Compose(tforms_train))
dataset = ImageDataset(os.path.join(root, "train"), transform=transforms.Compose(tforms_train), domain_label=domain_label)
self.train = DataLoader(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=num_workers)
#dataset = datasets.ImageFolder(os.path.join(root, "val"), transform=transforms.Compose(tforms_val))
dataset = ImageDataset(os.path.join(root, "val"), transform=transforms.Compose(tforms_val), domain_label=domain_label)
self.val = DataLoader(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=num_workers)
#dataset = datasets.ImageFolder(os.path.join(root, "test"), transform=transforms.Compose(tforms_test))
dataset = ImageDataset(os.path.join(root, "test"), transform=transforms.Compose(tforms_test), domain_label=domain_label)
self.test = DataLoader(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=num_workers)
## shuffle initial order
shuffle_initial_data_order(self.train, seed)
shuffle_initial_data_order(self.val, seed)
shuffle_initial_data_order(self.test, seed)
##
## regression
##
class RegressionDatasetLight(Dataset):
def __init__(self, data, transform_x, transform_y, label_index=-1):
self.label_index = label_index
self.data = data
self.transform_x = transform_x
self.transform_y = transform_y
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
data_i = self.data[idx]
y = [data_i[self.label_index]]
x = np.delete(data_i, self.label_index)
return self.transform_x(x), self.transform_y(y)
class RegressionDataset(Dataset):
def __init__(self, root, singlefile=False, label_index=-1):
self.singlefile = singlefile
self.label_index = label_index
if self.singlefile:
fn = glob.glob(os.path.join(root, "*.pk"))[0]
self.data = pickle.load(open(fn, 'rb'))
else:
self.fns = glob.glob(os.path.join(root, "*.pk"))
    def __len__(self):
        # self.data is only populated in the single-file case; otherwise one
        # pickle file per example is listed in self.fns.
        if self.singlefile:
            return len(self.data)
        else:
            return len(self.fns)
    def __getitem__(self, idx):
        if self.singlefile:
            data_i = self.data[idx]
            y = data_i[self.label_index]
            x = np.delete(data_i, self.label_index)
            return x, y
        else:
            with open(self.fns[idx], "rb") as f:
                return pickle.load(f)
class RegressionDataLight:
def __init__(self, root, batch_size,
dataset_fn,
split_ratio,
sample_size,
#domain_label,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_x_dft, tforms_x_dft_rnd,
tforms_y_dft, tforms_y_dft_rnd,
#ext,
seed,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_x_dft_rnd if train_rnd else tforms_x_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_x_dft_rnd if val_rnd else tforms_x_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_x_dft_rnd if test_rnd else tforms_x_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
tforms_y_train = tforms_y_dft_rnd if train_rnd else tforms_y_dft
tforms_y_val= tforms_y_dft_rnd if val_rnd else tforms_y_dft
tforms_y_test = tforms_y_dft_rnd if test_rnd else tforms_y_dft
print("[tforms_train] ", tforms_train)
print("[tforms_val] ", tforms_val)
print("[tforms_test] ", tforms_test)
## load data
fn = glob.glob(os.path.join(root, "*.pk"))[0]
data = pickle.load(open(fn, 'rb'))
## splits
data_split = split_data(split_ratio, data, seed)
## truncate samples
for name, value in data_split.items():
if sample_size[name] is None:
continue
data_split[name] = value[:sample_size[name]]
## create loaders
self.train = init_loader_reg(dataset_fn, data_split['train'], tforms_train, tforms_y_train, train_rnd, batch_size, num_workers)
self.val = init_loader_reg(dataset_fn, data_split['val'], tforms_val, tforms_y_val, val_rnd, batch_size, num_workers)
self.test = init_loader_reg(dataset_fn, data_split['test'], tforms_test, tforms_y_test, test_rnd, batch_size, num_workers)
def compute_num_exs(ld, verbose=False):
    n = 0
    t = time.time()
    for x, _ in ld:
        n += x.shape[0]
        if verbose:
            print("[%f sec.] n = %d"%(time.time()-t, n))
            t = time.time()
    return n
def xywh2xyxy(xywh):
    xyxy = xywh.clone()
    if len(xyxy.size()) == 2:
        xyxy[:, 2:] = xywh[:, :2] + xywh[:, 2:]
    else:
        xyxy[2:] = xywh[:2] + xywh[2:]
    return xyxy
def xyxy2xywh(xyxy):
    xywh = xyxy.clone()
    xywh[:, 2:] = xyxy[:, 2:] - xyxy[:, :2]
    return xywh
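# --- illustrative sketch (not part of the original module) ---
# xywh2xyxy and xyxy2xywh (for batched boxes) invert each other: converting
# (x, y, w, h) boxes to corner form and back recovers the original tensor.
def _example_bbox_roundtrip():
    boxes_xywh = tc.tensor([[10.0, 20.0, 30.0, 40.0]])  # one box: x, y, w, h
    boxes_xyxy = xywh2xyxy(boxes_xywh)                 # -> [[10., 20., 40., 60.]]
    assert tc.equal(xyxy2xywh(boxes_xyxy), boxes_xywh)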
def plot_bb(img, bb_xywh, fn=None):
    img_PIL = transforms.ToPILImage()(img)
    draw = ImageDraw.Draw(img_PIL)
    draw.rectangle((*bb_xywh[:2], *(bb_xywh[:2]+bb_xywh[2:])), outline="white", width=2)
    if fn is not None:
        img_PIL.save(fn)
    else:
        return img_PIL
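# --- illustrative sketch (not part of the original module) ---
# plot_bb draws a single (x, y, w, h) box on a CHW image tensor; with fn=None
# it returns the annotated PIL image instead of saving it. The image size and
# box coordinates below are hypothetical.
def _example_plot_bb():
    img = tc.zeros(3, 128, 128)
    bb_xywh = tc.tensor([16.0, 16.0, 64.0, 64.0])
    return plot_bb(img, bb_xywh)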
|
[
"numpy.random.seed",
"numpy.sum",
"torch.utils.data.DataLoader",
"random.shuffle",
"accimage.Image",
"torchvision.transforms.ToPILImage",
"torch.cat",
"time.time",
"PIL.Image.open",
"numpy.cumsum",
"data.get_aug_tforms",
"random.seed",
"torchvision.transforms.Compose",
"pickle.load",
"torchvision.get_image_backend",
"numpy.random.permutation",
"os.path.join",
"numpy.delete"
] |
[((1242, 1262), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1256, 1262), True, 'import numpy as np\n'), ((1277, 1306), 'numpy.random.permutation', 'np.random.permutation', (['n_data'], {}), '(n_data)\n', (1298, 1306), True, 'import numpy as np\n'), ((1951, 1968), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1962, 1968), False, 'import random\n'), ((1973, 1997), 'random.shuffle', 'random.shuffle', (['list_ori'], {}), '(list_ori)\n', (1987, 1997), False, 'import random\n'), ((2748, 2765), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2759, 2765), False, 'import random\n'), ((2770, 2795), 'random.shuffle', 'random.shuffle', (['fns_train'], {}), '(fns_train)\n', (2784, 2795), False, 'import random\n'), ((2800, 2817), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2811, 2817), False, 'import random\n'), ((2822, 2845), 'random.shuffle', 'random.shuffle', (['fns_val'], {}), '(fns_val)\n', (2836, 2845), False, 'import random\n'), ((2850, 2867), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2861, 2867), False, 'import random\n'), ((2872, 2896), 'random.shuffle', 'random.shuffle', (['fns_test'], {}), '(fns_test)\n', (2886, 2896), False, 'import random\n'), ((3157, 3174), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3168, 3174), False, 'import random\n'), ((3179, 3204), 'random.shuffle', 'random.shuffle', (['fns_split'], {}), '(fns_split)\n', (3193, 3204), False, 'import random\n'), ((3638, 3658), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3652, 3658), True, 'import numpy as np\n'), ((3663, 3683), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (3677, 3683), False, 'import random\n'), ((4872, 4957), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers\n )\n', (4882, 4957), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5198, 5283), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers\n )\n', (5208, 5283), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((21443, 21454), 'time.time', 'time.time', ([], {}), '()\n', (21452, 21454), False, 'import time\n'), ((508, 527), 'torchvision.get_image_backend', 'get_image_backend', ([], {}), '()\n', (525, 527), False, 'from torchvision import get_image_backend\n'), ((739, 759), 'accimage.Image', 'accimage.Image', (['path'], {}), '(path)\n', (753, 759), False, 'import accimage\n'), ((1073, 1086), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1083, 1086), False, 'from PIL import Image\n'), ((2391, 2402), 'time.time', 'time.time', ([], {}), '()\n', (2400, 2402), False, 'import time\n'), ((2504, 2550), 'os.path.join', 'os.path.join', (['root', '"""train"""', '"""**"""', "('**.' + ext)"], {}), "(root, 'train', '**', '**.' + ext)\n", (2516, 2550), False, 'import os, sys\n'), ((2583, 2627), 'os.path.join', 'os.path.join', (['root', '"""val"""', '"""**"""', "('**.' + ext)"], {}), "(root, 'val', '**', '**.' + ext)\n", (2595, 2627), False, 'import os, sys\n'), ((2652, 2697), 'os.path.join', 'os.path.join', (['root', '"""test"""', '"""**"""', "('**.' + ext)"], {}), "(root, 'test', '**', '**.' 
+ ext)\n", (2664, 2697), False, 'import os, sys\n'), ((3455, 3466), 'time.time', 'time.time', ([], {}), '()\n', (3464, 3466), False, 'import time\n'), ((6766, 6783), 'torch.cat', 'tc.cat', (['x_list', '(0)'], {}), '(x_list, 0)\n', (6772, 6783), True, 'import torch as tc\n'), ((6847, 6864), 'torch.cat', 'tc.cat', (['y_list', '(0)'], {}), '(y_list, 0)\n', (6853, 6864), True, 'import torch as tc\n'), ((8578, 8603), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (8592, 8603), False, 'from data import get_aug_tforms\n'), ((11440, 11465), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (11454, 11465), False, 'from data import get_aug_tforms\n'), ((12610, 12724), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'train_rnd', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=\n num_workers, collate_fn=collate_fn)\n', (12620, 12724), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((12857, 12969), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'val_rnd', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=\n num_workers, collate_fn=collate_fn)\n', (12867, 12969), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((13105, 13218), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'test_rnd', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=\n num_workers, collate_fn=collate_fn)\n', (13115, 13218), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((16046, 16071), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (16060, 16071), False, 'from data import get_aug_tforms\n'), ((16760, 16851), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'train_rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=\n num_workers)\n', (16770, 16851), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((17102, 17191), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'val_rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=\n num_workers)\n', (17112, 17191), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((17447, 17537), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'test_rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=\n num_workers)\n', (17457, 17537), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((18202, 18237), 'numpy.delete', 'np.delete', (['data_i', 'self.label_index'], {}), '(data_i, self.label_index)\n', (18211, 18237), True, 'import numpy as np\n'), ((19711, 19736), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (19725, 19736), False, 'from data import get_aug_tforms\n'), ((21965, 21988), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (21986, 21988), False, 'from torchvision import transforms\n'), ((3707, 3718), 'time.time', 'time.time', ([], {}), '()\n', (3716, 3718), False, 'import time\n'), ((4024, 4041), 
'numpy.cumsum', 'np.cumsum', (['n_list'], {}), '(n_list)\n', (4033, 4041), True, 'import numpy as np\n'), ((4804, 4830), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms'], {}), '(tforms)\n', (4822, 4830), False, 'from torchvision import transforms\n'), ((5115, 5141), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms'], {}), '(tforms)\n', (5133, 5141), False, 'from torchvision import transforms\n'), ((5155, 5183), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_y'], {}), '(tforms_y)\n', (5173, 5183), False, 'from torchvision import transforms\n'), ((16639, 16666), 'os.path.join', 'os.path.join', (['root', '"""train"""'], {}), "(root, 'train')\n", (16651, 16666), False, 'import os, sys\n'), ((16987, 17012), 'os.path.join', 'os.path.join', (['root', '"""val"""'], {}), "(root, 'val')\n", (16999, 17012), False, 'import os, sys\n'), ((17329, 17355), 'os.path.join', 'os.path.join', (['root', '"""test"""'], {}), "(root, 'test')\n", (17341, 17355), False, 'import os, sys\n'), ((19094, 19127), 'numpy.delete', 'np.delete', (['data', 'self.label_index'], {}), '(data, self.label_index)\n', (19103, 19127), True, 'import numpy as np\n'), ((21592, 21603), 'time.time', 'time.time', ([], {}), '()\n', (21601, 21603), False, 'import time\n'), ((1887, 1898), 'time.time', 'time.time', ([], {}), '()\n', (1896, 1898), False, 'import time\n'), ((3975, 3989), 'numpy.sum', 'np.sum', (['n_list'], {}), '(n_list)\n', (3981, 3989), True, 'import numpy as np\n'), ((5399, 5420), 'os.path.join', 'os.path.join', (['root', 's'], {}), '(root, s)\n', (5411, 5420), False, 'import os, sys\n'), ((12528, 12560), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_train'], {}), '(tforms_train)\n', (12546, 12560), False, 'from torchvision import transforms\n'), ((12779, 12809), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_val'], {}), '(tforms_val)\n', (12797, 12809), False, 'from torchvision import transforms\n'), ((13025, 13056), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_test'], {}), '(tforms_test)\n', (13043, 13056), False, 'from torchvision import transforms\n'), ((16678, 16710), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_train'], {}), '(tforms_train)\n', (16696, 16710), False, 'from torchvision import transforms\n'), ((17024, 17054), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_val'], {}), '(tforms_val)\n', (17042, 17054), False, 'from torchvision import transforms\n'), ((17367, 17398), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_test'], {}), '(tforms_test)\n', (17385, 17398), False, 'from torchvision import transforms\n'), ((18663, 18689), 'os.path.join', 'os.path.join', (['root', '"""*.pk"""'], {}), "(root, '*.pk')\n", (18675, 18689), False, 'import os, sys\n'), ((18979, 18993), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18990, 18993), False, 'import pickle\n'), ((20605, 20631), 'os.path.join', 'os.path.join', (['root', '"""*.pk"""'], {}), "(root, '*.pk')\n", (20617, 20631), False, 'import os, sys\n'), ((18533, 18559), 'os.path.join', 'os.path.join', (['root', '"""*.pk"""'], {}), "(root, '*.pk')\n", (18545, 18559), False, 'import os, sys\n'), ((21557, 21568), 'time.time', 'time.time', ([], {}), '()\n', (21566, 21568), False, 'import time\n')]
|