code | apis | extract_api
---|---|---|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
iris dataset
"""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from qiskit.aqua import MissingOptionalLibraryError
def iris(training_size, test_size, n, plot_data=False):
""" returns iris dataset """
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_iris(return_X_y=True)
sample_train, sample_test, label_train, label_test = \
train_test_split(data, target, test_size=1, random_state=42)
# Now we standardize for gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_test[label_test == k, :])[:test_size]
for k, key in enumerate(class_labels)}
if plot_data:
try:
import matplotlib.pyplot as plt
except ImportError as ex:
raise MissingOptionalLibraryError(
libname='Matplotlib',
name='iris',
pip_install='pip install matplotlib') from ex
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Iris dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
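# A minimal usage sketch (the parameter values below are illustrative, not part of
# the original source): project Iris onto n=2 features for a 2-qubit feature map.
if __name__ == '__main__':
    train_feats, training_input, test_input, labels = iris(
        training_size=20, test_size=10, n=2, plot_data=False)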
|
[
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.decomposition.PCA",
"numpy.append",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.scatter",
"qiskit.aqua.MissingOptionalLibraryError",
"matplotlib.pyplot.title",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.show"
] |
[((908, 943), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (926, 943), False, 'from sklearn import datasets\n'), ((1011, 1071), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(1)', 'random_state': '(42)'}), '(data, target, test_size=1, random_state=42)\n', (1027, 1071), False, 'from sklearn.model_selection import train_test_split\n'), ((1539, 1583), 'numpy.append', 'np.append', (['sample_train', 'sample_test'], {'axis': '(0)'}), '(sample_train, sample_test, axis=0)\n', (1548, 1583), True, 'import numpy as np\n'), ((2550, 2575), 'matplotlib.pyplot.title', 'plt.title', (['"""Iris dataset"""'], {}), "('Iris dataset')\n", (2559, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1171), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1169, 1171), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((1361, 1380), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n'}), '(n_components=n)\n', (1364, 1380), False, 'from sklearn.decomposition import PCA\n'), ((1603, 1624), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', (['(-1, 1)'], {}), '((-1, 1))\n', (1615, 1624), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((2403, 2521), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sample_train[label_train == k, 0][:training_size]', 'sample_train[label_train == k, 1][:training_size]'], {}), '(sample_train[label_train == k, 0][:training_size], sample_train\n [label_train == k, 1][:training_size])\n', (2414, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2308), 'qiskit.aqua.MissingOptionalLibraryError', 'MissingOptionalLibraryError', ([], {'libname': '"""Matplotlib"""', 'name': '"""iris"""', 'pip_install': '"""pip install matplotlib"""'}), "(libname='Matplotlib', name='iris', pip_install=\n 'pip install matplotlib')\n", (2230, 2308), False, 'from qiskit.aqua import MissingOptionalLibraryError\n')]
|
import os
import sys
import random
import datetime
import gym
from gym import spaces
import numpy as np
from env.IDM import IDM
from env.Road import Road
from env.Vehicle import Vehicle
import math
# add sumo/tools into python environment
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
######################################################################
# simulation environments
class LaneChangeEnv(gym.Env):
def __init__(self, id=None, traffic=1, gui=False, seed=None):
# todo check traffic flow density
if traffic == 0:
# average 9 vehicles
self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg'
elif traffic == 2:
# average 19 vehicles
self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg'
else:
# average 14 vehicles
self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg'
        # SUMO command-line arguments must be strings; numeric values are wrapped with str() rather than written as literals like '3.0'
self.sumoBinary = "/usr/local/Cellar/sumo/1.2.0/bin/sumo"
self.sumoCmd = ['-c', self.cfg,
# '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model'
'--lateral-resolution', str(0.8), # using 'Sublane-Model'
'--step-length', str(0.1),
'--default.action-step-length', str(0.1)]
# randomness
if seed is None:
self.sumoCmd += ['--random']
else:
self.sumoCmd += ['--seed', str(seed)]
# gui
if gui is True:
self.sumoBinary += '-gui'
self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True),
'--start', str(True)]
else:
self.sumoCmd = [self.sumoBinary] + self.sumoCmd
traci.start(self.sumoCmd)
self.rd = Road()
self.timestep = 0
self.dt = traci.simulation.getDeltaT()
self.randomseed = None
self.sumoseed = None
self.veh_dict = {}
self.vehID_tuple_all = ()
self.egoID = id
self.ego = None
# self.tgtLane = tgtlane
self.is_success = False
self.collision_num = 0
self.lateral_action = 2
# self.observation = [[0, 0, 0], # ego lane position and speed
# [0, 0, 0], # leader
# [0, 0, 0], # target lane leader
# [0, 0, 0]] # target lane follower
self.observation = np.empty(20)
self.reward = None # (float) : amount of reward returned after previous action
self.done = True # (bool): whether the episode has ended, in which case further step() calls will return undefined results
self.info = {
'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
self.action_space = spaces.Discrete(6)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,))
def update_veh_dict(self, veh_id_tuple):
for veh_id in veh_id_tuple:
if veh_id not in self.veh_dict.keys():
self.veh_dict[veh_id] = Vehicle(veh_id, self.rd)
for veh_id in list(self.veh_dict.keys()):
if veh_id not in veh_id_tuple:
self.veh_dict.pop(veh_id)
for veh_id in list(self.veh_dict.keys()):
self.veh_dict[veh_id].update_info(self.rd, self.veh_dict)
def _updateObservationSingle(self, name, veh):
"""
        :param name: observation block index; 1: original-lane leader, 2: original-lane follower,
                     3: target-lane leader, 4: target-lane follower (block 0, the ego, is filled in updateObservation)
        :param veh: Vehicle instance corresponding to name, or None if that vehicle does not exist
:return:
"""
if veh is not None:
self.observation[name * 4 + 0] = veh.lanePos
self.observation[name * 4 + 1] = veh.speed
self.observation[name * 4 + 2] = veh.pos_lat
self.observation[name * 4 + 3] = veh.acce
else:
self.observation[name * 4 + 0] = self.observation[0] + 300.
self.observation[name * 4 + 1] = self.observation[1]
self.observation[name * 4 + 2] = 4.8
self.observation[name * 4 + 3] = 0
        # todo: check whether this placeholder observation for a missing vehicle is reasonable
def updateObservation(self):
self.observation[0] = self.ego.lanePos
self.observation[1] = self.ego.speed
self.observation[2] = self.ego.pos_lat
self.observation[3] = self.ego.acce
self._updateObservationSingle(1, self.ego.orig_leader)
self._updateObservationSingle(2, self.ego.orig_follower)
self._updateObservationSingle(3, self.ego.trgt_leader)
self._updateObservationSingle(4, self.ego.trgt_follower)
# self.observation = np.array(self.observation).flatten()
# print(self.observation.shape)
def updateReward(self):
return -self.ego.dis2tgtLane
def updateReward2(self):
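        # Composite reward: a comfort term (squared acceleration and squared change of
        # acceleration), an efficiency term (elapsed time, speed relative to the speed
        # limit, distance to the target lane relative to distance to the ramp entrance),
        # and a safety term (TTC- and lateral-overlap penalties w.r.t. the current- and
        # target-lane leaders).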
wc1 = 1
wc2 = 1
wt = 1
ws = 1
we = 1
# reward related to comfort
r_comf = wc1 * self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2
# reward related to efficiency
r_time = - wt * self.timestep
r_speed = ws * (self.ego.speed - self.ego_speedLimit)
r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance
r_effi_all = r_time + r_speed + r_effi
# reward related to safety
w_lateral = 1
w_longi = 1
if self.ego.leaderID is not None:
            # compute time-to-collision (TTC) with the current-lane leader, assuming constant relative acceleration
delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed
delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce
if delta_A == 0:
TTC = - abs(self.ego.leaderDis)/delta_V
else:
TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis)
TTC = TTC/delta_A
if self.lateral_action != 1 and 0 < TTC < 2:
r_long_c = - math.exp(-2*TTC+5)
else:
r_long_c = 0
if self.lateral_action == 0: #abort lane change
alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
assert 0 <= alpha <= 1.1
r_lat_c = -math.exp(-4*alpha+5)
else:
r_lat_c = 0
if self.ego.targetLeaderID is not None:
            # compute time-to-collision (TTC) with the target-lane leader, assuming constant relative acceleration
delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed
delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce
delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos
if delta_A2 == 0:
TTC2 = - abs(delta_D2) / delta_V2
else:
TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)
TTC2 = TTC2 / delta_A2
if self.lateral_action == 1 and 0 < TTC2 < 2:
r_long_t = - math.exp(-2 * TTC2 + 5)
else:
r_long_t = 0
if self.lateral_action == 1: # lane change
alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
assert 0 <= alpha <= 1.1
r_lat_t = -math.exp(-4*alpha+5)
else:
r_lat_t = 0
r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t)
#
# if self.ego.leaderID is not None:
# # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat))
# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
# assert 0 <= alpha <= 1.1
# r_safe_leader = w_lateral * alpha + w_longi * (1 - alpha) * abs(self.ego.leaderDis)
# else:
# r_safe_leader = 0
# if self.ego.targetLeaderID is not None:
# # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat))
# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
# # print('alpha', alpha)
# assert 0 <= alpha <= 1.1
#
# r_safe_tgtleader = w_lateral * alpha + w_longi * (1 - alpha) * abs(
# self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos)
# else:
# r_safe_tgtleader = 0
#
#
# r_safe = r_safe_leader + r_safe_tgtleader
# total reward
r_total = r_comf + r_effi_all + r_safe
return r_total
def is_done(self):
# lane change successfully executed, episode ends, reset env
# todo modify
if self.is_success:
self.done = True
# print('reset on: successfully lane change, dis2targetlane:',
# self.ego.dis2tgtLane)
# too close to ramp entrance
if self.ego.dis2entrance < 10.0:
self.done = True
# print('reset on: too close to ramp entrance, dis2targetlane:',
# self.ego.dis2tgtLane)
# ego vehicle out of env
if self.egoID not in self.vehID_tuple_all:
self.done = True
# print('reset on: self.ego not in env:', self.egoID not in self.vehID_tuple_all)
# collision occurs
self.collision_num = traci.simulation.getCollidingVehiclesNumber()
if self.collision_num > 0:
self.done = True
# print('reset on: self.collision_num:', self.collision_num)
def preStep(self):
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
def step(self, action=2):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, call `reset()` outside env!! to reset this
environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): longitudinal0: action[0] = 1: accelerate
action[0] = -1: decelerate
action[0] = 0: use SUMO default
action[0] = others: acce = 0.0
longitudinal1: action[0] = 0: follow original lane leader
action[0] = 1: follow closer leader
longitudinal2: action[0] = 0: follow original lane leader
action[0] = 1: follow target lane leader
                             **important**: original/target lane leader will not change even though the lateral position of
                                            the ego may change
lateral: action[1] = 1: lane change
action[1] = 0: abort lane change, change back to original lane
action[1] = 2: keep in current lateral position
        Returns:
            observation (np.ndarray), reward (float), done (bool), info (dict) -- as described in __init__
"""
action_longi = action // 3
action_lateral = action % 3
self.lateral_action = action_lateral
# action_longi = action[0]
# action_lateral = action[1]
assert self.done is False, 'self.done is not False'
assert action is not None, 'action is None'
assert self.egoID in self.vehID_tuple_all, 'vehicle not in env'
self.timestep += 1
# lateral control-------------------------
        # episode in progress; 0: change back to original lane; 1: lane change to target lane; 2: keep current
# lane change to target lane
if not self.is_success:
if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd)
# print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth)
# print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth))
# abort lane change, change back to ego's original lane
if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd)
# print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat)
# keep current lateral position
if action_lateral == 2:
self.is_success = self.ego.changeLane(True, -1, self.rd)
# longitudinal control2---------------------
acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi)
# print(acceNext)
vNext = self.ego.speed + acceNext * 0.1
traci.vehicle.setSpeed(self.egoID, vNext)
# update info------------------------------
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
# check if episode ends
self.is_done()
if self.done is True:
self.info['resetFlag'] = True
return self.observation, 0.0, self.done, self.info
else:
self.updateObservation()
self.reward = self.updateReward()
return self.observation, self.reward, self.done, self.info
def seed(self, seed=None):
if seed is None:
self.randomseed = datetime.datetime.now().microsecond
else:
self.randomseed = seed
random.seed(self.randomseed)
def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None):
"""
reset env
:param id: ego vehicle id
:param tfc: int. 0:light; 1:medium; 2:dense
:return: initial observation
"""
self.seed(randomseed)
if sumoseed is None:
self.sumoseed = self.randomseed
traci.close()
self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed)
# continue step until ego appears in env
if self.egoID is not None:
while self.egoID not in self.veh_dict.keys():
                # must ensure safety in preStep
self.preStep()
if self.timestep > 5000:
raise Exception('cannot find ego after 5000 timesteps')
assert self.egoID in self.vehID_tuple_all, "cannot start training while ego is not in env"
self.done = False
self.ego = self.veh_dict[self.egoID]
self.ego.trgt_laneIndex = tlane
self.ego.is_ego = 1
# set ego vehicle speed mode
traci.vehicle.setSpeedMode(self.ego.veh_id, 0)
self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid)
self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID))
self.ego.idm_obj = IDM()
self.ego.idm_obj.__init__(self.ego_speedLimit)
self.ego.update_info(self.rd, self.veh_dict)
self.updateObservation()
return self.observation
return
def close(self):
traci.close()
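# A minimal interaction sketch (the ego id and settings below are hypothetical, and
# SUMO plus the configured .sumo.cfg files must be available):
#     env = LaneChangeEnv()
#     obs = env.reset(egoid='ego.0', tlane=0, tfc=1, is_gui=False)
#     done = False
#     while not done:
#         obs, reward, done, info = env.step(2)  # keep lateral position, follow original leader
#     env.close()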
|
[
"math.sqrt",
"env.IDM.IDM",
"traci.vehicle.getSpeedFactor",
"sys.exit",
"env.Vehicle.Vehicle",
"math.exp",
"sys.path.append",
"traci.simulation.getDeltaT",
"numpy.empty",
"traci.vehicle.setSpeed",
"traci.vehicle.getLaneID",
"env.Road.Road",
"traci.vehicle.setSpeedMode",
"gym.spaces.Discrete",
"traci.simulationStep",
"traci.close",
"traci.simulation.getCollidingVehiclesNumber",
"traci.edge.getLastStepVehicleIDs",
"traci.start",
"os.path.join",
"random.seed",
"gym.spaces.Box",
"datetime.datetime.now"
] |
[((282, 328), 'os.path.join', 'os.path.join', (["os.environ['SUMO_HOME']", '"""tools"""'], {}), "(os.environ['SUMO_HOME'], 'tools')\n", (294, 328), False, 'import os\n'), ((333, 355), 'sys.path.append', 'sys.path.append', (['tools'], {}), '(tools)\n', (348, 355), False, 'import sys\n'), ((387, 446), 'sys.exit', 'sys.exit', (['"""please declare environment variable \'SUMO_HOME\'"""'], {}), '("please declare environment variable \'SUMO_HOME\'")\n', (395, 446), False, 'import sys\n'), ((2097, 2122), 'traci.start', 'traci.start', (['self.sumoCmd'], {}), '(self.sumoCmd)\n', (2108, 2122), False, 'import traci\n'), ((2142, 2148), 'env.Road.Road', 'Road', ([], {}), '()\n', (2146, 2148), False, 'from env.Road import Road\n'), ((2193, 2221), 'traci.simulation.getDeltaT', 'traci.simulation.getDeltaT', ([], {}), '()\n', (2219, 2221), False, 'import traci\n'), ((2800, 2812), 'numpy.empty', 'np.empty', (['(20)'], {}), '(20)\n', (2808, 2812), True, 'import numpy as np\n'), ((3213, 3231), 'gym.spaces.Discrete', 'spaces.Discrete', (['(6)'], {}), '(6)\n', (3228, 3231), False, 'from gym import spaces\n'), ((3265, 3314), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(20,)'}), '(low=-np.inf, high=np.inf, shape=(20,))\n', (3275, 3314), False, 'from gym import spaces\n'), ((9695, 9740), 'traci.simulation.getCollidingVehiclesNumber', 'traci.simulation.getCollidingVehiclesNumber', ([], {}), '()\n', (9738, 9740), False, 'import traci\n'), ((9910, 9932), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (9930, 9932), False, 'import traci\n'), ((9964, 10020), 'traci.edge.getLastStepVehicleIDs', 'traci.edge.getLastStepVehicleIDs', (['self.rd.entranceEdgeID'], {}), '(self.rd.entranceEdgeID)\n', (9996, 10020), False, 'import traci\n'), ((13256, 13297), 'traci.vehicle.setSpeed', 'traci.vehicle.setSpeed', (['self.egoID', 'vNext'], {}), '(self.egoID, vNext)\n', (13278, 13297), False, 'import traci\n'), ((13359, 13381), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (13379, 13381), False, 'import traci\n'), ((13413, 13469), 'traci.edge.getLastStepVehicleIDs', 'traci.edge.getLastStepVehicleIDs', (['self.rd.entranceEdgeID'], {}), '(self.rd.entranceEdgeID)\n', (13445, 13469), False, 'import traci\n'), ((14059, 14087), 'random.seed', 'random.seed', (['self.randomseed'], {}), '(self.randomseed)\n', (14070, 14087), False, 'import random\n'), ((14455, 14468), 'traci.close', 'traci.close', ([], {}), '()\n', (14466, 14468), False, 'import traci\n'), ((15703, 15716), 'traci.close', 'traci.close', ([], {}), '()\n', (15714, 15716), False, 'import traci\n'), ((15196, 15242), 'traci.vehicle.setSpeedMode', 'traci.vehicle.setSpeedMode', (['self.ego.veh_id', '(0)'], {}), '(self.ego.veh_id, 0)\n', (15222, 15242), False, 'import traci\n'), ((15278, 15313), 'traci.vehicle.getSpeedFactor', 'traci.vehicle.getSpeedFactor', (['egoid'], {}), '(egoid)\n', (15306, 15313), False, 'import traci\n'), ((15463, 15468), 'env.IDM.IDM', 'IDM', ([], {}), '()\n', (15466, 15468), False, 'from env.IDM import IDM\n'), ((3488, 3512), 'env.Vehicle.Vehicle', 'Vehicle', (['veh_id', 'self.rd'], {}), '(veh_id, self.rd)\n', (3495, 3512), False, 'from env.Vehicle import Vehicle\n'), ((13966, 13989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13987, 13989), False, 'import datetime\n'), ((6097, 6155), 'math.sqrt', 'math.sqrt', (['(delta_V ** 2 + 2 * delta_A * self.ego.leaderDis)'], {}), '(delta_V ** 2 + 2 * delta_A * self.ego.leaderDis)\n', (6106, 6155), False, 'import 
math\n'), ((6274, 6296), 'math.exp', 'math.exp', (['(-2 * TTC + 5)'], {}), '(-2 * TTC + 5)\n', (6282, 6296), False, 'import math\n'), ((6564, 6588), 'math.exp', 'math.exp', (['(-4 * alpha + 5)'], {}), '(-4 * alpha + 5)\n', (6572, 6588), False, 'import math\n'), ((7117, 7167), 'math.sqrt', 'math.sqrt', (['(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)'], {}), '(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)\n', (7126, 7167), False, 'import math\n'), ((7295, 7318), 'math.exp', 'math.exp', (['(-2 * TTC2 + 5)'], {}), '(-2 * TTC2 + 5)\n', (7303, 7318), False, 'import math\n'), ((7591, 7615), 'math.exp', 'math.exp', (['(-4 * alpha + 5)'], {}), '(-4 * alpha + 5)\n', (7599, 7615), False, 'import math\n'), ((15394, 15429), 'traci.vehicle.getLaneID', 'traci.vehicle.getLaneID', (['self.egoID'], {}), '(self.egoID)\n', (15417, 15429), False, 'import traci\n')]
|
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function
import curses
import sys
from collections import deque
from datetime import datetime
import numpy as np
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import BatteryState, Image, NavSatFix
GPS_FIX_DICT = {
0: ('No GPS', curses.COLOR_RED),
1: ('No fix', curses.COLOR_RED),
2: ('2D lock', curses.COLOR_BLUE),
3: ('3D lock', curses.COLOR_BLUE),
4: ('DGPS', curses.COLOR_MAGENTA),
5: ('RTK float', curses.COLOR_YELLOW),
6: ('RTK fix', curses.COLOR_GREEN)
}
def get_color(color):
return curses.color_pair(color)
def frequency_from_messages(messages):
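    # Estimate the publish rate (Hz) from the header stamps of consecutive buffered
    # messages; returns 0 when no rate can be computed yet.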
durations = []
for i in range(len(messages) - 1):
duration = messages[i + 1].header.stamp - messages[i].header.stamp
durations.append(duration.to_sec())
frequency = 1 / np.mean(durations)
if np.isnan(frequency):
return 0
return frequency
class StatusNode:
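    # Curses-based MAVROS status monitor: subscribes to state, battery, extended state,
    # GPS, pose, diagnostics, setpoint and camera topics and redraws a status panel at
    # the configured rate.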
def __init__(self, screen):
rospy.init_node('status_node', argv=sys.argv)
self.rate = rospy.get_param('~rate', default=1.0)
# Curses setup
self.screen = curses.initscr()
self.rows, self.cols = self.screen.getmaxyx()
height_status = 15
self.status = curses.newwin(height_status, self.cols, 1, 2)
# self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2)
self.lines = 0
self.text = ''
self.screen.keypad(True)
curses.curs_set(False) # Hide cursor
colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN,
curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED,
curses.COLOR_WHITE, curses.COLOR_YELLOW]
# Curses color setup
curses.use_default_colors()
for color in colors:
curses.init_pair(color, color, -1)
# Default variables
self.status_battery_perc = None
self.state = State()
self.state_sub = rospy.Subscriber('mavros/state', State,
callback=self.state_callback,
queue_size=1)
self.battery = BatteryState()
self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState,
callback=self.battery_callback,
queue_size=1)
self.extended = ExtendedState()
self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState,
callback=self.extended_callback,
queue_size=1)
# self.statustext = StatusText()
# self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText,
# callback=self.statustext_callback,
# queue_size=1)
self.gps = NavSatFix()
self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix,
callback=self.gps_callback,
queue_size=1)
self.local_pose = PoseStamped()
self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped,
callback=self.local_pose_callback,
queue_size=1)
self.global_pose = PoseStamped()
self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped,
callback=self.global_pose_callback,
queue_size=1)
self.diagnostics = DiagnosticArray()
self.diagnostic_gps = DiagnosticStatus()
self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray,
callback=self.diagnostics_callback,
queue_size=1)
self.setpoint = PositionTarget()
self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
callback=self.setpoint_callback,
queue_size=1)
self.cameras = ['front', 'right', 'back', 'left']
self.image_subscribers = []
self.images = {c: deque(maxlen=10) for c in self.cameras}
for camera in self.cameras:
topic = f'camera_{camera}/image_raw'
subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback,
callback_args=camera, queue_size=1,
buff_size=2 ** 24)
self.image_subscribers.append(subscriber)
def battery_callback(self, battery_msg):
if battery_msg.location == 'id0':
self.battery = battery_msg
def state_callback(self, state_msg):
self.state = state_msg
def extended_callback(self, extended_msg):
self.extended = extended_msg
def diagnostics_callback(self, diagnostics_msg):
for status in diagnostics_msg.status:
if 'GPS' in status.name:
self.diagnostic_gps = status
def gps_callback(self, gps_msg):
self.gps = gps_msg
def local_pose_callback(self, pose_msg):
self.local_pose = pose_msg
def global_pose_callback(self, pose_msg):
self.global_pose = pose_msg
def setpoint_callback(self, setpoint_msg):
self.setpoint = setpoint_msg
def image_callback(self, image_msg, camera):
self.images[camera].append(image_msg)
def statustext_callback(self, statustext_msg):
screen = self.console
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# time_str = datetime.datetime.fromtimestamp(unix_time)
text = statustext_msg.text
severity = statustext_msg.severity
msg = statustext_msg
severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR]
severity_yellow = [msg.WARNING, msg.NOTICE]
severity_neutral = [msg.INFO, msg.DEBUG]
color = curses.COLOR_CYAN
if severity in severity_red:
color = curses.COLOR_RED
elif severity in severity_yellow:
color = curses.COLOR_YELLOW
elif severity in severity_neutral:
color = curses.COLOR_WHITE
self.text = f'{time_str}: {text} ({color})'
# screen.addstr(self.lines, 0, log, get_color(color))
self.lines += 1
screen.refresh()
def print_status(self):
screen = self.status
screen.clear()
# rospy.loginfo(status)
# print(status)
x_tab = 0
x_indent = 14
row = 0
# Battery
battery_percentage = int(self.battery.percentage * 100)
color = curses.COLOR_CYAN
if battery_percentage > 50:
color = curses.COLOR_GREEN
elif battery_percentage > 25:
color = curses.COLOR_YELLOW
elif battery_percentage > 0:
color = curses.COLOR_RED
status_battery = str(battery_percentage) + '%'
screen.addstr(row, x_tab, 'Battery: ')
screen.addstr(row, x_indent, status_battery, get_color(color))
row += 1
# Armed
if self.state.armed:
color = curses.COLOR_RED
status_armed = 'Yes'
else:
color = curses.COLOR_GREEN
status_armed = 'No'
screen.addstr(row, x_tab, 'Armed: ')
screen.addstr(row, x_indent, status_armed, get_color(color))
row += 1
# Mode
color = curses.COLOR_CYAN
mode = self.state.mode
if mode.startswith('AUTO'):
mode = mode.split('.')[-1]
mode = mode.capitalize()
if mode == 'Offboard':
color = curses.COLOR_RED
else:
color = curses.COLOR_BLUE
if mode == '':
mode = 'None'
elif mode == 'Posctl':
mode = 'Position'
elif mode == 'Rtl':
mode = 'Return'
status_mode = '{}'.format(mode)
screen.addstr(row, x_tab, 'Mode: ')
screen.addstr(row, x_indent, status_mode, get_color(color))
row += 1
# Extended status
if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR:
status_extended = 'Air'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING:
status_extended = 'Landed'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND:
status_extended = 'Ground'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF:
status_extended = 'Takeoff'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED:
status_extended = 'Undefined'
color = curses.COLOR_CYAN
screen.addstr(row, x_tab, 'State: ')
screen.addstr(row, x_indent, status_extended, get_color(color))
row += 1
# GPS info
satellites = 0
fix_type, color = GPS_FIX_DICT[0]
for value in self.diagnostic_gps.values:
if value.key == 'Satellites visible':
satellites = value.value
elif value.key == 'Fix type':
fix_type, color = GPS_FIX_DICT[int(value.value)]
screen.addstr(row, x_tab, 'GPS info: ')
screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color))
row += 2
# GPS pos
latitude = self.gps.latitude
longitude = self.gps.longitude
altitude = round(self.gps.altitude, 2)
status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)'
screen.addstr(row, x_tab, 'GPS pos: ')
screen.addstr(row, x_indent, status_gps)
row += 1
# Local pose
p = self.local_pose.pose.position
q = self.local_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Local pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Global pose
p = self.global_pose.pose.position
q = self.global_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Global pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Setpoint
v = self.setpoint.velocity
vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2)
yaw = int(np.rad2deg(self.setpoint.yaw))
screen.addstr(row, x_tab, 'Setpoint: ')
screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)')
row += 1
# Cameras
freqs = {c: 0 for c in self.cameras}
for cam, messages in self.images.items():
freqs[cam] = frequency_from_messages(messages)
ff, fr, fb, fl = [int(round(v)) for k, v in freqs.items()]
screen.addstr(row, x_tab, 'Cameras: ')
screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])')
row += 1
screen.refresh()
self.screen.refresh()
def run(self):
rate = rospy.Rate(self.rate)
try:
while not rospy.is_shutdown():
self.print_status()
rate.sleep()
except rospy.ROSInterruptException:
curses.nocbreak()
self.screen.keypad(False)
curses.echo()
def curses_main(screen):
StatusNode(screen).run()
def main():
try:
curses.wrapper(curses_main)
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
|
[
"rospy.init_node",
"mavros_msgs.msg.State",
"curses.curs_set",
"rospy.Rate",
"curses.nocbreak",
"numpy.mean",
"collections.deque",
"scipy.spatial.transform.Rotation.from_euler",
"sensor_msgs.msg.NavSatFix",
"curses.init_pair",
"rospy.Subscriber",
"numpy.rad2deg",
"curses.color_pair",
"curses.wrapper",
"mavros_msgs.msg.PositionTarget",
"diagnostic_msgs.msg.DiagnosticStatus",
"rospy.get_param",
"sensor_msgs.msg.BatteryState",
"curses.use_default_colors",
"diagnostic_msgs.msg.DiagnosticArray",
"numpy.isnan",
"curses.newwin",
"curses.initscr",
"mavros_msgs.msg.ExtendedState",
"curses.echo",
"rospy.is_shutdown",
"scipy.spatial.transform.Rotation.from_quat",
"datetime.datetime.now",
"geometry_msgs.msg.PoseStamped"
] |
[((831, 855), 'curses.color_pair', 'curses.color_pair', (['color'], {}), '(color)\n', (848, 855), False, 'import curses\n'), ((1120, 1139), 'numpy.isnan', 'np.isnan', (['frequency'], {}), '(frequency)\n', (1128, 1139), True, 'import numpy as np\n'), ((1094, 1112), 'numpy.mean', 'np.mean', (['durations'], {}), '(durations)\n', (1101, 1112), True, 'import numpy as np\n'), ((1241, 1286), 'rospy.init_node', 'rospy.init_node', (['"""status_node"""'], {'argv': 'sys.argv'}), "('status_node', argv=sys.argv)\n", (1256, 1286), False, 'import rospy\n'), ((1308, 1345), 'rospy.get_param', 'rospy.get_param', (['"""~rate"""'], {'default': '(1.0)'}), "('~rate', default=1.0)\n", (1323, 1345), False, 'import rospy\n'), ((1392, 1408), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (1406, 1408), False, 'import curses\n'), ((1514, 1559), 'curses.newwin', 'curses.newwin', (['height_status', 'self.cols', '(1)', '(2)'], {}), '(height_status, self.cols, 1, 2)\n', (1527, 1559), False, 'import curses\n'), ((1732, 1754), 'curses.curs_set', 'curses.curs_set', (['(False)'], {}), '(False)\n', (1747, 1754), False, 'import curses\n'), ((2022, 2049), 'curses.use_default_colors', 'curses.use_default_colors', ([], {}), '()\n', (2047, 2049), False, 'import curses\n'), ((2217, 2224), 'mavros_msgs.msg.State', 'State', ([], {}), '()\n', (2222, 2224), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((2250, 2337), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/state"""', 'State'], {'callback': 'self.state_callback', 'queue_size': '(1)'}), "('mavros/state', State, callback=self.state_callback,\n queue_size=1)\n", (2266, 2337), False, 'import rospy\n'), ((2442, 2456), 'sensor_msgs.msg.BatteryState', 'BatteryState', ([], {}), '()\n', (2454, 2456), False, 'from sensor_msgs.msg import BatteryState, Image, NavSatFix\n'), ((2484, 2583), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/battery"""', 'BatteryState'], {'callback': 'self.battery_callback', 'queue_size': '(1)'}), "('mavros/battery', BatteryState, callback=self.\n battery_callback, queue_size=1)\n", (2500, 2583), False, 'import rospy\n'), ((2692, 2707), 'mavros_msgs.msg.ExtendedState', 'ExtendedState', ([], {}), '()\n', (2705, 2707), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((2736, 2844), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/extended_state"""', 'ExtendedState'], {'callback': 'self.extended_callback', 'queue_size': '(1)'}), "('mavros/extended_state', ExtendedState, callback=self.\n extended_callback, queue_size=1)\n", (2752, 2844), False, 'import rospy\n'), ((3226, 3237), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', ([], {}), '()\n', (3235, 3237), False, 'from sensor_msgs.msg import BatteryState, Image, NavSatFix\n'), ((3261, 3369), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/global_position/raw/fix"""', 'NavSatFix'], {'callback': 'self.gps_callback', 'queue_size': '(1)'}), "('mavros/global_position/raw/fix', NavSatFix, callback=self\n .gps_callback, queue_size=1)\n", (3277, 3369), False, 'import rospy\n'), ((3472, 3485), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3483, 3485), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3516, 3629), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/local_position/pose"""', 'PoseStamped'], {'callback': 'self.local_pose_callback', 'queue_size': '(1)'}), "('mavros/local_position/pose', PoseStamped, callback=self.\n local_pose_callback, queue_size=1)\n", (3532, 3629), False, 'import rospy\n'), ((3747, 
3760), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3758, 3760), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3792, 3900), 'rospy.Subscriber', 'rospy.Subscriber', (['"""global_position/pose"""', 'PoseStamped'], {'callback': 'self.global_pose_callback', 'queue_size': '(1)'}), "('global_position/pose', PoseStamped, callback=self.\n global_pose_callback, queue_size=1)\n", (3808, 3900), False, 'import rospy\n'), ((4020, 4037), 'diagnostic_msgs.msg.DiagnosticArray', 'DiagnosticArray', ([], {}), '()\n', (4035, 4037), False, 'from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n'), ((4068, 4086), 'diagnostic_msgs.msg.DiagnosticStatus', 'DiagnosticStatus', ([], {}), '()\n', (4084, 4086), False, 'from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n'), ((4118, 4222), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/diagnostics"""', 'DiagnosticArray'], {'callback': 'self.diagnostics_callback', 'queue_size': '(1)'}), "('/diagnostics', DiagnosticArray, callback=self.\n diagnostics_callback, queue_size=1)\n", (4134, 4222), False, 'import rospy\n'), ((4339, 4355), 'mavros_msgs.msg.PositionTarget', 'PositionTarget', ([], {}), '()\n', (4353, 4355), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((4384, 4497), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/setpoint_raw/local"""', 'PositionTarget'], {'callback': 'self.setpoint_callback', 'queue_size': '(1)'}), "('mavros/setpoint_raw/local', PositionTarget, callback=self\n .setpoint_callback, queue_size=1)\n", (4400, 4497), False, 'import rospy\n'), ((12475, 12496), 'rospy.Rate', 'rospy.Rate', (['self.rate'], {}), '(self.rate)\n', (12485, 12496), False, 'import rospy\n'), ((12843, 12870), 'curses.wrapper', 'curses.wrapper', (['curses_main'], {}), '(curses_main)\n', (12857, 12870), False, 'import curses\n'), ((2091, 2125), 'curses.init_pair', 'curses.init_pair', (['color', 'color', '(-1)'], {}), '(color, color, -1)\n', (2107, 2125), False, 'import curses\n'), ((4704, 4720), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (4709, 4720), False, 'from collections import deque\n'), ((4854, 4974), 'rospy.Subscriber', 'rospy.Subscriber', (['topic', 'Image'], {'callback': 'self.image_callback', 'callback_args': 'camera', 'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), '(topic, Image, callback=self.image_callback, callback_args=\n camera, queue_size=1, buff_size=2 ** 24)\n', (4870, 4974), False, 'import rospy\n'), ((10572, 10595), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quaternion'], {}), '(quaternion)\n', (10583, 10595), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11212, 11235), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quaternion'], {}), '(quaternion)\n', (11223, 11235), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11806, 11835), 'numpy.rad2deg', 'np.rad2deg', (['self.setpoint.yaw'], {}), '(self.setpoint.yaw)\n', (11816, 11835), True, 'import numpy as np\n'), ((6087, 6101), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6099, 6101), False, 'from datetime import datetime\n'), ((10641, 10677), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', '[0.0, 0.0, 0.0]'], {}), "('zyx', [0.0, 0.0, 0.0])\n", (10653, 10677), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11281, 11317), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', '[0.0, 0.0, 0.0]'], {}), "('zyx', [0.0, 0.0, 0.0])\n", 
(11293, 11317), True, 'from scipy.spatial.transform import Rotation as R\n'), ((12532, 12551), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (12549, 12551), False, 'import rospy\n'), ((12674, 12691), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (12689, 12691), False, 'import curses\n'), ((12742, 12755), 'curses.echo', 'curses.echo', ([], {}), '()\n', (12753, 12755), False, 'import curses\n')]
|
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
class MinesweeperEnv(Env):
def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
self.grid_shape = grid_shape
self.grid_size = np.prod(grid_shape)
self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
self.n_bombs = min(self.grid_size - 1, self.n_bombs)
self.flaged_bombs = 0
self.flaged_empty = 0
self.max_time = max_time
if impact_size % 2 == 0:
            raise ValueError('impact_size must be an odd number!')
self.impact_size = impact_size
# Define constants
self.HIDDEN = 0
self.REVEAL = 1
self.FLAG = 2
self.BOMB = self.impact_size ** 2
# Setting up gym Env conventions
nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
self.observation_space = spaces.MultiDiscrete(nvec_observation)
nvec_action = np.array(self.grid_shape + (2,))
self.action_space = spaces.MultiDiscrete(nvec_action)
        # Initialize state
self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)
## Setup bombs places
idx = np.indices(self.grid_shape).reshape(2, -1)
bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]
## Place numbers
self.semi_impact_size = (self.impact_size-1)//2
bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
for bombs_id in bombs_ids:
bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]
## Place bombs
self.state[self.bombs_positions + (0,)] = self.BOMB
self.start_time = time()
self.time_left = int(time() - self.start_time)
# Setup rendering
self.pygame_is_init = False
self.chicken = chicken
self.done = False
self.score = 0
def get_observation(self):
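        # Agent view of the board: revealed cells expose their bomb count (0..BOMB),
        # hidden cells map to BOMB + 1 and flagged cells to BOMB + 2.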
observation = copy(self.state[:, :, 1])
revealed = observation == 1
flaged = observation == 2
observation += self.impact_size ** 2 + 1
observation[revealed] = copy(self.state[:, :, 0][revealed])
observation[flaged] -= 1
return observation
def reveal_around(self, coords, reward, done, without_loss=False):
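        # Open the impact-size neighbourhood of `coords`: when the number of flags around
        # the cell matches its count, reveal the remaining hidden neighbours, recursively
        # expand through zero-cells, and end the episode if an unflagged bomb lies in the
        # revealed region.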
if not done:
x_min, x_max, _, _ = self.clip_index(coords[0], 0)
y_min, y_max, _, _ = self.clip_index(coords[1], 1)
region = self.state[x_min:x_max, y_min:y_max, :]
unseen_around = np.sum(region[..., 1] == 0)
if unseen_around == 0:
if not without_loss:
reward -= 0.001
return
flags_around = np.sum(region[..., 1] == 2)
if flags_around == self.state[coords + (0,)]:
unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN)
if np.any(unrevealed_zeros_around):
zeros_coords = np.argwhere(unrevealed_zeros_around)
for zero in zeros_coords:
coord = (x_min + zero[0], y_min + zero[1])
self.state[coord + (1,)] = 1
self.reveal_around(coord, reward, done, without_loss=True)
self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1
unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)
if np.any(unflagged_bombs_around):
self.done = True
reward, done = -1, True
else:
if not without_loss:
reward -= 0.001
def clip_index(self, x, axis):
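        # Clip an impact window centred on x to the grid along `axis`; returns the clipped
        # [x_min, x_max) bounds in grid coordinates together with the matching
        # [dx_min, dx_max) offsets into the impact kernel.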
max_idx = self.grid_shape[axis]
x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1)
dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size
return x_min, x_max, dx_min, dx_max
def step(self, action):
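        # action = (row, column, type) with type 0 -> reveal and type 1 -> toggle flag.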
coords = action[:2]
action_type = action[2] + 1 # 0 -> 1 = reveal; 1 -> 2 = toggle_flag
case_state = self.state[coords + (1,)]
case_content = self.state[coords + (0,)]
NO_BOMBS_AROUND = 0
reward, done = 0, False
self.time_left = self.max_time - time() + self.start_time
if self.time_left <= 0:
score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs
reward, done = score, True
return self.get_observation(), reward, done, {'passed':False}
if action_type == self.REVEAL:
if case_state == self.HIDDEN:
self.state[coords + (1,)] = action_type
if case_content == self.BOMB:
if self.pygame_is_init: self.done = True
reward, done = -1, True
return self.get_observation(), reward, done, {'passed':False}
elif case_content == NO_BOMBS_AROUND:
self.reveal_around(coords, reward, done)
elif case_state == self.REVEAL:
self.reveal_around(coords, reward, done)
reward -= 0.01
else:
reward -= 0.001
self.score += reward
return self.get_observation(), reward, done, {'passed':True}
elif action_type == self.FLAG:
if case_state == self.REVEAL:
reward -= 0.001
else:
flaging = 1
if case_state == self.FLAG:
flaging = -1
self.state[coords + (1,)] = self.HIDDEN
else:
self.state[coords + (1,)] = self.FLAG
if case_content == self.BOMB:
self.flaged_bombs += flaging
else:
self.flaged_empty += flaging
if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:
reward, done = 2 + self.time_left/self.max_time, True
if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done:
reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True
self.score += reward
return self.get_observation(), reward, done, {'passed':False}
def reset(self):
self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken)
return self.get_observation()
def render(self):
if not self.pygame_is_init:
self._init_pygame()
self.pygame_is_init = True
for event in pygame.event.get():
if event.type == pygame.QUIT: # pylint: disable=E1101
pygame.quit() # pylint: disable=E1101
# Plot background
pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))
# Plot grid
for index, state in np.ndenumerate(self.state[..., 1]):
self._plot_block(index, state)
# Plot infos
## Score
score_text = self.score_font.render("SCORE", 1, (255, 10, 10))
score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10))
self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width))
self.window.blit(score, (0.1*self.header_size, 0.8*self.width))
## Time left
time_text = self.num_font.render("TIME", 1, (255, 10, 10))
self.time_left = self.max_time - time() + self.start_time
time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10))
self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width))
self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width))
## Bombs left
bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10))
left_text = self.num_font.render("LEFT", 1, (255, 255, 10))
potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty
potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))
self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width))
self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width))
self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width))
pygame.display.flip()
pygame.time.wait(10)
if self.done:
pygame.time.wait(3000)
@staticmethod
def _get_color(n, max_n):
BLUE_HUE = 0.6
RED_HUE = 0.0
HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3
color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7))
return color
def _plot_block(self, index, state):
position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))
label = None
if state == self.HIDDEN and not self.done:
img_key = 'hidden'
elif state == self.FLAG:
if not self.done:
img_key = 'flag'
else:
content = self.state[index][0]
if content == self.BOMB:
img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken'
else:
img_key = 'misplaced_flag'
else:
content = self.state[index][0]
if content == self.BOMB:
if state == self.HIDDEN:
img_key = 'mine' if not self.chicken else 'chicken'
else:
img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken'
else:
img_key = 'revealed'
label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB))
self.window.blit(self.images[img_key], position)
if label: self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)
def _init_pygame(self):
pygame.init() # pylint: disable=E1101
# Open Pygame window
self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1])
self.BLOCK_SIZE = 32
self.header_size = self.scale_factor * 100
self.origin = np.array([self.header_size, 0])
self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0])
self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)
self.window = pygame.display.set_mode((self.height, self.width))
# Setup font for numbers
num_font_size = 20
self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size))
self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])
self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0])
self.score_font = pygame.font.SysFont("monospace", int(self.scale_factor * 12))
# Load images
def scale_image(img, scale_factor=self.scale_factor):
return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height())))
images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag']
if self.chicken:
images_names += ['chicken', 'exploded_chicken', 'disabled_chicken']
else:
images_names += ['mine', 'exploded_mine', 'disabled_mine']
self.images = {}
for img_name in images_names:
with pkg_resources.path(images, img_name + '.png') as path:
img = pygame.image.load(str(path)).convert()
self.images[img_name] = scale_image(img)
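# A minimal interaction sketch (illustrative values; pygame and the bundled image
# resources are required for rendering):
#     env = MinesweeperEnv(grid_shape=(10, 15))
#     obs = env.reset()
#     obs, reward, done, info = env.step((0, 0, 0))  # (row, column, 0=reveal / 1=flag)
#     env.render()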
|
[
"numpy.prod",
"pygame.init",
"pygame.quit",
"colorsys.hsv_to_rgb",
"numpy.array",
"copy.copy",
"importlib_resources.path",
"pygame.display.set_mode",
"pygame.display.flip",
"numpy.ndenumerate",
"pygame.draw.rect",
"numpy.ones",
"gym.spaces.MultiDiscrete",
"numpy.indices",
"numpy.any",
"time.time",
"numpy.logical_and",
"pygame.event.get",
"pygame.time.wait",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere"
] |
[((567, 586), 'numpy.prod', 'np.prod', (['grid_shape'], {}), '(grid_shape)\n', (574, 586), True, 'import numpy as np\n'), ((1274, 1312), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['nvec_observation'], {}), '(nvec_observation)\n', (1294, 1312), False, 'from gym import Env, spaces\n'), ((1336, 1368), 'numpy.array', 'np.array', (['(self.grid_shape + (2,))'], {}), '(self.grid_shape + (2,))\n', (1344, 1368), True, 'import numpy as np\n'), ((1397, 1430), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['nvec_action'], {}), '(nvec_action)\n', (1417, 1430), False, 'from gym import Env, spaces\n'), ((1479, 1527), 'numpy.zeros', 'np.zeros', (['(self.grid_shape + (2,))'], {'dtype': 'np.uint8'}), '(self.grid_shape + (2,), dtype=np.uint8)\n', (1487, 1527), True, 'import numpy as np\n'), ((1882, 1943), 'numpy.ones', 'np.ones', (['(self.impact_size, self.impact_size)'], {'dtype': 'np.uint8'}), '((self.impact_size, self.impact_size), dtype=np.uint8)\n', (1889, 1943), True, 'import numpy as np\n'), ((2428, 2434), 'time.time', 'time', ([], {}), '()\n', (2432, 2434), False, 'from time import time\n'), ((2687, 2712), 'copy.copy', 'copy', (['self.state[:, :, 1]'], {}), '(self.state[:, :, 1])\n', (2691, 2712), False, 'from copy import copy\n'), ((2866, 2901), 'copy.copy', 'copy', (['self.state[:, :, 0][revealed]'], {}), '(self.state[:, :, 0][revealed])\n', (2870, 2901), False, 'from copy import copy\n'), ((7579, 7597), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7595, 7597), False, 'import pygame\n'), ((7762, 7838), 'pygame.draw.rect', 'pygame.draw.rect', (['self.window', '(60, 56, 53)', '(0, 0, self.height, self.width)'], {}), '(self.window, (60, 56, 53), (0, 0, self.height, self.width))\n', (7778, 7838), False, 'import pygame\n'), ((7888, 7922), 'numpy.ndenumerate', 'np.ndenumerate', (['self.state[..., 1]'], {}), '(self.state[..., 1])\n', (7902, 7922), True, 'import numpy as np\n'), ((9316, 9337), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9335, 9337), False, 'import pygame\n'), ((9346, 9366), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (9362, 9366), False, 'import pygame\n'), ((10977, 10990), 'pygame.init', 'pygame.init', ([], {}), '()\n', (10988, 10990), False, 'import pygame\n'), ((11233, 11264), 'numpy.array', 'np.array', (['[self.header_size, 0]'], {}), '([self.header_size, 0])\n', (11241, 11264), True, 'import numpy as np\n'), ((11474, 11524), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.height, self.width)'], {}), '((self.height, self.width))\n', (11497, 11524), False, 'import pygame\n'), ((1216, 1240), 'numpy.ones', 'np.ones', (['self.grid_shape'], {}), '(self.grid_shape)\n', (1223, 1240), True, 'import numpy as np\n'), ((3272, 3299), 'numpy.sum', 'np.sum', (['(region[..., 1] == 0)'], {}), '(region[..., 1] == 0)\n', (3278, 3299), True, 'import numpy as np\n'), ((3460, 3487), 'numpy.sum', 'np.sum', (['(region[..., 1] == 2)'], {}), '(region[..., 1] == 2)\n', (3466, 3487), True, 'import numpy as np\n'), ((9401, 9423), 'pygame.time.wait', 'pygame.time.wait', (['(3000)'], {}), '(3000)\n', (9417, 9423), False, 'import pygame\n'), ((11748, 11771), 'numpy.array', 'np.array', (['[0.325, 0.15]'], {}), '([0.325, 0.15])\n', (11756, 11771), True, 'import numpy as np\n'), ((11845, 11865), 'numpy.array', 'np.array', (['[0.225, 0]'], {}), '([0.225, 0])\n', (11853, 11865), True, 'import numpy as np\n'), ((1573, 1600), 'numpy.indices', 'np.indices', (['self.grid_shape'], {}), '(self.grid_shape)\n', (1583, 1600), True, 'import 
numpy as np\n'), ((2464, 2470), 'time.time', 'time', ([], {}), '()\n', (2468, 2470), False, 'from time import time\n'), ((3588, 3654), 'numpy.logical_and', 'np.logical_and', (['(region[..., 0] == 0)', '(region[..., 1] == self.HIDDEN)'], {}), '(region[..., 0] == 0, region[..., 1] == self.HIDDEN)\n', (3602, 3654), True, 'import numpy as np\n'), ((3674, 3705), 'numpy.any', 'np.any', (['unrevealed_zeros_around'], {}), '(unrevealed_zeros_around)\n', (3680, 3705), True, 'import numpy as np\n'), ((4183, 4255), 'numpy.logical_and', 'np.logical_and', (['(region[..., 0] == self.BOMB)', '(region[..., 1] != self.FLAG)'], {}), '(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)\n', (4197, 4255), True, 'import numpy as np\n'), ((4275, 4305), 'numpy.any', 'np.any', (['unflagged_bombs_around'], {}), '(unflagged_bombs_around)\n', (4281, 4305), True, 'import numpy as np\n'), ((5152, 5158), 'time.time', 'time', ([], {}), '()\n', (5156, 5158), False, 'from time import time\n'), ((6920, 6984), 'numpy.logical_and', 'np.logical_and', (['(self.state[..., 0] == 9)', '(self.state[..., 1] == 1)'], {}), '(self.state[..., 0] == 9, self.state[..., 1] == 1)\n', (6934, 6984), True, 'import numpy as np\n'), ((7681, 7694), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7692, 7694), False, 'import pygame\n'), ((8451, 8457), 'time.time', 'time', ([], {}), '()\n', (8455, 8457), False, 'from time import time\n'), ((9625, 9657), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['HUE', '(1)', '(0.7)'], {}), '(HUE, 1, 0.7)\n', (9644, 9657), False, 'import colorsys\n'), ((12495, 12540), 'importlib_resources.path', 'pkg_resources.path', (['images', "(img_name + '.png')"], {}), "(images, img_name + '.png')\n", (12513, 12540), True, 'import importlib_resources as pkg_resources\n'), ((3742, 3778), 'numpy.argwhere', 'np.argwhere', (['unrevealed_zeros_around'], {}), '(unrevealed_zeros_around)\n', (3753, 3778), True, 'import numpy as np\n'), ((9799, 9829), 'numpy.array', 'np.array', (['(index[1], index[0])'], {}), '((index[1], index[0]))\n', (9807, 9829), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
def Max(x, y):
if (x >= y):
return x
else:
return y
def polygons_to_mask(img_shape, polygons):
mask = np.zeros(img_shape, dtype=np.uint8)
mask = Image.fromarray(mask)
xy = list(map(tuple, polygons))
ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
mask = np.array(mask, dtype=bool)
return mask
def mask2box(mask):
index = np.argwhere(mask == 1)
rows = index[:, 0]
clos = index[:, 1]
left_top_r = np.min(rows)
left_top_c = np.min(clos)
right_bottom_r = np.max(rows)
right_bottom_c = np.max(clos)
return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]
def get_bbox(points, h, w):
polygons = points
mask = polygons_to_mask([h,w], polygons)
return mask2box(mask)
def get_scale(points, img, lowest):
bbox = get_bbox(points, img.shape[0], img.shape[1])
diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
len_per_pix = plate_diameter/float(diameter)
avg = 0
k = 0
for point in points:
avg += img[point[1]][point[0]]
k += 1
avg = avg/float(k)
depth = lowest - avg
depth_per_pix = plate_depth/depth
return len_per_pix, depth_per_pix
def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
volume = 0.0
bbox = get_bbox(points, img.shape[0], img.shape[1])
points = np.array(points)
shape = points.shape
points = points.reshape(shape[0], 1, shape[1])
for i in range(bbox[0], bbox[2]+1):
for j in range(bbox[1], bbox[3]+1):
if (cv2.pointPolygonTest(points, (i,j), False) >= 0):
volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
return volume
def get_volume(img, json_path):
lowest = np.max(img)
vol_dict = {}
#print(lowest)
len_per_pix = 0.0
depth_per_pix = 0.0
with open(json_path, 'r') as json_file:
data = json.load(json_file)
for shape in data['shapes']:
if (shape['label'] == "plate"):
len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
#print(len_per_pix, depth_per_pix)
break
for shape in data['shapes']:
label = shape['label']
if (label == "plate"):
continue
points = shape['points']
volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
if (label in vol_dict):
vol_dict[label] += volume
else:
vol_dict[label] = volume
return vol_dict
img = cv2.imread("out.png",0)
print(get_volume(img,"test.json"))
|
[
"PIL.Image.fromarray",
"cv2.pointPolygonTest",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"PIL.ImageDraw.Draw",
"numpy.min",
"json.load",
"cv2.imread"
] |
[((2774, 2798), 'cv2.imread', 'cv2.imread', (['"""out.png"""', '(0)'], {}), "('out.png', 0)\n", (2784, 2798), False, 'import cv2\n'), ((302, 337), 'numpy.zeros', 'np.zeros', (['img_shape'], {'dtype': 'np.uint8'}), '(img_shape, dtype=np.uint8)\n', (310, 337), True, 'import numpy as np\n'), ((349, 370), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (364, 370), False, 'from PIL import Image, ImageDraw\n'), ((477, 503), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (485, 503), True, 'import numpy as np\n'), ((554, 576), 'numpy.argwhere', 'np.argwhere', (['(mask == 1)'], {}), '(mask == 1)\n', (565, 576), True, 'import numpy as np\n'), ((640, 652), 'numpy.min', 'np.min', (['rows'], {}), '(rows)\n', (646, 652), True, 'import numpy as np\n'), ((670, 682), 'numpy.min', 'np.min', (['clos'], {}), '(clos)\n', (676, 682), True, 'import numpy as np\n'), ((704, 716), 'numpy.max', 'np.max', (['rows'], {}), '(rows)\n', (710, 716), True, 'import numpy as np\n'), ((738, 750), 'numpy.max', 'np.max', (['clos'], {}), '(clos)\n', (744, 750), True, 'import numpy as np\n'), ((1520, 1536), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1528, 1536), True, 'import numpy as np\n'), ((1945, 1956), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1951, 1956), True, 'import numpy as np\n'), ((2099, 2119), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2108, 2119), False, 'import json\n'), ((411, 431), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (425, 431), False, 'from PIL import Image, ImageDraw\n'), ((1713, 1756), 'cv2.pointPolygonTest', 'cv2.pointPolygonTest', (['points', '(i, j)', '(False)'], {}), '(points, (i, j), False)\n', (1733, 1756), False, 'import cv2\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
        'CNTK\'s CPU version is not fully optimized, '
'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradients as symbolic ops. To hook up with the keras
# model, we create each gradient as a constant placeholder and use this global
# map to keep the mapping from grad placeholder to parameter.
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def learning_phase():
# If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor
return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('CNTK Backend: Set learning phase '
'with value %s is not supported, '
'expected 0 or 1.' % value)
_LEARNING_PHASE = value
def clear_session():
"""Reset learning phase flag for cntk backend.
"""
global _LEARNING_PHASE
global _LEARNING_PHASE_PLACEHOLDER
_LEARNING_PHASE = -1
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def in_train_phase(x, alt, training=None):
global _LEARNING_PHASE
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
    # CNTK currently doesn't support a cond op, so here we use
    # the element_select approach as a workaround. It may have
    # a perf issue; we will resolve it later with the cntk cond op.
if callable(x) and isinstance(x, C.cntk_py.Function) is False:
x = x()
if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
alt = alt()
if training is True:
x._uses_learning_phase = uses_learning_phase
return x
else:
# if _LEARNING_PHASE is static
if isinstance(training, int) or isinstance(training, bool):
result = x if training == 1 or training is True else alt
else:
result = C.element_select(training, x, alt)
result._uses_learning_phase = uses_learning_phase
return result
def in_test_phase(x, alt, training=None):
return in_train_phase(alt, x, training=training)
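# A minimal usage sketch of the learning-phase selection above (illustrative
# only, not part of the original backend): with a dynamic learning phase the
# element_select workaround keeps both branches in the graph, e.g.
#   y = in_train_phase(lambda: dropout(x, 0.5), lambda: x)
# only evaluates the dropout branch when the learning-phase flag is 1.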
def _convert_string_dtype(dtype):
    # cntk only supports float32 and float64
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
else:
        # cntk only runs with float,
        # so try to cast to float to run the model
return np.float32
def _convert_dtype_string(dtype):
if dtype == np.float32:
return 'float32'
elif dtype == np.float64:
return 'float64'
else:
raise ValueError('CNTK Backend: Unsupported dtype: %s. '
'CNTK only supports float32 and '
'float64.' % dtype)
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
"""
if dtype is None:
dtype = floatx()
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
    # we don't support initializing a parameter with a symbolic op, so eval it
    # first as a workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# TODO: remove the conversion when cntk supports int32, int64
# https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
dtype = 'float32' if 'int' in str(dtype) else dtype
v = C.parameter(shape=shape,
init=value,
dtype=dtype,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
v.constraint = constraint
return v
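# Hedged example of the helper above (illustrative only, not part of the
# original backend):
#   w = variable(np.zeros((3, 2)), dtype='float32', name='w')
#   int_shape(w)  # -> (3, 2); backed by a CNTK Parameter under the hood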
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
dims = len(x.shape)
if dims > 0 and x.shape[0] == C.InferredDimension:
dims -= 1
bias_dims = len(bias.shape)
if bias_dims != 1 and bias_dims != dims:
raise ValueError('Unexpected bias dimensions %d, '
'expected 1 or %d dimensions' % (bias_dims, dims))
if dims == 4:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1, 1)
else:
shape = (bias.shape[3],) + bias.shape[:3]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 3:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1)
else:
shape = (bias.shape[2],) + bias.shape[:2]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 2:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1)
else:
shape = (bias.shape[1],) + bias.shape[:1]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, bias.shape[0])
else:
shape = bias.shape
else:
shape = bias.shape
return x + reshape(bias, shape)
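# Hedged illustration of the bias reshape above (illustrative only): with
# channels_first data, a 4D input and a rank-1 bias of shape (channels,) make
# the bias get reshaped to (channels, 1, 1, 1) before the broadcast add, e.g.
#   y = bias_add(conv_out, b, data_format='channels_first')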
def eval(x):
if isinstance(x, C.cntk_py.Function):
return x.eval()
elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):
return x.value
else:
raise ValueError('CNTK Backend: `eval` method on '
'`%s` type is not supported. '
'CNTK only supports `eval` with '
'`Function`, `Constant` or '
'`Parameter`.' % type(x))
def placeholder(
shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
dynamic_axis_num=1):
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension
cntk_shape = [dynamic_dimension if s is None else s for s in shape]
cntk_shape = tuple(cntk_shape)
if dynamic_axis_num > len(cntk_shape):
raise ValueError('CNTK backend: creating placeholder with '
'%d dimension is not supported, at least '
'%d dimensions are needed.'
                         % (len(cntk_shape), dynamic_axis_num))
if name is None:
name = ''
cntk_shape = cntk_shape[dynamic_axis_num:]
x = C.input(
shape=cntk_shape,
dtype=_convert_string_dtype(dtype),
is_sparse=sparse,
name=name)
x._keras_shape = shape
x._uses_learning_phase = False
x._cntk_placeholder = True
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder
def is_keras_tensor(x):
if not is_tensor(x):
raise ValueError('Unexpectedly found an instance of type `' +
str(type(x)) + '`. '
'Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
def is_tensor(x):
return isinstance(x, (C.variables.Constant,
C.variables.Variable,
C.variables.Parameter,
C.ops.functions.Function))
def shape(x):
shape = list(int_shape(x))
num_dynamic = _get_dynamic_axis_num(x)
non_dyn_shape = []
for i in range(len(x.shape)):
if shape[i + num_dynamic] is None:
non_dyn_shape.append(x.shape[i])
else:
non_dyn_shape.append(shape[i + num_dynamic])
return shape[:num_dynamic] + non_dyn_shape
def is_sparse(tensor):
return tensor.is_sparse
def int_shape(x):
if hasattr(x, '_keras_shape'):
return x._keras_shape
shape = x.shape
if hasattr(x, 'dynamic_axes'):
dynamic_shape = [None for a in x.dynamic_axes]
shape = tuple(dynamic_shape) + shape
return shape
def ndim(x):
shape = int_shape(x)
return len(shape)
def _prepare_name(name, default):
prefix = '_'.join(NAME_SCOPE_STACK)
if name is None or name == '':
return prefix + '/' + default
return prefix + '/' + name
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = floatx()
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = C.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = const.shape
const._uses_learning_phase = False
return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
# use numpy workaround now
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
np.random.seed(seed)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
size = 1
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
size *= _
binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
return variable(value=binomial, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high,
dtype=None, name=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
scale = (high - low) / 2
p = C.parameter(
shape,
init=C.initializer.uniform(
scale,
seed=seed),
dtype=dtype,
name=name)
return variable(value=p.value + low + scale)
def random_normal_variable(
shape,
mean,
scale,
dtype=None,
name=None,
seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
return C.parameter(
shape=shape,
init=C.initializer.normal(
scale=scale,
seed=seed),
dtype=dtype,
name=name)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
    # TODO: mean and stddev are not actually applied here (scale is fixed to
    # 1.0 and random_normal_variable ignores its mean argument)
return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
return C.parameter(
shape, init=C.initializer.truncated_normal(
stddev, seed=seed), dtype=dtype)
def dtype(x):
return _convert_dtype_string(x.dtype)
def zeros(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)
def ones(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.ones(shape, ctype), dtype=dtype, name=name)
def eye(size, dtype=None, name=None):
if dtype is None:
dtype = floatx()
return variable(np.eye(size), dtype, name)
def zeros_like(x, dtype=None, name=None):
return x * 0
def ones_like(x, dtype=None, name=None):
return zeros_like(x) + 1
def count_params(x):
for _ in x.shape:
if _ == C.InferredDimension or _ == C.FreeDimension:
raise ValueError('CNTK backend: `count_params` with dynamic '
'shape is not supported. Please provide '
'fixed dimension instead of `None`.')
return np.prod(int_shape(x))
def cast(x, dtype):
    # cntk calculates everything in float, so we don't need a cast from bool / int
return x
def dot(x, y):
if len(x.shape) > 2 or len(y.shape) > 2:
y_shape = int_shape(y)
if len(y_shape) > 2:
permutation = [len(y_shape) - 2]
permutation += list(range(len(y_shape) - 2))
permutation += [len(y_shape) - 1]
y = C.transpose(y, perm=permutation)
return C.times(x, y, len(y_shape) - 1)
else:
return C.times(x, y)
def batch_dot(x, y, axes=None):
x_shape = int_shape(x)
y_shape = int_shape(y)
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [len(x_shape) - 1, len(y_shape) - 2]
if b_any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
if len(x_shape) == 2 and len(y_shape) == 2:
if axes[0] == axes[1]:
result = sum(x * y, axis=axes[0], keepdims=True)
return result if axes[0] == 1 else transpose(result)
else:
return sum(x * transpose(y), axis=axes[0], keepdims=True)
else:
if len(y_shape) == 2:
y = expand_dims(y)
normalized_axis = []
normalized_axis.append(_normalize_axis(axes[0], x)[0])
normalized_axis.append(_normalize_axis(axes[1], y)[0])
# transpose
i = normalized_axis[0]
while i < len(x.shape) - 1:
x = C.swapaxes(x, i, i + 1)
i += 1
i = normalized_axis[1]
while i > 0:
y = C.swapaxes(y, i, i - 1)
i -= 1
result = C.times(x, y, output_rank=(len(y.shape) - 1)
if len(y.shape) > 1 else 1)
if len(y_shape) == 2:
result = squeeze(result, -1)
return result
def transpose(x):
return C.swapaxes(x, 0, 1)
def gather(reference, indices):
    # There is a bug in the cntk gather op which may cause a crash.
    # A fix has been made but did not make it into the CNTK 2.1 release.
    # Will switch to the gather op in the next release.
if _get_cntk_version() >= 2.2:
return C.ops.gather(reference, indices)
else:
num_classes = reference.shape[0]
one_hot_matrix = C.ops.one_hot(indices, num_classes)
return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1)
def _remove_dims(x, axis, keepdims=False):
if keepdims is False and isinstance(axis, list):
# sequence axis is removed by default, so don't need reshape on it
reduce_axes = []
for a in axis:
if isinstance(a, C.Axis) is False:
reduce_axes.append(a)
return _reshape_dummy_dim(x, reduce_axes)
else:
if isinstance(axis, list):
has_seq = False
for a in axis:
if isinstance(a, C.Axis):
has_seq = True
break
if has_seq:
nones = _get_dynamic_axis_num(x)
x = expand_dims(x, nones)
return x
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_max')
return _remove_dims(output, axis, keepdims)
def min(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_min')
return _remove_dims(output, axis, keepdims)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_sum')
return _remove_dims(output, axis, keepdims)
def prod(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_prod')
return _remove_dims(output, axis, keepdims)
def logsumexp(x, axis=None, keepdims=False):
return log(sum(exp(x), axis=axis, keepdims=keepdims))
def var(x, axis=None, keepdims=False):
m = mean(x, axis, keepdims=True)
devs_squared = C.square(x - m)
return mean(devs_squared, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
return C.sqrt(var(x, axis=axis, keepdims=keepdims))
def expand_dims(x, axis=-1):
shape = list(int_shape(x))
nones = _get_dynamic_axis_num(x)
index = axis if axis >= 0 else len(shape) + 1
shape.insert(index, 1)
new_shape = shape[nones:]
new_shape = tuple(
[C.InferredDimension if _ is None else _ for _ in new_shape])
result = C.reshape(x, new_shape)
if index < nones:
result._keras_shape = shape
return result
def squeeze(x, axis):
if isinstance(axis, tuple):
axis = list(axis)
if not isinstance(axis, list):
axis = [axis]
shape = list(int_shape(x))
_axis = []
for _ in axis:
if isinstance(_, int):
_axis.append(_ if _ >= 0 else _ + len(shape))
if len(_axis) == 0:
return x
nones = _get_dynamic_axis_num(x)
for _ in sorted(_axis, reverse=True):
del shape[_]
new_shape = shape[nones:]
new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in new_shape])
return C.reshape(x, new_shape)
def tile(x, n):
if isinstance(n, int):
n = (n,)
elif isinstance(n, list):
n = tuple(n)
shape = int_shape(x)
num_dynamic_axis = _get_dynamic_axis_num(x)
# Padding the axis
if len(n) < len(shape):
n = tuple([1 for _ in range(len(shape) - len(n))]) + n
if len(n) != len(shape):
raise NotImplementedError
i = num_dynamic_axis
for i, rep in enumerate(n):
if i >= num_dynamic_axis and shape[i] is not None:
tmp = [x] * rep
x = C.splice(*tmp, axis=i - num_dynamic_axis)
i += 1
return x
def _normalize_axis(axis, x):
shape = int_shape(x)
ndim = len(shape)
nones = _get_dynamic_axis_num(x)
if nones > ndim:
raise ValueError('CNTK Backend: tensor with keras shape: `%s` has '
'%d cntk dynamic axis, this is not expected, please '
'double check the keras shape history.' % (str(shape), nones))
    # Current cntk does not support shapes like (1, batch), so we use the workaround
    # here to map to the correct axis. We will remove this trick once support is
    # added in the native cntk op.
cntk_axis = []
dynamic_axis_index = 0
for i in range(ndim):
if shape[i] is None and dynamic_axis_index < nones:
cntk_axis.append(x.dynamic_axes[dynamic_axis_index])
dynamic_axis_index += 1
else:
cntk_axis.append(i - dynamic_axis_index)
if dynamic_axis_index < nones:
i = 0
while dynamic_axis_index < nones:
cntk_axis[i] = x.dynamic_axes[dynamic_axis_index]
i += 1
dynamic_axis_index += 1
while i < len(cntk_axis):
cntk_axis[i] -= nones
i += 1
if isinstance(axis, tuple):
_axis = list(axis)
elif isinstance(axis, int):
_axis = [axis]
elif isinstance(axis, list):
_axis = list(axis)
else:
_axis = axis
if isinstance(_axis, list):
for i, a in enumerate(_axis):
if a is not None and a < 0:
_axis[i] = (a % ndim)
if _axis[i] is not None:
_axis[i] = cntk_axis[_axis[i]]
else:
if _axis is None:
_axis = C.Axis.all_axes()
return _axis
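# Hedged illustration of the axis mapping above (not part of the original
# backend): for a placeholder with keras shape (None, None, 32) there are two
# dynamic axes, so keras axis 2 maps to CNTK static axis 0, while keras axes
# 0 and 1 map to the dynamic (batch and sequence) axes.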
def _reshape_dummy_dim(x, axis):
shape = list(x.shape)
_axis = [_ + len(shape) if _ < 0 else _ for _ in axis]
if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1:
result = x
for index in sorted(_axis, reverse=True):
result = C.reshape(result,
shape=(),
begin_axis=index,
end_axis=index + 1)
return result
else:
for index in sorted(_axis, reverse=True):
del shape[index]
shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]
return C.reshape(x, shape)
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_mean')
return _remove_dims(output, axis, keepdims)
def any(x, axis=None, keepdims=False):
reduce_result = sum(x, axis, keepdims=keepdims)
any_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(any_matrix)
else:
return any_matrix
def all(x, axis=None, keepdims=False):
reduce_result = prod(x, axis, keepdims=keepdims)
all_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(all_matrix)
else:
return all_matrix
def classification_error(target, output, axis=-1):
return C.ops.reduce_mean(
C.equal(
argmax(
output,
axis=-1),
argmax(
target,
axis=-1)),
axis=C.Axis.all_axes())
def argmax(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmax(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def argmin(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmin(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def square(x):
return C.square(x)
def abs(x):
return C.abs(x)
def sqrt(x):
return C.sqrt(x)
def exp(x):
return C.exp(x)
def log(x):
return C.log(x)
def round(x):
return C.round(x)
def sigmoid(x):
return C.sigmoid(x)
def sign(x):
return x / C.abs(x)
def pow(x, a):
return C.pow(x, a)
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
if min_value is None:
min_value = -np.inf
return C.clip(x, min_value, max_value)
def binary_crossentropy(target, output, from_logits=False):
if from_logits:
output = C.sigmoid(output)
output = C.clip(output, epsilon(), 1.0 - epsilon())
output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output)
return output
def get_variable_shape(x):
return int_shape(x)
def update(x, new_x):
return C.assign(x, new_x)
def moving_average_update(variable, value, momentum):
return C.assign(variable, variable * momentum + value * (1. - momentum))
def update_add(x, increment):
result = x + increment
return C.assign(x, result)
def gradients(loss, variables):
    # cntk does not support gradients as symbolic ops.
    # To hook up with the keras model,
    # we return a constant as a placeholder; the cntk learner will apply
    # the gradient during training.
global grad_parameter_dict
if isinstance(variables, list) is False:
variables = [variables]
grads = []
for v in variables:
g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
grads.append(g)
grad_parameter_dict[g] = v
return grads
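# Hedged sketch of the gradient-placeholder contract above (illustrative only):
#   g, = gradients(loss, [w])
#   grad_parameter_dict[g] is w  # -> True; the CNTK learner later substitutes
#                                #    the real gradient for `g` during training.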
def equal(x, y):
return C.equal(x, y)
def not_equal(x, y):
return C.not_equal(x, y)
def greater(x, y):
return C.greater(x, y)
def greater_equal(x, y):
return C.greater_equal(x, y)
def less(x, y):
return C.less(x, y)
def less_equal(x, y):
return C.less_equal(x, y)
def maximum(x, y):
return C.element_max(x, y)
def minimum(x, y):
return C.element_min(x, y)
def sin(x):
return C.sin(x)
def cos(x):
return C.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
if beta is None:
if gamma is None:
beta = zeros_like(x)
else:
beta = zeros_like(gamma)
mean, variant = _moments(x, _normalize_axis(reduction_axes, x))
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
normalized = batch_normalization(
x, mean, variant, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
x_shape = int_shape(x)
# skip the batch axis
for axis in range(1, ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
if ndim(gamma) > axis:
gamma = C.reduce_mean(gamma, axis - 1)
beta = C.reduce_mean(beta, axis - 1)
else:
target_shape.append(x_shape[axis])
broadcast_mean = C.reshape(mean, target_shape)
broadcast_var = C.reshape(variant, target_shape)
broadcast_gamma = C.reshape(gamma, target_shape)
broadcast_beta = C.reshape(beta, target_shape)
normalized = batch_normalization(
x,
broadcast_mean,
broadcast_var,
broadcast_beta,
broadcast_gamma,
epsilon)
return normalized, mean, variant
def _moments(x, axes=None, shift=None, keep_dims=False):
_axes = tuple(axes)
if shift is None:
shift = x
# Compute true mean while keeping the dims for proper broadcasting.
for axis in _axes:
shift = C.reduce_mean(shift, axis=axis)
shift = C.stop_gradient(shift)
shifted_mean = C.minus(x, shift)
for axis in _axes:
shifted_mean = C.reduce_mean(shifted_mean, axis=axis)
variance_mean = C.square(C.minus(x, shift))
for axis in _axes:
variance_mean = C.reduce_mean(variance_mean, axis=axis)
variance = C.minus(variance_mean, C.square(shifted_mean))
mean = C.plus(shifted_mean, shift)
if not keep_dims:
mean = squeeze(mean, _axes)
variance = squeeze(variance, _axes)
return mean, variance
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
# The mean / var / beta / gamma may be processed by broadcast
# so it may have an extra batch axis with 1, it is not needed
# in cntk, need to remove those dummy axis.
if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
mean = _reshape_dummy_dim(mean, [0])
if ndim(var) == ndim(x) and shape(var)[0] == 1:
var = _reshape_dummy_dim(var, [0])
if gamma is None:
gamma = ones_like(var)
elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
gamma = _reshape_dummy_dim(gamma, [0])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
beta = _reshape_dummy_dim(beta, [0])
return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta
def concatenate(tensors, axis=-1):
if len(tensors) == 0:
return None
axis = [axis]
axis = _normalize_axis(axis, tensors[0])
return C.splice(*tensors, axis=axis[0])
def flatten(x):
return reshape(x, (-1,))
def reshape(x, shape):
shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape])
if isinstance(x, C.variables.Parameter):
return C.reshape(x, shape)
else:
num_dynamic_axis = _get_dynamic_axis_num(x)
if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1:
# collapse axis with batch axis
if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(
_ == C.FreeDimension for _ in x.shape):
warnings.warn(
'Warning: CNTK backend does not support '
'collapse of batch axis with inferred dimension. '
'The reshape did not take place.')
return x
return _reshape_batch(x, shape)
else:
# no collapse, then first need to padding the shape
if num_dynamic_axis >= len(shape):
i = 0
while i < len(shape):
if shape[i] is None or shape[i] == -1:
i += 1
else:
break
shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape
new_shape = list(shape)
new_shape = new_shape[num_dynamic_axis:]
new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape]
return C.reshape(x, new_shape)
def permute_dimensions(x, pattern):
dims = len(int_shape(x))
num_dynamic_axis = _get_dynamic_axis_num(x)
if isinstance(pattern, list):
current_layout = [i for i in range(dims)]
else:
current_layout = tuple([i for i in range(dims)])
if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]:
raise ValueError('CNTK backend: the permute pattern %s '
'requested permute on dynamic axis, '
'which is not supported. Please do permute '
'on static axis.' % pattern)
axis = list(pattern)
axis = axis[num_dynamic_axis:]
axis = _normalize_axis(axis, x)
return C.transpose(x, axis)
def resize_images(x, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def repeat_elements(x, rep, axis):
axis = _normalize_axis(axis, x)
axis = axis[0]
slices = []
shape = x.shape
i = 0
while i < shape[axis]:
tmp = C.ops.slice(x, axis, i, i + 1)
for _ in range(rep):
slices.append(tmp)
i += 1
return C.splice(*slices, axis=axis)
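# Hedged example of the slice-and-splice repeat above (illustrative only, not
# part of the original backend): for a tensor with static shape (2, 3),
# repeat_elements(t, 2, axis=1) splices every column twice, giving shape
# (2, 6) with the columns ordered 0, 0, 1, 1, 2, 2.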
def repeat(x, n):
    # this is a workaround for the recurrent layer:
    # if n is an inferred dimension,
    # we can't figure out how to repeat it in cntk now,
    # so we return the same x and rely on cntk's broadcast feature
    # to make the recurrent layer work.
    # This needs to be fixed in GA.
if n is C.InferredDimension or n is C.FreeDimension:
return x
index = 1 - _get_dynamic_axis_num(x)
if index < 0 or index > 1:
raise NotImplementedError
new_shape = list(x.shape)
new_shape.insert(index, 1)
new_shape = tuple(new_shape)
x = C.reshape(x, new_shape)
temp = [x] * n
return C.splice(*temp, axis=index)
def tanh(x):
return C.tanh(x)
def _static_rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
uses_learning_phase = False
if dims < 3:
raise ValueError('Input should be at least 3D.')
    # if the second axis is a static axis, CNTK will unroll by default
if shape[1] is None:
raise ValueError('CNTK Backend: the input of static rnn '
'has shape `%s`, the second axis '
'is not static. If you want to run '
'rnn with non-static axis, please try '
'dynamic rnn with sequence axis.' % shape)
if constants is None:
constants = []
if mask is not None:
mask_shape = int_shape(mask)
if len(mask_shape) == dims - 1:
mask = expand_dims(mask)
nones = _get_dynamic_axis_num(inputs)
states = tuple(initial_states)
outputs = []
time_axis = 1 - nones if nones > 0 else 1
if go_backwards:
i = shape[1] - 1
while i >= 0:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, time_axis)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, time_axis)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states
i -= 1
else:
i = 0
while i < shape[1]:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, 1)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, 1)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states[:len(states)]
i += 1
i = 1
# add the time_step axis back
final_output = expand_dims(outputs[0], 1)
last_output = outputs[0]
while i < len(outputs):
# add the time_step axis back
output_slice = expand_dims(outputs[i], 1)
final_output = C.splice(final_output, output_slice, axis=time_axis)
last_output = outputs[i]
i += 1
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, states
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
global uses_learning_phase
uses_learning_phase = False
if dims < 3:
        raise ValueError('CNTK Backend: the input of rnn has only rank %d. '
                         'Need at least rank 3 to run RNN.' % dims)
if _get_dynamic_axis_num(inputs) == 0 or unroll:
return _static_rnn(
step_function,
inputs,
initial_states,
go_backwards,
mask,
constants,
unroll,
input_length)
if constants is None:
constants = []
num_time_step = shape[1]
if num_time_step is None and not has_seq_axis(inputs):
num_time_step = inputs.shape[0]
initial = []
for s in initial_states:
if _get_dynamic_axis_num(s) == 0:
if hasattr(C, 'to_batch'):
initial.append(C.to_batch(s))
else:
initial.append(C.user_function(ConvertToBatch(s)))
else:
initial.append(s)
need_convert = not has_seq_axis(inputs)
if go_backwards and need_convert is False:
raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with '
'variable-length sequences. Please specify a '
'static length for your sequences.')
rnn_inputs = inputs
if need_convert:
if go_backwards:
rnn_inputs = reverse(rnn_inputs, 1)
rnn_inputs = C.to_sequence(rnn_inputs)
rnn_constants = []
for constant in constants:
if isinstance(constant, list):
new_c = []
for c in constant:
if _get_dynamic_axis_num(c) == 1:
new_c.append(C.sequence.broadcast_as(c, rnn_inputs))
else:
new_c.append(c)
rnn_constants.append(new_c)
else:
if _get_dynamic_axis_num(constant) == 1:
rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs))
else:
rnn_constants.append(constant)
else:
rnn_constants = constants
if mask is not None and not has_seq_axis(mask):
if go_backwards:
mask = reverse(mask, 1)
if len(int_shape(mask)) == 2:
mask = expand_dims(mask)
mask = C.to_sequence_like(mask, rnn_inputs)
states = tuple(initial)
with C.default_options(axis_offset=1):
def _recurrence(x, states, m):
# create place holder
place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states]
past_values = []
for s, p in zip(states, place_holders):
past_values.append(C.sequence.past_value(p, s))
new_output, new_states = step_function(
x, tuple(past_values) + tuple(rnn_constants))
if getattr(new_output, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
if m is not None:
new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)]
n_s = []
for o, p in zip(new_states, place_holders):
n_s.append(o.replace_placeholders({p: o.output}))
if len(n_s) > 0:
new_output = n_s[0]
return new_output, n_s
final_output, final_states = _recurrence(rnn_inputs, states, mask)
last_output = C.sequence.last(final_output)
last_states = [C.sequence.last(s) for s in final_states]
if need_convert:
final_output = C.sequence.unpack(final_output, 0, no_mask_output=True)
if num_time_step is not None and num_time_step is not C.FreeDimension:
final_output = _reshape_sequence(final_output, num_time_step)
f_stats = []
for l_s, i_s in zip(last_states, initial_states):
if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1:
if hasattr(C, 'unpack_batch'):
f_stats.append(C.unpack_batch(l_s))
else:
f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0])))
else:
f_stats.append(l_s)
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, f_stats
def has_seq_axis(x):
return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1
def l2_normalize(x, axis=None):
axis = [axis]
axis = _normalize_axis(axis, x)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
return x / norm
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
x = C.clip(x, 0.0, 1.0)
return x
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel.shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
kernel = C.swapaxes(kernel, 0, 2)
padding = _preprocess_border_mode(padding)
strides = [strides]
x = C.convolution(
kernel,
x,
strides=tuple(strides),
auto_padding=[
False,
padding])
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
return x
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding])
else:
assert dilation_rate[0] == dilation_rate[1]
assert strides == (1, 1), 'Invalid strides for dilated convolution'
x = C.convolution(
kernel,
x,
strides=dilation_rate[0],
auto_padding=[
False,
padding,
padding])
return _postprocess_conv2d_output(x, data_format)
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
padding='valid', data_format=None, dilation_rate=1):
raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
return _postprocess_conv2d_output(x, data_format)
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding],
groups=x.shape[0])
return _postprocess_conv2d_output(x, data_format)
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = strides + (strides[0],)
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding])
return _postprocess_conv3d_output(x, data_format)
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in keras2, we need to handle the output shape in a different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[3]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
shape[3] = output_shape[2]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv3d_output(x, data_format)
def pool2d(x, pool_size, strides=(1, 1),
padding='valid', data_format=None,
pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
strides = strides
pool_size = pool_size
x = _preprocess_conv2d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv2d_output(x, data_format)
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv3d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv3d_output(x, data_format)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = C.relu(-x)
x = C.relu(x)
if max_value is not None:
x = C.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def dropout(x, level, noise_shape=None, seed=None):
if level < 0. or level >= 1:
raise ValueError('CNTK Backend: Invalid dropout level %s, '
'must be in interval [0, 1].' % level)
return C.dropout(x, level)
def batch_flatten(x):
# cntk's batch axis is not in shape,
    # so just flatten all the dims in x.shape
dim = np.prod(x.shape)
x = C.reshape(x, (-1,))
x._keras_shape = (None, dim)
return x
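# Hedged example of the flattening above (illustrative only): for a tensor
# whose per-sample static shape is (2, 3), batch_flatten reshapes it to (-1,)
# per sample, i.e. a keras shape of (None, 6).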
def softmax(x, axis=-1):
return C.softmax(x, axis=axis)
def softplus(x):
return C.softplus(x)
def softsign(x):
return x / (1 + C.abs(x))
def categorical_crossentropy(target, output, from_logits=False):
if from_logits:
result = C.cross_entropy_with_softmax(output, target)
# cntk's result shape is (batch, 1), while keras expect (batch, )
return C.reshape(result, ())
else:
# scale preds so that the class probas of each sample sum to 1
output /= C.reduce_sum(output, axis=-1)
# avoid numerical instability with epsilon clipping
output = C.clip(output, epsilon(), 1.0 - epsilon())
return -sum(target * C.log(output), axis=-1)
def sparse_categorical_crossentropy(target, output, from_logits=False):
target = C.one_hot(target, output.shape[-1])
target = C.reshape(target, output.shape)
return categorical_crossentropy(target, output, from_logits)
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.placeholders = inputs
self.trainer = None
self.unrelated_updates = None
self.updates = updates
if len(updates) > 0:
assert len(outputs) > 0
self.loss = outputs[0]
            # need to group updates by gradient placeholder
u_ops = []
unrelated_updates = []
for update in updates:
if isinstance(update, tuple):
if len(update) != 2:
raise NotImplementedError
else:
u = C.assign(update[0], update[1])
else:
u = update
if len(u.arguments) == 0:
u_ops.append(u)
else:
unrelated_updates.append(u)
update_func = C.combine([u.output for u in u_ops])
grads = update_func.find_all_with_name('keras_grad_placeholder')
u_list = []
p_list = []
for g in grads:
if g in grad_parameter_dict:
p_list.append(grad_parameter_dict[g])
u_list.append(g)
else:
raise ValueError(
'CNTK backend: when constructing trainer, '
'found gradient node `%s` which is not '
'related to any parameters in the model. '
'Please double check how the gradient node '
'is constructed.' % g)
if len(u_list) > 0:
learner = C.cntk_py.universal_learner(p_list, u_list, update_func)
criterion = (
outputs[0],
outputs[1]) if len(outputs) > 1 else (
outputs[0],
)
self.trainer = C.trainer.Trainer(
outputs[0], criterion, [learner])
self.trainer_output = tuple([f.output for f in criterion])
elif len(u_ops) > 0:
unrelated_updates.extend(u_ops)
if len(unrelated_updates) > 0:
self.unrelated_updates = C.combine([_.output for _ in unrelated_updates])
if self.trainer is None:
self.metrics_outputs = [f.output for f in outputs]
self.metrics_func = C.combine(self.metrics_outputs)
        # cntk can only handle the loss and 1 metric in the trainer; for more
        # than 2 outputs, the extra metrics need to be evaluated manually
elif len(outputs) > 2:
self.metrics_outputs = [f.output for f in outputs[2:]]
self.metrics_func = C.combine(self.metrics_outputs)
else:
self.metrics_func = None
@staticmethod
def _is_input_shape_compatible(input, placeholder):
if hasattr(input, 'shape') and hasattr(placeholder, 'shape'):
num_dynamic = get_num_dynamic_axis(placeholder)
input_shape = input.shape[num_dynamic:]
placeholder_shape = placeholder.shape
for i, p in zip(input_shape, placeholder_shape):
if i != p and p != C.InferredDimension and p != C.FreeDimension:
return False
return True
def __call__(self, inputs):
global _LEARNING_PHASE_PLACEHOLDER
global _LEARNING_PHASE
assert isinstance(inputs, (list, tuple))
feed_dict = {}
for tensor, value in zip(self.placeholders, inputs):
# cntk only support calculate on float, do auto cast here
if (hasattr(value, 'dtype') and
value.dtype != np.float32 and
value.dtype != np.float64):
value = value.astype(np.float32)
if tensor == _LEARNING_PHASE_PLACEHOLDER:
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value)
else:
                # the current version of cntk can't support inputs with variable
                # length. This will be supported in the next release.
if not self._is_input_shape_compatible(value, tensor):
raise ValueError('CNTK backend: The placeholder has been resolved '
'to shape `%s`, but input shape is `%s`. Currently '
'CNTK can not take variable length inputs. Please '
'pass inputs that have a static shape.'
% (str(tensor.shape), str(value.shape)))
feed_dict[tensor] = value
updated = []
if self.trainer is not None:
input_dict = {}
for argument in self.loss.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: argument %s is not found in inputs. '
'Please double check the model and inputs in '
'`train_function`.' % argument.name)
result = self.trainer.train_minibatch(
input_dict, self.trainer_output)
assert(len(result) == 2)
outputs = result[1]
for o in self.trainer_output:
updated.append(outputs[o])
if self.metrics_func is not None:
input_dict = {}
for argument in self.metrics_func.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: metrics argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
            # Some ops (like dropout) won't be applied during "eval" in cntk.
            # They are only evaluated in the training phase. To make it work,
            # call the "forward" method to let cntk know we want to evaluate
            # them. But the assign ops won't be executed under this mode,
            # that's why we need this check.
if (self.unrelated_updates is None and
(_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)):
_, output_values = self.metrics_func.forward(
input_dict,
self.metrics_func.outputs,
(self.metrics_func.outputs[0],),
as_numpy=False)
else:
output_values = self.metrics_func.eval(input_dict, as_numpy=False)
if isinstance(output_values, dict):
for o in self.metrics_outputs:
value = output_values[o]
v = value.asarray()
updated.append(v)
else:
v = output_values.asarray()
for o in self.metrics_outputs:
updated.append(v)
if self.unrelated_updates is not None:
input_dict = {}
for argument in self.unrelated_updates.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: assign ops argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
self.unrelated_updates.eval(input_dict, as_numpy=False)
return updated
def function(inputs, outputs, updates=[], **kwargs):
return Function(inputs, outputs, updates=updates, **kwargs)
def temporal_padding(x, padding=(1, 1)):
assert len(padding) == 2
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if num_dynamic_axis > 0:
assert len(base_shape) == 2
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[padding, (0, 0)])
else:
x = _padding(x, padding, 0)
else:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[(0, 0), padding, (0, 0)])
else:
x = _padding(x, padding, 1)
return x
def _padding(x, pattern, axis):
base_shape = x.shape
if b_any([dim < 0 for dim in base_shape]):
raise ValueError('CNTK Backend: padding input tensor with '
'shape `%s` contains non-specified dimension, '
'which is not supported. Please give fixed '
'dimension to enable padding.' % base_shape)
if pattern[0] > 0:
prefix_shape = list(base_shape)
prefix_shape[axis] = pattern[0]
prefix_shape = tuple(prefix_shape)
x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
base_shape = x.shape
if pattern[1] > 0:
postfix_shape = list(base_shape)
postfix_shape[axis] = pattern[1]
postfix_shape = tuple(postfix_shape)
x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
return x
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
return x
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, num_classes):
return C.one_hot(indices, num_classes)
def get_value(x):
if isinstance(
x,
C.variables.Parameter) or isinstance(
x,
C.variables.Constant):
return x.value
else:
return eval(x)
def batch_get_value(xs):
result = []
for x in xs:
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
result.append(x.value)
else:
result.append(eval(x))
return result
def set_value(x, value):
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
if isinstance(value, (float, int)):
value = np.full(x.shape, value, dtype=floatx())
x.value = value
else:
raise NotImplementedError
def print_tensor(x, message=''):
return C.user_function(
LambdaFunc(x,
when=lambda x: True,
execute=lambda x: print(message)))
def batch_set_value(tuples):
for t in tuples:
x = t[0]
value = t[1]
if isinstance(value, np.ndarray) is False:
value = np.asarray(value)
if isinstance(x, C.variables.Parameter):
x.value = value
else:
raise NotImplementedError
def stop_gradient(variables):
if isinstance(variables, (list, tuple)):
return map(C.stop_gradient, variables)
else:
return C.stop_gradient(variables)
def switch(condition, then_expression, else_expression):
ndim_cond = ndim(condition)
ndim_expr = ndim(then_expression)
if ndim_cond > ndim_expr:
raise ValueError('Rank of condition should be less'
' than or equal to rank of then and'
' else expressions. ndim(condition)=' +
str(ndim_cond) + ', ndim(then_expression)'
'=' + str(ndim_expr))
elif ndim_cond < ndim_expr:
shape_expr = int_shape(then_expression)
ndim_diff = ndim_expr - ndim_cond
for i in range(ndim_diff):
condition = expand_dims(condition)
condition = tile(condition, shape_expr[ndim_cond + i])
return C.element_select(condition,
then_expression,
else_expression)
def elu(x, alpha=1.):
res = C.elu(x)
if alpha == 1:
return res
else:
return C.element_select(C.greater(x, 0), res, alpha * res)
def in_top_k(predictions, targets, k):
_targets = C.one_hot(targets, predictions.shape[-1])
result = C.classification_error(predictions, _targets, topN=k)
return 1 - C.reshape(result, shape=())
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in Keras 2, we need to handle the output shape in a different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[2]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv2d_output(x, data_format)
def identity(x, name=None):
if name is None:
name = '%s_alias' % x.name
return C.alias(x, name=name)
def _preprocess_conv2d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = C.transpose(x, (2, 0, 1))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# CNTK expects `(depth, input_depth, rows, cols)`.
kernel = C.transpose(kernel, (3, 2, 0, 1))
return kernel
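# Quick shape check (illustrative, relying on the numpy import already used in
# this module as np): the transpose above maps a Keras-format kernel of shape
# (rows, cols, input_depth, depth) to CNTK's (depth, input_depth, rows, cols):
#
#     np.zeros((3, 3, 16, 32)).transpose(3, 2, 0, 1).shape  # -> (32, 16, 3, 3)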
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
if data_format == 'channels_last':
x = C.transpose(x, (1, 2, 0))
return x
def _preprocess_conv3d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
# input_depth)
x = C.transpose(x, (3, 0, 1, 2))
return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
return kernel
def _postprocess_conv3d_output(x, dim_ordering):
if dim_ordering == 'channels_last':
x = C.transpose(x, (1, 2, 3, 0))
return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
if _get_dynamic_axis_num(x) > 1:
return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
else:
return False
def get_num_dynamic_axis(x):
return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
if isinstance(axis, list):
for a in axis:
if isinstance(a, C.Axis) \
and a != C.Axis.default_batch_axis() \
and hasattr(C.sequence, reduce_fun_name):
x = getattr(C.sequence, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, axis)
return x
def _reshape_sequence(x, time_step):
tmp_shape = list(int_shape(x))
tmp_shape[1] = time_step
return reshape(x, tmp_shape)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
    # transpose kernel to put the output filters first, so broadcasting applies
weight = permute_dimensions(kernel, (2, 0, 1))
# Shape: (batch, filters, output_length, input_length * kernel_size)
output = x_aggregate * weight
# Shape: (batch, filters, output_length)
output = sum(output, axis=3)
# Shape: (batch, output_length, filters)
return permute_dimensions(output, (0, 2, 1))
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + kernel_size[1])
if data_format == 'channels_first':
xs.append(reshape(inputs[:, :, slice_row, slice_col],
(-1, 1, feature_dim)))
else:
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to put filters first
weight = permute_dimensions(kernel, (2, 0, 1))
# shape: batch, filters, output_length, input_length * kernel_size
output = x_aggregate * weight
# shape: batch, filters, output_length
output = sum(output, axis=3)
# shape: batch, filters, row, col
output = reshape(output,
(-1, filters, output_row, output_col))
if data_format == 'channels_last':
# shape: batch, row, col, filters
output = permute_dimensions(output, (0, 2, 3, 1))
return output
def reverse(x, axes):
if isinstance(axes, int):
axes = [axes]
cntk_axes = _normalize_axis(axes, x)
begin_index = [0 for _ in cntk_axes]
end_index = [0 for _ in cntk_axes]
strides = [-1 for _ in cntk_axes]
return C.slice(x, cntk_axes, begin_index, end_index, strides)
def _reshape_batch(x, shape):
# there is a bug in cntk 2.1's unpack_batch implementation
if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:
const_a = C.unpack_batch(x)
const_a = C.reshape(const_a, shape)
return C.to_batch(const_a)
else:
return C.user_function(ReshapeBatch(x, shape[1:]))
def _get_cntk_version():
version = C.__version__
if version.endswith('+'):
version = version[:-1]
    # hot fix: ignore every '.' except the first one.
if len(version) > 2 and version[1] == '.':
version = version[:2] + version[2:].replace('.', '')
try:
return float(version)
except:
warnings.warn(
'CNTK backend warning: CNTK version not detected. '
            'Will use CNTK 2.0 GA as default.')
return float(2.0)
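# Example of the normalization above (illustrative): '2.3.1' first becomes
# '2.31' (every '.' after the first is dropped) and parses to 2.31, while a
# nightly tag such as '2.2+' is stripped to '2.2' before conversion.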
class ReshapeBatch(C.ops.functions.UserFunction):
def __init__(self, input, shape, name='reshape_with_batch'):
super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
self.from_shape = input.shape
self.target_shape = shape
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
num_static_element = np.prod(np.asarray(self.target_shape))
num_batch = int(num_element / num_static_element)
result = arguments.data().as_shape((num_batch,) + self.target_shape)
return None, C.cntk_py.Value(result)
def backward(self, state, root_gradients):
grad_array_view = root_gradients.data()
num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
num_static_element = np.prod(np.asarray(self.from_shape))
num_old_batch = int(num_element / num_static_element)
return C.cntk_py.Value(
grad_array_view.as_shape(
(num_old_batch,) + self.from_shape))
class ConvertToBatch(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK batch axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk variable (parameter/constant)
name: name of this node
"""
def __init__(self, input, name='convert_to_batch'):
super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name)
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.inputs[0].shape[1:],
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
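# Minimal usage sketch (illustrative; the name `const_a` is hypothetical): wrap
# a variable whose first static axis should act as the CNTK batch axis, using
# the same C.user_function pattern applied elsewhere in this backend.
#
#     const_a = C.constant(np.zeros((4, 3), dtype=np.float32))
#     batched = C.user_function(ConvertToBatch(const_a))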
class ConvertToStatic(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK static axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk tensor which has batch axis
batch_size: size of batch axis.
name: name of this node.
"""
def __init__(self, input, batch_size, name='convert_to_static'):
super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name)
self.target_shape = (batch_size,) + input.shape
def infer_outputs(self):
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
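# Minimal usage sketch (illustrative; `batched_x` and the batch size are
# hypothetical): convert a tensor that lives on the batch axis back into one
# with an explicit static batch dimension of known size.
#
#     static_x = C.user_function(ConvertToStatic(batched_x, batch_size=32))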
class LambdaFunc(C.ops.functions.UserFunction):
def __init__(self,
arg,
when=lambda arg: True,
execute=lambda arg: print(arg),
name=''):
self.when = when
self.execute = execute
super(LambdaFunc, self).__init__([arg], name=name)
def infer_outputs(self):
return [
C.output_variable(
self.inputs[0].shape,
self.inputs[0].dtype,
self.inputs[0].dynamic_axes)]
def forward(self, argument, device=None, outputs_to_retain=None):
if self.when(argument):
self.execute(argument)
return None, argument
def backward(self, state, root_gradients):
return root_gradients
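# Minimal usage sketch (illustrative; the variable `v` is hypothetical):
# print_tensor defined earlier wraps LambdaFunc so that a message is printed
# every time data flows through the node.
#
#     v = C.input_variable((3,))
#     debug_v = print_tensor(v, message='reached debug node')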
|
[
"cntk.cntk_py.Value",
"cntk.constant",
"cntk.element_select",
"numpy.random.binomial",
"cntk.assign",
"cntk.swapaxes",
"cntk.relu",
"cntk.pooling",
"cntk.initializer.normal",
"cntk.log",
"numpy.random.seed",
"warnings.warn",
"cntk.less_equal",
"cntk.sin",
"cntk.one_hot",
"cntk.sequence.broadcast_as",
"cntk.times",
"cntk.minus",
"cntk.round",
"cntk.ops.argmin",
"numpy.random.randint",
"cntk.dropout",
"cntk.splice",
"cntk.classification_error",
"cntk.ops.slice",
"cntk.placeholder",
"cntk.sqrt",
"cntk.Axis.all_axes",
"cntk.unpack_batch",
"numpy.ones",
"cntk.softmax",
"cntk.abs",
"cntk.transpose",
"cntk.alias",
"cntk.convolution_transpose",
"cntk.sequence.last",
"cntk.sequence.past_value",
"cntk.equal",
"cntk.elu",
"cntk.to_sequence",
"cntk.ops.one_hot",
"cntk.less",
"cntk.softplus",
"cntk.plus",
"numpy.prod",
"cntk.set_global_option",
"cntk.Axis.default_dynamic_axis",
"cntk.greater",
"cntk.cntk_py.universal_learner",
"cntk.output_variable",
"cntk.cross_entropy_with_softmax",
"cntk.to_sequence_like",
"cntk.reshape",
"cntk.clip",
"cntk.element_min",
"cntk.ops.element_select",
"cntk.stop_gradient",
"cntk.tanh",
"cntk.combine",
"cntk.pow",
"cntk.slice",
"cntk.cos",
"cntk.to_batch",
"cntk.exp",
"cntk.square",
"cntk.element_max",
"cntk.sequence.unpack",
"cntk.ops.gather",
"cntk.sigmoid",
"cntk.ops.argmax",
"numpy.zeros",
"cntk.device.use_default_device",
"cntk.default_options",
"cntk.greater_equal",
"cntk.convolution",
"numpy.asarray",
"numpy.eye",
"cntk.initializer.uniform",
"cntk.initializer.truncated_normal",
"cntk.reduce_mean",
"cntk.Axis.default_batch_axis",
"cntk.trainer.Trainer",
"cntk.not_equal",
"cntk.reduce_sum",
"cntk.pad",
"collections.defaultdict"
] |
[((313, 349), 'cntk.set_global_option', 'C.set_global_option', (['"""align_axis"""', '(1)'], {}), "('align_axis', 1)\n", (332, 349), True, 'import cntk as C\n'), ((371, 400), 'cntk.device.use_default_device', 'C.device.use_default_device', ([], {}), '()\n', (398, 400), True, 'import cntk as C\n'), ((854, 933), 'cntk.constant', 'C.constant', ([], {'shape': '()', 'dtype': 'np.float32', 'value': '(1.0)', 'name': '"""_keras_learning_phase"""'}), "(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')\n", (864, 933), True, 'import cntk as C\n'), ((1069, 1085), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1080, 1085), False, 'from collections import defaultdict\n'), ((425, 583), 'warnings.warn', 'warnings.warn', (['"""CNTK backend warning: GPU is not detected. CNTK\'s CPU version is not fully optimized,please run with GPU to get better performance."""'], {}), '(\n "CNTK backend warning: GPU is not detected. CNTK\'s CPU version is not fully optimized,please run with GPU to get better performance."\n )\n', (438, 583), False, 'import warnings\n'), ((2269, 2284), 'numpy.asarray', 'np.asarray', (['(1.0)'], {}), '(1.0)\n', (2279, 2284), True, 'import numpy as np\n'), ((17501, 17520), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (17511, 17520), True, 'import cntk as C\n'), ((19618, 19633), 'cntk.square', 'C.square', (['(x - m)'], {}), '(x - m)\n', (19626, 19633), True, 'import cntk as C\n'), ((20103, 20126), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (20112, 20126), True, 'import cntk as C\n'), ((20778, 20801), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (20787, 20801), True, 'import cntk as C\n'), ((25068, 25097), 'cntk.ops.argmax', 'C.ops.argmax', (['x'], {'axis': 'axis[0]'}), '(x, axis=axis[0])\n', (25080, 25097), True, 'import cntk as C\n'), ((25235, 25264), 'cntk.ops.argmin', 'C.ops.argmin', (['x'], {'axis': 'axis[0]'}), '(x, axis=axis[0])\n', (25247, 25264), True, 'import cntk as C\n'), ((25337, 25348), 'cntk.square', 'C.square', (['x'], {}), '(x)\n', (25345, 25348), True, 'import cntk as C\n'), ((25374, 25382), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (25379, 25382), True, 'import cntk as C\n'), ((25409, 25418), 'cntk.sqrt', 'C.sqrt', (['x'], {}), '(x)\n', (25415, 25418), True, 'import cntk as C\n'), ((25444, 25452), 'cntk.exp', 'C.exp', (['x'], {}), '(x)\n', (25449, 25452), True, 'import cntk as C\n'), ((25478, 25486), 'cntk.log', 'C.log', (['x'], {}), '(x)\n', (25483, 25486), True, 'import cntk as C\n'), ((25514, 25524), 'cntk.round', 'C.round', (['x'], {}), '(x)\n', (25521, 25524), True, 'import cntk as C\n'), ((25554, 25566), 'cntk.sigmoid', 'C.sigmoid', (['x'], {}), '(x)\n', (25563, 25566), True, 'import cntk as C\n'), ((25634, 25645), 'cntk.pow', 'C.pow', (['x', 'a'], {}), '(x, a)\n', (25639, 25645), True, 'import cntk as C\n'), ((25887, 25918), 'cntk.clip', 'C.clip', (['x', 'min_value', 'max_value'], {}), '(x, min_value, max_value)\n', (25893, 25918), True, 'import cntk as C\n'), ((26274, 26292), 'cntk.assign', 'C.assign', (['x', 'new_x'], {}), '(x, new_x)\n', (26282, 26292), True, 'import cntk as C\n'), ((26360, 26426), 'cntk.assign', 'C.assign', (['variable', '(variable * momentum + value * (1.0 - momentum))'], {}), '(variable, variable * momentum + value * (1.0 - momentum))\n', (26368, 26426), True, 'import cntk as C\n'), ((26496, 26515), 'cntk.assign', 'C.assign', (['x', 'result'], {}), '(x, result)\n', (26504, 26515), True, 'import cntk as C\n'), 
((27076, 27089), 'cntk.equal', 'C.equal', (['x', 'y'], {}), '(x, y)\n', (27083, 27089), True, 'import cntk as C\n'), ((27124, 27141), 'cntk.not_equal', 'C.not_equal', (['x', 'y'], {}), '(x, y)\n', (27135, 27141), True, 'import cntk as C\n'), ((27174, 27189), 'cntk.greater', 'C.greater', (['x', 'y'], {}), '(x, y)\n', (27183, 27189), True, 'import cntk as C\n'), ((27228, 27249), 'cntk.greater_equal', 'C.greater_equal', (['x', 'y'], {}), '(x, y)\n', (27243, 27249), True, 'import cntk as C\n'), ((27279, 27291), 'cntk.less', 'C.less', (['x', 'y'], {}), '(x, y)\n', (27285, 27291), True, 'import cntk as C\n'), ((27327, 27345), 'cntk.less_equal', 'C.less_equal', (['x', 'y'], {}), '(x, y)\n', (27339, 27345), True, 'import cntk as C\n'), ((27378, 27397), 'cntk.element_max', 'C.element_max', (['x', 'y'], {}), '(x, y)\n', (27391, 27397), True, 'import cntk as C\n'), ((27430, 27449), 'cntk.element_min', 'C.element_min', (['x', 'y'], {}), '(x, y)\n', (27443, 27449), True, 'import cntk as C\n'), ((27475, 27483), 'cntk.sin', 'C.sin', (['x'], {}), '(x)\n', (27480, 27483), True, 'import cntk as C\n'), ((27509, 27517), 'cntk.cos', 'C.cos', (['x'], {}), '(x)\n', (27514, 27517), True, 'import cntk as C\n'), ((29326, 29348), 'cntk.stop_gradient', 'C.stop_gradient', (['shift'], {}), '(shift)\n', (29341, 29348), True, 'import cntk as C\n'), ((29368, 29385), 'cntk.minus', 'C.minus', (['x', 'shift'], {}), '(x, shift)\n', (29375, 29385), True, 'import cntk as C\n'), ((29681, 29708), 'cntk.plus', 'C.plus', (['shifted_mean', 'shift'], {}), '(shifted_mean, shift)\n', (29687, 29708), True, 'import cntk as C\n'), ((30817, 30849), 'cntk.splice', 'C.splice', (['*tensors'], {'axis': 'axis[0]'}), '(*tensors, axis=axis[0])\n', (30825, 30849), True, 'import cntk as C\n'), ((33044, 33064), 'cntk.transpose', 'C.transpose', (['x', 'axis'], {}), '(x, axis)\n', (33055, 33064), True, 'import cntk as C\n'), ((34544, 34572), 'cntk.splice', 'C.splice', (['*slices'], {'axis': 'axis'}), '(*slices, axis=axis)\n', (34552, 34572), True, 'import cntk as C\n'), ((35137, 35160), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (35146, 35160), True, 'import cntk as C\n'), ((35191, 35218), 'cntk.splice', 'C.splice', (['*temp'], {'axis': 'index'}), '(*temp, axis=index)\n', (35199, 35218), True, 'import cntk as C\n'), ((35245, 35254), 'cntk.tanh', 'C.tanh', (['x'], {}), '(x)\n', (35251, 35254), True, 'import cntk as C\n'), ((44060, 44079), 'cntk.clip', 'C.clip', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (44066, 44079), True, 'import cntk as C\n'), ((50105, 50191), 'cntk.convolution', 'C.convolution', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding, padding]'}), '(kernel, x, strides, auto_padding=[False, padding, padding,\n padding])\n', (50118, 50191), True, 'import cntk as C\n'), ((51244, 51367), 'cntk.convolution_transpose', 'C.convolution_transpose', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding, padding]', 'output_shape': 'output_shape'}), '(kernel, x, strides, auto_padding=[False, padding,\n padding, padding], output_shape=output_shape)\n', (51267, 51367), True, 'import cntk as C\n'), ((53443, 53452), 'cntk.relu', 'C.relu', (['x'], {}), '(x)\n', (53449, 53452), True, 'import cntk as C\n'), ((53819, 53838), 'cntk.dropout', 'C.dropout', (['x', 'level'], {}), '(x, level)\n', (53828, 53838), True, 'import cntk as C\n'), ((53959, 53975), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (53966, 53975), True, 'import numpy as np\n'), ((53984, 54003), 
'cntk.reshape', 'C.reshape', (['x', '(-1,)'], {}), '(x, (-1,))\n', (53993, 54003), True, 'import cntk as C\n'), ((54088, 54111), 'cntk.softmax', 'C.softmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (54097, 54111), True, 'import cntk as C\n'), ((54142, 54155), 'cntk.softplus', 'C.softplus', (['x'], {}), '(x)\n', (54152, 54155), True, 'import cntk as C\n'), ((54854, 54889), 'cntk.one_hot', 'C.one_hot', (['target', 'output.shape[-1]'], {}), '(target, output.shape[-1])\n', (54863, 54889), True, 'import cntk as C\n'), ((54903, 54934), 'cntk.reshape', 'C.reshape', (['target', 'output.shape'], {}), '(target, output.shape)\n', (54912, 54934), True, 'import cntk as C\n'), ((67909, 67940), 'cntk.one_hot', 'C.one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (67918, 67940), True, 'import cntk as C\n'), ((70112, 70173), 'cntk.element_select', 'C.element_select', (['condition', 'then_expression', 'else_expression'], {}), '(condition, then_expression, else_expression)\n', (70128, 70173), True, 'import cntk as C\n'), ((70264, 70272), 'cntk.elu', 'C.elu', (['x'], {}), '(x)\n', (70269, 70272), True, 'import cntk as C\n'), ((70444, 70485), 'cntk.one_hot', 'C.one_hot', (['targets', 'predictions.shape[-1]'], {}), '(targets, predictions.shape[-1])\n', (70453, 70485), True, 'import cntk as C\n'), ((70499, 70552), 'cntk.classification_error', 'C.classification_error', (['predictions', '_targets'], {'topN': 'k'}), '(predictions, _targets, topN=k)\n', (70521, 70552), True, 'import cntk as C\n'), ((71478, 71592), 'cntk.convolution_transpose', 'C.convolution_transpose', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding]', 'output_shape': 'output_shape'}), '(kernel, x, strides, auto_padding=[False, padding,\n padding], output_shape=output_shape)\n', (71501, 71592), True, 'import cntk as C\n'), ((71818, 71839), 'cntk.alias', 'C.alias', (['x'], {'name': 'name'}), '(x, name=name)\n', (71825, 71839), True, 'import cntk as C\n'), ((72461, 72494), 'cntk.transpose', 'C.transpose', (['kernel', '(3, 2, 0, 1)'], {}), '(kernel, (3, 2, 0, 1))\n', (72472, 72494), True, 'import cntk as C\n'), ((73365, 73401), 'cntk.transpose', 'C.transpose', (['kernel', '(4, 3, 0, 1, 2)'], {}), '(kernel, (4, 3, 0, 1, 2))\n', (73376, 73401), True, 'import cntk as C\n'), ((77621, 77675), 'cntk.slice', 'C.slice', (['x', 'cntk_axes', 'begin_index', 'end_index', 'strides'], {}), '(x, cntk_axes, begin_index, end_index, strides)\n', (77628, 77675), True, 'import cntk as C\n'), ((10726, 10740), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (10733, 10740), True, 'import numpy as np\n'), ((11156, 11186), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (11173, 11186), True, 'import numpy as np\n'), ((11188, 11208), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11202, 11208), True, 'import numpy as np\n'), ((12455, 12481), 'numpy.random.randint', 'np.random.randint', (['(10000.0)'], {}), '(10000.0)\n', (12472, 12481), True, 'import numpy as np\n'), ((13139, 13169), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (13156, 13169), True, 'import numpy as np\n'), ((14113, 14145), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (14130, 14145), True, 'import numpy as np\n'), ((14939, 14951), 'numpy.eye', 'np.eye', (['size'], {}), '(size)\n', (14945, 14951), True, 'import numpy as np\n'), ((15954, 15967), 'cntk.times', 'C.times', (['x', 'y'], {}), '(x, 
y)\n', (15961, 15967), True, 'import cntk as C\n'), ((17778, 17810), 'cntk.ops.gather', 'C.ops.gather', (['reference', 'indices'], {}), '(reference, indices)\n', (17790, 17810), True, 'import cntk as C\n'), ((17887, 17922), 'cntk.ops.one_hot', 'C.ops.one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (17900, 17922), True, 'import cntk as C\n'), ((23765, 23784), 'cntk.reshape', 'C.reshape', (['x', 'shape'], {}), '(x, shape)\n', (23774, 23784), True, 'import cntk as C\n'), ((24272, 24296), 'cntk.reduce_sum', 'C.reduce_sum', (['any_matrix'], {}), '(any_matrix)\n', (24284, 24296), True, 'import cntk as C\n'), ((24641, 24665), 'cntk.reduce_sum', 'C.reduce_sum', (['all_matrix'], {}), '(all_matrix)\n', (24653, 24665), True, 'import cntk as C\n'), ((25597, 25605), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (25602, 25605), True, 'import cntk as C\n'), ((26018, 26035), 'cntk.sigmoid', 'C.sigmoid', (['output'], {}), '(output)\n', (26027, 26035), True, 'import cntk as C\n'), ((26910, 26969), 'cntk.constant', 'C.constant', (['(0)'], {'shape': 'v.shape', 'name': '"""keras_grad_placeholder"""'}), "(0, shape=v.shape, name='keras_grad_placeholder')\n", (26920, 26969), True, 'import cntk as C\n'), ((28608, 28637), 'cntk.reshape', 'C.reshape', (['mean', 'target_shape'], {}), '(mean, target_shape)\n', (28617, 28637), True, 'import cntk as C\n'), ((28662, 28694), 'cntk.reshape', 'C.reshape', (['variant', 'target_shape'], {}), '(variant, target_shape)\n', (28671, 28694), True, 'import cntk as C\n'), ((28721, 28751), 'cntk.reshape', 'C.reshape', (['gamma', 'target_shape'], {}), '(gamma, target_shape)\n', (28730, 28751), True, 'import cntk as C\n'), ((28777, 28806), 'cntk.reshape', 'C.reshape', (['beta', 'target_shape'], {}), '(beta, target_shape)\n', (28786, 28806), True, 'import cntk as C\n'), ((29432, 29470), 'cntk.reduce_mean', 'C.reduce_mean', (['shifted_mean'], {'axis': 'axis'}), '(shifted_mean, axis=axis)\n', (29445, 29470), True, 'import cntk as C\n'), ((29501, 29518), 'cntk.minus', 'C.minus', (['x', 'shift'], {}), '(x, shift)\n', (29508, 29518), True, 'import cntk as C\n'), ((29567, 29606), 'cntk.reduce_mean', 'C.reduce_mean', (['variance_mean'], {'axis': 'axis'}), '(variance_mean, axis=axis)\n', (29580, 29606), True, 'import cntk as C\n'), ((29646, 29668), 'cntk.square', 'C.square', (['shifted_mean'], {}), '(shifted_mean)\n', (29654, 29668), True, 'import cntk as C\n'), ((31069, 31088), 'cntk.reshape', 'C.reshape', (['x', 'shape'], {}), '(x, shape)\n', (31078, 31088), True, 'import cntk as C\n'), ((34427, 34457), 'cntk.ops.slice', 'C.ops.slice', (['x', 'axis', 'i', '(i + 1)'], {}), '(x, axis, i, i + 1)\n', (34438, 34457), True, 'import cntk as C\n'), ((38958, 39010), 'cntk.splice', 'C.splice', (['final_output', 'output_slice'], {'axis': 'time_axis'}), '(final_output, output_slice, axis=time_axis)\n', (38966, 39010), True, 'import cntk as C\n'), ((40814, 40839), 'cntk.to_sequence', 'C.to_sequence', (['rnn_inputs'], {}), '(rnn_inputs)\n', (40827, 40839), True, 'import cntk as C\n'), ((41733, 41769), 'cntk.to_sequence_like', 'C.to_sequence_like', (['mask', 'rnn_inputs'], {}), '(mask, rnn_inputs)\n', (41751, 41769), True, 'import cntk as C\n'), ((41809, 41841), 'cntk.default_options', 'C.default_options', ([], {'axis_offset': '(1)'}), '(axis_offset=1)\n', (41826, 41841), True, 'import cntk as C\n'), ((42888, 42917), 'cntk.sequence.last', 'C.sequence.last', (['final_output'], {}), '(final_output)\n', (42903, 42917), True, 'import cntk as C\n'), ((43028, 43083), 'cntk.sequence.unpack', 
'C.sequence.unpack', (['final_output', '(0)'], {'no_mask_output': '(True)'}), '(final_output, 0, no_mask_output=True)\n', (43045, 43083), True, 'import cntk as C\n'), ((44644, 44663), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (44654, 44663), True, 'import cntk as C\n'), ((44681, 44705), 'cntk.swapaxes', 'C.swapaxes', (['kernel', '(0)', '(2)'], {}), '(kernel, 0, 2)\n', (44691, 44705), True, 'import cntk as C\n'), ((44976, 44995), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (44986, 44995), True, 'import cntk as C\n'), ((45553, 45626), 'cntk.convolution', 'C.convolution', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding]'}), '(kernel, x, strides, auto_padding=[False, padding, padding])\n', (45566, 45626), True, 'import cntk as C\n'), ((45875, 45969), 'cntk.convolution', 'C.convolution', (['kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]'}), '(kernel, x, strides=dilation_rate[0], auto_padding=[False,\n padding, padding])\n', (45888, 45969), True, 'import cntk as C\n'), ((46816, 46859), 'cntk.transpose', 'C.transpose', (['depthwise_kernel', '(1, 0, 2, 3)'], {}), '(depthwise_kernel, (1, 0, 2, 3))\n', (46827, 46859), True, 'import cntk as C\n'), ((47137, 47251), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'strides', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=strides, auto_padding=[False,\n padding, padding], groups=x.shape[0])\n', (47150, 47251), True, 'import cntk as C\n'), ((47338, 47413), 'cntk.convolution', 'C.convolution', (['pointwise_kernel', 'x'], {'strides': '(1, 1, 1)', 'auto_padding': '[False]'}), '(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])\n', (47351, 47413), True, 'import cntk as C\n'), ((47760, 47865), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]'}), '(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[\n False, padding, padding])\n', (47773, 47865), True, 'import cntk as C\n'), ((47925, 48000), 'cntk.convolution', 'C.convolution', (['pointwise_kernel', 'x'], {'strides': '(1, 1, 1)', 'auto_padding': '[False]'}), '(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])\n', (47938, 48000), True, 'import cntk as C\n'), ((48610, 48653), 'cntk.transpose', 'C.transpose', (['depthwise_kernel', '(1, 0, 2, 3)'], {}), '(depthwise_kernel, (1, 0, 2, 3))\n', (48621, 48653), True, 'import cntk as C\n'), ((48850, 48964), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'strides', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=strides, auto_padding=[False,\n padding, padding], groups=x.shape[0])\n', (48863, 48964), True, 'import cntk as C\n'), ((49333, 49457), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[\n False, padding, padding], groups=x.shape[0])\n', (49346, 49457), True, 'import cntk as C\n'), ((52011, 52082), 'cntk.pooling', 'C.pooling', (['x', 'C.MAX_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])\n', (52020, 52082), True, 'import cntk as C\n'), ((52899, 52970), 'cntk.pooling', 'C.pooling', (['x', 
'C.MAX_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])\n', (52908, 52970), True, 'import cntk as C\n'), ((53424, 53434), 'cntk.relu', 'C.relu', (['(-x)'], {}), '(-x)\n', (53430, 53434), True, 'import cntk as C\n'), ((53495, 53520), 'cntk.clip', 'C.clip', (['x', '(0.0)', 'max_value'], {}), '(x, 0.0, max_value)\n', (53501, 53520), True, 'import cntk as C\n'), ((54309, 54353), 'cntk.cross_entropy_with_softmax', 'C.cross_entropy_with_softmax', (['output', 'target'], {}), '(output, target)\n', (54337, 54353), True, 'import cntk as C\n'), ((54443, 54464), 'cntk.reshape', 'C.reshape', (['result', '()'], {}), '(result, ())\n', (54452, 54464), True, 'import cntk as C\n'), ((54564, 54593), 'cntk.reduce_sum', 'C.reduce_sum', (['output'], {'axis': '(-1)'}), '(output, axis=-1)\n', (54576, 54593), True, 'import cntk as C\n'), ((69342, 69368), 'cntk.stop_gradient', 'C.stop_gradient', (['variables'], {}), '(variables)\n', (69357, 69368), True, 'import cntk as C\n'), ((70568, 70595), 'cntk.reshape', 'C.reshape', (['result'], {'shape': '()'}), '(result, shape=())\n', (70577, 70595), True, 'import cntk as C\n'), ((72154, 72179), 'cntk.transpose', 'C.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (72165, 72179), True, 'import cntk as C\n'), ((72850, 72875), 'cntk.transpose', 'C.transpose', (['x', '(1, 2, 0)'], {}), '(x, (1, 2, 0))\n', (72861, 72875), True, 'import cntk as C\n'), ((73255, 73283), 'cntk.transpose', 'C.transpose', (['x', '(3, 0, 1, 2)'], {}), '(x, (3, 0, 1, 2))\n', (73266, 73283), True, 'import cntk as C\n'), ((73523, 73551), 'cntk.transpose', 'C.transpose', (['x', '(1, 2, 3, 0)'], {}), '(x, (1, 2, 3, 0))\n', (73534, 73551), True, 'import cntk as C\n'), ((77855, 77872), 'cntk.unpack_batch', 'C.unpack_batch', (['x'], {}), '(x)\n', (77869, 77872), True, 'import cntk as C\n'), ((77891, 77916), 'cntk.reshape', 'C.reshape', (['const_a', 'shape'], {}), '(const_a, shape)\n', (77900, 77916), True, 'import cntk as C\n'), ((77932, 77951), 'cntk.to_batch', 'C.to_batch', (['const_a'], {}), '(const_a)\n', (77942, 77951), True, 'import cntk as C\n'), ((78836, 78863), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (78861, 78863), True, 'import cntk as C\n'), ((80338, 80365), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (80363, 80365), True, 'import cntk as C\n'), ((3166, 3200), 'cntk.element_select', 'C.element_select', (['training', 'x', 'alt'], {}), '(training, x, alt)\n', (3182, 3200), True, 'import cntk as C\n'), ((12703, 12742), 'cntk.initializer.uniform', 'C.initializer.uniform', (['scale'], {'seed': 'seed'}), '(scale, seed=seed)\n', (12724, 12742), True, 'import cntk as C\n'), ((13366, 13410), 'cntk.initializer.normal', 'C.initializer.normal', ([], {'scale': 'scale', 'seed': 'seed'}), '(scale=scale, seed=seed)\n', (13386, 13410), True, 'import cntk as C\n'), ((14289, 14338), 'cntk.initializer.truncated_normal', 'C.initializer.truncated_normal', (['stddev'], {'seed': 'seed'}), '(stddev, seed=seed)\n', (14319, 14338), True, 'import cntk as C\n'), ((14581, 14603), 'numpy.zeros', 'np.zeros', (['shape', 'ctype'], {}), '(shape, ctype)\n', (14589, 14603), True, 'import numpy as np\n'), ((14785, 14806), 'numpy.ones', 'np.ones', (['shape', 'ctype'], {}), '(shape, ctype)\n', (14792, 14806), True, 'import numpy as np\n'), ((15849, 15881), 'cntk.transpose', 'C.transpose', (['y'], {'perm': 'permutation'}), '(y, perm=permutation)\n', (15860, 15881), True, 
'import cntk as C\n'), ((17108, 17131), 'cntk.swapaxes', 'C.swapaxes', (['x', 'i', '(i + 1)'], {}), '(x, i, i + 1)\n', (17118, 17131), True, 'import cntk as C\n'), ((17219, 17242), 'cntk.swapaxes', 'C.swapaxes', (['y', 'i', '(i - 1)'], {}), '(y, i, i - 1)\n', (17229, 17242), True, 'import cntk as C\n'), ((21328, 21369), 'cntk.splice', 'C.splice', (['*tmp'], {'axis': '(i - num_dynamic_axis)'}), '(*tmp, axis=i - num_dynamic_axis)\n', (21336, 21369), True, 'import cntk as C\n'), ((23066, 23083), 'cntk.Axis.all_axes', 'C.Axis.all_axes', ([], {}), '()\n', (23081, 23083), True, 'import cntk as C\n'), ((23395, 23460), 'cntk.reshape', 'C.reshape', (['result'], {'shape': '()', 'begin_axis': 'index', 'end_axis': '(index + 1)'}), '(result, shape=(), begin_axis=index, end_axis=index + 1)\n', (23404, 23460), True, 'import cntk as C\n'), ((24956, 24973), 'cntk.Axis.all_axes', 'C.Axis.all_axes', ([], {}), '()\n', (24971, 24973), True, 'import cntk as C\n'), ((26115, 26128), 'cntk.log', 'C.log', (['output'], {}), '(output)\n', (26120, 26128), True, 'import cntk as C\n'), ((26148, 26167), 'cntk.log', 'C.log', (['(1.0 - output)'], {}), '(1.0 - output)\n', (26153, 26167), True, 'import cntk as C\n'), ((29281, 29312), 'cntk.reduce_mean', 'C.reduce_mean', (['shift'], {'axis': 'axis'}), '(shift, axis=axis)\n', (29294, 29312), True, 'import cntk as C\n'), ((32296, 32319), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (32305, 32319), True, 'import cntk as C\n'), ((36429, 36469), 'cntk.ops.slice', 'C.ops.slice', (['inputs', 'time_axis', 'i', '(i + 1)'], {}), '(inputs, time_axis, i, i + 1)\n', (36440, 36469), True, 'import cntk as C\n'), ((37602, 37642), 'cntk.ops.slice', 'C.ops.slice', (['inputs', 'time_axis', 'i', '(i + 1)'], {}), '(inputs, time_axis, i, i + 1)\n', (37613, 37642), True, 'import cntk as C\n'), ((42941, 42959), 'cntk.sequence.last', 'C.sequence.last', (['s'], {}), '(s)\n', (42956, 42959), True, 'import cntk as C\n'), ((43957, 43968), 'cntk.square', 'C.square', (['x'], {}), '(x)\n', (43965, 43968), True, 'import cntk as C\n'), ((52185, 52256), 'cntk.pooling', 'C.pooling', (['x', 'C.AVG_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])\n', (52194, 52256), True, 'import cntk as C\n'), ((53073, 53144), 'cntk.pooling', 'C.pooling', (['x', 'C.AVG_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])\n', (53082, 53144), True, 'import cntk as C\n'), ((54195, 54203), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (54200, 54203), True, 'import cntk as C\n'), ((55923, 55959), 'cntk.combine', 'C.combine', (['[u.output for u in u_ops]'], {}), '([u.output for u in u_ops])\n', (55932, 55959), True, 'import cntk as C\n'), ((57442, 57473), 'cntk.combine', 'C.combine', (['self.metrics_outputs'], {}), '(self.metrics_outputs)\n', (57451, 57473), True, 'import cntk as C\n'), ((63010, 63045), 'cntk.pad', 'C.pad', (['x'], {'pattern': '[padding, (0, 0)]'}), '(x, pattern=[padding, (0, 0)])\n', (63015, 63045), True, 'import cntk as C\n'), ((63192, 63235), 'cntk.pad', 'C.pad', (['x'], {'pattern': '[(0, 0), padding, (0, 0)]'}), '(x, pattern=[(0, 0), padding, (0, 0)])\n', (63197, 63235), True, 'import cntk as C\n'), ((63857, 63896), 'cntk.constant', 'C.constant', ([], {'value': '(0)', 'shape': 'prefix_shape'}), '(value=0, shape=prefix_shape)\n', (63867, 63896), True, 'import cntk as C\n'), ((64115, 64155), 'cntk.constant', 'C.constant', ([], 
{'value': '(0)', 'shape': 'postfix_shape'}), '(value=0, shape=postfix_shape)\n', (64125, 64155), True, 'import cntk as C\n'), ((69046, 69063), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (69056, 69063), True, 'import numpy as np\n'), ((70353, 70368), 'cntk.greater', 'C.greater', (['x', '(0)'], {}), '(x, 0)\n', (70362, 70368), True, 'import cntk as C\n'), ((73799, 73828), 'cntk.Axis.default_dynamic_axis', 'C.Axis.default_dynamic_axis', ([], {}), '()\n', (73826, 73828), True, 'import cntk as C\n'), ((78362, 78472), 'warnings.warn', 'warnings.warn', (['"""CNTK backend warning: CNTK version not detected. Will using CNTK 2.0 GA as default."""'], {}), "(\n 'CNTK backend warning: CNTK version not detected. Will using CNTK 2.0 GA as default.'\n )\n", (78375, 78472), False, 'import warnings\n'), ((78893, 78965), 'cntk.output_variable', 'C.output_variable', (['self.target_shape', 'self.inputs[0].dtype', '[batch_axis]'], {}), '(self.target_shape, self.inputs[0].dtype, [batch_axis])\n', (78910, 78965), True, 'import cntk as C\n'), ((79207, 79236), 'numpy.asarray', 'np.asarray', (['self.target_shape'], {}), '(self.target_shape)\n', (79217, 79236), True, 'import numpy as np\n'), ((79394, 79417), 'cntk.cntk_py.Value', 'C.cntk_py.Value', (['result'], {}), '(result)\n', (79409, 79417), True, 'import cntk as C\n'), ((79640, 79667), 'numpy.asarray', 'np.asarray', (['self.from_shape'], {}), '(self.from_shape)\n', (79650, 79667), True, 'import numpy as np\n'), ((80395, 80474), 'cntk.output_variable', 'C.output_variable', (['self.inputs[0].shape[1:]', 'self.inputs[0].dtype', '[batch_axis]'], {}), '(self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])\n', (80412, 80474), True, 'import cntk as C\n'), ((81357, 81419), 'cntk.output_variable', 'C.output_variable', (['self.target_shape', 'self.inputs[0].dtype', '[]'], {}), '(self.target_shape, self.inputs[0].dtype, [])\n', (81374, 81419), True, 'import cntk as C\n'), ((82085, 82180), 'cntk.output_variable', 'C.output_variable', (['self.inputs[0].shape', 'self.inputs[0].dtype', 'self.inputs[0].dynamic_axes'], {}), '(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0\n ].dynamic_axes)\n', (82102, 82180), True, 'import cntk as C\n'), ((31418, 31563), 'warnings.warn', 'warnings.warn', (['"""Warning: CNTK backend does not support collapse of batch axis with inferred dimension. The reshape did not take place."""'], {}), "(\n 'Warning: CNTK backend does not support collapse of batch axis with inferred dimension. 
The reshape did not take place.'\n )\n", (31431, 31563), False, 'import warnings\n'), ((36834, 36872), 'cntk.ops.slice', 'C.ops.slice', (['mask', 'time_axis', 'i', '(i + 1)'], {}), '(mask, time_axis, i, i + 1)\n', (36845, 36872), True, 'import cntk as C\n'), ((37117, 37170), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'output', 'prev_output'], {}), '(mask_slice, output, prev_output)\n', (37137, 37170), True, 'import cntk as C\n'), ((37999, 38037), 'cntk.ops.slice', 'C.ops.slice', (['mask', 'time_axis', 'i', '(i + 1)'], {}), '(mask, time_axis, i, i + 1)\n', (38010, 38037), True, 'import cntk as C\n'), ((38274, 38327), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'output', 'prev_output'], {}), '(mask_slice, output, prev_output)\n', (38294, 38327), True, 'import cntk as C\n'), ((41945, 41987), 'cntk.placeholder', 'C.placeholder', ([], {'dynamic_axes': 'x.dynamic_axes'}), '(dynamic_axes=x.dynamic_axes)\n', (41958, 41987), True, 'import cntk as C\n'), ((56690, 56746), 'cntk.cntk_py.universal_learner', 'C.cntk_py.universal_learner', (['p_list', 'u_list', 'update_func'], {}), '(p_list, u_list, update_func)\n', (56717, 56746), True, 'import cntk as C\n'), ((56950, 57001), 'cntk.trainer.Trainer', 'C.trainer.Trainer', (['outputs[0]', 'criterion', '[learner]'], {}), '(outputs[0], criterion, [learner])\n', (56967, 57001), True, 'import cntk as C\n'), ((57264, 57312), 'cntk.combine', 'C.combine', (['[_.output for _ in unrelated_updates]'], {}), '([_.output for _ in unrelated_updates])\n', (57273, 57312), True, 'import cntk as C\n'), ((57719, 57750), 'cntk.combine', 'C.combine', (['self.metrics_outputs'], {}), '(self.metrics_outputs)\n', (57728, 57750), True, 'import cntk as C\n'), ((58902, 58919), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (58912, 58919), True, 'import numpy as np\n'), ((79141, 79168), 'numpy.asarray', 'np.asarray', (['self.from_shape'], {}), '(self.from_shape)\n', (79151, 79168), True, 'import numpy as np\n'), ((79572, 79601), 'numpy.asarray', 'np.asarray', (['self.target_shape'], {}), '(self.target_shape)\n', (79582, 79601), True, 'import numpy as np\n'), ((11650, 11680), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p', 'size'], {}), '(1, p, size)\n', (11668, 11680), True, 'import numpy as np\n'), ((28425, 28455), 'cntk.reduce_mean', 'C.reduce_mean', (['gamma', '(axis - 1)'], {}), '(gamma, axis - 1)\n', (28438, 28455), True, 'import cntk as C\n'), ((28483, 28512), 'cntk.reduce_mean', 'C.reduce_mean', (['beta', '(axis - 1)'], {}), '(beta, axis - 1)\n', (28496, 28512), True, 'import cntk as C\n'), ((30621, 30632), 'cntk.sqrt', 'C.sqrt', (['var'], {}), '(var)\n', (30627, 30632), True, 'import cntk as C\n'), ((40197, 40210), 'cntk.to_batch', 'C.to_batch', (['s'], {}), '(s)\n', (40207, 40210), True, 'import cntk as C\n'), ((42121, 42148), 'cntk.sequence.past_value', 'C.sequence.past_value', (['p', 's'], {}), '(p, s)\n', (42142, 42148), True, 'import cntk as C\n'), ((42479, 42504), 'cntk.element_select', 'C.element_select', (['m', 'n', 's'], {}), '(m, n, s)\n', (42495, 42504), True, 'import cntk as C\n'), ((43463, 43482), 'cntk.unpack_batch', 'C.unpack_batch', (['l_s'], {}), '(l_s)\n', (43477, 43482), True, 'import cntk as C\n'), ((54743, 54756), 'cntk.log', 'C.log', (['output'], {}), '(output)\n', (54748, 54756), True, 'import cntk as C\n'), ((74098, 74125), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (74123, 74125), True, 'import cntk as C\n'), ((37328, 37368), 
'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'n_s', 's'], {}), '(mask_slice, n_s, s)\n', (37348, 37368), True, 'import cntk as C\n'), ((38485, 38525), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'n_s', 's'], {}), '(mask_slice, n_s, s)\n', (38505, 38525), True, 'import cntk as C\n'), ((41365, 41410), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['constant', 'rnn_inputs'], {}), '(constant, rnn_inputs)\n', (41388, 41410), True, 'import cntk as C\n'), ((55663, 55693), 'cntk.assign', 'C.assign', (['update[0]', 'update[1]'], {}), '(update[0], update[1])\n', (55671, 55693), True, 'import cntk as C\n'), ((41099, 41137), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['c', 'rnn_inputs'], {}), '(c, rnn_inputs)\n', (41122, 41137), True, 'import cntk as C\n')]
|
import torch
import torch.nn as nn
import numpy as np
import math
class ForwardKinematics:
def __init__(self, args, edges):
self.topology = [-1] * (len(edges) + 1)
self.rotation_map = []
for i, edge in enumerate(edges):
self.topology[edge[1]] = edge[0]
self.rotation_map.append(edge[1])
self.world = args.fk_world
self.pos_repr = args.pos_repr
self.quater = args.rotation == 'quaternion'
def forward_from_raw(self, raw, offset, world=None, quater=None):
if world is None: world = self.world
if quater is None: quater = self.quater
if self.pos_repr == '3d':
position = raw[:, -3:, :]
rotation = raw[:, :-3, :]
elif self.pos_repr == '4d':
raise Exception('Not support')
if quater:
rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1]))
identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device)
else:
rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1]))
identity = torch.zeros((3, ), dtype=torch.float, device=raw.device)
identity = identity.reshape((1, 1, -1, 1))
new_shape = list(rotation.shape)
new_shape[1] += 1
new_shape[2] = 1
rotation_final = identity.repeat(new_shape)
for i, j in enumerate(self.rotation_map):
rotation_final[:, j, :, :] = rotation[:, i, :, :]
return self.forward(rotation_final, position, offset, world=world, quater=quater)
'''
rotation should have shape batch_size * Joint_num * (3/4) * Time
position should have shape batch_size * 3 * Time
offset should have shape batch_size * Joint_num * 3
    output has shape batch_size * Time * Joint_num * 3
'''
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True):
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
#norm[norm < 1e-10] = 1
rotation = rotation / norm
if quater:
transform = self.transform_from_quaternion(rotation)
else:
transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
for i, pi in enumerate(self.topology):
if pi == -1:
assert i == 0
continue
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])
result[..., i, :] = torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze()
if world: result[..., i, :] += result[..., pi, :]
return result
def from_local_to_world(self, res: torch.Tensor):
res = res.clone()
for i, pi in enumerate(self.topology):
if pi == 0 or pi == -1:
continue
res[..., i, :] += res[..., pi, :]
return res
@staticmethod
def transform_from_euler(rotation, order):
rotation = rotation / 180 * math.pi
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),
ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
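    # Illustrative reminder: for axis == 'z' the assignments above produce the
    # standard rotation matrix [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]];
    # the 'x' and 'y' branches build the analogous rotations about those axes.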
@staticmethod
def transform_from_quaternion(quater: torch.Tensor):
qw = quater[..., 0]
qx = quater[..., 1]
qy = quater[..., 2]
qz = quater[..., 3]
x2 = qx + qx
y2 = qy + qy
z2 = qz + qz
xx = qx * x2
yy = qy * y2
wx = qw * x2
xy = qx * y2
yz = qy * z2
wy = qw * y2
xz = qx * z2
zz = qz * z2
wz = qw * z2
m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
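# Minimal usage sketch (illustrative; `args` and `edges` come from the
# surrounding project and are assumed here): with J joints and T frames,
# quaternion rotations have shape (batch, J, 4, T), root positions
# (batch, 3, T), offsets (batch, J, 3), and forward() returns (batch, T, J, 3).
#
#     fk = ForwardKinematics(args, edges)
#     rot = torch.randn(2, 24, 4, 60)      # batch=2, J=24 joints, T=60 frames
#     pos = torch.randn(2, 3, 60)
#     off = torch.randn(2, 24, 3)
#     glb = fk.forward(rot, pos, off, quater=True)   # -> (2, 60, 24, 3)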
class InverseKinematics:
def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains):
self.rotations = rotations
self.rotations.requires_grad_(True)
self.position = positions
self.position.requires_grad_(True)
self.parents = parents
self.offset = offset
self.constrains = constrains
self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999))
self.crit = nn.MSELoss()
def step(self):
self.optimizer.zero_grad()
glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True)
loss = self.crit(glb, self.constrains)
loss.backward()
self.optimizer.step()
self.glb = glb
return loss.item()
def tloss(self, time):
return self.crit(self.glb[time, :], self.constrains[time, :])
def all_loss(self):
res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])]
return np.array(res)
'''
rotation should have shape batch_size * Joint_num * (3/4) * Time
position should have shape batch_size * 3 * Time
offset should have shape batch_size * Joint_num * 3
    output has shape batch_size * Time * Joint_num * 3
'''
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False,
world=True):
'''
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
'''
result = torch.empty(rotation.shape[:-1] + (3,), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
rotation = rotation / norm
if quater:
transform = self.transform_from_quaternion(rotation)
else:
transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
for i, pi in enumerate(self.parents):
if pi == -1:
assert i == 0
continue
result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze()
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])
if world: result[..., i, :] += result[..., pi, :]
return result
@staticmethod
def transform_from_euler(rotation, order):
rotation = rotation / 180 * math.pi
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),
ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
@staticmethod
def transform_from_quaternion(quater: torch.Tensor):
qw = quater[..., 0]
qx = quater[..., 1]
qy = quater[..., 2]
qz = quater[..., 3]
x2 = qx + qx
y2 = qy + qy
z2 = qz + qz
xx = qx * x2
yy = qy * y2
wx = qw * x2
xy = qx * y2
yz = qy * z2
wy = qw * y2
xz = qx * z2
zz = qz * z2
wz = qw * z2
m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
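# Minimal optimization-loop sketch (illustrative; all tensors are hypothetical
# and expected to follow the shapes documented above):
#
#     ik = InverseKinematics(rotations, positions, offset, parents, constrains)
#     for _ in range(200):
#         loss = ik.step()          # one Adam update towards the constraints
#     per_frame = ik.all_loss()     # per-time-step MSE after optimization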
|
[
"torch.optim.Adam",
"torch.sin",
"torch.nn.MSELoss",
"torch.norm",
"torch.cos",
"numpy.array",
"torch.tensor",
"torch.matmul",
"torch.empty",
"torch.zeros"
] |
[((2281, 2344), 'torch.empty', 'torch.empty', (['(rotation.shape[:-1] + (3,))'], {'device': 'position.device'}), '(rotation.shape[:-1] + (3,), device=position.device)\n', (2292, 2344), False, 'import torch\n'), ((2363, 2405), 'torch.norm', 'torch.norm', (['rotation'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rotation, dim=-1, keepdim=True)\n', (2373, 2405), False, 'import torch\n'), ((3956, 4015), 'torch.empty', 'torch.empty', (['(euler.shape[0:3] + (3, 3))'], {'device': 'euler.device'}), '(euler.shape[0:3] + (3, 3), device=euler.device)\n', (3967, 4015), False, 'import torch\n'), ((4030, 4046), 'torch.cos', 'torch.cos', (['euler'], {}), '(euler)\n', (4039, 4046), False, 'import torch\n'), ((4061, 4077), 'torch.sin', 'torch.sin', (['euler'], {}), '(euler)\n', (4070, 4077), False, 'import torch\n'), ((5192, 5253), 'torch.empty', 'torch.empty', (['(quater.shape[:-1] + (3, 3))'], {'device': 'quater.device'}), '(quater.shape[:-1] + (3, 3), device=quater.device)\n', (5203, 5253), False, 'import torch\n'), ((5985, 6064), 'torch.optim.Adam', 'torch.optim.Adam', (['[self.position, self.rotations]'], {'lr': '(0.001)', 'betas': '(0.9, 0.999)'}), '([self.position, self.rotations], lr=0.001, betas=(0.9, 0.999))\n', (6001, 6064), False, 'import torch\n'), ((6084, 6096), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6094, 6096), True, 'import torch.nn as nn\n'), ((6636, 6649), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6644, 6649), True, 'import numpy as np\n'), ((7394, 7457), 'torch.empty', 'torch.empty', (['(rotation.shape[:-1] + (3,))'], {'device': 'position.device'}), '(rotation.shape[:-1] + (3,), device=position.device)\n', (7405, 7457), False, 'import torch\n'), ((7474, 7516), 'torch.norm', 'torch.norm', (['rotation'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rotation, dim=-1, keepdim=True)\n', (7484, 7516), False, 'import torch\n'), ((8780, 8839), 'torch.empty', 'torch.empty', (['(euler.shape[0:3] + (3, 3))'], {'device': 'euler.device'}), '(euler.shape[0:3] + (3, 3), device=euler.device)\n', (8791, 8839), False, 'import torch\n'), ((8854, 8870), 'torch.cos', 'torch.cos', (['euler'], {}), '(euler)\n', (8863, 8870), False, 'import torch\n'), ((8885, 8901), 'torch.sin', 'torch.sin', (['euler'], {}), '(euler)\n', (8894, 8901), False, 'import torch\n'), ((10016, 10077), 'torch.empty', 'torch.empty', (['(quater.shape[:-1] + (3, 3))'], {'device': 'quater.device'}), '(quater.shape[:-1] + (3, 3), device=quater.device)\n', (10027, 10077), False, 'import torch\n'), ((950, 1014), 'torch.tensor', 'torch.tensor', (['(1, 0, 0, 0)'], {'dtype': 'torch.float', 'device': 'raw.device'}), '((1, 0, 0, 0), dtype=torch.float, device=raw.device)\n', (962, 1014), False, 'import torch\n'), ((1140, 1195), 'torch.zeros', 'torch.zeros', (['(3,)'], {'dtype': 'torch.float', 'device': 'raw.device'}), '((3,), dtype=torch.float, device=raw.device)\n', (1151, 1195), False, 'import torch\n'), ((2925, 2988), 'torch.matmul', 'torch.matmul', (['transform[..., pi, :, :]', 'transform[..., i, :, :]'], {}), '(transform[..., pi, :, :], transform[..., i, :, :])\n', (2937, 2988), False, 'import torch\n'), ((8105, 8168), 'torch.matmul', 'torch.matmul', (['transform[..., pi, :, :]', 'transform[..., i, :, :]'], {}), '(transform[..., pi, :, :], transform[..., i, :, :])\n', (8117, 8168), False, 'import torch\n'), ((3021, 3080), 'torch.matmul', 'torch.matmul', (['transform[..., i, :, :]', 'offset[..., i, :, :]'], {}), '(transform[..., i, :, :], offset[..., i, :, :])\n', (3033, 3080), False, 'import torch\n'), ((7996, 8056), 
'torch.matmul', 'torch.matmul', (['transform[..., pi, :, :]', 'offset[..., i, :, :]'], {}), '(transform[..., pi, :, :], offset[..., i, :, :])\n', (8008, 8056), False, 'import torch\n')]
|
import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad
logger = get_logger('UNet3DPredictor')
class _AbstractPredictor:
def __init__(self, model, loader, output_file, config, **kwargs):
self.model = model
self.loader = loader
self.output_file = output_file
self.config = config
self.predictor_config = kwargs
@staticmethod
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
@staticmethod
def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
def predict(self):
raise NotImplementedError
class StandardPredictor(_AbstractPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM,
    use `LazyPredictor` instead.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def predict(self):
out_channels = self.config['model'].get('out_channels')
if out_channels is None:
out_channels = self.config['model']['dt_out_channels']
prediction_channel = self.config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} batches...')
        # dimensionality of the output predictions
volume_shape = self._volume_shape(self.loader.dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')
# create destination H5 file
h5_output_file = h5py.File(self.output_file, 'w')
# allocate prediction and normalization arrays
logger.info('Allocating prediction and normalization arrays...')
prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
output_heads, h5_output_file)
# Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
self.model.eval()
# Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# send batch to device
batch = batch.to(device)
# forward pass
predictions = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
# for each output head
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# convert to numpy array
prediction = prediction.cpu().numpy()
# for each batch sample
for pred, index in zip(prediction, indices):
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
index = (channel_slice,) + index
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
pred = np.expand_dims(pred[prediction_channel], axis=0)
logger.info(f'Saving predictions for slice:{index}...')
if avoid_block_artifacts:
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = unpad(pred, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
else:
# accumulate probabilities into the output prediction array
prediction_map[index] += pred
# count voxel visits for normalization
normalization_mask[index] += 1
# save results to
self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
# close the output H5 file
h5_output_file.close()
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# initialize the output prediction arrays
prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
return prediction_maps, normalization_masks
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
# save probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
prediction_datasets):
prediction_map = prediction_map / normalization_mask
if dataset.mirror_padding:
pad_width = dataset.pad_width
logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
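# --- Illustrative usage sketch (not part of the original module, example values only) ---
# A minimal sketch of how the StandardPredictor above could be driven. The way `model`,
# `loader` and `config` are obtained is an assumption here; in the real pipeline they come
# from the training/dataset configuration. `config` is expected to carry the 'model' and
# 'device' entries read inside predict(), and 'predictions.h5' is just an example path.
def _example_standard_predictor_usage(model, loader, config):
    predictor = StandardPredictor(model, loader, 'predictions.h5', config)
    # runs the network over all patches and writes the 'predictions{n}' datasets to the H5 file
    predictor.predict()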
class LazyPredictor(StandardPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# allocate datasets for probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
prediction_maps = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
compression='gzip')
for dataset_name in prediction_datasets]
# allocate datasets for normalization masks
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
normalization_masks = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
compression='gzip')
for dataset_name in normalization_datasets]
return prediction_maps, normalization_masks
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
if dataset.mirror_padding:
logger.warn(
f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
# normalize the prediction_maps inside the H5
for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
normalization_masks,
prediction_datasets,
normalization_datasets):
# split the volume into 4 parts and load each into the memory separately
logger.info(f'Normalizing {prediction_dataset}...')
z, y, x = prediction_map.shape[1:]
# take slices which are 1/27 of the original volume
patch_shape = (z // 3, y // 3, x // 3)
for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
logger.info(f'Normalizing slice: {index}')
prediction_map[index] /= normalization_mask[index]
# make sure to reset the slice that has been visited already in order to avoid 'double' normalization
# when the patches overlap with each other
normalization_mask[index] = 1
logger.info(f'Deleting {normalization_dataset}...')
del output_file[normalization_dataset]
class EmbeddingsPredictor(_AbstractPredictor):
"""
Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.
    The resulting volume is the segmentation itself (not the embedding vectors), obtained by clustering the embeddings
    with the HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
"""
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
self.iou_threshold = iou_threshold
self.noise_label = noise_label
self.clustering = clustering
assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
logger.info(f'IoU threshold: {iou_threshold}')
self.clustering_name = clustering
self.clustering = self._get_clustering(clustering, kwargs)
def predict(self):
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} patches...')
        # dimensionality of the output segmentation
volume_shape = self._volume_shape(self.loader.dataset)
logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')
logger.info('Allocating segmentation array...')
# initialize the output prediction arrays
output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
# initialize visited_voxels arrays
visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly
self.model.eval()
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# logger.info(f'Predicting embeddings for slice:{index}')
# send batch to device
batch = batch.to(device)
# forward pass
embeddings = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
embeddings = [embeddings]
for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
visited_voxels_arrays):
# convert to numpy array
prediction = prediction.cpu().numpy()
# iterate sequentially because of the current simple stitching that we're using
for pred, index in zip(prediction, indices):
# convert embeddings to segmentation with hdbscan clustering
segmentation = self._embeddings_to_segmentation(pred)
# stitch patches
self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)
# save results
with h5py.File(self.output_file, 'w') as output_file:
prediction_datasets = self._get_output_dataset_names(output_heads,
prefix=f'segmentation/{self.clustering_name}')
for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")
def _embeddings_to_segmentation(self, embeddings):
"""
Cluster embeddings vectors with HDBSCAN and return the segmented volume.
Args:
embeddings (ndarray): 4D (CDHW) embeddings tensor
Returns:
3D (DHW) segmentation
"""
# shape of the output segmentation
output_shape = embeddings.shape[1:]
# reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()
logger.info('Clustering embeddings...')
# perform clustering and reshape in order to get the segmentation volume
start = time.time()
clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
logger.info(
f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
return clusters
def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
"""
Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels
merge the segmented patch (`segmentation`) into the `output_segmentation`
Args:
segmentation (ndarray): segmented patch
index (tuple): position of the patch inside `output_segmentation` volume
output_segmentation (ndarray): current state of the output segmentation
visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited
voxels will be marked by a number greater than 0
"""
index = tuple(index)
# get new unassigned label
max_label = np.max(output_segmentation) + 1
# make sure there are no clashes between current segmentation patch and the output_segmentation
# but keep the noise label
noise_mask = segmentation == self.noise_label
segmentation += int(max_label)
segmentation[noise_mask] = self.noise_label
# get the overlap mask in the current patch
overlap_mask = visited_voxels_array[index] > 0
# get the new labels inside the overlap_mask
new_labels = np.unique(segmentation[overlap_mask])
merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
# relabel new segmentation with the merged labels
for current_label, new_label in merged_labels:
segmentation[segmentation == new_label] = current_label
# update the output_segmentation
output_segmentation[index] = segmentation
# visit the patch
visited_voxels_array[index] += 1
def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
def _most_frequent_label(labels):
unique, counts = np.unique(labels, return_counts=True)
ind = np.argmax(counts)
return unique[ind]
result = []
# iterate over new_labels and merge regions if the IoU exceeds a given threshold
for new_label in new_labels:
# skip 'noise' label assigned by hdbscan
if new_label == self.noise_label:
continue
new_label_mask = new_segmentation == new_label
# get only the most frequent overlapping label
most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
# skip 'noise' label
if most_frequent_label == self.noise_label:
continue
current_label_mask = current_segmentation == most_frequent_label
# compute Jaccard index
iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
current_label_mask).sum()
if iou > self.iou_threshold:
# merge labels
result.append((most_frequent_label, new_label))
return result
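    # Worked example (illustrative): for masks a = [True, True, False] and b = [True, False, False],
    # np.bitwise_and(a, b).sum() == 1 and np.bitwise_or(a, b).sum() == 2, i.e. an IoU of 0.5,
    # which would only trigger a merge if iou_threshold were below 0.5.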
def _get_clustering(self, clustering_alg, kwargs):
logger.info(f'Using {clustering_alg} for clustering')
if clustering_alg == 'hdbscan':
min_cluster_size = kwargs.get('min_cluster_size', 50)
            min_samples = kwargs.get('min_samples', None)  # no trailing comma: must be an int or None, not a tuple
metric = kwargs.get('metric', 'euclidean')
cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')
logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
else:
bandwidth = kwargs['bandwidth']
logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
# use fast MeanShift with bin seeding
return MeanShift(bandwidth=bandwidth, bin_seeding=True)
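# --- Illustrative sketch (not part of the original module, example values only) ---
# A minimal, self-contained illustration of the embedding-to-segmentation step performed by
# EmbeddingsPredictor._embeddings_to_segmentation above: flatten a (C, D, H, W) embedding
# volume to (D*H*W, C), cluster the embedding vectors, and reshape the labels back to
# (D, H, W). The random embeddings and the HDBSCAN parameters are assumptions for illustration.
def _example_embeddings_clustering():
    embeddings = np.random.rand(8, 4, 4, 4).astype('float32')  # (C, D, H, W)
    output_shape = embeddings.shape[1:]
    flattened = embeddings.reshape(embeddings.shape[0], -1).transpose()  # (D*H*W, C)
    clusterer = hdbscan.HDBSCAN(min_cluster_size=10)
    # one integer label per voxel; -1 marks noise, as handled by _merge_segmentation above
    return clusterer.fit_predict(flattened).reshape(output_shape)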
|
[
"numpy.bitwise_or",
"numpy.unique",
"sklearn.cluster.MeanShift",
"pytorch3dunet.unet3d.utils.unpad",
"pytorch3dunet.datasets.hdf5.SliceBuilder._build_slices",
"numpy.argmax",
"h5py.File",
"pytorch3dunet.unet3d.utils.get_logger",
"numpy.max",
"numpy.zeros",
"numpy.bitwise_and",
"numpy.expand_dims",
"torch.no_grad",
"time.time",
"hdbscan.HDBSCAN"
] |
[((269, 298), 'pytorch3dunet.unet3d.utils.get_logger', 'get_logger', (['"""UNet3DPredictor"""'], {}), "('UNet3DPredictor')\n", (279, 298), False, 'from pytorch3dunet.unet3d.utils import get_logger\n'), ((3293, 3325), 'h5py.File', 'h5py.File', (['self.output_file', '"""w"""'], {}), "(self.output_file, 'w')\n", (3302, 3325), False, 'import h5py\n'), ((16480, 16491), 'time.time', 'time.time', ([], {}), '()\n', (16489, 16491), False, 'import time\n'), ((18073, 18110), 'numpy.unique', 'np.unique', (['segmentation[overlap_mask]'], {}), '(segmentation[overlap_mask])\n', (18082, 18110), True, 'import numpy as np\n'), ((3991, 4006), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4004, 4006), False, 'import torch\n'), ((6737, 6776), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': '"""float32"""'}), "(output_shape, dtype='float32')\n", (6745, 6776), True, 'import numpy as np\n'), ((6939, 6976), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': '"""uint8"""'}), "(output_shape, dtype='uint8')\n", (6947, 6976), True, 'import numpy as np\n'), ((11449, 11546), 'pytorch3dunet.datasets.hdf5.SliceBuilder._build_slices', 'SliceBuilder._build_slices', (['prediction_map'], {'patch_shape': 'patch_shape', 'stride_shape': 'patch_shape'}), '(prediction_map, patch_shape=patch_shape,\n stride_shape=patch_shape)\n', (11475, 11546), False, 'from pytorch3dunet.datasets.hdf5 import SliceBuilder\n'), ((13521, 13558), 'numpy.zeros', 'np.zeros', (['volume_shape'], {'dtype': '"""int32"""'}), "(volume_shape, dtype='int32')\n", (13529, 13558), True, 'import numpy as np\n'), ((13665, 13702), 'numpy.zeros', 'np.zeros', (['volume_shape'], {'dtype': '"""uint8"""'}), "(volume_shape, dtype='uint8')\n", (13673, 13702), True, 'import numpy as np\n'), ((13917, 13932), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13930, 13932), False, 'import torch\n'), ((15241, 15273), 'h5py.File', 'h5py.File', (['self.output_file', '"""w"""'], {}), "(self.output_file, 'w')\n", (15250, 15273), False, 'import h5py\n'), ((17576, 17603), 'numpy.max', 'np.max', (['output_segmentation'], {}), '(output_segmentation)\n', (17582, 17603), True, 'import numpy as np\n'), ((18700, 18737), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (18709, 18737), True, 'import numpy as np\n'), ((18756, 18773), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (18765, 18773), True, 'import numpy as np\n'), ((20450, 20595), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'min_cluster_size': 'min_cluster_size', 'min_samples': 'min_samples', 'metric': 'metric', 'cluster_selection_method': 'cluster_selection_method'}), '(min_cluster_size=min_cluster_size, min_samples=min_samples,\n metric=metric, cluster_selection_method=cluster_selection_method)\n', (20465, 20595), False, 'import hdbscan\n'), ((20842, 20890), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)'}), '(bandwidth=bandwidth, bin_seeding=True)\n', (20851, 20890), False, 'from sklearn.cluster import MeanShift\n'), ((16666, 16682), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (16672, 16682), True, 'import numpy as np\n'), ((16696, 16707), 'time.time', 'time.time', ([], {}), '()\n', (16705, 16707), False, 'import time\n'), ((19532, 19582), 'numpy.bitwise_and', 'np.bitwise_and', (['new_label_mask', 'current_label_mask'], {}), '(new_label_mask, current_label_mask)\n', (19546, 19582), True, 'import numpy as np\n'), ((19591, 19640), 'numpy.bitwise_or', 'np.bitwise_or', 
(['new_label_mask', 'current_label_mask'], {}), '(new_label_mask, current_label_mask)\n', (19604, 19640), True, 'import numpy as np\n'), ((5415, 5463), 'numpy.expand_dims', 'np.expand_dims', (['pred[prediction_channel]'], {'axis': '(0)'}), '(pred[prediction_channel], axis=0)\n', (5429, 5463), True, 'import numpy as np\n'), ((5749, 5781), 'pytorch3dunet.unet3d.utils.unpad', 'unpad', (['pred', 'index', 'volume_shape'], {}), '(pred, index, volume_shape)\n', (5754, 5781), False, 'from pytorch3dunet.unet3d.utils import unpad\n')]
|
"""
Random Variables.
This module implements random variables. Random variables are the main inputs and outputs
of probabilistic numerical methods.
"""
from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union
import numpy as np
from probnum import utils as _utils
from probnum.type import (
ArrayLikeGetitemArgType,
DTypeArgType,
FloatArgType,
RandomStateArgType,
RandomStateType,
ShapeArgType,
ShapeType,
)
try:
# functools.cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
_ValueType = TypeVar("ValueType")
class RandomVariable(Generic[_ValueType]):
"""
Random variables are the main objects used by probabilistic numerical methods.
Every probabilistic numerical method takes a random variable encoding the prior
distribution as input and outputs a random variable whose distribution encodes the
uncertainty arising from finite computation. The generic signature of a
probabilistic numerical method is:
``output_rv = probnum_method(input_rv, method_params)``
In practice, most random variables used by methods in ProbNum have Dirac or Gaussian
measure.
Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and
linear operators. This may change their ``distribution`` and not necessarily all
previously available methods are retained.
The internals of :class:`RandomVariable` objects are assumed to be constant over
their whole lifecycle. This is due to the caches used to make certain computations
more efficient. As a consequence, altering the internal state of a
:class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will result in
undefined behavior. In particular, this should be kept in mind when subclassing
:class:`RandomVariable` or any of its descendants.
Parameters
----------
shape :
Shape of realizations of this random variable.
dtype :
Data type of realizations of this random variable. If ``object`` will be
converted to ``numpy.dtype``.
as_value_type :
Function which can be used to transform user-supplied arguments, interpreted as
realizations of this random variable, to an easy-to-process, normalized format.
Will be called internally to transform the argument of functions like
``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in
:class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in
:class:`ContinuousRandomVariable`), and potentially by similar functions in
subclasses.
For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf``
both only work on :class:`np.float_` arguments, but we still want the user to be
able to pass Python :class:`float`. Then ``as_value_type`` should be set to
something like ``lambda x: np.float64(x)``.
See Also
--------
asrandvar : Transform into a :class:`RandomVariable`.
Examples
--------
"""
# pylint: disable=too-many-instance-attributes,too-many-public-methods
def __init__(
self,
shape: ShapeArgType,
dtype: DTypeArgType,
random_state: RandomStateArgType = None,
parameters: Optional[Dict[str, Any]] = None,
sample: Optional[Callable[[ShapeType], _ValueType]] = None,
in_support: Optional[Callable[[_ValueType], bool]] = None,
cdf: Optional[Callable[[_ValueType], np.float_]] = None,
logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
mode: Optional[Callable[[], _ValueType]] = None,
median: Optional[Callable[[], _ValueType]] = None,
mean: Optional[Callable[[], _ValueType]] = None,
cov: Optional[Callable[[], _ValueType]] = None,
var: Optional[Callable[[], _ValueType]] = None,
std: Optional[Callable[[], _ValueType]] = None,
entropy: Optional[Callable[[], np.float_]] = None,
as_value_type: Optional[Callable[[Any], _ValueType]] = None,
):
# pylint: disable=too-many-arguments,too-many-locals
"""Create a new random variable."""
self.__shape = _utils.as_shape(shape)
# Data Types
self.__dtype = np.dtype(dtype)
self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype)
self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype)
self._random_state = _utils.as_random_state(random_state)
# Probability distribution of the random variable
self.__parameters = parameters.copy() if parameters is not None else {}
self.__sample = sample
self.__in_support = in_support
self.__cdf = cdf
self.__logcdf = logcdf
self.__quantile = quantile
# Properties of the random variable
self.__mode = mode
self.__median = median
self.__mean = mean
self.__cov = cov
self.__var = var
self.__std = std
self.__entropy = entropy
# Utilities
self.__as_value_type = as_value_type
def __repr__(self) -> str:
return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>"
@property
def shape(self) -> ShapeType:
"""Shape of realizations of the random variable."""
return self.__shape
@cached_property
def ndim(self) -> int:
return len(self.__shape)
@cached_property
def size(self) -> int:
return int(np.prod(self.__shape))
@property
def dtype(self) -> np.dtype:
"""Data type of (elements of) a realization of this random variable."""
return self.__dtype
@property
def median_dtype(self) -> np.dtype:
"""The dtype of the :attr:`median`. It will be set to the dtype arising from
the multiplication of values with dtypes :attr:`dtype` and :class:`np.float_`.
This is motivated by the fact that, even for discrete random variables, e.g.
integer-valued random variables, the :attr:`median` might lie in between two
values in which case these values are averaged. For example, a uniform random
variable on :math:`\\{ 1, 2, 3, 4 \\}` will have a median of :math:`2.5`.
"""
return self.__median_dtype
@property
def moment_dtype(self) -> np.dtype:
"""The dtype of any (function of a) moment of the random variable, e.g. its
:attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to the
dtype arising from the multiplication of values with dtypes :attr:`dtype`
and :class:`np.float_`. This is motivated by the mathematical definition of a
moment as a sum or an integral over products of probabilities and values of the
random variable, which are represented as using the dtypes :class:`np.float_`
and :attr:`dtype`, respectively.
"""
return self.__moment_dtype
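    # For example (illustrative): np.promote_types(np.int32, np.float_) is np.float64, so an
    # integer-valued random variable still gets float-valued moments such as its mean and cov.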
@property
def random_state(self) -> RandomStateType:
"""Random state of the random variable.
This attribute defines the RandomState object to use for drawing
realizations from this random variable.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local :class:`~numpy.random.RandomState`
instance.
"""
return self._random_state
@random_state.setter
def random_state(self, seed: RandomStateArgType):
"""Get or set the RandomState object of the underlying distribution.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
self._random_state = _utils.as_random_state(seed)
@property
def parameters(self) -> Dict[str, Any]:
"""
Parameters of the probability distribution.
        The parameters of the distribution, such as mean, variance, et cetera, are stored in a
``dict``.
"""
return self.__parameters.copy()
@cached_property
def mode(self) -> _ValueType:
"""
Mode of the random variable.
Returns
-------
mode : float
The mode of the random variable.
"""
if self.__mode is None:
raise NotImplementedError
mode = self.__mode()
RandomVariable._check_property_value(
"mode",
mode,
shape=self.__shape,
dtype=self.__dtype,
)
# Make immutable
if isinstance(mode, np.ndarray):
mode.setflags(write=False)
return mode
@cached_property
def median(self) -> _ValueType:
"""
Median of the random variable.
To learn about the dtype of the median, see :attr:`median_dtype`.
Returns
-------
median : float
The median of the distribution.
"""
if self.__shape != ():
raise NotImplementedError(
"The median is only defined for scalar random variables."
)
median = self.__median()
RandomVariable._check_property_value(
"median",
median,
shape=self.__shape,
dtype=self.__median_dtype,
)
# Make immutable
if isinstance(median, np.ndarray):
median.setflags(write=False)
return median
@cached_property
def mean(self) -> _ValueType:
"""
Mean :math:`\\mathbb{E}(X)` of the distribution.
To learn about the dtype of the mean, see :attr:`moment_dtype`.
Returns
-------
mean : array-like
The mean of the distribution.
"""
if self.__mean is None:
raise NotImplementedError
mean = self.__mean()
RandomVariable._check_property_value(
"mean",
mean,
shape=self.__shape,
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(mean, np.ndarray):
mean.setflags(write=False)
return mean
@cached_property
def cov(self) -> _ValueType:
"""
Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)`
of the random variable.
To learn about the dtype of the covariance, see :attr:`moment_dtype`.
Returns
-------
cov : array-like
            The covariance of the random variable.
""" # pylint: disable=line-too-long
if self.__cov is None:
raise NotImplementedError
cov = self.__cov()
RandomVariable._check_property_value(
"covariance",
cov,
shape=(self.size, self.size) if self.ndim > 0 else (),
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(cov, np.ndarray):
cov.setflags(write=False)
return cov
@cached_property
def var(self) -> _ValueType:
"""
Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of
the distribution.
To learn about the dtype of the variance, see :attr:`moment_dtype`.
Returns
-------
var : array-like
The variance of the distribution.
"""
if self.__var is None:
try:
var = np.diag(self.cov).reshape(self.__shape).copy()
except NotImplementedError as exc:
raise NotImplementedError from exc
else:
var = self.__var()
RandomVariable._check_property_value(
"variance",
var,
shape=self.__shape,
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(var, np.ndarray):
var.setflags(write=False)
return var
@cached_property
def std(self) -> _ValueType:
"""
Standard deviation of the distribution.
To learn about the dtype of the standard deviation, see :attr:`moment_dtype`.
Returns
-------
std : array-like
The standard deviation of the distribution.
"""
if self.__std is None:
try:
std = np.sqrt(self.var)
except NotImplementedError as exc:
raise NotImplementedError from exc
else:
std = self.__std()
RandomVariable._check_property_value(
"standard deviation",
std,
shape=self.__shape,
dtype=self.__moment_dtype,
)
# Make immutable
if isinstance(std, np.ndarray):
std.setflags(write=False)
return std
@cached_property
def entropy(self) -> np.float_:
if self.__entropy is None:
raise NotImplementedError
entropy = self.__entropy()
entropy = RandomVariable._ensure_numpy_float(
"entropy", entropy, force_scalar=True
)
return entropy
def in_support(self, x: _ValueType) -> bool:
if self.__in_support is None:
raise NotImplementedError
in_support = self.__in_support(self._as_value_type(x))
if not isinstance(in_support, bool):
raise ValueError(
f"The function `in_support` must return a `bool`, but its return value "
                f"is of type `{type(in_support)}`."
)
return in_support
def sample(self, size: ShapeArgType = ()) -> _ValueType:
"""
Draw realizations from a random variable.
Parameters
----------
size : tuple
Size of the drawn sample of realizations.
Returns
-------
sample : array-like
Sample of realizations with the given ``size`` and the inherent ``shape``.
"""
if self.__sample is None:
raise NotImplementedError("No sampling method provided.")
return self.__sample(size=_utils.as_shape(size))
def cdf(self, x: _ValueType) -> np.float_:
"""
Cumulative distribution function.
Parameters
----------
x : array-like
Evaluation points of the cumulative distribution function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The cdf evaluation will be broadcast over all additional dimensions.
Returns
-------
q : array-like
            Value of the cumulative distribution function at the given points.
"""
if self.__cdf is not None:
return RandomVariable._ensure_numpy_float(
"cdf", self.__cdf(self._as_value_type(x))
)
elif self.__logcdf is not None:
cdf = np.exp(self.logcdf(self._as_value_type(x)))
assert isinstance(cdf, np.float_)
return cdf
else:
raise NotImplementedError(
f"Neither the `cdf` nor the `logcdf` of the random variable object "
f"with type `{type(self).__name__}` is implemented."
)
def logcdf(self, x: _ValueType) -> np.float_:
"""
Log-cumulative distribution function.
Parameters
----------
x : array-like
Evaluation points of the cumulative distribution function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The logcdf evaluation will be broadcast over all additional dimensions.
Returns
-------
q : array-like
            Value of the log-cumulative distribution function at the given points.
"""
if self.__logcdf is not None:
return RandomVariable._ensure_numpy_float(
"logcdf", self.__logcdf(self._as_value_type(x))
)
elif self.__cdf is not None:
logcdf = np.log(self.__cdf(x))
assert isinstance(logcdf, np.float_)
return logcdf
else:
raise NotImplementedError(
f"Neither the `logcdf` nor the `cdf` of the random variable object "
f"with type `{type(self).__name__}` is implemented."
)
def quantile(self, p: FloatArgType) -> _ValueType:
"""Quantile function.
The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
variable :math:`X` is defined as
:math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
:math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random
variable. From the definition it follows that the quantile function always
returns values of the same dtype as the random variable. For instance, for a
discrete distribution over the integers, the returned quantiles will also be
integers. This means that, in general, :math:`Q(0.5)` is not equal to the
:attr:`median` as it is defined in this class. See
https://en.wikipedia.org/wiki/Quantile_function for more details and examples.
"""
if self.__shape != ():
raise NotImplementedError(
"The quantile function is only defined for scalar random variables."
)
if self.__quantile is None:
raise NotImplementedError
try:
p = _utils.as_numpy_scalar(p, dtype=np.floating)
except TypeError as exc:
raise TypeError(
"The given argument `p` can not be cast to a `np.floating` object."
) from exc
quantile = self.__quantile(p)
if quantile.shape != self.__shape:
raise ValueError(
f"The quantile function should return values of the same shape as the "
f"random variable, i.e. {self.__shape}, but it returned a value with "
f"{quantile.shape}."
)
if quantile.dtype != self.__dtype:
raise ValueError(
f"The quantile function should return values of the same dtype as the "
f"random variable, i.e. `{self.__dtype.name}`, but it returned a value "
f"with dtype `{quantile.dtype.name}`."
)
return quantile
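    # Worked example (illustrative): for a discrete uniform random variable on {1, ..., 6},
    # F(2) = 2/6 < 0.5 <= F(3) = 3/6, so Q(0.5) = 3, whereas the median under the averaging
    # convention described for `median_dtype` above would be 3.5.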
def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable":
return RandomVariable(
shape=np.empty(shape=self.shape)[key].shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size)[key],
mode=lambda: self.mode[key],
mean=lambda: self.mean[key],
var=lambda: self.var[key],
std=lambda: self.std[key],
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
def reshape(self, newshape: ShapeArgType) -> "RandomVariable":
"""
Give a new shape to a random variable.
Parameters
----------
newshape : int or tuple of ints
New shape for the random variable. It must be compatible with the original
shape.
Returns
-------
reshaped_rv : ``self`` with the new dimensions of ``shape``.
"""
newshape = _utils.as_shape(newshape)
return RandomVariable(
shape=newshape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size).reshape(size + newshape),
mode=lambda: self.mode.reshape(newshape),
median=lambda: self.median.reshape(newshape),
mean=lambda: self.mean.reshape(newshape),
cov=lambda: self.cov,
var=lambda: self.var.reshape(newshape),
std=lambda: self.std.reshape(newshape),
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
def transpose(self, *axes: int) -> "RandomVariable":
"""
Transpose the random variable.
Parameters
----------
axes : None, tuple of ints, or n ints
See documentation of numpy.ndarray.transpose.
Returns
-------
transposed_rv : The transposed random variable.
"""
return RandomVariable(
shape=np.empty(shape=self.shape).transpose(*axes).shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: self.sample(size).transpose(*axes),
mode=lambda: self.mode.transpose(*axes),
median=lambda: self.median.transpose(*axes),
mean=lambda: self.mean.transpose(*axes),
cov=lambda: self.cov,
var=lambda: self.var.transpose(*axes),
std=lambda: self.std.transpose(*axes),
entropy=lambda: self.entropy,
as_value_type=self.__as_value_type,
)
T = property(transpose)
# Unary arithmetic operations
def __neg__(self) -> "RandomVariable":
return RandomVariable(
shape=self.shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: -self.sample(size=size),
in_support=lambda x: self.in_support(-x),
mode=lambda: -self.mode,
median=lambda: -self.median,
mean=lambda: -self.mean,
cov=lambda: self.cov,
var=lambda: self.var,
std=lambda: self.std,
as_value_type=self.__as_value_type,
)
def __pos__(self) -> "RandomVariable":
return RandomVariable(
shape=self.shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: +self.sample(size=size),
in_support=lambda x: self.in_support(+x),
mode=lambda: +self.mode,
median=lambda: +self.median,
mean=lambda: +self.mean,
cov=lambda: self.cov,
var=lambda: self.var,
std=lambda: self.std,
as_value_type=self.__as_value_type,
)
def __abs__(self) -> "RandomVariable":
return RandomVariable(
shape=self.shape,
dtype=self.dtype,
random_state=_utils.derive_random_seed(self.random_state),
sample=lambda size: abs(self.sample(size=size)),
)
# Binary arithmetic operations
__array_ufunc__ = None
"""
This prevents numpy from calling elementwise arithmetic
operations allowing expressions like: y = np.array([1, 1]) + RV
to call the arithmetic operations defined by RandomVariable
instead of elementwise. Thus no array of RandomVariables but a
RandomVariable with the correct shape is returned.
"""
def __add__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import add
return add(self, other)
def __radd__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import add
return add(other, self)
def __sub__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import sub
return sub(self, other)
def __rsub__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import sub
return sub(other, self)
def __mul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mul
return mul(self, other)
def __rmul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mul
return mul(other, self)
def __matmul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import matmul
return matmul(self, other)
def __rmatmul__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import matmul
return matmul(other, self)
def __truediv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import truediv
return truediv(self, other)
def __rtruediv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import truediv
return truediv(other, self)
def __floordiv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import floordiv
return floordiv(self, other)
def __rfloordiv__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import floordiv
return floordiv(other, self)
def __mod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mod
return mod(self, other)
def __rmod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import mod
return mod(other, self)
def __divmod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import divmod_
return divmod_(self, other)
def __rdivmod__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import divmod_
return divmod_(other, self)
def __pow__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import pow_
return pow_(self, other)
def __rpow__(self, other: Any) -> "RandomVariable":
# pylint: disable=import-outside-toplevel,cyclic-import
from ._arithmetic import pow_
return pow_(other, self)
@staticmethod
def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype:
return RandomVariable.infer_moment_dtype(value_dtype)
@staticmethod
def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype:
return np.promote_types(value_dtype, np.float_)
def _as_value_type(self, x: Any) -> _ValueType:
if self.__as_value_type is not None:
return self.__as_value_type(x)
return x
@staticmethod
def _check_property_value(
name: str,
value: Any,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[np.dtype] = None,
):
if shape is not None:
if value.shape != shape:
raise ValueError(
f"The {name} of the random variable does not have the correct "
f"shape. Expected {shape} but got {value.shape}."
)
if dtype is not None:
if not np.issubdtype(value.dtype, dtype):
raise ValueError(
f"The {name} of the random variable does not have the correct "
f"dtype. Expected {dtype.name} but got {value.dtype.name}."
)
@classmethod
def _ensure_numpy_float(
cls, name: str, value: Any, force_scalar: bool = False
) -> Union[np.float_, np.ndarray]:
if np.isscalar(value):
if not isinstance(value, np.float_):
try:
value = _utils.as_numpy_scalar(value, dtype=np.float_)
except TypeError as err:
raise TypeError(
f"The function `{name}` specified via the constructor of "
f"`{cls.__name__}` must return a scalar value that can be "
f"converted to a `np.float_`, which is not possible for "
f"{value} of type {type(value)}."
) from err
elif not force_scalar:
try:
value = np.asarray(value, dtype=np.float_)
except TypeError as err:
raise TypeError(
f"The function `{name}` specified via the constructor of "
f"`{cls.__name__}` must return a value that can be converted "
f"to a `np.ndarray` of type `np.float_`, which is not possible "
f"for {value} of type {type(value)}."
) from err
else:
raise TypeError(
f"The function `{name}` specified via the constructor of "
f"`{cls.__name__}` must return a scalar value, but {value} of type "
f"{type(value)} is not scalar."
)
assert isinstance(value, (np.float_, np.ndarray))
return value
class DiscreteRandomVariable(RandomVariable[_ValueType]):
def __init__(
self,
shape: ShapeArgType,
dtype: DTypeArgType,
random_state: Optional[RandomStateType] = None,
parameters: Optional[Dict[str, Any]] = None,
sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
in_support: Optional[Callable[[_ValueType], bool]] = None,
pmf: Optional[Callable[[_ValueType], np.float_]] = None,
logpmf: Optional[Callable[[_ValueType], np.float_]] = None,
cdf: Optional[Callable[[_ValueType], np.float_]] = None,
logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
mode: Optional[Callable[[], _ValueType]] = None,
median: Optional[Callable[[], _ValueType]] = None,
mean: Optional[Callable[[], _ValueType]] = None,
cov: Optional[Callable[[], _ValueType]] = None,
var: Optional[Callable[[], _ValueType]] = None,
std: Optional[Callable[[], _ValueType]] = None,
entropy: Optional[Callable[[], np.float_]] = None,
):
# Probability mass function
self.__pmf = pmf
self.__logpmf = logpmf
super().__init__(
shape=shape,
dtype=dtype,
random_state=random_state,
parameters=parameters,
sample=sample,
in_support=in_support,
cdf=cdf,
logcdf=logcdf,
quantile=quantile,
mode=mode,
median=median,
mean=mean,
cov=cov,
var=var,
std=std,
entropy=entropy,
)
def pmf(self, x: _ValueType) -> np.float_:
if self.__pmf is not None:
return DiscreteRandomVariable._ensure_numpy_float("pmf", self.__pmf(x))
elif self.__logpmf is not None:
pmf = np.exp(self.__logpmf(x))
assert isinstance(pmf, np.float_)
return pmf
else:
raise NotImplementedError(
f"Neither the `pmf` nor the `logpmf` of the discrete random variable "
f"object with type `{type(self).__name__}` is implemented."
)
def logpmf(self, x: _ValueType) -> np.float_:
if self.__logpmf is not None:
return DiscreteRandomVariable._ensure_numpy_float(
"logpmf", self.__logpmf(self._as_value_type(x))
)
elif self.__pmf is not None:
logpmf = np.log(self.__pmf(self._as_value_type(x)))
assert isinstance(logpmf, np.float_)
return logpmf
else:
raise NotImplementedError(
f"Neither the `logpmf` nor the `pmf` of the discrete random variable "
f"object with type `{type(self).__name__}` is implemented."
)
class ContinuousRandomVariable(RandomVariable[_ValueType]):
def __init__(
self,
shape: ShapeArgType,
dtype: DTypeArgType,
random_state: Optional[RandomStateType] = None,
parameters: Optional[Dict[str, Any]] = None,
sample: Optional[Callable[[ShapeArgType], _ValueType]] = None,
in_support: Optional[Callable[[_ValueType], bool]] = None,
pdf: Optional[Callable[[_ValueType], np.float_]] = None,
logpdf: Optional[Callable[[_ValueType], np.float_]] = None,
cdf: Optional[Callable[[_ValueType], np.float_]] = None,
logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
mode: Optional[Callable[[], _ValueType]] = None,
median: Optional[Callable[[], _ValueType]] = None,
mean: Optional[Callable[[], _ValueType]] = None,
cov: Optional[Callable[[], _ValueType]] = None,
var: Optional[Callable[[], _ValueType]] = None,
std: Optional[Callable[[], _ValueType]] = None,
entropy: Optional[Callable[[], np.float_]] = None,
):
# Probability density function
self.__pdf = pdf
self.__logpdf = logpdf
super().__init__(
shape=shape,
dtype=dtype,
random_state=random_state,
parameters=parameters,
sample=sample,
in_support=in_support,
cdf=cdf,
logcdf=logcdf,
quantile=quantile,
mode=mode,
median=median,
mean=mean,
cov=cov,
var=var,
std=std,
entropy=entropy,
)
def pdf(self, x: _ValueType) -> np.float_:
"""
Probability density or mass function.
Following the predominant convention in mathematics, we express pdfs with
respect to the Lebesgue measure unless stated otherwise.
Parameters
----------
x : array-like
Evaluation points of the probability density / mass function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The pdf evaluation will be broadcast over all additional dimensions.
Returns
-------
p : array-like
Value of the probability density / mass function at the given points.
"""
if self.__pdf is not None:
return ContinuousRandomVariable._ensure_numpy_float(
"pdf", self.__pdf(self._as_value_type(x))
)
if self.__logpdf is not None:
pdf = np.exp(self.__logpdf(self._as_value_type(x)))
assert isinstance(pdf, np.float_)
return pdf
raise NotImplementedError(
f"Neither the `pdf` nor the `logpdf` of the continuous random variable "
f"object with type `{type(self).__name__}` is implemented."
)
def logpdf(self, x: _ValueType) -> np.float_:
"""
Natural logarithm of the probability density function.
Parameters
----------
x : array-like
Evaluation points of the log-probability density/mass function.
The shape of this argument should be :code:`(..., S1, ..., SN)`, where
:code:`(S1, ..., SN)` is the :attr:`shape` of the random variable.
The logpdf evaluation will be broadcast over all additional dimensions.
Returns
-------
logp : array-like
Value of the log-probability density / mass function at the given points.
"""
if self.__logpdf is not None:
return ContinuousRandomVariable._ensure_numpy_float(
"logpdf", self.__logpdf(self._as_value_type(x))
)
elif self.__pdf is not None:
logpdf = np.log(self.__pdf(self._as_value_type(x)))
assert isinstance(logpdf, np.float_)
return logpdf
else:
raise NotImplementedError(
f"Neither the `logpdf` nor the `pdf` of the continuous random variable "
f"object with type `{type(self).__name__}` is implemented."
)
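# --- Illustrative usage sketch (not part of the original module, example values only) ---
# A minimal sketch of constructing a scalar random variable directly from callables, as
# described in the RandomVariable docstring. The standard-normal choice and the use of
# np.random for sampling are assumptions made purely for illustration.
def _example_standard_normal_rv():
    rv = RandomVariable(
        shape=(),
        dtype=np.float_,
        sample=lambda size: np.random.standard_normal(size=size),
        mean=lambda: np.float_(0.0),
        var=lambda: np.float_(1.0),
    )
    # rv.mean, rv.var and rv.sample(size) now behave as defined by the class above
    return rv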
|
[
"numpy.prod",
"probnum.utils.as_shape",
"numpy.sqrt",
"numpy.isscalar",
"numpy.promote_types",
"probnum.utils.as_random_state",
"numpy.asarray",
"probnum.utils.as_numpy_scalar",
"numpy.diag",
"numpy.issubdtype",
"numpy.empty",
"probnum.utils.derive_random_seed",
"numpy.dtype",
"typing.TypeVar"
] |
[((658, 678), 'typing.TypeVar', 'TypeVar', (['"""ValueType"""'], {}), "('ValueType')\n", (665, 678), False, 'from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union\n'), ((4355, 4377), 'probnum.utils.as_shape', '_utils.as_shape', (['shape'], {}), '(shape)\n', (4370, 4377), True, 'from probnum import utils as _utils\n'), ((4423, 4438), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4431, 4438), True, 'import numpy as np\n'), ((4625, 4661), 'probnum.utils.as_random_state', '_utils.as_random_state', (['random_state'], {}), '(random_state)\n', (4647, 4661), True, 'from probnum import utils as _utils\n'), ((8030, 8058), 'probnum.utils.as_random_state', '_utils.as_random_state', (['seed'], {}), '(seed)\n', (8052, 8058), True, 'from probnum import utils as _utils\n'), ((19830, 19855), 'probnum.utils.as_shape', '_utils.as_shape', (['newshape'], {}), '(newshape)\n', (19845, 19855), True, 'from probnum import utils as _utils\n'), ((27213, 27253), 'numpy.promote_types', 'np.promote_types', (['value_dtype', 'np.float_'], {}), '(value_dtype, np.float_)\n', (27229, 27253), True, 'import numpy as np\n'), ((28334, 28352), 'numpy.isscalar', 'np.isscalar', (['value'], {}), '(value)\n', (28345, 28352), True, 'import numpy as np\n'), ((5670, 5691), 'numpy.prod', 'np.prod', (['self.__shape'], {}), '(self.__shape)\n', (5677, 5691), True, 'import numpy as np\n'), ((17904, 17948), 'probnum.utils.as_numpy_scalar', '_utils.as_numpy_scalar', (['p'], {'dtype': 'np.floating'}), '(p, dtype=np.floating)\n', (17926, 17948), True, 'from probnum import utils as _utils\n'), ((12628, 12645), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (12635, 12645), True, 'import numpy as np\n'), ((14374, 14395), 'probnum.utils.as_shape', '_utils.as_shape', (['size'], {}), '(size)\n', (14389, 14395), True, 'from probnum import utils as _utils\n'), ((19023, 19067), 'probnum.utils.derive_random_seed', '_utils.derive_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (19048, 19067), True, 'from probnum import utils as _utils\n'), ((19971, 20015), 'probnum.utils.derive_random_seed', '_utils.derive_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (19996, 20015), True, 'from probnum import utils as _utils\n'), ((21005, 21049), 'probnum.utils.derive_random_seed', '_utils.derive_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (21030, 21049), True, 'from probnum import utils as _utils\n'), ((21742, 21786), 'probnum.utils.derive_random_seed', '_utils.derive_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (21767, 21786), True, 'from probnum import utils as _utils\n'), ((22334, 22378), 'probnum.utils.derive_random_seed', '_utils.derive_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (22359, 22378), True, 'from probnum import utils as _utils\n'), ((22926, 22970), 'probnum.utils.derive_random_seed', '_utils.derive_random_seed', (['self.random_state'], {}), '(self.random_state)\n', (22951, 22970), True, 'from probnum import utils as _utils\n'), ((27923, 27956), 'numpy.issubdtype', 'np.issubdtype', (['value.dtype', 'dtype'], {}), '(value.dtype, dtype)\n', (27936, 27956), True, 'import numpy as np\n'), ((28452, 28498), 'probnum.utils.as_numpy_scalar', '_utils.as_numpy_scalar', (['value'], {'dtype': 'np.float_'}), '(value, dtype=np.float_)\n', (28474, 28498), True, 'from probnum import utils as _utils\n'), ((28987, 29021), 'numpy.asarray', 'np.asarray', (['value'], {'dtype': 'np.float_'}), '(value, 
dtype=np.float_)\n', (28997, 29021), True, 'import numpy as np\n'), ((18929, 18955), 'numpy.empty', 'np.empty', ([], {'shape': 'self.shape'}), '(shape=self.shape)\n', (18937, 18955), True, 'import numpy as np\n'), ((20899, 20925), 'numpy.empty', 'np.empty', ([], {'shape': 'self.shape'}), '(shape=self.shape)\n', (20907, 20925), True, 'import numpy as np\n'), ((11747, 11764), 'numpy.diag', 'np.diag', (['self.cov'], {}), '(self.cov)\n', (11754, 11764), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019
@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness; I currently do not see a reason to generalise this file, since matrices will be saved in either JSON or some matrix-friendly format.
import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os
projectDir = os.environ.get('LDPC')
if projectDir is None:
import pathlib
projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)
FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
def nibbleToHex(inputArray):
n = NIBBLE_CONVERTER.dot(inputArray)
if n == 10:
h = 'A'
elif n== 11:
h = 'B'
elif n== 12:
h = 'C'
elif n== 13:
h = 'D'
elif n== 14:
h = 'E'
elif n== 15:
h = 'F'
else:
h = str(n)
return h
def binaryArraytoHex(inputArray):
d1 = len(inputArray)
assert (d1 % 4 == 0)
outputArray = np.zeros(d1//4, dtype = str)
outputString = ''
for j in range(d1//4):
nibble = inputArray[4 * j : 4 * j + 4]
h = nibbleToHex(nibble)
outputArray[j] = h
outputString = outputString + h
return outputArray, outputString
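# Illustrative example (not part of the original module): binaryArraytoHex on the bit array
# [1, 0, 1, 0, 1, 1, 1, 1] yields the hex string 'AF' (one character per 4-bit nibble) as its
# second return value.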
def hexStringToBinaryArray(hexString):
outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
for i in hexString:
if i == '0':
nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '1':
nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '2':
nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '3':
nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '4':
nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '5':
nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '6':
nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '7':
nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '8':
nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == '9':
nibble = np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'A':
nibble = np.array([1,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'B':
nibble = np.array([1,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'C':
nibble = np.array([1,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'D':
nibble = np.array([1,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'E':
nibble = np.array([1,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
elif i == 'F':
nibble = np.array([1,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
else:
#print('Error, 0-9 or A-F')
nibble = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
outputBinary = np.hstack((outputBinary, nibble))
return outputBinary
def hexToCirculant(hexStr, circulantSize):
binaryArray = hexStringToBinaryArray(hexStr)
    if len(binaryArray) < circulantSize:
        binaryArray = np.hstack((np.zeros(circulantSize - len(binaryArray), dtype = GENERAL_CODE_MATRIX_DATA_TYPE), binaryArray))
    else:
        binaryArray = binaryArray[1:]
circulantMatrix = circulant(binaryArray)
circulantMatrix = circulantMatrix.T
return circulantMatrix
def hotLocationsToCirculant(locationList, circulantSize):
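    # Build a binary circulant matrix whose first row has ones at the positions given in locationList.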
generatingVector = np.zeros(circulantSize, dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
generatingVector[locationList] = 1
newCirculant = circulant(generatingVector)
newCirculant = newCirculant.T
return newCirculant
def readMatrixFromFile(fileName, dim0, dim1, circulantSize, isRow = True, isHex = True, isGenerator = True ):
# This function assumes that each line in the file contains the non zero locations of the first row of a circulant.
# Each line in the file then defines a circulant, and the order in which they are defined is top to bottom left to right, i.e.:
# line 0 defines circulant 0,0
with open(fileName) as fid:
lines = fid.readlines()
if isGenerator:
for i in range((dim0 // circulantSize) ):
bLeft = hexToCirculant(lines[2 * i], circulantSize)
bRight = hexToCirculant(lines[2 * i + 1], circulantSize)
newBlock = np.hstack((bLeft, bRight))
if i == 0:
accumulatedBlock = newBlock
else:
accumulatedBlock = np.vstack((accumulatedBlock, newBlock))
newMatrix = np.hstack((np.eye(dim0, dtype = GENERAL_CODE_MATRIX_DATA_TYPE), accumulatedBlock))
else:
for i in range((dim1 // circulantSize)):
locationList1 = list(lines[ i].rstrip('\n').split(','))
locationList1 = list(map(int, locationList1))
upBlock = hotLocationsToCirculant(locationList1, circulantSize)
if i == 0:
accumulatedUpBlock1 = upBlock
else:
accumulatedUpBlock1 = np.hstack((accumulatedUpBlock1, upBlock))
for i in range((dim1 // circulantSize)):
locationList = list(lines[(dim1 // circulantSize) + i].rstrip('\n').split(','))
locationList = list(map(int, locationList))
newBlock = hotLocationsToCirculant(locationList, circulantSize)
if i == 0:
accumulatedBlock2 = newBlock
else:
accumulatedBlock2 = np.hstack((accumulatedBlock2, newBlock))
newMatrix = np.vstack((accumulatedUpBlock1, accumulatedBlock2))
return newMatrix
def binaryMatrixToHexString(binaryMatrix, circulantSize):
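    # Serialise a block-circulant binary matrix to a hex string by concatenating the first row of
    # every circulant block, prefixed so that each row's length becomes a whole number of nibbles.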
leftPadding = np.array(4 - (circulantSize % 4))
m,n = binaryMatrix.shape
#print(m)
#print(n)
assert( m % circulantSize == 0)
assert (n % circulantSize == 0)
M = m // circulantSize
N = n // circulantSize
hexName = ''
for r in range(M):
for k in range(N):
nextLine = np.hstack((leftPadding, binaryMatrix[ r * circulantSize , k * circulantSize : (k + 1) * circulantSize]))
hexArray, hexString = binaryArraytoHex(nextLine)
hexName = hexName + hexString
return hexName
def saveCodeInstance(parityMatrix, circulantSize, codewordSize, evaluationData = None, path = None, evaluationTime = 0, numberOfNonZero = 0, fileName = None):
print("*** in saveCodeInstance ...")
m, n = parityMatrix.shape
M = m // circulantSize
N = n // circulantSize
if fileName == None:
fileName = binaryMatrixToHexString(parityMatrix, circulantSize)
fileNameSHA224 = str(circulantSize) + '_' + str(M) + '_' + str(N) + '_' + str(hashlib.sha224(str(fileName).encode('utf-8')).hexdigest())
fileNameWithPath = path + fileNameSHA224
else:
fileNameWithPath = path + fileName
print("*** " + fileName)
workspaceDict = {}
workspaceDict['parityMatrix'] = parityMatrix
workspaceDict['fileName'] = fileName
if evaluationData != None:
scatterSNR, scatterBER, scatterITR, snrAxis, averageSnrAxis, berData, averageNumberOfIterations = evaluationData.getStatsV2()
workspaceDict['snrData'] = scatterSNR
workspaceDict['berData'] = scatterBER
workspaceDict['itrData'] = scatterITR
workspaceDict['averageSnrAxis'] = averageSnrAxis
workspaceDict['averageNumberOfIterations'] = averageNumberOfIterations
workspaceDict['evaluationTime'] = evaluationTime
workspaceDict['nonZero'] = numberOfNonZero
scipy.io.savemat((fileNameWithPath + '.mat'), workspaceDict)
#evaluationData.plotStats(codewordSize, fileNameWithPath)
print("*** Finishing saveCodeInstance !")
return fileName
def testFileHandler():
nearEarthGenerator = readMatrixFromFile(projectDir + '/codeMatrices/nearEarthGenerator.txt', 7154, 8176, 511, True, True, True)
nearEarthParity = readMatrixFromFile(projectDir + '/codeMatrices/nearEarthParity.txt', 1022, 8176, 511, True, False, False)
return 'OK'
def plotResults(path, makeMat = False):
i = 10
evaluationFaildAt = np.zeros(4, dtype = FILE_HANDLER_INT_DATA_TYPE)
evalTimes = []
numberOfIterationsAtHigh = []
for root, dirs, files in os.walk(path):
for file in files:
if str(file).endswith('.mat'):
i = i + 1
mat = scipy.io.loadmat(str(os.path.join(root, file)))
snrAxis = mat['snrAxis']
snrActual = mat['averageSnrAxis']
if len(snrAxis) < 3:
evaluationFaildAt[len(snrAxis)] = evaluationFaildAt[len(snrAxis)] + 1
berAxis = mat['berData']
if ('evaluationTime' in mat.keys()):
evalTimes.append(mat['evaluationTime'])
averageNumberOfIterations = mat['averageNumberOfIterations']
numberOfIterationsAtHigh.append(averageNumberOfIterations[-1])
common.plotSNRvsBER(snrActual, berAxis, fileName = None, inputLabel = '', figureNumber = i, figureName = str(file))
else:
pass
return evalTimes, evaluationFaildAt, numberOfIterationsAtHigh
#plt.imshow(nearEarthParity)
#nearEarthParity = readMatrixFromFile('/home/oss22/swift/swift/codeMatrices/nearEarthParity.txt', 1022, 8176, 511, True, False, False)
#import networkx as nx
#from networkx.algorithms import bipartite
#B = nx.Graph()
#B.add_nodes_from(range(1022), bipartite=0)
#B.add_nodes_from(range(1022, 7156 + 1022), bipartite=1)
# Add edges only between nodes of opposite node sets
#for i in range(8176):
# for j in range(1022):
# if nearEarthParity[j,i] != 0:
# B.add_edges_from([(j, 7156 + i)])
#X, Y = bipartite.sets(B)
#pos = dict()
#pos.update( (n, (1, i)) for i, n in enumerate(X) )
#pos.update( (n, (2, i)) for i, n in enumerate(Y) )
#nx.draw(B, pos=pos)
#plt.show()
|
[
"numpy.eye",
"sys.path.insert",
"numpy.hstack",
"pathlib.Path",
"os.environ.get",
"os.path.join",
"numpy.array",
"numpy.zeros",
"scipy.linalg.circulant",
"numpy.vstack",
"os.walk"
] |
[((694, 716), 'os.environ.get', 'os.environ.get', (['"""LDPC"""'], {}), "('LDPC')\n", (708, 716), False, 'import os\n'), ((907, 937), 'sys.path.insert', 'sys.path.insert', (['(1)', 'projectDir'], {}), '(1, projectDir)\n', (922, 937), False, 'import sys\n'), ((1038, 1097), 'numpy.array', 'np.array', (['[8, 4, 2, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([8, 4, 2, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (1046, 1097), True, 'import numpy as np\n'), ((1524, 1552), 'numpy.zeros', 'np.zeros', (['(d1 // 4)'], {'dtype': 'str'}), '(d1 // 4, dtype=str)\n', (1532, 1552), True, 'import numpy as np\n'), ((1848, 1897), 'numpy.array', 'np.array', (['[]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (1856, 1897), True, 'import numpy as np\n'), ((4356, 4378), 'scipy.linalg.circulant', 'circulant', (['binaryArray'], {}), '(binaryArray)\n', (4365, 4378), False, 'from scipy.linalg import circulant\n'), ((4528, 4588), 'numpy.zeros', 'np.zeros', (['circulantSize'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '(circulantSize, dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (4536, 4588), True, 'import numpy as np\n'), ((4649, 4676), 'scipy.linalg.circulant', 'circulant', (['generatingVector'], {}), '(generatingVector)\n', (4658, 4676), False, 'from scipy.linalg import circulant\n'), ((6792, 6823), 'numpy.array', 'np.array', (['(4 - circulantSize % 4)'], {}), '(4 - circulantSize % 4)\n', (6800, 6823), True, 'import numpy as np\n'), ((9217, 9262), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'FILE_HANDLER_INT_DATA_TYPE'}), '(4, dtype=FILE_HANDLER_INT_DATA_TYPE)\n', (9225, 9262), True, 'import numpy as np\n'), ((9347, 9360), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (9354, 9360), False, 'import os\n'), ((3963, 3996), 'numpy.hstack', 'np.hstack', (['(outputBinary, nibble)'], {}), '((outputBinary, nibble))\n', (3972, 3996), True, 'import numpy as np\n'), ((6625, 6676), 'numpy.vstack', 'np.vstack', (['(accumulatedUpBlock1, accumulatedBlock2)'], {}), '((accumulatedUpBlock1, accumulatedBlock2))\n', (6634, 6676), True, 'import numpy as np\n'), ((1967, 2026), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 0, 0, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (1975, 2026), True, 'import numpy as np\n'), ((5433, 5459), 'numpy.hstack', 'np.hstack', (['(bLeft, bRight)'], {}), '((bLeft, bRight))\n', (5442, 5459), True, 'import numpy as np\n'), ((7104, 7209), 'numpy.hstack', 'np.hstack', (['(leftPadding, binaryMatrix[r * circulantSize, k * circulantSize:(k + 1) *\n circulantSize])'], {}), '((leftPadding, binaryMatrix[r * circulantSize, k * circulantSize:(\n k + 1) * circulantSize]))\n', (7113, 7209), True, 'import numpy as np\n'), ((776, 798), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (788, 798), False, 'import pathlib\n'), ((2072, 2131), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 0, 0, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2080, 2131), True, 'import numpy as np\n'), ((5580, 5619), 'numpy.vstack', 'np.vstack', (['(accumulatedBlock, newBlock)'], {}), '((accumulatedBlock, newBlock))\n', (5589, 5619), True, 'import numpy as np\n'), ((5651, 5700), 'numpy.eye', 'np.eye', (['dim0'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '(dim0, dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (5657, 5700), True, 'import numpy as np\n'), ((6118, 6159), 'numpy.hstack', 'np.hstack', (['(accumulatedUpBlock1, upBlock)'], {}), '((accumulatedUpBlock1, 
upBlock))\n', (6127, 6159), True, 'import numpy as np\n'), ((6564, 6604), 'numpy.hstack', 'np.hstack', (['(accumulatedBlock2, newBlock)'], {}), '((accumulatedBlock2, newBlock))\n', (6573, 6604), True, 'import numpy as np\n'), ((2190, 2249), 'numpy.array', 'np.array', (['[0, 0, 1, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 0, 1, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2198, 2249), True, 'import numpy as np\n'), ((2308, 2367), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 0, 1, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2316, 2367), True, 'import numpy as np\n'), ((9501, 9525), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (9513, 9525), False, 'import os\n'), ((2426, 2485), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 1, 0, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2434, 2485), True, 'import numpy as np\n'), ((2544, 2603), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 1, 0, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2552, 2603), True, 'import numpy as np\n'), ((2662, 2721), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 1, 1, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2670, 2721), True, 'import numpy as np\n'), ((2780, 2839), 'numpy.array', 'np.array', (['[0, 1, 1, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([0, 1, 1, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2788, 2839), True, 'import numpy as np\n'), ((2898, 2957), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 0, 0, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (2906, 2957), True, 'import numpy as np\n'), ((3016, 3075), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 0, 0, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3024, 3075), True, 'import numpy as np\n'), ((3134, 3193), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 0, 1, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3142, 3193), True, 'import numpy as np\n'), ((3252, 3311), 'numpy.array', 'np.array', (['[1, 0, 1, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 0, 1, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3260, 3311), True, 'import numpy as np\n'), ((3370, 3429), 'numpy.array', 'np.array', (['[1, 1, 0, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 1, 0, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3378, 3429), True, 'import numpy as np\n'), ((3488, 3547), 'numpy.array', 'np.array', (['[1, 1, 0, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 1, 0, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3496, 3547), True, 'import numpy as np\n'), ((3606, 3665), 'numpy.array', 'np.array', (['[1, 1, 1, 0]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 1, 1, 0], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3614, 3665), True, 'import numpy as np\n'), ((3724, 3783), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([1, 1, 1, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3732, 3783), True, 'import numpy as np\n'), ((3888, 3937), 'numpy.array', 'np.array', (['[]'], {'dtype': 'GENERAL_CODE_MATRIX_DATA_TYPE'}), '([], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)\n', (3896, 3937), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Showcases *ICTCP* *colour encoding* computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"ICTCP" Colour Encoding Computations')
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
message_box(('Converting from "ITU-R BT.2020" colourspace to "ICTCP" colour '
'encoding given "RGB" values:\n'
'\n\t{0}'.format(RGB)))
print(colour.RGB_to_ICTCP(RGB))
print('\n')
ICTCP = np.array([0.07351364, 0.00475253, 0.09351596])
message_box(('Converting from "ICTCP" colour encoding to "ITU-R BT.2020" '
'colourspace given "ICTCP" values:\n'
'\n\t{0}'.format(ICTCP)))
print(colour.ICTCP_to_RGB(ICTCP))
|
[
"colour.ICTCP_to_RGB",
"colour.utilities.message_box",
"numpy.array",
"colour.RGB_to_ICTCP"
] |
[((159, 210), 'colour.utilities.message_box', 'message_box', (['""""ICTCP" Colour Encoding Computations"""'], {}), '(\'"ICTCP" Colour Encoding Computations\')\n', (170, 210), False, 'from colour.utilities import message_box\n'), ((218, 264), 'numpy.array', 'np.array', (['[0.45620519, 0.03081071, 0.04091952]'], {}), '([0.45620519, 0.03081071, 0.04091952])\n', (226, 264), True, 'import numpy as np\n'), ((480, 526), 'numpy.array', 'np.array', (['[0.07351364, 0.00475253, 0.09351596]'], {}), '([0.07351364, 0.00475253, 0.09351596])\n', (488, 526), True, 'import numpy as np\n'), ((432, 456), 'colour.RGB_to_ICTCP', 'colour.RGB_to_ICTCP', (['RGB'], {}), '(RGB)\n', (451, 456), False, 'import colour\n'), ((698, 724), 'colour.ICTCP_to_RGB', 'colour.ICTCP_to_RGB', (['ICTCP'], {}), '(ICTCP)\n', (717, 724), False, 'import colour\n')]
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..config import PathField, BoolField
from ..representation import ClassificationAnnotation
from ..utils import read_csv, check_file_existence, read_json
from .format_converter import BaseFormatConverter, ConverterReturn
try:
from PIL import Image
except ImportError:
Image = None
class MNISTCSVFormatConverter(BaseFormatConverter):
"""
MNIST CSV dataset converter. All annotation converters should be derived from BaseFormatConverter class.
"""
# register name for this converter
# this name will be used for converter class look up
__provider__ = 'mnist_csv'
annotation_types = (ClassificationAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'annotation_file': PathField(description="Path to csv file which contain dataset."),
'convert_images': BoolField(
optional=True,
default=False,
                description="Allows converting images from the CSV file to a user-specified directory."
),
'converted_images_dir': PathField(
optional=True, is_directory=True, check_exists=False, description="Path to converted images location."
),
'dataset_meta_file': PathField(
description='path to json file with dataset meta (e.g. label_map, color_encoding)', optional=True
)
})
return configuration_parameters
def configure(self):
"""
This method is responsible for obtaining the necessary parameters
for converting from the command line or config.
"""
self.test_csv_file = self.get_value_from_config('annotation_file')
self.converted_images_dir = self.get_value_from_config('converted_images_dir')
self.convert_images = self.get_value_from_config('convert_images')
if self.convert_images and not self.converted_images_dir:
self.converted_images_dir = self.test_csv_file.parent / 'converted_images'
if not self.converted_images_dir.exists():
self.converted_images_dir.mkdir(parents=True)
if self.convert_images and Image is None:
raise ValueError(
"conversion mnist images requires Pillow installation, please install it before usage"
)
self.dataset_meta = self.get_value_from_config('dataset_meta_file')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""
This method is executed automatically when convert.py is started.
All arguments are automatically got from command line arguments or config file in method configure
Returns:
annotations: list of annotation representation objects.
meta: dictionary with additional dataset level metadata.
"""
annotations = []
check_images = check_content and not self.convert_images
meta = self.generate_meta()
labels_to_id = meta['label_map']
content_errors = None
if check_content:
self.converted_images_dir = self.converted_images_dir or self.test_csv_file.parent / 'converted_images'
if self.converted_images_dir and check_content:
if not self.converted_images_dir.exists():
content_errors = ['{}: does not exist'.format(self.converted_images_dir)]
check_images = False
# read original dataset annotation
annotation_table = read_csv(self.test_csv_file)
num_iterations = len(annotation_table)
for index, annotation in enumerate(annotation_table):
identifier = '{}.png'.format(index)
label = labels_to_id.get(annotation['label'], int(annotation['label']))
if self.convert_images:
image = Image.fromarray(self.convert_image(annotation))
image = image.convert("L")
image.save(str(self.converted_images_dir / identifier))
annotations.append(ClassificationAnnotation(identifier, label))
if check_images:
if not check_file_existence(self.converted_images_dir / identifier):
# add error to errors list if file not found
content_errors.append('{}: does not exist'.format(self.converted_images_dir / identifier))
if progress_callback is not None and index % progress_interval == 0:
progress_callback(index / num_iterations * 100)
return ConverterReturn(annotations, meta, content_errors)
@staticmethod
def convert_image(features):
image = np.zeros((28, 28))
column_template = '{}x{}'
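        # CSV columns are named '<row>x<col>' with 1-based indices; rebuild the 28x28 image pixel by pixel.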
for x in range(28):
for y in range(28):
pixel = int(features[column_template.format(x+1, y+1)])
image[x, y] = pixel
return image
def generate_meta(self):
if not self.dataset_meta:
return {'label_map': {str(i): i for i in range(10)}}
dataset_meta = read_json(self.dataset_meta)
label_map = dataset_meta.get('label_map')
if 'labels' in dataset_meta:
label_map = dict(enumerate(dataset_meta['labels']))
dataset_meta['label_map'] = label_map or {str(i): i for i in range(10)}
return dataset_meta
|
[
"numpy.zeros"
] |
[((5319, 5337), 'numpy.zeros', 'np.zeros', (['(28, 28)'], {}), '((28, 28))\n', (5327, 5337), True, 'import numpy as np\n')]
|
import numpy as np
import sys
import os
from PIL import Image
from visu.helper_functions import save_image
from scipy.spatial.transform import Rotation as R
from helper import re_quat
import copy
import torch
import k3d
class Visualizer():
def __init__(self, p_visu, writer=None):
if p_visu[-1] != '/':
p_visu = p_visu + '/'
self.p_visu = p_visu
self.writer = writer
if not os.path.exists(self.p_visu):
os.makedirs(self.p_visu)
def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2):
"""
tag := tensorboard tag
        epoch := tensorboard epoch
        store := True -> stores the image to the standard path
        path := if not None, creates the path and stores to path/tag.png
        img := original_image, [width, height, RGB]
points:= points of the object model [length,x,y,z]
trans: [1,3]
rot: [3,3]
"""
img_d = copy.deepcopy(img)
points = np.dot(points, rot_mat.T)
points = np.add(points, trans[0, :])
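        # Project each transformed model point onto the image with the pinhole model
        # (u = fx * x/z + cx, v = fy * y/z + cy) and paint a small square of half-width w
        # in green (assuming an RGB image).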
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
                img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
                img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
                img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
except:
                #print("out of bounds")
pass
if jupyter:
display(Image.fromarray(img_d))
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
#print("IMAGE D:" ,img_d,img_d.shape )
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None):
"""
tag := tensorboard tag
        epoch := tensorboard epoch
        store := True -> stores the image to the standard path
        path := if not None, creates the path and stores to path/tag.png
        img := original_image, [width, height, RGB]
"""
if isinstance(b, dict):
rmin = b['rmin']
rmax = b['rmax']
cmin = b['cmin']
cmax = b['cmax']
# ToDo check Input data
img_d = np.array(copy.deepcopy(img))
c = [0, 0, 255]
rmin_mi = max(0, rmin - str_width)
rmin_ma = min(img_d.shape[0], rmin + str_width)
rmax_mi = max(0, rmax - str_width)
rmax_ma = min(img_d.shape[0], rmax + str_width)
cmin_mi = max(0, cmin - str_width)
cmin_ma = min(img_d.shape[1], cmin + str_width)
cmax_mi = max(0, cmax - str_width)
cmax_ma = min(img_d.shape[1], cmax + str_width)
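        # Paint the four edges of the bounding box, roughly 2 * str_width pixels thick, with colour c.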
img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c
img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c
img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c
img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c
print("STORE", store)
img_d = img_d.astype(np.uint8)
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if jupyter:
display(Image.fromarray(img_d))
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_pcd(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'):
if c1 == 'b':
k = 245
elif c1 == 'g':
k = 25811000
elif c1 == 'r':
k = 11801000
elif c1 == 'black':
k = 2580
else:
k = 2580
if c2 == 'b':
k2 = 245
elif c2 == 'g':
k2 = 25811000
elif c2 == 'r':
k2 = 11801000
elif c2 == 'black':
k2 = 2580
else:
k2 = 2580
col1 = np.ones(x.shape[0]) * k
col2 = np.ones(y.shape[0]) * k2
plot = k3d.plot(name='points')
plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
class SequenceVisualizer():
def __init__(self, seq_data, images_path, output_path=None):
self.seq_data = seq_data
self.images_path = images_path
self.output_path = output_path
def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'):
seq_data = self.seq_data
images_path = self.images_path
output_path = self.output_path
frame = seq_data[seq_no][frame_no]
unique_desig = frame['dl_dict']['unique_desig'][0]
if pose_type == 'ground_truth':
# ground truth
t = frame['dl_dict']['gt_trans'].reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'filtered':
# filter pred
t = np.array(frame['filter_pred']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['filter_pred']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'final_pred_obs':
# final pred
t = np.array(frame['final_pred_obs']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['final_pred_obs']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
else:
raise Exception('Pose type not implemented.')
w = 2
if type(unique_desig) != str:
im = np.array(Image.open(
images_path + unique_desig[0] + '-color.png')) # ycb
else:
im = np.array(Image.open(
images_path + unique_desig + '.png')) # laval
img_d = copy.deepcopy(im)
dl_dict = frame['dl_dict']
points = copy.deepcopy(
seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :])
points = np.dot(points, rot.T)
points = np.add(points, t[0, :])
cam_cx = dl_dict['cam_cal'][0][0]
cam_cy = dl_dict['cam_cal'][0][1]
cam_fx = dl_dict['cam_cal'][0][2]
cam_fy = dl_dict['cam_cal'][0][3]
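        # Project the transformed model points into the image using the pinhole camera model
        # and mark each with a small green square of half-width w.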
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
                img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
                img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
                img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
except:
#print("out of bounds")
pass
img_disp = Image.fromarray(img_d)
if jupyter:
display(img_disp)
if store:
outpath = output_path + \
'{}_{}_{}.png'.format(pose_type, seq_no, frame_no)
img_disp.save(outpath, "PNG", compress_level=1)
print("Saved image to {}".format(outpath))
def save_sequence(self, seq_no, pose_type='filtered', name=''):
for fn in range(len(self.seq_data)):
self.plot_points_on_image(seq_no, fn, False, True, pose_type)
if name:
video_name = '{}_{}_{}'.format(name, pose_type, seq_no)
else:
video_name = '{}_{}'.format(pose_type, seq_no)
        cmd = "cd {} && ffmpeg -r 10 -i ./{}_{}_%d.png -vcodec mpeg4 -y {}.mp4".format(
            self.output_path, pose_type, seq_no, video_name)
os.system(cmd)
|
[
"k3d.plot",
"PIL.Image.fromarray",
"os.path.exists",
"PIL.Image.open",
"numpy.ones",
"numpy.add",
"os.makedirs",
"scipy.spatial.transform.Rotation.from_quat",
"numpy.array",
"numpy.dot",
"copy.deepcopy",
"os.system"
] |
[((4150, 4173), 'k3d.plot', 'k3d.plot', ([], {'name': '"""points"""'}), "(name='points')\n", (4158, 4173), False, 'import k3d\n'), ((4842, 4865), 'k3d.plot', 'k3d.plot', ([], {'name': '"""points"""'}), "(name='points')\n", (4850, 4865), False, 'import k3d\n'), ((1091, 1109), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (1104, 1109), False, 'import copy\n'), ((1127, 1152), 'numpy.dot', 'np.dot', (['points', 'rot_mat.T'], {}), '(points, rot_mat.T)\n', (1133, 1152), True, 'import numpy as np\n'), ((1170, 1197), 'numpy.add', 'np.add', (['points', 'trans[0, :]'], {}), '(points, trans[0, :])\n', (1176, 1197), True, 'import numpy as np\n'), ((4115, 4134), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (4122, 4134), True, 'import numpy as np\n'), ((4771, 4790), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (4778, 4790), True, 'import numpy as np\n'), ((4806, 4825), 'numpy.ones', 'np.ones', (['y.shape[0]'], {}), '(y.shape[0])\n', (4813, 4825), True, 'import numpy as np\n'), ((6871, 6888), 'copy.deepcopy', 'copy.deepcopy', (['im'], {}), '(im)\n', (6884, 6888), False, 'import copy\n'), ((6942, 7012), 'copy.deepcopy', 'copy.deepcopy', (["seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :]"], {}), "(seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :])\n", (6955, 7012), False, 'import copy\n'), ((7043, 7064), 'numpy.dot', 'np.dot', (['points', 'rot.T'], {}), '(points, rot.T)\n', (7049, 7064), True, 'import numpy as np\n'), ((7082, 7105), 'numpy.add', 'np.add', (['points', 't[0, :]'], {}), '(points, t[0, :])\n', (7088, 7105), True, 'import numpy as np\n'), ((7827, 7849), 'PIL.Image.fromarray', 'Image.fromarray', (['img_d'], {}), '(img_d)\n', (7842, 7849), False, 'from PIL import Image\n'), ((8637, 8651), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (8646, 8651), False, 'import os\n'), ((444, 471), 'os.path.exists', 'os.path.exists', (['self.p_visu'], {}), '(self.p_visu)\n', (458, 471), False, 'import os\n'), ((485, 509), 'os.makedirs', 'os.makedirs', (['self.p_visu'], {}), '(self.p_visu)\n', (496, 509), False, 'import os\n'), ((2763, 2781), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (2776, 2781), False, 'import copy\n'), ((1771, 1793), 'PIL.Image.fromarray', 'Image.fromarray', (['img_d'], {}), '(img_d)\n', (1786, 1793), False, 'from PIL import Image\n'), ((3666, 3688), 'PIL.Image.fromarray', 'Image.fromarray', (['img_d'], {}), '(img_d)\n', (3681, 3688), False, 'from PIL import Image\n'), ((5798, 5847), 'copy.deepcopy', 'copy.deepcopy', (["frame['dl_dict']['gt_rot_wxyz'][0]"], {}), "(frame['dl_dict']['gt_rot_wxyz'][0])\n", (5811, 5847), False, 'import copy\n'), ((6658, 6714), 'PIL.Image.open', 'Image.open', (["(images_path + unique_desig[0] + '-color.png')"], {}), "(images_path + unique_desig[0] + '-color.png')\n", (6668, 6714), False, 'from PIL import Image\n'), ((6780, 6827), 'PIL.Image.open', 'Image.open', (["(images_path + unique_desig + '.png')"], {}), "(images_path + unique_desig + '.png')\n", (6790, 6827), False, 'from PIL import Image\n'), ((5892, 5913), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot_quat'], {}), '(rot_quat)\n', (5903, 5913), True, 'from scipy.spatial.transform import Rotation as R\n'), ((6087, 6132), 'copy.deepcopy', 'copy.deepcopy', (["frame['filter_pred']['r_wxyz']"], {}), "(frame['filter_pred']['r_wxyz'])\n", (6100, 6132), False, 'import copy\n'), ((6006, 6041), 'numpy.array', 'np.array', (["frame['filter_pred']['t']"], {}), "(frame['filter_pred']['t'])\n", (6014, 6041), 
True, 'import numpy as np\n'), ((6177, 6198), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot_quat'], {}), '(rot_quat)\n', (6188, 6198), True, 'from scipy.spatial.transform import Rotation as R\n'), ((6380, 6428), 'copy.deepcopy', 'copy.deepcopy', (["frame['final_pred_obs']['r_wxyz']"], {}), "(frame['final_pred_obs']['r_wxyz'])\n", (6393, 6428), False, 'import copy\n'), ((6296, 6334), 'numpy.array', 'np.array', (["frame['final_pred_obs']['t']"], {}), "(frame['final_pred_obs']['t'])\n", (6304, 6334), True, 'import numpy as np\n'), ((6473, 6494), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot_quat'], {}), '(rot_quat)\n', (6484, 6494), True, 'from scipy.spatial.transform import Rotation as R\n')]
|
import os
from PIL import Image
import cv2
from os import listdir
from os.path import join
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LogNorm
from io_utils.io_common import create_folder
from viz_utils.constants import PlotMode, BackgroundType
import pylab
import numpy as np
import cmocean
import shapely
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy
def select_colormap(field_name):
'''
    Based on the name of the field, it chooses a colormap from cmocean
Args:
field_name:
Returns:
'''
if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]):
# cmaps_fields.append(cmocean.cm.deep_r)
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]):
return cmocean.cm.thermal
elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]):
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]):
return cmocean.cm.haline
elif field_name.find('error') != -1:
return cmocean.cm.diff
elif field_name.find('binary') != -1:
return cmocean.cm.oxy
elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]):
return cmocean.cm.speed
class EOAImageVisualizer:
"""This class makes plenty of plots assuming we are plotting Geospatial data (maps).
It is made to read xarrays, numpy arrays, and numpy arrays in dictionaries
    vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',
lats=[lats],lons=[lons])
"""
_COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k']
_figsize = 8
_font_size = 30
_units = ''
_max_imgs_per_row = 4
_mincbar = np.nan # User can set a min and max colorbar values to 'force' same color bar to all plots
_maxcbar = np.nan
_flip_data = True
_eoas_pyutils_path = './eoas_pyutils'# This is the path where the eoas_utils folder is stored with respect to the main project
_contourf = False # When plotting non-regular grids and need precision
_background = BackgroundType.BLUE_MARBLE_LR # Select the background to use
_auto_colormap = True # Selects the colormap based on the name of the field
_show_var_names = False # Includes the name of the field name in the titles
_additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In case we want to include additional polygons in the plots (all of them)
# If you want to add a streamplot of a vector field. It must be a dictionary with keys x,y,u,v
# and optional density, color, cmap, arrowsize, arrowstyle, minlength
_vector_field = None
_norm = None # Use to normalize the colormap. For example with LogNorm
# vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',
# lats=[lats],lons=[lons])
def __init__(self, disp_images=True, output_folder='output',
lats=[-90,90], lons =[-180,180],
projection=ccrs.PlateCarree(), **kwargs):
# All the arguments that are passed to the constructor of the class MUST have its name on it.
self._disp_images = disp_images
self._output_folder = output_folder
self._projection = projection
bbox = self.getExtent(lats, lons)
self._extent = bbox
self._lats = lats
self._lons = lons
self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2])
self._contour_labels = False
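        # Any extra keyword argument overrides the matching private attribute (e.g. units=... sets self._units).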
for arg_name, arg_value in kwargs.items():
self.__dict__["_" + arg_name] = arg_value
print(self.__dict__["_" + arg_name])
def __getattr__(self, attr):
'''Generic getter for all the properties of the class'''
return self.__dict__["_" + attr]
def __setattr__(self, attr, value):
'''Generic setter for all the properties of the class'''
self.__dict__["_" + attr] = value
def add_colorbar(self, fig, im, ax, show_color_bar, label=""):
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html
if show_color_bar:
font_size_cbar = self._font_size * .5
# TODO how to make this automatic and works always
cbar = fig.colorbar(im, ax=ax, shrink=.7)
cbar.ax.tick_params(labelsize=font_size_cbar)
if label != "":
cbar.set_label(label, fontsize=font_size_cbar*1.2)
else:
cbar.set_label(self._units, fontsize=font_size_cbar*1.2)
def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None:
"""
Plots a 2D img for EOA data.
:param c_img: 2D array
:param ax: geoaxes
:return:
"""
c_ax = ax
if self._flip_data:
origin = 'lower'
else:
origin = 'upper'
if self._background == BackgroundType.CARTO_DEF:
c_ax.stock_img()
else:
if self._background == BackgroundType.BLUE_MARBLE_LR:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png'))
if self._background == BackgroundType.BLUE_MARBLE_HR:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg'))
if self._background == BackgroundType.TOPO:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png'))
if self._background == BackgroundType.BATHYMETRY:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg'))
c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree())
if mode == PlotMode.RASTER or mode == PlotMode.MERGED:
if self._contourf:
im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent)
else:
if np.isnan(mincbar):
im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm)
else:
im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm)
if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED:
c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))
if mode == PlotMode.CONTOUR:
im = c_ax.contour(c_img, extent=self._extent, transform=self._projection)
if mode == PlotMode.MERGED:
if self._contour_labels:
c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection)
else:
c_ax.contour(c_img, extent=self._extent, transform=self._projection)
if len(self._additional_polygons) > 0:
pol_lats = []
pol_lons = []
for c_polygon in self._additional_polygons:
if isinstance(c_polygon, shapely.geometry.linestring.LineString):
x,y = c_polygon.xy
elif isinstance(c_polygon, shapely.geometry.polygon.Polygon):
x, y = c_polygon.exterior.xy
pol_lats += y
pol_lons += x
c_ax.plot(x,y, transform=self._projection, c='r')
# Adds a threshold to the plot to see the polygons
c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5))
if self._vector_field != None:
try:
u = self._vector_field['u']
v = self._vector_field['v']
x = self._vector_field['x']
y = self._vector_field['y']
vec_keys = self._vector_field.keys()
c = 'r'
density = 1
linewidth = 3
vec_cmap = cmocean.cm.solar
if 'color' in vec_keys:
c = self._vector_field['color']
if 'density' in vec_keys:
density = self._vector_field['density']
if 'linewidth' in vec_keys:
linewidth = self._vector_field['linewidth']
if 'cmap' in vec_keys:
vec_cmap = self._vector_field['cmap']
c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))
c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c,
cmap=vec_cmap, linewidth=linewidth)
except Exception as e:
print(F"Couldn't add vector field e:{e}")
gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--')
# gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'}
font_coords = {'size': self._font_size*.6}
gl.xlabel_style = font_coords
gl.ylabel_style = font_coords
gl.top_labels = False
gl.right_labels = False
return im
def get_proper_size(self, rows, cols):
"""
Obtains the proper size for a figure.
:param rows: how many rows will the figure have
        :param cols: how many cols will the figure have
        :param prop: proportion (w/h) to use
:return:
"""
if rows == 1:
return self._figsize * cols * self._fig_prop, self._figsize
else:
return self._figsize * cols * self._fig_prop, self._figsize * rows
def _close_figure(self):
"""Depending on what is disp_images, the figures are displayed or just closed"""
if self._disp_images:
plt.show()
else:
plt.close()
def getExtent(self, lats, lons, expand_ext=0.0):
'''
        Obtains the bbox of the coordinates. If expand_ext is given, the bbox is expanded in all directions by that amount.
        Args:
            lats:
            lons:
            expand_ext:
Returns:
'''
minLat = np.amin(lats) - expand_ext
maxLat = np.amax(lats) + expand_ext
minLon = np.amin(lons) - expand_ext
maxLon = np.amax(lons) + expand_ext
bbox = (minLon, maxLon, minLat, maxLat)
return bbox
def xr_summary(self, ds):
""" Prints a summary of the netcdf (global attributes, variables, etc)
:param ds:
:return:
"""
print("\n========== Global attributes =========")
for name in ds.attrs:
print(F"{name} = {getattr(ds, name)}")
print("\n========== Dimensions =========")
for name in ds.dims:
print(F"{name}: {ds[name].shape}")
print("\n========== Coordinates =========")
for name in ds.coords:
print(F"{name}: {ds[name].shape}")
print("\n========== Variables =========")
for cur_variable_name in ds.variables:
cur_var = ds[cur_variable_name]
print(F"{cur_variable_name}: {cur_var.dims} {cur_var.shape}")
def nc_summary(self, ds):
""" Prints a summary of the netcdf (global attributes, variables, etc)
:param ds:
:return:
"""
print("\n========== Global attributes =========")
for name in ds.ncattrs():
print(F"{name} = {getattr(ds, name)}")
print("\n========== Variables =========")
netCDFvars = ds.variables
for cur_variable_name in netCDFvars.keys():
cur_var = ds.variables[cur_variable_name]
print(F"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}")
def add_roads(self, ax):
# Names come from: https://www.naturalearthdata.com/features/
        # -- Add roads
roads = cfeature.NaturalEarthFeature(
category='cultural',
name='roads',
scale='10m',
facecolor='none')
ax.add_feature(roads, edgecolor='black')
return ax
def add_states(self, ax):
# Names come from: https://www.naturalearthdata.com/features/
# -- Add states
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
ax.add_feature(states_provinces, edgecolor='gray')
return ax
def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''):
'''
This function plots points in a map
:param bbox:
:return:
'''
if bbox is None:
bbox = (-180, 180, -90, 90)
if lats is None:
lats = self.lats
if lons is None:
lons = self.lons
fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()})
        ax.set_extent(bbox)  # If we do not set this, it will crop to the limits of the plotted locations
ax.gridlines()
im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap)
fig.colorbar(im, ax=ax, shrink=0.7)
ax.coastlines()
plt.title(title)
plt.show()
def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='',
file_name_prefix='', cmap=None, z_names = [],
show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
"""
Plots multiple z_levels for multiple fields.
It uses rows for each depth, and columns for each variable
"""
create_folder(self._output_folder)
orig_cmap = cmap
        # If the user does not request any z-level, then all of them are plotted
if len(z_levels) == 0:
z_levels = range(np_variables[var_names[0]].shape[0])
cols = np.min((self._max_imgs_per_row, len(var_names)))
if cols == len(var_names):
rows = len(z_levels)
else:
rows = int(len(z_levels) * np.ceil(len(var_names)/cols))
fig, _axs = plt.subplots(rows, cols,
figsize=self.get_proper_size(rows, cols),
subplot_kw={'projection': self._projection})
for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels
# Verify the index of the z_levels are the original ones.
if len(z_names) != 0:
c_slice_txt = z_names[c_slice]
else:
c_slice_txt = c_slice
c_mincbar = np.nan
c_maxcbar = np.nan
for idx_var, c_var in enumerate(var_names): # Iterate over the fields
if rows*cols == 1: # Single figure
ax = _axs
else:
ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var]
# Here we chose the min and max colorbars for each field
if not(np.all(np.isnan(mincbar))):
if type(mincbar) is list:
c_mincbar = mincbar[idx_var]
else:
c_mincbar = mincbar
if not(np.all(np.isnan(maxcbar))):
if type(mincbar) is list:
c_maxcbar = maxcbar[idx_var]
else:
c_maxcbar = maxcbar
# By default we select the colorbar from the name of the variable
if self._auto_colormap and orig_cmap is None:
cmap = select_colormap(c_var)
else:
# If there is an array of colormaps we select the one for this field
if type(orig_cmap) is list:
cmap = orig_cmap[idx_var]
else:
# If it is just one cmap, then we use it for all the fields
cmap = orig_cmap
im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode,
mincbar=c_mincbar, maxcbar=c_maxcbar)
if self._show_var_names:
c_title = F'{var_names[idx_var]} {title}'
else:
c_title = F'{title}'
if len(z_levels) > 1:
c_title += F"Z - level: {c_slice_txt}"
ax.set_title(c_title, fontsize=self._font_size)
self.add_colorbar(fig, im, ax, show_color_bar)
plt.tight_layout(pad=.5)
file_name = F'{file_name_prefix}'
pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight')
self._close_figure()
def plot_2d_data_xr(self, np_variables:list, var_names:list, title='',
file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
'''
Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting
:param np_variables:
:param var_names:
:param title:
:param file_name_prefix:
:param cmap:
:param flip_data:
:param rot_90:
:param show_color_bar:
:param plot_mode:
:param mincbar:
:param maxcbar:
:return:
'''
npdict_3d = {}
for i, field_name in enumerate(var_names):
npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0)
self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,
file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],
show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)
def plot_2d_data_np(self, np_variables:list, var_names:list, title='',
file_name_prefix='', cmap=None, flip_data=False,
rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
'''
Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting
:param np_variables: Numpy variables. They can be with shape [fields, x, y] or just a single field with shape [x,y]
:param var_names:
:param title:
:param file_name_prefix:
:param cmap:
:param flip_data:
:param rot_90:
:param show_color_bar:
:param plot_mode:
:param mincbar:
:param maxcbar:
:return:
'''
npdict_3d = {}
for i, field_name in enumerate(var_names):
if len(np_variables.shape) == 3:
c_np_data = np_variables[i, :, :]
else:
c_np_data = np_variables # Single field
if rot_90:
c_np_data = np.rot90(c_np_data)
if flip_data:
c_np_data = np.flip(np.flip(c_np_data), axis=1)
npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0)
self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,
file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],
show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)
def make_video_from_images(self, input_folder, output_file, fps=24):
files = listdir(input_folder)
files.sort()
print(F"Generating video file: {output_file}")
out_video = -1
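        # The video writer is created lazily from the first frame's size; frames are written
        # RGB -> BGR for OpenCV. Note that only the first 36 files are used here.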
for i, file_name in enumerate(files[0:36]):
if i % 10 == 0:
print(F"Adding file # {i}: {file_name}")
c_file = join(input_folder, file_name)
im = Image.open(c_file)
np_im = np.asarray(im)[:, :, :3]
if i == 0:
video_size = (np_im.shape[1], np_im.shape[0])
out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True)
out_video.write(np_im[:, :, ::-1])
out_video.release()
cv2.destroyAllWindows()
print("Done! yeah babe!")
|
[
"io_utils.io_common.create_folder",
"cv2.destroyAllWindows",
"numpy.rot90",
"numpy.flip",
"os.listdir",
"numpy.asarray",
"matplotlib.pyplot.close",
"cv2.VideoWriter_fourcc",
"numpy.amin",
"cartopy.crs.PlateCarree",
"numpy.isnan",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"PIL.Image.open",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"numpy.expand_dims",
"numpy.amax",
"cartopy.feature.NaturalEarthFeature"
] |
[((3165, 3183), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3181, 3183), True, 'import cartopy.crs as ccrs\n'), ((12073, 12171), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', ([], {'category': '"""cultural"""', 'name': '"""roads"""', 'scale': '"""10m"""', 'facecolor': '"""none"""'}), "(category='cultural', name='roads', scale='10m',\n facecolor='none')\n", (12101, 12171), True, 'import cartopy.feature as cfeature\n'), ((12437, 12561), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', ([], {'category': '"""cultural"""', 'name': '"""admin_1_states_provinces_lines"""', 'scale': '"""50m"""', 'facecolor': '"""none"""'}), "(category='cultural', name=\n 'admin_1_states_provinces_lines', scale='50m', facecolor='none')\n", (12465, 12561), True, 'import cartopy.feature as cfeature\n'), ((13453, 13469), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13462, 13469), True, 'import matplotlib.pyplot as plt\n'), ((13478, 13488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13486, 13488), True, 'import matplotlib.pyplot as plt\n'), ((13914, 13948), 'io_utils.io_common.create_folder', 'create_folder', (['self._output_folder'], {}), '(self._output_folder)\n', (13927, 13948), False, 'from io_utils.io_common import create_folder\n'), ((16832, 16857), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (16848, 16857), True, 'import matplotlib.pyplot as plt\n'), ((19744, 19765), 'os.listdir', 'listdir', (['input_folder'], {}), '(input_folder)\n', (19751, 19765), False, 'from os import listdir\n'), ((20417, 20440), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20438, 20440), False, 'import cv2\n'), ((9968, 9978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9976, 9978), True, 'import matplotlib.pyplot as plt\n'), ((10005, 10016), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10014, 10016), True, 'import matplotlib.pyplot as plt\n'), ((10333, 10346), 'numpy.amin', 'np.amin', (['lats'], {}), '(lats)\n', (10340, 10346), True, 'import numpy as np\n'), ((10377, 10390), 'numpy.amax', 'np.amax', (['lats'], {}), '(lats)\n', (10384, 10390), True, 'import numpy as np\n'), ((10421, 10434), 'numpy.amin', 'np.amin', (['lons'], {}), '(lons)\n', (10428, 10434), True, 'import numpy as np\n'), ((10465, 10478), 'numpy.amax', 'np.amax', (['lons'], {}), '(lons)\n', (10472, 10478), True, 'import numpy as np\n'), ((16921, 16966), 'os.path.join', 'join', (['self._output_folder', 'f"""{file_name}.png"""'], {}), "(self._output_folder, f'{file_name}.png')\n", (16925, 16966), False, 'from os.path import join\n'), ((17777, 17825), 'numpy.expand_dims', 'np.expand_dims', (['np_variables[field_name]'], {'axis': '(0)'}), '(np_variables[field_name], axis=0)\n', (17791, 17825), True, 'import numpy as np\n'), ((19343, 19376), 'numpy.expand_dims', 'np.expand_dims', (['c_np_data'], {'axis': '(0)'}), '(c_np_data, axis=0)\n', (19357, 19376), True, 'import numpy as np\n'), ((20024, 20053), 'os.path.join', 'join', (['input_folder', 'file_name'], {}), '(input_folder, file_name)\n', (20028, 20053), False, 'from os.path import join\n'), ((20071, 20089), 'PIL.Image.open', 'Image.open', (['c_file'], {}), '(c_file)\n', (20081, 20089), False, 'from PIL import Image\n'), ((6138, 6155), 'numpy.isnan', 'np.isnan', (['mincbar'], {}), '(mincbar)\n', (6146, 6155), True, 'import numpy as np\n'), ((19197, 19216), 'numpy.rot90', 'np.rot90', (['c_np_data'], {}), '(c_np_data)\n', 
(19205, 19216), True, 'import numpy as np\n'), ((20110, 20124), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (20120, 20124), True, 'import numpy as np\n'), ((5236, 5298), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/bluemarble.png"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/bluemarble.png')\n", (5240, 5298), False, 'from os.path import join\n'), ((5398, 5470), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/bluemarble_5400x2700.jpg"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/bluemarble_5400x2700.jpg')\n", (5402, 5470), False, 'from os.path import join\n'), ((5560, 5617), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/etopo.png"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/etopo.png')\n", (5564, 5617), False, 'from os.path import join\n'), ((5713, 5785), 'os.path.join', 'join', (['self._eoas_pyutils_path', '"""viz_utils/imgs/bathymetry_3600x1800.jpg"""'], {}), "(self._eoas_pyutils_path, 'viz_utils/imgs/bathymetry_3600x1800.jpg')\n", (5717, 5785), False, 'from os.path import join\n'), ((5867, 5885), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5883, 5885), True, 'import cartopy.crs as ccrs\n'), ((13174, 13192), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13190, 13192), True, 'import cartopy.crs as ccrs\n'), ((19279, 19297), 'numpy.flip', 'np.flip', (['c_np_data'], {}), '(c_np_data)\n', (19286, 19297), True, 'import numpy as np\n'), ((20277, 20308), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (20299, 20308), False, 'import cv2\n'), ((15278, 15295), 'numpy.isnan', 'np.isnan', (['mincbar'], {}), '(mincbar)\n', (15286, 15295), True, 'import numpy as np\n'), ((15498, 15515), 'numpy.isnan', 'np.isnan', (['maxcbar'], {}), '(maxcbar)\n', (15506, 15515), True, 'import numpy as np\n')]
|
from os import listdir
from os.path import isfile, join
from path import Path
import numpy as np
import cv2
# Dataset path
target_path = Path('target/')
annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()
dataset = [ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))]
images = np.empty(len(dataset), dtype = object)
count = 1
# Iterate all Training Images
for n in range(0, len(dataset)):
# Read image
images[n] = cv2.imread(join(annotation_images_path,dataset[n]))
    # Convert it to an array of unsigned 8-bit values
    array = np.asarray(images[n], dtype=np.uint8)
    # Map pixels with value < 1 (unlabeled) to 255, and shift every other label down by one
    arr = np.where(array < 1, 255, array - 1)
#Saved it to another file
if count < 10:
cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + ".png", arr)
elif count < 100 and count > 9:
cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + ".png", arr)
elif count < 1000 and count > 99:
cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + ".png", arr)
elif count < 10000 and count > 999:
cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + ".png", arr)
else:
cv2.imwrite(target_path +'ADE_train_000'+ str(count) + ".png", arr)
print(str(count) + ".png is printed")
count += 1
|
[
"os.listdir",
"numpy.where",
"numpy.asarray",
"os.path.join",
"path.Path"
] |
[((138, 153), 'path.Path', 'Path', (['"""target/"""'], {}), "('target/')\n", (142, 153), False, 'from path import Path\n'), ((577, 613), 'numpy.asarray', 'np.asarray', (['images[n]'], {'dtype': 'np.int8'}), '(images[n], dtype=np.int8)\n', (587, 613), True, 'import numpy as np\n'), ((734, 769), 'numpy.where', 'np.where', (['(array < 1)', '(255)', '(array - 1)'], {}), '(array < 1, 255, array - 1)\n', (742, 769), True, 'import numpy as np\n'), ((179, 223), 'path.Path', 'Path', (['"""dataset/ade20k/annotations/training/"""'], {}), "('dataset/ade20k/annotations/training/')\n", (183, 223), False, 'from path import Path\n'), ((257, 288), 'os.listdir', 'listdir', (['annotation_images_path'], {}), '(annotation_images_path)\n', (264, 288), False, 'from os import listdir\n'), ((497, 537), 'os.path.join', 'join', (['annotation_images_path', 'dataset[n]'], {}), '(annotation_images_path, dataset[n])\n', (501, 537), False, 'from os.path import isfile, join\n'), ((299, 330), 'os.path.join', 'join', (['annotation_images_path', 'f'], {}), '(annotation_images_path, f)\n', (303, 330), False, 'from os.path import isfile, join\n')]
|
import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
from taming.data.sflckr import SegmentationBase # for examples included in repo
class Examples(SegmentationBase):
def __init__(self, size=256, random_crop=False, interpolation="bicubic"):
super().__init__(data_csv="data/ade20k_examples.txt",
data_root="data/ade20k_images",
segmentation_root="data/ade20k_segmentations",
size=size, random_crop=random_crop,
interpolation=interpolation,
n_labels=151, shift_segmentation=False)
# With semantic map and scene label
class ADE20kBase(Dataset):
def __init__(self, config=None, size=None, random_crop=False, interpolation="bicubic", crop_size=None):
self.split = self.get_split()
self.n_labels = 151 # unknown + 150
self.data_csv = {"train": "data/ade20k_train.txt",
"validation": "data/ade20k_test.txt"}[self.split]
self.data_root = "./data/ade20k_root"
with open(os.path.join(self.data_root, "sceneCategories.txt"), "r") as f:
self.scene_categories = f.read().splitlines()
self.scene_categories = dict(line.split() for line in self.scene_categories)
with open(self.data_csv, "r") as f:
self.image_paths = f.read().splitlines()
self._length = len(self.image_paths)
ss = self.split
if ss=='train':
ss='training'
self.labels = {
"relative_file_path_": [l for l in self.image_paths],
"file_path_": [os.path.join(self.data_root, "images",ss, l)
for l in self.image_paths],
"relative_segmentation_path_": [l.replace(".jpg", ".png")
for l in self.image_paths],
"segmentation_path_": [os.path.join(self.data_root, "annotations",ss,
l.replace(".jpg", ".png"))
for l in self.image_paths],
"scene_category": [self.scene_categories[l.replace(".jpg", "")]
for l in self.image_paths],
}
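        # treat a non-positive size as "no rescaling"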
size = None if size is not None and size<=0 else size
self.size = size
if crop_size is None:
self.crop_size = size if size is not None else None
else:
self.crop_size = crop_size
if self.size is not None:
self.interpolation = interpolation
self.interpolation = {
"nearest": cv2.INTER_NEAREST,
"bilinear": cv2.INTER_LINEAR,
"bicubic": cv2.INTER_CUBIC,
"area": cv2.INTER_AREA,
"lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=self.interpolation)
self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=cv2.INTER_NEAREST)
if crop_size is not None:
self.center_crop = not random_crop
if self.center_crop:
self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
else:
self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
self.preprocessor = self.cropper
def __len__(self):
return self._length
def __getitem__(self, i):
example = dict((k, self.labels[k][i]) for k in self.labels)
image = Image.open(example["file_path_"])
if not image.mode == "RGB":
image = image.convert("RGB")
image = np.array(image).astype(np.uint8)
if self.size is not None:
image = self.image_rescaler(image=image)["image"]
segmentation = Image.open(example["segmentation_path_"])
segmentation = np.array(segmentation).astype(np.uint8)
if self.size is not None:
segmentation = self.segmentation_rescaler(image=segmentation)["image"]
if self.size is not None:
processed = self.preprocessor(image=image, mask=segmentation)
else:
processed = {"image": image, "mask": segmentation}
example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
segmentation = processed["mask"]
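        # expand the integer label map into a one-hot encoding with n_labels channels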
onehot = np.eye(self.n_labels)[segmentation]
example["segmentation"] = onehot
return example
class ADE20kTrain(ADE20kBase):
# default to random_crop=True
def __init__(self, config=None, size=None, random_crop=True, interpolation="bicubic", crop_size=None):
super().__init__(config=config, size=size, random_crop=random_crop,
interpolation=interpolation, crop_size=crop_size)
def get_split(self):
return "train"
class ADE20kValidation(ADE20kBase):
def get_split(self):
return "validation"
if __name__ == "__main__":
dset = ADE20kValidation()
ex = dset[0]
for k in ["image", "scene_category", "segmentation"]:
print(type(ex[k]))
try:
print(ex[k].shape)
        except AttributeError:
print(ex[k])
|
[
"numpy.eye",
"PIL.Image.open",
"os.path.join",
"albumentations.RandomCrop",
"numpy.array",
"albumentations.CenterCrop",
"albumentations.SmallestMaxSize"
] |
[((3811, 3844), 'PIL.Image.open', 'Image.open', (["example['file_path_']"], {}), "(example['file_path_'])\n", (3821, 3844), False, 'from PIL import Image\n'), ((4090, 4131), 'PIL.Image.open', 'Image.open', (["example['segmentation_path_']"], {}), "(example['segmentation_path_'])\n", (4100, 4131), False, 'from PIL import Image\n'), ((2915, 3004), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size', 'interpolation': 'self.interpolation'}), '(max_size=self.size, interpolation=self.\n interpolation)\n', (2945, 3004), False, 'import albumentations\n'), ((3106, 3194), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size', 'interpolation': 'cv2.INTER_NEAREST'}), '(max_size=self.size, interpolation=cv2.\n INTER_NEAREST)\n', (3136, 3194), False, 'import albumentations\n'), ((4634, 4655), 'numpy.eye', 'np.eye', (['self.n_labels'], {}), '(self.n_labels)\n', (4640, 4655), True, 'import numpy as np\n'), ((1140, 1191), 'os.path.join', 'os.path.join', (['self.data_root', '"""sceneCategories.txt"""'], {}), "(self.data_root, 'sceneCategories.txt')\n", (1152, 1191), False, 'import os\n'), ((1680, 1725), 'os.path.join', 'os.path.join', (['self.data_root', '"""images"""', 'ss', 'l'], {}), "(self.data_root, 'images', ss, l)\n", (1692, 1725), False, 'import os\n'), ((3408, 3478), 'albumentations.CenterCrop', 'albumentations.CenterCrop', ([], {'height': 'self.crop_size', 'width': 'self.crop_size'}), '(height=self.crop_size, width=self.crop_size)\n', (3433, 3478), False, 'import albumentations\n'), ((3528, 3598), 'albumentations.RandomCrop', 'albumentations.RandomCrop', ([], {'height': 'self.crop_size', 'width': 'self.crop_size'}), '(height=self.crop_size, width=self.crop_size)\n', (3553, 3598), False, 'import albumentations\n'), ((3938, 3953), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3946, 3953), True, 'import numpy as np\n'), ((4155, 4177), 'numpy.array', 'np.array', (['segmentation'], {}), '(segmentation)\n', (4163, 4177), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import argparse
import os
import shutil
import time
import numpy as np
import random
from collections import OrderedDict
import torch
import torch.backends.cudnn as cudnn
from callbacks import AverageMeter
from data_utils.causal_data_loader_frames import VideoFolder
from utils import save_results
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Counterfactual CAR')
# Path, dataset and log related arguments
parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/',
help='path to the folder with frames')
parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json',
help='path to the json file with train video meta data')
parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json',
help='path to the json file with validation video meta data')
parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json',
help='path to the json file with ground truth labels')
parser.add_argument('--dataset', default='smth_smth',
help='which dataset to train')
parser.add_argument('--logname', default='my_method',
help='name of the experiment for checkpoints and logs')
parser.add_argument('--print_freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 20)')
parser.add_argument('--ckpt', default='./ckpt',
help='folder to output checkpoints')
parser.add_argument('--resume_vision', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_coord', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# model, image&feature dim and training related arguments
parser.add_argument('--model_vision', default='rgb_roi')
parser.add_argument('--model_coord', default='interaction')
parser.add_argument('--model_fusion', default='concat_fusion')
parser.add_argument('--fusion_function', default='fused_sum', type=str,
help='function for fusing activations from each branch')
parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for image-based features')
parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for coord-based features')
parser.add_argument('--size', default=224, type=int, metavar='N',
help='primary image input size')
parser.add_argument('--num_boxes', default=4, type=int,
help='num of boxes for each image')
parser.add_argument('--num_frames', default=16, type=int,
help='num of frames for the model')
parser.add_argument('--num_classes', default=174, type=int,
help='num of class in the model')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start_epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size', '-b', default=16, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs="+",
metavar='LRSteps', help='epochs to decay learning rate by 10')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--clip_gradient', '-cg', default=5, type=float,
metavar='W', help='gradient norm clipping (default: 5)')
parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides')
# train mode, hardware setting and others related arguments
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set')
parser.add_argument('--parallel', default=True, type=bool,
help='whether or not train with multi GPUs')
parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu you want to use')
best_loss = 1000000
def main():
global args, best_loss
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
print(args)
# create vision model
if args.model_vision == 'global_i3d':
from model.model_lib import VideoGlobalModel as RGBModel
print('global_i3d loaded!!')
elif args.model_vision == 'rgb_roi':
from model.model_lib import BboxVisualModel as RGBModel
print('rgb_roi loaded!!')
else:
print("no such a vision model!")
# create coord model
if args.model_coord == 'interaction':
from model.model_lib import BboxInteractionLatentModel as BboxModel
print('interaction loaded!!')
else:
print("no such a coordinate model!")
# create fusion model
if args.model_fusion == 'concat_fusion':
from model.model_lib import ConcatFusionModel as FusionModel
print('concat_fusion loaded!!')
else:
print('no such a fusion model!')
# load model branch
vision_model = RGBModel(args)
coord_model = BboxModel(args)
fusion_model = FusionModel(args)
# create the fusion function for the activation of three branches
if args.fusion_function == 'fused_sum':
from fusion_function import logsigsum as fusion_func
print('fused_sum loaded!!')
elif args.fusion_function == 'naive_sum':
from fusion_function import naivesum as fusion_func
print('naive_sum loaded!!')
else:
print('no such a fusion function!')
fusion_function = fusion_func()
if args.parallel:
vision_model = torch.nn.DataParallel(vision_model).cuda()
coord_model = torch.nn.DataParallel(coord_model).cuda()
fusion_model = torch.nn.DataParallel(fusion_model).cuda()
else:
vision_model = vision_model.cuda()
coord_model = coord_model.cuda()
fusion_model = fusion_model.cuda()
# optionally resume vision model from a checkpoint
if args.resume_vision:
assert os.path.isfile(args.resume_vision), "No checkpoint found at '{}'".format(args.resume_vision)
print("=> loading checkpoint '{}'".format(args.resume_vision))
checkpoint = torch.load(args.resume_vision)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
vision_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_vision, checkpoint['epoch']))
# optionally resume coord model from a checkpoint
if args.resume_coord:
assert os.path.isfile(args.resume_coord), "No checkpoint found at '{}'".format(args.resume_coord)
print("=> loading checkpoint '{}'".format(args.resume_coord))
checkpoint = torch.load(args.resume_coord)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
coord_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_coord, checkpoint['epoch']))
if args.resume_fusion:
assert os.path.isfile(args.resume_fusion), "No checkpoint found at '{}'".format(args.resume_fusion)
print("=> loading checkpoint '{}'".format(args.resume_fusion))
checkpoint = torch.load(args.resume_fusion)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
fusion_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_fusion, checkpoint['epoch']))
if args.start_epoch is None:
args.start_epoch = 0
cudnn.benchmark = True
# create training and validation dataset
dataset_train = VideoFolder(root=args.root_frames,
num_boxes=args.num_boxes,
file_input=args.json_data_train,
file_labels=args.json_file_labels,
frames_duration=args.num_frames,
args=args,
is_val=False,
if_augment=True,
)
dataset_val = VideoFolder(root=args.root_frames,
num_boxes=args.num_boxes,
file_input=args.json_data_val,
file_labels=args.json_file_labels,
frames_duration=args.num_frames,
args=args,
is_val=True,
if_augment=True,
)
# create training and validation loader
train_loader = torch.utils.data.DataLoader(
dataset_train,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, drop_last=True,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
dataset_val, drop_last=True,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False
)
model_list = [vision_model, coord_model, fusion_model]
optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion]
criterion = torch.nn.CrossEntropyLoss()
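    # grid of candidate weights for the counterfactual debiasing term at inference time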
search_list = np.linspace(0.0, 1.0, 11)
# factual inference (vanilla test stage)
if args.evaluate:
validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict)
return
# Counterfactual inference by trying a list of hyperparameter
if args.cf_inference_group:
cf_inference_group(val_loader, model_list, fusion_function, search_list,
class_to_idx=dataset_val.classes_dict)
return
print('training begin...')
for epoch in tqdm(range(args.start_epoch, args.epochs)):
adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision')
adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord')
adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion')
# train for one epoch
train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion)
if (epoch+1) >= 30 and (epoch + 1) % args.search_stride == 0:
loss = validate(val_loader, model_list, fusion_function, criterion,
epoch=epoch, class_to_idx=dataset_val.classes_dict)
else:
loss = 100
# remember best loss and save checkpoint
is_best = loss < best_loss
best_loss = min(loss, best_loss)
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': vision_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname)))
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': coord_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname)))
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': fusion_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname)))
def train(train_loader, model_list, fusion_function,
optimizer_list, epoch, criterion):
global args
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
# load three model branches
[vision_model, coord_model, fusion_model] = model_list
    # load the three optimizers, one for each branch
[optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list
# switch to train mode
vision_model.train()
coord_model.train()
fusion_model.train()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader):
data_time.update(time.time() - end)
# obtain the activation and vision features from vision branch
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)
output_vision = output_vision.view((-1, len(train_loader.dataset.classes)))
# obtain the activation and coordinate features from coordinate branch
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)
output_coord = output_coord.view((-1, len(train_loader.dataset.classes)))
# detach the computation graph, avoid the gradient confusion
feature_vision_detached = feature_vision.detach()
feature_coord_detached = feature_coord.detach()
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())
output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes)))
output_factual = fusion_function(output_vision, output_coord, output_fusion)
        # compute a cross-entropy loss for each branch output and for the fused (factual) output
loss_vision = criterion(output_vision, video_label.long().cuda())
loss_coord = criterion(output_coord, video_label.long().cuda())
loss_fusion = criterion(output_fusion, video_label.long().cuda())
loss_factual = criterion(output_factual, video_label.long().cuda())
# Measure the accuracy of the sum of three branch activation results
acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5))
# record the accuracy and loss
losses.update(loss_factual.item(), global_img_tensors.size(0))
acc_top1.update(acc1.item(), global_img_tensors.size(0))
acc_top5.update(acc5.item(), global_img_tensors.size(0))
# refresh the optimizer
optimizer_vision.zero_grad()
optimizer_coord.zero_grad()
optimizer_fusion.zero_grad()
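        # joint objective: supervise the vision and coordinate branches plus the fused (factual) prediction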
loss = loss_vision + loss_coord + loss_factual
loss.backward()
if args.clip_gradient is not None:
torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient)
# update the parameter
optimizer_vision.step()
optimizer_coord.step()
optimizer_fusion.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t'
'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses,
acc_top1=acc_top1, acc_top5=acc_top5))
def validate(val_loader, model_list, fusion_function, criterion,
epoch=None, class_to_idx=None):
batch_time = AverageMeter()
losses = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
logits_matrix = []
targets_list = []
# unpack three models
[vision_model, coord_model, fusion_model] = model_list
# switch to evaluate mode
vision_model.eval()
coord_model.eval()
fusion_model.eval()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):
# compute output
with torch.no_grad():
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)
output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))
# detach the computation graph, avoid the gradient confusion
feature_vision_detached = feature_vision.detach()
feature_coord_detached = feature_coord.detach()
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())
output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))
# fuse three outputs
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# warning: loss_fusion is the loss of output_fusion(fused, obtained from the fusion_function)
loss_vision = criterion(output_vision, video_label.long().cuda())
loss_coord = criterion(output_coord, video_label.long().cuda())
loss_fusion = criterion(output_factual, video_label.long().cuda())
            # report statistics on the fused (factual) output; the recorded loss is the vision-branch loss
output = output_factual
loss = loss_vision
acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5))
if args.evaluate:
logits_matrix.append(output.cpu().data.numpy())
targets_list.append(video_label.cpu().numpy())
# measure accuracy and record loss
losses.update(loss.item(), global_img_tensors.size(0))
acc_top1.update(acc1.item(), global_img_tensors.size(0))
acc_top5.update(acc5.item(), global_img_tensors.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i + 1 == len(val_loader):
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t'
'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\t'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
acc_top1=acc_top1, acc_top5=acc_top5,
))
if args.evaluate:
logits_matrix = np.concatenate(logits_matrix)
targets_list = np.concatenate(targets_list)
save_results(logits_matrix, targets_list, class_to_idx, args)
return losses.avg
def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None):
batch_time = AverageMeter()
search_length = len(search_list)
search_dict = {}
for i in range(search_length):
search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()
search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()
[vision_model, coord_model, fusion_model] = model_list
# switch to evaluate mode
vision_model.eval()
coord_model.eval()
fusion_model.eval()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):
# compute output
with torch.no_grad():
# factual inference
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(),
video_label)
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(),
video_label)
output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda())
output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))
# fuse three outputs
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# counterfactual inference
output_vision_subtrahend = output_vision
output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0))
for j in range(search_length):
weight = search_list[j]
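                # remove the weighted counterfactual activation from the factual prediction (debiasing)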
output_debiased = output_factual - output_counterfactual * weight
acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5))
search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0))
search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i + 1 == len(val_loader):
print('Cf-Inference: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\t'
'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\t'
'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\t'
'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\t'
'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format(
i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'],
acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'],
acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0']))
for k in range(search_length):
print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg,
search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg)
return
def save_checkpoint(state, is_best, filename):
torch.save(state, filename + '_latest.pth.tar')
if is_best:
shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar')
def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None):
"""Sets the learning rate to the initial LR decayed by 10"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
if branch_name == 'vision':
for param_group in optimizer.param_groups:
param_group['lr'] = lr * 0.8
elif branch_name == 'coord':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
elif branch_name == 'fusion':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
else:
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
[
"torch.nn.CrossEntropyLoss",
"utils.save_results",
"numpy.array",
"model.model_lib.BboxInteractionLatentModel",
"argparse.ArgumentParser",
"model.model_lib.BboxVisualModel",
"numpy.linspace",
"numpy.concatenate",
"model.model_lib.ConcatFusionModel",
"callbacks.AverageMeter",
"os.path.isfile",
"shutil.copyfile",
"torch.save",
"time.time",
"torch.load",
"fusion_function.naivesum",
"torch.nn.DataParallel",
"torch.tensor",
"data_utils.causal_data_loader_frames.VideoFolder",
"torch.utils.data.DataLoader",
"torch.no_grad"
] |
[((371, 428), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Counterfactual CAR"""'}), "(description='Counterfactual CAR')\n", (394, 428), False, 'import argparse\n'), ((6328, 6342), 'model.model_lib.BboxVisualModel', 'RGBModel', (['args'], {}), '(args)\n', (6336, 6342), True, 'from model.model_lib import BboxVisualModel as RGBModel\n'), ((6362, 6377), 'model.model_lib.BboxInteractionLatentModel', 'BboxModel', (['args'], {}), '(args)\n', (6371, 6377), True, 'from model.model_lib import BboxInteractionLatentModel as BboxModel\n'), ((6398, 6415), 'model.model_lib.ConcatFusionModel', 'FusionModel', (['args'], {}), '(args)\n', (6409, 6415), True, 'from model.model_lib import ConcatFusionModel as FusionModel\n'), ((6859, 6872), 'fusion_function.naivesum', 'fusion_func', ([], {}), '()\n', (6870, 6872), True, 'from fusion_function import naivesum as fusion_func\n'), ((9249, 9461), 'data_utils.causal_data_loader_frames.VideoFolder', 'VideoFolder', ([], {'root': 'args.root_frames', 'num_boxes': 'args.num_boxes', 'file_input': 'args.json_data_train', 'file_labels': 'args.json_file_labels', 'frames_duration': 'args.num_frames', 'args': 'args', 'is_val': '(False)', 'if_augment': '(True)'}), '(root=args.root_frames, num_boxes=args.num_boxes, file_input=\n args.json_data_train, file_labels=args.json_file_labels,\n frames_duration=args.num_frames, args=args, is_val=False, if_augment=True)\n', (9260, 9461), False, 'from data_utils.causal_data_loader_frames import VideoFolder\n'), ((9738, 9948), 'data_utils.causal_data_loader_frames.VideoFolder', 'VideoFolder', ([], {'root': 'args.root_frames', 'num_boxes': 'args.num_boxes', 'file_input': 'args.json_data_val', 'file_labels': 'args.json_file_labels', 'frames_duration': 'args.num_frames', 'args': 'args', 'is_val': '(True)', 'if_augment': '(True)'}), '(root=args.root_frames, num_boxes=args.num_boxes, file_input=\n args.json_data_val, file_labels=args.json_file_labels, frames_duration=\n args.num_frames, args=args, is_val=True, if_augment=True)\n', (9749, 9948), False, 'from data_utils.causal_data_loader_frames import VideoFolder\n'), ((10256, 10403), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_train'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(dataset_train, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True)\n', (10283, 10403), False, 'import torch\n'), ((10461, 10609), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_val'], {'drop_last': '(True)', 'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(False)'}), '(dataset_val, drop_last=True, batch_size=args.\n batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)\n', (10488, 10609), False, 'import torch\n'), ((11422, 11449), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (11447, 11449), False, 'import torch\n'), ((11469, 11494), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(11)'], {}), '(0.0, 1.0, 11)\n', (11480, 11494), True, 'import numpy as np\n'), ((13865, 13879), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (13877, 13879), False, 'from callbacks import AverageMeter\n'), ((13897, 13911), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (13909, 13911), False, 'from callbacks import AverageMeter\n'), ((13928, 13942), 'callbacks.AverageMeter', 
'AverageMeter', ([], {}), '()\n', (13940, 13942), False, 'from callbacks import AverageMeter\n'), ((13961, 13975), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (13973, 13975), False, 'from callbacks import AverageMeter\n'), ((13992, 14006), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14004, 14006), False, 'from callbacks import AverageMeter\n'), ((14379, 14390), 'time.time', 'time.time', ([], {}), '()\n', (14388, 14390), False, 'import time\n'), ((17766, 17780), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17778, 17780), False, 'from callbacks import AverageMeter\n'), ((17795, 17809), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17807, 17809), False, 'from callbacks import AverageMeter\n'), ((17826, 17840), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17838, 17840), False, 'from callbacks import AverageMeter\n'), ((17857, 17871), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17869, 17871), False, 'from callbacks import AverageMeter\n'), ((18128, 18139), 'time.time', 'time.time', ([], {}), '()\n', (18137, 18139), False, 'import time\n'), ((21275, 21289), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (21287, 21289), False, 'from callbacks import AverageMeter\n'), ((21746, 21757), 'time.time', 'time.time', ([], {}), '()\n', (21755, 21757), False, 'import time\n'), ((24887, 24934), 'torch.save', 'torch.save', (['state', "(filename + '_latest.pth.tar')"], {}), "(state, filename + '_latest.pth.tar')\n", (24897, 24934), False, 'import torch\n'), ((7340, 7374), 'os.path.isfile', 'os.path.isfile', (['args.resume_vision'], {}), '(args.resume_vision)\n', (7354, 7374), False, 'import os\n'), ((7527, 7557), 'torch.load', 'torch.load', (['args.resume_vision'], {}), '(args.resume_vision)\n', (7537, 7557), False, 'import torch\n'), ((7974, 8007), 'os.path.isfile', 'os.path.isfile', (['args.resume_coord'], {}), '(args.resume_coord)\n', (7988, 8007), False, 'import os\n'), ((8158, 8187), 'torch.load', 'torch.load', (['args.resume_coord'], {}), '(args.resume_coord)\n', (8168, 8187), False, 'import torch\n'), ((8548, 8582), 'os.path.isfile', 'os.path.isfile', (['args.resume_fusion'], {}), '(args.resume_fusion)\n', (8562, 8582), False, 'import os\n'), ((8735, 8765), 'torch.load', 'torch.load', (['args.resume_fusion'], {}), '(args.resume_fusion)\n', (8745, 8765), False, 'import torch\n'), ((17006, 17017), 'time.time', 'time.time', ([], {}), '()\n', (17015, 17017), False, 'import time\n'), ((20387, 20398), 'time.time', 'time.time', ([], {}), '()\n', (20396, 20398), False, 'import time\n'), ((20976, 21005), 'numpy.concatenate', 'np.concatenate', (['logits_matrix'], {}), '(logits_matrix)\n', (20990, 21005), True, 'import numpy as np\n'), ((21030, 21058), 'numpy.concatenate', 'np.concatenate', (['targets_list'], {}), '(targets_list)\n', (21044, 21058), True, 'import numpy as np\n'), ((21068, 21129), 'utils.save_results', 'save_results', (['logits_matrix', 'targets_list', 'class_to_idx', 'args'], {}), '(logits_matrix, targets_list, class_to_idx, args)\n', (21080, 21129), False, 'from utils import save_results\n'), ((21460, 21474), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (21472, 21474), False, 'from callbacks import AverageMeter\n'), ((21549, 21563), 'callbacks.AverageMeter', 'AverageMeter', ([], {}), '()\n', (21561, 21563), False, 'from callbacks import AverageMeter\n'), ((23686, 23697), 'time.time', 'time.time', ([], {}), '()\n', (23695, 23697), False, 'import time\n'), ((24961, 25034), 
'shutil.copyfile', 'shutil.copyfile', (["(filename + '_latest.pth.tar')", "(filename + '_best.pth.tar')"], {}), "(filename + '_latest.pth.tar', filename + '_best.pth.tar')\n", (24976, 25034), False, 'import shutil\n'), ((25878, 25893), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25891, 25893), False, 'import torch\n'), ((18281, 18296), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18294, 18296), False, 'import torch\n'), ((21899, 21914), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21912, 21914), False, 'import torch\n'), ((6922, 6957), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['vision_model'], {}), '(vision_model)\n', (6943, 6957), False, 'import torch\n'), ((6988, 7022), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['coord_model'], {}), '(coord_model)\n', (7009, 7022), False, 'import torch\n'), ((7054, 7089), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['fusion_model'], {}), '(fusion_model)\n', (7075, 7089), False, 'import torch\n'), ((14522, 14533), 'time.time', 'time.time', ([], {}), '()\n', (14531, 14533), False, 'import time\n'), ((16972, 16983), 'time.time', 'time.time', ([], {}), '()\n', (16981, 16983), False, 'import time\n'), ((20353, 20364), 'time.time', 'time.time', ([], {}), '()\n', (20362, 20364), False, 'import time\n'), ((23037, 23054), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (23049, 23054), False, 'import torch\n'), ((23056, 23073), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (23068, 23073), False, 'import torch\n'), ((23652, 23663), 'time.time', 'time.time', ([], {}), '()\n', (23661, 23663), False, 'import time\n'), ((25214, 25232), 'numpy.array', 'np.array', (['lr_steps'], {}), '(lr_steps)\n', (25222, 25232), True, 'import numpy as np\n')]
|
import os
import sys
import click
import pickle
import sncosmo
import numpy as np
from astropy.table import Table
DATA_PATH = '/home/samdixon/jla_light_curves/'
def modify_error(lc, error_floor=0.):
"""Add an error floor of `error_floor` times the maximum flux of the band
to each observation
"""
data = sncosmo.photdata.photometric_data(lc).normalized(zp=25., zpsys='ab')
new_lc = {'time': data.time,
'band': data.band,
'flux': data.flux,
'fluxerr': data.fluxerr,
'zp': data.zp,
'zpsys': data.zpsys}
for band in set(data.band):
band_cut = data.band==band
max_flux_in_band = np.max(data.flux[band_cut])
new_lc['fluxerr'][band_cut] = np.sqrt((error_floor*max_flux_in_band)**2+data.fluxerr[band_cut]**2)
new_lc = Table(new_lc, meta=lc.meta)
return new_lc
def fit_lc_and_save(lc, model_name, save_dir, no_mc):
name = lc.meta['SN']
model = sncosmo.Model(source=model_name,
effects=[sncosmo.CCM89Dust()],
effect_names=['mw'],
effect_frames=['obs'])
if type(name) is float:
name = int(name)
z = lc.meta['Z_HELIO']
mwebv = lc.meta['MWEBV']
bounds = {}
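    # initial guess and bounds for t0: prefer the catalogued DayMax, otherwise fall back to the mean observation date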
try:
t0 = float(lc.meta['DayMax'].split()[0])
bounds['t0'] = (t0-5, t0+5)
except KeyError:
try:
t0 = np.mean(lc['Date'])
bounds['t0'] = (min(lc['Date'])-20, max(lc['Date']))
except KeyError:
t0 = np.mean(lc['time'])
bounds['t0'] = (min(lc['time'])-20, max(lc['time']))
bounds['z'] = ((1-1e-4)*z, (1+1e-4)*z)
for param_name in model.source.param_names[1:]:
bounds[param_name] = (-50, 50)
modelcov = model_name=='salt2'
model.set(z=z, t0=t0, mwebv=mwebv)
phase_range = (-15, 45) if model_name=='salt2' else (-10, 40)
wave_range = (3000, 7000) if model_name=='salt2' else None
save_path = os.path.join(save_dir, '{}.pkl'.format(name))
try:
minuit_result, minuit_fit_model = sncosmo.fit_lc(lc, model, model.param_names[:-2], bounds=bounds,
phase_range=phase_range, wave_range=wave_range,
warn=False, modelcov=modelcov)
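        # optionally refine the minuit fit by MCMC sampling of the posterior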
if not no_mc:
emcee_result, emcee_fit_model = sncosmo.mcmc_lc(sncosmo.select_data(lc, minuit_result['data_mask']),
minuit_fit_model,
model.param_names[:-2],
guess_t0=False,
bounds=bounds,
warn=False,
nwalkers=40,
modelcov=modelcov)
pickle.dump(emcee_result, open(save_path, 'wb'))
else:
pickle.dump(minuit_result, open(save_path, 'wb'))
except:
print('Fit to {} failed'.format(name))
sys.stdout.flush()
def main():
model_name, start, finish, err_floor, no_mc = sys.argv[1:]
start = int(start)
finish = int(finish)
err_floor = float(err_floor)
no_mc = bool(int(no_mc))
if no_mc:
save_dir = '/home/samdixon/host_unity/fitting/results_mw_reddening/jla_{}_{:02d}'.format(model_name, int(err_floor*100))
else:
save_dir = '/home/samdixon/host_unity/fitting/results_mw_reddening_mcmc/jla_{}_{:02d}'.format(model_name, int(err_floor*100))
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
lcs = []
for f in os.listdir(DATA_PATH)[int(start):int(finish)]:
if f[:2] == 'lc':
lc = sncosmo.read_lc(os.path.join(DATA_PATH, f), format='salt2', expand_bands=True, read_covmat=True)
lc = modify_error(lc, err_floor)
name = lc.meta['SN']
if type(name) is float:
name = int(name)
load_path = os.path.join(save_dir, '{}.pkl'.format(name))
try:
pickle.load(open(load_path, 'rb'))
print('{}: loaded'.format(name))
sys.stdout.flush()
except IOError:
print('Fitting {}'.format(name))
sys.stdout.flush()
fit_lc_and_save(lc, model_name, save_dir, no_mc)
else:
continue
if __name__=='__main__':
main()
|
[
"numpy.mean",
"sncosmo.fit_lc",
"numpy.sqrt",
"os.listdir",
"astropy.table.Table",
"os.makedirs",
"os.path.join",
"numpy.max",
"os.path.isdir",
"sncosmo.select_data",
"sncosmo.CCM89Dust",
"sys.stdout.flush",
"sncosmo.photdata.photometric_data"
] |
[((841, 868), 'astropy.table.Table', 'Table', (['new_lc'], {'meta': 'lc.meta'}), '(new_lc, meta=lc.meta)\n', (846, 868), False, 'from astropy.table import Table\n'), ((693, 720), 'numpy.max', 'np.max', (['data.flux[band_cut]'], {}), '(data.flux[band_cut])\n', (699, 720), True, 'import numpy as np\n'), ((759, 835), 'numpy.sqrt', 'np.sqrt', (['((error_floor * max_flux_in_band) ** 2 + data.fluxerr[band_cut] ** 2)'], {}), '((error_floor * max_flux_in_band) ** 2 + data.fluxerr[band_cut] ** 2)\n', (766, 835), True, 'import numpy as np\n'), ((2109, 2261), 'sncosmo.fit_lc', 'sncosmo.fit_lc', (['lc', 'model', 'model.param_names[:-2]'], {'bounds': 'bounds', 'phase_range': 'phase_range', 'wave_range': 'wave_range', 'warn': '(False)', 'modelcov': 'modelcov'}), '(lc, model, model.param_names[:-2], bounds=bounds,\n phase_range=phase_range, wave_range=wave_range, warn=False, modelcov=\n modelcov)\n', (2123, 2261), False, 'import sncosmo\n'), ((3760, 3783), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (3773, 3783), False, 'import os\n'), ((3793, 3814), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (3804, 3814), False, 'import os\n'), ((3842, 3863), 'os.listdir', 'os.listdir', (['DATA_PATH'], {}), '(DATA_PATH)\n', (3852, 3863), False, 'import os\n'), ((328, 365), 'sncosmo.photdata.photometric_data', 'sncosmo.photdata.photometric_data', (['lc'], {}), '(lc)\n', (361, 365), False, 'import sncosmo\n'), ((3243, 3261), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3259, 3261), False, 'import sys\n'), ((1059, 1078), 'sncosmo.CCM89Dust', 'sncosmo.CCM89Dust', ([], {}), '()\n', (1076, 1078), False, 'import sncosmo\n'), ((1447, 1466), 'numpy.mean', 'np.mean', (["lc['Date']"], {}), "(lc['Date'])\n", (1454, 1466), True, 'import numpy as np\n'), ((2449, 2500), 'sncosmo.select_data', 'sncosmo.select_data', (['lc', "minuit_result['data_mask']"], {}), "(lc, minuit_result['data_mask'])\n", (2468, 2500), False, 'import sncosmo\n'), ((3948, 3974), 'os.path.join', 'os.path.join', (['DATA_PATH', 'f'], {}), '(DATA_PATH, f)\n', (3960, 3974), False, 'import os\n'), ((4379, 4397), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4395, 4397), False, 'import sys\n'), ((1574, 1593), 'numpy.mean', 'np.mean', (["lc['time']"], {}), "(lc['time'])\n", (1581, 1593), True, 'import numpy as np\n'), ((4491, 4509), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4507, 4509), False, 'import sys\n')]
|
""" Unit tests for the system interface."""
import unittest
from six import assertRaisesRegex
from six.moves import cStringIO
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ExecComp
from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp
from openmdao.utils.assert_utils import assert_rel_error, assert_warning
class TestSystem(unittest.TestCase):
def test_vector_context_managers(self):
g1 = Group()
g1.add_subsystem('Indep', IndepVarComp('a', 5.0), promotes=['a'])
g2 = g1.add_subsystem('G2', Group(), promotes=['*'])
g2.add_subsystem('C1', ExecComp('b=2*a'), promotes=['a', 'b'])
model = Group()
model.add_subsystem('G1', g1, promotes=['b'])
model.add_subsystem('Sink', ExecComp('c=2*b'), promotes=['b'])
p = Problem(model=model)
p.set_solver_print(level=0)
# Test pre-setup errors
with self.assertRaises(Exception) as cm:
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(str(cm.exception),
"Group: Cannot get vectors because setup has not yet been called.")
with self.assertRaises(Exception) as cm:
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('vec')
self.assertEqual(str(cm.exception),
"Group: Cannot get vectors because setup has not yet been called.")
p.setup()
p.run_model()
# Test inputs with original values
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(inputs['G1.G2.C1.a'], 5.)
inputs, outputs, residuals = g1.get_nonlinear_vectors()
self.assertEqual(inputs['G2.C1.a'], 5.)
# Test inputs after setting a new value
inputs, outputs, residuals = g2.get_nonlinear_vectors()
inputs['C1.a'] = -1.
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(inputs['G1.G2.C1.a'], -1.)
inputs, outputs, residuals = g1.get_nonlinear_vectors()
self.assertEqual(inputs['G2.C1.a'], -1.)
# Test outputs with original values
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(outputs['G1.G2.C1.b'], 10.)
inputs, outputs, residuals = g2.get_nonlinear_vectors()
# Test outputs after setting a new value
inputs, outputs, residuals = model.get_nonlinear_vectors()
outputs['G1.G2.C1.b'] = 123.
self.assertEqual(outputs['G1.G2.C1.b'], 123.)
inputs, outputs, residuals = g2.get_nonlinear_vectors()
outputs['C1.b'] = 789.
self.assertEqual(outputs['C1.b'], 789.)
# Test residuals
inputs, outputs, residuals = model.get_nonlinear_vectors()
residuals['G1.G2.C1.b'] = 99.0
self.assertEqual(residuals['G1.G2.C1.b'], 99.0)
# Test linear
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('linear')
d_outputs['G1.G2.C1.b'] = 10.
self.assertEqual(d_outputs['G1.G2.C1.b'], 10.)
# Test linear with invalid vec_name
with self.assertRaises(Exception) as cm:
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('bad_name')
self.assertEqual(str(cm.exception),
"Group (<model>): There is no linear vector named %s" % 'bad_name')
def test_set_checks_shape(self):
indep = IndepVarComp()
indep.add_output('a')
indep.add_output('x', shape=(5, 1))
g1 = Group()
g1.add_subsystem('Indep', indep, promotes=['a', 'x'])
g2 = g1.add_subsystem('G2', Group(), promotes=['*'])
g2.add_subsystem('C1', ExecComp('b=2*a'), promotes=['a', 'b'])
g2.add_subsystem('C2', ExecComp('y=2*x',
x=np.zeros((5, 1)),
y=np.zeros((5, 1))),
promotes=['x', 'y'])
model = Group()
model.add_subsystem('G1', g1, promotes=['b', 'y'])
model.add_subsystem('Sink', ExecComp(('c=2*b', 'z=2*y'),
y=np.zeros((5, 1)),
z=np.zeros((5, 1))),
promotes=['b', 'y'])
p = Problem(model=model)
p.setup()
p.set_solver_print(level=0)
p.run_model()
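        # regex for the expected shape-mismatch error message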
msg = "Incompatible shape for '.*': Expected (.*) but got (.*)"
num_val = -10
arr_val = -10*np.ones((5, 1))
bad_val = -10*np.ones((10))
inputs, outputs, residuals = g2.get_nonlinear_vectors()
#
# set input
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
inputs['C1.a'] = arr_val
# assign scalar to array
inputs['C2.x'] = num_val
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign array to array
inputs['C2.x'] = arr_val
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
inputs['C2.x'] = bad_val
# assign list to array
inputs['C2.x'] = arr_val.tolist()
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
inputs['C2.x'] = bad_val.tolist()
#
# set output
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
outputs['C1.b'] = arr_val
# assign scalar to array
outputs['C2.y'] = num_val
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign array to array
outputs['C2.y'] = arr_val
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
outputs['C2.y'] = bad_val
# assign list to array
outputs['C2.y'] = arr_val.tolist()
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
outputs['C2.y'] = bad_val.tolist()
#
# set residual
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
residuals['C1.b'] = arr_val
# assign scalar to array
residuals['C2.y'] = num_val
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign array to array
residuals['C2.y'] = arr_val
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
residuals['C2.y'] = bad_val
# assign list to array
residuals['C2.y'] = arr_val.tolist()
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
residuals['C2.y'] = bad_val.tolist()
def test_deprecated_solver_names(self):
class DummySolver():
pass
model = Group()
# check nl_solver setter & getter
msg = "The 'nl_solver' attribute provides backwards compatibility " \
"with OpenMDAO 1.x ; use 'nonlinear_solver' instead."
with assert_warning(DeprecationWarning, msg):
model.nl_solver = DummySolver()
with assert_warning(DeprecationWarning, msg):
solver = model.nl_solver
self.assertTrue(isinstance(solver, DummySolver))
# check ln_solver setter & getter
msg = "The 'ln_solver' attribute provides backwards compatibility " \
"with OpenMDAO 1.x ; use 'linear_solver' instead."
with assert_warning(DeprecationWarning, msg):
model.ln_solver = DummySolver()
with assert_warning(DeprecationWarning, msg):
solver = model.ln_solver
self.assertTrue(isinstance(solver, DummySolver))
def test_deprecated_metadata(self):
prob = Problem()
prob.model.add_subsystem('inputs', IndepVarComp('x', shape=3))
prob.model.add_subsystem('double', VectorDoublingComp())
msg = "The 'metadata' attribute provides backwards compatibility " \
"with earlier version of OpenMDAO; use 'options' instead."
with assert_warning(DeprecationWarning, msg):
prob.model.double.metadata['size'] = 3
prob.model.connect('inputs.x', 'double.x')
prob.setup()
prob['inputs.x'] = [1., 2., 3.]
prob.run_model()
assert_rel_error(self, prob['double.y'], [2., 4., 6.])
def test_list_inputs_output_with_includes_excludes(self):
from openmdao.test_suite.scripts.circuit_analysis import Resistor, Diode, Node, Circuit
p = Problem()
model = p.model
model.add_subsystem('ground', IndepVarComp('V', 0., units='V'))
model.add_subsystem('source', IndepVarComp('I', 0.1, units='A'))
model.add_subsystem('circuit', Circuit())
model.connect('source.I', 'circuit.I_in')
model.connect('ground.V', 'circuit.Vg')
p.setup()
p.run_model()
# Inputs with no includes or excludes
inputs = model.list_inputs(out_stream=None)
self.assertEqual( len(inputs), 11)
# Inputs with includes
inputs = model.list_inputs(includes=['*V_out*'], out_stream=None)
self.assertEqual( len(inputs), 3)
# Inputs with includes matching a promoted name
inputs = model.list_inputs(includes=['*Vg*'], out_stream=None)
self.assertEqual( len(inputs), 2)
# Inputs with excludes
inputs = model.list_inputs(excludes=['*V_out*'], out_stream=None)
self.assertEqual( len(inputs), 8)
# Inputs with excludes matching a promoted name
inputs = model.list_inputs(excludes=['*Vg*'], out_stream=None)
self.assertEqual( len(inputs), 9)
# Inputs with includes and excludes
inputs = model.list_inputs(includes=['*V_out*'], excludes=['*Vg*'], out_stream=None)
self.assertEqual( len(inputs), 1)
# Outputs with no includes or excludes. Explicit only
outputs = model.list_outputs(implicit=False, out_stream=None)
self.assertEqual( len(outputs), 5)
# Outputs with includes. Explicit only
outputs = model.list_outputs(includes=['*I'], implicit=False, out_stream=None)
self.assertEqual( len(outputs), 4)
# Outputs with excludes. Explicit only
outputs = model.list_outputs(excludes=['circuit*'], implicit=False, out_stream=None)
self.assertEqual( len(outputs), 2)
if __name__ == "__main__":
unittest.main()
|
[
"openmdao.utils.assert_utils.assert_rel_error",
"openmdao.api.ExecComp",
"numpy.ones",
"openmdao.utils.assert_utils.assert_warning",
"openmdao.test_suite.scripts.circuit_analysis.Circuit",
"openmdao.api.IndepVarComp",
"openmdao.api.Group",
"numpy.zeros",
"six.assertRaisesRegex",
"unittest.main",
"openmdao.api.Problem",
"openmdao.test_suite.components.options_feature_vector.VectorDoublingComp"
] |
[((10995, 11010), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11008, 11010), False, 'import unittest\n'), ((467, 474), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (472, 474), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((698, 705), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (703, 705), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((844, 864), 'openmdao.api.Problem', 'Problem', ([], {'model': 'model'}), '(model=model)\n', (851, 864), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((3482, 3496), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (3494, 3496), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((3585, 3592), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (3590, 3592), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((4036, 4043), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (4041, 4043), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((4378, 4398), 'openmdao.api.Problem', 'Problem', ([], {'model': 'model'}), '(model=model)\n', (4385, 4398), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((4952, 5006), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "inputs['C2.x']", 'arr_val', '(1e-10)'], {}), "(self, inputs['C2.x'], arr_val, 1e-10)\n", (4968, 5006), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((5081, 5135), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "inputs['C2.x']", 'arr_val', '(1e-10)'], {}), "(self, inputs['C2.x'], arr_val, 1e-10)\n", (5097, 5135), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((5353, 5407), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "inputs['C2.x']", 'arr_val', '(1e-10)'], {}), "(self, inputs['C2.x'], arr_val, 1e-10)\n", (5369, 5407), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((5796, 5851), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "outputs['C2.y']", 'arr_val', '(1e-10)'], {}), "(self, outputs['C2.y'], arr_val, 1e-10)\n", (5812, 5851), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((5927, 5982), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "outputs['C2.y']", 'arr_val', '(1e-10)'], {}), "(self, outputs['C2.y'], arr_val, 1e-10)\n", (5943, 5982), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((6202, 6257), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "outputs['C2.y']", 'arr_val', '(1e-10)'], {}), "(self, outputs['C2.y'], arr_val, 1e-10)\n", (6218, 6257), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((6653, 6710), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "residuals['C2.y']", 'arr_val', '(1e-10)'], {}), "(self, residuals['C2.y'], arr_val, 1e-10)\n", (6669, 6710), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((6788, 6845), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "residuals['C2.y']", 'arr_val', '(1e-10)'], {}), "(self, residuals['C2.y'], arr_val, 1e-10)\n", (6804, 6845), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((7069, 
7126), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "residuals['C2.y']", 'arr_val', '(1e-10)'], {}), "(self, residuals['C2.y'], arr_val, 1e-10)\n", (7085, 7126), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((7381, 7388), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (7386, 7388), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((8318, 8327), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (8325, 8327), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((8869, 8926), 'openmdao.utils.assert_utils.assert_rel_error', 'assert_rel_error', (['self', "prob['double.y']", '[2.0, 4.0, 6.0]'], {}), "(self, prob['double.y'], [2.0, 4.0, 6.0])\n", (8885, 8926), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((9096, 9105), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (9103, 9105), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((509, 531), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""a"""', '(5.0)'], {}), "('a', 5.0)\n", (521, 531), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((585, 592), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (590, 592), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((641, 658), 'openmdao.api.ExecComp', 'ExecComp', (['"""b=2*a"""'], {}), "('b=2*a')\n", (649, 658), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((796, 813), 'openmdao.api.ExecComp', 'ExecComp', (['"""c=2*b"""'], {}), "('c=2*b')\n", (804, 813), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((3692, 3699), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (3697, 3699), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((3748, 3765), 'openmdao.api.ExecComp', 'ExecComp', (['"""b=2*a"""'], {}), "('b=2*a')\n", (3756, 3765), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((4594, 4609), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (4601, 4609), True, 'import numpy as np\n'), ((4632, 4643), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (4639, 4643), True, 'import numpy as np\n'), ((4798, 4838), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (4815, 4838), False, 'from six import assertRaisesRegex\n'), ((5192, 5232), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (5209, 5232), False, 'from six import assertRaisesRegex\n'), ((5463, 5503), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (5480, 5503), False, 'from six import assertRaisesRegex\n'), ((5640, 5680), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (5657, 5680), False, 'from six import assertRaisesRegex\n'), ((6039, 6079), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (6056, 6079), False, 'from six import assertRaisesRegex\n'), ((6313, 6353), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (6330, 6353), False, 'from six import assertRaisesRegex\n'), ((6493, 6533), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', 
(6510, 6533), False, 'from six import assertRaisesRegex\n'), ((6902, 6942), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (6919, 6942), False, 'from six import assertRaisesRegex\n'), ((7182, 7222), 'six.assertRaisesRegex', 'assertRaisesRegex', (['self', 'ValueError', 'msg'], {}), '(self, ValueError, msg)\n', (7199, 7222), False, 'from six import assertRaisesRegex\n'), ((7592, 7631), 'openmdao.utils.assert_utils.assert_warning', 'assert_warning', (['DeprecationWarning', 'msg'], {}), '(DeprecationWarning, msg)\n', (7606, 7631), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((7691, 7730), 'openmdao.utils.assert_utils.assert_warning', 'assert_warning', (['DeprecationWarning', 'msg'], {}), '(DeprecationWarning, msg)\n', (7705, 7730), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((8027, 8066), 'openmdao.utils.assert_utils.assert_warning', 'assert_warning', (['DeprecationWarning', 'msg'], {}), '(DeprecationWarning, msg)\n', (8041, 8066), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((8126, 8165), 'openmdao.utils.assert_utils.assert_warning', 'assert_warning', (['DeprecationWarning', 'msg'], {}), '(DeprecationWarning, msg)\n', (8140, 8165), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((8371, 8397), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""'], {'shape': '(3)'}), "('x', shape=3)\n", (8383, 8397), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((8442, 8462), 'openmdao.test_suite.components.options_feature_vector.VectorDoublingComp', 'VectorDoublingComp', ([], {}), '()\n', (8460, 8462), False, 'from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp\n'), ((8629, 8668), 'openmdao.utils.assert_utils.assert_warning', 'assert_warning', (['DeprecationWarning', 'msg'], {}), '(DeprecationWarning, msg)\n', (8643, 8668), False, 'from openmdao.utils.assert_utils import assert_rel_error, assert_warning\n'), ((9169, 9202), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""V"""', '(0.0)'], {'units': '"""V"""'}), "('V', 0.0, units='V')\n", (9181, 9202), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((9241, 9274), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""I"""', '(0.1)'], {'units': '"""A"""'}), "('I', 0.1, units='A')\n", (9253, 9274), False, 'from openmdao.api import Problem, Group, IndepVarComp, ExecComp\n'), ((9315, 9324), 'openmdao.test_suite.scripts.circuit_analysis.Circuit', 'Circuit', ([], {}), '()\n', (9322, 9324), False, 'from openmdao.test_suite.scripts.circuit_analysis import Resistor, Diode, Node, Circuit\n'), ((3879, 3895), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (3887, 3895), True, 'import numpy as np\n'), ((3939, 3955), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (3947, 3955), True, 'import numpy as np\n'), ((4215, 4231), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (4223, 4231), True, 'import numpy as np\n'), ((4280, 4296), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (4288, 4296), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Script to execute example covarying MMGP regression forecasting model
with full Krhh.
Inputs: Data training and test sets (dictionary pickle)
Data for example:
- normalised solar data for 25 sites for 15 minute forecast
- N_train = 4200, N_test = 2276, P = 25, D = 51
- Xtr[:, :50] 2 recent lagged observations for each site in order
- Xtr[:, 50] time index
- link inputs is a 25x2 array (link inputs repeated for every group)
with normalised lat,long for each site in order
Model Options:
- Sparse or full x-function covariance prior Krhh (set bool SPARSE_PRIOR)
- Diagonal or Kronecker-structured variational posterior covariance Sr (set bool DIAG_POST)
- Sparse or full posterior covariance (when Kronecker posterior; set bool SPARSE_POST)
Current Settings (non-sparse covarying mmgp model with sparse Kronecker posterior):
DIAG_POST = False
SPARSE_PRIOR = False # set True for equivalent sparse scmmgp model
SPARSE_POST = True
Note on specifying group structure for F:
Grouping occurs via block_struct, a nested list of grouping order
Where functions [i] are independent, i.e. in their own block, set kernlink[i] = link_inputs[i] = 1.0
See model class preamble and example below for further details.
"""
import os
import numpy as np
import pickle
import pandas as pd
import traceback
import time
import sklearn.cluster
import csv
import sys
import mmgp
from mmgp import likelihoods
from mmgp import kernels
import tensorflow as tf
from mmgp import datasets
from mmgp import losses
from mmgp import util
dpath = '/experiments/datasets/'
dfile = 'p25_inputsdict.pickle'
dlinkfile = 'p25_linkinputsarray.pickle'
outdir = '/experiments/results/p25_nonsparse_cmmgp/'
try:
os.makedirs(outdir)
except FileExistsError:
pass
def get_inputs():
"""
inputsdict contains {'Yte': Yte, 'Ytr': Ytr, 'Xtr': Xtr, 'Xte': Xte} where values are np.arrays
    np.arrays are truncated so they split evenly into batches of size = batchsize
returns inputsdict, Xtr_link (ndarray, shape = [P, D_link_features])
"""
with open(os.path.join(dpath, dfile), 'rb') as f:
d_all = pickle.load(f)
with open(os.path.join(dpath, dlinkfile), 'rb') as f:
d_link = pickle.load(f)
return d_all, d_link
def init_z(train_inputs, num_inducing):
# Initialize inducing points using clustering.
mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing)
cluster_indices = mini_batch.fit_predict(train_inputs)
inducing_locations = mini_batch.cluster_centers_
return inducing_locations
FLAGS = util.util.get_flags()
BATCH_SIZE = FLAGS.batch_size
LEARNING_RATE = FLAGS.learning_rate
DISPLAY_STEP = FLAGS.display_step
EPOCHS = FLAGS.n_epochs
NUM_SAMPLES = FLAGS.mc_train
PRED_SAMPLES = FLAGS.mc_test
NUM_INDUCING = FLAGS.n_inducing
NUM_COMPONENTS = FLAGS.num_components
IS_ARD = FLAGS.is_ard
TOL = FLAGS.opt_tol
VAR_STEPS = FLAGS.var_steps
DIAG_POST = False
SPARSE_PRIOR = False
SPARSE_POST = True # option for non-diag post
MAXTIME = 1200
print("settings done")
# define GPRN P and Q
output_dim = 25 #P
node_dim = 25 #Q
lag_dim = 2
save_nlpds = False # If True saves samples of nlpds for n,p,s
# extract dataset
d, d_link = get_inputs()
Ytr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte']
data = datasets.DataSet(Xtr.astype(np.float32), Ytr.astype(np.float32), shuffle=False)
test = datasets.DataSet(Xte.astype(np.float32), Yte.astype(np.float32), shuffle=False)
print("dataset created")
# model config block rows (where P=Q): block all w.1, w.2 etc, leave f independent
# order of block_struct is rows, node functions
# lists required: block_struct, link_inputs, kern_link, kern
#block_struct nested list of grouping order
weight_struct = [[] for _ in range(output_dim)]
for i in range(output_dim):
row = list(range(i, i+output_dim*(node_dim-1)+1, output_dim))
row_0 = row.pop(i) # bring diag to pivot position
weight_struct[i] = [row_0] + row
nodes = [[x] for x in list(range(output_dim * node_dim, output_dim * node_dim + output_dim))]
block_struct = weight_struct + nodes
# create link inputs (link inputs used repeatedly but can have link input per group)
# permute to bring diagonal to first position
link_inputs = [[] for _ in range(output_dim)]
for i in range(output_dim):
idx = list(range(d_link.shape[0]))
link_inputs[i] = d_link[[idx.pop(i)] + idx, :]
link_inputs = link_inputs + [1.0 for i in range(output_dim)] # for full W row blocks, independent nodes
# create 'between' kernel list
klink_rows = [kernels.CompositeKernel('mul',[kernels.RadialBasis(2, std_dev=2.0, lengthscale=1.0, white=0.01, input_scaling = IS_ARD),
kernels.CompactSlice(2, active_dims=[0,1], lengthscale = 2.0, input_scaling = IS_ARD)] )
for i in range(output_dim) ]
klink_f = [1.0 for i in range(node_dim)]
kernlink = klink_rows + klink_f
# create 'within' kernel
# kern
lag_active_dims_s = [ [] for _ in range(output_dim)]
for i in range(output_dim):
lag_active_dims_s[i] = list(range(lag_dim*i, lag_dim*(i+1)))
k_rows = [kernels.CompositeKernel('mul',[kernels.RadialBasisSlice(lag_dim, active_dims=lag_active_dims_s[i],
std_dev = 1.0, white = 0.01, input_scaling = IS_ARD),
kernels.PeriodicSliceFixed(1, active_dims=[Xtr.shape[1]-1],
lengthscale=0.5, std_dev=1.0, period = 144) ])
for i in range(output_dim)]
k_f = [kernels.RadialBasisSlice(lag_dim, active_dims=lag_active_dims_s[i], std_dev = 1.0, white = 0.01, input_scaling = IS_ARD)
for i in range(output_dim)]
kern = k_rows + k_f
print('len link_inputs ',len(link_inputs))
print('len kernlink ',len(kernlink))
print('len kern ', len(kern))
print('no. groups = ', len(block_struct), 'no. latent functions =', len([i for b in block_struct for i in b]))
print('number latent functions', node_dim*(output_dim+1))
likelihood = likelihoods.CovaryingRegressionNetwork(output_dim, node_dim, std_dev = 0.2) # p, q, lik_noise
print("likelihood and kernels set")
Z = init_z(data.X, NUM_INDUCING)
print('inducing points set')
m = mmgp.ExplicitSCMMGP(output_dim, likelihood, kern, kernlink, block_struct, Z, link_inputs,
num_components=NUM_COMPONENTS, diag_post=DIAG_POST, sparse_prior=SPARSE_PRIOR,
sparse_post=SPARSE_POST, num_samples=NUM_SAMPLES, predict_samples=PRED_SAMPLES)
print("model set")
# initialise losses and logging
error_rate = losses.RootMeanSqError(data.Dout)
os.chdir(outdir)
with open("log_results.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'fit_runtime', 'nelbo', error_rate.get_name(),'generalised_nlpd'])
with open("log_params.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'raw_kernel_params', 'raw_kernlink_params', 'raw_likelihood_params', 'raw_weights'])
with open("log_comp_time.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'batch_time', 'nelbo_time', 'pred_time', 'gen_nlpd_time', error_rate.get_name()+'_time'])
# optimise
o = tf.train.AdamOptimizer(LEARNING_RATE, beta1=0.9,beta2=0.99)
print("start time = ", time.strftime('%X %x %Z'))
m.fit(data, o, var_steps = VAR_STEPS, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP,
test = test, loss = error_rate, tolerance = TOL, max_time=MAXTIME )
print("optimisation complete")
# export final predicted values and loss metrics
ypred = m.predict(test.X, batch_size = BATCH_SIZE) #same batchsize used for convenience
np.savetxt("predictions.csv", np.concatenate(ypred, axis=1), delimiter=",")
if save_nlpds == True:
nlpd_samples, nlpd_meanvar = m.nlpd_samples(test.X, test.Y, batch_size = BATCH_SIZE)
try:
np.savetxt("nlpd_meanvar.csv", nlpd_meanvar, delimiter=",") # N x 2P as for predictions
except:
print('nlpd_meanvar export fail')
try:
np.savetxt("nlpd_samples.csv", nlpd_samples, delimiter=",") # NP x S (NxS concat for P tasks)
except:
print('nlpd_samples export fail')
print("Final " + error_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred[0]))
print("Final " + "generalised_nlpd" + "=" + "%.4f" % m.nlpd_general(test.X, test.Y, batch_size = BATCH_SIZE))
error_rate_end = [losses.MeanAbsError(data.Dout)] # any extra accuracy measures at end of routine
print("Final ", [e.get_name() for e in error_rate_end])
print([e.eval(test.Y, ypred[0]) for e in error_rate_end])
predvar = [np.mean(np.mean(ypred[1]))]
print("Final predvar ", predvar)
with open("final_losses.csv", 'w', newline='') as f:
csv.writer(f).writerows([[e.get_name() for e in error_rate_end] + ['pred_var'],
[e.eval(test.Y, ypred[0]) for e in error_rate_end] + predvar])
print("finish time = " + time.strftime('%X %x %Z'))
|
[
"mmgp.ExplicitSCMMGP",
"mmgp.losses.MeanAbsError",
"numpy.mean",
"mmgp.kernels.PeriodicSliceFixed",
"mmgp.util.util.get_flags",
"numpy.concatenate",
"tensorflow.train.AdamOptimizer",
"mmgp.kernels.RadialBasisSlice",
"csv.writer",
"pickle.load",
"numpy.savetxt",
"mmgp.kernels.CompactSlice",
"mmgp.likelihoods.CovaryingRegressionNetwork",
"os.makedirs",
"time.strftime",
"os.path.join",
"os.chdir",
"mmgp.kernels.RadialBasis",
"mmgp.losses.RootMeanSqError"
] |
[((2670, 2691), 'mmgp.util.util.get_flags', 'util.util.get_flags', ([], {}), '()\n', (2689, 2691), False, 'from mmgp import util\n'), ((6258, 6331), 'mmgp.likelihoods.CovaryingRegressionNetwork', 'likelihoods.CovaryingRegressionNetwork', (['output_dim', 'node_dim'], {'std_dev': '(0.2)'}), '(output_dim, node_dim, std_dev=0.2)\n', (6296, 6331), False, 'from mmgp import likelihoods\n'), ((6459, 6720), 'mmgp.ExplicitSCMMGP', 'mmgp.ExplicitSCMMGP', (['output_dim', 'likelihood', 'kern', 'kernlink', 'block_struct', 'Z', 'link_inputs'], {'num_components': 'NUM_COMPONENTS', 'diag_post': 'DIAG_POST', 'sparse_prior': 'SPARSE_PRIOR', 'sparse_post': 'SPARSE_POST', 'num_samples': 'NUM_SAMPLES', 'predict_samples': 'PRED_SAMPLES'}), '(output_dim, likelihood, kern, kernlink, block_struct, Z,\n link_inputs, num_components=NUM_COMPONENTS, diag_post=DIAG_POST,\n sparse_prior=SPARSE_PRIOR, sparse_post=SPARSE_POST, num_samples=\n NUM_SAMPLES, predict_samples=PRED_SAMPLES)\n', (6478, 6720), False, 'import mmgp\n'), ((6789, 6822), 'mmgp.losses.RootMeanSqError', 'losses.RootMeanSqError', (['data.Dout'], {}), '(data.Dout)\n', (6811, 6822), False, 'from mmgp import losses\n'), ((6826, 6842), 'os.chdir', 'os.chdir', (['outdir'], {}), '(outdir)\n', (6834, 6842), False, 'import os\n'), ((7378, 7438), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['LEARNING_RATE'], {'beta1': '(0.9)', 'beta2': '(0.99)'}), '(LEARNING_RATE, beta1=0.9, beta2=0.99)\n', (7400, 7438), True, 'import tensorflow as tf\n'), ((1792, 1811), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1803, 1811), False, 'import os\n'), ((5777, 5896), 'mmgp.kernels.RadialBasisSlice', 'kernels.RadialBasisSlice', (['lag_dim'], {'active_dims': 'lag_active_dims_s[i]', 'std_dev': '(1.0)', 'white': '(0.01)', 'input_scaling': 'IS_ARD'}), '(lag_dim, active_dims=lag_active_dims_s[i], std_dev\n =1.0, white=0.01, input_scaling=IS_ARD)\n', (5801, 5896), False, 'from mmgp import kernels\n'), ((7462, 7487), 'time.strftime', 'time.strftime', (['"""%X %x %Z"""'], {}), "('%X %x %Z')\n", (7475, 7487), False, 'import time\n'), ((7883, 7912), 'numpy.concatenate', 'np.concatenate', (['ypred'], {'axis': '(1)'}), '(ypred, axis=1)\n', (7897, 7912), True, 'import numpy as np\n'), ((8603, 8633), 'mmgp.losses.MeanAbsError', 'losses.MeanAbsError', (['data.Dout'], {}), '(data.Dout)\n', (8622, 8633), False, 'from mmgp import losses\n'), ((2214, 2228), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2225, 2228), False, 'import pickle\n'), ((2308, 2322), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2319, 2322), False, 'import pickle\n'), ((8064, 8123), 'numpy.savetxt', 'np.savetxt', (['"""nlpd_meanvar.csv"""', 'nlpd_meanvar'], {'delimiter': '""","""'}), "('nlpd_meanvar.csv', nlpd_meanvar, delimiter=',')\n", (8074, 8123), True, 'import numpy as np\n'), ((8228, 8287), 'numpy.savetxt', 'np.savetxt', (['"""nlpd_samples.csv"""', 'nlpd_samples'], {'delimiter': '""","""'}), "('nlpd_samples.csv', nlpd_samples, delimiter=',')\n", (8238, 8287), True, 'import numpy as np\n'), ((8819, 8836), 'numpy.mean', 'np.mean', (['ypred[1]'], {}), '(ypred[1])\n', (8826, 8836), True, 'import numpy as np\n'), ((9134, 9159), 'time.strftime', 'time.strftime', (['"""%X %x %Z"""'], {}), "('%X %x %Z')\n", (9147, 9159), False, 'import time\n'), ((2157, 2183), 'os.path.join', 'os.path.join', (['dpath', 'dfile'], {}), '(dpath, dfile)\n', (2169, 2183), False, 'import os\n'), ((2246, 2276), 'os.path.join', 'os.path.join', (['dpath', 'dlinkfile'], {}), '(dpath, dlinkfile)\n', (2258, 
2276), False, 'import os\n'), ((4725, 4815), 'mmgp.kernels.RadialBasis', 'kernels.RadialBasis', (['(2)'], {'std_dev': '(2.0)', 'lengthscale': '(1.0)', 'white': '(0.01)', 'input_scaling': 'IS_ARD'}), '(2, std_dev=2.0, lengthscale=1.0, white=0.01,\n input_scaling=IS_ARD)\n', (4744, 4815), False, 'from mmgp import kernels\n'), ((4860, 4947), 'mmgp.kernels.CompactSlice', 'kernels.CompactSlice', (['(2)'], {'active_dims': '[0, 1]', 'lengthscale': '(2.0)', 'input_scaling': 'IS_ARD'}), '(2, active_dims=[0, 1], lengthscale=2.0, input_scaling=\n IS_ARD)\n', (4880, 4947), False, 'from mmgp import kernels\n'), ((5332, 5451), 'mmgp.kernels.RadialBasisSlice', 'kernels.RadialBasisSlice', (['lag_dim'], {'active_dims': 'lag_active_dims_s[i]', 'std_dev': '(1.0)', 'white': '(0.01)', 'input_scaling': 'IS_ARD'}), '(lag_dim, active_dims=lag_active_dims_s[i], std_dev\n =1.0, white=0.01, input_scaling=IS_ARD)\n', (5356, 5451), False, 'from mmgp import kernels\n'), ((5544, 5652), 'mmgp.kernels.PeriodicSliceFixed', 'kernels.PeriodicSliceFixed', (['(1)'], {'active_dims': '[Xtr.shape[1] - 1]', 'lengthscale': '(0.5)', 'std_dev': '(1.0)', 'period': '(144)'}), '(1, active_dims=[Xtr.shape[1] - 1], lengthscale=\n 0.5, std_dev=1.0, period=144)\n', (5570, 5652), False, 'from mmgp import kernels\n'), ((6901, 6914), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6911, 6914), False, 'import csv\n'), ((7058, 7071), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (7068, 7071), False, 'import csv\n'), ((7236, 7249), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (7246, 7249), False, 'import csv\n'), ((8934, 8947), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (8944, 8947), False, 'import csv\n')]
|
#!/usr/bin/env python3
import sys
import os
import logging
import numpy as np
import pandas as pd
import dateutil
def tempF2C(x): return (x-32.0)*5.0/9.0
def tempC2F(x): return (x*9.0/5.0)+32.0
def load_temperature_hdf5(temps_fn, local_time_offset, basedir=None, start_year=None, truncate_to_full_day=False):
## Load temperature
# temps_fn = "{}_AT_cleaned.h5".format(station_callsign)
logging.info("Using saved temperatures file '{}'".format(temps_fn))
if basedir is not None:
temps_fn = os.path.join(basedir, temps_fn)
tempdf = pd.read_hdf(temps_fn, 'table')
tmp = local_time_offset.split(':')
tmp = int(tmp[0])*3600+int(tmp[1])*60
sitetz = dateutil.tz.tzoffset(local_time_offset, tmp)
tempdf.index = tempdf.index.tz_convert(sitetz)
if truncate_to_full_day:
x = tempdf.index[-1]
if x.hour != 23:
x = x-pd.Timedelta(days=1)
tmp = '{:04d}-{:02d}-{:02d}'.format(x.year, x.month, x.day)
tempdf = tempdf.loc[:tmp]
if start_year is not None:
tempdf = tempdf.loc['{}-01-01'.format(start_year):]
logging.info("Temperature data date range used: {} through {}".format(tempdf.index[0], tempdf.index[-1]))
return tempdf
def load_temperature_csv(fn, local_time_offset=None):
t = pd.read_csv(fn, index_col=0)
if local_time_offset is not None:
tmp = local_time_offset.split(':')
tmp = int(tmp[0])*3600+int(tmp[1])*60
sitetz = dateutil.tz.tzoffset(local_time_offset, tmp)
#t.index = pd.to_datetime(t.index).tz_localize('UTC').tz_convert(sitetz) # @TCC this fails if csv contains datetimes with TZ
t.index = pd.to_datetime(t.index)
try:
t.index = t.index.tz_localize('UTC')
except TypeError:
pass
t.index = t.index.tz_convert(sitetz)
return t
# Function which computes BM (single sine method) degree day generation from temperature data
def compute_BMDD_Fs(tmin, tmax, base_temp, dd_gen):
# Used internally
def _compute_daily_BM_DD(mint, maxt, avet, base_temp):
"""Use standard Baskerville-Ermin (single sine) degree-day method
to compute the degree-day values for each a single day.
"""
if avet is None:
avet = (mint+maxt)/2.0 # simple midpoint (like in the refs)
dd = np.nan # value which we're computing
# Step 1: Adjust for observation time; not relevant
# Step 2: GDD = 0 if max < base (curve all below base)
if maxt < base_temp:
dd = 0
# Step 3: Calc mean temp for day; already done previously
# Step 4: min > base; then whole curve counts
elif mint >= base_temp:
dd = avet - base_temp
# Step 5: else use curve minus part below base
else:
W = (maxt-mint)/2.0
tmp = (base_temp-avet) / W
if tmp < -1:
print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.format(tmp))
tmp = -1
if tmp > 1:
print('WARNING: (base_temp-avet)/W = {} : should be [-1:1]'.format(tmp))
tmp = 1
A = np.arcsin(tmp)
dd = ((W*np.cos(A))-((base_temp-avet)*((np.pi/2.0)-A)))/np.pi
return dd
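    # For reference, the value computed in the final branch above is the
    # single-sine area above the base temperature:
    #     DD = [ W*cos(A) - (Tbase - Tavg)*(pi/2 - A) ] / pi
    # with W = (Tmax - Tmin)/2 and A = arcsin((Tbase - Tavg)/W).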
# compute the degree-days for each day in the temperature input (from tmin and tmax vectors)
dd = pd.concat([tmin,tmax], axis=1)
dd.columns = ['tmin', 'tmax']
dd['DD'] = dd.apply(lambda x: _compute_daily_BM_DD(x[0], x[1], (x[0]+x[1])/2.0, base_temp), axis=1)
# compute the degree-days for each day in the temperature input (from a daily groupby)
# grp = t.groupby(pd.TimeGrouper('D'))
# dd = grp.agg(lambda x: _compute_daily_BM_DD(np.min(x), np.max(x), None, base_temp))
# dd.columns = ['DD']
# Find the point where cumulative sums of degree days cross the threshold
cDD = dd['DD'].cumsum(skipna=True)
for cumdd_threshold,label in [[1*dd_gen,'F1'], [2*dd_gen,'F2'], [3*dd_gen,'F3']]:
dtmp = np.zeros(len(dd['DD']))*np.nan
tmp = np.searchsorted(cDD, cDD+(cumdd_threshold)-dd['DD'], side='left').astype(float)
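        # searchsorted with side='left' returns, for each start day i, the first
        # index j with cDD[j] >= cDD[i] - DD[i] + threshold, i.e. the day on which
        # degree-days accumulated from day i (inclusive) reach the threshold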
tmp[tmp>=len(tmp)] = np.nan
#dd[label+'_idx'] = tmp
# convert those indexes into end times
e = pd.Series(index=dd.index, dtype='float64')#, dtype='datetime64[ns]')
#e[~np.isnan(tmp)] = dd.index[tmp[~np.isnan(tmp)].astype(int)] # @TCC previous code
e.loc[~np.isnan(tmp)] = dd.index[tmp[~np.isnan(tmp)].astype(int)]
e.loc[np.isnan(tmp)] = np.nan
dd[label+'_end'] = e
# and duration...
#dd[label] = (e-dd.index+pd.Timedelta(days=1)).apply(lambda x: np.nan if pd.isnull(x) else x.days) # @TCC previous code
dd[label] = (pd.to_datetime(e)-dd.index+pd.Timedelta(days=1)).apply(lambda x: np.nan if pd.isnull(x) else x.days)
#dd.loc[np.isnan(tmp), label] = np.nan
print("DD dataframe min values\n", dd.min())
return dd
def compute_year_over_year_norm(in_dataframe,
start, end,
norm_start=None, norm_end=None,
freq='daily',
interp_method='linear',
norm_method='mean'):
"""
Parameters
----------
    start: convertible to Datetime
        start range of dates to output
    end: convertible to Datetime
        end range of dates to output
    norm_start : convertible to Datetime or None
        `None` will use in_dataframe.index[0]
    norm_end : convertible to Datetime or None
if given (not None), output range does not include `norm_end` (it is half-open)
`None` will use in_dataframe.index[-1]
freq : {'daily', 'hourly'}
interp_method : str or None
`None` will skip resample and interpolation, so
`in_dataframe` must already be daily or hourly (depending on `freq`)!
norm_method : {'mean', 'median'}
"""
if freq == 'hourly':
hrs = 24
hrs_freq = '1h'
elif freq == 'daily':
hrs = 1
hrs_freq = '24h'
else:
raise ValueError("Invalid `freq` argument value: {}".format(freq))
if norm_start is None:
norm_start = in_dataframe.index[0]
if norm_end is None:
norm_end = in_dataframe.index[-1]
else:
norm_end = pd.to_datetime([norm_end])[0] - pd.Timedelta('1 second')
print('Computing using range:', norm_start, 'to', norm_end)
if interp_method is None: # skip resample+interpolation (assumes in_dataframe is daily!)
t = in_dataframe.loc[norm_start:norm_end]
else: # resample and interpolate to get hourly
t = in_dataframe.resample(hrs_freq).interpolate(method=interp_method).loc[norm_start:norm_end]
if norm_method == 'mean':
norm = t.groupby([t.index.month, t.index.day, t.index.hour]).mean().sort_index()
elif norm_method == 'median':
norm = t.groupby([t.index.month, t.index.day, t.index.hour]).median().sort_index()
else:
assert False, "Error: Unknown norm_method '{}'".format(norm_method)
# now replicate and trim to the desired output range
start = pd.to_datetime(start)
end = pd.to_datetime(end)
# need a non-leapyear and leapyear version
norm_ly = norm.copy()
if norm.shape[0] == 366*hrs:
norm = norm.drop((2,29,))
else: # norm doesn't include any leapyear data
assert norm.shape[0] == 365*hrs
# make Feb 29 the mean of Feb 28 and Mar 1
foo = (norm.loc[(2,28,)] + norm.loc[(3,1,)]) / 2.0
foo.index = pd.MultiIndex.from_product( ([2],[29],list(range(hrs))) )
norm_ly = pd.concat((norm_ly,foo)).sort_index()
norm_ly.sort_index(inplace=True) # probably not needed
# build up a 'long normal' (lnorm) dataframe year by year by appending the norm or norm_ly
lnorm = None
for yr in np.arange(start.year, end.year+1):
#print(yr)
idx = pd.date_range(start='{}-{:02d}-{:02d} {:02d}:00:00'.format(yr,*norm.index[0]),
end= '{}-{:02d}-{:02d} {:02d}:00:00'.format(yr,*norm.index[-1]),
freq=hrs_freq)
if idx.shape[0] == 366*hrs:
foo = norm_ly.copy()
else:
assert norm.shape[0] == 365*hrs
foo = norm.copy()
foo.index = idx
if lnorm is None:
lnorm = foo
else:
lnorm = lnorm.append(foo)
return lnorm.loc[start:end]
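# Minimal usage sketch (hypothetical file name, offset and thresholds; the real
# driver script is not included here):
#     tempdf = load_temperature_hdf5('KSFO_AT_cleaned.h5', local_time_offset='-8:00',
#                                    start_year=2010, truncate_to_full_day=True)
#     daily = tempdf.iloc[:, 0].resample('D')
#     dd = compute_BMDD_Fs(daily.min(), daily.max(), base_temp=10.0, dd_gen=350)
#     norm = compute_year_over_year_norm(tempdf, start='2016-01-01', end='2016-12-31',
#                                        freq='hourly', interp_method='linear')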
|
[
"pandas.Series",
"pandas.isnull",
"pandas.read_csv",
"numpy.arange",
"numpy.searchsorted",
"pandas.Timedelta",
"os.path.join",
"numpy.arcsin",
"dateutil.tz.tzoffset",
"pandas.read_hdf",
"numpy.isnan",
"numpy.cos",
"pandas.concat",
"pandas.to_datetime"
] |
[((562, 592), 'pandas.read_hdf', 'pd.read_hdf', (['temps_fn', '"""table"""'], {}), "(temps_fn, 'table')\n", (573, 592), True, 'import pandas as pd\n'), ((688, 732), 'dateutil.tz.tzoffset', 'dateutil.tz.tzoffset', (['local_time_offset', 'tmp'], {}), '(local_time_offset, tmp)\n', (708, 732), False, 'import dateutil\n'), ((1300, 1328), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'index_col': '(0)'}), '(fn, index_col=0)\n', (1311, 1328), True, 'import pandas as pd\n'), ((3385, 3416), 'pandas.concat', 'pd.concat', (['[tmin, tmax]'], {'axis': '(1)'}), '([tmin, tmax], axis=1)\n', (3394, 3416), True, 'import pandas as pd\n'), ((7182, 7203), 'pandas.to_datetime', 'pd.to_datetime', (['start'], {}), '(start)\n', (7196, 7203), True, 'import pandas as pd\n'), ((7214, 7233), 'pandas.to_datetime', 'pd.to_datetime', (['end'], {}), '(end)\n', (7228, 7233), True, 'import pandas as pd\n'), ((7900, 7935), 'numpy.arange', 'np.arange', (['start.year', '(end.year + 1)'], {}), '(start.year, end.year + 1)\n', (7909, 7935), True, 'import numpy as np\n'), ((517, 548), 'os.path.join', 'os.path.join', (['basedir', 'temps_fn'], {}), '(basedir, temps_fn)\n', (529, 548), False, 'import os\n'), ((1473, 1517), 'dateutil.tz.tzoffset', 'dateutil.tz.tzoffset', (['local_time_offset', 'tmp'], {}), '(local_time_offset, tmp)\n', (1493, 1517), False, 'import dateutil\n'), ((1669, 1692), 'pandas.to_datetime', 'pd.to_datetime', (['t.index'], {}), '(t.index)\n', (1683, 1692), True, 'import pandas as pd\n'), ((4275, 4317), 'pandas.Series', 'pd.Series', ([], {'index': 'dd.index', 'dtype': '"""float64"""'}), "(index=dd.index, dtype='float64')\n", (4284, 4317), True, 'import pandas as pd\n'), ((4525, 4538), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (4533, 4538), True, 'import numpy as np\n'), ((6393, 6417), 'pandas.Timedelta', 'pd.Timedelta', (['"""1 second"""'], {}), "('1 second')\n", (6405, 6417), True, 'import pandas as pd\n'), ((886, 906), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (898, 906), True, 'import pandas as pd\n'), ((3171, 3185), 'numpy.arcsin', 'np.arcsin', (['tmp'], {}), '(tmp)\n', (3180, 3185), True, 'import numpy as np\n'), ((4068, 4135), 'numpy.searchsorted', 'np.searchsorted', (['cDD', "(cDD + cumdd_threshold - dd['DD'])"], {'side': '"""left"""'}), "(cDD, cDD + cumdd_threshold - dd['DD'], side='left')\n", (4083, 4135), True, 'import numpy as np\n'), ((4452, 4465), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (4460, 4465), True, 'import numpy as np\n'), ((6361, 6387), 'pandas.to_datetime', 'pd.to_datetime', (['[norm_end]'], {}), '([norm_end])\n', (6375, 6387), True, 'import pandas as pd\n'), ((7672, 7697), 'pandas.concat', 'pd.concat', (['(norm_ly, foo)'], {}), '((norm_ly, foo))\n', (7681, 7697), True, 'import pandas as pd\n'), ((4780, 4800), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4792, 4800), True, 'import pandas as pd\n'), ((4828, 4840), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (4837, 4840), True, 'import pandas as pd\n'), ((4753, 4770), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (4767, 4770), True, 'import pandas as pd\n'), ((3207, 3216), 'numpy.cos', 'np.cos', (['A'], {}), '(A)\n', (3213, 3216), True, 'import numpy as np\n'), ((4483, 4496), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (4491, 4496), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
NumPy Array Editor Dialog based on Qt
"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from __future__ import print_function
# Third party imports
from qtpy.compat import from_qvariant, to_qvariant
from qtpy.QtCore import (QAbstractTableModel, QItemSelection, QLocale,
QItemSelectionRange, QModelIndex, Qt, Slot)
from qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence
from qtpy.QtWidgets import (QAbstractItemDelegate, QApplication, QCheckBox,
QComboBox, QDialog, QDialogButtonBox, QGridLayout,
QHBoxLayout, QInputDialog, QItemDelegate, QLabel,
QLineEdit, QMenu, QMessageBox, QPushButton,
QSpinBox, QStackedWidget, QTableView, QVBoxLayout,
QWidget)
import numpy as np
# Local imports
from spyder.config.base import _
from spyder.config.fonts import DEFAULT_SMALL_DELTA
from spyder.config.gui import get_font, config_shortcut
from spyder.py3compat import (io, is_binary_string, is_string,
is_text_string, PY3, to_binary_string,
to_text_string)
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import add_actions, create_action, keybinding
# Note: string and unicode data types will be formatted with '%s' (see below)
SUPPORTED_FORMATS = {
'single': '%.6g',
'double': '%.6g',
'float_': '%.6g',
'longfloat': '%.6g',
'float16': '%.6g',
'float32': '%.6g',
'float64': '%.6g',
'float96': '%.6g',
'float128': '%.6g',
'csingle': '%r',
'complex_': '%r',
'clongfloat': '%r',
'complex64': '%r',
'complex128': '%r',
'complex192': '%r',
'complex256': '%r',
'byte': '%d',
'bytes8': '%s',
'short': '%d',
'intc': '%d',
'int_': '%d',
'longlong': '%d',
'intp': '%d',
'int8': '%d',
'int16': '%d',
'int32': '%d',
'int64': '%d',
'ubyte': '%d',
'ushort': '%d',
'uintc': '%d',
'uint': '%d',
'ulonglong': '%d',
'uintp': '%d',
'uint8': '%d',
'uint16': '%d',
'uint32': '%d',
'uint64': '%d',
'bool_': '%r',
'bool8': '%r',
'bool': '%r',
}
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
#==============================================================================
# Utility functions
#==============================================================================
def is_float(dtype):
"""Return True if datatype dtype is a float kind"""
return ('float' in dtype.name) or dtype.name in ['single', 'double']
def is_number(dtype):
"""Return True is datatype dtype is a number kind"""
return is_float(dtype) or ('int' in dtype.name) or ('long' in dtype.name) \
or ('short' in dtype.name)
def get_idx_rect(index_list):
"""Extract the boundaries from a list of indexes"""
rows, cols = list(zip(*[(i.row(), i.column()) for i in index_list]))
return ( min(rows), max(rows), min(cols), max(cols) )
#==============================================================================
# Main classes
#==============================================================================
class ArrayModel(QAbstractTableModel):
"""Array Editor Table Model"""
ROWS_TO_LOAD = 500
COLS_TO_LOAD = 40
def __init__(self, data, format="%.6g", xlabels=None, ylabels=None,
readonly=False, parent=None):
QAbstractTableModel.__init__(self)
self.dialog = parent
self.changes = {}
self.xlabels = xlabels
self.ylabels = ylabels
self.readonly = readonly
self.test_array = np.array([0], dtype=data.dtype)
# for complex numbers, shading will be based on absolute value
# but for all other types it will be the real part
if data.dtype in (np.complex64, np.complex128):
self.color_func = np.abs
else:
self.color_func = np.real
# Backgroundcolor settings
huerange = [.66, .99] # Hue
self.sat = .7 # Saturation
self.val = 1. # Value
self.alp = .6 # Alpha-channel
self._data = data
self._format = format
self.total_rows = self._data.shape[0]
self.total_cols = self._data.shape[1]
size = self.total_rows * self.total_cols
try:
self.vmin = np.nanmin(self.color_func(data))
self.vmax = np.nanmax(self.color_func(data))
if self.vmax == self.vmin:
self.vmin -= 1
self.hue0 = huerange[0]
self.dhue = huerange[1]-huerange[0]
self.bgcolor_enabled = True
except (TypeError, ValueError):
self.vmin = None
self.vmax = None
self.hue0 = None
self.dhue = None
self.bgcolor_enabled = False
# Use paging when the total size, number of rows or number of
# columns is too large
if size > LARGE_SIZE:
self.rows_loaded = self.ROWS_TO_LOAD
self.cols_loaded = self.COLS_TO_LOAD
else:
if self.total_rows > LARGE_NROWS:
self.rows_loaded = self.ROWS_TO_LOAD
else:
self.rows_loaded = self.total_rows
if self.total_cols > LARGE_COLS:
self.cols_loaded = self.COLS_TO_LOAD
else:
self.cols_loaded = self.total_cols
def get_format(self):
"""Return current format"""
# Avoid accessing the private attribute _format from outside
return self._format
def get_data(self):
"""Return data"""
return self._data
def set_format(self, format):
"""Change display format"""
self._format = format
self.reset()
def columnCount(self, qindex=QModelIndex()):
"""Array column number"""
if self.total_cols <= self.cols_loaded:
return self.total_cols
else:
return self.cols_loaded
def rowCount(self, qindex=QModelIndex()):
"""Array row number"""
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
def can_fetch_more(self, rows=False, columns=False):
if rows:
if self.total_rows > self.rows_loaded:
return True
else:
return False
if columns:
if self.total_cols > self.cols_loaded:
return True
else:
return False
def fetch_more(self, rows=False, columns=False):
if self.can_fetch_more(rows=rows):
reminder = self.total_rows - self.rows_loaded
items_to_fetch = min(reminder, self.ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if self.can_fetch_more(columns=columns):
reminder = self.total_cols - self.cols_loaded
items_to_fetch = min(reminder, self.COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns()
def bgcolor(self, state):
"""Toggle backgroundcolor"""
self.bgcolor_enabled = state > 0
self.reset()
def get_value(self, index):
i = index.row()
j = index.column()
if len(self._data.shape) == 1:
value = self._data[j]
else:
value = self._data[i, j]
return self.changes.get((i, j), value)
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
value = self.get_value(index)
if is_binary_string(value):
try:
value = to_text_string(value, 'utf8')
except:
pass
if role == Qt.DisplayRole:
if value is np.ma.masked:
return ''
else:
try:
return to_qvariant(self._format % value)
except TypeError:
self.readonly = True
return repr(value)
elif role == Qt.TextAlignmentRole:
return to_qvariant(int(Qt.AlignCenter|Qt.AlignVCenter))
elif role == Qt.BackgroundColorRole and self.bgcolor_enabled \
and value is not np.ma.masked:
try:
hue = (self.hue0 +
self.dhue * (float(self.vmax) - self.color_func(value))
/ (float(self.vmax) - self.vmin))
hue = float(np.abs(hue))
color = QColor.fromHsvF(hue, self.sat, self.val, self.alp)
return to_qvariant(color)
except TypeError:
return to_qvariant()
elif role == Qt.FontRole:
return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
return to_qvariant()
def setData(self, index, value, role=Qt.EditRole):
"""Cell content change"""
if not index.isValid() or self.readonly:
return False
i = index.row()
j = index.column()
value = from_qvariant(value, str)
dtype = self._data.dtype.name
if dtype == "bool":
try:
val = bool(float(value))
except ValueError:
val = value.lower() == "true"
elif dtype.startswith("string") or dtype.startswith("bytes"):
val = to_binary_string(value, 'utf8')
elif dtype.startswith("unicode") or dtype.startswith("str"):
val = to_text_string(value)
else:
if value.lower().startswith('e') or value.lower().endswith('e'):
return False
try:
val = complex(value)
if not val.imag:
val = val.real
except ValueError as e:
QMessageBox.critical(self.dialog, "Error",
"Value error: %s" % str(e))
return False
try:
self.test_array[0] = val # will raise an Exception eventually
except OverflowError as e:
print("OverflowError: " + str(e)) # spyder: test-skip
QMessageBox.critical(self.dialog, "Error",
"Overflow error: %s" % str(e))
return False
# Add change to self.changes
self.changes[(i, j)] = val
self.dataChanged.emit(index, index)
if not is_string(val):
if val > self.vmax:
self.vmax = val
if val < self.vmin:
self.vmin = val
return True
def flags(self, index):
"""Set editable flag"""
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Set header data"""
if role != Qt.DisplayRole:
return to_qvariant()
labels = self.xlabels if orientation == Qt.Horizontal else self.ylabels
if labels is None:
return to_qvariant(int(section))
else:
return to_qvariant(labels[section])
def reset(self):
self.beginResetModel()
self.endResetModel()
class ArrayDelegate(QItemDelegate):
"""Array Editor Item Delegate"""
def __init__(self, dtype, parent=None):
QItemDelegate.__init__(self, parent)
self.dtype = dtype
def createEditor(self, parent, option, index):
"""Create editor widget"""
model = index.model()
value = model.get_value(index)
if model._data.dtype.name == "bool":
value = not value
model.setData(index, to_qvariant(value))
return
elif value is not np.ma.masked:
editor = QLineEdit(parent)
editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
editor.setAlignment(Qt.AlignCenter)
if is_number(self.dtype):
validator = QDoubleValidator(editor)
validator.setLocale(QLocale('C'))
editor.setValidator(validator)
editor.returnPressed.connect(self.commitAndCloseEditor)
return editor
def commitAndCloseEditor(self):
"""Commit and close editor"""
editor = self.sender()
        # Avoid a segfault with PyQt5. The variable value won't be changed,
        # but at least Spyder won't crash. It seems to be caused by a bug in sip.
try:
self.commitData.emit(editor)
except AttributeError:
pass
self.closeEditor.emit(editor, QAbstractItemDelegate.NoHint)
def setEditorData(self, editor, index):
"""Set editor widget's data"""
text = from_qvariant(index.model().data(index, Qt.DisplayRole), str)
editor.setText(text)
#TODO: Implement "Paste" (from clipboard) feature
class ArrayView(QTableView):
"""Array view class"""
def __init__(self, parent, model, dtype, shape):
QTableView.__init__(self, parent)
self.setModel(model)
self.setItemDelegate(ArrayDelegate(dtype, self))
total_width = 0
for k in range(shape[1]):
total_width += self.columnWidth(k)
self.viewport().resize(min(total_width, 1024), self.height())
self.shape = shape
self.menu = self.setup_menu()
config_shortcut(self.copy, context='variable_explorer', name='copy',
parent=self)
self.horizontalScrollBar().valueChanged.connect(
lambda val: self.load_more_data(val, columns=True))
self.verticalScrollBar().valueChanged.connect(
lambda val: self.load_more_data(val, rows=True))
def load_more_data(self, value, rows=False, columns=False):
try:
old_selection = self.selectionModel().selection()
old_rows_loaded = old_cols_loaded = None
if rows and value == self.verticalScrollBar().maximum():
old_rows_loaded = self.model().rows_loaded
self.model().fetch_more(rows=rows)
if columns and value == self.horizontalScrollBar().maximum():
old_cols_loaded = self.model().cols_loaded
self.model().fetch_more(columns=columns)
if old_rows_loaded is not None or old_cols_loaded is not None:
# if we've changed anything, update selection
new_selection = QItemSelection()
for part in old_selection:
top = part.top()
bottom = part.bottom()
if (old_rows_loaded is not None and
top == 0 and bottom == (old_rows_loaded-1)):
# complete column selected (so expand it to match
# updated range)
bottom = self.model().rows_loaded-1
left = part.left()
right = part.right()
if (old_cols_loaded is not None
and left == 0 and right == (old_cols_loaded-1)):
                        # complete row selected (so expand it to match updated
# range)
right = self.model().cols_loaded-1
top_left = self.model().index(top, left)
bottom_right = self.model().index(bottom, right)
part = QItemSelectionRange(top_left, bottom_right)
new_selection.append(part)
                self.selectionModel().select(
                    new_selection, self.selectionModel().ClearAndSelect)
except NameError:
# Needed to handle a NameError while fetching data when closing
            # See issue 7880
pass
def resize_to_contents(self):
"""Resize cells to contents"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.resizeColumnsToContents()
self.model().fetch_more(columns=True)
self.resizeColumnsToContents()
QApplication.restoreOverrideCursor()
def setup_menu(self):
"""Setup context menu"""
self.copy_action = create_action(self, _('Copy'),
shortcut=keybinding('Copy'),
icon=ima.icon('editcopy'),
triggered=self.copy,
context=Qt.WidgetShortcut)
menu = QMenu(self)
add_actions(menu, [self.copy_action, ])
return menu
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
self.menu.popup(event.globalPos())
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method"""
if event == QKeySequence.Copy:
self.copy()
else:
QTableView.keyPressEvent(self, event)
def _sel_to_text(self, cell_range):
"""Copy an array portion to a unicode string"""
if not cell_range:
return
row_min, row_max, col_min, col_max = get_idx_rect(cell_range)
if col_min == 0 and col_max == (self.model().cols_loaded-1):
# we've selected a whole column. It isn't possible to
# select only the first part of a column without loading more,
# so we can treat it as intentional and copy the whole thing
col_max = self.model().total_cols-1
if row_min == 0 and row_max == (self.model().rows_loaded-1):
row_max = self.model().total_rows-1
_data = self.model().get_data()
if PY3:
output = io.BytesIO()
else:
output = io.StringIO()
try:
np.savetxt(output, _data[row_min:row_max+1, col_min:col_max+1],
delimiter='\t', fmt=self.model().get_format())
except:
QMessageBox.warning(self, _("Warning"),
_("It was not possible to copy values for "
"this array"))
return
contents = output.getvalue().decode('utf-8')
output.close()
return contents
@Slot()
def copy(self):
"""Copy text to clipboard"""
cliptxt = self._sel_to_text( self.selectedIndexes() )
clipboard = QApplication.clipboard()
clipboard.setText(cliptxt)
class ArrayEditorWidget(QWidget):
def __init__(self, parent, data, readonly=False,
xlabels=None, ylabels=None):
QWidget.__init__(self, parent)
self.data = data
self.old_data_shape = None
if len(self.data.shape) == 1:
self.old_data_shape = self.data.shape
self.data.shape = (self.data.shape[0], 1)
elif len(self.data.shape) == 0:
self.old_data_shape = self.data.shape
self.data.shape = (1, 1)
format = SUPPORTED_FORMATS.get(data.dtype.name, '%s')
self.model = ArrayModel(self.data, format=format, xlabels=xlabels,
ylabels=ylabels, readonly=readonly, parent=self)
self.view = ArrayView(self, self.model, data.dtype, data.shape)
btn_layout = QHBoxLayout()
btn_layout.setAlignment(Qt.AlignLeft)
btn = QPushButton(_( "Format"))
# disable format button for int type
btn.setEnabled(is_float(data.dtype))
btn_layout.addWidget(btn)
btn.clicked.connect(self.change_format)
btn = QPushButton(_( "Resize"))
btn_layout.addWidget(btn)
btn.clicked.connect(self.view.resize_to_contents)
bgcolor = QCheckBox(_( 'Background color'))
bgcolor.setChecked(self.model.bgcolor_enabled)
bgcolor.setEnabled(self.model.bgcolor_enabled)
bgcolor.stateChanged.connect(self.model.bgcolor)
btn_layout.addWidget(bgcolor)
layout = QVBoxLayout()
layout.addWidget(self.view)
layout.addLayout(btn_layout)
self.setLayout(layout)
def accept_changes(self):
"""Accept changes"""
for (i, j), value in list(self.model.changes.items()):
self.data[i, j] = value
if self.old_data_shape is not None:
self.data.shape = self.old_data_shape
def reject_changes(self):
"""Reject changes"""
if self.old_data_shape is not None:
self.data.shape = self.old_data_shape
def change_format(self):
"""Change display format"""
format, valid = QInputDialog.getText(self, _( 'Format'),
_( "Float formatting"),
QLineEdit.Normal, self.model.get_format())
if valid:
format = str(format)
try:
format % 1.1
except:
QMessageBox.critical(self, _("Error"),
_("Format (%s) is incorrect") % format)
return
self.model.set_format(format)
class ArrayEditor(QDialog):
"""Array Editor Dialog"""
def __init__(self, parent=None):
QDialog.__init__(self, parent)
# Destroying the C++ object right after closing the dialog box,
# otherwise it may be garbage-collected in another QThread
# (e.g. the editor's analysis thread in Spyder), thus leading to
# a segmentation fault on UNIX or an application crash on Windows
self.setAttribute(Qt.WA_DeleteOnClose)
self.data = None
self.arraywidget = None
self.stack = None
self.layout = None
self.btn_save_and_close = None
self.btn_close = None
# Values for 3d array editor
self.dim_indexes = [{}, {}, {}]
self.last_dim = 0 # Adjust this for changing the startup dimension
def setup_and_check(self, data, title='', readonly=False,
xlabels=None, ylabels=None):
"""
Setup ArrayEditor:
return False if data is not supported, True otherwise
"""
self.data = data
readonly = readonly or not self.data.flags.writeable
is_record_array = data.dtype.names is not None
is_masked_array = isinstance(data, np.ma.MaskedArray)
if data.ndim > 3:
self.error(_("Arrays with more than 3 dimensions are not "
"supported"))
return False
if xlabels is not None and len(xlabels) != self.data.shape[1]:
self.error(_("The 'xlabels' argument length do no match array "
"column number"))
return False
if ylabels is not None and len(ylabels) != self.data.shape[0]:
self.error(_("The 'ylabels' argument length do no match array row "
"number"))
return False
if not is_record_array:
dtn = data.dtype.name
if dtn not in SUPPORTED_FORMATS and not dtn.startswith('str') \
and not dtn.startswith('unicode'):
arr = _("%s arrays") % data.dtype.name
self.error(_("%s are currently not supported") % arr)
return False
self.layout = QGridLayout()
self.setLayout(self.layout)
self.setWindowIcon(ima.icon('arredit'))
if title:
title = to_text_string(title) + " - " + _("NumPy array")
else:
title = _("Array editor")
if readonly:
title += ' (' + _('read only') + ')'
self.setWindowTitle(title)
self.resize(600, 500)
# Stack widget
self.stack = QStackedWidget(self)
if is_record_array:
for name in data.dtype.names:
self.stack.addWidget(ArrayEditorWidget(self, data[name],
readonly, xlabels, ylabels))
elif is_masked_array:
self.stack.addWidget(ArrayEditorWidget(self, data, readonly,
xlabels, ylabels))
self.stack.addWidget(ArrayEditorWidget(self, data.data, readonly,
xlabels, ylabels))
self.stack.addWidget(ArrayEditorWidget(self, data.mask, readonly,
xlabels, ylabels))
elif data.ndim == 3:
pass
else:
self.stack.addWidget(ArrayEditorWidget(self, data, readonly,
xlabels, ylabels))
self.arraywidget = self.stack.currentWidget()
if self.arraywidget:
self.arraywidget.model.dataChanged.connect(
self.save_and_close_enable)
self.stack.currentChanged.connect(self.current_widget_changed)
self.layout.addWidget(self.stack, 1, 0)
# Buttons configuration
btn_layout = QHBoxLayout()
if is_record_array or is_masked_array or data.ndim == 3:
if is_record_array:
btn_layout.addWidget(QLabel(_("Record array fields:")))
names = []
for name in data.dtype.names:
field = data.dtype.fields[name]
text = name
if len(field) >= 3:
title = field[2]
if not is_text_string(title):
title = repr(title)
text += ' - '+title
names.append(text)
else:
names = [_('Masked data'), _('Data'), _('Mask')]
if data.ndim == 3:
# QSpinBox
self.index_spin = QSpinBox(self, keyboardTracking=False)
self.index_spin.valueChanged.connect(self.change_active_widget)
# QComboBox
names = [str(i) for i in range(3)]
ra_combo = QComboBox(self)
ra_combo.addItems(names)
ra_combo.currentIndexChanged.connect(self.current_dim_changed)
# Adding the widgets to layout
label = QLabel(_("Axis:"))
btn_layout.addWidget(label)
btn_layout.addWidget(ra_combo)
self.shape_label = QLabel()
btn_layout.addWidget(self.shape_label)
label = QLabel(_("Index:"))
btn_layout.addWidget(label)
btn_layout.addWidget(self.index_spin)
self.slicing_label = QLabel()
btn_layout.addWidget(self.slicing_label)
# set the widget to display when launched
self.current_dim_changed(self.last_dim)
else:
ra_combo = QComboBox(self)
ra_combo.currentIndexChanged.connect(self.stack.setCurrentIndex)
ra_combo.addItems(names)
btn_layout.addWidget(ra_combo)
if is_masked_array:
label = QLabel(_("<u>Warning</u>: changes are applied separately"))
label.setToolTip(_("For performance reasons, changes applied "\
"to masked array won't be reflected in "\
"array's data (and vice-versa)."))
btn_layout.addWidget(label)
btn_layout.addStretch()
if not readonly:
self.btn_save_and_close = QPushButton(_('Save and Close'))
self.btn_save_and_close.setDisabled(True)
self.btn_save_and_close.clicked.connect(self.accept)
btn_layout.addWidget(self.btn_save_and_close)
self.btn_close = QPushButton(_('Close'))
self.btn_close.setAutoDefault(True)
self.btn_close.setDefault(True)
self.btn_close.clicked.connect(self.reject)
btn_layout.addWidget(self.btn_close)
self.layout.addLayout(btn_layout, 2, 0)
self.setMinimumSize(400, 300)
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
return True
@Slot(QModelIndex, QModelIndex)
def save_and_close_enable(self, left_top, bottom_right):
"""Handle the data change event to enable the save and close button."""
if self.btn_save_and_close:
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True)
def current_widget_changed(self, index):
self.arraywidget = self.stack.widget(index)
self.arraywidget.model.dataChanged.connect(self.save_and_close_enable)
def change_active_widget(self, index):
"""
This is implemented for handling negative values in index for
3d arrays, to give the same behavior as slicing
"""
string_index = [':']*3
string_index[self.last_dim] = '<font color=red>%i</font>'
self.slicing_label.setText((r"Slicing: [" + ", ".join(string_index) +
"]") % index)
if index < 0:
data_index = self.data.shape[self.last_dim] + index
else:
data_index = index
slice_index = [slice(None)]*3
slice_index[self.last_dim] = data_index
stack_index = self.dim_indexes[self.last_dim].get(data_index)
if stack_index is None:
stack_index = self.stack.count()
try:
self.stack.addWidget(ArrayEditorWidget(
self, self.data[tuple(slice_index)]))
except IndexError: # Handle arrays of size 0 in one axis
self.stack.addWidget(ArrayEditorWidget(self, self.data))
self.dim_indexes[self.last_dim][data_index] = stack_index
self.stack.update()
self.stack.setCurrentIndex(stack_index)
def current_dim_changed(self, index):
"""
        This changes the active axis the array editor is plotting over
in 3D
"""
self.last_dim = index
string_size = ['%i']*3
string_size[index] = '<font color=red>%i</font>'
self.shape_label.setText(('Shape: (' + ', '.join(string_size) +
') ') % self.data.shape)
if self.index_spin.value() != 0:
self.index_spin.setValue(0)
else:
# this is done since if the value is currently 0 it does not emit
# currentIndexChanged(int)
self.change_active_widget(0)
self.index_spin.setRange(-self.data.shape[index],
self.data.shape[index]-1)
@Slot()
def accept(self):
"""Reimplement Qt method"""
for index in range(self.stack.count()):
self.stack.widget(index).accept_changes()
QDialog.accept(self)
def get_value(self):
"""Return modified array -- this is *not* a copy"""
        # It is important to avoid accessing the Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
return self.data
def error(self, message):
"""An error occured, closing the dialog box"""
QMessageBox.critical(self, _("Array editor"), message)
self.setAttribute(Qt.WA_DeleteOnClose)
self.reject()
@Slot()
def reject(self):
"""Reimplement Qt method"""
if self.arraywidget is not None:
for index in range(self.stack.count()):
self.stack.widget(index).reject_changes()
QDialog.reject(self)
|
[
"qtpy.QtWidgets.QComboBox",
"spyder.utils.qthelpers.add_actions",
"qtpy.QtWidgets.QGridLayout",
"spyder.py3compat.to_text_string",
"spyder.utils.qthelpers.keybinding",
"numpy.array",
"spyder.py3compat.to_binary_string",
"qtpy.QtWidgets.QDialog.accept",
"spyder.py3compat.is_binary_string",
"spyder.py3compat.is_string",
"spyder.config.base._",
"qtpy.QtWidgets.QTableView.keyPressEvent",
"qtpy.QtWidgets.QSpinBox",
"qtpy.compat.to_qvariant",
"qtpy.QtWidgets.QVBoxLayout",
"qtpy.QtCore.QAbstractTableModel.__init__",
"qtpy.QtCore.Slot",
"spyder.py3compat.io.BytesIO",
"spyder.config.gui.get_font",
"qtpy.QtWidgets.QWidget.__init__",
"numpy.abs",
"qtpy.QtWidgets.QMenu",
"spyder.config.gui.config_shortcut",
"qtpy.QtWidgets.QTableView.__init__",
"qtpy.QtCore.QAbstractTableModel.flags",
"qtpy.QtCore.QItemSelection",
"qtpy.compat.from_qvariant",
"qtpy.QtWidgets.QApplication.clipboard",
"qtpy.QtCore.QLocale",
"qtpy.QtWidgets.QStackedWidget",
"spyder.utils.icon_manager.icon",
"qtpy.QtWidgets.QHBoxLayout",
"qtpy.QtCore.QModelIndex",
"spyder.py3compat.io.StringIO",
"spyder.py3compat.is_text_string",
"qtpy.QtGui.QColor.fromHsvF",
"qtpy.QtGui.QDoubleValidator",
"qtpy.QtWidgets.QItemDelegate.__init__",
"qtpy.QtWidgets.QLabel",
"qtpy.QtGui.QCursor",
"qtpy.QtWidgets.QDialog.reject",
"qtpy.QtWidgets.QApplication.restoreOverrideCursor",
"qtpy.QtWidgets.QLineEdit",
"qtpy.QtWidgets.QDialog.__init__",
"qtpy.QtCore.QItemSelectionRange"
] |
[((20131, 20137), 'qtpy.QtCore.Slot', 'Slot', ([], {}), '()\n', (20135, 20137), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((30224, 30254), 'qtpy.QtCore.Slot', 'Slot', (['QModelIndex', 'QModelIndex'], {}), '(QModelIndex, QModelIndex)\n', (30228, 30254), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((32813, 32819), 'qtpy.QtCore.Slot', 'Slot', ([], {}), '()\n', (32817, 32819), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((33513, 33519), 'qtpy.QtCore.Slot', 'Slot', ([], {}), '()\n', (33517, 33519), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((4530, 4564), 'qtpy.QtCore.QAbstractTableModel.__init__', 'QAbstractTableModel.__init__', (['self'], {}), '(self)\n', (4558, 4564), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((4749, 4780), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'data.dtype'}), '([0], dtype=data.dtype)\n', (4757, 4780), True, 'import numpy as np\n'), ((6988, 7001), 'qtpy.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (6999, 7001), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((7209, 7222), 'qtpy.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (7220, 7222), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((9196, 9219), 'spyder.py3compat.is_binary_string', 'is_binary_string', (['value'], {}), '(value)\n', (9212, 9219), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((10438, 10451), 'qtpy.compat.to_qvariant', 'to_qvariant', ([], {}), '()\n', (10449, 10451), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((10691, 10716), 'qtpy.compat.from_qvariant', 'from_qvariant', (['value', 'str'], {}), '(value, str)\n', (10704, 10716), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((13108, 13144), 'qtpy.QtWidgets.QItemDelegate.__init__', 'QItemDelegate.__init__', (['self', 'parent'], {}), '(self, parent)\n', (13130, 13144), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((14786, 14819), 'qtpy.QtWidgets.QTableView.__init__', 'QTableView.__init__', (['self', 'parent'], {}), '(self, parent)\n', (14805, 14819), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((15165, 15251), 'spyder.config.gui.config_shortcut', 'config_shortcut', (['self.copy'], {'context': '"""variable_explorer"""', 'name': '"""copy"""', 'parent': 'self'}), "(self.copy, context='variable_explorer', name='copy', parent\n =self)\n", (15180, 15251), False, 'from spyder.config.gui import get_font, config_shortcut\n'), ((17938, 17974), 
'qtpy.QtWidgets.QApplication.restoreOverrideCursor', 'QApplication.restoreOverrideCursor', ([], {}), '()\n', (17972, 17974), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((18385, 18396), 'qtpy.QtWidgets.QMenu', 'QMenu', (['self'], {}), '(self)\n', (18390, 18396), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((18406, 18443), 'spyder.utils.qthelpers.add_actions', 'add_actions', (['menu', '[self.copy_action]'], {}), '(menu, [self.copy_action])\n', (18417, 18443), False, 'from spyder.utils.qthelpers import add_actions, create_action, keybinding\n'), ((20281, 20305), 'qtpy.QtWidgets.QApplication.clipboard', 'QApplication.clipboard', ([], {}), '()\n', (20303, 20305), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((20493, 20523), 'qtpy.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (20509, 20523), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((21181, 21194), 'qtpy.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (21192, 21194), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((21876, 21889), 'qtpy.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (21887, 21889), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((23120, 23150), 'qtpy.QtWidgets.QDialog.__init__', 'QDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (23136, 23150), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((25257, 25270), 'qtpy.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (25268, 25270), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((25687, 25707), 'qtpy.QtWidgets.QStackedWidget', 'QStackedWidget', (['self'], 
{}), '(self)\n', (25701, 25707), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((27020, 27033), 'qtpy.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (27031, 27033), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((32993, 33013), 'qtpy.QtWidgets.QDialog.accept', 'QDialog.accept', (['self'], {}), '(self)\n', (33007, 33013), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((33743, 33763), 'qtpy.QtWidgets.QDialog.reject', 'QDialog.reject', (['self'], {}), '(self)\n', (33757, 33763), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((9131, 9144), 'qtpy.compat.to_qvariant', 'to_qvariant', ([], {}), '()\n', (9142, 9144), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((12077, 12091), 'spyder.py3compat.is_string', 'is_string', (['val'], {}), '(val)\n', (12086, 12091), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((12656, 12669), 'qtpy.compat.to_qvariant', 'to_qvariant', ([], {}), '()\n', (12667, 12669), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((12860, 12888), 'qtpy.compat.to_qvariant', 'to_qvariant', (['labels[section]'], {}), '(labels[section])\n', (12871, 12888), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((17778, 17800), 'qtpy.QtGui.QCursor', 'QCursor', (['Qt.WaitCursor'], {}), '(Qt.WaitCursor)\n', (17785, 17800), False, 'from qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence\n'), ((18086, 18095), 'spyder.config.base._', '_', (['"""Copy"""'], {}), "('Copy')\n", (18087, 18095), False, 'from spyder.config.base import _\n'), ((18783, 18820), 'qtpy.QtWidgets.QTableView.keyPressEvent', 'QTableView.keyPressEvent', (['self', 'event'], {}), '(self, event)\n', (18807, 18820), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((19577, 19589), 'spyder.py3compat.io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (19587, 19589), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((19627, 19640), 'spyder.py3compat.io.StringIO', 'io.StringIO', ([], {}), '()\n', (19638, 19640), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((21269, 21280), 'spyder.config.base._', '_', (['"""Format"""'], {}), 
"('Format')\n", (21270, 21280), False, 'from spyder.config.base import _\n'), ((21486, 21497), 'spyder.config.base._', '_', (['"""Resize"""'], {}), "('Resize')\n", (21487, 21497), False, 'from spyder.config.base import _\n'), ((21623, 21644), 'spyder.config.base._', '_', (['"""Background color"""'], {}), "('Background color')\n", (21624, 21644), False, 'from spyder.config.base import _\n'), ((22537, 22548), 'spyder.config.base._', '_', (['"""Format"""'], {}), "('Format')\n", (22538, 22548), False, 'from spyder.config.base import _\n'), ((22585, 22606), 'spyder.config.base._', '_', (['"""Float formatting"""'], {}), "('Float formatting')\n", (22586, 22606), False, 'from spyder.config.base import _\n'), ((25336, 25355), 'spyder.utils.icon_manager.icon', 'ima.icon', (['"""arredit"""'], {}), "('arredit')\n", (25344, 25355), True, 'from spyder.utils import icon_manager as ima\n'), ((25482, 25499), 'spyder.config.base._', '_', (['"""Array editor"""'], {}), "('Array editor')\n", (25483, 25499), False, 'from spyder.config.base import _\n'), ((29821, 29831), 'spyder.config.base._', '_', (['"""Close"""'], {}), "('Close')\n", (29822, 29831), False, 'from spyder.config.base import _\n'), ((33406, 33423), 'spyder.config.base._', '_', (['"""Array editor"""'], {}), "('Array editor')\n", (33407, 33423), False, 'from spyder.config.base import _\n'), ((8009, 8022), 'qtpy.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (8020, 8022), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((8406, 8419), 'qtpy.QtCore.QModelIndex', 'QModelIndex', ([], {}), '()\n', (8417, 8419), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((9264, 9293), 'spyder.py3compat.to_text_string', 'to_text_string', (['value', '"""utf8"""'], {}), "(value, 'utf8')\n", (9278, 9293), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((11014, 11045), 'spyder.py3compat.to_binary_string', 'to_binary_string', (['value', '"""utf8"""'], {}), "(value, 'utf8')\n", (11030, 11045), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((12409, 12447), 'qtpy.QtCore.QAbstractTableModel.flags', 'QAbstractTableModel.flags', (['self', 'index'], {}), '(self, index)\n', (12434, 12447), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((13445, 13463), 'qtpy.compat.to_qvariant', 'to_qvariant', (['value'], {}), '(value)\n', (13456, 13463), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((13548, 13565), 'qtpy.QtWidgets.QLineEdit', 'QLineEdit', (['parent'], {}), '(parent)\n', (13557, 13565), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((16301, 16317), 'qtpy.QtCore.QItemSelection', 'QItemSelection', ([], {}), '()\n', (16315, 16317), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((18148, 18166), 'spyder.utils.qthelpers.keybinding', 'keybinding', (['"""Copy"""'], {}), "('Copy')\n", (18158, 18166), False, 'from spyder.utils.qthelpers import 
add_actions, create_action, keybinding\n'), ((18215, 18235), 'spyder.utils.icon_manager.icon', 'ima.icon', (['"""editcopy"""'], {}), "('editcopy')\n", (18223, 18235), True, 'from spyder.utils import icon_manager as ima\n'), ((24330, 24387), 'spyder.config.base._', '_', (['"""Arrays with more than 3 dimensions are not supported"""'], {}), "('Arrays with more than 3 dimensions are not supported')\n", (24331, 24387), False, 'from spyder.config.base import _\n'), ((24540, 24606), 'spyder.config.base._', '_', (['"""The \'xlabels\' argument length do no match array column number"""'], {}), '("The \'xlabels\' argument length do no match array column number")\n', (24541, 24606), False, 'from spyder.config.base import _\n'), ((24759, 24822), 'spyder.config.base._', '_', (['"""The \'ylabels\' argument length do no match array row number"""'], {}), '("The \'ylabels\' argument length do no match array row number")\n', (24760, 24822), False, 'from spyder.config.base import _\n'), ((25429, 25445), 'spyder.config.base._', '_', (['"""NumPy array"""'], {}), "('NumPy array')\n", (25430, 25445), False, 'from spyder.config.base import _\n'), ((27819, 27857), 'qtpy.QtWidgets.QSpinBox', 'QSpinBox', (['self'], {'keyboardTracking': '(False)'}), '(self, keyboardTracking=False)\n', (27827, 27857), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((28048, 28063), 'qtpy.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (28057, 28063), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((28407, 28415), 'qtpy.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (28413, 28415), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((28655, 28663), 'qtpy.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (28661, 28663), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((28885, 28900), 'qtpy.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (28894, 28900), False, 'from qtpy.QtWidgets import QAbstractItemDelegate, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QInputDialog, QItemDelegate, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSpinBox, QStackedWidget, QTableView, QVBoxLayout, QWidget\n'), ((29580, 29599), 'spyder.config.base._', '_', (['"""Save and Close"""'], {}), "('Save and Close')\n", (29581, 29599), False, 'from spyder.config.base import _\n'), ((9508, 9541), 'qtpy.compat.to_qvariant', 'to_qvariant', (['(self._format % value)'], {}), '(self._format % value)\n', (9519, 9541), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((11135, 11156), 'spyder.py3compat.to_text_string', 
'to_text_string', (['value'], {}), '(value)\n', (11149, 11156), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((13594, 13639), 'spyder.config.gui.get_font', 'get_font', ([], {'font_size_delta': 'DEFAULT_SMALL_DELTA'}), '(font_size_delta=DEFAULT_SMALL_DELTA)\n', (13602, 13639), False, 'from spyder.config.gui import get_font, config_shortcut\n'), ((13758, 13782), 'qtpy.QtGui.QDoubleValidator', 'QDoubleValidator', (['editor'], {}), '(editor)\n', (13774, 13782), False, 'from qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence\n'), ((17299, 17342), 'qtpy.QtCore.QItemSelectionRange', 'QItemSelectionRange', (['top_left', 'bottom_right'], {}), '(top_left, bottom_right)\n', (17318, 17342), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((19859, 19871), 'spyder.config.base._', '_', (['"""Warning"""'], {}), "('Warning')\n", (19860, 19871), False, 'from spyder.config.base import _\n'), ((19906, 19960), 'spyder.config.base._', '_', (['"""It was not possible to copy values for this array"""'], {}), "('It was not possible to copy values for this array')\n", (19907, 19960), False, 'from spyder.config.base import _\n'), ((25098, 25112), 'spyder.config.base._', '_', (['"""%s arrays"""'], {}), "('%s arrays')\n", (25099, 25112), False, 'from spyder.config.base import _\n'), ((25397, 25418), 'spyder.py3compat.to_text_string', 'to_text_string', (['title'], {}), '(title)\n', (25411, 25418), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n'), ((25551, 25565), 'spyder.config.base._', '_', (['"""read only"""'], {}), "('read only')\n", (25552, 25565), False, 'from spyder.config.base import _\n'), ((27684, 27700), 'spyder.config.base._', '_', (['"""Masked data"""'], {}), "('Masked data')\n", (27685, 27700), False, 'from spyder.config.base import _\n'), ((27702, 27711), 'spyder.config.base._', '_', (['"""Data"""'], {}), "('Data')\n", (27703, 27711), False, 'from spyder.config.base import _\n'), ((27713, 27722), 'spyder.config.base._', '_', (['"""Mask"""'], {}), "('Mask')\n", (27714, 27722), False, 'from spyder.config.base import _\n'), ((28266, 28276), 'spyder.config.base._', '_', (['"""Axis:"""'], {}), "('Axis:')\n", (28267, 28276), False, 'from spyder.config.base import _\n'), ((28504, 28515), 'spyder.config.base._', '_', (['"""Index:"""'], {}), "('Index:')\n", (28505, 28515), False, 'from spyder.config.base import _\n'), ((29138, 29189), 'spyder.config.base._', '_', (['"""<u>Warning</u>: changes are applied separately"""'], {}), "('<u>Warning</u>: changes are applied separately')\n", (29139, 29189), False, 'from spyder.config.base import _\n'), ((29225, 29344), 'spyder.config.base._', '_', (['"""For performance reasons, changes applied to masked array won\'t be reflected in array\'s data (and vice-versa)."""'], {}), '("For performance reasons, changes applied to masked array won\'t be reflected in array\'s data (and vice-versa)."\n )\n', (29226, 29344), False, 'from spyder.config.base import _\n'), ((10145, 10195), 'qtpy.QtGui.QColor.fromHsvF', 'QColor.fromHsvF', (['hue', 'self.sat', 'self.val', 'self.alp'], {}), '(hue, self.sat, self.val, self.alp)\n', (10160, 10195), False, 'from qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence\n'), ((10220, 10238), 'qtpy.compat.to_qvariant', 'to_qvariant', (['color'], {}), '(color)\n', (10231, 10238), False, 'from 
qtpy.compat import from_qvariant, to_qvariant\n'), ((13820, 13832), 'qtpy.QtCore.QLocale', 'QLocale', (['"""C"""'], {}), "('C')\n", (13827, 13832), False, 'from qtpy.QtCore import QAbstractTableModel, QItemSelection, QLocale, QItemSelectionRange, QModelIndex, Qt, Slot\n'), ((22852, 22862), 'spyder.config.base._', '_', (['"""Error"""'], {}), "('Error')\n", (22853, 22862), False, 'from spyder.config.base import _\n'), ((25159, 25194), 'spyder.config.base._', '_', (['"""%s are currently not supported"""'], {}), "('%s are currently not supported')\n", (25160, 25194), False, 'from spyder.config.base import _\n'), ((27178, 27203), 'spyder.config.base._', '_', (['"""Record array fields:"""'], {}), "('Record array fields:')\n", (27179, 27203), False, 'from spyder.config.base import _\n'), ((10107, 10118), 'numpy.abs', 'np.abs', (['hue'], {}), '(hue)\n', (10113, 10118), True, 'import numpy as np\n'), ((10294, 10307), 'qtpy.compat.to_qvariant', 'to_qvariant', ([], {}), '()\n', (10305, 10307), False, 'from qtpy.compat import from_qvariant, to_qvariant\n'), ((10375, 10420), 'spyder.config.gui.get_font', 'get_font', ([], {'font_size_delta': 'DEFAULT_SMALL_DELTA'}), '(font_size_delta=DEFAULT_SMALL_DELTA)\n', (10383, 10420), False, 'from spyder.config.gui import get_font, config_shortcut\n'), ((22902, 22931), 'spyder.config.base._', '_', (['"""Format (%s) is incorrect"""'], {}), "('Format (%s) is incorrect')\n", (22903, 22931), False, 'from spyder.config.base import _\n'), ((27482, 27503), 'spyder.py3compat.is_text_string', 'is_text_string', (['title'], {}), '(title)\n', (27496, 27503), False, 'from spyder.py3compat import io, is_binary_string, is_string, is_text_string, PY3, to_binary_string, to_text_string\n')]
|
""" Defines the PolygonPlot class.
"""
from __future__ import with_statement
# Major library imports
import numpy as np
# Enthought library imports.
from enable.api import LineStyle, black_color_trait, \
transparent_color_trait
from kiva.agg import points_in_polygon
from traits.api import Enum, Float, Tuple, Property, cached_property, \
on_trait_change
# Local imports.
from base_xy_plot import BaseXYPlot
class PolygonPlot(BaseXYPlot):
""" Plots a polygon in dataspace.
Assuming that the index and value mappers are linear mappers, and that
"index" corresponds to X-coordinates and "value" corresponds to
Y-coordinates, the points are arranged in a counter-clockwise fashion.
The polygon is closed automatically, so there is no need to reproduce
the first point as the last point.
    Nonlinear mappers are possible, but the results may be unexpected: only the
    vertices are passed through the mappers, and the edges joining them are
    always drawn as straight lines in screen space, even where a true nonlinear
    mapping would render them as curves.
If you don't want the edge of the polygon to be drawn, set **edge_color**
to transparent; don't try to do this by setting **edge_width** to 0. In
some drawing systems, such as PostScript, a line width of 0 means to make
the line as small as possible while still putting ink on the page.
"""
# The color of the line on the edge of the polygon.
edge_color = black_color_trait
# The thickness of the edge of the polygon.
edge_width = Float(1.0)
# The line dash style for the edge of the polygon.
edge_style = LineStyle
# The color of the face of the polygon.
face_color = transparent_color_trait
# Override the hittest_type trait inherited from BaseXYPlot
hittest_type = Enum("poly", "point", "line")
# The RGBA tuple for rendering edges. It is always a tuple of length 4.
# It has the same RGB values as edge_color_, and its alpha value is the
# alpha value of self.edge_color multiplied by self.alpha.
effective_edge_color = Property(Tuple, depends_on=['edge_color', 'alpha'])
# The RGBA tuple for rendering the face. It is always a tuple of length 4.
# It has the same RGB values as face_color_, and its alpha value is the
# alpha value of self.face_color multiplied by self.alpha.
effective_face_color = Property(Tuple, depends_on=['face_color', 'alpha'])
#----------------------------------------------------------------------
# Private 'BaseXYPlot' interface
#----------------------------------------------------------------------
def _gather_points(self):
""" Collects the data points that are within the bounds of the plot and
caches them.
"""
if self._cache_valid:
return
        # Guard against missing data sources before dereferencing them.
        if not self.index or not self.value:
            return
        index = self.index.get_data()
        value = self.value.get_data()
if len(index) == 0 or len(value) == 0 or len(index) != len(value):
self._cached_data_pts = []
self._cache_valid = True
return
points = np.transpose(np.array((index,value)))
self._cached_data_pts = points
self._cache_valid = True
def _render(self, gc, points):
""" Renders an Nx2 array of screen-space points as a polygon.
"""
with gc:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
gc.set_stroke_color(self.effective_edge_color)
gc.set_line_width(self.edge_width)
gc.set_line_dash(self.edge_style_)
gc.set_fill_color(self.effective_face_color)
gc.lines(points)
gc.close_path()
gc.draw_path()
def _render_icon(self, gc, x, y, width, height):
""" Renders a representation of this plot as an icon into the box
defined by the parameters.
Used by the legend.
"""
with gc:
gc.set_stroke_color(self.effective_edge_color)
gc.set_line_width(self.edge_width)
gc.set_fill_color(self.effective_face_color)
if hasattr(self, 'line_style_'):
gc.set_line_dash(self.line_style_)
gc.draw_rect((x,y,width,height))
return
def hittest(self, screen_pt, threshold=7.0, return_distance=False):
""" Performs point-in-polygon testing or point/line proximity testing.
If self.hittest_type is "line" or "point", then behaves like the
parent class BaseXYPlot.hittest().
If self.hittest_type is "poly", then returns True if the given
point is inside the polygon, and False otherwise.
"""
if self.hittest_type in ("line", "point"):
return BaseXYPlot.hittest(self, screen_pt, threshold, return_distance)
data_pt = self.map_data(screen_pt, all_values=True)
index = self.index.get_data()
value = self.value.get_data()
poly = np.vstack((index,value)).T
if points_in_polygon([data_pt], poly)[0] == 1:
return True
else:
return False
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
@on_trait_change('edge_color, edge_width, edge_style, face_color, alpha')
def _attributes_changed(self):
self.invalidate_draw()
self.request_redraw()
#------------------------------------------------------------------------
# Property getters
#------------------------------------------------------------------------
@cached_property
def _get_effective_edge_color(self):
if len(self.edge_color_) == 4:
edge_alpha = self.edge_color_[-1]
else:
edge_alpha = 1.0
c = self.edge_color_[:3] + (edge_alpha * self.alpha,)
return c
@cached_property
def _get_effective_face_color(self):
if len(self.face_color_) == 4:
face_alpha = self.face_color_[-1]
else:
face_alpha = 1.0
c = self.face_color_[:3] + (face_alpha * self.alpha,)
return c
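# Hedged usage sketch (not part of the original module): with the standard
# Chaco ``Plot``/``ArrayPlotData`` factory API, a PolygonPlot is usually
# created via ``Plot.plot(..., type="polygon")``; the vertex data below is
# illustrative only.
#
#     from chaco.api import ArrayPlotData, Plot
#     pd = ArrayPlotData(x=[0.0, 1.0, 1.0, 0.0], y=[0.0, 0.0, 1.0, 1.0])
#     plot = Plot(pd)
#     plot.plot(("x", "y"), type="polygon",
#               face_color="lightblue", edge_color="blue")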
|
[
"traits.api.Enum",
"traits.api.on_trait_change",
"traits.api.Property",
"kiva.agg.points_in_polygon",
"base_xy_plot.BaseXYPlot.hittest",
"numpy.array",
"numpy.vstack",
"traits.api.Float"
] |
[((1659, 1669), 'traits.api.Float', 'Float', (['(1.0)'], {}), '(1.0)\n', (1664, 1669), False, 'from traits.api import Enum, Float, Tuple, Property, cached_property, on_trait_change\n'), ((1923, 1952), 'traits.api.Enum', 'Enum', (['"""poly"""', '"""point"""', '"""line"""'], {}), "('poly', 'point', 'line')\n", (1927, 1952), False, 'from traits.api import Enum, Float, Tuple, Property, cached_property, on_trait_change\n'), ((2202, 2253), 'traits.api.Property', 'Property', (['Tuple'], {'depends_on': "['edge_color', 'alpha']"}), "(Tuple, depends_on=['edge_color', 'alpha'])\n", (2210, 2253), False, 'from traits.api import Enum, Float, Tuple, Property, cached_property, on_trait_change\n'), ((2508, 2559), 'traits.api.Property', 'Property', (['Tuple'], {'depends_on': "['face_color', 'alpha']"}), "(Tuple, depends_on=['face_color', 'alpha'])\n", (2516, 2559), False, 'from traits.api import Enum, Float, Tuple, Property, cached_property, on_trait_change\n'), ((5451, 5523), 'traits.api.on_trait_change', 'on_trait_change', (['"""edge_color, edge_width, edge_style, face_color, alpha"""'], {}), "('edge_color, edge_width, edge_style, face_color, alpha')\n", (5466, 5523), False, 'from traits.api import Enum, Float, Tuple, Property, cached_property, on_trait_change\n'), ((3287, 3311), 'numpy.array', 'np.array', (['(index, value)'], {}), '((index, value))\n', (3295, 3311), True, 'import numpy as np\n'), ((4906, 4969), 'base_xy_plot.BaseXYPlot.hittest', 'BaseXYPlot.hittest', (['self', 'screen_pt', 'threshold', 'return_distance'], {}), '(self, screen_pt, threshold, return_distance)\n', (4924, 4969), False, 'from base_xy_plot import BaseXYPlot\n'), ((5122, 5147), 'numpy.vstack', 'np.vstack', (['(index, value)'], {}), '((index, value))\n', (5131, 5147), True, 'import numpy as np\n'), ((5160, 5194), 'kiva.agg.points_in_polygon', 'points_in_polygon', (['[data_pt]', 'poly'], {}), '([data_pt], poly)\n', (5177, 5194), False, 'from kiva.agg import points_in_polygon\n')]
|
import os
import os.path as osp
import numpy as np
from joblib import Parallel, delayed
from tensorflow.keras.utils import get_file
from tqdm import tqdm
from spektral.data import Dataset, Graph
from spektral.utils import label_to_one_hot, sparse
from spektral.utils.io import load_csv, load_sdf
ATOM_TYPES = [1, 6, 7, 8, 9]
BOND_TYPES = [1, 2, 3, 4]
class QM9(Dataset):
"""
The QM9 chemical data set of small molecules.
In this dataset, nodes represent atoms and edges represent chemical bonds.
There are 5 possible atom types (H, C, N, O, F) and 4 bond types (single,
double, triple, aromatic).
Node features represent the chemical properties of each atom and include:
- The atomic number, one-hot encoded;
- The atom's position in the X, Y, and Z dimensions;
- The atomic charge;
- The mass difference from the monoisotope;
The edge features represent the type of chemical bond between two atoms,
one-hot encoded.
    Each graph has a 19-dimensional label for regression.
**Arguments**
- `amount`: int, load this many molecules instead of the full dataset
(useful for debugging).
    - `n_jobs`: number of CPU cores to use for reading the data (-1 to use all
    available cores).
"""
url = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"
def __init__(self, amount=None, n_jobs=1, **kwargs):
self.amount = amount
self.n_jobs = n_jobs
super().__init__(**kwargs)
def download(self):
get_file(
"qm9.tar.gz",
self.url,
extract=True,
cache_dir=self.path,
cache_subdir=self.path,
)
os.remove(osp.join(self.path, "qm9.tar.gz"))
def read(self):
print("Loading QM9 dataset.")
sdf_file = osp.join(self.path, "gdb9.sdf")
data = load_sdf(sdf_file, amount=self.amount) # Internal SDF format
def read_mol(mol):
x = np.array([atom_to_feature(atom) for atom in mol["atoms"]])
a, e = mol_to_adj(mol)
return x, a, e
data = Parallel(n_jobs=self.n_jobs)(
delayed(read_mol)(mol) for mol in tqdm(data, ncols=80)
)
x_list, a_list, e_list = list(zip(*data))
# Load labels
labels_file = osp.join(self.path, "gdb9.sdf.csv")
labels = load_csv(labels_file)
labels = labels.set_index("mol_id").values
if self.amount is not None:
labels = labels[: self.amount]
return [
Graph(x=x, a=a, e=e, y=y)
for x, a, e, y in zip(x_list, a_list, e_list, labels)
]
def atom_to_feature(atom):
atomic_num = label_to_one_hot(atom["atomic_num"], ATOM_TYPES)
coords = atom["coords"]
charge = atom["charge"]
iso = atom["iso"]
return np.concatenate((atomic_num, coords, [charge, iso]), -1)
def mol_to_adj(mol):
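    # Build a symmetric (undirected) edge list: each bond contributes the two
    # directed pairs (start, end) and (end, start). The list is then converted
    # into a sparse adjacency matrix with one-hot bond-type edge features.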
row, col, edge_features = [], [], []
for bond in mol["bonds"]:
start, end = bond["start_atom"], bond["end_atom"]
row += [start, end]
col += [end, start]
edge_features += [bond["type"]] * 2
a, e = sparse.edge_index_to_matrix(
edge_index=np.array((row, col)).T,
edge_weight=np.ones_like(row),
edge_features=label_to_one_hot(edge_features, BOND_TYPES),
)
return a, e
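if __name__ == "__main__":
    # Hedged usage sketch: load a small slice of QM9 and inspect one graph.
    # amount=32 keeps the download/parsing time short; adjust as needed.
    dataset = QM9(amount=32, n_jobs=1)
    g = dataset[0]
    print(g.x.shape, g.a.shape, g.e.shape, g.y.shape)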
|
[
"spektral.utils.io.load_sdf",
"spektral.utils.label_to_one_hot",
"numpy.ones_like",
"spektral.utils.io.load_csv",
"tqdm.tqdm",
"os.path.join",
"joblib.Parallel",
"numpy.array",
"tensorflow.keras.utils.get_file",
"numpy.concatenate",
"joblib.delayed",
"spektral.data.Graph"
] |
[((2701, 2749), 'spektral.utils.label_to_one_hot', 'label_to_one_hot', (["atom['atomic_num']", 'ATOM_TYPES'], {}), "(atom['atomic_num'], ATOM_TYPES)\n", (2717, 2749), False, 'from spektral.utils import label_to_one_hot, sparse\n'), ((2840, 2895), 'numpy.concatenate', 'np.concatenate', (['(atomic_num, coords, [charge, iso])', '(-1)'], {}), '((atomic_num, coords, [charge, iso]), -1)\n', (2854, 2895), True, 'import numpy as np\n'), ((1532, 1627), 'tensorflow.keras.utils.get_file', 'get_file', (['"""qm9.tar.gz"""', 'self.url'], {'extract': '(True)', 'cache_dir': 'self.path', 'cache_subdir': 'self.path'}), "('qm9.tar.gz', self.url, extract=True, cache_dir=self.path,\n cache_subdir=self.path)\n", (1540, 1627), False, 'from tensorflow.keras.utils import get_file\n'), ((1826, 1857), 'os.path.join', 'osp.join', (['self.path', '"""gdb9.sdf"""'], {}), "(self.path, 'gdb9.sdf')\n", (1834, 1857), True, 'import os.path as osp\n'), ((1873, 1911), 'spektral.utils.io.load_sdf', 'load_sdf', (['sdf_file'], {'amount': 'self.amount'}), '(sdf_file, amount=self.amount)\n', (1881, 1911), False, 'from spektral.utils.io import load_csv, load_sdf\n'), ((2318, 2353), 'os.path.join', 'osp.join', (['self.path', '"""gdb9.sdf.csv"""'], {}), "(self.path, 'gdb9.sdf.csv')\n", (2326, 2353), True, 'import os.path as osp\n'), ((2371, 2392), 'spektral.utils.io.load_csv', 'load_csv', (['labels_file'], {}), '(labels_file)\n', (2379, 2392), False, 'from spektral.utils.io import load_csv, load_sdf\n'), ((1713, 1746), 'os.path.join', 'osp.join', (['self.path', '"""qm9.tar.gz"""'], {}), "(self.path, 'qm9.tar.gz')\n", (1721, 1746), True, 'import os.path as osp\n'), ((2116, 2144), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (2124, 2144), False, 'from joblib import Parallel, delayed\n'), ((2553, 2578), 'spektral.data.Graph', 'Graph', ([], {'x': 'x', 'a': 'a', 'e': 'e', 'y': 'y'}), '(x=x, a=a, e=e, y=y)\n', (2558, 2578), False, 'from spektral.data import Dataset, Graph\n'), ((3252, 3269), 'numpy.ones_like', 'np.ones_like', (['row'], {}), '(row)\n', (3264, 3269), True, 'import numpy as np\n'), ((3293, 3336), 'spektral.utils.label_to_one_hot', 'label_to_one_hot', (['edge_features', 'BOND_TYPES'], {}), '(edge_features, BOND_TYPES)\n', (3309, 3336), False, 'from spektral.utils import label_to_one_hot, sparse\n'), ((3208, 3228), 'numpy.array', 'np.array', (['(row, col)'], {}), '((row, col))\n', (3216, 3228), True, 'import numpy as np\n'), ((2158, 2175), 'joblib.delayed', 'delayed', (['read_mol'], {}), '(read_mol)\n', (2165, 2175), False, 'from joblib import Parallel, delayed\n'), ((2192, 2212), 'tqdm.tqdm', 'tqdm', (['data'], {'ncols': '(80)'}), '(data, ncols=80)\n', (2196, 2212), False, 'from tqdm import tqdm\n')]
|
import numpy as np
board = np.zeros(shape=(9, 9))
count = 0
def solve():
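    # Classic backtracking: find the next empty cell, try each digit 1-9 that
    # is legal there, recurse, and reset the cell to 0 when a branch dead-ends.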
global count
count += 1
if count % 1000 == 0:
print('\rCurrent number of computations made:', count, end='')
freePos = find()
if freePos is None:
return True
i = freePos[0]
j = freePos[1]
for w in range(1, 10):
if possible(w, freePos):
board[i][j] = w
if solve():
return True
board[i][j] = 0
return False
def find():
for i in range(9):
for j in range(9):
if board[i][j] == 0:
return [i, j]
return None
def possible(value, position):
# position = (i, j) tuple
i = position[0]
j = position[1]
# checks row and column for repeat value
if (value in board[:, j]) or (value in board[i]):
return False
# reset to i,j - top left square
i = (i // 3) * 3
j = (j // 3) * 3
# check all squares in square
for n in range(i, i + 3):
for m in range(j, j + 3):
if board[n][m] == value:
return False
return True
def change(position):
# position = (i, j) tuple
i = position[0]
j = position[1]
for w in range(1, 10):
if w not in board[:, j] and w not in board[i]:
board[i][j] = w
return True
return False
def initialize():
print("Please enter the values on the board starting from left to right, top to bottom, 0 for blank")
integerChunk = input("Numbers: ")
pos = 0
for i in range(9):
for j in range(9):
board[i][j] = int(integerChunk[pos])
pos += 1
def displayBoard():
for i in range(3):
for j in range(9):
if board[i][j] == 0:
print(" ", end="")
else:
print("%d " % board[i][j], end="")
if (j == 2) or (j == 5):
print("| ", end="")
if j == 8:
print("")
print("- - - - - - - - - - -")
for i in range(3, 6):
for j in range(9):
if board[i][j] == 0:
print(" ", end="")
else:
print("%d " % board[i][j], end="")
if (j == 2) or (j == 5):
print("| ", end="")
if j == 8:
print("")
print("- - - - - - - - - - -")
for i in range(6, 9):
for j in range(9):
if board[i][j] == 0:
print(" ", end="")
else:
print("%d " % board[i][j], end="")
if (j == 2) or (j == 5):
print("| ", end="")
if j == 8:
print("")
def main():
initialize()
print("Is this the correct board? Press enter to continue or 'q' to exit program.")
displayBoard()
response = input()
if response == "q":
exit()
print("---------------SOLVING---------------\n")
solve()
print("\r\rSOLUTION")
displayBoard()
print("\nTotal number of computations:", count)
if __name__ == "__main__":
main()
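# Example session input (a well-known puzzle; 81 digits read left to right,
# top to bottom, with 0 for blanks), entered at the "Numbers:" prompt:
# 530070000600195000098000060800060003400803001700020006060000280000419005000080079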
|
[
"numpy.zeros"
] |
[((28, 50), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9, 9)'}), '(shape=(9, 9))\n', (36, 50), True, 'import numpy as np\n')]
|
from multiprocessing import Pool
import EnvEq as ee
import numpy as np
import itertools as it
import os
#parsing input into numpy arrays
from input import *
y0=np.array([y0_Tpos,y0_Tpro,y0_Tneg,y0_o2,y0_test])
p=np.array([p_o2,p_test])
mu=np.array([[mu_o2Tpos,mu_o2Tpro,mu_o2Tneg],[mu_testTpos,mu_testTpro,0]])
lam=np.array([lam_o2,lam_test])
t_D=np.array([t_DTpos,t_DTpro,t_DTneg])
r=np.array([r_Tpos,r_Tpro,r_Tneg])
delta=np.array([delta_Tpos,delta_Tpro,delta_Tneg])
rho=np.array([rho_Tpos,rho_Tpro,rho_Tneg])
lim=np.array([[[l_lim_o2Tpos,u_lim_o2Tpos],[l_lim_o2Tpro,u_lim_o2Tpro],[l_lim_o2Tneg,u_lim_o2Tneg]],[[l_lim_testTpos,u_lim_testTpos],[l_lim_testTpro,u_lim_testTpro],[0,0]]],dtype=np.float64)
#make directories for saving raw_outputs
try:
os.makedirs("../../raw_output/EnvEq/"+f_name)
except:
pass
#parameter grid to iterate over: upper O2 limits for Tpro and Tneg
o2_lim_arr=np.empty([0,2])
for ulim_Tpro in np.arange(0.1,1,0.2):
for ulim_Tneg in np.arange(0.1,1,0.2):
o2_lim_arr=np.append(o2_lim_arr,[[ulim_Tpro,ulim_Tneg]],axis=0)
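# The nested loops above enumerate a 5 x 5 grid of upper O2 limits
# (0.1, 0.3, 0.5, 0.7, 0.9 for each of Tpro and Tneg), i.e. 25 parameter
# pairs that are swept in parallel by the pool below.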
def solve_parm(u_lim_o2): #calls the solve_eq function with all default inputs other than o2_lim
f_name_i=f_name+"{:.1f}".format(u_lim_o2[0])+"-"+"{:.1f}".format(u_lim_o2[1])
lim[0,1,1]=u_lim_o2[0]
lim[0,2,1]=u_lim_o2[1]
ee.solve_eq(t_max,dt,y0,p,mu,lam,r,K,delta,rho,lim,f_name_i)
if __name__ == '__main__':
pool = Pool(4)
pool.map(solve_parm,o2_lim_arr) #iterate over the o2_lims
pool.close()
pool.join()
|
[
"EnvEq.solve_eq",
"os.makedirs",
"numpy.append",
"numpy.array",
"numpy.empty",
"multiprocessing.Pool",
"numpy.arange"
] |
[((161, 214), 'numpy.array', 'np.array', (['[y0_Tpos, y0_Tpro, y0_Tneg, y0_o2, y0_test]'], {}), '([y0_Tpos, y0_Tpro, y0_Tneg, y0_o2, y0_test])\n', (169, 214), True, 'import numpy as np\n'), ((213, 237), 'numpy.array', 'np.array', (['[p_o2, p_test]'], {}), '([p_o2, p_test])\n', (221, 237), True, 'import numpy as np\n'), ((240, 316), 'numpy.array', 'np.array', (['[[mu_o2Tpos, mu_o2Tpro, mu_o2Tneg], [mu_testTpos, mu_testTpro, 0]]'], {}), '([[mu_o2Tpos, mu_o2Tpro, mu_o2Tneg], [mu_testTpos, mu_testTpro, 0]])\n', (248, 316), True, 'import numpy as np\n'), ((316, 344), 'numpy.array', 'np.array', (['[lam_o2, lam_test]'], {}), '([lam_o2, lam_test])\n', (324, 344), True, 'import numpy as np\n'), ((348, 385), 'numpy.array', 'np.array', (['[t_DTpos, t_DTpro, t_DTneg]'], {}), '([t_DTpos, t_DTpro, t_DTneg])\n', (356, 385), True, 'import numpy as np\n'), ((386, 420), 'numpy.array', 'np.array', (['[r_Tpos, r_Tpro, r_Tneg]'], {}), '([r_Tpos, r_Tpro, r_Tneg])\n', (394, 420), True, 'import numpy as np\n'), ((425, 471), 'numpy.array', 'np.array', (['[delta_Tpos, delta_Tpro, delta_Tneg]'], {}), '([delta_Tpos, delta_Tpro, delta_Tneg])\n', (433, 471), True, 'import numpy as np\n'), ((474, 514), 'numpy.array', 'np.array', (['[rho_Tpos, rho_Tpro, rho_Tneg]'], {}), '([rho_Tpos, rho_Tpro, rho_Tneg])\n', (482, 514), True, 'import numpy as np\n'), ((517, 725), 'numpy.array', 'np.array', (['[[[l_lim_o2Tpos, u_lim_o2Tpos], [l_lim_o2Tpro, u_lim_o2Tpro], [l_lim_o2Tneg,\n u_lim_o2Tneg]], [[l_lim_testTpos, u_lim_testTpos], [l_lim_testTpro,\n u_lim_testTpro], [0, 0]]]'], {'dtype': 'np.float64'}), '([[[l_lim_o2Tpos, u_lim_o2Tpos], [l_lim_o2Tpro, u_lim_o2Tpro], [\n l_lim_o2Tneg, u_lim_o2Tneg]], [[l_lim_testTpos, u_lim_testTpos], [\n l_lim_testTpro, u_lim_testTpro], [0, 0]]], dtype=np.float64)\n', (525, 725), True, 'import numpy as np\n'), ((852, 868), 'numpy.empty', 'np.empty', (['[0, 2]'], {}), '([0, 2])\n', (860, 868), True, 'import numpy as np\n'), ((885, 907), 'numpy.arange', 'np.arange', (['(0.1)', '(1)', '(0.2)'], {}), '(0.1, 1, 0.2)\n', (894, 907), True, 'import numpy as np\n'), ((755, 802), 'os.makedirs', 'os.makedirs', (["('../../raw_output/EnvEq/' + f_name)"], {}), "('../../raw_output/EnvEq/' + f_name)\n", (766, 802), False, 'import os\n'), ((928, 950), 'numpy.arange', 'np.arange', (['(0.1)', '(1)', '(0.2)'], {}), '(0.1, 1, 0.2)\n', (937, 950), True, 'import numpy as np\n'), ((1261, 1332), 'EnvEq.solve_eq', 'ee.solve_eq', (['t_max', 'dt', 'y0', 'p', 'mu', 'lam', 'r', 'K', 'delta', 'rho', 'lim', 'f_name_i'], {}), '(t_max, dt, y0, p, mu, lam, r, K, delta, rho, lim, f_name_i)\n', (1272, 1332), True, 'import EnvEq as ee\n'), ((1361, 1368), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (1365, 1368), False, 'from multiprocessing import Pool\n'), ((969, 1024), 'numpy.append', 'np.append', (['o2_lim_arr', '[[ulim_Tpro, ulim_Tneg]]'], {'axis': '(0)'}), '(o2_lim_arr, [[ulim_Tpro, ulim_Tneg]], axis=0)\n', (978, 1024), True, 'import numpy as np\n')]
|
import numpy as np
import xml.etree.ElementTree as ET
class Geom(object):
def __init__(self, geom):
self.xml = geom
self.params = []
def get_params(self):
return self.params.copy()
def set_params(self, new_params):
self.params = new_params
def update_point(self, p, new_params):
pass
def update_xml(self):
pass
def update(self, new_params):
self.set_params(new_params)
self.update_xml()
def get_smallest_z(self):
pass
def get_param_limits(self):
pass
def get_param_names(self):
pass
def get_volume(self):
pass
class Sphere(Geom):
min_radius = .05
max_radius = .4
def __init__(self, geom):
self.xml = geom
self.params = [float(self.xml.get('size'))] # radius
self.center = np.array([float(x) for x in self.xml.get('pos').split()])
def update_point(self, p, new_params):
return ((p - self.center) * new_params[0] / self.params[0]) + self.center
def update_xml(self):
self.xml.set('size', str(self.params[0]))
def get_smallest_z(self):
return self.center[2] - self.params[0]
def get_param_limits(self):
return [[self.min_radius], [self.max_radius]]
def get_param_names(self):
return ['radius']
def get_volume(self):
return 4./3. * np.pi * self.params[0] ** 3
class Capsule(Geom):
min_length = 0.175
max_length = 0.8
min_radius = 0.035
max_radius = 0.085
def __init__(self, geom):
self.xml = geom
fromto = [float(x) for x in self.xml.get('fromto').split()]
self.p1 = np.array(fromto[:3])
self.p2 = np.array(fromto[3:])
length = np.sqrt(np.sum((self.p2 - self.p1) ** 2))
radius = float(self.xml.get('size'))
self.params = [length, radius]
self.axis = (self.p2 - self.p1) / length
def update_point(self, p, new_params):
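        # Split p into components parallel (lfac) and perpendicular (rfac) to
        # the capsule axis, then rescale the parallel component by the ratio of
        # the new length to the current length (radius scaling is disabled).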
lfac = p.dot(self.axis) * self.axis
rfac = p - lfac
return p + lfac * (-1.0 + new_params[0] / self.params[0])# + rfac * (new_params[1] / self.params[1])
def update_xml(self):
self.xml.set('fromto', ' '.join([str(x) for x in np.concatenate([self.p1, self.p2])]))
self.xml.set('size', str(self.params[1])) # radius
def set_params(self, new_params):
p1 = self.update_point(self.p1, new_params)
p2 = self.update_point(self.p2, new_params)
# update only after computing p1, p2
self.p1 = p1
self.p2 = p2
super().set_params(new_params)
def get_smallest_z(self):
return min(self.p1[2], self.p2[2]) - self.params[1]
def get_param_limits(self):
return [[self.min_length, self.min_radius], [self.max_length, self.max_radius]]
def get_param_names(self):
return ['length','radius']
def get_volume(self):
return 4./3. * np.pi * self.params[1]**3 + self.params[0] * np.pi * self.params[1]**2
class Body:
geoms = {'sphere': Sphere, 'capsule': Capsule} # dictionary of legal geometry types
def __init__(self, body, worldbody=False):
self.xml = body
self.worldbody = worldbody
geom_xml = body.find('geom') # assume only one geometry per body
self.geom = self.geoms[geom_xml.get('type')](geom_xml)
self.joints = [j for j in body.findall('joint') if 'ignore' not in j.get('name')]
self.parts = [Body(b) for b in body.findall('body')]
pos = [b.get('pos') for b in body.findall('body')]
self.part_positions = [np.array([float(x) for x in p.split()]) for p in pos]
pos = [j.get('pos') for j in self.joints]
self.joint_positions = [np.array([float(x) for x in p.split()]) for p in pos]
self.n = len(self.geom.get_params())
self.n_all_params = len(self.get_params())
self.zmin = float(self.xml.get("pos").split()[2]) - self.get_height()
def get_height(self):
max_height = -self.geom.get_smallest_z()
for body, pos in zip(self.parts, self.part_positions):
max_height = max(max_height, body.get_height() - pos[2])
return max_height
def update_initial_position(self):
pos = self.xml.get("pos").split()
pos[2] = str(self.get_height() + self.zmin)
self.xml.set("pos", ' '.join(pos))
def update_xml(self):
for body, pos in zip(self.parts, self.part_positions):
body.xml.set('pos', ' '.join([str(x) for x in pos]))
for joint, pos in zip(self.joints, self.joint_positions):
joint.set('pos', ' '.join([str(x) for x in pos]))
def set_body_positions(self, new_params):
for i, pos in enumerate(self.part_positions):
self.part_positions[i] = self.geom.update_point(pos, new_params)
for i, pos in enumerate(self.joint_positions):
self.joint_positions[i] = self.geom.update_point(pos, new_params)
def update(self, new_params):
self.set_body_positions(new_params)
self.geom.update(new_params)
self.update_xml()
def get_params(self):
params = self.geom.get_params()
for body in self.parts:
params += body.get_params()
return params
def get_param_limits(self):
limits = self.geom.get_param_limits()
for body in self.parts:
body_limits = body.get_param_limits()
limits[0] += body_limits[0]
limits[1] += body_limits[1]
return limits
def get_param_names(self):
name = self.xml.get('name')
param_names = [name + '-' + p for p in self.geom.get_param_names()]
for body in self.parts:
param_names += body.get_param_names()
return param_names
def update_params(self, new_params):
if self.worldbody: assert len(new_params) == self.n_all_params, "Wrong number of parameters"
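        # Parameters are consumed depth-first: this body's geometry takes the
        # first self.n values, then each child body recursively takes its share
        # and returns whatever remains.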
self.update(new_params[:self.n])
remaining_params = new_params[self.n:]
for body in self.parts:
remaining_params = body.update_params(remaining_params)
if self.worldbody:
self.update_initial_position()
else:
return remaining_params
def get_body_names(self):
names = [self.xml.get('name')]
for body in self.parts:
            names += body.get_body_names()
return names
def get_joints(self):
joints = {}
for body,pos in zip(self.parts, self.part_positions):
for j in body.joints:
joints[j.get('name')] = (self.xml.get('name'), body.xml.get('name'), self.geom, body.geom, pos)
joints.update(body.get_joints())
return joints
def get_volumes(self):
volumes = {}
if len(self.joints) > 0:
for j in self.joints:
v1 = self.geom.get_volume()
v2 = sum([b.geom.get_volume() for b in self.parts])
volumes[j.get('name')] = np.array((v1, v2))
for body in self.parts:
volumes.update(body.get_volumes())
return volumes
class MuJoCoXmlRobot:
def __init__(self, model_xml):
self.model_xml = model_xml
self.tree = ET.parse(self.model_xml)
worldbody = self.tree.getroot().find('worldbody')
self.body = Body(worldbody.find('body'), worldbody=True)
def get_params(self):
return self.body.get_params()
def get_param_limits(self):
return self.body.get_param_limits()
def get_param_names(self):
return self.body.get_param_names()
def get_height(self):
return self.body.get_height()
def get_joints(self):
return self.body.get_joints()
def get_volumes(self):
return self.body.get_volumes()
def update(self, params, xml_file=None):
if xml_file is None:
xml_file = self.model_xml
self.body.update_params(list(params))
self.tree.write(xml_file)
if __name__ == '__main__':
robot = MuJoCoXmlRobot('mujoco_assets/hopper.xml')
params = list(1.0 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/hopper_test.xml')
assert robot.get_params() == params
#assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/walker2d.xml')
params = [.4,.04,.5,.05,.55,.055,.6,.06,.5,.05,.55,.055,.6,.06]
robot.update(params, 'mujoco_assets/walker2d_test.xml')
assert robot.get_params() == params
assert robot.get_height() == 1.31
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/ant.xml')
params = [.2, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06, .2,.06,.2,.06,.4,.06]
robot.update(params, 'mujoco_assets/ant_test.xml')
assert robot.get_params() == params
assert robot.get_height() == .2
print(robot.get_param_limits())
print(robot.get_param_names())
robot = MuJoCoXmlRobot('mujoco_assets/humanoid.xml')
params = list(.8 * np.array(robot.get_params()))
robot.update(params, 'mujoco_assets/humanoid_test.xml')
assert robot.get_params() == params
print(robot.get_height())
#assert robot.get_height() == .6085
print(robot.get_param_limits())
print(robot.get_param_names())
import gym, roboschool
env = gym.make("RoboschoolHopper-v1")
env.unwrapped.model_xml = 'mujoco_assets/hopper_test.xml'
env.reset()
#env.render()
import os
from scipy.misc import imsave
import subprocess as sp
outdir = 'xml_vid'
os.makedirs(outdir, exist_ok=True)
i = 0
for _ in range(10):
env.reset()
for _ in range(100):
env.step(env.action_space.sample())
rgb = env.render('rgb_array')
imsave(os.path.join(outdir, '{:05d}.png'.format(i)), rgb)
i+=1
sp.call(['ffmpeg', '-r', '60', '-f', 'image2', '-i', os.path.join(outdir, '%05d.png'), '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', os.path.join(outdir, 'out.mp4')])
env.close()
|
[
"xml.etree.ElementTree.parse",
"os.makedirs",
"os.path.join",
"numpy.sum",
"numpy.array",
"numpy.concatenate",
"gym.make"
] |
[((9394, 9425), 'gym.make', 'gym.make', (['"""RoboschoolHopper-v1"""'], {}), "('RoboschoolHopper-v1')\n", (9402, 9425), False, 'import gym, roboschool\n'), ((9625, 9659), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (9636, 9659), False, 'import os\n'), ((1671, 1691), 'numpy.array', 'np.array', (['fromto[:3]'], {}), '(fromto[:3])\n', (1679, 1691), True, 'import numpy as np\n'), ((1710, 1730), 'numpy.array', 'np.array', (['fromto[3:]'], {}), '(fromto[3:])\n', (1718, 1730), True, 'import numpy as np\n'), ((7208, 7232), 'xml.etree.ElementTree.parse', 'ET.parse', (['self.model_xml'], {}), '(self.model_xml)\n', (7216, 7232), True, 'import xml.etree.ElementTree as ET\n'), ((1756, 1788), 'numpy.sum', 'np.sum', (['((self.p2 - self.p1) ** 2)'], {}), '((self.p2 - self.p1) ** 2)\n', (1762, 1788), True, 'import numpy as np\n'), ((9977, 10009), 'os.path.join', 'os.path.join', (['outdir', '"""%05d.png"""'], {}), "(outdir, '%05d.png')\n", (9989, 10009), False, 'import os\n'), ((10056, 10087), 'os.path.join', 'os.path.join', (['outdir', '"""out.mp4"""'], {}), "(outdir, 'out.mp4')\n", (10068, 10087), False, 'import os\n'), ((6973, 6991), 'numpy.array', 'np.array', (['(v1, v2)'], {}), '((v1, v2))\n', (6981, 6991), True, 'import numpy as np\n'), ((2228, 2262), 'numpy.concatenate', 'np.concatenate', (['[self.p1, self.p2]'], {}), '([self.p1, self.p2])\n', (2242, 2262), True, 'import numpy as np\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the Python API and shell binary of the tensorflowjs pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub
import tensorflowjs as tfjs
def _createKerasModel(layer_name_prefix, h5_path=None):
"""Create a Keras model for testing.
Args:
layer_name_prefix: A prefix string for layer names. This helps avoid
clashes in layer names between different test methods.
    h5_path: Optional string path for an HDF5 (.h5) file to save the model
in.
Returns:
An instance of keras.Model.
"""
input_tensor = keras.layers.Input((3, ))
dense1 = keras.layers.Dense(
4,
use_bias=True,
kernel_initializer='ones',
bias_initializer='zeros',
name=layer_name_prefix + '1')(input_tensor)
output = keras.layers.Dense(
2,
use_bias=False,
kernel_initializer='ones',
name=layer_name_prefix + '2')(dense1)
model = keras.models.Model(inputs=[input_tensor], outputs=[output])
if h5_path:
model.save(h5_path)
return model
def _createTensorFlowSavedModelV1(name_scope, save_path):
"""Create a TensorFlow SavedModel for testing.
Args:
name_scope: Name scope to create the model under. This helps avoid
op and variable name clashes between different test methods.
save_path: The directory path in which to save the model.
"""
graph = tf.Graph()
with graph.as_default():
with tf.compat.v1.name_scope(name_scope):
x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
w = tf.compat.v1.get_variable('w', shape=[2, 2])
y = tf.compat.v1.matmul(x, w)
output = tf.compat.v1.nn.softmax(y)
init_op = w.initializer
# Create a builder.
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_path)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
"serving_default":
tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
inputs={"x": x},
outputs={"output": output})
},
assets_collection=None)
builder.save()
def _createTensorFlowSavedModel(name_scope, save_path):
"""Create a TensorFlow SavedModel for testing.
Args:
name_scope: Name scope to create the model under. This helps avoid
op and variable name clashes between different test methods.
save_path: The directory path in which to save the model.
"""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save(root, save_path, to_save)
def _create_hub_module(save_path):
"""Create a TensorFlow Hub module for testing.
Args:
save_path: The directory path in which to save the model.
"""
# Module function that doubles its input.
def double_module_fn():
w = tf.Variable([2.0, 4.0])
x = tf.compat.v1.placeholder(dtype=tf.float32)
hub.add_signature(inputs=x, outputs=x*w)
graph = tf.Graph()
with graph.as_default():
spec = hub.create_module_spec(double_module_fn)
m = hub.Module(spec)
# Export the module.
with tf.compat.v1.Session(graph=graph) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m.export(save_path, sess)
class APIAndShellTest(tf.test.TestCase):
"""Tests for the Python API of the pip package."""
@classmethod
def setUpClass(cls):
cls.class_tmp_dir = tempfile.mkdtemp()
cls.tf_saved_model_dir = os.path.join(cls.class_tmp_dir, 'tf_saved_model')
cls.tf_saved_model_v1_dir = os.path.join(
cls.class_tmp_dir, 'tf_saved_model_v1')
_createTensorFlowSavedModel('a', cls.tf_saved_model_dir)
_createTensorFlowSavedModelV1('b', cls.tf_saved_model_v1_dir)
cls.tf_hub_module_dir = os.path.join(cls.class_tmp_dir, 'tf_hub_module')
_create_hub_module(cls.tf_hub_module_dir)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.class_tmp_dir)
def setUp(self):
# Make sure this file is not being run from the source directory, to
# avoid picking up source files.
if os.path.isdir(
os.path.join(os.path.dirname(__file__), 'tensorflowjs')):
self.fail('Do not run this test from the Python source directory. '
'This file is intended to be run on pip install.')
self._tmp_dir = tempfile.mkdtemp()
super(APIAndShellTest, self).setUp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(APIAndShellTest, self).tearDown()
def testVersionString(self):
self.assertEqual(2, tfjs.__version__.count('.'))
def testSaveKerasModel(self):
with self.test_session():
# First create a toy keras model.
model = _createKerasModel('MergedDense')
tfjs.converters.save_keras_model(model, self._tmp_dir)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json')) as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDense1/kernel', 'MergedDense1/bias',
'MergedDense2/kernel'
]))
self.assertEqual(weight_shapes['MergedDense1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDense1/bias'], [4])
self.assertEqual(weight_shapes['MergedDense2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDense1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDense1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDense2/kernel'], 'float32')
def testLoadKerasModel(self):
# Use separate tf.Graph and tf.compat.v1.Session contexts to prevent name collision.
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
model1 = _createKerasModel('MergedDense')
tfjs.converters.save_keras_model(model1, self._tmp_dir)
model1_weight_values = model1.get_weights()
with tf.Graph().as_default(), tf.compat.v1.Session():
# Load the model from saved artifacts.
model2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
# Compare the loaded model with the original one.
model2_weight_values = model2.get_weights()
self.assertEqual(len(model1_weight_values), len(model2_weight_values))
for model1_weight_value, model2_weight_value in zip(
model1_weight_values, model2_weight_values):
self.assertAllClose(model1_weight_value, model2_weight_value)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(self._tmp_dir, 'group*-*')))
def testInvalidInputFormatRaisesError(self):
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format',
'nonsensical_format', self._tmp_dir, self._tmp_dir
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(b'--input_format', tf.compat.as_bytes(stderr))
def testMissingInputPathRaisesError(self):
process = subprocess.Popen(
[
'tensorflowjs_converter'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(b'input_path', tf.compat.as_bytes(stderr))
def testKerasH5ConversionWorksFromCLI(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
_createKerasModel('MergedDenseForCLI', h5_path)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias',
'MergedDenseForCLI2/kernel'
]))
self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4])
self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32')
# Verify that there is only one weight group due to the default
# non-split_weights_by_layer behavior. The model is a small one, which
# does not exceed the 4-MB shard size limit. Therefore, there should
# be only one weight file.
self.assertEqual(
1, len(glob.glob(os.path.join(self._tmp_dir, 'group*'))))
def testKerasH5ConversionSplitWeightsByLayerWorksFromCLI(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
# First create a toy keras model.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
_createKerasModel('MergedDenseForCLI', h5_path)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras',
'--split_weights_by_layer', h5_path, self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# Briefly check the model topology.
with open(os.path.join(self._tmp_dir, 'model.json'), 'rt') as f:
json_content = json.load(f)
model_json = json_content['modelTopology']
self.assertIsInstance(model_json['model_config'], dict)
self.assertIsInstance(model_json['model_config']['config'], dict)
self.assertIn('layers', model_json['model_config']['config'])
weights_manifest = json_content['weightsManifest']
self.assertIsInstance(weights_manifest, list)
# Briefly check the weights manifest.
weight_shapes = dict()
weight_dtypes = dict()
for manifest_item in weights_manifest:
for weight in manifest_item['weights']:
weight_name = weight['name']
weight_shapes[weight_name] = weight['shape']
weight_dtypes[weight_name] = weight['dtype']
self.assertEqual(
sorted(list(weight_shapes.keys())),
sorted([
'MergedDenseForCLI1/kernel', 'MergedDenseForCLI1/bias',
'MergedDenseForCLI2/kernel'
]))
self.assertEqual(weight_shapes['MergedDenseForCLI1/kernel'], [3, 4])
self.assertEqual(weight_shapes['MergedDenseForCLI1/bias'], [4])
self.assertEqual(weight_shapes['MergedDenseForCLI2/kernel'], [4, 2])
self.assertEqual(weight_dtypes['MergedDenseForCLI1/kernel'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI1/bias'], 'float32')
self.assertEqual(weight_dtypes['MergedDenseForCLI2/kernel'], 'float32')
# Verify that there are two weight groups due to the optional flag
# --split_weights_by_layer behavior. The model is a small one. None of
# the layers should have weight sizes exceeding the 4-MB shard size
# limit.
self.assertEqual(
2, len(glob.glob(os.path.join(self._tmp_dir, 'group*'))))
def testKerasH5ConversionWithSignatureNameErrors(self):
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format', 'keras',
'--signature_name', 'bar',
os.path.join(self._tmp_dir, 'foo.h5'),
os.path.join(self._tmp_dir, 'output')
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertGreater(process.returncode, 0)
self.assertIn(
b'The --signature_name flag is applicable only to',
tf.compat.as_bytes(stderr))
def testConvertTFSavedModelV1WithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
self.tf_saved_model_v1_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{'dtype': 'float32', 'name': 'w', 'shape': [2, 2]}]}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFHubModuleWithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_hub',
self.tf_hub_module_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'shape': [2],
'name': 'module/Variable',
'dtype': 'float32'
}]
}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFSavedModelWithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_saved_model',
'--output_format', 'tfjs_graph_model',
self.tf_saved_model_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'dtype': 'float32',
'shape': [],
'name': 'StatefulPartitionedCall/mul'
}]
}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
weights_manifest = output_json['weightsManifest']
self.assertEqual(len(weights_manifest), len(weights))
if sys.version_info[0] < 3:
self.assertItemsEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertItemsEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
else:
self.assertCountEqual(weights_manifest[0]['paths'],
weights[0]['paths'])
self.assertCountEqual(weights_manifest[0]['weights'],
weights[0]['weights'])
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTFHubModuleWithCommandLineWorks(self):
output_dir = os.path.join(self._tmp_dir)
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tf_hub',
self.tf_hub_module_dir, output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
weights = [{
'paths': ['group1-shard1of1.bin'],
'weights': [{
'shape': [2],
'name': 'module/Variable',
'dtype': 'float32'
}]
}]
# Load the saved weights as a JSON string.
output_json = json.load(
open(os.path.join(output_dir, 'model.json'), 'rt'))
self.assertEqual(output_json['weightsManifest'], weights)
# Check the content of the output directory.
self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def testConvertTensorflowjsArtifactsToKerasH5(self):
# 1. Create a toy keras model and save it as an HDF5 file.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
with tf.Graph().as_default(), tf.compat.v1.Session():
model = _createKerasModel('MergedDenseForCLI', h5_path)
model_json = model.to_json()
# 2. Convert the HDF5 file to tensorflowjs format.
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tensorflowjs artifacts back to HDF5.
new_h5_path = os.path.join(self._tmp_dir, 'model_2.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras',
os.path.join(self._tmp_dir, 'model.json'), new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Load the model back from the new HDF5 file and compare with the
# original model.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = keras.models.load_model(new_h5_path)
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
def testLoadTensorflowjsArtifactsAsKerasModel(self):
# 1. Create a toy keras model and save it as an HDF5 file.
os.makedirs(os.path.join(self._tmp_dir, 'keras_h5'))
h5_path = os.path.join(self._tmp_dir, 'keras_h5', 'model.h5')
with tf.Graph().as_default(), tf.compat.v1.Session():
model = _createKerasModel('MergedDenseForCLI', h5_path)
model_json = model.to_json()
# 2. Convert the HDF5 file to tensorflowjs format.
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras', h5_path,
self._tmp_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Load the tensorflowjs artifacts as a keras.Model instance.
with tf.Graph().as_default(), tf.compat.v1.Session():
model_2 = tfjs.converters.load_keras_model(
os.path.join(self._tmp_dir, 'model.json'))
model_2_json = model_2.to_json()
self.assertEqual(model_json, model_2_json)
def testVersion(self):
process = subprocess.Popen(
['tensorflowjs_converter', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertIn(
tf.compat.as_bytes('tensorflowjs %s' % tfjs.__version__),
tf.compat.as_bytes(stdout))
process = subprocess.Popen(
['tensorflowjs_converter', '-v'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertIn(
tf.compat.as_bytes('tensorflowjs %s' % tfjs.__version__),
tf.compat.as_bytes(stdout))
class ConvertTfKerasSavedModelTest(tf.test.TestCase):
def setUp(self):
super(ConvertTfKerasSavedModelTest, self).setUp()
self._tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(ConvertTfKerasSavedModelTest, self).tearDown()
def _createSimpleSequentialModel(self):
model = keras.Sequential()
model.add(keras.layers.Reshape([2, 3], input_shape=[6]))
model.add(keras.layers.LSTM(10))
model.add(keras.layers.Dense(1, activation='sigmoid'))
return model
def _createNestedSequentialModel(self):
model = keras.Sequential()
model.add(keras.layers.Dense(6, input_shape=[10], activation='relu'))
model.add(self._createSimpleSequentialModel())
return model
def _createFunctionalModelWithWeights(self):
input1 = keras.Input(shape=[8])
input2 = keras.Input(shape=[10])
y = keras.layers.Concatenate()([input1, input2])
y = keras.layers.Dense(4, activation='softmax')(y)
model = keras.Model([input1, input2], y)
return model
def testConvertTfKerasNestedSequentialSavedModelIntoTfjsFormat(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run the model.predict(), store the result. Then save the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
model_json_path = os.path.join(tfjs_output_dir, 'model.json')
self.assertTrue(os.path.isfile(model_json_path))
# 3. Convert the tfjs model to keras h5 format.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras', model_json_path, new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
self.assertTrue(os.path.isfile(new_h5_path))
# 4. Load the model back and assert on the equality of the predict
# results.
model_prime = keras.models.load_model(new_h5_path)
new_y = model_prime.predict(x)
self.assertAllClose(y, new_y)
def testConvertTfKerasFunctionalSavedModelIntoTfjsFormat(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x1 = np.random.randn(4, 8)
x2 = np.random.randn(4, 10)
      # 1. Run the model.predict(), store the result. Then save the model
# as a SavedModel.
model = self._createFunctionalModelWithWeights()
y = model.predict([x1, x2])
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Use explicit --output_format value: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
'--output_format', 'tfjs_layers_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
model_json_path = os.path.join(tfjs_output_dir, 'model.json')
self.assertTrue(os.path.isfile(model_json_path))
# 3. Convert the tfjs model to keras h5 format.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'keras', model_json_path, new_h5_path])
process.communicate()
self.assertEqual(0, process.returncode)
self.assertTrue(os.path.isfile(new_h5_path))
# 4. Load the model back and assert on the equality of the predict
# results.
model_prime = keras.models.load_model(new_h5_path)
new_y = model_prime.predict([x1, x2])
self.assertAllClose(y, new_y)
def testUsingIncorrectKerasSavedModelRaisesError(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run the model.predict(), store the result. Then save the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Use incorrect --input_format value: keras
process = subprocess.Popen(
[
'tensorflowjs_converter', '--input_format', 'keras',
self._tmp_dir, tfjs_output_dir
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
self.assertIn(
b'Expected path to point to an HDF5 file, '
b'but it points to a directory', tf.compat.as_bytes(stderr))
def testConvertTfjsLayersModelIntoShardedWeights(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run the model.predict(), store the result. Then save the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
weights = model.get_weights()
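      # Each float32 weight occupies 4 bytes, hence the factor of 4 below.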
total_weight_bytes = sum(np.size(w) for w in weights) * 4
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_layers_model,
# with sharded weights.
weight_shard_size_bytes = int(total_weight_bytes * 0.3)
# Due to the shard size, there ought to be 4 shards after conversion.
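      # (total_weight_bytes / weight_shard_size_bytes = 1 / 0.3, which rounds up to 4 files.)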
sharded_model_dir = os.path.join(self._tmp_dir, 'tfjs_sharded')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_layers_model',
'--weight_shard_size_bytes', str(weight_shard_size_bytes),
os.path.join(tfjs_output_dir, 'model.json'), sharded_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the sharded weight files and their sizes.
weight_files = sorted(
glob.glob(os.path.join(sharded_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 4)
weight_file_sizes = [os.path.getsize(f) for f in weight_files]
self.assertEqual(sum(weight_file_sizes), total_weight_bytes)
self.assertEqual(weight_file_sizes[0], weight_file_sizes[1])
self.assertEqual(weight_file_sizes[0], weight_file_sizes[2])
self.assertLess(weight_file_sizes[3], weight_file_sizes[0])
# 5. Convert the sharded tfjs_layers_model back into a keras h5 file.
new_h5_path = os.path.join(self._tmp_dir, 'new_h5.h5')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
os.path.join(sharded_model_dir, 'model.json'), new_h5_path
])
process.communicate()
self.assertEqual(0, process.returncode)
with tf.Graph().as_default(), tf.compat.v1.Session():
# 6. Load the keras model and check the predict() output is close to
# before.
new_model = keras.models.load_model(new_h5_path)
new_y = new_model.predict(x)
self.assertAllClose(new_y, y)
def testConvertTfjsLayersModelWithQuantization(self):
with tf.Graph().as_default(), tf.compat.v1.Session():
x = np.random.randn(8, 10)
      # 1. Run the model.predict(), store the result. Then save the model
# as a SavedModel.
model = self._createNestedSequentialModel()
y = model.predict(x)
weights = model.get_weights()
total_weight_bytes = sum(np.size(w) for w in weights) * 4
keras.experimental.export_saved_model(model, self._tmp_dir)
# 2. Convert the keras saved model to tfjs_layers_model format.
tfjs_output_dir = os.path.join(self._tmp_dir, 'tfjs')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras_saved_model',
self._tmp_dir, tfjs_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_layers_model,
# with uint16 quantization.
weight_shard_size_bytes = int(total_weight_bytes * 0.3)
      # The shard size computed above is not passed to the converter here, so a
      # single quantized weight file is expected after conversion.
sharded_model_dir = os.path.join(self._tmp_dir, 'tfjs_sharded')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_layers_model',
'--quantization_bytes', '2',
os.path.join(tfjs_output_dir, 'model.json'), sharded_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the quantized weight file and its size.
weight_files = sorted(
glob.glob(os.path.join(sharded_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 1)
weight_file_size = os.path.getsize(weight_files[0])
# The size of the weight file should reflect the uint16 quantization.
self.assertEqual(weight_file_size, total_weight_bytes // 2)
def testConvertTfjsLayersModelToTfjsGraphModel(self):
x = np.random.randn(8, 10)
# 1. Create a model for testing.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=[4]))
model.add(keras.layers.Dense(1, activation='sigmoid'))
h5_path = os.path.join(self._tmp_dir, 'model.h5')
model.save(h5_path)
# 2. Convert the keras saved model to tfjs_layers_model format.
layers_model_output_dir = os.path.join(self._tmp_dir, 'tfjs_layers')
# Implicit value of --output_format: tfjs_layers_model
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'keras',
h5_path, layers_model_output_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 3. Convert the tfjs_layers_model to another tfjs_graph_model.
graph_model_dir = os.path.join(self._tmp_dir, 'tfjs_graph')
process = subprocess.Popen([
'tensorflowjs_converter', '--input_format', 'tfjs_layers_model',
'--output_format', 'tfjs_graph_model',
os.path.join(layers_model_output_dir, 'model.json'), graph_model_dir
])
process.communicate()
self.assertEqual(0, process.returncode)
# 4. Check the model.json and weight file and its size.
self.assertTrue(os.path.isfile(os.path.join(graph_model_dir, 'model.json')))
weight_files = sorted(
glob.glob(os.path.join(graph_model_dir, 'group*.bin')))
self.assertEqual(len(weight_files), 1)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.python.eager.def_function.function",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.experimental.export_saved_model",
"tensorflow.compat.as_bytes",
"tensorflow.python.ops.variables.Variable",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.keras.layers.Input",
"tensorflow.Graph",
"tensorflow.compat.v1.saved_model.builder.SavedModelBuilder",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.Sequential",
"tensorflow_hub.create_module_spec",
"tensorflow.python.training.tracking.tracking.AutoTrackable",
"tensorflow.python.saved_model.save.save",
"subprocess.Popen",
"tensorflow.compat.v1.saved_model.signature_def_utils.predict_signature_def",
"os.path.isdir",
"tensorflow.keras.models.Model",
"tensorflow.compat.v1.nn.softmax",
"os.path.getsize",
"tensorflow_hub.Module",
"tensorflow.Variable",
"numpy.size",
"tensorflowjs.converters.save_keras_model",
"os.path.isfile",
"tensorflow.keras.layers.LSTM",
"os.path.dirname",
"tempfile.mkdtemp",
"tensorflow.compat.v1.constant",
"tensorflow.keras.Input",
"numpy.random.randn",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.get_variable",
"tensorflow.keras.layers.Concatenate",
"os.path.join",
"tensorflow.python.framework.constant_op.constant",
"tensorflowjs.__version__.count",
"tensorflow.test.main",
"json.load",
"shutil.rmtree",
"tensorflow.keras.Model",
"tensorflow_hub.add_signature"
] |
[((1826, 1850), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['(3,)'], {}), '((3,))\n', (1844, 1850), False, 'from tensorflow import keras\n'), ((2177, 2236), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': '[input_tensor]', 'outputs': '[output]'}), '(inputs=[input_tensor], outputs=[output])\n', (2195, 2236), False, 'from tensorflow import keras\n'), ((2622, 2632), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2630, 2632), True, 'import tensorflow as tf\n'), ((3916, 3952), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), '(1.0, shape=[1])\n', (3936, 3952), False, 'from tensorflow.python.framework import constant_op\n'), ((3961, 3985), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (3983, 3985), False, 'from tensorflow.python.training.tracking import tracking\n'), ((3998, 4021), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), '(3.0)\n', (4016, 4021), False, 'from tensorflow.python.ops import variables\n'), ((4033, 4056), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), '(2.0)\n', (4051, 4056), False, 'from tensorflow.python.ops import variables\n'), ((4067, 4121), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), '(lambda x: root.v1 * root.v2 * x)\n', (4088, 4121), False, 'from tensorflow.python.eager import def_function\n'), ((4178, 4208), 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_path', 'to_save'], {}), '(root, save_path, to_save)\n', (4182, 4208), False, 'from tensorflow.python.saved_model.save import save\n'), ((4580, 4590), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4588, 4590), True, 'import tensorflow as tf\n'), ((33845, 33859), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (33857, 33859), True, 'import tensorflow as tf\n'), ((1863, 1986), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(4)'], {'use_bias': '(True)', 'kernel_initializer': '"""ones"""', 'bias_initializer': '"""zeros"""', 'name': "(layer_name_prefix + '1')"}), "(4, use_bias=True, kernel_initializer='ones',\n bias_initializer='zeros', name=layer_name_prefix + '1')\n", (1881, 1986), False, 'from tensorflow import keras\n'), ((2039, 2138), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(2)'], {'use_bias': '(False)', 'kernel_initializer': '"""ones"""', 'name': "(layer_name_prefix + '2')"}), "(2, use_bias=False, kernel_initializer='ones', name=\n layer_name_prefix + '2')\n", (2057, 2138), False, 'from tensorflow import keras\n'), ((4450, 4473), 'tensorflow.Variable', 'tf.Variable', (['[2.0, 4.0]'], {}), '([2.0, 4.0])\n', (4461, 4473), True, 'import tensorflow as tf\n'), ((4482, 4524), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (4506, 4524), True, 'import tensorflow as tf\n'), ((4529, 4571), 'tensorflow_hub.add_signature', 'hub.add_signature', ([], {'inputs': 'x', 'outputs': '(x * w)'}), '(inputs=x, outputs=x * w)\n', (4546, 4571), True, 'import tensorflow_hub as hub\n'), ((4629, 4669), 'tensorflow_hub.create_module_spec', 'hub.create_module_spec', (['double_module_fn'], {}), '(double_module_fn)\n', (4651, 4669), True, 'import tensorflow_hub as hub\n'), ((4678, 4694), 'tensorflow_hub.Module', 'hub.Module', (['spec'], {}), '(spec)\n', (4688, 4694), True, 'import tensorflow_hub as hub\n'), ((4725, 
4758), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (4745, 4758), True, 'import tensorflow as tf\n'), ((5014, 5032), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5030, 5032), False, 'import tempfile\n'), ((5062, 5111), 'os.path.join', 'os.path.join', (['cls.class_tmp_dir', '"""tf_saved_model"""'], {}), "(cls.class_tmp_dir, 'tf_saved_model')\n", (5074, 5111), False, 'import os\n'), ((5144, 5196), 'os.path.join', 'os.path.join', (['cls.class_tmp_dir', '"""tf_saved_model_v1"""'], {}), "(cls.class_tmp_dir, 'tf_saved_model_v1')\n", (5156, 5196), False, 'import os\n'), ((5369, 5417), 'os.path.join', 'os.path.join', (['cls.class_tmp_dir', '"""tf_hub_module"""'], {}), "(cls.class_tmp_dir, 'tf_hub_module')\n", (5381, 5417), False, 'import os\n'), ((5510, 5542), 'shutil.rmtree', 'shutil.rmtree', (['cls.class_tmp_dir'], {}), '(cls.class_tmp_dir)\n', (5523, 5542), False, 'import shutil\n'), ((5923, 5941), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5939, 5941), False, 'import tempfile\n'), ((6013, 6041), 'os.path.isdir', 'os.path.isdir', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (6026, 6041), False, 'import os\n'), ((9021, 9192), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'nonsensical_format', self.\n _tmp_dir, self._tmp_dir]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['tensorflowjs_converter', '--input_format',\n 'nonsensical_format', self._tmp_dir, self._tmp_dir], stdout=subprocess.\n PIPE, stderr=subprocess.PIPE)\n", (9037, 9192), False, 'import subprocess\n'), ((9452, 9549), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['tensorflowjs_converter'], stdout=subprocess.PIPE, stderr\n =subprocess.PIPE)\n", (9468, 9549), False, 'import subprocess\n'), ((15253, 15280), 'os.path.join', 'os.path.join', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (15265, 15280), False, 'import os\n'), ((15295, 15463), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'tf_saved_model',\n '--output_format', 'tfjs_graph_model', self.tf_saved_model_v1_dir,\n output_dir]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'tf_saved_model', '--output_format', 'tfjs_graph_model', self.\n tf_saved_model_v1_dir, output_dir])\n", (15311, 15463), False, 'import subprocess\n'), ((16083, 16110), 'os.path.join', 'os.path.join', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (16095, 16110), False, 'import os\n'), ((16125, 16237), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'tf_hub', self.\n tf_hub_module_dir, output_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'tf_hub',\n self.tf_hub_module_dir, output_dir])\n", (16141, 16237), False, 'import subprocess\n'), ((16915, 16942), 'os.path.join', 'os.path.join', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (16927, 16942), False, 'import os\n'), ((16957, 17122), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'tf_saved_model',\n '--output_format', 'tfjs_graph_model', self.tf_saved_model_dir, output_dir]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'tf_saved_model', '--output_format', 'tfjs_graph_model', self.\n tf_saved_model_dir, output_dir])\n", (16973, 17122), False, 'import subprocess\n'), ((18336, 18363), 'os.path.join', 'os.path.join', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (18348, 
18363), False, 'import os\n'), ((18378, 18490), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'tf_hub', self.\n tf_hub_module_dir, output_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'tf_hub',\n self.tf_hub_module_dir, output_dir])\n", (18394, 18490), False, 'import subprocess\n'), ((19283, 19334), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""', '"""model.h5"""'], {}), "(self._tmp_dir, 'keras_h5', 'model.h5')\n", (19295, 19334), False, 'import os\n'), ((19560, 19659), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras', h5_path, self._tmp_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'keras',\n h5_path, self._tmp_dir])\n", (19576, 19659), False, 'import subprocess\n'), ((19825, 19866), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model_2.h5"""'], {}), "(self._tmp_dir, 'model_2.h5')\n", (19837, 19866), False, 'import os\n'), ((20632, 20683), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""', '"""model.h5"""'], {}), "(self._tmp_dir, 'keras_h5', 'model.h5')\n", (20644, 20683), False, 'import os\n'), ((20909, 21008), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras', h5_path, self._tmp_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'keras',\n h5_path, self._tmp_dir])\n", (20925, 21008), False, 'import subprocess\n'), ((21455, 21565), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--version']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['tensorflowjs_converter', '--version'], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n", (21471, 21565), False, 'import subprocess\n'), ((21804, 21906), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '-v']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['tensorflowjs_converter', '-v'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (21820, 21906), False, 'import subprocess\n'), ((22281, 22299), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (22297, 22299), False, 'import tempfile\n'), ((22330, 22358), 'os.path.isdir', 'os.path.isdir', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (22343, 22358), False, 'import os\n'), ((22507, 22525), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (22523, 22525), False, 'from tensorflow import keras\n'), ((22755, 22773), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (22771, 22773), False, 'from tensorflow import keras\n'), ((22977, 22999), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '[8]'}), '(shape=[8])\n', (22988, 22999), False, 'from tensorflow import keras\n'), ((23013, 23036), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '[10]'}), '(shape=[10])\n', (23024, 23036), False, 'from tensorflow import keras\n'), ((23157, 23189), 'tensorflow.keras.Model', 'keras.Model', (['[input1, input2]', 'y'], {}), '([input1, input2], y)\n', (23168, 23189), False, 'from tensorflow import keras\n'), ((32381, 32403), 'numpy.random.randn', 'np.random.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (32396, 32403), True, 'import numpy as np\n'), ((32454, 32472), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (32470, 32472), False, 'from tensorflow import keras\n'), ((32621, 32660), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model.h5"""'], {}), "(self._tmp_dir, 'model.h5')\n", (32633, 32660), False, 'import os\n'), ((32784, 
32826), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs_layers"""'], {}), "(self._tmp_dir, 'tfjs_layers')\n", (32796, 32826), False, 'import os\n'), ((32900, 33009), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras', h5_path,\n layers_model_output_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'keras',\n h5_path, layers_model_output_dir])\n", (32916, 33009), False, 'import subprocess\n'), ((33189, 33230), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs_graph"""'], {}), "(self._tmp_dir, 'tfjs_graph')\n", (33201, 33230), False, 'import os\n'), ((2669, 2704), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name_scope'], {}), '(name_scope)\n', (2692, 2704), True, 'import tensorflow as tf\n'), ((2716, 2766), 'tensorflow.compat.v1.constant', 'tf.compat.v1.constant', (['[[37.0, -23.0], [1.0, 4.0]]'], {}), '([[37.0, -23.0], [1.0, 4.0]])\n', (2737, 2766), True, 'import tensorflow as tf\n'), ((2777, 2821), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', (['"""w"""'], {'shape': '[2, 2]'}), "('w', shape=[2, 2])\n", (2802, 2821), True, 'import tensorflow as tf\n'), ((2832, 2857), 'tensorflow.compat.v1.matmul', 'tf.compat.v1.matmul', (['x', 'w'], {}), '(x, w)\n', (2851, 2857), True, 'import tensorflow as tf\n'), ((2873, 2899), 'tensorflow.compat.v1.nn.softmax', 'tf.compat.v1.nn.softmax', (['y'], {}), '(y)\n', (2896, 2899), True, 'import tensorflow as tf\n'), ((2973, 3034), 'tensorflow.compat.v1.saved_model.builder.SavedModelBuilder', 'tf.compat.v1.saved_model.builder.SavedModelBuilder', (['save_path'], {}), '(save_path)\n', (3023, 3034), True, 'import tensorflow as tf\n'), ((4781, 4824), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (4822, 4824), True, 'import tensorflow as tf\n'), ((6049, 6077), 'shutil.rmtree', 'shutil.rmtree', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (6062, 6077), False, 'import shutil\n'), ((6178, 6205), 'tensorflowjs.__version__.count', 'tfjs.__version__.count', (['"""."""'], {}), "('.')\n", (6200, 6205), True, 'import tensorflowjs as tfjs\n'), ((6364, 6418), 'tensorflowjs.converters.save_keras_model', 'tfjs.converters.save_keras_model', (['model', 'self._tmp_dir'], {}), '(model, self._tmp_dir)\n', (6396, 6418), True, 'import tensorflowjs as tfjs\n'), ((8039, 8061), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (8059, 8061), True, 'import tensorflow as tf\n'), ((8157, 8212), 'tensorflowjs.converters.save_keras_model', 'tfjs.converters.save_keras_model', (['model1', 'self._tmp_dir'], {}), '(model1, self._tmp_dir)\n', (8189, 8212), True, 'import tensorflowjs as tfjs\n'), ((8298, 8320), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (8318, 8320), True, 'import tensorflow as tf\n'), ((9364, 9390), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['stderr'], {}), '(stderr)\n', (9382, 9390), True, 'import tensorflow as tf\n'), ((9709, 9735), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['stderr'], {}), '(stderr)\n', (9727, 9735), True, 'import tensorflow as tf\n'), ((9819, 9841), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (9839, 9841), True, 'import tensorflow as tf\n'), ((9958, 10009), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""', '"""model.h5"""'], {}), "(self._tmp_dir, 'keras_h5', 'model.h5')\n", (9970, 10009), False, 'import os\n'), ((10081, 10180), 'subprocess.Popen', 
'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras', h5_path, self._tmp_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'keras',\n h5_path, self._tmp_dir])\n", (10097, 10180), False, 'import subprocess\n'), ((12252, 12274), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (12272, 12274), True, 'import tensorflow as tf\n'), ((12391, 12442), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""', '"""model.h5"""'], {}), "(self._tmp_dir, 'keras_h5', 'model.h5')\n", (12403, 12442), False, 'import os\n'), ((12514, 12641), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras',\n '--split_weights_by_layer', h5_path, self._tmp_dir]"], {}), "(['tensorflowjs_converter', '--input_format', 'keras',\n '--split_weights_by_layer', h5_path, self._tmp_dir])\n", (12530, 12641), False, 'import subprocess\n'), ((15148, 15174), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['stderr'], {}), '(stderr)\n', (15166, 15174), True, 'import tensorflow as tf\n'), ((19228, 19267), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""'], {}), "(self._tmp_dir, 'keras_h5')\n", (19240, 19267), False, 'import os\n'), ((19369, 19391), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (19389, 19391), True, 'import tensorflow as tf\n'), ((20277, 20299), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (20297, 20299), True, 'import tensorflow as tf\n'), ((20317, 20353), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['new_h5_path'], {}), '(new_h5_path)\n', (20340, 20353), False, 'from tensorflow import keras\n'), ((20577, 20616), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""'], {}), "(self._tmp_dir, 'keras_h5')\n", (20589, 20616), False, 'import os\n'), ((20718, 20740), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (20738, 20740), True, 'import tensorflow as tf\n'), ((21200, 21222), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (21220, 21222), True, 'import tensorflow as tf\n'), ((21695, 21751), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (["('tensorflowjs %s' % tfjs.__version__)"], {}), "('tensorflowjs %s' % tfjs.__version__)\n", (21713, 21751), True, 'import tensorflow as tf\n'), ((21761, 21787), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['stdout'], {}), '(stdout)\n', (21779, 21787), True, 'import tensorflow as tf\n'), ((22037, 22093), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (["('tensorflowjs %s' % tfjs.__version__)"], {}), "('tensorflowjs %s' % tfjs.__version__)\n", (22055, 22093), True, 'import tensorflow as tf\n'), ((22103, 22129), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['stdout'], {}), '(stdout)\n', (22121, 22129), True, 'import tensorflow as tf\n'), ((22366, 22394), 'shutil.rmtree', 'shutil.rmtree', (['self._tmp_dir'], {}), '(self._tmp_dir)\n', (22379, 22394), False, 'import shutil\n'), ((22540, 22585), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['[2, 3]'], {'input_shape': '[6]'}), '([2, 3], input_shape=[6])\n', (22560, 22585), False, 'from tensorflow import keras\n'), ((22601, 22622), 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['(10)'], {}), '(10)\n', (22618, 22622), False, 'from tensorflow import keras\n'), ((22638, 22681), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (22656, 22681), 
False, 'from tensorflow import keras\n'), ((22788, 22846), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(6)'], {'input_shape': '[10]', 'activation': '"""relu"""'}), "(6, input_shape=[10], activation='relu')\n", (22806, 22846), False, 'from tensorflow import keras\n'), ((23045, 23071), 'tensorflow.keras.layers.Concatenate', 'keras.layers.Concatenate', ([], {}), '()\n', (23069, 23071), False, 'from tensorflow import keras\n'), ((23098, 23141), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (23116, 23141), False, 'from tensorflow import keras\n'), ((23314, 23336), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (23334, 23336), True, 'import tensorflow as tf\n'), ((23348, 23370), 'numpy.random.randn', 'np.random.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (23363, 23370), True, 'import numpy as np\n'), ((23559, 23618), 'tensorflow.keras.experimental.export_saved_model', 'keras.experimental.export_saved_model', (['model', 'self._tmp_dir'], {}), '(model, self._tmp_dir)\n', (23596, 23618), False, 'from tensorflow import keras\n'), ((23701, 23736), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs"""'], {}), "(self._tmp_dir, 'tfjs')\n", (23713, 23736), False, 'import os\n'), ((23814, 23933), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras_saved_model', self.\n _tmp_dir, tfjs_output_dir]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'keras_saved_model', self._tmp_dir, tfjs_output_dir])\n", (23830, 23933), False, 'import subprocess\n'), ((24057, 24100), 'os.path.join', 'os.path.join', (['tfjs_output_dir', '"""model.json"""'], {}), "(tfjs_output_dir, 'model.json')\n", (24069, 24100), False, 'import os\n'), ((24231, 24271), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""new_h5.h5"""'], {}), "(self._tmp_dir, 'new_h5.h5')\n", (24243, 24271), False, 'import os\n'), ((24288, 24437), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'tfjs_layers_model',\n '--output_format', 'keras', model_json_path, new_h5_path]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'tfjs_layers_model', '--output_format', 'keras', model_json_path,\n new_h5_path])\n", (24304, 24437), False, 'import subprocess\n'), ((24691, 24727), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['new_h5_path'], {}), '(new_h5_path)\n', (24714, 24727), False, 'from tensorflow import keras\n'), ((24902, 24924), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (24922, 24924), True, 'import tensorflow as tf\n'), ((24937, 24958), 'numpy.random.randn', 'np.random.randn', (['(4)', '(8)'], {}), '(4, 8)\n', (24952, 24958), True, 'import numpy as np\n'), ((24970, 24992), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (24985, 24992), True, 'import numpy as np\n'), ((25193, 25252), 'tensorflow.keras.experimental.export_saved_model', 'keras.experimental.export_saved_model', (['model', 'self._tmp_dir'], {}), '(model, self._tmp_dir)\n', (25230, 25252), False, 'from tensorflow import keras\n'), ((25335, 25370), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs"""'], {}), "(self._tmp_dir, 'tfjs')\n", (25347, 25370), False, 'import os\n'), ((25449, 25613), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras_saved_model',\n '--output_format', 'tfjs_layers_model', self._tmp_dir, tfjs_output_dir]"], {}), 
"(['tensorflowjs_converter', '--input_format',\n 'keras_saved_model', '--output_format', 'tfjs_layers_model', self.\n _tmp_dir, tfjs_output_dir])\n", (25465, 25613), False, 'import subprocess\n'), ((25742, 25785), 'os.path.join', 'os.path.join', (['tfjs_output_dir', '"""model.json"""'], {}), "(tfjs_output_dir, 'model.json')\n", (25754, 25785), False, 'import os\n'), ((25916, 25956), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""new_h5.h5"""'], {}), "(self._tmp_dir, 'new_h5.h5')\n", (25928, 25956), False, 'import os\n'), ((25973, 26122), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'tfjs_layers_model',\n '--output_format', 'keras', model_json_path, new_h5_path]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'tfjs_layers_model', '--output_format', 'keras', model_json_path,\n new_h5_path])\n", (25989, 26122), False, 'import subprocess\n'), ((26376, 26412), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['new_h5_path'], {}), '(new_h5_path)\n', (26399, 26412), False, 'from tensorflow import keras\n'), ((26586, 26608), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (26606, 26608), True, 'import tensorflow as tf\n'), ((26620, 26642), 'numpy.random.randn', 'np.random.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (26635, 26642), True, 'import numpy as np\n'), ((26831, 26890), 'tensorflow.keras.experimental.export_saved_model', 'keras.experimental.export_saved_model', (['model', 'self._tmp_dir'], {}), '(model, self._tmp_dir)\n', (26868, 26890), False, 'from tensorflow import keras\n'), ((26973, 27008), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs"""'], {}), "(self._tmp_dir, 'tfjs')\n", (26985, 27008), False, 'import os\n'), ((27075, 27236), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras', self._tmp_dir,\n tfjs_output_dir]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['tensorflowjs_converter', '--input_format', 'keras', self\n ._tmp_dir, tfjs_output_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n", (27091, 27236), False, 'import subprocess\n'), ((27573, 27595), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (27593, 27595), True, 'import tensorflow as tf\n'), ((27607, 27629), 'numpy.random.randn', 'np.random.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (27622, 27629), True, 'import numpy as np\n'), ((27919, 27978), 'tensorflow.keras.experimental.export_saved_model', 'keras.experimental.export_saved_model', (['model', 'self._tmp_dir'], {}), '(model, self._tmp_dir)\n', (27956, 27978), False, 'from tensorflow import keras\n'), ((28074, 28109), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs"""'], {}), "(self._tmp_dir, 'tfjs')\n", (28086, 28109), False, 'import os\n'), ((28187, 28306), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras_saved_model', self.\n _tmp_dir, tfjs_output_dir]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'keras_saved_model', self._tmp_dir, tfjs_output_dir])\n", (28203, 28306), False, 'import subprocess\n'), ((28674, 28717), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs_sharded"""'], {}), "(self._tmp_dir, 'tfjs_sharded')\n", (28686, 28717), False, 'import os\n'), ((29738, 29778), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""new_h5.h5"""'], {}), "(self._tmp_dir, 'new_h5.h5')\n", (29750, 29778), False, 'import os\n'), ((30076, 30098), 'tensorflow.compat.v1.Session', 
'tf.compat.v1.Session', ([], {}), '()\n', (30096, 30098), True, 'import tensorflow as tf\n'), ((30212, 30248), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['new_h5_path'], {}), '(new_h5_path)\n', (30235, 30248), False, 'from tensorflow import keras\n'), ((30411, 30433), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (30431, 30433), True, 'import tensorflow as tf\n'), ((30445, 30467), 'numpy.random.randn', 'np.random.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (30460, 30467), True, 'import numpy as np\n'), ((30757, 30816), 'tensorflow.keras.experimental.export_saved_model', 'keras.experimental.export_saved_model', (['model', 'self._tmp_dir'], {}), '(model, self._tmp_dir)\n', (30794, 30816), False, 'from tensorflow import keras\n'), ((30912, 30947), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs"""'], {}), "(self._tmp_dir, 'tfjs')\n", (30924, 30947), False, 'import os\n'), ((31025, 31144), 'subprocess.Popen', 'subprocess.Popen', (["['tensorflowjs_converter', '--input_format', 'keras_saved_model', self.\n _tmp_dir, tfjs_output_dir]"], {}), "(['tensorflowjs_converter', '--input_format',\n 'keras_saved_model', self._tmp_dir, tfjs_output_dir])\n", (31041, 31144), False, 'import subprocess\n'), ((31516, 31559), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""tfjs_sharded"""'], {}), "(self._tmp_dir, 'tfjs_sharded')\n", (31528, 31559), False, 'import os\n'), ((32140, 32172), 'os.path.getsize', 'os.path.getsize', (['weight_files[0]'], {}), '(weight_files[0])\n', (32155, 32172), False, 'import os\n'), ((32487, 32545), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""relu"""', 'input_shape': '[4]'}), "(10, activation='relu', input_shape=[4])\n", (32505, 32545), False, 'from tensorflow import keras\n'), ((32561, 32604), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (32579, 32604), False, 'from tensorflow import keras\n'), ((3047, 3069), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (3067, 3069), True, 'import tensorflow as tf\n'), ((5716, 5741), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5731, 5741), False, 'import os\n'), ((6550, 6562), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6559, 6562), False, 'import json\n'), ((8426, 8467), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model.json"""'], {}), "(self._tmp_dir, 'model.json')\n", (8438, 8467), False, 'import os\n'), ((8917, 8956), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""group*-*"""'], {}), "(self._tmp_dir, 'group*-*')\n", (8929, 8956), False, 'import os\n'), ((9901, 9940), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""'], {}), "(self._tmp_dir, 'keras_h5')\n", (9913, 9940), False, 'import os\n'), ((10416, 10428), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10425, 10428), False, 'import json\n'), ((12334, 12373), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""keras_h5"""'], {}), "(self._tmp_dir, 'keras_h5')\n", (12346, 12373), False, 'import os\n'), ((12877, 12889), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12886, 12889), False, 'import json\n'), ((14813, 14850), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""foo.h5"""'], {}), "(self._tmp_dir, 'foo.h5')\n", (14825, 14850), False, 'import os\n'), ((14864, 14901), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""output"""'], {}), "(self._tmp_dir, 'output')\n", (14876, 14901), 
False, 'import os\n'), ((15780, 15818), 'os.path.join', 'os.path.join', (['output_dir', '"""model.json"""'], {}), "(output_dir, 'model.json')\n", (15792, 15818), False, 'import os\n'), ((15969, 16005), 'os.path.join', 'os.path.join', (['output_dir', '"""group*-*"""'], {}), "(output_dir, 'group*-*')\n", (15981, 16005), False, 'import os\n'), ((16612, 16650), 'os.path.join', 'os.path.join', (['output_dir', '"""model.json"""'], {}), "(output_dir, 'model.json')\n", (16624, 16650), False, 'import os\n'), ((16801, 16837), 'os.path.join', 'os.path.join', (['output_dir', '"""group*-*"""'], {}), "(output_dir, 'group*-*')\n", (16813, 16837), False, 'import os\n'), ((17506, 17544), 'os.path.join', 'os.path.join', (['output_dir', '"""model.json"""'], {}), "(output_dir, 'model.json')\n", (17518, 17544), False, 'import os\n'), ((18223, 18259), 'os.path.join', 'os.path.join', (['output_dir', '"""group*-*"""'], {}), "(output_dir, 'group*-*')\n", (18235, 18259), False, 'import os\n'), ((18865, 18903), 'os.path.join', 'os.path.join', (['output_dir', '"""model.json"""'], {}), "(output_dir, 'model.json')\n", (18877, 18903), False, 'import os\n'), ((19054, 19090), 'os.path.join', 'os.path.join', (['output_dir', '"""group*-*"""'], {}), "(output_dir, 'group*-*')\n", (19066, 19090), False, 'import os\n'), ((20017, 20058), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model.json"""'], {}), "(self._tmp_dir, 'model.json')\n", (20029, 20058), False, 'import os\n'), ((21284, 21325), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model.json"""'], {}), "(self._tmp_dir, 'model.json')\n", (21296, 21325), False, 'import os\n'), ((24123, 24154), 'os.path.isfile', 'os.path.isfile', (['model_json_path'], {}), '(model_json_path)\n', (24137, 24154), False, 'import os\n'), ((24548, 24575), 'os.path.isfile', 'os.path.isfile', (['new_h5_path'], {}), '(new_h5_path)\n', (24562, 24575), False, 'import os\n'), ((25808, 25839), 'os.path.isfile', 'os.path.isfile', (['model_json_path'], {}), '(model_json_path)\n', (25822, 25839), False, 'import os\n'), ((26233, 26260), 'os.path.isfile', 'os.path.isfile', (['new_h5_path'], {}), '(new_h5_path)\n', (26247, 26260), False, 'import os\n'), ((27452, 27478), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['stderr'], {}), '(stderr)\n', (27470, 27478), True, 'import tensorflow as tf\n'), ((29332, 29350), 'os.path.getsize', 'os.path.getsize', (['f'], {}), '(f)\n', (29347, 29350), False, 'import os\n'), ((33392, 33443), 'os.path.join', 'os.path.join', (['layers_model_output_dir', '"""model.json"""'], {}), "(layers_model_output_dir, 'model.json')\n", (33404, 33443), False, 'import os\n'), ((33634, 33677), 'os.path.join', 'os.path.join', (['graph_model_dir', '"""model.json"""'], {}), "(graph_model_dir, 'model.json')\n", (33646, 33677), False, 'import os\n'), ((33725, 33768), 'os.path.join', 'os.path.join', (['graph_model_dir', '"""group*.bin"""'], {}), "(graph_model_dir, 'group*.bin')\n", (33737, 33768), False, 'import os\n'), ((6478, 6519), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model.json"""'], {}), "(self._tmp_dir, 'model.json')\n", (6490, 6519), False, 'import os\n'), ((8014, 8024), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8022, 8024), True, 'import tensorflow as tf\n'), ((8273, 8283), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8281, 8283), True, 'import tensorflow as tf\n'), ((9794, 9804), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9802, 9804), True, 'import tensorflow as tf\n'), ((10338, 10379), 'os.path.join', 'os.path.join', 
(['self._tmp_dir', '"""model.json"""'], {}), "(self._tmp_dir, 'model.json')\n", (10350, 10379), False, 'import os\n'), ((12227, 12237), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12235, 12237), True, 'import tensorflow as tf\n'), ((12799, 12840), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""model.json"""'], {}), "(self._tmp_dir, 'model.json')\n", (12811, 12840), False, 'import os\n'), ((19344, 19354), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (19352, 19354), True, 'import tensorflow as tf\n'), ((20252, 20262), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (20260, 20262), True, 'import tensorflow as tf\n'), ((20693, 20703), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (20701, 20703), True, 'import tensorflow as tf\n'), ((21175, 21185), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (21183, 21185), True, 'import tensorflow as tf\n'), ((23289, 23299), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (23297, 23299), True, 'import tensorflow as tf\n'), ((24877, 24887), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (24885, 24887), True, 'import tensorflow as tf\n'), ((26561, 26571), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (26569, 26571), True, 'import tensorflow as tf\n'), ((27548, 27558), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (27556, 27558), True, 'import tensorflow as tf\n'), ((28957, 29000), 'os.path.join', 'os.path.join', (['tfjs_output_dir', '"""model.json"""'], {}), "(tfjs_output_dir, 'model.json')\n", (28969, 29000), False, 'import os\n'), ((29212, 29257), 'os.path.join', 'os.path.join', (['sharded_model_dir', '"""group*.bin"""'], {}), "(sharded_model_dir, 'group*.bin')\n", (29224, 29257), False, 'import os\n'), ((29899, 29944), 'os.path.join', 'os.path.join', (['sharded_model_dir', '"""model.json"""'], {}), "(sharded_model_dir, 'model.json')\n", (29911, 29944), False, 'import os\n'), ((30051, 30061), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (30059, 30061), True, 'import tensorflow as tf\n'), ((30386, 30396), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (30394, 30396), True, 'import tensorflow as tf\n'), ((31769, 31812), 'os.path.join', 'os.path.join', (['tfjs_output_dir', '"""model.json"""'], {}), "(tfjs_output_dir, 'model.json')\n", (31781, 31812), False, 'import os\n'), ((32022, 32067), 'os.path.join', 'os.path.join', (['sharded_model_dir', '"""group*.bin"""'], {}), "(sharded_model_dir, 'group*.bin')\n", (32034, 32067), False, 'import os\n'), ((12110, 12147), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""group*"""'], {}), "(self._tmp_dir, 'group*')\n", (12122, 12147), False, 'import os\n'), ((14555, 14592), 'os.path.join', 'os.path.join', (['self._tmp_dir', '"""group*"""'], {}), "(self._tmp_dir, 'group*')\n", (14567, 14592), False, 'import os\n'), ((27879, 27889), 'numpy.size', 'np.size', (['w'], {}), '(w)\n', (27886, 27889), True, 'import numpy as np\n'), ((30717, 30727), 'numpy.size', 'np.size', (['w'], {}), '(w)\n', (30724, 30727), True, 'import numpy as np\n'), ((3345, 3461), 'tensorflow.compat.v1.saved_model.signature_def_utils.predict_signature_def', 'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def', ([], {'inputs': "{'x': x}", 'outputs': "{'output': output}"}), "(inputs={\n 'x': x}, outputs={'output': output})\n", (3411, 3461), True, 'import tensorflow as tf\n')]
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sys
sys.path.append("../")
from quelea import *
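# quelea's map_fields(coords, ftype, fparam) returns the six field components
# (ex, ey, ez, bx, by, bz) evaluated at every coordinate passed in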
nx = 217
ny = 133
x0 = 0
x1 = 30 # lambdas
y0 = 0
y1 = 20 # lambdas
xs = np.linspace(x0, x1, nx)
ys = np.linspace(y0, y1, ny)
# 2d array of (x, y, z, t)
coords = np.array( [ [x, y, 0, 0] for x in xs for y in ys ] )
# map_fields expects a flat 1D array, so reshape the (nx*ny, 4) coordinate array
coords = coords.reshape((4 * nx * ny,))
ftype = 1 # plane wave
a0 = 1 # normalized field amplitude
omega = 1 # frequency
fparam = [a0, 1, 0, 0, 0, 1, 0, 0, omega] # parameters of the plane wave
ex, ey, ez, bx, by, bz = map_fields(coords, ftype, fparam)
# now convert to 2d arrays
ex = ex.reshape((nx, ny))
ey = ey.reshape((nx, ny))
ez = ez.reshape((nx, ny))
bx = bx.reshape((nx, ny))
by = by.reshape((nx, ny))
bz = bz.reshape((nx, ny))
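# transpose to (ny, nx) so that rows correspond to y, as imshow expects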
ex = ex.transpose()
ey = ey.transpose()
ez = ez.transpose()
bx = bx.transpose()
by = by.transpose()
bz = bz.transpose()
plt.imshow(ey, cmap = 'RdYlBu', origin = 'lower', extent = [x0, x1, y0, y1])
plt.colorbar()
plt.clim(-a0, a0)
plt.savefig("map_fields.pdf")
|
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"numpy.array",
"numpy.linspace",
"sys.path.append"
] |
[((81, 103), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (96, 103), False, 'import sys\n'), ((201, 224), 'numpy.linspace', 'np.linspace', (['x0', 'x1', 'nx'], {}), '(x0, x1, nx)\n', (212, 224), True, 'import numpy as np\n'), ((230, 253), 'numpy.linspace', 'np.linspace', (['y0', 'y1', 'ny'], {}), '(y0, y1, ny)\n', (241, 253), True, 'import numpy as np\n'), ((291, 339), 'numpy.array', 'np.array', (['[[x, y, 0, 0] for x in xs for y in ys]'], {}), '([[x, y, 0, 0] for x in xs for y in ys])\n', (299, 339), True, 'import numpy as np\n'), ((974, 1044), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ey'], {'cmap': '"""RdYlBu"""', 'origin': '"""lower"""', 'extent': '[x0, x1, y0, y1]'}), "(ey, cmap='RdYlBu', origin='lower', extent=[x0, x1, y0, y1])\n", (984, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1065), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1063, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1083), 'matplotlib.pyplot.clim', 'plt.clim', (['(-a0)', 'a0'], {}), '(-a0, a0)\n', (1074, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1114), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""map_fields.pdf"""'], {}), "('map_fields.pdf')\n", (1096, 1114), True, 'import matplotlib.pyplot as plt\n')]
|
import h5py
import numpy as np
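# NOTE: threshold=np.nan only works on older NumPy releases; newer versions
# require an integer threshold (e.g. sys.maxsize)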
np.set_printoptions(threshold=np.nan)
from shutil import copyfile
copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained_bin.h5 using the data structure from dummy_lutnet.h5
bl = h5py.File("baseline_pruned.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_bin.h5", 'r+')
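# Copy trained weights, pruning masks and batch-norm statistics from the pruned
# baseline network (baseline_pruned.h5) into the freshly created LUTNet template
# (pretrained_bin.h5), one layer at a time.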
# dense layer 1
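# (plain binary weights here: Variable_1, gamma and the pruning mask are copied one-to-one)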
bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
pret_w1[...] = np.array(bl_w1)
p_gamma[...] = np.array(bl_gamma)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
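# report how many weights survive pruning versus the total weight count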
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 2
bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
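# one_fill / neg_one_fill presumably seed the per-LUT connection coefficients
# c1..c32 below with the alternating +1/-1 "connect1 only" pattern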
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
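# NOTE: the divisions below rely on Python 2 integer division; under Python 3
# use "//" so np.tile and the index arithmetic receive integers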
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
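# each tiled index is now offset into its own tile, giving absolute positions in
# the unrolled weight vector of the whole layer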
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
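# dense layers 3-5 repeat the same copy / shuffle / pruning-recovery procedure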
# dense layer 3
bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 4
bl_w1 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 5
bl_w1 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
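# batch-normalisation layers: beta, gamma and the moving statistics are copied over unchanged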
# bn 1
bl_beta = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 2
bl_beta = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 3
bl_beta = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 4
bl_beta = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 5
bl_beta = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
pretrained.close()
|
[
"numpy.tile",
"numpy.random.shuffle",
"numpy.reshape",
"numpy.ones",
"numpy.logical_and",
"numpy.logical_not",
"numpy.logical_or",
"h5py.File",
"numpy.argsort",
"numpy.array",
"shutil.copyfile",
"numpy.zeros",
"numpy.shape",
"numpy.arange",
"numpy.set_printoptions"
] |
[((31, 68), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (50, 68), True, 'import numpy as np\n'), ((99, 147), 'shutil.copyfile', 'copyfile', (['"""dummy_lutnet.h5"""', '"""pretrained_bin.h5"""'], {}), "('dummy_lutnet.h5', 'pretrained_bin.h5')\n", (107, 147), False, 'from shutil import copyfile\n'), ((211, 247), 'h5py.File', 'h5py.File', (['"""baseline_pruned.h5"""', '"""r"""'], {}), "('baseline_pruned.h5', 'r')\n", (220, 247), False, 'import h5py\n'), ((297, 333), 'h5py.File', 'h5py.File', (['"""pretrained_bin.h5"""', '"""r+"""'], {}), "('pretrained_bin.h5', 'r+')\n", (306, 333), False, 'import h5py\n'), ((949, 964), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (957, 964), True, 'import numpy as np\n'), ((980, 998), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (988, 998), True, 'import numpy as np\n'), ((1024, 1049), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (1032, 1049), True, 'import numpy as np\n'), ((5514, 5529), 'numpy.shape', 'np.shape', (['bl_w1'], {}), '(bl_w1)\n', (5522, 5529), True, 'import numpy as np\n'), ((5543, 5560), 'numpy.shape', 'np.shape', (['pret_c1'], {}), '(pret_c1)\n', (5551, 5560), True, 'import numpy as np\n'), ((5573, 5593), 'numpy.zeros', 'np.zeros', (['tile_shape'], {}), '(tile_shape)\n', (5581, 5593), True, 'import numpy as np\n'), ((5605, 5624), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (5612, 5624), True, 'import numpy as np\n'), ((5714, 5729), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (5722, 5729), True, 'import numpy as np\n'), ((5738, 5753), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (5746, 5753), True, 'import numpy as np\n'), ((5768, 5792), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (5777, 5792), True, 'import numpy as np\n'), ((5793, 5822), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_0'], {}), '(rand_map_0)\n', (5810, 5822), True, 'import numpy as np\n'), ((5836, 5860), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (5845, 5860), True, 'import numpy as np\n'), ((5861, 5890), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_1'], {}), '(rand_map_1)\n', (5878, 5890), True, 'import numpy as np\n'), ((5904, 5928), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (5913, 5928), True, 'import numpy as np\n'), ((5929, 5958), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_2'], {}), '(rand_map_2)\n', (5946, 5958), True, 'import numpy as np\n'), ((6026, 6066), 'numpy.logical_not', 'np.logical_not', (['pruning_mask[rand_map_0]'], {}), '(pruning_mask[rand_map_0])\n', (6040, 6066), True, 'import numpy as np\n'), ((6169, 6218), 'numpy.logical_or', 'np.logical_or', (['pruning_mask', 'pruning_mask_recover'], {}), '(pruning_mask, pruning_mask_recover)\n', (6182, 6218), True, 'import numpy as np\n'), ((6231, 6264), 'numpy.reshape', 'np.reshape', (['init_mask', 'tile_shape'], {}), '(init_mask, tile_shape)\n', (6241, 6264), True, 'import numpy as np\n'), ((6327, 6381), 'numpy.tile', 'np.tile', (['rand_map_0', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_0, [weight_shape[0] / tile_shape[0]])\n', (6334, 6381), True, 'import numpy as np\n'), ((6399, 6453), 'numpy.tile', 'np.tile', (['rand_map_1', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_1, [weight_shape[0] / tile_shape[0]])\n', (6406, 6453), True, 'import numpy as np\n'), ((6471, 6525), 'numpy.tile', 'np.tile', 
(['rand_map_2', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_2, [weight_shape[0] / tile_shape[0]])\n', (6478, 6525), True, 'import numpy as np\n'), ((7198, 7236), 'numpy.reshape', 'np.reshape', (['bl_w1_rand_0', 'weight_shape'], {}), '(bl_w1_rand_0, weight_shape)\n', (7208, 7236), True, 'import numpy as np\n'), ((8681, 8699), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (8689, 8699), True, 'import numpy as np\n'), ((8718, 8736), 'numpy.array', 'np.array', (['bl_means'], {}), '(bl_means)\n', (8726, 8736), True, 'import numpy as np\n'), ((8762, 8787), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (8770, 8787), True, 'import numpy as np\n'), ((13604, 13619), 'numpy.shape', 'np.shape', (['bl_w1'], {}), '(bl_w1)\n', (13612, 13619), True, 'import numpy as np\n'), ((13633, 13650), 'numpy.shape', 'np.shape', (['pret_c1'], {}), '(pret_c1)\n', (13641, 13650), True, 'import numpy as np\n'), ((13663, 13683), 'numpy.zeros', 'np.zeros', (['tile_shape'], {}), '(tile_shape)\n', (13671, 13683), True, 'import numpy as np\n'), ((13695, 13714), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (13702, 13714), True, 'import numpy as np\n'), ((13804, 13819), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (13812, 13819), True, 'import numpy as np\n'), ((13828, 13843), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (13836, 13843), True, 'import numpy as np\n'), ((13858, 13882), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (13867, 13882), True, 'import numpy as np\n'), ((13883, 13912), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_0'], {}), '(rand_map_0)\n', (13900, 13912), True, 'import numpy as np\n'), ((13926, 13950), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (13935, 13950), True, 'import numpy as np\n'), ((13951, 13980), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_1'], {}), '(rand_map_1)\n', (13968, 13980), True, 'import numpy as np\n'), ((13994, 14018), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (14003, 14018), True, 'import numpy as np\n'), ((14019, 14048), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_2'], {}), '(rand_map_2)\n', (14036, 14048), True, 'import numpy as np\n'), ((14116, 14156), 'numpy.logical_not', 'np.logical_not', (['pruning_mask[rand_map_0]'], {}), '(pruning_mask[rand_map_0])\n', (14130, 14156), True, 'import numpy as np\n'), ((14259, 14308), 'numpy.logical_or', 'np.logical_or', (['pruning_mask', 'pruning_mask_recover'], {}), '(pruning_mask, pruning_mask_recover)\n', (14272, 14308), True, 'import numpy as np\n'), ((14321, 14354), 'numpy.reshape', 'np.reshape', (['init_mask', 'tile_shape'], {}), '(init_mask, tile_shape)\n', (14331, 14354), True, 'import numpy as np\n'), ((14417, 14471), 'numpy.tile', 'np.tile', (['rand_map_0', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_0, [weight_shape[0] / tile_shape[0]])\n', (14424, 14471), True, 'import numpy as np\n'), ((14489, 14543), 'numpy.tile', 'np.tile', (['rand_map_1', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_1, [weight_shape[0] / tile_shape[0]])\n', (14496, 14543), True, 'import numpy as np\n'), ((14561, 14615), 'numpy.tile', 'np.tile', (['rand_map_2', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_2, [weight_shape[0] / tile_shape[0]])\n', (14568, 14615), True, 'import numpy as np\n'), ((15288, 15326), 'numpy.reshape', 'np.reshape', (['bl_w1_rand_0', 'weight_shape'], {}), '(bl_w1_rand_0, 
weight_shape)\n', (15298, 15326), True, 'import numpy as np\n'), ((16771, 16789), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (16779, 16789), True, 'import numpy as np\n'), ((16808, 16826), 'numpy.array', 'np.array', (['bl_means'], {}), '(bl_means)\n', (16816, 16826), True, 'import numpy as np\n'), ((16852, 16877), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (16860, 16877), True, 'import numpy as np\n'), ((21694, 21709), 'numpy.shape', 'np.shape', (['bl_w1'], {}), '(bl_w1)\n', (21702, 21709), True, 'import numpy as np\n'), ((21723, 21740), 'numpy.shape', 'np.shape', (['pret_c1'], {}), '(pret_c1)\n', (21731, 21740), True, 'import numpy as np\n'), ((21753, 21773), 'numpy.zeros', 'np.zeros', (['tile_shape'], {}), '(tile_shape)\n', (21761, 21773), True, 'import numpy as np\n'), ((21785, 21804), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (21792, 21804), True, 'import numpy as np\n'), ((21894, 21909), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (21902, 21909), True, 'import numpy as np\n'), ((21918, 21933), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (21926, 21933), True, 'import numpy as np\n'), ((21948, 21972), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (21957, 21972), True, 'import numpy as np\n'), ((21973, 22002), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_0'], {}), '(rand_map_0)\n', (21990, 22002), True, 'import numpy as np\n'), ((22016, 22040), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (22025, 22040), True, 'import numpy as np\n'), ((22041, 22070), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_1'], {}), '(rand_map_1)\n', (22058, 22070), True, 'import numpy as np\n'), ((22084, 22108), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (22093, 22108), True, 'import numpy as np\n'), ((22109, 22138), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_2'], {}), '(rand_map_2)\n', (22126, 22138), True, 'import numpy as np\n'), ((22206, 22246), 'numpy.logical_not', 'np.logical_not', (['pruning_mask[rand_map_0]'], {}), '(pruning_mask[rand_map_0])\n', (22220, 22246), True, 'import numpy as np\n'), ((22349, 22398), 'numpy.logical_or', 'np.logical_or', (['pruning_mask', 'pruning_mask_recover'], {}), '(pruning_mask, pruning_mask_recover)\n', (22362, 22398), True, 'import numpy as np\n'), ((22411, 22444), 'numpy.reshape', 'np.reshape', (['init_mask', 'tile_shape'], {}), '(init_mask, tile_shape)\n', (22421, 22444), True, 'import numpy as np\n'), ((22507, 22561), 'numpy.tile', 'np.tile', (['rand_map_0', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_0, [weight_shape[0] / tile_shape[0]])\n', (22514, 22561), True, 'import numpy as np\n'), ((22579, 22633), 'numpy.tile', 'np.tile', (['rand_map_1', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_1, [weight_shape[0] / tile_shape[0]])\n', (22586, 22633), True, 'import numpy as np\n'), ((22651, 22705), 'numpy.tile', 'np.tile', (['rand_map_2', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_2, [weight_shape[0] / tile_shape[0]])\n', (22658, 22705), True, 'import numpy as np\n'), ((23378, 23416), 'numpy.reshape', 'np.reshape', (['bl_w1_rand_0', 'weight_shape'], {}), '(bl_w1_rand_0, weight_shape)\n', (23388, 23416), True, 'import numpy as np\n'), ((24861, 24879), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (24869, 24879), True, 'import numpy as np\n'), ((24898, 24916), 'numpy.array', 'np.array', (['bl_means'], 
{}), '(bl_means)\n', (24906, 24916), True, 'import numpy as np\n'), ((24942, 24967), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (24950, 24967), True, 'import numpy as np\n'), ((29784, 29799), 'numpy.shape', 'np.shape', (['bl_w1'], {}), '(bl_w1)\n', (29792, 29799), True, 'import numpy as np\n'), ((29813, 29830), 'numpy.shape', 'np.shape', (['pret_c1'], {}), '(pret_c1)\n', (29821, 29830), True, 'import numpy as np\n'), ((29843, 29863), 'numpy.zeros', 'np.zeros', (['tile_shape'], {}), '(tile_shape)\n', (29851, 29863), True, 'import numpy as np\n'), ((29875, 29894), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (29882, 29894), True, 'import numpy as np\n'), ((29984, 29999), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (29992, 29999), True, 'import numpy as np\n'), ((30008, 30023), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (30016, 30023), True, 'import numpy as np\n'), ((30038, 30062), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (30047, 30062), True, 'import numpy as np\n'), ((30063, 30092), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_0'], {}), '(rand_map_0)\n', (30080, 30092), True, 'import numpy as np\n'), ((30106, 30130), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (30115, 30130), True, 'import numpy as np\n'), ((30131, 30160), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_1'], {}), '(rand_map_1)\n', (30148, 30160), True, 'import numpy as np\n'), ((30174, 30198), 'numpy.arange', 'np.arange', (['tile_shape[0]'], {}), '(tile_shape[0])\n', (30183, 30198), True, 'import numpy as np\n'), ((30199, 30228), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_map_2'], {}), '(rand_map_2)\n', (30216, 30228), True, 'import numpy as np\n'), ((30296, 30336), 'numpy.logical_not', 'np.logical_not', (['pruning_mask[rand_map_0]'], {}), '(pruning_mask[rand_map_0])\n', (30310, 30336), True, 'import numpy as np\n'), ((30439, 30488), 'numpy.logical_or', 'np.logical_or', (['pruning_mask', 'pruning_mask_recover'], {}), '(pruning_mask, pruning_mask_recover)\n', (30452, 30488), True, 'import numpy as np\n'), ((30501, 30534), 'numpy.reshape', 'np.reshape', (['init_mask', 'tile_shape'], {}), '(init_mask, tile_shape)\n', (30511, 30534), True, 'import numpy as np\n'), ((30597, 30651), 'numpy.tile', 'np.tile', (['rand_map_0', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_0, [weight_shape[0] / tile_shape[0]])\n', (30604, 30651), True, 'import numpy as np\n'), ((30669, 30723), 'numpy.tile', 'np.tile', (['rand_map_1', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_1, [weight_shape[0] / tile_shape[0]])\n', (30676, 30723), True, 'import numpy as np\n'), ((30741, 30795), 'numpy.tile', 'np.tile', (['rand_map_2', '[weight_shape[0] / tile_shape[0]]'], {}), '(rand_map_2, [weight_shape[0] / tile_shape[0]])\n', (30748, 30795), True, 'import numpy as np\n'), ((31468, 31506), 'numpy.reshape', 'np.reshape', (['bl_w1_rand_0', 'weight_shape'], {}), '(bl_w1_rand_0, weight_shape)\n', (31478, 31506), True, 'import numpy as np\n'), ((32951, 32969), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (32959, 32969), True, 'import numpy as np\n'), ((32988, 33006), 'numpy.array', 'np.array', (['bl_means'], {}), '(bl_means)\n', (32996, 33006), True, 'import numpy as np\n'), ((33032, 33057), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (33040, 33057), True, 'import numpy as np\n'), ((34347, 34364), 'numpy.array', 'np.array', 
(['bl_beta'], {}), '(bl_beta)\n', (34355, 34364), True, 'import numpy as np\n'), ((34380, 34398), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (34388, 34398), True, 'import numpy as np\n'), ((34420, 34444), 'numpy.array', 'np.array', (['bl_moving_mean'], {}), '(bl_moving_mean)\n', (34428, 34444), True, 'import numpy as np\n'), ((34470, 34498), 'numpy.array', 'np.array', (['bl_moving_variance'], {}), '(bl_moving_variance)\n', (34478, 34498), True, 'import numpy as np\n'), ((35347, 35364), 'numpy.array', 'np.array', (['bl_beta'], {}), '(bl_beta)\n', (35355, 35364), True, 'import numpy as np\n'), ((35380, 35398), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (35388, 35398), True, 'import numpy as np\n'), ((35420, 35444), 'numpy.array', 'np.array', (['bl_moving_mean'], {}), '(bl_moving_mean)\n', (35428, 35444), True, 'import numpy as np\n'), ((35470, 35498), 'numpy.array', 'np.array', (['bl_moving_variance'], {}), '(bl_moving_variance)\n', (35478, 35498), True, 'import numpy as np\n'), ((36347, 36364), 'numpy.array', 'np.array', (['bl_beta'], {}), '(bl_beta)\n', (36355, 36364), True, 'import numpy as np\n'), ((36380, 36398), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (36388, 36398), True, 'import numpy as np\n'), ((36420, 36444), 'numpy.array', 'np.array', (['bl_moving_mean'], {}), '(bl_moving_mean)\n', (36428, 36444), True, 'import numpy as np\n'), ((36470, 36498), 'numpy.array', 'np.array', (['bl_moving_variance'], {}), '(bl_moving_variance)\n', (36478, 36498), True, 'import numpy as np\n'), ((37347, 37364), 'numpy.array', 'np.array', (['bl_beta'], {}), '(bl_beta)\n', (37355, 37364), True, 'import numpy as np\n'), ((37380, 37398), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (37388, 37398), True, 'import numpy as np\n'), ((37420, 37444), 'numpy.array', 'np.array', (['bl_moving_mean'], {}), '(bl_moving_mean)\n', (37428, 37444), True, 'import numpy as np\n'), ((37470, 37498), 'numpy.array', 'np.array', (['bl_moving_variance'], {}), '(bl_moving_variance)\n', (37478, 37498), True, 'import numpy as np\n'), ((38347, 38364), 'numpy.array', 'np.array', (['bl_beta'], {}), '(bl_beta)\n', (38355, 38364), True, 'import numpy as np\n'), ((38380, 38398), 'numpy.array', 'np.array', (['bl_gamma'], {}), '(bl_gamma)\n', (38388, 38398), True, 'import numpy as np\n'), ((38420, 38444), 'numpy.array', 'np.array', (['bl_moving_mean'], {}), '(bl_moving_mean)\n', (38428, 38444), True, 'import numpy as np\n'), ((38470, 38498), 'numpy.array', 'np.array', (['bl_moving_variance'], {}), '(bl_moving_variance)\n', (38478, 38498), True, 'import numpy as np\n'), ((5641, 5660), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (5648, 5660), True, 'import numpy as np\n'), ((6090, 6129), 'numpy.logical_and', 'np.logical_and', (['pruning_mask', 'init_mask'], {}), '(pruning_mask, init_mask)\n', (6104, 6129), True, 'import numpy as np\n'), ((6130, 6152), 'numpy.argsort', 'np.argsort', (['rand_map_0'], {}), '(rand_map_0)\n', (6140, 6152), True, 'import numpy as np\n'), ((13731, 13750), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (13738, 13750), True, 'import numpy as np\n'), ((14180, 14219), 'numpy.logical_and', 'np.logical_and', (['pruning_mask', 'init_mask'], {}), '(pruning_mask, init_mask)\n', (14194, 14219), True, 'import numpy as np\n'), ((14220, 14242), 'numpy.argsort', 'np.argsort', (['rand_map_0'], {}), '(rand_map_0)\n', (14230, 14242), True, 'import numpy as np\n'), ((21821, 21840), 'numpy.ones', 'np.ones', 
(['tile_shape'], {}), '(tile_shape)\n', (21828, 21840), True, 'import numpy as np\n'), ((22270, 22309), 'numpy.logical_and', 'np.logical_and', (['pruning_mask', 'init_mask'], {}), '(pruning_mask, init_mask)\n', (22284, 22309), True, 'import numpy as np\n'), ((22310, 22332), 'numpy.argsort', 'np.argsort', (['rand_map_0'], {}), '(rand_map_0)\n', (22320, 22332), True, 'import numpy as np\n'), ((29911, 29930), 'numpy.ones', 'np.ones', (['tile_shape'], {}), '(tile_shape)\n', (29918, 29930), True, 'import numpy as np\n'), ((30360, 30399), 'numpy.logical_and', 'np.logical_and', (['pruning_mask', 'init_mask'], {}), '(pruning_mask, init_mask)\n', (30374, 30399), True, 'import numpy as np\n'), ((30400, 30422), 'numpy.argsort', 'np.argsort', (['rand_map_0'], {}), '(rand_map_0)\n', (30410, 30422), True, 'import numpy as np\n'), ((635, 650), 'numpy.array', 'np.array', (['bl_w1'], {}), '(bl_w1)\n', (643, 650), True, 'import numpy as np\n'), ((1064, 1089), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (1072, 1089), True, 'import numpy as np\n'), ((5975, 6000), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (5983, 6000), True, 'import numpy as np\n'), ((8485, 8516), 'numpy.reshape', 'np.reshape', (['rand_map_0', '(-1, 1)'], {}), '(rand_map_0, (-1, 1))\n', (8495, 8516), True, 'import numpy as np\n'), ((8553, 8584), 'numpy.reshape', 'np.reshape', (['rand_map_1', '(-1, 1)'], {}), '(rand_map_1, (-1, 1))\n', (8563, 8584), True, 'import numpy as np\n'), ((8621, 8652), 'numpy.reshape', 'np.reshape', (['rand_map_2', '(-1, 1)'], {}), '(rand_map_2, (-1, 1))\n', (8631, 8652), True, 'import numpy as np\n'), ((8809, 8847), 'numpy.reshape', 'np.reshape', (['rand_map_0_expand', '[-1, 1]'], {}), '(rand_map_0_expand, [-1, 1])\n', (8819, 8847), True, 'import numpy as np\n'), ((8926, 8964), 'numpy.reshape', 'np.reshape', (['rand_map_1_expand', '[-1, 1]'], {}), '(rand_map_1_expand, [-1, 1])\n', (8936, 8964), True, 'import numpy as np\n'), ((9043, 9081), 'numpy.reshape', 'np.reshape', (['rand_map_2_expand', '[-1, 1]'], {}), '(rand_map_2_expand, [-1, 1])\n', (9053, 9081), True, 'import numpy as np\n'), ((9154, 9179), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (9162, 9179), True, 'import numpy as np\n'), ((14065, 14090), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (14073, 14090), True, 'import numpy as np\n'), ((16575, 16606), 'numpy.reshape', 'np.reshape', (['rand_map_0', '(-1, 1)'], {}), '(rand_map_0, (-1, 1))\n', (16585, 16606), True, 'import numpy as np\n'), ((16643, 16674), 'numpy.reshape', 'np.reshape', (['rand_map_1', '(-1, 1)'], {}), '(rand_map_1, (-1, 1))\n', (16653, 16674), True, 'import numpy as np\n'), ((16711, 16742), 'numpy.reshape', 'np.reshape', (['rand_map_2', '(-1, 1)'], {}), '(rand_map_2, (-1, 1))\n', (16721, 16742), True, 'import numpy as np\n'), ((16899, 16937), 'numpy.reshape', 'np.reshape', (['rand_map_0_expand', '[-1, 1]'], {}), '(rand_map_0_expand, [-1, 1])\n', (16909, 16937), True, 'import numpy as np\n'), ((17016, 17054), 'numpy.reshape', 'np.reshape', (['rand_map_1_expand', '[-1, 1]'], {}), '(rand_map_1_expand, [-1, 1])\n', (17026, 17054), True, 'import numpy as np\n'), ((17133, 17171), 'numpy.reshape', 'np.reshape', (['rand_map_2_expand', '[-1, 1]'], {}), '(rand_map_2_expand, [-1, 1])\n', (17143, 17171), True, 'import numpy as np\n'), ((17244, 17269), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (17252, 17269), True, 'import numpy as np\n'), 
((22155, 22180), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (22163, 22180), True, 'import numpy as np\n'), ((24665, 24696), 'numpy.reshape', 'np.reshape', (['rand_map_0', '(-1, 1)'], {}), '(rand_map_0, (-1, 1))\n', (24675, 24696), True, 'import numpy as np\n'), ((24733, 24764), 'numpy.reshape', 'np.reshape', (['rand_map_1', '(-1, 1)'], {}), '(rand_map_1, (-1, 1))\n', (24743, 24764), True, 'import numpy as np\n'), ((24801, 24832), 'numpy.reshape', 'np.reshape', (['rand_map_2', '(-1, 1)'], {}), '(rand_map_2, (-1, 1))\n', (24811, 24832), True, 'import numpy as np\n'), ((24989, 25027), 'numpy.reshape', 'np.reshape', (['rand_map_0_expand', '[-1, 1]'], {}), '(rand_map_0_expand, [-1, 1])\n', (24999, 25027), True, 'import numpy as np\n'), ((25106, 25144), 'numpy.reshape', 'np.reshape', (['rand_map_1_expand', '[-1, 1]'], {}), '(rand_map_1_expand, [-1, 1])\n', (25116, 25144), True, 'import numpy as np\n'), ((25223, 25261), 'numpy.reshape', 'np.reshape', (['rand_map_2_expand', '[-1, 1]'], {}), '(rand_map_2_expand, [-1, 1])\n', (25233, 25261), True, 'import numpy as np\n'), ((25334, 25359), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (25342, 25359), True, 'import numpy as np\n'), ((30245, 30270), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (30253, 30270), True, 'import numpy as np\n'), ((32755, 32786), 'numpy.reshape', 'np.reshape', (['rand_map_0', '(-1, 1)'], {}), '(rand_map_0, (-1, 1))\n', (32765, 32786), True, 'import numpy as np\n'), ((32823, 32854), 'numpy.reshape', 'np.reshape', (['rand_map_1', '(-1, 1)'], {}), '(rand_map_1, (-1, 1))\n', (32833, 32854), True, 'import numpy as np\n'), ((32891, 32922), 'numpy.reshape', 'np.reshape', (['rand_map_2', '(-1, 1)'], {}), '(rand_map_2, (-1, 1))\n', (32901, 32922), True, 'import numpy as np\n'), ((33079, 33117), 'numpy.reshape', 'np.reshape', (['rand_map_0_expand', '[-1, 1]'], {}), '(rand_map_0_expand, [-1, 1])\n', (33089, 33117), True, 'import numpy as np\n'), ((33196, 33234), 'numpy.reshape', 'np.reshape', (['rand_map_1_expand', '[-1, 1]'], {}), '(rand_map_1_expand, [-1, 1])\n', (33206, 33234), True, 'import numpy as np\n'), ((33313, 33351), 'numpy.reshape', 'np.reshape', (['rand_map_2_expand', '[-1, 1]'], {}), '(rand_map_2_expand, [-1, 1])\n', (33323, 33351), True, 'import numpy as np\n'), ((33424, 33449), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (33432, 33449), True, 'import numpy as np\n'), ((1109, 1134), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (1117, 1134), True, 'import numpy as np\n'), ((9199, 9224), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (9207, 9224), True, 'import numpy as np\n'), ((17289, 17314), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (17297, 17314), True, 'import numpy as np\n'), ((25379, 25404), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (25387, 25404), True, 'import numpy as np\n'), ((33469, 33494), 'numpy.array', 'np.array', (['bl_pruning_mask'], {}), '(bl_pruning_mask)\n', (33477, 33494), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from src.si.util.util import label_gen
__all__ = ['Dataset']
class Dataset:
def __init__(self, X=None, Y=None,
xnames: list = None,
yname: str = None):
""" Tabular Dataset"""
if X is None:
raise Exception("Trying to instanciate a DataSet without any data")
self.X = X
self.Y = Y
self.xnames = xnames if xnames else label_gen(X.shape[1])
self.yname = yname if yname else 'Y'
@classmethod
def from_data(cls, filename, sep=",", labeled=True):
"""Creates a DataSet from a data file.
:param filename: The filename
:type filename: str
:param sep: attributes separator, defaults to ","
:type sep: str, optional
:return: A DataSet object
:rtype: DataSet
"""
data = np.genfromtxt(filename, delimiter=sep)
if labeled:
X = data[:, 0:-1]
Y = data[:, -1]
else:
X = data
Y = None
return cls(X, Y)
@classmethod
def from_dataframe(cls, df, ylabel=None):
"""Creates a DataSet from a pandas dataframe.
        :param df: DataFrame holding the features and, optionally, the label column
        :type df: pandas.DataFrame
        :param ylabel: name of the label column, defaults to None
        :type ylabel: str, optional
        :return: A DataSet object
        :rtype: DataSet
"""
if ylabel and ylabel in df.columns:
            X = df.loc[:, df.columns != ylabel].to_numpy()  # convert to a numpy array
Y = df.loc[:, ylabel].to_numpy()
# xnames = df.columns.tolist().remove(ylabel)
yname = ylabel
xnames = df.columns.tolist()
for name in xnames:
if name == yname:
xnames.remove(yname)
else:
X = df.to_numpy()
Y = None
xnames = df.columns.tolist()
yname = None
return cls(X, Y, xnames, yname)
def __len__(self):
"""Returns the number of data points."""
return self.X.shape[0]
def hasLabel(self):
"""Returns True if the dataset constains labels (a dependent variable)"""
return self.Y is not None
def getNumFeatures(self):
"""Returns the number of features"""
return self.X.shape[1]
def getNumClasses(self):
"""Returns the number of label classes or 0 if the dataset has no dependent variable."""
return len(np.unique(self.Y)) if self.hasLabel() else 0
def writeDataset(self, filename, sep=","):
"""Saves the dataset to a file
:param filename: The output file path
:type filename: str
:param sep: The fields separator, defaults to ","
:type sep: str, optional
"""
fullds = np.hstack((self.X, self.Y.reshape(len(self.Y), 1)))
np.savetxt(filename, fullds, delimiter=sep)
def toDataframe(self):
""" Converts the dataset into a pandas DataFrame"""
if self.hasLabel():
df = pd.DataFrame(np.hstack((self.X, self.Y.reshape(len(self.Y), 1))), columns=self.xnames[:]+[self.yname]) #columns=np.hstack((self.xnames, self.yname)))
else:
df = pd.DataFrame(self.X.copy(), columns=self.xnames[:])
return df
def getXy(self):
return self.X, self.Y
def summary(dataset, format='df'):
""" Returns the statistics of a dataset(mean, std, max, min)
:param dataset: A Dataset object
:type dataset: si.data.Dataset
:param format: Output format ('df':DataFrame, 'dict':dictionary ), defaults to 'df'
:type format: str, optional
"""
if format not in ["df", "dict"]:
raise Exception("Invalid format. Choose between 'df' and 'dict'.")
if dataset.hasLabel():
data = np.hstack((dataset.X, dataset.Y.reshape(len(dataset.Y), 1)))
#data = np.hstack([dataset.X, np.reshape(dataset.Y, (-1, 1))])
columns = dataset.xnames[:] + [dataset.yname]
else:
data = dataset.X
columns = dataset.xnames[:]
stats = {}
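    # When labels are strings, compute numeric statistics for the feature columns only.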
    if dataset.hasLabel() and type(dataset.Y[0]) is str:
        for i in range(data.shape[1]-1):  # iterate over the feature columns only
_means = np.mean(data[:, i], axis=0)
_vars = np.var(data[:, i], axis=0)
_maxs = np.max(data[:, i], axis=0)
_mins = np.min(data[:, i], axis=0)
stat = {"mean": _means,
"var": _vars,
"max": _maxs,
"min": _mins
}
stats[columns[i]] = stat
else:
        for i in range(data.shape[1]):  # iterate over all columns
_means = np.mean(data[:, i], axis=0)
_vars = np.var(data[:, i], axis=0)
_maxs = np.max(data[:, i], axis=0)
_mins = np.min(data[:, i], axis=0)
stat = {"mean": _means,
"var": _vars,
"max": _maxs,
"min": _mins
}
stats[columns[i]] = stat
# _means = np.mean(data, axis=0)
# _vars = np.var(data, axis=0)
# _maxs = np.max(data, axis=0)
# _mins = np.min(data, axis=0)
# stats = {}
# for i in range(data.shape[1]):
# stat = {"mean": _means[i],
# "var": _vars[i],
# "max": _maxs[i],
# "min": _mins[i]
# }
# stats[columns[i]] = stat
if format == "dict":
return stats
else:
return pd.DataFrame(stats)
|
[
"src.si.util.util.label_gen",
"numpy.mean",
"numpy.unique",
"numpy.max",
"numpy.savetxt",
"numpy.min",
"pandas.DataFrame",
"numpy.genfromtxt",
"numpy.var"
] |
[((878, 916), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': 'sep'}), '(filename, delimiter=sep)\n', (891, 916), True, 'import numpy as np\n'), ((2885, 2928), 'numpy.savetxt', 'np.savetxt', (['filename', 'fullds'], {'delimiter': 'sep'}), '(filename, fullds, delimiter=sep)\n', (2895, 2928), True, 'import numpy as np\n'), ((5482, 5501), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {}), '(stats)\n', (5494, 5501), True, 'import pandas as pd\n'), ((446, 467), 'src.si.util.util.label_gen', 'label_gen', (['X.shape[1]'], {}), '(X.shape[1])\n', (455, 467), False, 'from src.si.util.util import label_gen\n'), ((4202, 4229), 'numpy.mean', 'np.mean', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4209, 4229), True, 'import numpy as np\n'), ((4250, 4276), 'numpy.var', 'np.var', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4256, 4276), True, 'import numpy as np\n'), ((4297, 4323), 'numpy.max', 'np.max', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4303, 4323), True, 'import numpy as np\n'), ((4344, 4370), 'numpy.min', 'np.min', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4350, 4370), True, 'import numpy as np\n'), ((4652, 4679), 'numpy.mean', 'np.mean', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4659, 4679), True, 'import numpy as np\n'), ((4700, 4726), 'numpy.var', 'np.var', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4706, 4726), True, 'import numpy as np\n'), ((4747, 4773), 'numpy.max', 'np.max', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4753, 4773), True, 'import numpy as np\n'), ((4794, 4820), 'numpy.min', 'np.min', (['data[:, i]'], {'axis': '(0)'}), '(data[:, i], axis=0)\n', (4800, 4820), True, 'import numpy as np\n'), ((2497, 2514), 'numpy.unique', 'np.unique', (['self.Y'], {}), '(self.Y)\n', (2506, 2514), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
import utils
logger = logging.getLogger("indexconverter")
class IndexConverter(object):
def __init__(self, ndim, ngrid):
self.ndim = ndim
self.ngrid = ngrid
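        # Mixed-radix strides used to map between flat bin indices and per-dimension grid indices.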
self._modulus = [(ngrid - 1) ** (ndim - j - 1) for j in range(ndim)]
self._zerodim = np.zeros((self.ndim,))
self.nbins = int(np.rint((ngrid - 1) ** ndim))
def convert_to_vector(self, grid):
if grid.shape[0] != self.ngrid - 1:
raise Exception("Wrong dimension of grid. Expect length fo %s got %s" % (self.ngrid - 1, grid.shape[0]))
vector = np.empty((self.nbins,))
for bin_idx in range(self.nbins):
vector[bin_idx] = grid[tuple(self.convert_to_grid_idx(bin_idx))]
return vector
def convert_to_grid(self, vector):
grid_shape = tuple(np.zeros(self.ndim).astype(int) + (self.ngrid - 1))
if len(vector.shape) > 1:
grids = np.empty((len(vector),) + grid_shape)
for idx, v in enumerate(vector):
grids[idx] = self.convert_to_grid(v)
return grids
else:
grid = np.zeros(grid_shape)
for idx in range(len(vector)):
grid[tuple(self.convert_to_grid_idx(idx))] = vector[idx]
return grid
def convert_to_grid_idx(self, bin_idx):
if bin_idx >= self.nbins or bin_idx < 0:
print(self.nbins, self.ndim, self.nbins ** self.ndim)
raise Exception("Invalid index %s. You are probably outside the grid..." % bin_idx)
grid_idx = ((self._zerodim + bin_idx) / self._modulus) % (self.ngrid - 1)
return grid_idx.astype(int)
def convert_to_bin_idx(self, grid_idx):
bin_idx = utils.rint(np.sum(grid_idx * self._modulus))
if bin_idx >= self.nbins or bin_idx < 0:
raise Exception(
"Invalid bin index %s. You are probably outside the grid. Size:%s" % (bin_idx, self.nbins))
return bin_idx
|
[
"logging.basicConfig",
"logging.getLogger",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.rint"
] |
[((93, 249), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(name)s-%(levelname)s: %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(stream=sys.stdout, level=logging.DEBUG, format=\n '%(asctime)s %(name)s-%(levelname)s: %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (112, 249), False, 'import logging\n'), ((298, 333), 'logging.getLogger', 'logging.getLogger', (['"""indexconverter"""'], {}), "('indexconverter')\n", (315, 333), False, 'import logging\n'), ((556, 578), 'numpy.zeros', 'np.zeros', (['(self.ndim,)'], {}), '((self.ndim,))\n', (564, 578), True, 'import numpy as np\n'), ((852, 875), 'numpy.empty', 'np.empty', (['(self.nbins,)'], {}), '((self.nbins,))\n', (860, 875), True, 'import numpy as np\n'), ((604, 632), 'numpy.rint', 'np.rint', (['((ngrid - 1) ** ndim)'], {}), '((ngrid - 1) ** ndim)\n', (611, 632), True, 'import numpy as np\n'), ((1384, 1404), 'numpy.zeros', 'np.zeros', (['grid_shape'], {}), '(grid_shape)\n', (1392, 1404), True, 'import numpy as np\n'), ((1993, 2025), 'numpy.sum', 'np.sum', (['(grid_idx * self._modulus)'], {}), '(grid_idx * self._modulus)\n', (1999, 2025), True, 'import numpy as np\n'), ((1084, 1103), 'numpy.zeros', 'np.zeros', (['self.ndim'], {}), '(self.ndim)\n', (1092, 1103), True, 'import numpy as np\n')]
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import FrozenSet, Callable, List, Sequence, Any, Union, Dict
import numpy as np
import networkx as nx
import cirq
from cirq import _compat, GridQubit, LineQubit
from cirq.ops import NamedQubit
from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset
@cirq.value.value_equality
class PasqalDevice(cirq.devices.Device):
"""A generic Pasqal device.
The most general of Pasqal devices, enforcing only restrictions expected to
be shared by all future devices. Serves as the parent class of all Pasqal
devices, but can also be used on its own for hosting a nearly unconstrained
device. When used as a circuit's device, the qubits have to be of the type
cirq.NamedQubit and assumed to be all connected, the idea behind it being
that after submission, all optimization and transpilation necessary for its
execution on the specified device are handled internally by Pasqal.
"""
def __init__(self, qubits: Sequence[cirq.Qid]) -> None:
"""Initializes a device with some qubits.
Args:
qubits (NamedQubit): Qubits on the device, exclusively unrelated to
a physical position.
Raises:
TypeError: If the wrong qubit type is provided.
ValueError: If the number of qubits is greater than the devices maximum.
"""
if len(qubits) > 0:
q_type = type(qubits[0])
for q in qubits:
if not isinstance(q, self.supported_qubit_type):
raise TypeError(
'Unsupported qubit type: {!r}. This device '
'supports qubit types: {}'.format(q, self.supported_qubit_type)
)
if not type(q) is q_type:
raise TypeError("All qubits must be of same type.")
if len(qubits) > self.maximum_qubit_number:
raise ValueError(
'Too many qubits. {} accepts at most {} '
'qubits.'.format(type(self), self.maximum_qubit_number)
)
self.gateset = PasqalGateset()
self.qubits = qubits
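        # The device metadata records all-to-all connectivity between the qubits.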
self._metadata = cirq.DeviceMetadata(
qubits, nx.from_edgelist([(a, b) for a in qubits for b in qubits if a != b])
)
# pylint: enable=missing-raises-doc
@property
def supported_qubit_type(self):
return (NamedQubit,)
@property
def maximum_qubit_number(self):
return 100
@property
def metadata(self):
return self._metadata
@_compat.deprecated(fix='Use metadata.qubit_set() if applicable.', deadline='v0.15')
def qubit_set(self) -> FrozenSet[cirq.Qid]:
return frozenset(self.qubits)
def qubit_list(self):
return [qubit for qubit in self.qubits]
def is_pasqal_device_op(self, op: cirq.Operation) -> bool:
if not isinstance(op, cirq.Operation):
raise ValueError('Got unknown operation:', op)
return op in self.gateset
def validate_operation(self, operation: cirq.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: The operation to validate.
Raises:
ValueError: If the operation is not valid.
NotImplementedError: If the operation is a measurement with an invert
mask.
"""
if not isinstance(operation, cirq.GateOperation):
raise ValueError("Unsupported operation")
if not self.is_pasqal_device_op(operation):
raise ValueError(f'{operation.gate!r} is not a supported gate')
for qub in operation.qubits:
if not isinstance(qub, self.supported_qubit_type):
raise ValueError(
'{} is not a valid qubit for gate {!r}. This '
'device accepts gates on qubits of type: '
'{}'.format(qub, operation.gate, self.supported_qubit_type)
)
if qub not in self.metadata.qubit_set:
raise ValueError(f'{qub} is not part of the device.')
if isinstance(operation.gate, cirq.MeasurementGate):
if operation.gate.invert_mask != ():
raise NotImplementedError(
"Measurements on Pasqal devices don't support invert_mask."
)
def validate_circuit(self, circuit: 'cirq.AbstractCircuit') -> None:
"""Raises an error if the given circuit is invalid on this device.
A circuit is invalid if any of its moments are invalid or if there
is a non-empty moment after a moment with a measurement.
Args:
circuit: The circuit to validate
Raises:
ValueError: If the given circuit can't be run on this device
"""
super().validate_circuit(circuit)
# Measurements must be in the last non-empty moment
has_measurement_occurred = False
for moment in circuit:
if has_measurement_occurred:
if len(moment.operations) > 0:
raise ValueError("Non-empty moment after measurement")
for operation in moment.operations:
if isinstance(operation.gate, cirq.MeasurementGate):
has_measurement_occurred = True
def __repr__(self):
return f'pasqal.PasqalDevice(qubits={sorted(self.qubits)!r})'
def _value_equality_values_(self):
return self.qubits
def _json_dict_(self):
return cirq.protocols.obj_to_dict_helper(self, ['qubits'])
class PasqalVirtualDevice(PasqalDevice):
"""A Pasqal virtual device with qubits in 3d.
A virtual representation of a Pasqal device, enforcing the constraints
typically found in a physical device. The qubits can be positioned in 3d
space, although 2d layouts will be supported sooner and are thus
recommended. Only accepts qubits with physical placement.
"""
def __init__(
self, control_radius: float, qubits: Sequence[Union[ThreeDQubit, GridQubit, LineQubit]]
) -> None:
"""Initializes a device with some qubits.
Args:
control_radius: the maximum distance between qubits for a controlled
gate. Distance is measured in units of the coordinates passed
into the qubit constructor.
qubits: Qubits on the device, identified by their x, y, z position.
Must be of type ThreeDQubit, TwoDQubit, LineQubit or GridQubit.
Raises:
ValueError: if the wrong qubit type is provided or if invalid
parameter is provided for control_radius."""
super().__init__(qubits)
if not control_radius >= 0:
raise ValueError('Control_radius needs to be a non-negative float.')
if len(self.qubits) > 1:
if control_radius > 3.0 * self.minimal_distance():
raise ValueError(
'Control_radius cannot be larger than 3 times'
' the minimal distance between qubits.'
)
self.control_radius = control_radius
self.gateset = PasqalGateset(include_additional_controlled_ops=False)
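        # Gates in this set are subject to the control_radius distance check in validate_operation.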
self.controlled_gateset = cirq.Gateset(cirq.AnyIntegerPowerGateFamily(cirq.CZPowGate))
@property
def supported_qubit_type(self):
return (ThreeDQubit, TwoDQubit, GridQubit, LineQubit)
def validate_operation(self, operation: cirq.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: the operation to validate
Raises:
ValueError: If the operation is not valid
"""
super().validate_operation(operation)
# Verify that a controlled gate operation is valid
if operation in self.controlled_gateset:
for p in operation.qubits:
for q in operation.qubits:
if self.distance(p, q) > self.control_radius:
raise ValueError(f"Qubits {p!r}, {q!r} are too far away")
def validate_moment(self, moment: cirq.Moment):
"""Raises an error if the given moment is invalid on this device.
Args:
moment: The moment to validate.
Raises:
ValueError: If the given moment is invalid.
"""
super().validate_moment(moment)
if len(moment) > 1:
for operation in moment:
if not isinstance(operation.gate, cirq.MeasurementGate):
raise ValueError("Cannot do simultaneous gates. Use cirq.InsertStrategy.NEW.")
def minimal_distance(self) -> float:
"""Returns the minimal distance between two qubits in qubits.
Args:
qubits: qubit involved in the distance computation
Raises:
ValueError: If the device has only one qubit
Returns:
The minimal distance between qubits, in spacial coordinate units.
"""
if len(self.qubits) <= 1:
raise ValueError("Two qubits to compute a minimal distance.")
return min([self.distance(q1, q2) for q1 in self.qubits for q2 in self.qubits if q1 != q2])
def distance(self, p: Any, q: Any) -> float:
"""Returns the distance between two qubits.
Args:
p: qubit involved in the distance computation
q: qubit involved in the distance computation
Raises:
ValueError: If p or q not part of the device
Returns:
The distance between qubits p and q.
"""
all_qubits = self.qubit_list()
if p not in all_qubits or q not in all_qubits:
raise ValueError("Qubit not part of the device.")
if isinstance(p, GridQubit):
return np.sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)
if isinstance(p, LineQubit):
return abs(p.x - q.x)
return np.sqrt((p.x - q.x) ** 2 + (p.y - q.y) ** 2 + (p.z - q.z) ** 2)
def __repr__(self):
return ('pasqal.PasqalVirtualDevice(control_radius={!r}, qubits={!r})').format(
self.control_radius, sorted(self.qubits)
)
def _value_equality_values_(self) -> Any:
return (self.control_radius, self.qubits)
def _json_dict_(self) -> Dict[str, Any]:
return cirq.protocols.obj_to_dict_helper(self, ['control_radius', 'qubits'])
@_compat.deprecated_class(
deadline='v0.16', fix='Use cirq.optimize_for_target_gateset(circuit, gateset=PasqalGateset()).'
)
class PasqalConverter(cirq.neutral_atoms.ConvertToNeutralAtomGates):
"""A gate converter for compatibility with Pasqal processors.
Modified version of ConvertToNeutralAtomGates, where a new 'convert' method
'pasqal_convert' takes the 'keep' function as an input.
"""
def pasqal_convert(
self, op: cirq.Operation, keep: Callable[[cirq.Operation], bool]
) -> List[cirq.Operation]:
def on_stuck_raise(bad):
return TypeError(
"Don't know how to work with {!r}. "
"It isn't a native PasqalDevice operation, "
"a 1 or 2 qubit gate with a known unitary, "
"or composite.".format(bad)
)
return cirq.protocols.decompose(
op,
keep=keep,
intercepting_decomposer=self._convert_one,
on_stuck_raise=None if self.ignore_failures else on_stuck_raise,
)
|
[
"numpy.sqrt",
"cirq.protocols.decompose",
"cirq.AnyIntegerPowerGateFamily",
"cirq._compat.deprecated",
"networkx.from_edgelist",
"cirq._compat.deprecated_class",
"cirq.protocols.obj_to_dict_helper",
"cirq_pasqal.PasqalGateset"
] |
[((10990, 11116), 'cirq._compat.deprecated_class', '_compat.deprecated_class', ([], {'deadline': '"""v0.16"""', 'fix': '"""Use cirq.optimize_for_target_gateset(circuit, gateset=PasqalGateset())."""'}), "(deadline='v0.16', fix=\n 'Use cirq.optimize_for_target_gateset(circuit, gateset=PasqalGateset()).')\n", (11014, 11116), False, 'from cirq import _compat, GridQubit, LineQubit\n'), ((3091, 3179), 'cirq._compat.deprecated', '_compat.deprecated', ([], {'fix': '"""Use metadata.qubit_set() if applicable."""', 'deadline': '"""v0.15"""'}), "(fix='Use metadata.qubit_set() if applicable.', deadline=\n 'v0.15')\n", (3109, 3179), False, 'from cirq import _compat, GridQubit, LineQubit\n'), ((2636, 2651), 'cirq_pasqal.PasqalGateset', 'PasqalGateset', ([], {}), '()\n', (2649, 2651), False, 'from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset\n'), ((6074, 6125), 'cirq.protocols.obj_to_dict_helper', 'cirq.protocols.obj_to_dict_helper', (['self', "['qubits']"], {}), "(self, ['qubits'])\n", (6107, 6125), False, 'import cirq\n'), ((7717, 7771), 'cirq_pasqal.PasqalGateset', 'PasqalGateset', ([], {'include_additional_controlled_ops': '(False)'}), '(include_additional_controlled_ops=False)\n', (7730, 7771), False, 'from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset\n'), ((10519, 10582), 'numpy.sqrt', 'np.sqrt', (['((p.x - q.x) ** 2 + (p.y - q.y) ** 2 + (p.z - q.z) ** 2)'], {}), '((p.x - q.x) ** 2 + (p.y - q.y) ** 2 + (p.z - q.z) ** 2)\n', (10526, 10582), True, 'import numpy as np\n'), ((10917, 10986), 'cirq.protocols.obj_to_dict_helper', 'cirq.protocols.obj_to_dict_helper', (['self', "['control_radius', 'qubits']"], {}), "(self, ['control_radius', 'qubits'])\n", (10950, 10986), False, 'import cirq\n'), ((11843, 11999), 'cirq.protocols.decompose', 'cirq.protocols.decompose', (['op'], {'keep': 'keep', 'intercepting_decomposer': 'self._convert_one', 'on_stuck_raise': '(None if self.ignore_failures else on_stuck_raise)'}), '(op, keep=keep, intercepting_decomposer=self.\n _convert_one, on_stuck_raise=None if self.ignore_failures else\n on_stuck_raise)\n', (11867, 11999), False, 'import cirq\n'), ((2747, 2815), 'networkx.from_edgelist', 'nx.from_edgelist', (['[(a, b) for a in qubits for b in qubits if a != b]'], {}), '([(a, b) for a in qubits for b in qubits if a != b])\n', (2763, 2815), True, 'import networkx as nx\n'), ((7819, 7865), 'cirq.AnyIntegerPowerGateFamily', 'cirq.AnyIntegerPowerGateFamily', (['cirq.CZPowGate'], {}), '(cirq.CZPowGate)\n', (7849, 7865), False, 'import cirq\n'), ((10378, 10430), 'numpy.sqrt', 'np.sqrt', (['((p.row - q.row) ** 2 + (p.col - q.col) ** 2)'], {}), '((p.row - q.row) ** 2 + (p.col - q.col) ** 2)\n', (10385, 10430), True, 'import numpy as np\n')]
|
import base64
import io
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import numpy as np
import tensorflow as tf
from PIL import Image
from constants import CLASSES
import yaml
with open('app.yaml') as yaml_data :
params = yaml.safe_load(yaml_data)
IMAGE_WIDTH = params['IMAGE_WIDTH']
IMAGE_HEIGHT = params['IMAGE_HEIGHT']
PATH_MODEL = params['PATH_MODEL']
# Load DNN model
classifier = tf.keras.models.load_model(PATH_MODEL)
def classify_image(image, model, image_box=None):
"""Classify image by model
Parameters
----------
    image: PIL image to classify
    model: tf/keras classifier
    image_box: optional (x1, y1, x2, y2) box used to crop the image when resizing
Returns
-------
class id returned by model classifier
"""
images_list = []
image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)
# box argument clips image to (x1, y1, x2, y2)
image = np.array(image)
images_list.append(image)
return model.predict_classes(np.array(images_list))
app = dash.Dash('Traffic Signs Recognition', external_stylesheets=[dbc.themes.BOOTSTRAP])
pre_style = {
    'wordBreak': 'break-all',
    'whiteSpace': 'normal'
}
# Define application layout
navbar = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem('Réseau de Neurones', header=True),
dbc.DropdownMenuItem('SVM', href="#"),
],
nav=True,
in_navbar=True,
label='Modèle',
),
],
brand="Menu",
brand_href="#",
color= "#d90054",
dark=True
)
cards = html.Div(
[
dbc.Card(
dbc.CardBody(
[
html.H5("Présentation", className="card-title"),
html.P(
[
'Cette application à pour but de réaliser des modèles capables de classer des panneaux de signalisation allemand à partir d\'une image. L\'application fonctionne de la manière suivante : vous déposer une image à l\'emplacement indiqué et la prédiction du modèle apparait immédiatement en dessous. En haut à droite vous pouvez sélectionner le modèle que vous voulez tester.',
],
className='card-text',
),
]
),
className='w-75 mb-3',
color='#f1cbd1',
outline='Black',
style={
'margin-top': '75px',
'margin-left': '185px'},
),
]
)
app.layout = html.Div([
html.Div([navbar]),
html.Div(cards),
dcc.Upload(
id='bouton-chargement',
children=html.Div([
'Cliquer-déposer ou ',
html.A('sélectionner une image')
]),
style={
'width': '50%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin-top': '75px',
'margin-left': '370px',
}
),
html.Div(id='mon-image'),
html.Div(id='ma-zone-resultat')
])
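# Callback: classify the uploaded image and display it together with the predicted class.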
@app.callback(Output('mon-image', 'children'),
[Input('bouton-chargement', 'contents')])
def update_output(contents):
if contents is not None:
content_type, content_string = contents.split(',')
if 'image' in content_type:
image = Image.open(io.BytesIO(base64.b64decode(content_string)))
predicted_class = classify_image(image, classifier)[0]
return html.Div([
html.Hr(style={'margin-top': '75px'}),
html.Img(src=contents, style={'margin-left': '750px'}),
html.H4('Classe prédite : {}'.format(CLASSES[predicted_class]), style={'textAlign': 'center'}),
html.Hr(),
#html.Div('Raw Content'),
#html.Pre(contents, style=pre_style)
])
else:
try:
                # Decode the image sent as base 64 (case of ppm files)
                # base-64 file --> PIL image
image = Image.open(io.BytesIO(base64.b64decode(content_string)))
                # PIL image --> PNG conversion --> in-memory buffer
buffer = io.BytesIO()
image.save(buffer, format='PNG')
                # in-memory buffer --> base-64 image
buffer.seek(0)
img_bytes = buffer.read()
content_string = base64.b64encode(img_bytes).decode('ascii')
                # Run the classification model
predicted_class = classify_image(image, classifier)[0]
                # Display the image
return html.Div([
html.Hr(style={'margin-top': '75px'}),
html.Img(src='data:image/png;base64,' + content_string, style={'margin-left': '750px'}),
html.H4('Classe prédite : {}'.format(CLASSES[predicted_class]), style={'textAlign': 'center'}),
html.Hr(),
])
except:
return html.Div([
html.Hr(),
html.Div('Uniquement des images svp : {}'.format(content_type)),
html.Hr(),
html.Div('Raw Content'),
html.Pre(contents, style=pre_style)
])
# Manage interactions with callbacks
@app.callback(
Output(component_id='ma-zone-resultat', component_property='children'),
[Input(component_id='mon-champ-texte', component_property='value')]
)
def update_output_div(input_value):
return html.H3('Valeur saisie ici "{}"'.format(input_value))
# Start the application
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"dash_bootstrap_components.DropdownMenuItem",
"dash.dependencies.Output",
"base64.b64encode",
"io.BytesIO",
"dash_html_components.H5",
"base64.b64decode",
"dash.dependencies.Input",
"dash_html_components.Pre",
"yaml.safe_load",
"numpy.array",
"dash_html_components.Div",
"tensorflow.keras.models.load_model",
"dash_html_components.Img",
"dash_html_components.Hr",
"dash_html_components.P",
"dash.Dash",
"dash_html_components.A"
] |
[((524, 562), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['PATH_MODEL'], {}), '(PATH_MODEL)\n', (550, 562), True, 'import tensorflow as tf\n'), ((1083, 1171), 'dash.Dash', 'dash.Dash', (['"""Traffic Signs Recognition"""'], {'external_stylesheets': '[dbc.themes.BOOTSTRAP]'}), "('Traffic Signs Recognition', external_stylesheets=[dbc.themes.\n BOOTSTRAP])\n", (1092, 1171), False, 'import dash\n'), ((353, 378), 'yaml.safe_load', 'yaml.safe_load', (['yaml_data'], {}), '(yaml_data)\n', (367, 378), False, 'import yaml\n'), ((974, 989), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (982, 989), True, 'import numpy as np\n'), ((3345, 3376), 'dash.dependencies.Output', 'Output', (['"""mon-image"""', '"""children"""'], {}), "('mon-image', 'children')\n", (3351, 3376), False, 'from dash.dependencies import Input, Output\n'), ((5671, 5741), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""ma-zone-resultat"""', 'component_property': '"""children"""'}), "(component_id='ma-zone-resultat', component_property='children')\n", (5677, 5741), False, 'from dash.dependencies import Input, Output\n'), ((1052, 1073), 'numpy.array', 'np.array', (['images_list'], {}), '(images_list)\n', (1060, 1073), True, 'import numpy as np\n'), ((2702, 2720), 'dash_html_components.Div', 'html.Div', (['[navbar]'], {}), '([navbar])\n', (2710, 2720), True, 'import dash_html_components as html\n'), ((2731, 2746), 'dash_html_components.Div', 'html.Div', (['cards'], {}), '(cards)\n', (2739, 2746), True, 'import dash_html_components as html\n'), ((3265, 3289), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""mon-image"""'}), "(id='mon-image')\n", (3273, 3289), True, 'import dash_html_components as html\n'), ((3295, 3326), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""ma-zone-resultat"""'}), "(id='ma-zone-resultat')\n", (3303, 3326), True, 'import dash_html_components as html\n'), ((3393, 3431), 'dash.dependencies.Input', 'Input', (['"""bouton-chargement"""', '"""contents"""'], {}), "('bouton-chargement', 'contents')\n", (3398, 3431), False, 'from dash.dependencies import Input, Output\n'), ((5748, 5813), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""mon-champ-texte"""', 'component_property': '"""value"""'}), "(component_id='mon-champ-texte', component_property='value')\n", (5753, 5813), False, 'from dash.dependencies import Input, Output\n'), ((4471, 4483), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4481, 4483), False, 'import io\n'), ((1819, 1866), 'dash_html_components.H5', 'html.H5', (['"""Présentation"""'], {'className': '"""card-title"""'}), "('Présentation', className='card-title')\n", (1826, 1866), True, 'import dash_html_components as html\n'), ((1888, 2301), 'dash_html_components.P', 'html.P', (['["Cette application à pour but de réaliser des modèles capables de classer des panneaux de signalisation allemand à partir d\'une image. L\'application fonctionne de la manière suivante : vous déposer une image à l\'emplacement indiqué et la prédiction du modèle apparait immédiatement en dessous. En haut à droite vous pouvez sélectionner le modèle que vous voulez tester."\n ]'], {'className': '"""card-text"""'}), '([\n "Cette application à pour but de réaliser des modèles capables de classer des panneaux de signalisation allemand à partir d\'une image. L\'application fonctionne de la manière suivante : vous déposer une image à l\'emplacement indiqué et la prédiction du modèle apparait immédiatement en dessous. 
En haut à droite vous pouvez sélectionner le modèle que vous voulez tester."\n ], className=\'card-text\')\n', (1894, 2301), True, 'import dash_html_components as html\n'), ((3629, 3661), 'base64.b64decode', 'base64.b64decode', (['content_string'], {}), '(content_string)\n', (3645, 3661), False, 'import base64\n'), ((3777, 3814), 'dash_html_components.Hr', 'html.Hr', ([], {'style': "{'margin-top': '75px'}"}), "(style={'margin-top': '75px'})\n", (3784, 3814), True, 'import dash_html_components as html\n'), ((3832, 3886), 'dash_html_components.Img', 'html.Img', ([], {'src': 'contents', 'style': "{'margin-left': '750px'}"}), "(src=contents, style={'margin-left': '750px'})\n", (3840, 3886), True, 'import dash_html_components as html\n'), ((4016, 4025), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (4023, 4025), True, 'import dash_html_components as html\n'), ((1411, 1466), 'dash_bootstrap_components.DropdownMenuItem', 'dbc.DropdownMenuItem', (['"""Réseau de Neurones"""'], {'header': '(True)'}), "('Réseau de Neurones', header=True)\n", (1431, 1466), True, 'import dash_bootstrap_components as dbc\n'), ((1484, 1521), 'dash_bootstrap_components.DropdownMenuItem', 'dbc.DropdownMenuItem', (['"""SVM"""'], {'href': '"""#"""'}), "('SVM', href='#')\n", (1504, 1521), True, 'import dash_bootstrap_components as dbc\n'), ((2879, 2911), 'dash_html_components.A', 'html.A', (['"""sélectionner une image"""'], {}), "('sélectionner une image')\n", (2885, 2911), True, 'import dash_html_components as html\n'), ((4344, 4376), 'base64.b64decode', 'base64.b64decode', (['content_string'], {}), '(content_string)\n', (4360, 4376), False, 'import base64\n'), ((4690, 4717), 'base64.b64encode', 'base64.b64encode', (['img_bytes'], {}), '(img_bytes)\n', (4706, 4717), False, 'import base64\n'), ((4950, 4987), 'dash_html_components.Hr', 'html.Hr', ([], {'style': "{'margin-top': '75px'}"}), "(style={'margin-top': '75px'})\n", (4957, 4987), True, 'import dash_html_components as html\n'), ((5009, 5101), 'dash_html_components.Img', 'html.Img', ([], {'src': "('data:image/png;base64,' + content_string)", 'style': "{'margin-left': '750px'}"}), "(src='data:image/png;base64,' + content_string, style={\n 'margin-left': '750px'})\n", (5017, 5101), True, 'import dash_html_components as html\n'), ((5234, 5243), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (5241, 5243), True, 'import dash_html_components as html\n'), ((5338, 5347), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (5345, 5347), True, 'import dash_html_components as html\n'), ((5454, 5463), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (5461, 5463), True, 'import dash_html_components as html\n'), ((5501, 5524), 'dash_html_components.Div', 'html.Div', (['"""Raw Content"""'], {}), "('Raw Content')\n", (5509, 5524), True, 'import dash_html_components as html\n'), ((5546, 5581), 'dash_html_components.Pre', 'html.Pre', (['contents'], {'style': 'pre_style'}), '(contents, style=pre_style)\n', (5554, 5581), True, 'import dash_html_components as html\n')]
|
import gym.envs.mujoco.hopper as hopper
import numpy as np
class HopperEnv(hopper.HopperEnv):
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
])
def reset_obs(self, obs):
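        # _get_obs drops qpos[0] (the root x position), so re-insert a zero placeholder before splitting into qpos and qvel.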
state = np.insert(obs, 0, 0.)
qpos = state[:self.model.nq]
qvel = state[self.model.nq:]
self.set_state(qpos, qvel)
return self._get_obs()
|
[
"numpy.insert",
"numpy.concatenate"
] |
[((135, 205), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat]'], {}), '([self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat])\n', (149, 205), True, 'import numpy as np\n'), ((288, 310), 'numpy.insert', 'np.insert', (['obs', '(0)', '(0.0)'], {}), '(obs, 0, 0.0)\n', (297, 310), True, 'import numpy as np\n')]
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase, rand_array
class TestPartitionOps(TestCase):
def test_configs(self):
# (main dims, partitions, main type, [list of (extra dims, type)])
configs = [
((10, ), 3),
((4, ), 10),
((10, 10), 4),
((100, ), 2),
((5, ), 1),
((1, ), 1),
((2, 10), 2),
]
suffixes = [
[],
[((2, 2), np.float32)],
[((3, ), np.int64), ((2, ), np.float32)],
]
return [
(main_dims, parts, main_type, extra, pack)
for main_dims, parts in configs
for main_type in [np.int32, np.int64] for extra in suffixes
for pack in [False, True]
]
def testPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
ins = ['in' + str(i) for i in range(1 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(1 + len(extra_ins))
]
op = core.CreateOperator(
'Partition', ins, outs, pack_first_input=(1 if pack else 0))
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i], d)
x.append(d)
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
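                        # pack_first_input: the reference stores the first input integer-divided by the number of partitions.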
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
print(x)
print(ref)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
# test inverse operation (GatherByKey)
if len(main_dims) == 1:
# currently only 1D key tensor supported
for i in range(len(extra_ins)):
expected_out = ins[i + 1]
gather_ins = [ins[0]] + [
outs[len(ins) * p + i + 1] for p in range(parts)]
actual_out = expected_out + '_actual'
op = core.CreateOperator(
'GatherByKey', gather_ins, actual_out)
workspace.RunOperatorOnce(op)
expected = workspace.FetchBlob(expected_out)
actual = workspace.FetchBlob(actual_out)
np.testing.assert_array_equal(expected, actual)
def testLengthsPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
# For LengthsSharding only 1-D tensors supported as a first input
if len(main_dims) > 1:
continue
ins = ['in' + str(i) for i in range(2 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(2 + len(extra_ins))
]
op = core.CreateOperator(
'LengthsPartition', ins, outs,
pack_first_input=(1 if pack else 0)
)
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i + 1], d)
x.append(d)
# Randomly generate length tensor as well
elements = np.random.randint(2, 10)
lengths = []
total_length = 0
for _ in range(elements - 1):
lengths.append(np.random.randint(main_dims[0] - total_length))
total_length += lengths[-1]
lengths.append(main_dims[0] - total_length)
workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
if __name__ == "__main__":
import unittest
unittest.main()
|
[
"caffe2.python.test_util.rand_array",
"caffe2.python.workspace.RunOperatorOnce",
"caffe2.python.workspace.FetchBlob",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"numpy.stack",
"numpy.empty",
"caffe2.python.core.CreateOperator",
"unittest.main",
"numpy.testing.assert_array_equal",
"caffe2.python.workspace.FeedBlob"
] |
[((7638, 7653), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7651, 7653), False, 'import unittest\n'), ((2004, 2082), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Partition"""', 'ins', 'outs'], {'pack_first_input': '(1 if pack else 0)'}), "('Partition', ins, outs, pack_first_input=1 if pack else 0)\n", (2023, 2082), False, 'from caffe2.python import core, workspace\n'), ((3473, 3502), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (3498, 3502), False, 'from caffe2.python import core, workspace\n'), ((5009, 5098), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""LengthsPartition"""', 'ins', 'outs'], {'pack_first_input': '(1 if pack else 0)'}), "('LengthsPartition', ins, outs, pack_first_input=1 if\n pack else 0)\n", (5028, 5098), False, 'from caffe2.python import core, workspace\n'), ((5625, 5649), 'numpy.random.randint', 'np.random.randint', (['(2)', '(10)'], {}), '(2, 10)\n', (5642, 5649), True, 'import numpy as np\n'), ((7355, 7384), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (7380, 7384), False, 'from caffe2.python import core, workspace\n'), ((2444, 2473), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['ins[i]', 'd'], {}), '(ins[i], d)\n', (2462, 2473), False, 'from caffe2.python import core, workspace\n'), ((5485, 5518), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', (['ins[i + 1]', 'd'], {}), '(ins[i + 1], d)\n', (5503, 5518), False, 'from caffe2.python import core, workspace\n'), ((5964, 5997), 'numpy.array', 'np.array', (['lengths'], {'dtype': 'np.int32'}), '(lengths, dtype=np.int32)\n', (5972, 5997), True, 'import numpy as np\n'), ((2269, 2300), 'caffe2.python.test_util.rand_array', 'rand_array', (['*(main_dims + dims)'], {}), '(*(main_dims + dims))\n', (2279, 2300), False, 'from caffe2.python.test_util import TestCase, rand_array\n'), ((2347, 2393), 'numpy.random.randint', 'np.random.randint', (['(-100)', '(100)', '(main_dims + dims)'], {}), '(-100, 100, main_dims + dims)\n', (2364, 2393), True, 'import numpy as np\n'), ((3704, 3729), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['name'], {}), '(name)\n', (3723, 3729), False, 'from caffe2.python import core, workspace\n'), ((4190, 4248), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""GatherByKey"""', 'gather_ins', 'actual_out'], {}), "('GatherByKey', gather_ins, actual_out)\n", (4209, 4248), False, 'from caffe2.python import core, workspace\n'), ((4294, 4323), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (4319, 4323), False, 'from caffe2.python import core, workspace\n'), ((4355, 4388), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['expected_out'], {}), '(expected_out)\n', (4374, 4388), False, 'from caffe2.python import core, workspace\n'), ((4418, 4449), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['actual_out'], {}), '(actual_out)\n', (4437, 4449), False, 'from caffe2.python import core, workspace\n'), ((4470, 4517), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (4499, 4517), True, 'import numpy as np\n'), ((5310, 5341), 'caffe2.python.test_util.rand_array', 'rand_array', (['*(main_dims + dims)'], {}), '(*(main_dims + dims))\n', (5320, 5341), False, 'from caffe2.python.test_util import TestCase, rand_array\n'), ((5388, 5434), 'numpy.random.randint', 
'np.random.randint', (['(-100)', '(100)', '(main_dims + dims)'], {}), '(-100, 100, main_dims + dims)\n', (5405, 5434), True, 'import numpy as np\n'), ((5777, 5823), 'numpy.random.randint', 'np.random.randint', (['(main_dims[0] - total_length)'], {}), '(main_dims[0] - total_length)\n', (5794, 5823), True, 'import numpy as np\n'), ((6291, 6309), 'numpy.zeros', 'np.zeros', (['elements'], {}), '(elements)\n', (6299, 6309), True, 'import numpy as np\n'), ((7542, 7567), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['name'], {}), '(name)\n', (7561, 7567), False, 'from caffe2.python import core, workspace\n'), ((3372, 3383), 'numpy.stack', 'np.stack', (['a'], {}), '(a)\n', (3380, 3383), True, 'import numpy as np\n'), ((7254, 7265), 'numpy.stack', 'np.stack', (['a'], {}), '(a)\n', (7262, 7265), True, 'import numpy as np\n'), ((3300, 3335), 'numpy.empty', 'np.empty', ([], {'shape': '((0,) + suffix_shape)'}), '(shape=(0,) + suffix_shape)\n', (3308, 3335), True, 'import numpy as np\n'), ((7182, 7217), 'numpy.empty', 'np.empty', ([], {'shape': '((0,) + suffix_shape)'}), '(shape=(0,) + suffix_shape)\n', (7190, 7217), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>"]
__all__ = ["_StatsModelsAdapter"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster
class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
"""Base class for interfacing statsmodels forecasting algorithms"""
_fitted_param_names = ()
def __init__(self):
self._forecaster = None
self._fitted_forecaster = None
super(_StatsModelsAdapter, self).__init__()
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecasters horizon with the steps ahead to to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
# statsmodels does not support the pd.Int64Index as required,
# so we coerce them here to pd.RangeIndex
if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index:
y, X = _coerce_int_to_range_index(y, X)
self._set_y_X(y, X)
self._set_fh(fh)
self._fit_forecaster(y, X)
self._is_fitted = True
return self
def _fit_forecaster(self, y_train, X_train=None):
"""Internal fit"""
raise NotImplementedError("abstract method")
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""
Make forecasts.
Parameters
----------
fh : ForecastingHorizon
            The forecaster's horizon with the steps ahead to predict.
Default is one-step ahead forecast,
i.e. np.array([1])
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
return_pred_int : bool, optional (default=False)
alpha : int or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Returns series of predicted values.
"""
if return_pred_int:
raise NotImplementedError()
# statsmodels requires zero-based indexing starting at the
# beginning of the training series when passing integers
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
y_pred = self._fitted_forecaster.predict(start, end)
# statsmodels forecasts all periods from start to end of forecasting
# horizon, but only return given time points in forecasting horizon
return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]
def get_fitted_params(self):
"""Get fitted parameters
Returns
-------
fitted_params : dict
"""
self.check_is_fitted()
return {
name: self._fitted_forecaster.params.get(name)
for name in self._get_fitted_param_names()
}
def _get_fitted_param_names(self):
"""Get names of fitted parameters"""
return self._fitted_param_names
def _coerce_int_to_range_index(y, X=None):
new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
try:
np.testing.assert_array_equal(y.index, new_index)
except AssertionError:
raise ValueError(
"Coercion of pd.Int64Index to pd.RangeIndex "
"failed. Please provide `y_train` with a "
"pd.RangeIndex."
)
y.index = new_index
if X is not None:
X.index = new_index
return y, X
|
[
"numpy.testing.assert_array_equal",
"pandas.RangeIndex"
] |
[((3433, 3475), 'pandas.RangeIndex', 'pd.RangeIndex', (['y.index[0]', '(y.index[-1] + 1)'], {}), '(y.index[0], y.index[-1] + 1)\n', (3446, 3475), True, 'import pandas as pd\n'), ((3493, 3542), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['y.index', 'new_index'], {}), '(y.index, new_index)\n', (3522, 3542), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.sparse
__all__ = ['save_npz', 'load_npz']
# Make loading safe vs. malicious input
PICKLE_KWARGS = dict(allow_pickle=False)
def save_npz(file, matrix, compressed=True):
""" Save a sparse matrix to a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already
there.
    matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
The sparse matrix to save.
compressed : bool, optional
Allow compressing the file. Default: True
See Also
--------
scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
numpy.savez: Save several arrays into a ``.npz`` archive.
numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
arrays_dict = {}
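    # Collect the format-specific index arrays; data, shape and format are appended below.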
if matrix.format in ('csc', 'csr', 'bsr'):
arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
elif matrix.format == 'dia':
arrays_dict.update(offsets=matrix.offsets)
elif matrix.format == 'coo':
arrays_dict.update(row=matrix.row, col=matrix.col)
else:
raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))
arrays_dict.update(
format=matrix.format.encode('ascii'),
shape=matrix.shape,
data=matrix.data
)
if compressed:
np.savez_compressed(file, **arrays_dict)
else:
np.savez(file, **arrays_dict)
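# --- Editor's sketch (not part of the original file) ------------------------
# Which arrays land in the archive depends on the matrix format; a COO matrix,
# for example, stores 'row', 'col', 'data', 'format' and 'shape'. The
# in-memory buffer and the tiny matrix below are illustrative only.
def _save_npz_example():
    import io
    buf = io.BytesIO()
    coo = scipy.sparse.coo_matrix(np.array([[0, 1], [2, 0]]))
    save_npz(buf, coo, compressed=False)
    buf.seek(0)
    return sorted(np.load(buf).files)  # ['col', 'data', 'format', 'row', 'shape']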
def load_npz(file):
""" Load a sparse matrix from a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be loaded.
Returns
-------
result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
A sparse matrix containing the loaded data.
Raises
------
OSError
If the input file does not exist or cannot be read.
See Also
--------
scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format.
numpy.load: Load several arrays from a ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
with np.load(file, **PICKLE_KWARGS) as loaded:
try:
matrix_format = loaded['format']
except KeyError as e:
raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) from e
matrix_format = matrix_format.item()
if not isinstance(matrix_format, str):
# Play safe with Python 2 vs 3 backward compatibility;
# files saved with SciPy < 1.0.0 may contain unicode or bytes.
matrix_format = matrix_format.decode('ascii')
try:
cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
except AttributeError as e:
raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) from e
if matrix_format in ('csc', 'csr', 'bsr'):
return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
elif matrix_format == 'dia':
return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
elif matrix_format == 'coo':
return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
else:
raise NotImplementedError('Load is not implemented for '
'sparse matrix of format {}.'.format(matrix_format))
|
[
"numpy.savez_compressed",
"numpy.load",
"numpy.savez"
] |
[((2426, 2466), 'numpy.savez_compressed', 'np.savez_compressed', (['file'], {}), '(file, **arrays_dict)\n', (2445, 2466), True, 'import numpy as np\n'), ((2485, 2514), 'numpy.savez', 'np.savez', (['file'], {}), '(file, **arrays_dict)\n', (2493, 2514), True, 'import numpy as np\n'), ((4018, 4048), 'numpy.load', 'np.load', (['file'], {}), '(file, **PICKLE_KWARGS)\n', (4025, 4048), True, 'import numpy as np\n')]
|
'''
This code is based on https://github.com/jrieke/shape-detection/
'''
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import tensorflow as tf
import datetime
class JriekeBboxDataset:
def generate(self):
print('Generating...')
self.WIDTH = 8
self.HEIGHT = 8
num_imgs = 50000
min_object_size = 1
max_object_size = 4
num_objects = 1
self.bboxes = np.zeros((num_imgs, num_objects, 4))
self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0
for i_img in range(num_imgs):
for i_object in range(num_objects):
w, h = np.random.randint(min_object_size, max_object_size, size=2)
x = np.random.randint(0, self.WIDTH - w)
y = np.random.randint(0, self.HEIGHT - h)
self.imgs[i_img, y:y+h, x:x+w] = 1. # set rectangle to 1
self.bboxes[i_img, i_object] = [x, y, w, h]
print("Shapes: imgs ", self.imgs.shape, " bboxes ", self.bboxes.shape)
#why this?
# X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs)
X = self.imgs
y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH
# Split training and test.
i = int(0.8 * num_imgs)
train_X = X[:i] #80% for training
test_X = X[i:]
train_y = y[:i]
test_y = y[i:]
self.test_imgs = self.imgs[i:]
self.test_bboxes = self.bboxes[i:]
return train_X, train_y, test_X, test_y
def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
fig = plt.figure(figsize=(12, 3))
fig.suptitle('check if the generated imgs match to the test_X slice image')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 2, 1)
plt.gca().set_title('Returned by the dataset class: used for training')
plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.subplot(1, 2, 2)
plt.gca().set_title('Global image holder: used for plotting.')
plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.show()
        print('compare:', test_X_sample, test_imgs_sample)
def IOU(self,bbox1, bbox2):
        '''Calculate overlap between two bounding boxes [x, y, w, h] as the area of intersection over the area of union'''
x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]
w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
if w_I <= 0 or h_I <= 0: # no overlap
return 0.
I = w_I * h_I
U = w1 * h1 + w2 * h2 - I
return I / U
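    # Editor's worked example for IOU above (boxes made up): for [0, 0, 2, 2]
    # and [1, 1, 2, 2] the intersection is 1 * 1 = 1 and the union is
    # 4 + 4 - 1 = 7, so the method returns 1 / 7, about 0.143.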
def convertDefaultAnnotToCoord(self, annot):
'''
annot -> [x, y, w, h]
'''
w = annot[2] * self.WIDTH
h = annot[3] * self.HEIGHT
x = annot[0] * self.HEIGHT
y = annot[1] * self.HEIGHT
return [x,y,w,h]
def convertYoloAnnotToCoord(self, yolo_annot):
'''
yolo_annot -> [x, y, w, h]
'''
w = yolo_annot[2] * self.WIDTH
h = yolo_annot[3] * self.HEIGHT
x = (yolo_annot[0] * self.WIDTH) - (w/2)
y = (yolo_annot[1] * self.HEIGHT) - (h/2)
return [x,y,w,h]
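    # Editor's worked example for convertYoloAnnotToCoord above (values made
    # up): with WIDTH = HEIGHT = 8 and yolo_annot = [0.5, 0.5, 0.25, 0.25],
    # w = h = 0.25 * 8 = 2, x = 0.5 * 8 - 2 / 2 = 3 and y = 3, i.e. [3, 3, 2, 2].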
def show_generated(self, i=0):
fig = plt.figure()
fig.subplots_adjust(top=0.85)
fig.suptitle('Generated image sample + GT')
plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
for bbox in self.bboxes[i]:
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.gca().legend(['GT'])
plt.show()
def plot_rectangle(self, img, bbox):
fig = plt.figure()
fig.suptitle('Plotting rectangle.')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 1, 1)
plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.show()
def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
fig = plt.figure(figsize=(12, 3))
fig.suptitle('check if the generated imgs match to the test_X slice image')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 2, 1)
plt.gca().set_title('Returned by the dataset class: used for training')
plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.subplot(1, 2, 2)
plt.gca().set_title('Global image holder: used for plotting.')
plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.show()
print('compare:',test_X_sample,test_imgs_sample)
def show_predicted(self, pred_bboxes):
# Show a few images and predicted bounding boxes from the test dataset.
fig = plt.figure(figsize=(12, 3))
fig.subplots_adjust(top=0.85)
fig.suptitle('Prediction demonstration. Random samples.')
legend_plotted = False
for i_subplot in range(1, 11):
plt.subplot(1, 10, i_subplot)
i = np.random.randint(len(pred_bboxes))
plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]):
                # print('before conversion: pred',pred_bbox, 'gt',exp_bbox)
pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox)
# exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox)
                print('after conversion: pred',pred_bbox, 'gt',exp_bbox)
plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none'))
#gt
plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none'))
plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r')
if not legend_plotted:
legend_plotted = True
plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)
plt.show()
# plt.savefig('plots/bw-single-rectangle_prediction_{0:%Y-%m-%d%H:%M:%S}.png'.format(datetime.datetime.now()), dpi=300)
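# Editor's sketch (not part of the original module): typical usage of the class
# above; the variable names are illustrative only.
#   dataset = JriekeBboxDataset()
#   train_X, train_y, test_X, test_y = dataset.generate()
#   dataset.show_generated(0)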
|
[
"matplotlib.pyplot.imshow",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.gca",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((440, 476), 'numpy.zeros', 'np.zeros', (['(num_imgs, num_objects, 4)'], {}), '((num_imgs, num_objects, 4))\n', (448, 476), True, 'import numpy as np\n'), ((497, 542), 'numpy.zeros', 'np.zeros', (['(num_imgs, self.WIDTH, self.HEIGHT)'], {}), '((num_imgs, self.WIDTH, self.HEIGHT))\n', (505, 542), True, 'import numpy as np\n'), ((1662, 1689), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (1672, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1841), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1832, 1841), True, 'import matplotlib.pyplot as plt\n'), ((1930, 2052), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_X_sample'], {'cmap': '"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(test_X_sample, cmap='Greys', interpolation='none', origin=\n 'lower', extent=[0, self.WIDTH, 0, self.HEIGHT])\n", (1940, 2052), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2068, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2282), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_imgs_sample'], {'cmap': '"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(test_imgs_sample, cmap='Greys', interpolation='none', origin=\n 'lower', extent=[0, self.WIDTH, 0, self.HEIGHT])\n", (2167, 2282), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2294, 2296), True, 'import matplotlib.pyplot as plt\n'), ((3515, 3527), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3525, 3527), True, 'import matplotlib.pyplot as plt\n'), ((3626, 3746), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.imgs[i]'], {'cmap': '"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(self.imgs[i], cmap='Greys', interpolation='none', origin='lower',\n extent=[0, self.WIDTH, 0, self.HEIGHT])\n", (3636, 3746), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3953), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3951, 3953), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4021, 4023), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4135), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (4126, 4135), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4256), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(img, cmap='Greys', interpolation='none', origin='lower', extent=\n [0, self.WIDTH, 0, self.HEIGHT])\n", (4154, 4256), True, 'import matplotlib.pyplot as plt\n'), ((4375, 4385), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4383, 4385), True, 'import matplotlib.pyplot as plt\n'), ((4481, 4508), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (4491, 4508), True, 'import matplotlib.pyplot as plt\n'), ((4640, 4660), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4651, 4660), True, 'import matplotlib.pyplot as plt\n'), ((4749, 4871), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_X_sample'], {'cmap': 
'"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(test_X_sample, cmap='Greys', interpolation='none', origin=\n 'lower', extent=[0, self.WIDTH, 0, self.HEIGHT])\n", (4759, 4871), True, 'import matplotlib.pyplot as plt\n'), ((4876, 4896), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4887, 4896), True, 'import matplotlib.pyplot as plt\n'), ((4976, 5101), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_imgs_sample'], {'cmap': '"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(test_imgs_sample, cmap='Greys', interpolation='none', origin=\n 'lower', extent=[0, self.WIDTH, 0, self.HEIGHT])\n", (4986, 5101), True, 'import matplotlib.pyplot as plt\n'), ((5105, 5115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5113, 5115), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5339), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (5322, 5339), True, 'import matplotlib.pyplot as plt\n'), ((6760, 6770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6768, 6770), True, 'import matplotlib.pyplot as plt\n'), ((4280, 4369), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(bbox[0], bbox[1])', 'bbox[2]', 'bbox[3]'], {'ec': '"""r"""', 'fc': '"""none"""'}), "((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r',\n fc='none')\n", (4308, 4369), False, 'import matplotlib\n'), ((5527, 5556), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(10)', 'i_subplot'], {}), '(1, 10, i_subplot)\n', (5538, 5556), True, 'import matplotlib.pyplot as plt\n'), ((5621, 5747), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.test_imgs[i]'], {'cmap': '"""Greys"""', 'interpolation': '"""none"""', 'origin': '"""lower"""', 'extent': '[0, self.WIDTH, 0, self.HEIGHT]'}), "(self.test_imgs[i], cmap='Greys', interpolation='none', origin=\n 'lower', extent=[0, self.WIDTH, 0, self.HEIGHT])\n", (5631, 5747), True, 'import matplotlib.pyplot as plt\n'), ((676, 735), 'numpy.random.randint', 'np.random.randint', (['min_object_size', 'max_object_size'], {'size': '(2)'}), '(min_object_size, max_object_size, size=2)\n', (693, 735), True, 'import numpy as np\n'), ((756, 792), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.WIDTH - w)'], {}), '(0, self.WIDTH - w)\n', (773, 792), True, 'import numpy as np\n'), ((813, 850), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.HEIGHT - h)'], {}), '(0, self.HEIGHT - h)\n', (830, 850), True, 'import numpy as np\n'), ((1850, 1859), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1857, 1859), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2095), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2093, 2095), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3900), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(bbox[0], bbox[1])', 'bbox[2]', 'bbox[3]'], {'ec': '"""r"""', 'fc': '"""none"""'}), "((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r',\n fc='none')\n", (3839, 3900), False, 'import matplotlib\n'), ((4260, 4269), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4267, 4269), True, 'import matplotlib.pyplot as plt\n'), ((4669, 4678), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4676, 4678), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4914), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4912, 4914), True, 'import matplotlib.pyplot as plt\n'), ((3791, 
3800), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3798, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3910, 3919), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3917, 3919), True, 'import matplotlib.pyplot as plt\n'), ((6151, 6260), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(pred_bbox[0], pred_bbox[1])', 'pred_bbox[2]', 'pred_bbox[3]'], {'ec': '"""r"""', 'fc': '"""none"""'}), "((pred_bbox[0], pred_bbox[1]), pred_bbox[2],\n pred_bbox[3], ec='r', fc='none')\n", (6179, 6260), False, 'import matplotlib\n'), ((6314, 6419), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(exp_bbox[0], exp_bbox[1])', 'exp_bbox[2]', 'exp_bbox[3]'], {'ec': '"""b"""', 'fc': '"""none"""'}), "((exp_bbox[0], exp_bbox[1]), exp_bbox[2],\n exp_bbox[3], ec='b', fc='none')\n", (6342, 6419), False, 'import matplotlib\n'), ((6131, 6140), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6138, 6140), True, 'import matplotlib.pyplot as plt\n'), ((6294, 6303), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6301, 6303), True, 'import matplotlib.pyplot as plt\n'), ((6658, 6667), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6665, 6667), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import copy, os, pdb, random, shutil, subprocess, time
import h5py
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
from sklearn import preprocessing
import tensorflow as tf
import basenji
'''
basenji_motifs.py
Collect statistics and make plots to explore the first convolution layer
of the given model using the given sequences.
'''
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
parser = OptionParser(usage)
parser.add_option(
'-a',
dest='act_t',
default=0.5,
type='float',
help=
'Activation threshold (as proportion of max) to consider for PWM [Default: %default]'
)
parser.add_option(
'-d',
dest='model_hdf5_file',
default=None,
help='Pre-computed model output as HDF5.')
parser.add_option('-o', dest='out_dir', default='.')
parser.add_option(
'-m',
dest='meme_db',
default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
help='MEME database used to annotate motifs')
parser.add_option(
'-p',
dest='plot_heats',
default=False,
action='store_true',
help=
'Plot heat maps describing filter activations in the test sequences [Default: %default]'
)
parser.add_option(
'-s',
dest='sample',
default=None,
type='int',
help='Sample sequences from the test set [Default:%default]')
parser.add_option(
'-t',
dest='trim_filters',
default=False,
action='store_true',
help='Trim uninformative positions off the filter ends [Default: %default]'
)
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error(
'Must provide Basenji parameters and model files and test data in HDF5'
' format.'
)
else:
params_file = args[0]
model_file = args[1]
data_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
#################################################################
# load data
data_open = h5py.File(data_file)
test_seqs1 = data_open['test_in']
test_targets = data_open['test_out']
try:
target_names = list(data_open['target_labels'])
except KeyError:
target_names = ['t%d' % ti for ti in range(test_targets.shape[1])]
if options.sample is not None:
# choose sampled indexes
sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample))
# filter
test_seqs1 = test_seqs1[sample_i]
test_targets = test_targets[sample_i]
# convert to letters
test_seqs = basenji.dna_io.hot1_dna(test_seqs1)
#################################################################
# model parameters and placeholders
job = basenji.dna_io.read_job_params(params_file)
job['seq_length'] = test_seqs1.shape[1]
job['seq_depth'] = test_seqs1.shape[2]
job['num_targets'] = test_targets.shape[2]
job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))
t0 = time.time()
dr = basenji.seqnn.SeqNN()
dr.build(job)
print('Model building time %ds' % (time.time() - t0))
# adjust for fourier
job['fourier'] = 'train_out_imag' in data_open
if job['fourier']:
test_targets_imag = data_open['test_out_imag']
if options.valid:
test_targets_imag = data_open['valid_out_imag']
#################################################################
# predict
# initialize batcher
if job['fourier']:
batcher_test = basenji.batcher.BatcherF(
test_seqs1,
test_targets,
test_targets_imag,
batch_size=dr.batch_size,
pool_width=job['target_pool'])
else:
batcher_test = basenji.batcher.Batcher(
test_seqs1,
test_targets,
batch_size=dr.batch_size,
pool_width=job['target_pool'])
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
# get weights
filter_weights = sess.run(dr.filter_weights[0])
filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
print(filter_weights.shape)
# test
t0 = time.time()
layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
filter_outs = layer_filter_outs[0]
print(filter_outs.shape)
# store useful variables
num_filters = filter_weights.shape[0]
filter_size = filter_weights.shape[2]
#################################################################
# individual filter plots
#################################################################
# also save information contents
filters_ic = []
meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)
for f in range(num_filters):
print('Filter %d' % f)
# plot filter parameters as a heatmap
plot_filter_heat(filter_weights[f, :, :],
'%s/filter%d_heat.pdf' % (options.out_dir, f))
# write possum motif file
filter_possum(filter_weights[f, :, :], 'filter%d' % f,
'%s/filter%d_possum.txt' % (options.out_dir,
f), options.trim_filters)
# plot weblogo of high scoring outputs
plot_filter_logo(
filter_outs[:, :, f],
filter_size,
test_seqs,
'%s/filter%d_logo' % (options.out_dir, f),
maxpct_t=options.act_t)
# make a PWM for the filter
filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' %
(options.out_dir, f))
if nsites < 10:
# no information
filters_ic.append(0)
else:
# compute and save information content
filters_ic.append(info_content(filter_pwm))
# add to the meme motif file
meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)
meme_out.close()
#################################################################
# annotate filters
#################################################################
# run tomtom
subprocess.call(
'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
(options.out_dir, options.out_dir, options.meme_db),
shell=True)
# read in annotations
filter_names = name_filters(
num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)
#################################################################
# print a table of information
#################################################################
table_out = open('%s/table.txt' % options.out_dir, 'w')
# print header for later panda reading
header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std')
print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out)
for f in range(num_filters):
# collapse to a consensus motif
consensus = filter_motif(filter_weights[f, :, :])
# grab annotation
annotation = '.'
name_pieces = filter_names[f].split('_')
if len(name_pieces) > 1:
annotation = name_pieces[1]
# plot density of filter output scores
fmean, fstd = plot_score_density(
np.ravel(filter_outs[:, :, f]),
'%s/filter%d_dens.pdf' % (options.out_dir, f))
row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd)
print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)
table_out.close()
#################################################################
# global filter plots
#################################################################
if options.plot_heats:
# plot filter-sequence heatmap
plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir)
# plot filter-segment heatmap
plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir)
plot_filter_seg_heat(
filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False)
# plot filter-target correlation heatmap
plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
'%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean')
plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
'%s/filter_target_cors_max.pdf' % options.out_dir, 'max')
def get_motif_proteins(meme_db_file):
""" Hash motif_id's to protein names using the MEME DB file """
motif_protein = {}
for line in open(meme_db_file):
a = line.split()
if len(a) > 0 and a[0] == 'MOTIF':
if a[2][0] == '(':
motif_protein[a[1]] = a[2][1:a[2].find(')')]
else:
motif_protein[a[1]] = a[2]
return motif_protein
def info_content(pwm, transpose=False, bg_gc=0.415):
""" Compute PWM information content.
In the original analysis, I used a bg_gc=0.5. For any
future analysis, I ought to switch to the true hg19
value of 0.415.
"""
pseudoc = 1e-9
if transpose:
pwm = np.transpose(pwm)
bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]
ic = 0
for i in range(pwm.shape[0]):
for j in range(4):
# ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
ic += -bg_pwm[j] * np.log2(
bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])
return ic
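# Editor's illustration (not part of the original script): a fully determined
# PWM column such as [[1.0, 0.0, 0.0, 0.0]] contributes essentially only the
# background entropy term, about 1.96 bits with the default bg_gc=0.415:
#   info_content(np.array([[1.0, 0.0, 0.0, 0.0]]))  # ~1.96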
def make_filter_pwm(filter_fasta):
""" Make a PWM for this filter from its top hits """
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
pwm_counts = []
nsites = 4 # pseudocounts
for line in open(filter_fasta):
if line[0] != '>':
seq = line.rstrip()
nsites += 1
if len(pwm_counts) == 0:
# initialize with the length
for i in range(len(seq)):
pwm_counts.append(np.array([1.0] * 4))
# count
for i in range(len(seq)):
try:
pwm_counts[i][nts[seq[i]]] += 1
except KeyError:
pwm_counts[i] += np.array([0.25] * 4)
# normalize
pwm_freqs = []
for i in range(len(pwm_counts)):
pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])
return np.array(pwm_freqs), nsites - 4
def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
""" Print a filter to the growing MEME file
Attrs:
meme_out : open file
f (int) : filter index #
filter_pwm (array) : filter PWM array
nsites (int) : number of filter sites
"""
if not trim_filters:
ic_start = 0
ic_end = filter_pwm.shape[0] - 1
else:
ic_t = 0.2
# trim PWM of uninformative prefix
ic_start = 0
while ic_start < filter_pwm.shape[0] and info_content(
filter_pwm[ic_start:ic_start + 1]) < ic_t:
ic_start += 1
# trim PWM of uninformative suffix
ic_end = filter_pwm.shape[0] - 1
while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
ic_end -= 1
if ic_start < ic_end:
print('MOTIF filter%d' % f, file=meme_out)
print(
'letter-probability matrix: alength= 4 w= %d nsites= %d' %
(ic_end - ic_start + 1, nsites),
file=meme_out)
for i in range(ic_start, ic_end + 1):
print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
print('', file=meme_out)
def meme_intro(meme_file, seqs):
""" Open MEME motif format file and print intro
Attrs:
meme_file (str) : filename
seqs [str] : list of strings for obtaining background freqs
Returns:
mem_out : open MEME file
"""
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
# count
nt_counts = [1] * 4
for i in range(len(seqs)):
for nt in seqs[i]:
try:
nt_counts[nts[nt]] += 1
except KeyError:
pass
# normalize
nt_sum = float(sum(nt_counts))
nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]
# open file for writing
meme_out = open(meme_file, 'w')
# print intro material
print('MEME version 4', file=meme_out)
print('', file=meme_out)
print('ALPHABET= ACGT', file=meme_out)
print('', file=meme_out)
print('Background letter frequencies:', file=meme_out)
print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
print('', file=meme_out)
return meme_out
def name_filters(num_filters, tomtom_file, meme_db_file):
""" Name the filters using Tomtom matches.
Attrs:
num_filters (int) : total number of filters
tomtom_file (str) : filename of Tomtom output table.
meme_db_file (str) : filename of MEME db
Returns:
filter_names [str] :
"""
# name by number
filter_names = ['f%d' % fi for fi in range(num_filters)]
# name by protein
if tomtom_file is not None and meme_db_file is not None:
motif_protein = get_motif_proteins(meme_db_file)
# hash motifs and q-value's by filter
filter_motifs = {}
tt_in = open(tomtom_file)
tt_in.readline()
for line in tt_in:
a = line.split()
fi = int(a[0][6:])
motif_id = a[1]
qval = float(a[5])
filter_motifs.setdefault(fi, []).append((qval, motif_id))
tt_in.close()
# assign filter's best match
for fi in filter_motifs:
top_motif = sorted(filter_motifs[fi])[0][1]
filter_names[fi] += '_%s' % motif_protein[top_motif]
return np.array(filter_names)
################################################################################
# plot_target_corr
#
# Plot a clustered heatmap of correlations between filter activations and
# targets.
#
# Input
# filter_outs:
# filter_names:
# target_names:
# out_pdf:
################################################################################
def plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'):
num_seqs = filter_outs.shape[0]
num_targets = len(target_names)
if seq_op == 'mean':
filter_outs_seq = filter_outs.mean(axis=2)
else:
filter_outs_seq = filter_outs.max(axis=2)
# std is sequence by filter.
filter_seqs_std = filter_outs_seq.std(axis=0)
filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0]
filter_names_live = filter_names[filter_seqs_std > 0]
filter_target_cors = np.zeros((len(filter_names_live), num_targets))
for fi in range(len(filter_names_live)):
for ti in range(num_targets):
cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti])
filter_target_cors[fi, ti] = cor
cor_df = pd.DataFrame(
filter_target_cors, index=filter_names_live, columns=target_names)
sns.set(font_scale=0.3)
plt.figure()
sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10))
plt.savefig(out_pdf)
plt.close()
################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmap of filter activations in the test sequences.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
# compute filter output means per sequence
filter_seqs = filter_outs.mean(axis=2)
# whiten
if whiten:
filter_seqs = preprocessing.scale(filter_seqs)
# transpose
filter_seqs = np.transpose(filter_seqs)
if drop_dead:
filter_stds = filter_seqs.std(axis=1)
filter_seqs = filter_seqs[filter_stds > 0]
# downsample sequences
seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)
hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)
sns.set(font_scale=0.3)
plt.figure()
sns.clustermap(
filter_seqs[:, seqs_i],
row_cluster=True,
col_cluster=True,
linewidths=0,
xticklabels=False,
vmin=hmin,
vmax=hmax)
plt.savefig(out_pdf)
#out_png = out_pdf[:-2] + 'ng'
#plt.savefig(out_png, dpi=300)
plt.close()
################################################################################
# plot_filter_seg_heat
#
# Plot a clustered heatmap of filter activations in sequence segments.
#
# Mean doesn't work well for the smaller segments for some reason, but taking
# the max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
# filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
b = filter_outs.shape[0]
f = filter_outs.shape[1]
l = filter_outs.shape[2]
s = 5
  while l % s != 0:
    s += 1
  print('%d segments of length %d' % (s, l // s))
  # split into multiple segments
  filter_outs_seg = np.reshape(filter_outs, (b, f, s, l // s))
# mean across the segments
filter_outs_mean = filter_outs_seg.max(axis=3)
# break each segment into a new instance
filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1), (s * b, f))
# whiten
if whiten:
filter_seqs = preprocessing.scale(filter_seqs)
# transpose
filter_seqs = np.transpose(filter_seqs)
if drop_dead:
filter_stds = filter_seqs.std(axis=1)
filter_seqs = filter_seqs[filter_stds > 0]
# downsample sequences
seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)
hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)
sns.set(font_scale=0.3)
if whiten:
dist = 'euclidean'
else:
dist = 'cosine'
plt.figure()
sns.clustermap(
filter_seqs[:, seqs_i],
metric=dist,
row_cluster=True,
col_cluster=True,
linewidths=0,
xticklabels=False,
vmin=hmin,
vmax=hmax)
plt.savefig(out_pdf)
#out_png = out_pdf[:-2] + 'ng'
#plt.savefig(out_png, dpi=300)
plt.close()
################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def filter_motif(param_matrix):
nts = 'ACGT'
motif_list = []
for v in range(param_matrix.shape[1]):
max_n = 0
for n in range(1, 4):
if param_matrix[n, v] > param_matrix[max_n, v]:
max_n = n
if param_matrix[max_n, v] > 0:
motif_list.append(nts[max_n])
else:
motif_list.append('N')
return ''.join(motif_list)
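# Editor's illustration (not part of the original script): filter_motif keeps
# the strongest nucleotide per position and writes 'N' where no weight is
# positive; the parameter matrix below is made up.
#   filter_motif(np.array([[0.5, -1.0, 0.0],
#                          [0.1, 0.2, -0.3],
#                          [-0.2, 0.1, -0.1],
#                          [0.0, -0.5, -0.2]]))  # -> 'ACN'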
################################################################################
# filter_possum
#
# Write a Possum-style motif
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200):
# possible trim
trim_start = 0
trim_end = param_matrix.shape[1] - 1
trim_t = 0.3
if trim_filters:
# trim PWM of uninformative prefix
while trim_start < param_matrix.shape[1] and np.max(
param_matrix[:, trim_start]) - np.min(
param_matrix[:, trim_start]) < trim_t:
trim_start += 1
# trim PWM of uninformative suffix
while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min(
param_matrix[:, trim_end]) < trim_t:
trim_end -= 1
if trim_start < trim_end:
possum_out = open(possum_file, 'w')
print('BEGIN GROUP', file=possum_out)
print('BEGIN FLOAT', file=possum_out)
print('ID %s' % motif_id, file=possum_out)
print('AP DNA', file=possum_out)
print('LE %d' % (trim_end + 1 - trim_start), file=possum_out)
for ci in range(trim_start, trim_end + 1):
print(
'MA %s' % ' '.join(['%.2f' % (mult * n)
for n in param_matrix[:, ci]]),
file=possum_out)
print('END', file=possum_out)
print('END', file=possum_out)
possum_out.close()
################################################################################
# plot_filter_heat
#
# Plot a heatmap of the filter's parameters.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_heat(param_matrix, out_pdf):
param_range = abs(param_matrix).max()
sns.set(font_scale=2)
plt.figure(figsize=(param_matrix.shape[1], 4))
sns.heatmap(
param_matrix,
cmap='PRGn',
linewidths=0.2,
vmin=-param_range,
vmax=param_range)
ax = plt.gca()
ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10)
plt.savefig(out_pdf)
plt.close()
################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None):
if maxpct_t:
all_outs = np.ravel(filter_outs)
all_outs_mean = all_outs.mean()
all_outs_norm = all_outs - all_outs_mean
raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean
left_pad = (filter_size - 1) // 2
right_pad = filter_size - left_pad
# print fasta file of positive outputs
filter_fasta_out = open('%s.fa' % out_prefix, 'w')
filter_count = 0
for i in range(filter_outs.shape[0]):
for j in range(filter_outs.shape[1]):
if filter_outs[i, j] > raw_t:
# construct kmer
kmer = ''
# determine boundaries, considering padding
fstart = j - left_pad
fend = fstart + filter_size
# if it starts in left_pad
if fstart < 0:
kmer += 'N' * (-fstart)
fstart = 0
# add primary sequence
kmer += seqs[i][fstart:fend]
# if it ends in right_pad
if fend > len(seqs[i]):
kmer += 'N' * (fend - len(seqs[i]))
# output
print('>%d_%d' % (i, j), file=filter_fasta_out)
print(kmer, file=filter_fasta_out)
filter_count += 1
filter_fasta_out.close()
# make weblogo
if filter_count > 0:
weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
out_prefix)
subprocess.call(weblogo_cmd, shell=True)
################################################################################
# plot_score_density
#
# Plot the score density and print to the stats table.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_score_density(f_scores, out_pdf):
sns.set(font_scale=1.3)
plt.figure()
sns.distplot(f_scores, kde=False)
plt.xlabel('ReLU output')
plt.savefig(out_pdf)
plt.close()
return f_scores.mean(), f_scores.std()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
# pdb.runcall(main)
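# Editor's note (not part of the original script): per the usage string in
# main(), the script is run from the command line as, e.g.,
#   python basenji_motifs.py -o motifs_out params.txt model.tf test_data.h5
# where the file and directory names are placeholders.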
|
[
"basenji.seqnn.SeqNN",
"numpy.array",
"basenji.dna_io.read_job_params",
"basenji.batcher.BatcherF",
"seaborn.set",
"numpy.reshape",
"seaborn.distplot",
"matplotlib.pyplot.xlabel",
"basenji.batcher.Batcher",
"tensorflow.Session",
"numpy.max",
"matplotlib.pyplot.close",
"os.path.isdir",
"basenji.dna_io.hot1_dna",
"subprocess.call",
"os.mkdir",
"numpy.min",
"pandas.DataFrame",
"scipy.stats.spearmanr",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"seaborn.clustermap",
"matplotlib.pyplot.gca",
"seaborn.heatmap",
"h5py.File",
"numpy.squeeze",
"numpy.log2",
"numpy.transpose",
"time.time",
"sklearn.preprocessing.scale",
"tensorflow.train.Saver",
"optparse.OptionParser",
"numpy.swapaxes",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.ravel",
"numpy.percentile"
] |
[((829, 850), 'matplotlib.use', 'matplotlib.use', (['"""PDF"""'], {}), "('PDF')\n", (843, 850), False, 'import matplotlib\n'), ((1670, 1689), 'optparse.OptionParser', 'OptionParser', (['usage'], {}), '(usage)\n', (1682, 1689), False, 'from optparse import OptionParser\n'), ((3270, 3290), 'h5py.File', 'h5py.File', (['data_file'], {}), '(data_file)\n', (3279, 3290), False, 'import h5py\n'), ((3793, 3828), 'basenji.dna_io.hot1_dna', 'basenji.dna_io.hot1_dna', (['test_seqs1'], {}), '(test_seqs1)\n', (3816, 3828), False, 'import basenji\n'), ((3945, 3988), 'basenji.dna_io.read_job_params', 'basenji.dna_io.read_job_params', (['params_file'], {}), '(params_file)\n', (3975, 3988), False, 'import basenji\n'), ((4195, 4206), 'time.time', 'time.time', ([], {}), '()\n', (4204, 4206), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((4214, 4235), 'basenji.seqnn.SeqNN', 'basenji.seqnn.SeqNN', ([], {}), '()\n', (4233, 4235), False, 'import basenji\n'), ((5041, 5057), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5055, 5057), True, 'import tensorflow as tf\n'), ((7204, 7367), 'subprocess.call', 'subprocess.call', (["('tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %\n (options.out_dir, options.out_dir, options.meme_db))"], {'shell': '(True)'}), "(\n 'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %\n (options.out_dir, options.out_dir, options.meme_db), shell=True)\n", (7219, 7367), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((14271, 14293), 'numpy.array', 'np.array', (['filter_names'], {}), '(filter_names)\n', (14279, 14293), True, 'import numpy as np\n'), ((15402, 15481), 'pandas.DataFrame', 'pd.DataFrame', (['filter_target_cors'], {'index': 'filter_names_live', 'columns': 'target_names'}), '(filter_target_cors, index=filter_names_live, columns=target_names)\n', (15414, 15481), True, 'import pandas as pd\n'), ((15492, 15515), 'seaborn.set', 'sns.set', ([], {'font_scale': '(0.3)'}), '(font_scale=0.3)\n', (15499, 15515), True, 'import seaborn as sns\n'), ((15518, 15530), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15528, 15530), True, 'import matplotlib.pyplot as plt\n'), ((15533, 15595), 'seaborn.clustermap', 'sns.clustermap', (['cor_df'], {'cmap': '"""BrBG"""', 'center': '(0)', 'figsize': '(8, 10)'}), "(cor_df, cmap='BrBG', center=0, figsize=(8, 10))\n", (15547, 15595), True, 'import seaborn as sns\n'), ((15598, 15618), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_pdf'], {}), '(out_pdf)\n', (15609, 15618), True, 'import matplotlib.pyplot as plt\n'), ((15621, 15632), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15630, 15632), True, 'import matplotlib.pyplot as plt\n'), ((16225, 16250), 'numpy.transpose', 'np.transpose', (['filter_seqs'], {}), '(filter_seqs)\n', (16237, 16250), True, 'import numpy as np\n'), ((16394, 16441), 'numpy.random.randint', 'np.random.randint', (['(0)', 'filter_seqs.shape[1]', '(500)'], {}), '(0, filter_seqs.shape[1], 500)\n', (16411, 16441), True, 'import numpy as np\n'), ((16452, 16494), 'numpy.percentile', 'np.percentile', (['filter_seqs[:, seqs_i]', '(0.1)'], {}), '(filter_seqs[:, seqs_i], 0.1)\n', (16465, 16494), True, 'import numpy as np\n'), ((16504, 16547), 'numpy.percentile', 'np.percentile', (['filter_seqs[:, seqs_i]', '(99.9)'], {}), '(filter_seqs[:, seqs_i], 99.9)\n', (16517, 16547), True, 'import numpy as np\n'), ((16551, 16574), 'seaborn.set', 'sns.set', ([], {'font_scale': '(0.3)'}), '(font_scale=0.3)\n', (16558, 16574), 
True, 'import seaborn as sns\n'), ((16578, 16590), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16588, 16590), True, 'import matplotlib.pyplot as plt\n'), ((16593, 16726), 'seaborn.clustermap', 'sns.clustermap', (['filter_seqs[:, seqs_i]'], {'row_cluster': '(True)', 'col_cluster': '(True)', 'linewidths': '(0)', 'xticklabels': '(False)', 'vmin': 'hmin', 'vmax': 'hmax'}), '(filter_seqs[:, seqs_i], row_cluster=True, col_cluster=True,\n linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax)\n', (16607, 16726), True, 'import seaborn as sns\n'), ((16768, 16788), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_pdf'], {}), '(out_pdf)\n', (16779, 16788), True, 'import matplotlib.pyplot as plt\n'), ((16857, 16868), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16866, 16868), True, 'import matplotlib.pyplot as plt\n'), ((17644, 17685), 'numpy.reshape', 'np.reshape', (['filter_outs', '(b, f, s, l / s)'], {}), '(filter_outs, (b, f, s, l / s))\n', (17654, 17685), True, 'import numpy as np\n'), ((17992, 18017), 'numpy.transpose', 'np.transpose', (['filter_seqs'], {}), '(filter_seqs)\n', (18004, 18017), True, 'import numpy as np\n'), ((18161, 18208), 'numpy.random.randint', 'np.random.randint', (['(0)', 'filter_seqs.shape[1]', '(500)'], {}), '(0, filter_seqs.shape[1], 500)\n', (18178, 18208), True, 'import numpy as np\n'), ((18219, 18261), 'numpy.percentile', 'np.percentile', (['filter_seqs[:, seqs_i]', '(0.1)'], {}), '(filter_seqs[:, seqs_i], 0.1)\n', (18232, 18261), True, 'import numpy as np\n'), ((18271, 18314), 'numpy.percentile', 'np.percentile', (['filter_seqs[:, seqs_i]', '(99.9)'], {}), '(filter_seqs[:, seqs_i], 99.9)\n', (18284, 18314), True, 'import numpy as np\n'), ((18318, 18341), 'seaborn.set', 'sns.set', ([], {'font_scale': '(0.3)'}), '(font_scale=0.3)\n', (18325, 18341), True, 'import seaborn as sns\n'), ((18409, 18421), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18419, 18421), True, 'import matplotlib.pyplot as plt\n'), ((18424, 18570), 'seaborn.clustermap', 'sns.clustermap', (['filter_seqs[:, seqs_i]'], {'metric': 'dist', 'row_cluster': '(True)', 'col_cluster': '(True)', 'linewidths': '(0)', 'xticklabels': '(False)', 'vmin': 'hmin', 'vmax': 'hmax'}), '(filter_seqs[:, seqs_i], metric=dist, row_cluster=True,\n col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax)\n', (18438, 18570), True, 'import seaborn as sns\n'), ((18618, 18638), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_pdf'], {}), '(out_pdf)\n', (18629, 18638), True, 'import matplotlib.pyplot as plt\n'), ((18707, 18718), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18716, 18718), True, 'import matplotlib.pyplot as plt\n'), ((21275, 21296), 'seaborn.set', 'sns.set', ([], {'font_scale': '(2)'}), '(font_scale=2)\n', (21282, 21296), True, 'import seaborn as sns\n'), ((21299, 21345), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(param_matrix.shape[1], 4)'}), '(figsize=(param_matrix.shape[1], 4))\n', (21309, 21345), True, 'import matplotlib.pyplot as plt\n'), ((21348, 21443), 'seaborn.heatmap', 'sns.heatmap', (['param_matrix'], {'cmap': '"""PRGn"""', 'linewidths': '(0.2)', 'vmin': '(-param_range)', 'vmax': 'param_range'}), "(param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range,\n vmax=param_range)\n", (21359, 21443), True, 'import seaborn as sns\n'), ((21478, 21487), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21485, 21487), True, 'import matplotlib.pyplot as plt\n'), ((21614, 21634), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['out_pdf'], {}), '(out_pdf)\n', (21625, 21634), True, 'import matplotlib.pyplot as plt\n'), ((21637, 21648), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21646, 21648), True, 'import matplotlib.pyplot as plt\n'), ((23772, 23795), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.3)'}), '(font_scale=1.3)\n', (23779, 23795), True, 'import seaborn as sns\n'), ((23798, 23810), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23808, 23810), True, 'import matplotlib.pyplot as plt\n'), ((23813, 23846), 'seaborn.distplot', 'sns.distplot', (['f_scores'], {'kde': '(False)'}), '(f_scores, kde=False)\n', (23825, 23846), True, 'import seaborn as sns\n'), ((23849, 23874), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ReLU output"""'], {}), "('ReLU output')\n", (23859, 23874), True, 'import matplotlib.pyplot as plt\n'), ((23877, 23897), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_pdf'], {}), '(out_pdf)\n', (23888, 23897), True, 'import matplotlib.pyplot as plt\n'), ((23900, 23911), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23909, 23911), True, 'import matplotlib.pyplot as plt\n'), ((3110, 3140), 'os.path.isdir', 'os.path.isdir', (['options.out_dir'], {}), '(options.out_dir)\n', (3123, 3140), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((3146, 3171), 'os.mkdir', 'os.mkdir', (['options.out_dir'], {}), '(options.out_dir)\n', (3154, 3171), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((4674, 4804), 'basenji.batcher.BatcherF', 'basenji.batcher.BatcherF', (['test_seqs1', 'test_targets', 'test_targets_imag'], {'batch_size': 'dr.batch_size', 'pool_width': "job['target_pool']"}), "(test_seqs1, test_targets, test_targets_imag,\n batch_size=dr.batch_size, pool_width=job['target_pool'])\n", (4698, 4804), False, 'import basenji\n'), ((4869, 4979), 'basenji.batcher.Batcher', 'basenji.batcher.Batcher', (['test_seqs1', 'test_targets'], {'batch_size': 'dr.batch_size', 'pool_width': "job['target_pool']"}), "(test_seqs1, test_targets, batch_size=dr.batch_size,\n pool_width=job['target_pool'])\n", (4892, 4979), False, 'import basenji\n'), ((5066, 5078), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5076, 5078), True, 'import tensorflow as tf\n'), ((5355, 5366), 'time.time', 'time.time', ([], {}), '()\n', (5364, 5366), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((10067, 10084), 'numpy.transpose', 'np.transpose', (['pwm'], {}), '(pwm)\n', (10079, 10084), True, 'import numpy as np\n'), ((11127, 11146), 'numpy.array', 'np.array', (['pwm_freqs'], {}), '(pwm_freqs)\n', (11135, 11146), True, 'import numpy as np\n'), ((16161, 16193), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['filter_seqs'], {}), '(filter_seqs)\n', (16180, 16193), False, 'from sklearn import preprocessing\n'), ((17836, 17871), 'numpy.swapaxes', 'np.swapaxes', (['filter_outs_mean', '(2)', '(1)'], {}), '(filter_outs_mean, 2, 1)\n', (17847, 17871), True, 'import numpy as np\n'), ((17928, 17960), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['filter_seqs'], {}), '(filter_seqs)\n', (17947, 17960), False, 'from sklearn import preprocessing\n'), ((22080, 22101), 'numpy.ravel', 'np.ravel', (['filter_outs'], {}), '(filter_outs)\n', (22088, 22101), True, 'import numpy as np\n'), ((23363, 23403), 'subprocess.call', 'subprocess.call', (['weblogo_cmd'], {'shell': '(True)'}), '(weblogo_cmd, shell=True)\n', (23378, 23403), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((5263, 5289), 
'numpy.squeeze', 'np.squeeze', (['filter_weights'], {}), '(filter_weights)\n', (5273, 5289), True, 'import numpy as np\n'), ((8287, 8317), 'numpy.ravel', 'np.ravel', (['filter_outs[:, :, f]'], {}), '(filter_outs[:, :, f])\n', (8295, 8317), True, 'import numpy as np\n'), ((15289, 15350), 'scipy.stats.spearmanr', 'spearmanr', (['filter_outs_seq[:, fi]', 'seq_targets[:num_seqs, ti]'], {}), '(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti])\n', (15298, 15350), False, 'from scipy.stats import spearmanr\n'), ((4289, 4300), 'time.time', 'time.time', ([], {}), '()\n', (4298, 4300), False, 'import copy, os, pdb, random, shutil, subprocess, time\n'), ((10281, 10299), 'numpy.log2', 'np.log2', (['bg_pwm[j]'], {}), '(bg_pwm[j])\n', (10288, 10299), True, 'import numpy as np\n'), ((10325, 10353), 'numpy.log2', 'np.log2', (['(pseudoc + pwm[i][j])'], {}), '(pseudoc + pwm[i][j])\n', (10332, 10353), True, 'import numpy as np\n'), ((19977, 20012), 'numpy.max', 'np.max', (['param_matrix[:, trim_start]'], {}), '(param_matrix[:, trim_start])\n', (19983, 20012), True, 'import numpy as np\n'), ((20024, 20059), 'numpy.min', 'np.min', (['param_matrix[:, trim_start]'], {}), '(param_matrix[:, trim_start])\n', (20030, 20059), True, 'import numpy as np\n'), ((20173, 20206), 'numpy.max', 'np.max', (['param_matrix[:, trim_end]'], {}), '(param_matrix[:, trim_end])\n', (20179, 20206), True, 'import numpy as np\n'), ((20209, 20242), 'numpy.min', 'np.min', (['param_matrix[:, trim_end]'], {}), '(param_matrix[:, trim_end])\n', (20215, 20242), True, 'import numpy as np\n'), ((10779, 10798), 'numpy.array', 'np.array', (['([1.0] * 4)'], {}), '([1.0] * 4)\n', (10787, 10798), True, 'import numpy as np\n'), ((10954, 10974), 'numpy.array', 'np.array', (['([0.25] * 4)'], {}), '([0.25] * 4)\n', (10962, 10974), True, 'import numpy as np\n')]
|
"""
Functions for testing independence of several distributions.
The functions in this module provide methods for testing if
the samples generated from two random vectors are independent.
"""
import numpy as np
import scipy.stats
from . import _dcor_internals, _hypothesis
from ._dcor import u_distance_correlation_sqr
from ._utils import _random_state_init, _transform_to_2d
def distance_covariance_test(
x,
y,
*,
num_resamples=0,
exponent=1,
random_state=None,
n_jobs=1,
):
"""
Test of distance covariance independence.
Compute the test of independence based on the distance
covariance, for two random vectors.
The test is a permutation test where the null hypothesis is that the two
random vectors are independent.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
num_resamples: int
        Number of permutation resamples to take in the permutation test.
random_state: {None, int, array_like, numpy.random.RandomState}
Random state to generate the permutations.
Returns
-------
HypothesisTest
Results of the hypothesis test.
See Also
--------
distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> dcor.independence.distance_covariance_test(a, a)
HypothesisTest(p_value=1.0, statistic=208.0)
>>> dcor.independence.distance_covariance_test(a, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=11.75323056...)
>>> dcor.independence.distance_covariance_test(b, b)
HypothesisTest(p_value=1.0, statistic=1.3604610...)
>>> dcor.independence.distance_covariance_test(a, b,
... num_resamples=5, random_state=0)
HypothesisTest(p_value=0.5, statistic=11.7532305...)
>>> dcor.independence.distance_covariance_test(a, b,
... num_resamples=5, random_state=13)
HypothesisTest(p_value=0.3333333..., statistic=11.7532305...)
>>> dcor.independence.distance_covariance_test(a, a,
... num_resamples=7, random_state=0)
HypothesisTest(p_value=0.125, statistic=208.0)
"""
x = _transform_to_2d(x)
y = _transform_to_2d(y)
_dcor_internals._check_same_n_elements(x, y)
random_state = _random_state_init(random_state)
# Compute U-centered matrices
u_x = _dcor_internals._distance_matrix_generic(
x,
centering=_dcor_internals.double_centered,
exponent=exponent)
u_y = _dcor_internals._distance_matrix_generic(
y,
centering=_dcor_internals.double_centered,
exponent=exponent)
# Use the dcov statistic
def statistic_function(distance_matrix):
return u_x.shape[0] * _dcor_internals.mean_product(
distance_matrix, u_y)
return _hypothesis._permutation_test_with_sym_matrix(
u_x,
statistic_function=statistic_function,
num_resamples=num_resamples,
random_state=random_state,
n_jobs=n_jobs)
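# Editor's note (not part of the original module): the permutation statistic
# defined above appears to be n times the mean elementwise product of the two
# double-centered distance matrices, i.e. the classical n * V_n^2(x, y)
# statistic; this is an editorial reading of mean_product, not a statement
# taken from the source.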
def partial_distance_covariance_test(
x,
y,
z,
*,
num_resamples=0,
exponent=1,
random_state=None,
n_jobs=1,
):
"""
Test of partial distance covariance independence.
Compute the test of independence based on the partial distance
covariance, for two random vectors conditioned on a third.
The test is a permutation test where the null hypothesis is that the first
two random vectors are independent given the third one.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Observed random vector. The columns correspond with the individual
random variables while the rows are individual instances of the random
vector.
num_resamples: int
        Number of permutation resamples to take in the permutation test.
random_state: {None, int, array_like, numpy.random.RandomState}
Random state to generate the permutations.
Returns
-------
HypothesisTest
Results of the hypothesis test.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> c = np.array([[1000, 0, 0, 1000],
... [0, 1000, 1000, 1000],
... [1000, 1000, 1000, 1000],
... [1000, 1000, 0, 1000]])
>>> dcor.independence.partial_distance_covariance_test(a, a, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=142.6664416...)
>>> dcor.independence.partial_distance_covariance_test(a, b, c)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=7.2690070...e-15)
>>> dcor.independence.partial_distance_covariance_test(b, b, c)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=2.2533380...e-30)
>>> dcor.independence.partial_distance_covariance_test(a, b, c,
... num_resamples=5, random_state=0)
HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)
>>> dcor.independence.partial_distance_covariance_test(a, b, c,
... num_resamples=5, random_state=13)
HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)
>>> dcor.independence.partial_distance_covariance_test(a, c, b,
... num_resamples=7, random_state=0)
HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12)
"""
random_state = _random_state_init(random_state)
# Compute U-centered matrices
u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent)
u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent)
u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent)
# Compute projections
proj = _dcor_internals.u_complementary_projection(u_z)
p_xz = proj(u_x)
p_yz = proj(u_y)
# Use the pdcor statistic
def statistic_function(distance_matrix):
return u_x.shape[0] * _dcor_internals.u_product(
distance_matrix, p_yz)
return _hypothesis._permutation_test_with_sym_matrix(
p_xz,
statistic_function=statistic_function,
num_resamples=num_resamples,
random_state=random_state,
n_jobs=n_jobs)
def distance_correlation_t_statistic(x, y):
"""
    Transformation of the bias-corrected version of distance correlation used
    in :func:`distance_correlation_t_test`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
T statistic.
See Also
--------
distance_correlation_t_test
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_statistic(a, a)
inf
>>> dcor.independence.distance_correlation_t_statistic(a, b)
... # doctest: +ELLIPSIS
-0.4430164...
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_statistic(b, b)
inf
"""
bcdcor = u_distance_correlation_sqr(x, y)
n = x.shape[0]
v = n * (n - 3) / 2
return np.sqrt(v - 1) * bcdcor / np.sqrt(1 - bcdcor**2)
def distance_correlation_t_test(x, y):
"""
    Test of independence for high-dimensional data based on convergence to a
    Student t distribution. The null hypothesis is that the two random vectors
    are independent.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
HypothesisTest
Results of the hypothesis test.
See Also
--------
distance_correlation_t_statistic
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_test(a, a)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=0.0, statistic=inf)
>>> dcor.independence.distance_correlation_t_test(a, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...)
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_test(b, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=0.0, statistic=inf)
"""
t_test = distance_correlation_t_statistic(x, y)
n = x.shape[0]
v = n * (n - 3) / 2
df = v - 1
p_value = 1 - scipy.stats.t.cdf(t_test, df=df)
return _hypothesis.HypothesisTest(p_value=p_value, statistic=t_test)
|
[
"numpy.sqrt"
] |
[((9268, 9292), 'numpy.sqrt', 'np.sqrt', (['(1 - bcdcor ** 2)'], {}), '(1 - bcdcor ** 2)\n', (9275, 9292), True, 'import numpy as np\n'), ((9242, 9256), 'numpy.sqrt', 'np.sqrt', (['(v - 1)'], {}), '(v - 1)\n', (9249, 9256), True, 'import numpy as np\n')]
|
#coding=utf-8
# Gender recognition
import cv2
from keras.models import load_model
import numpy as np
import chineseText
img = cv2.imread("img/gather.png")
face_classifier = cv2.CascadeClassifier(
"d:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(
gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140))
gender_classifier = load_model(
"classifier/gender_models/simple_CNN.81-0.96.hdf5")
gender_labels = {0: '女', 1: '男'}  # 0: female ('女'), 1: male ('男')
color = (255, 255, 255)
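# For each detected face: crop with a margin, resize to the 48x48 input of the
# CNN, scale to [0, 1], predict the gender label and draw it on the image.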
for (x, y, w, h) in faces:
face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]
face = cv2.resize(face, (48, 48))
face = np.expand_dims(face, 0)
face = face / 255.0
gender_label_arg = np.argmax(gender_classifier.predict(face))
gender = gender_labels[gender_label_arg]
    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
    img = chineseText.cv2ImgAddText(img, gender, x + w, y, color, 30)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"keras.models.load_model",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"numpy.expand_dims",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.imread",
"chineseText.cv2ImgAddText"
] |
[((113, 141), 'cv2.imread', 'cv2.imread', (['"""img/gather.png"""'], {}), "('img/gather.png')\n", (123, 141), False, 'import cv2\n'), ((160, 299), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""d:\\\\Python36\\\\Lib\\\\site-packages\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml"""'], {}), "(\n 'd:\\\\Python36\\\\Lib\\\\site-packages\\\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml'\n )\n", (181, 299), False, 'import cv2\n'), ((296, 333), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (308, 333), False, 'import cv2\n'), ((460, 522), 'keras.models.load_model', 'load_model', (['"""classifier/gender_models/simple_CNN.81-0.96.hdf5"""'], {}), "('classifier/gender_models/simple_CNN.81-0.96.hdf5')\n", (470, 522), False, 'from keras.models import load_model\n'), ((1010, 1034), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (1020, 1034), False, 'import cv2\n'), ((1035, 1049), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1046, 1049), False, 'import cv2\n'), ((1050, 1073), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1071, 1073), False, 'import cv2\n'), ((685, 711), 'cv2.resize', 'cv2.resize', (['face', '(48, 48)'], {}), '(face, (48, 48))\n', (695, 711), False, 'import cv2\n'), ((723, 746), 'numpy.expand_dims', 'np.expand_dims', (['face', '(0)'], {}), '(face, 0)\n', (737, 746), True, 'import numpy as np\n'), ((886, 938), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + h, y + w)', 'color', '(2)'], {}), '(img, (x, y), (x + h, y + w), color, 2)\n', (899, 938), False, 'import cv2\n'), ((949, 1008), 'chineseText.cv2ImgAddText', 'chineseText.cv2ImgAddText', (['img', 'gender', '(x + h)', 'y', 'color', '(30)'], {}), '(img, gender, x + h, y, color, 30)\n', (974, 1008), False, 'import chineseText\n')]
|
"""
Project for Udacity Danaodgree in Deep Reinforcement Learning
This script train an agent to navigate (and collect bananas!) in a large, square world.
A reward of +1 is provided for collecting a yellow banana, and a reward of -1 is provided for collecting a blue banana. Thus, the goal of your agent is to collect as many yellow bananas as possible while avoiding blue bananas.
The state space has 37 dimensions and contains the agent's velocity, along with ray-based perception of objects around the agent's forward direction. Given this information, the agent has to learn how to best select actions. Four discrete actions are available, corresponding to:
0 - move forward.
1 - move backward.
2 - turn left.
3 - turn right.
The task is episodic, and in order to solve the environment, your agent must get an average score of +13 over 100 consecutive episodes.
"""
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
from dqn_agent import Agent
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
"""
Unity environment configuration
Mac: "path/to/Banana.app"
Windows (x86): "path/to/Banana_Windows_x86/Banana.exe"
Windows (x86_64): "path/to/Banana_Windows_x86_64/Banana.exe"
Linux (x86): "path/to/Banana_Linux/Banana.x86"
Linux (x86_64): "path/to/Banana_Linux/Banana.x86_64"
Linux (x86, headless): "path/to/Banana_Linux_NoVis/Banana.x86"
Linux (x86_64, headless): "path/to/Banana_Linux_NoVis/Banana.x86_64"
"""
# start Unity environment
env = UnityEnvironment(file_name="Banana.app")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name]
action_size = brain.vector_action_space_size
state_size = len(env_info.vector_observations[0])
# initialize agent
agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device)
def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
# reset environment
env_info = env.reset(train_mode=True)[brain_name]
# get initial state
state = env_info.vector_observations[0]
# set initial score
score = 0
while True:
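            # Epsilon-greedy action, environment step, agent update, then move to the next state.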
action = agent.act(state, eps)
env_info = env.step(action)[brain_name]
next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0]
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        if np.mean(scores_window) >= 14:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
train()
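
# Optional sanity check (a minimal sketch, assuming Agent.act(state, eps) as used
# above and that qnetwork_local is the checkpointed online network): reload the
# saved weights and run one greedy episode in the same environment.
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
env_info = env.reset(train_mode=False)[brain_name]
state = env_info.vector_observations[0]
score = 0
while True:
    action = agent.act(state, 0.0)  # epsilon = 0 -> greedy policy
    env_info = env.step(action)[brain_name]
    state = env_info.vector_observations[0]
    score += env_info.rewards[0]
    if env_info.local_done[0]:
        break
print('Greedy-policy score: {:.2f}'.format(score))
env.close()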
|
[
"numpy.mean",
"collections.deque",
"dqn_agent.Agent",
"unityagents.UnityEnvironment",
"torch.cuda.is_available"
] |
[((1524, 1564), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': '"""Banana.app"""'}), "(file_name='Banana.app')\n", (1540, 1564), False, 'from unityagents import UnityEnvironment\n'), ((1829, 1905), 'dqn_agent.Agent', 'Agent', ([], {'state_size': 'state_size', 'action_size': 'action_size', 'seed': '(0)', 'device': 'device'}), '(state_size=state_size, action_size=action_size, seed=0, device=device)\n', (1834, 1905), False, 'from dqn_agent import Agent\n'), ((2429, 2446), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (2434, 2446), False, 'from collections import deque\n'), ((1038, 1063), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1061, 1063), False, 'import torch\n'), ((3653, 3675), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (3660, 3675), True, 'import numpy as np\n'), ((3477, 3499), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (3484, 3499), True, 'import numpy as np\n'), ((3617, 3639), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (3624, 3639), True, 'import numpy as np\n'), ((3785, 3807), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (3792, 3807), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Train seq-to-seq model on random supervised training tasks."""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
import functools
import json
import os
import random
import sys
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import linen as nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer import decode
from latent_programmer import models as base_models
from latent_programmer.decomposition_transformer_attention import decomposition_models as models
from latent_programmer.decomposition_transformer_attention import input_pipeline
from latent_programmer.tasks.robust_fill import dsl
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
sys.path.append('../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', 0, 'Fixed random seed for training.')
flags.DEFINE_float('lr', 1e-3, 'Learning rate.')
flags.DEFINE_float('weight_decay', 1e-1,
'Decay factor for AdamW-style weight decay.')
flags.DEFINE_integer('embedding_dim', 256, 'Embedding dimension.')
flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.')
flags.DEFINE_integer('num_heads', 4, 'Number of Transformer heads.')
flags.DEFINE_integer('num_layers', 3, 'Number of layers.')
flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding for prediction?')
flags.DEFINE_string('dataset_filepattern', None,
'Filepattern for TFRecord dataset.')
flags.DEFINE_integer('per_device_batch_size', 16,
'Number of program tasks in a batch.')
flags.DEFINE_integer('num_strings_per_task', 4,
'Number of input/output strings per task.')
flags.DEFINE_integer('max_program_length', 100,
'Maximum number of tokens in program.')
flags.DEFINE_integer('max_characters', 120,
'Maximum number of characters in input/output strings.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
flags.DEFINE_integer('num_train_steps', 2000000, 'Number of training steps.')
flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.')
flags.DEFINE_integer('log_freq', 1000, 'Number of steps between training logs.')
flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between eval.')
flags.DEFINE_integer('predict_freq', 50000,
'Number of steps between prediction (beam search).')
flags.DEFINE_integer('checkpoint_freq', 50000,
'Number of steps between checkpoint saves.')
flags.DEFINE_integer('finetune_start_step', -1,
'Step the initial checkpoint should start at for '
'finetuning, or -1 if not finetuning.')
flags.DEFINE_bool('restore_checkpoints', True,
'Whether to restore from existing model checkpoints.')
flags.DEFINE_string('attention_mask_type', 'bos_full_attention',
'The kind of attention mask to use. Options are: baseline, '
'bos_to_bos, bos_full_attention')
flags.DEFINE_bool('use_relative_attention', True,
                  'Whether to use relative positional embeddings.')
flags.DEFINE_bool('bos_special_attention', False,
'Whether to use special relative attention computation for '
'BOS tokens.')
_internal = False
if not _internal:
flags.DEFINE_string('xm_parameters', None,
                      'String specifying hyperparameter search.')
def create_learning_rate_scheduler(
base_learning_rate=0.5,
factors='constant * linear_warmup * rsqrt_normalized_decay',
warmup_steps=16000,
decay_factor=0.5,
steps_per_decay=50000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
base_learning_rate: float, the starting constant for the lr schedule.
factors: a string with factors separated by '*' that defines the schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
steps_per_cycle: Steps per cycle when using cosine decay.
Returns:
    A function learning_rate(step): int -> float, the step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
def compute_weighted_cross_entropy(logits, targets, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
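  # Per-position cross entropy against one-hot targets; `weights` (typically a
  # padding mask) zeroes out padded positions and supplies the normalizing factor.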
onehot_targets = common_utils.onehot(targets, logits.shape[-1])
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
acc = acc * weights
normalizing_factor = weights.sum()
return acc.sum(), normalizing_factor
def compute_metrics(logits, targets, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
acc, _ = compute_weighted_accuracy(logits, targets, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(optimizer,
inputs,
outputs,
programs,
learning_rate_fn,
config,
dropout_rng):
"""Train on batch of program tasks."""
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
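  # Mask padding (program token id 0) out of the loss and metrics.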
weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
def loss_fn(params):
"""Loss function used for training."""
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
programs,
rngs={'dropout': dropout_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
# Get metrics.
metrics = compute_metrics(logits, programs, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(params, inputs, outputs, programs, eos_token, config):
"""Collect metrics for evaluation during training."""
weights = jnp.where(
jnp.logical_and(programs > 0,
jnp.logical_and(programs != config.base_config.bos_token,
programs != eos_token)),
1, 0).astype(jnp.float32)
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params}, inputs, outputs, programs)
return compute_metrics(logits, programs, weights)
def initialize_cache(inputs, outputs, programs, max_decode_len, config):
"""Initialize a cache for a given input shape and max decode length."""
target_shape = (programs.shape[0], max_decode_len)
dtype = config.base_config.dtype
initial_variables = models.DecomposeAttentionTransformer(config).init(
jax.random.PRNGKey(0),
jnp.ones(inputs.shape, dtype),
jnp.ones(outputs.shape, dtype),
jnp.ones(target_shape, dtype))
return initial_variables['cache']
def predict_step(params,
inputs,
outputs,
cache,
beam_size,
eos_token,
max_decode_len,
config,
slow_decode=True):
"""Predict translation with fast decoding beam search on a batch."""
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
flat_encoded = decode.flat_batch_beam_expand(
models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
method=models.DecomposeAttentionTransformer.encode),
beam_size)
encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
flat_encoded_padding_mask = decode.flat_batch_beam_expand(
encoded_padding_mask, beam_size)
if slow_decode:
def tokens_ids_to_logits(flat_ids):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
{'params': params},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
method=models.DecomposeAttentionTransformer.decode)
return flat_logits
else:
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.DecomposeAttentionTransformer(
config=config).apply(
{'params': params, 'cache': flat_cache},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
mutable=['cache'],
method=models.DecomposeAttentionTransformer.decode)
new_flat_cache = new_vars['cache']
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
bos_token=config.base_config.bos_token,
eos_token=eos_token,
max_decode_len=max_decode_len,
slow_decode=slow_decode)
# Beam search returns [n_batch, n_beam, n_length] with beam dimension
# sorted in increasing order of log-probability.
return beam_seqs
# Util functions for prediction
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
tile_dims = [1] * len(x.shape)
tile_dims[0] = batch_pad
return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return x.reshape((n_device * n_batch,) + tuple(remaining_dims))
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree's leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.host_id].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)
def pre_pmap(xs):
return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)
def post_pmap(xs):
return jax.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
"""Evaluate predicted program beams."""
best_p, best_score = None, -1
# predicted shape [beam_size, length]
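  # Beams are ordered by increasing log-probability, so iterate in reverse to
  # try the most likely program first.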
for beam in predicted[::-1]:
try:
p = parse_beam_fn(beam)
p_outs = [p(inp) for inp in inputs]
score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
if score > best_score:
best_p, best_score = p, score
except: # pylint: disable=bare-except
pass
if best_score >= len(inputs): # Found solution.
break
return best_p, best_score
def shorten(key):
splits = key.split('_')
return ''.join(s[0] for s in splits)
def main(_):
tf.enable_v2_behavior()
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
# BOS special attention only makes sense if we are using relative attention
# and it's not the baseline.
if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or
FLAGS.attention_mask_type == 'baseline'):
raise ValueError(
"bos_special_attention doesn't work when use_relative_attention={} and "
'attention_mask_type={}'.format(FLAGS.use_relative_attention,
FLAGS.attention_mask_type))
if not gfile.isdir(FLAGS.save_dir):
gfile.makedirs(FLAGS.save_dir)
hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr)
  # Get hyperparameters.
if FLAGS.xm_parameters:
for key, value in json.loads(FLAGS.xm_parameters).items():
if key not in hparam_str_dict:
hparam_str_dict[key] = value
hparam_str = ','.join(['%s=%s' % (shorten(k), str(hparam_str_dict[k]))
for k in sorted(hparam_str_dict.keys())])
# Number of local devices for this host.
n_devices = jax.local_device_count()
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.save_dir, 'tb', hparam_str))
batch_size = FLAGS.per_device_batch_size * n_devices
io_shape = (FLAGS.per_device_batch_size,
FLAGS.num_strings_per_task,
FLAGS.max_characters)
program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length)
# Setup DSL
# ---------------------------------------------------------------------------
# Build token tables.
id_char_table = {i+1: char for (i, char) in enumerate(dsl.CHARACTER)}
char_id_table = {char: id for id, char in id_char_table.items()}
id_token_table, token_id_table = dsl_tokens.build_token_tables()
io_vocab_size = len(char_id_table) + 1 # For padding.
program_vocab_size = len(token_id_table) + 1
bos_token = token_id_table[dsl.BOS]
eos_token = token_id_table[dsl.EOS]
# Parse io and program token sequences (for eval).
def decode_io(inputs, outputs):
"""Decode io examples tokens."""
def decode_str(s):
"""Decode string tokens."""
return ''.join([id_char_table[c_id] for c_id in s if c_id > 0])
inps, outs = [], []
for inp, out in zip(inputs, outputs):
inps.append(decode_str(inp))
outs.append(decode_str(out))
return inps, outs
def decode_program(program):
"""Decode program tokens."""
program = program[:np.argmax(program == eos_token) + 1].astype(np.int32)
program = program[program != bos_token]
try:
return dsl.decode_program(program.tolist(), id_token_table)
except: # pylint: disable=bare-except
return None # Program does not compile.
# Load Dataset
# ---------------------------------------------------------------------------
logging.info('Initializing dataset.')
if not FLAGS.dataset_filepattern:
raise ValueError('Must specify filepattern to dataset.')
# Training dataset.
logging.info('Loading dataset from %s', FLAGS.dataset_filepattern)
padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:])
logging.info('padded_shapes: %s', padded_shapes)
dataset = input_pipeline.create_dataset_from_tf_record(
FLAGS.dataset_filepattern, token_id_table, char_id_table)
dataset = dataset.padded_batch(
batch_size,
padded_shapes=padded_shapes,
drop_remainder=True)
# Split evaluation and training.
eval_ds = dataset.take(FLAGS.num_eval_steps)
# Decrease batch of predict dataset to handle beam search.
predict_ds = eval_ds.unbatch().padded_batch(
int(np.ceil(batch_size / 10)),
padded_shapes=padded_shapes)
train_ds = dataset.skip(FLAGS.num_eval_steps).repeat()
train_iter = train_ds.as_numpy_iterator()
# Build Model and Optimizer
# ---------------------------------------------------------------------------
use_dropout = False
base_config = base_models.TransformerConfig(
vocab_size=io_vocab_size,
output_vocab_size=program_vocab_size,
shift=True,
emb_dim=FLAGS.embedding_dim,
num_heads=FLAGS.num_heads,
num_layers=FLAGS.num_layers,
qkv_dim=FLAGS.embedding_dim,
mlp_dim=FLAGS.hidden_dim,
max_len=max(FLAGS.max_characters, FLAGS.max_program_length),
use_relative_attention=FLAGS.use_relative_attention,
deterministic=not use_dropout,
decode=False,
bos_token=bos_token)
train_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config,
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
eval_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config.replace(deterministic=not use_dropout),
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
predict_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config.replace(
shift=False, deterministic=not use_dropout,
decode=not FLAGS.slow_decode),
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
rng = jax.random.PRNGKey(FLAGS.seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = jax.random.split(rng)
m = models.DecomposeAttentionTransformer(eval_config)
initial_variables = jax.jit(m.init)(
{'params': init_rng, 'dropout': init_rng},
jnp.ones(io_shape, jnp.float32),
jnp.ones(io_shape, jnp.float32),
jnp.ones(program_shape, jnp.float32))
optimizer_def = optim.Adam(
FLAGS.lr,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=FLAGS.weight_decay)
optimizer = optimizer_def.create(initial_variables['params'])
del initial_variables # Don't keep a copy of the initial model.
start_step = 0
if FLAGS.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(
os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
logging.info('Found model checkpointed at step %d.', start_step)
if FLAGS.finetune_start_step > 0:
logging.info('Checking that start_step (%s) == finetune_start_step (%s)',
start_step, FLAGS.finetune_start_step)
assert start_step == FLAGS.finetune_start_step
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
# TODO(jxihong): Implement fast decoding.
assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.'
if FLAGS.finetune_start_step <= 0:
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.lr)
else:
# Constant LR for finetuning.
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.lr,
factors='constant')
p_train_step = jax.pmap(
functools.partial(
train_step,
learning_rate_fn=learning_rate_fn,
config=train_config),
axis_name='batch')
p_eval_step = jax.pmap(
functools.partial(eval_step,
eos_token=eos_token,
config=eval_config),
axis_name='batch')
p_init_cache = jax.pmap(
functools.partial(
initialize_cache,
max_decode_len=FLAGS.max_program_length,
config=predict_config),
axis_name='batch')
p_pred_step = jax.pmap(
functools.partial(
predict_step,
eos_token=eos_token,
max_decode_len=FLAGS.max_program_length,
config=predict_config,
slow_decode=FLAGS.slow_decode),
axis_name='batch',
static_broadcasted_argnums=(4,))
# Main Train Loop
# ---------------------------------------------------------------------------
dropout_rng = jax.random.split(rng, jax.local_device_count())
del rng
metrics_all = []
tick = time.time()
for step in range(start_step, FLAGS.num_train_steps):
inputs, outputs, programs = common_utils.shard(next(train_iter))
optimizer, metrics, dropout_rng = p_train_step(
optimizer, inputs, outputs, programs, dropout_rng=dropout_rng)
metrics_all.append(metrics)
is_last_step = step == FLAGS.num_train_steps - 1
# Save a Checkpoint
if (step % FLAGS.checkpoint_freq == 0 and step > 0) or is_last_step:
if jax.host_id() == 0:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(
os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str),
jax_utils.unreplicate(optimizer),
step)
# Periodic metric handling.
# Training Metrics
if (step and step % FLAGS.log_freq == 0) or is_last_step:
logging.info('Gathering training metrics.')
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(
lambda x: x / denominator, # pylint: disable=cell-var-from-loop
metrics_sums)
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
if jax.host_id() == 0:
logging.info('Train in step: %d, loss: %.4f', step, summary['loss'])
tock = time.time()
steps_per_sec = FLAGS.log_freq / (tock - tick)
tick = tock
summary_writer.scalar('train/steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar('train/' + key, val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Evaluation Metrics
if (step and step % FLAGS.eval_freq == 0) or is_last_step:
logging.info('Gathering evaluation metrics.')
t_evaluation_start = time.time()
eval_metrics = []
for batches in eval_ds.as_numpy_iterator():
inputs, outputs, programs = common_utils.shard(batches)
metrics = p_eval_step(optimizer.target, inputs, outputs, programs)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
if jax.host_id() == 0:
logging.info('Evaluation time: %.4f s step %d, loss: %.4f.',
time.time()-t_evaluation_start, step, eval_summary['loss'])
for key, val in eval_summary.items():
summary_writer.scalar('eval/' + key, val, step)
summary_writer.flush()
# Beam search metrics.
if (step and step % FLAGS.predict_freq == 0) or is_last_step:
logging.info('Gathering beam search metrics.')
for beam_size in [1, 5, 10, 20, 50]:
t_inference_start = time.time()
pred_acc = 0
pred_denominator = 0
ios, targets, predictions, top_of_beams = [], [], [], []
for batches in predict_ds.as_numpy_iterator():
pred_batch = batches
# Handle final odd-sized batch by padding instead of dropping it.
cur_pred_batch_size = pred_batch[0].shape[0]
if cur_pred_batch_size % n_devices:
padded_size = int(
np.ceil(cur_pred_batch_size / n_devices) * n_devices)
# pylint: disable=cell-var-from-loop
pred_batch = jax.tree_map(
lambda x: pad_examples(x, padded_size), pred_batch)
inputs, outputs, programs = common_utils.shard(pred_batch)
cache = (p_init_cache(inputs, outputs, programs)
if not FLAGS.slow_decode else None)
predicted = p_pred_step(optimizer.target, inputs, outputs, cache,
beam_size)
predicted = tohost(predicted)
inputs, outputs, programs = map(tohost, (inputs, outputs, programs))
pred_denominator += programs.shape[0]
for i, beams in enumerate(predicted):
inps, outs = decode_io(inputs[i], outputs[i])
p, p_score = eval_predicted(
beams, inps, outs, parse_beam_fn=decode_program)
if p_score >= len(inps):
pred_acc += 1
ios.append(' ; '.join(map(str, zip(inps, outs))))
targets.append(decode_program(programs[i]).to_string())
try:
predictions.append(p.to_string())
except: # pylint: disable=bare-except
predictions.append('Did not compile')
logging.info('ios: %s', ios[-1])
logging.info('target: %s', targets[-1])
beams_log = []
for beam in beams:
try:
beams_log.append(decode_program(beam).to_string())
except: # pylint: disable=bare-except
beams_log.append('Did not compile')
logging.info('predicted beam: %s', '\n'.join(beams_log))
top_of_beam = []
for index, beam in enumerate(beams[:-5:-1]):
try:
decoded_program = decode_program(beam).to_string()
except: # pylint: disable=bare-except
decoded_program = 'Did not compile'
top_of_beam.append('index: {}, decoded: {}, tokens: {}'.format(
index, decoded_program, beam))
top_of_beams.append('\n\n'.join(top_of_beam))
all_pred_acc, all_pred_denominator = per_host_sum_pmap(
jax.tree_map(np.array, (pred_acc, pred_denominator)))
# Record beam search results as text summaries.
message = []
for n in np.random.choice(np.arange(len(predictions)), 8):
text = (f'ios: {ios[n]}\n\ntarget: {targets[n]}\n\n'
f'predicted: {predictions[n]}\n\n'
f'top of beam:\n\n{top_of_beams[n]}\n\n')
message.append(text)
# Write to tensorboard.
if jax.host_id() == 0:
slow_or_fast = 'slow' if FLAGS.slow_decode else 'fast'
logging.info(
'Prediction time, %s (beam %d): %.4f s, step %d, score %.4f',
slow_or_fast, beam_size, time.time() - t_inference_start, step,
all_pred_acc / all_pred_denominator)
summary_writer.scalar(
'predict-{}/score-{}'.format(slow_or_fast, beam_size),
all_pred_acc / all_pred_denominator, step)
summary_writer.text('samples-{}'.format(beam_size),
'\n------\n'.join(message), step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
|
[
"flax.training.common_utils.shard",
"flax.optim.Adam",
"tensorflow.compat.v2.random.set_seed",
"latent_programmer.tasks.robust_fill.tokens.build_token_tables",
"absl.logging.info",
"jax.tree_map",
"latent_programmer.decode.beam_search",
"flax.training.common_utils.onehot",
"jax.jit",
"sys.path.append",
"absl.flags.DEFINE_float",
"jax.random.split",
"jax.random.PRNGKey",
"absl.flags.DEFINE_boolean",
"absl.app.run",
"jax.numpy.asarray",
"flax.jax_utils.replicate",
"flax.jax_utils.unreplicate",
"numpy.random.seed",
"jax.value_and_grad",
"numpy.tile",
"numpy.ceil",
"json.loads",
"jax.lax.psum",
"jax.numpy.where",
"jax.numpy.cos",
"jax.local_device_count",
"jax.devices",
"jax.numpy.broadcast_to",
"latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer",
"numpy.argmax",
"jax.lax.pmean",
"jax.numpy.ones",
"flax.training.common_utils.get_metrics",
"absl.flags.DEFINE_string",
"time.time",
"jax.host_id",
"tensorflow.compat.v2.enable_v2_behavior",
"latent_programmer.decomposition_transformer_attention.input_pipeline.create_dataset_from_tf_record",
"latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformerConfig",
"jax.numpy.minimum",
"jax.numpy.logical_and",
"absl.flags.DEFINE_bool",
"absl.flags.DEFINE_integer",
"jax.numpy.sqrt",
"os.path.join",
"jax.numpy.exp",
"random.seed",
"jax.numpy.maximum",
"collections.defaultdict",
"latent_programmer.decode.flat_batch_beam_expand",
"functools.partial",
"flax.linen.log_softmax",
"jax.numpy.argmax"
] |
[((1590, 1615), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (1605, 1615), False, 'import sys\n'), ((1658, 1724), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', '(0)', '"""Fixed random seed for training."""'], {}), "('seed', 0, 'Fixed random seed for training.')\n", (1678, 1724), False, 'from absl import flags\n'), ((1725, 1774), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""lr"""', '(0.001)', '"""Learning rate."""'], {}), "('lr', 0.001, 'Learning rate.')\n", (1743, 1774), False, 'from absl import flags\n'), ((1774, 1863), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""weight_decay"""', '(0.1)', '"""Decay factor for AdamW-style weight decay."""'], {}), "('weight_decay', 0.1,\n 'Decay factor for AdamW-style weight decay.')\n", (1792, 1863), False, 'from absl import flags\n'), ((1880, 1946), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""embedding_dim"""', '(256)', '"""Embedding dimension."""'], {}), "('embedding_dim', 256, 'Embedding dimension.')\n", (1900, 1946), False, 'from absl import flags\n'), ((1947, 2007), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""hidden_dim"""', '(512)', '"""Hidden dimension."""'], {}), "('hidden_dim', 512, 'Hidden dimension.')\n", (1967, 2007), False, 'from absl import flags\n'), ((2008, 2065), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_heads"""', '(4)', '"""Number of layers."""'], {}), "('num_heads', 4, 'Number of layers.')\n", (2028, 2065), False, 'from absl import flags\n'), ((2066, 2135), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_layers"""', '(3)', '"""Number of Transformer heads."""'], {}), "('num_layers', 3, 'Number of Transformer heads.')\n", (2086, 2135), False, 'from absl import flags\n'), ((2136, 2214), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""slow_decode"""', '(True)', '"""Use slow decoding for prediction?"""'], {}), "('slow_decode', True, 'Use slow decoding for prediction?')\n", (2156, 2214), False, 'from absl import flags\n'), ((2216, 2305), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset_filepattern"""', 'None', '"""Filepattern for TFRecord dataset."""'], {}), "('dataset_filepattern', None,\n 'Filepattern for TFRecord dataset.')\n", (2235, 2305), False, 'from absl import flags\n'), ((2322, 2414), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""per_device_batch_size"""', '(16)', '"""Number of program tasks in a batch."""'], {}), "('per_device_batch_size', 16,\n 'Number of program tasks in a batch.')\n", (2342, 2414), False, 'from absl import flags\n'), ((2432, 2527), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_strings_per_task"""', '(4)', '"""Number of input/output strings per task."""'], {}), "('num_strings_per_task', 4,\n 'Number of input/output strings per task.')\n", (2452, 2527), False, 'from absl import flags\n'), ((2545, 2636), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_program_length"""', '(100)', '"""Maximum number of tokens in program."""'], {}), "('max_program_length', 100,\n 'Maximum number of tokens in program.')\n", (2565, 2636), False, 'from absl import flags\n'), ((2654, 2758), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_characters"""', '(120)', '"""Maximum number of characters in input/output strings."""'], {}), "('max_characters', 120,\n 'Maximum number of characters in input/output strings.')\n", (2674, 2758), False, 'from absl import flags\n'), ((2777, 2847), 'absl.flags.DEFINE_string', 
'flags.DEFINE_string', (['"""save_dir"""', 'None', '"""Directory to save results to."""'], {}), "('save_dir', None, 'Directory to save results to.')\n", (2796, 2847), False, 'from absl import flags\n'), ((2848, 2925), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_train_steps"""', '(2000000)', '"""Number of training steps."""'], {}), "('num_train_steps', 2000000, 'Number of training steps.')\n", (2868, 2925), False, 'from absl import flags\n'), ((2926, 2999), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_eval_steps"""', '(10)', '"""Number of evaluation steps."""'], {}), "('num_eval_steps', 10, 'Number of evaluation steps.')\n", (2946, 2999), False, 'from absl import flags\n'), ((3000, 3085), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""log_freq"""', '(1000)', '"""Number of steps between training logs."""'], {}), "('log_freq', 1000, 'Number of steps between training logs.'\n )\n", (3020, 3085), False, 'from absl import flags\n'), ((3081, 3153), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_freq"""', '(2000)', '"""Number of steps between eval."""'], {}), "('eval_freq', 2000, 'Number of steps between eval.')\n", (3101, 3153), False, 'from absl import flags\n'), ((3154, 3254), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""predict_freq"""', '(50000)', '"""Number of steps between prediction (beam search)."""'], {}), "('predict_freq', 50000,\n 'Number of steps between prediction (beam search).')\n", (3174, 3254), False, 'from absl import flags\n'), ((3272, 3367), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""checkpoint_freq"""', '(50000)', '"""Number of steps between checkpoint saves."""'], {}), "('checkpoint_freq', 50000,\n 'Number of steps between checkpoint saves.')\n", (3292, 3367), False, 'from absl import flags\n'), ((3385, 3529), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""finetune_start_step"""', '(-1)', '"""Step the initial checkpoint should start at for finetuning, or -1 if not finetuning."""'], {}), "('finetune_start_step', -1,\n 'Step the initial checkpoint should start at for finetuning, or -1 if not finetuning.'\n )\n", (3405, 3529), False, 'from absl import flags\n'), ((3566, 3671), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""restore_checkpoints"""', '(True)', '"""Whether to restore from existing model checkpoints."""'], {}), "('restore_checkpoints', True,\n 'Whether to restore from existing model checkpoints.')\n", (3583, 3671), False, 'from absl import flags\n'), ((3687, 3852), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""attention_mask_type"""', '"""bos_full_attention"""', '"""The kind of attention mask to use. Options are: baseline, bos_to_bos, bos_full_attention"""'], {}), "('attention_mask_type', 'bos_full_attention',\n 'The kind of attention mask to use. 
Options are: baseline, bos_to_bos, bos_full_attention'\n )\n", (3706, 3852), False, 'from absl import flags\n'), ((3888, 3990), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_relative_attention"""', '(True)', '"""Whether to use relative positonal embeddings."""'], {}), "('use_relative_attention', True,\n 'Whether to use relative positonal embeddings.')\n", (3905, 3990), False, 'from absl import flags\n'), ((4005, 4131), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""bos_special_attention"""', '(False)', '"""Whether to use special relative attention computation for BOS tokens."""'], {}), "('bos_special_attention', False,\n 'Whether to use special relative attention computation for BOS tokens.')\n", (4022, 4131), False, 'from absl import flags\n'), ((4207, 4296), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""xm_parameters"""', 'None', '"""String specifying hyperparamter search."""'], {}), "('xm_parameters', None,\n 'String specifying hyperparamter search.')\n", (4226, 4296), False, 'from absl import flags\n'), ((7127, 7173), 'flax.training.common_utils.onehot', 'common_utils.onehot', (['targets', 'logits.shape[-1]'], {}), '(targets, logits.shape[-1])\n', (7146, 7173), False, 'from flax.training import common_utils\n'), ((8564, 8594), 'jax.lax.psum', 'jax.lax.psum', (['metrics', '"""batch"""'], {}), "(metrics, 'batch')\n", (8576, 8594), False, 'import jax\n'), ((9159, 9188), 'jax.random.split', 'jax.random.split', (['dropout_rng'], {}), '(dropout_rng)\n', (9175, 9188), False, 'import jax\n'), ((9719, 9760), 'jax.value_and_grad', 'jax.value_and_grad', (['loss_fn'], {'has_aux': '(True)'}), '(loss_fn, has_aux=True)\n', (9737, 9760), False, 'import jax\n'), ((9818, 9846), 'jax.lax.pmean', 'jax.lax.pmean', (['grad', '"""batch"""'], {}), "(grad, 'batch')\n", (9831, 9846), False, 'import jax\n'), ((12015, 12077), 'latent_programmer.decode.flat_batch_beam_expand', 'decode.flat_batch_beam_expand', (['encoded_padding_mask', 'beam_size'], {}), '(encoded_padding_mask, beam_size)\n', (12044, 12077), False, 'from latent_programmer import decode\n'), ((13395, 13607), 'latent_programmer.decode.beam_search', 'decode.beam_search', (['inputs', 'cache', 'tokens_ids_to_logits'], {'beam_size': 'beam_size', 'alpha': '(0.6)', 'bos_token': 'config.base_config.bos_token', 'eos_token': 'eos_token', 'max_decode_len': 'max_decode_len', 'slow_decode': 'slow_decode'}), '(inputs, cache, tokens_ids_to_logits, beam_size=beam_size,\n alpha=0.6, bos_token=config.base_config.bos_token, eos_token=eos_token,\n max_decode_len=max_decode_len, slow_decode=slow_decode)\n', (13413, 13607), False, 'from latent_programmer import decode\n'), ((14514, 14543), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (14537, 14543), False, 'import collections\n'), ((14555, 14568), 'jax.devices', 'jax.devices', ([], {}), '()\n', (14566, 14568), False, 'import jax\n'), ((15633, 15656), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (15654, 15656), True, 'import tensorflow.compat.v2 as tf\n'), ((15660, 15690), 'tensorflow.compat.v2.random.set_seed', 'tf.random.set_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (15678, 15690), True, 'import tensorflow.compat.v2 as tf\n'), ((15693, 15719), 'numpy.random.seed', 'np.random.seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (15707, 15719), True, 'import numpy as np\n'), ((15722, 15745), 'random.seed', 'random.seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (15733, 15745), False, 'import random\n'), ((16766, 16790), 
'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (16788, 16790), False, 'import jax\n'), ((17466, 17497), 'latent_programmer.tasks.robust_fill.tokens.build_token_tables', 'dsl_tokens.build_token_tables', ([], {}), '()\n', (17495, 17497), True, 'from latent_programmer.tasks.robust_fill import tokens as dsl_tokens\n'), ((18542, 18579), 'absl.logging.info', 'logging.info', (['"""Initializing dataset."""'], {}), "('Initializing dataset.')\n", (18554, 18579), False, 'from absl import logging\n'), ((18702, 18768), 'absl.logging.info', 'logging.info', (['"""Loading dataset from %s"""', 'FLAGS.dataset_filepattern'], {}), "('Loading dataset from %s', FLAGS.dataset_filepattern)\n", (18714, 18768), False, 'from absl import logging\n'), ((18837, 18885), 'absl.logging.info', 'logging.info', (['"""padded_shapes: %s"""', 'padded_shapes'], {}), "('padded_shapes: %s', padded_shapes)\n", (18849, 18885), False, 'from absl import logging\n'), ((18898, 19004), 'latent_programmer.decomposition_transformer_attention.input_pipeline.create_dataset_from_tf_record', 'input_pipeline.create_dataset_from_tf_record', (['FLAGS.dataset_filepattern', 'token_id_table', 'char_id_table'], {}), '(FLAGS.dataset_filepattern,\n token_id_table, char_id_table)\n', (18942, 19004), False, 'from latent_programmer.decomposition_transformer_attention import input_pipeline\n'), ((20156, 20330), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformerConfig', 'models.DecomposeAttentionTransformerConfig', ([], {'base_config': 'base_config', 'attention_mask_type': 'FLAGS.attention_mask_type', 'bos_special_attention': 'FLAGS.bos_special_attention'}), '(base_config=base_config,\n attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=\n FLAGS.bos_special_attention)\n', (20198, 20330), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((20897, 20927), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (20915, 20927), False, 'import jax\n'), ((20993, 21014), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (21009, 21014), False, 'import jax\n'), ((21022, 21071), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', (['eval_config'], {}), '(eval_config)\n', (21058, 21071), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((21301, 21393), 'flax.optim.Adam', 'optim.Adam', (['FLAGS.lr'], {'beta1': '(0.9)', 'beta2': '(0.98)', 'eps': '(1e-09)', 'weight_decay': 'FLAGS.weight_decay'}), '(FLAGS.lr, beta1=0.9, beta2=0.98, eps=1e-09, weight_decay=FLAGS.\n weight_decay)\n', (21311, 21393), False, 'from flax import optim\n'), ((22201, 22231), 'flax.jax_utils.replicate', 'jax_utils.replicate', (['optimizer'], {}), '(optimizer)\n', (22220, 22231), False, 'from flax import jax_utils\n'), ((23679, 23690), 'time.time', 'time.time', ([], {}), '()\n', (23688, 23690), False, 'import time\n'), ((30635, 30648), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (30642, 30648), False, 'from absl import app\n'), ((6479, 6514), 'jax.numpy.asarray', 'jnp.asarray', (['ret'], {'dtype': 'jnp.float32'}), '(ret, dtype=jnp.float32)\n', (6490, 6514), True, 'import jax.numpy as jnp\n'), ((7274, 7300), 'jax.numpy.asarray', 'jnp.asarray', (['targets.shape'], {}), '(targets.shape)\n', (7285, 7300), True, 'import jax.numpy as jnp\n'), 
((8008, 8035), 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (8018, 8035), True, 'import jax.numpy as jnp\n'), ((8078, 8104), 'jax.numpy.asarray', 'jnp.asarray', (['targets.shape'], {}), '(targets.shape)\n', (8089, 8104), True, 'import jax.numpy as jnp\n'), ((10914, 10935), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (10932, 10935), False, 'import jax\n'), ((10943, 10972), 'jax.numpy.ones', 'jnp.ones', (['inputs.shape', 'dtype'], {}), '(inputs.shape, dtype)\n', (10951, 10972), True, 'import jax.numpy as jnp\n'), ((10980, 11010), 'jax.numpy.ones', 'jnp.ones', (['outputs.shape', 'dtype'], {}), '(outputs.shape, dtype)\n', (10988, 11010), True, 'import jax.numpy as jnp\n'), ((11018, 11047), 'jax.numpy.ones', 'jnp.ones', (['target_shape', 'dtype'], {}), '(target_shape, dtype)\n', (11026, 11047), True, 'import jax.numpy as jnp\n'), ((14867, 14899), 'jax.tree_map', 'jax.tree_map', (['(lambda x: x[0])', 'xs'], {}), '(lambda x: x[0], xs)\n', (14879, 14899), False, 'import jax\n'), ((16797, 16810), 'jax.host_id', 'jax.host_id', ([], {}), '()\n', (16808, 16810), False, 'import jax\n'), ((20960, 20973), 'jax.host_id', 'jax.host_id', ([], {}), '()\n', (20971, 20973), False, 'import jax\n'), ((21094, 21109), 'jax.jit', 'jax.jit', (['m.init'], {}), '(m.init)\n', (21101, 21109), False, 'import jax\n'), ((21166, 21197), 'jax.numpy.ones', 'jnp.ones', (['io_shape', 'jnp.float32'], {}), '(io_shape, jnp.float32)\n', (21174, 21197), True, 'import jax.numpy as jnp\n'), ((21205, 21236), 'jax.numpy.ones', 'jnp.ones', (['io_shape', 'jnp.float32'], {}), '(io_shape, jnp.float32)\n', (21213, 21236), True, 'import jax.numpy as jnp\n'), ((21244, 21280), 'jax.numpy.ones', 'jnp.ones', (['program_shape', 'jnp.float32'], {}), '(program_shape, jnp.float32)\n', (21252, 21280), True, 'import jax.numpy as jnp\n'), ((21867, 21931), 'absl.logging.info', 'logging.info', (['"""Found model checkpointed at step %d."""', 'start_step'], {}), "('Found model checkpointed at step %d.', start_step)\n", (21879, 21931), False, 'from absl import logging\n'), ((22670, 22760), 'functools.partial', 'functools.partial', (['train_step'], {'learning_rate_fn': 'learning_rate_fn', 'config': 'train_config'}), '(train_step, learning_rate_fn=learning_rate_fn, config=\n train_config)\n', (22687, 22760), False, 'import functools\n'), ((22845, 22914), 'functools.partial', 'functools.partial', (['eval_step'], {'eos_token': 'eos_token', 'config': 'eval_config'}), '(eval_step, eos_token=eos_token, config=eval_config)\n', (22862, 22914), False, 'import functools\n'), ((23022, 23125), 'functools.partial', 'functools.partial', (['initialize_cache'], {'max_decode_len': 'FLAGS.max_program_length', 'config': 'predict_config'}), '(initialize_cache, max_decode_len=FLAGS.max_program_length,\n config=predict_config)\n', (23039, 23125), False, 'import functools\n'), ((23211, 23363), 'functools.partial', 'functools.partial', (['predict_step'], {'eos_token': 'eos_token', 'max_decode_len': 'FLAGS.max_program_length', 'config': 'predict_config', 'slow_decode': 'FLAGS.slow_decode'}), '(predict_step, eos_token=eos_token, max_decode_len=FLAGS.\n max_program_length, config=predict_config, slow_decode=FLAGS.slow_decode)\n', (23228, 23363), False, 'import functools\n'), ((23614, 23638), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (23636, 23638), False, 'import jax\n'), ((9202, 9231), 'jax.numpy.where', 'jnp.where', (['(programs > 0)', '(1)', '(0)'], {}), '(programs > 0, 1, 0)\n', (9211, 
9231), True, 'import jax.numpy as jnp\n'), ((10440, 10484), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', (['config'], {}), '(config)\n', (10476, 10484), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((10857, 10901), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', (['config'], {}), '(config)\n', (10893, 10901), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((11936, 11964), 'jax.numpy.where', 'jnp.where', (['(outputs > 0)', '(1)', '(0)'], {}), '(outputs > 0, 1, 0)\n', (11945, 11964), True, 'import jax.numpy as jnp\n'), ((14151, 14176), 'numpy.tile', 'np.tile', (['x[-1]', 'tile_dims'], {}), '(x[-1], tile_dims)\n', (14158, 14176), True, 'import numpy as np\n'), ((14696, 14716), 'jax.lax.psum', 'jax.lax.psum', (['x', '"""i"""'], {}), "(x, 'i')\n", (14708, 14716), False, 'import jax\n'), ((16873, 16919), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', '"""tb"""', 'hparam_str'], {}), "(FLAGS.save_dir, 'tb', hparam_str)\n", (16885, 16919), False, 'import os\n'), ((19322, 19346), 'numpy.ceil', 'np.ceil', (['(batch_size / 10)'], {}), '(batch_size / 10)\n', (19329, 19346), True, 'import numpy as np\n'), ((21730, 21785), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', '"""checkpoints"""', 'hparam_str'], {}), "(FLAGS.save_dir, 'checkpoints', hparam_str)\n", (21742, 21785), False, 'import os\n'), ((21976, 22092), 'absl.logging.info', 'logging.info', (['"""Checking that start_step (%s) == finetune_start_step (%s)"""', 'start_step', 'FLAGS.finetune_start_step'], {}), "('Checking that start_step (%s) == finetune_start_step (%s)',\n start_step, FLAGS.finetune_start_step)\n", (21988, 22092), False, 'from absl import logging\n'), ((24500, 24543), 'absl.logging.info', 'logging.info', (['"""Gathering training metrics."""'], {}), "('Gathering training metrics.')\n", (24512, 24543), False, 'from absl import logging\n'), ((24564, 24601), 'flax.training.common_utils.get_metrics', 'common_utils.get_metrics', (['metrics_all'], {}), '(metrics_all)\n', (24588, 24601), False, 'from flax.training import common_utils\n'), ((24674, 24708), 'jax.tree_map', 'jax.tree_map', (['jnp.sum', 'metrics_all'], {}), '(jnp.sum, metrics_all)\n', (24686, 24708), False, 'import jax\n'), ((24777, 24830), 'jax.tree_map', 'jax.tree_map', (['(lambda x: x / denominator)', 'metrics_sums'], {}), '(lambda x: x / denominator, metrics_sums)\n', (24789, 24830), False, 'import jax\n'), ((25673, 25718), 'absl.logging.info', 'logging.info', (['"""Gathering evaluation metrics."""'], {}), "('Gathering evaluation metrics.')\n", (25685, 25718), False, 'from absl import logging\n'), ((25746, 25757), 'time.time', 'time.time', ([], {}), '()\n', (25755, 25757), False, 'import time\n'), ((26031, 26069), 'flax.training.common_utils.get_metrics', 'common_utils.get_metrics', (['eval_metrics'], {}), '(eval_metrics)\n', (26055, 26069), False, 'from flax.training import common_utils\n'), ((26096, 26131), 'jax.tree_map', 'jax.tree_map', (['jnp.sum', 'eval_metrics'], {}), '(jnp.sum, eval_metrics)\n', (26108, 26131), False, 'import jax\n'), ((26215, 26278), 'jax.tree_map', 'jax.tree_map', (['(lambda x: x / eval_denominator)', 'eval_metrics_sums'], {}), '(lambda x: x / eval_denominator, eval_metrics_sums)\n', (26227, 26278), False, 'import 
jax\n'), ((26753, 26799), 'absl.logging.info', 'logging.info', (['"""Gathering beam search metrics."""'], {}), "('Gathering beam search metrics.')\n", (26765, 26799), False, 'from absl import logging\n'), ((7209, 7231), 'flax.linen.log_softmax', 'nn.log_softmax', (['logits'], {}), '(logits)\n', (7223, 7231), True, 'from flax import linen as nn\n'), ((9332, 9376), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', (['config'], {}), '(config)\n', (9368, 9376), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((11711, 11755), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', (['config'], {}), '(config)\n', (11747, 11755), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((14794, 14829), 'jax.numpy.broadcast_to', 'jnp.broadcast_to', (['x', '((1,) + x.shape)'], {}), '(x, (1,) + x.shape)\n', (14810, 14829), True, 'import jax.numpy as jnp\n'), ((16452, 16483), 'json.loads', 'json.loads', (['FLAGS.xm_parameters'], {}), '(FLAGS.xm_parameters)\n', (16462, 16483), False, 'import json\n'), ((24132, 24145), 'jax.host_id', 'jax.host_id', ([], {}), '()\n', (24143, 24145), False, 'import jax\n'), ((25038, 25062), 'jax.numpy.exp', 'jnp.exp', (["summary['loss']"], {}), "(summary['loss'])\n", (25045, 25062), True, 'import jax.numpy as jnp\n'), ((25087, 25100), 'jax.host_id', 'jax.host_id', ([], {}), '()\n', (25098, 25100), False, 'import jax\n'), ((25115, 25183), 'absl.logging.info', 'logging.info', (['"""Train in step: %d, loss: %.4f"""', 'step', "summary['loss']"], {}), "('Train in step: %d, loss: %.4f', step, summary['loss'])\n", (25127, 25183), False, 'from absl import logging\n'), ((25199, 25210), 'time.time', 'time.time', ([], {}), '()\n', (25208, 25210), False, 'import time\n'), ((25868, 25895), 'flax.training.common_utils.shard', 'common_utils.shard', (['batches'], {}), '(batches)\n', (25886, 25895), False, 'from flax.training import common_utils\n'), ((26348, 26361), 'jax.host_id', 'jax.host_id', ([], {}), '()\n', (26359, 26361), False, 'import jax\n'), ((26871, 26882), 'time.time', 'time.time', ([], {}), '()\n', (26880, 26882), False, 'import time\n'), ((5777, 5814), 'jax.numpy.minimum', 'jnp.minimum', (['(1.0)', '(step / warmup_steps)'], {}), '(1.0, step / warmup_steps)\n', (5788, 5814), True, 'import jax.numpy as jnp\n'), ((10276, 10361), 'jax.numpy.logical_and', 'jnp.logical_and', (['(programs != config.base_config.bos_token)', '(programs != eos_token)'], {}), '(programs != config.base_config.bos_token, programs != eos_token\n )\n', (10291, 10361), True, 'import jax.numpy as jnp\n'), ((12255, 12306), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', ([], {'config': 'config'}), '(config=config)\n', (12291, 12306), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((12693, 12744), 'latent_programmer.decomposition_transformer_attention.decomposition_models.DecomposeAttentionTransformer', 'models.DecomposeAttentionTransformer', ([], {'config': 'config'}), '(config=config)\n', (12729, 12744), True, 'from latent_programmer.decomposition_transformer_attention import decomposition_models as models\n'), ((24254, 24309), 'os.path.join', 
'os.path.join', (['FLAGS.save_dir', '"""checkpoints"""', 'hparam_str'], {}), "(FLAGS.save_dir, 'checkpoints', hparam_str)\n", (24266, 24309), False, 'import os\n'), ((24323, 24355), 'flax.jax_utils.unreplicate', 'jax_utils.unreplicate', (['optimizer'], {}), '(optimizer)\n', (24344, 24355), False, 'from flax import jax_utils\n'), ((27557, 27587), 'flax.training.common_utils.shard', 'common_utils.shard', (['pred_batch'], {}), '(pred_batch)\n', (27575, 27587), False, 'from flax.training import common_utils\n'), ((29522, 29574), 'jax.tree_map', 'jax.tree_map', (['np.array', '(pred_acc, pred_denominator)'], {}), '(np.array, (pred_acc, pred_denominator))\n', (29534, 29574), False, 'import jax\n'), ((29972, 29985), 'jax.host_id', 'jax.host_id', ([], {}), '()\n', (29983, 29985), False, 'import jax\n'), ((26458, 26469), 'time.time', 'time.time', ([], {}), '()\n', (26467, 26469), False, 'import time\n'), ((28579, 28611), 'absl.logging.info', 'logging.info', (['"""ios: %s"""', 'ios[-1]'], {}), "('ios: %s', ios[-1])\n", (28591, 28611), False, 'from absl import logging\n'), ((28624, 28663), 'absl.logging.info', 'logging.info', (['"""target: %s"""', 'targets[-1]'], {}), "('target: %s', targets[-1])\n", (28636, 28663), False, 'from absl import logging\n'), ((5873, 5910), 'jax.numpy.maximum', 'jnp.maximum', (['(1.0)', '(step - warmup_steps)'], {}), '(1.0, step - warmup_steps)\n', (5884, 5910), True, 'import jax.numpy as jnp\n'), ((5972, 5994), 'jax.numpy.sqrt', 'jnp.sqrt', (['warmup_steps'], {}), '(warmup_steps)\n', (5980, 5994), True, 'import jax.numpy as jnp\n'), ((18178, 18209), 'numpy.argmax', 'np.argmax', (['(program == eos_token)'], {}), '(program == eos_token)\n', (18187, 18209), True, 'import numpy as np\n'), ((30196, 30207), 'time.time', 'time.time', ([], {}), '()\n', (30205, 30207), False, 'import time\n'), ((6019, 6050), 'jax.numpy.maximum', 'jnp.maximum', (['step', 'warmup_steps'], {}), '(step, warmup_steps)\n', (6030, 6050), True, 'import jax.numpy as jnp\n'), ((27309, 27349), 'numpy.ceil', 'np.ceil', (['(cur_pred_batch_size / n_devices)'], {}), '(cur_pred_batch_size / n_devices)\n', (27316, 27349), True, 'import numpy as np\n'), ((6365, 6399), 'jax.numpy.cos', 'jnp.cos', (['(jnp.pi * (progress % 1.0))'], {}), '(jnp.pi * (progress % 1.0))\n', (6372, 6399), True, 'import jax.numpy as jnp\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import click
import numba
def prepare_data(data_pd, parameter):
lon_set = set(data_pd["lon"])
lat_set = set(data_pd["lat"])
dep_set = set(data_pd["dep"])
lon_list = sorted(lon_set)
lat_list = sorted(lat_set)
dep_list = sorted(dep_set)
lon_mesh, lat_mesh, dep_mesh = np.meshgrid(
lon_list, lat_list, dep_list, indexing="ij")
dx, dy, dz = np.shape(lon_mesh)
value_mesh = np.zeros_like(lon_mesh)
x_mesh = np.zeros_like(lon_mesh)
y_mesh = np.zeros_like(lon_mesh)
z_mesh = np.zeros_like(lon_mesh)
r_mesh = np.zeros_like(lon_mesh)
for i in range(dx):
for j in range(dy):
for k in range(dz):
x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr(
lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j, k])
for index, row in data_pd.iterrows():
i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0))
j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0))
k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0))
value_mesh[i, j, k] = row[parameter]
return x_mesh, y_mesh, z_mesh, value_mesh
def get_value(data_pd, lat, lon, dep, parameter):
return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0]
@numba.njit()
def lld2xyzr(lat, lon, dep):
R_EARTH_KM = 6371.0
r = (R_EARTH_KM-dep)/R_EARTH_KM
theta = 90-lat
phi = lon
z = r*cosd(theta)
h = r*sind(theta)
x = h*cosd(phi)
y = h*sind(phi)
return (x, y, z, r)
@numba.njit()
def cosd(x):
return np.cos(np.deg2rad(x))
@numba.njit()
def sind(x):
return np.sin(np.deg2rad(x))
# def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh):
# value_func = RegularGridInterpolator(
# (x_mesh, y_mesh, z_mesh), value_mesh, method="nearest")
# return value_func
@numba.njit()
def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh):
x, y, z, _ = lld2xyzr(lat, lon, dep)
distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2
mindistance2 = np.min(distance2)
coors = np.where(distance2 == mindistance2)
value = value_mesh[coors[0][0], coors[1][0], coors[2][0]]
return value
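# Illustrative usage sketch (editorial addition, not part of the original script):
# with the mesh arrays returned by prepare_data, a single point can be queried by
# nearest neighbour in Cartesian space, e.g.
#   x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, "dvs")
#   v = interp_value(30.0, 100.0, 50.0, x_mesh, y_mesh, z_mesh, value_mesh)
# The column name "dvs" and the coordinates above are hypothetical example values.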
def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts):
lons = np.linspace(lon_list[0], lon_list[1], hnpts)
lats = np.linspace(lat_list[0], lat_list[1], hnpts)
deps = np.linspace(dep_list[0], dep_list[1], vnpts)
return lons, lats, deps
@click.command()
@click.option('--lon1', required=True, type=float, help="lon1")
@click.option('--lon2', required=True, type=float, help="lon2")
@click.option('--lat1', required=True, type=float, help="lat1")
@click.option('--lat2', required=True, type=float, help="lat2")
@click.option('--dep1', required=True, type=float, help="dep1")
@click.option('--dep2', required=True, type=float, help="dep2")
@click.option('--data', required=True, type=str, help="the pickle file")
@click.option('--parameter', required=True, type=str, help="physicial parameter to plot")
@click.option('--hnpts', required=True, type=int, help="horizontal npts")
@click.option('--vnpts', required=True, type=int, help="vertical npts")
def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts):
lon_list = [lon1, lon2]
lat_list = [lat1, lat2]
dep_list = [dep1, dep2]
data_pd_raw = pd.read_pickle(data)
# data_pd is too big
minlon = min(lon1, lon2)
maxlon = max(lon1, lon2)
minlat = min(lat1, lat2)
maxlat = max(lat1, lat2)
mindep = min(dep1, dep2)
maxdep = max(dep1, dep2)
data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & (
data_pd_raw.lat >= minlat) & (data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon) & (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)]
x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter)
lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids(
lon_list, lat_list, dep_list, hnpts, vnpts)
values = np.zeros((hnpts, vnpts))
for ih in range(hnpts):
for iv in range(vnpts):
values[ih, iv] = interp_value(
lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh)
# print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv])
# plotting part
plt.figure()
mesh_plot_lat, mesh_plot_dep = np.meshgrid(
lats_plot, deps_plot, indexing="ij")
# get vmin and vmax
vmin_round = round(np.min(values), 2)
if(vmin_round < np.min(values)):
vmin = vmin_round
else:
vmin = vmin_round-0.01
vmax_round = round(np.max(values), 2)
if(vmax_round > np.max(values)):
vmax = vmax_round
else:
vmax = vmax_round+0.01
print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round)
plt.contourf(mesh_plot_lat, mesh_plot_dep,
values, 101, cmap=plt.cm.seismic_r)
v = np.arange(vmin, vmax, 0.01)
plt.colorbar(ticks=v, label="perturbation")
plt.gca().invert_yaxis()
plt.xlabel(
f"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)")
plt.ylabel("depth(km)")
plt.show()
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.ylabel",
"numpy.arange",
"pandas.read_pickle",
"matplotlib.pyplot.contourf",
"click.option",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.meshgrid",
"click.command",
"matplotlib.pyplot.gca",
"numba.njit",
"numpy.deg2rad",
"numpy.shape",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.zeros_like"
] |
[((1443, 1455), 'numba.njit', 'numba.njit', ([], {}), '()\n', (1453, 1455), False, 'import numba\n'), ((1691, 1703), 'numba.njit', 'numba.njit', ([], {}), '()\n', (1701, 1703), False, 'import numba\n'), ((1753, 1765), 'numba.njit', 'numba.njit', ([], {}), '()\n', (1763, 1765), False, 'import numba\n'), ((2009, 2021), 'numba.njit', 'numba.njit', ([], {}), '()\n', (2019, 2021), False, 'import numba\n'), ((2636, 2651), 'click.command', 'click.command', ([], {}), '()\n', (2649, 2651), False, 'import click\n'), ((2653, 2715), 'click.option', 'click.option', (['"""--lon1"""'], {'required': '(True)', 'type': 'float', 'help': '"""lon1"""'}), "('--lon1', required=True, type=float, help='lon1')\n", (2665, 2715), False, 'import click\n'), ((2717, 2779), 'click.option', 'click.option', (['"""--lon2"""'], {'required': '(True)', 'type': 'float', 'help': '"""lon2"""'}), "('--lon2', required=True, type=float, help='lon2')\n", (2729, 2779), False, 'import click\n'), ((2781, 2843), 'click.option', 'click.option', (['"""--lat1"""'], {'required': '(True)', 'type': 'float', 'help': '"""lat1"""'}), "('--lat1', required=True, type=float, help='lat1')\n", (2793, 2843), False, 'import click\n'), ((2845, 2907), 'click.option', 'click.option', (['"""--lat2"""'], {'required': '(True)', 'type': 'float', 'help': '"""lat2"""'}), "('--lat2', required=True, type=float, help='lat2')\n", (2857, 2907), False, 'import click\n'), ((2909, 2971), 'click.option', 'click.option', (['"""--dep1"""'], {'required': '(True)', 'type': 'float', 'help': '"""dep1"""'}), "('--dep1', required=True, type=float, help='dep1')\n", (2921, 2971), False, 'import click\n'), ((2973, 3035), 'click.option', 'click.option', (['"""--dep2"""'], {'required': '(True)', 'type': 'float', 'help': '"""dep2"""'}), "('--dep2', required=True, type=float, help='dep2')\n", (2985, 3035), False, 'import click\n'), ((3037, 3108), 'click.option', 'click.option', (['"""--data"""'], {'required': '(True)', 'type': 'str', 'help': '"""the pickle file"""'}), "('--data', required=True, type=str, help='the pickle file')\n", (3049, 3108), False, 'import click\n'), ((3110, 3203), 'click.option', 'click.option', (['"""--parameter"""'], {'required': '(True)', 'type': 'str', 'help': '"""physicial parameter to plot"""'}), "('--parameter', required=True, type=str, help=\n 'physicial parameter to plot')\n", (3122, 3203), False, 'import click\n'), ((3200, 3272), 'click.option', 'click.option', (['"""--hnpts"""'], {'required': '(True)', 'type': 'int', 'help': '"""horizontal npts"""'}), "('--hnpts', required=True, type=int, help='horizontal npts')\n", (3212, 3272), False, 'import click\n'), ((3274, 3344), 'click.option', 'click.option', (['"""--vnpts"""'], {'required': '(True)', 'type': 'int', 'help': '"""vertical npts"""'}), "('--vnpts', required=True, type=int, help='vertical npts')\n", (3286, 3344), False, 'import click\n'), ((369, 425), 'numpy.meshgrid', 'np.meshgrid', (['lon_list', 'lat_list', 'dep_list'], {'indexing': '"""ij"""'}), "(lon_list, lat_list, dep_list, indexing='ij')\n", (380, 425), True, 'import numpy as np\n'), ((452, 470), 'numpy.shape', 'np.shape', (['lon_mesh'], {}), '(lon_mesh)\n', (460, 470), True, 'import numpy as np\n'), ((488, 511), 'numpy.zeros_like', 'np.zeros_like', (['lon_mesh'], {}), '(lon_mesh)\n', (501, 511), True, 'import numpy as np\n'), ((525, 548), 'numpy.zeros_like', 'np.zeros_like', (['lon_mesh'], {}), '(lon_mesh)\n', (538, 548), True, 'import numpy as np\n'), ((562, 585), 'numpy.zeros_like', 'np.zeros_like', (['lon_mesh'], {}), '(lon_mesh)\n', 
(575, 585), True, 'import numpy as np\n'), ((599, 622), 'numpy.zeros_like', 'np.zeros_like', (['lon_mesh'], {}), '(lon_mesh)\n', (612, 622), True, 'import numpy as np\n'), ((636, 659), 'numpy.zeros_like', 'np.zeros_like', (['lon_mesh'], {}), '(lon_mesh)\n', (649, 659), True, 'import numpy as np\n'), ((2209, 2226), 'numpy.min', 'np.min', (['distance2'], {}), '(distance2)\n', (2215, 2226), True, 'import numpy as np\n'), ((2239, 2274), 'numpy.where', 'np.where', (['(distance2 == mindistance2)'], {}), '(distance2 == mindistance2)\n', (2247, 2274), True, 'import numpy as np\n'), ((2448, 2492), 'numpy.linspace', 'np.linspace', (['lon_list[0]', 'lon_list[1]', 'hnpts'], {}), '(lon_list[0], lon_list[1], hnpts)\n', (2459, 2492), True, 'import numpy as np\n'), ((2504, 2548), 'numpy.linspace', 'np.linspace', (['lat_list[0]', 'lat_list[1]', 'hnpts'], {}), '(lat_list[0], lat_list[1], hnpts)\n', (2515, 2548), True, 'import numpy as np\n'), ((2560, 2604), 'numpy.linspace', 'np.linspace', (['dep_list[0]', 'dep_list[1]', 'vnpts'], {}), '(dep_list[0], dep_list[1], vnpts)\n', (2571, 2604), True, 'import numpy as np\n'), ((3524, 3544), 'pandas.read_pickle', 'pd.read_pickle', (['data'], {}), '(data)\n', (3538, 3544), True, 'import pandas as pd\n'), ((4172, 4196), 'numpy.zeros', 'np.zeros', (['(hnpts, vnpts)'], {}), '((hnpts, vnpts))\n', (4180, 4196), True, 'import numpy as np\n'), ((4503, 4515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4513, 4515), True, 'import matplotlib.pyplot as plt\n'), ((4551, 4599), 'numpy.meshgrid', 'np.meshgrid', (['lats_plot', 'deps_plot'], {'indexing': '"""ij"""'}), "(lats_plot, deps_plot, indexing='ij')\n", (4562, 4599), True, 'import numpy as np\n'), ((5009, 5087), 'matplotlib.pyplot.contourf', 'plt.contourf', (['mesh_plot_lat', 'mesh_plot_dep', 'values', '(101)'], {'cmap': 'plt.cm.seismic_r'}), '(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r)\n', (5021, 5087), True, 'import matplotlib.pyplot as plt\n'), ((5114, 5141), 'numpy.arange', 'np.arange', (['vmin', 'vmax', '(0.01)'], {}), '(vmin, vmax, 0.01)\n', (5123, 5141), True, 'import numpy as np\n'), ((5146, 5189), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ticks': 'v', 'label': '"""perturbation"""'}), "(ticks=v, label='perturbation')\n", (5158, 5189), True, 'import matplotlib.pyplot as plt\n'), ((5223, 5329), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)"""'], {}), "(\n f'latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)'\n )\n", (5233, 5329), True, 'import matplotlib.pyplot as plt\n'), ((5333, 5356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""depth(km)"""'], {}), "('depth(km)')\n", (5343, 5356), True, 'import matplotlib.pyplot as plt\n'), ((5361, 5371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5369, 5371), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1748), 'numpy.deg2rad', 'np.deg2rad', (['x'], {}), '(x)\n', (1745, 1748), True, 'import numpy as np\n'), ((1797, 1810), 'numpy.deg2rad', 'np.deg2rad', (['x'], {}), '(x)\n', (1807, 1810), True, 'import numpy as np\n'), ((4658, 4672), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (4664, 4672), True, 'import numpy as np\n'), ((4697, 4711), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (4703, 4711), True, 'import numpy as np\n'), ((4804, 4818), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (4810, 4818), True, 'import numpy as np\n'), ((4843, 4857), 'numpy.max', 'np.max', 
(['values'], {}), '(values)\n', (4849, 4857), True, 'import numpy as np\n'), ((4949, 4963), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (4955, 4963), True, 'import numpy as np\n'), ((4965, 4979), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (4971, 4979), True, 'import numpy as np\n'), ((5194, 5203), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5201, 5203), True, 'import matplotlib.pyplot as plt\n')]
|
from typing import Tuple
import numpy as np
import rasterio.warp
from opensfm import features
from .orthophoto_manager import OrthoPhotoManager
from .view import View
class OrthoPhotoView(View):
def __init__(
self,
main_ui,
path: str,
init_lat: float,
init_lon: float,
is_geo_reference: bool = False,
):
"""[summary]
Args:
main_ui (GUI.Gui)
path (str): path containing geotiffs
"""
self.image_manager = OrthoPhotoManager(path, 100.0)
self.images_in_list = self.image_manager.image_keys
self.zoom_window_size_px = 500
self.is_geo_reference = is_geo_reference
self.size = 50 # TODO add widget for zoom level
super(OrthoPhotoView, self).__init__(main_ui, False)
self.refocus(init_lat, init_lon)
self.populate_image_list()
if self.images_in_list:
self.bring_new_image(self.images_in_list[0])
self.set_title()
def get_image(self, new_image):
crop, image_window, geot = self.image_manager.read_image_around_latlon(
new_image, self.center_lat, self.center_lon, self.size
)
self.image_window = image_window
self.geot = geot
return crop
def get_candidate_images(self):
return self.image_manager.get_candidate_images(
self.center_lat, self.center_lon, self.size
)
def pixel_to_latlon(self, x: float, y: float):
"""
From pixels (in the viewing window) to latlon
"""
if not self.is_geo_reference:
return None
# Pixel to whatever crs the image is in
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`.
x, y = self.geot.xy(y, x)
# And then to WSG84 (lat/lon)
lons, lats = rasterio.warp.transform(self.geot.crs, "EPSG:4326", [x], [y])
return lats[0], lons[0]
def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from normalized coordinates (in the whole geotiff) to
pixels (in the viewing window)
"""
h, w = self.image_manager.get_image_size(self.current_image)
px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0]
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x = px[0] - self.image_window.col_off
y = px[1] - self.image_window.row_off
# pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`.
return [x, y]
def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from pixels (in the viewing window) to normalized coordinates
(in the whole geotiff)
"""
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x += self.image_window.col_off
y += self.image_window.row_off
h, w = self.image_manager.get_image_size(self.current_image)
coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0]
return coords.tolist()
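    # Illustrative sketch (editorial addition, not part of the original class):
    # the two transforms above are inverses of each other up to the viewing-window
    # offset, e.g.
    #   px, py = view.gcp_to_pixel_coordinates(nx, ny)
    #   nx2, ny2 = view.pixel_to_gcp_coordinates(px, py)   # ~ (nx, ny)
    # where (nx, ny) are normalized image coordinates and "view" is a hypothetical
    # OrthoPhotoView instance.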
def refocus(self, lat, lon):
self.center_lat = lat
self.center_lon = lon
self.populate_image_list()
if self.images_in_list:
if self.current_image not in self.images_in_list:
self.bring_new_image(self.images_in_list[0])
else:
self.bring_new_image(self.current_image)
self.set_title()
def bring_new_image(self, new_image):
super(OrthoPhotoView, self).bring_new_image(new_image, force=True)
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
artists = self.ax.plot(np.mean(xlim), np.mean(ylim), "rx")
self.plt_artists.extend(artists)
self.canvas.draw_idle()
def set_title(self):
lat, lon = self.center_lat, self.center_lon
if self.images_in_list:
t = "Images covering lat:{:.4f}, lon:{:.4f}".format(lat, lon)
shot = self.current_image
seq_ix = self.images_in_list.index(shot)
title = f"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}"
else:
title = f"No orthophotos around {lat}, {lon}"
self.current_image = None
self.ax.clear()
self.ax.axis("off")
self.canvas.draw_idle()
self.window.title(title)
|
[
"numpy.mean",
"numpy.array"
] |
[((3748, 3761), 'numpy.mean', 'np.mean', (['xlim'], {}), '(xlim)\n', (3755, 3761), True, 'import numpy as np\n'), ((3763, 3776), 'numpy.mean', 'np.mean', (['ylim'], {}), '(ylim)\n', (3770, 3776), True, 'import numpy as np\n'), ((2286, 2304), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (2294, 2304), True, 'import numpy as np\n'), ((3087, 3105), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (3095, 3105), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import random
class JuliaSet:
def __init__(self):
"""
Constructor of the JuliaSet class
:param size: size in pixels (for both width and height)
:param dpi: dots per inch (default 300)
"""
# Initialize image related parameters
self.size = 256
self.dpi = 300
self.norm = True
self.mirror = False
# Initialize process related parameters
self.escrad = 3
self.niter = 250
def param(self, **kwargs):
"""
Get parameters from input dictionary and set attributes.
:param kwargs: a dictionary in the form
`{'arg1':value, ..., 'argN': value}`
"""
# Check if kwargs in not empty
if kwargs is not None:
# Image related parameters
if 'size' in kwargs:
self.size = kwargs.pop('size', 256)
if 'dpi' in kwargs:
self.dpi = kwargs.pop('dpi', 300)
if 'norm' in kwargs:
self.norm = kwargs.pop('norm', True)
if 'mirror' in kwargs:
self.mirror = kwargs.pop('mirror', False)
# Process related parameters
if 'escrad' in kwargs:
self.escrad = kwargs.pop('escrad', 3)
if 'niter' in kwargs:
self.niter = kwargs.pop('niter', 250)
# If kwargs is not empty there is some invalid keywords
if kwargs:
print("{} are invalid keyword arguments!".format(kwargs.keys()))
def run(self, show=False, fname='juilaset-output'):
"""
Run the Julia set generator
        If the `mirror` attribute is True the Julia set is mirrored
        horizontally and vertically; each mirror is concatenated with the
        original to produce a new image.
        If the `norm` attribute is True the Julia set is normalized by its
        absolute maximum value.
        :param show: if show is `False` the output image will be
            written as a PNG file named `fname`
        :param fname: Name of the output PNG file to write on disk
"""
# Get a complex value among a list of best Julia sets
cpxNum = self.getComplexValue()
# Get the target area
# For more randomness, the target area is a random
# subset of a wide one defined with x[-1.5, 1.5] and
# y[-1.5, 1.5]
xrng, yrng = self.getTargetArea()
# Process
julia = self.processJulia(cpxNum, xrng, yrng)
# Normalization
if(self.norm):
julia /= np.amax(np.abs(julia))
# Mirroring
if(self.mirror):
# Horizontal mirroring and concatenate
juliamirror = np.flip(julia, axis=1)
julia = np.concatenate((julia, juliamirror), axis=1)
# Vertical mirroring and concatenate
juliamirror = np.flip(julia, axis=0)
julia = np.concatenate((julia, juliamirror), axis=0)
# Plot the output with a random colormap using matplotlib
self.plotJuliaSet(julia, show=show, fname=fname)
def getComplexValue(self):
"""
Random choice in a list of best complex values for Julia
sets (real, imag).
:return cpxNum: a semi-random complex value
"""
# Define the list of best complex values
cpxList = [
(-0.10, 0.650), (0.00, 0.80), (0.370, 0.100),
(0.355, 0.355), (-0.54, 0.54), (0.340, -0.05),
(0.37, 0.10), (0.355, 0.355)
]
# Randomly choose one
cpxTmp = random.choice(cpxList)
# Manipulate the base value slightly to make it a little more unique
cpxNum = self.twearkComplex(cpxTmp)
return cpxNum
def twearkComplex(self, cpxTmp):
"""
Manipulate the base value slightly to make it a little more unique.
:param cpxTmp: complex value to modify
        :return cpxNum: a slightly manipulated version of the input
"""
# Get the signs for the imaginary parts
isign = random.randrange(-1, 1, 2)
# Get a value variation for for real and imaginary parts
# The possible variation range is fixed at +/- 2% to stay
# In the neightborhood of the initial value
rsigma = random.uniform(0.98, 1.02)
isigma = random.uniform(0.98, 1.02)
# Apply modification and return the new complex value
realPart = cpxTmp[0] * rsigma
imagPart = cpxTmp[1] * isigma * isign
return complex(realPart, imagPart)
def getTargetArea(self):
"""
For more randomness, the target area is a random
subset of a wide one defined with x[-1.5, 1.5] and
y[-1.5, 1.5]
:return xrng, yrng: tuples containing (xmin, xmax)
and (ymin, ymax)
"""
# Randomly choose the center of the target area
# Possible values are in [-1.0, 1.0] to stay in an
# area where there are always pieces of fractals
xctr = random.uniform(-1.0,1.0)
yctr = random.uniform(-1.0,1.0)
# Extend around the center
xrng = (xctr-0.5, xctr+0.5)
yrng = (yctr-0.5, yctr+0.5)
return xrng, yrng
def processJulia(self, cpxNum, xrng, yrng):
"""
Calculate the Julia set for the given input parameters.
:param cpxNum: complex value acting as a seed for the Julia set
:param xrng: range of values (min, max) for the x-axis
:param yrng: range of values (min, max) for the y-axis
:param escrad: escape radius
:param niter: maximum number of iterations
"""
# Initialize numpy array of dimensions (size, size) with zeros
julia = np.ones((self.size, self.size), dtype=np.float32)
# Calculate the width (equal to height) of the image since the
# image is defined as a square
width = xrng[1] - xrng[0] # xmax - xmin = ymax - ymin
# Randomly choose the sign of the shade
#ssign = random.randrange(-1, 1, 2)
ssign = -1.
# Loop over x range
for ix in range(self.size):
# Get the pixel position in the complex plane
# For the real part
realPart = float(ix) / self.size * width + xrng[0]
# Loop over y range
for iy in range(self.size):
# Get the pixel position in the complex plane
# For the imaginary part
imagPart = float(iy) / self.size * width + yrng[0]
# Build the complex
cpxTmp = complex(realPart, imagPart)
# Initialize iteration counter
it = 0
# Loop over iterations
while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter):
# Quadratic polynomial
cpxTmp = cpxTmp**2 + cpxNum
# Increment iteration counter
it += 1
                # Calculate the shade (a nice trick found somewhere on the net)
shade = 1. - np.sqrt(it/self.niter)
                # Fill the output array
julia[ix][iy] = ssign * shade
return julia
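    # Editorial note (not part of the original class): processJulia is a standard
    # escape-time loop for the quadratic map z <- z**2 + c, with c = cpxNum; the
    # shade 1 - sqrt(it / niter) simply maps the escape iteration count into
    # [0, 1] for colouring.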
def plotJuliaSet(self, julia, fname='juilaset-output', show=False):
"""
Plot the output Julia set and show it in matplotlib window or
write it on disk as a png file.
:param julia: the Julia set
        :param show: if show is `False` the output image will be
written as a PNG file named `fname`
:param fname: Name of the output PNG file to write on disk
"""
# List of beautiful colormap for Julia sets
cmapList = [
cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno,
cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma
]
        # Randomly choose one colormap
cmapName = random.choice(cmapList)
# Plot the image with a gaussian interpolation
fig = plt.gcf()
fig.set_size_inches(3., 3.)
plt.imshow(julia, interpolation='gaussian', cmap=cmapName)
# Disable axis
plt.axis('off')
if(show):
plt.show()
else:
# Write on disk
fig.savefig(fname+".png", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight')
def julia(**kwargs):
"""
    Convenience factory: build a JuliaSet instance and apply any given keyword parameters.
"""
# Initialize Julia Set instance
juliaInstance = JuliaSet()
# If kwargs not empty update the attributes
if kwargs is not None:
juliaInstance.param(**kwargs)
return juliaInstance
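# Illustrative usage sketch (editorial addition, not part of the original module):
# the factory can be used to generate an image without displaying it, e.g.
#   julia(size=512, mirror=True, niter=300).run(show=False, fname="julia-demo")
# The parameter values and output name above are hypothetical examples.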
if __name__ == "__main__":
# execute only if run as a script
genJuliaSet = JuliaSet()
genJuliaSet.param()
genJuliaSet.run()
|
[
"matplotlib.pyplot.imshow",
"numpy.flip",
"random.uniform",
"random.choice",
"numpy.abs",
"numpy.ones",
"numpy.sqrt",
"random.randrange",
"matplotlib.pyplot.gcf",
"numpy.concatenate",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] |
[((3661, 3683), 'random.choice', 'random.choice', (['cpxList'], {}), '(cpxList)\n', (3674, 3683), False, 'import random\n'), ((4146, 4172), 'random.randrange', 'random.randrange', (['(-1)', '(1)', '(2)'], {}), '(-1, 1, 2)\n', (4162, 4172), False, 'import random\n'), ((4374, 4400), 'random.uniform', 'random.uniform', (['(0.98)', '(1.02)'], {}), '(0.98, 1.02)\n', (4388, 4400), False, 'import random\n'), ((4418, 4444), 'random.uniform', 'random.uniform', (['(0.98)', '(1.02)'], {}), '(0.98, 1.02)\n', (4432, 4444), False, 'import random\n'), ((5105, 5130), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (5119, 5130), False, 'import random\n'), ((5145, 5170), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (5159, 5170), False, 'import random\n'), ((5817, 5866), 'numpy.ones', 'np.ones', (['(self.size, self.size)'], {'dtype': 'np.float32'}), '((self.size, self.size), dtype=np.float32)\n', (5824, 5866), True, 'import numpy as np\n'), ((8018, 8041), 'random.choice', 'random.choice', (['cmapList'], {}), '(cmapList)\n', (8031, 8041), False, 'import random\n'), ((8112, 8121), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8119, 8121), True, 'import matplotlib.pyplot as plt\n'), ((8166, 8224), 'matplotlib.pyplot.imshow', 'plt.imshow', (['julia'], {'interpolation': '"""gaussian"""', 'cmap': 'cmapName'}), "(julia, interpolation='gaussian', cmap=cmapName)\n", (8176, 8224), True, 'import matplotlib.pyplot as plt\n'), ((8265, 8280), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8273, 8280), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2817), 'numpy.flip', 'np.flip', (['julia'], {'axis': '(1)'}), '(julia, axis=1)\n', (2802, 2817), True, 'import numpy as np\n'), ((2838, 2882), 'numpy.concatenate', 'np.concatenate', (['(julia, juliamirror)'], {'axis': '(1)'}), '((julia, juliamirror), axis=1)\n', (2852, 2882), True, 'import numpy as np\n'), ((2958, 2980), 'numpy.flip', 'np.flip', (['julia'], {'axis': '(0)'}), '(julia, axis=0)\n', (2965, 2980), True, 'import numpy as np\n'), ((3001, 3045), 'numpy.concatenate', 'np.concatenate', (['(julia, juliamirror)'], {'axis': '(0)'}), '((julia, juliamirror), axis=0)\n', (3015, 3045), True, 'import numpy as np\n'), ((8312, 8322), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8320, 8322), True, 'import matplotlib.pyplot as plt\n'), ((2657, 2670), 'numpy.abs', 'np.abs', (['julia'], {}), '(julia)\n', (2663, 2670), True, 'import numpy as np\n'), ((7187, 7211), 'numpy.sqrt', 'np.sqrt', (['(it / self.niter)'], {}), '(it / self.niter)\n', (7194, 7211), True, 'import numpy as np\n'), ((6838, 6852), 'numpy.abs', 'np.abs', (['cpxTmp'], {}), '(cpxTmp)\n', (6844, 6852), True, 'import numpy as np\n')]
|
import numpy as np
from keras import backend as K
import os
import sys
K.set_image_dim_ordering('tf')
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
data_dir_path = patch_path('very_large_data')
model_dir_path = patch_path('models/UCF-101')
from keras_video_classifier.library.convolutional import CnnVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
config_file_path = CnnVideoClassifier.get_config_file_path(model_dir_path)
weight_file_path = CnnVideoClassifier.get_weight_file_path(model_dir_path)
np.random.seed(42)
load_ucf(data_dir_path)
predictor = CnnVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()])
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path)
print('predicted: ' + predicted_label + ' actual: ' + label)
if __name__ == '__main__':
main()
|
[
"keras_video_classifier.library.utility.ucf.UCF101_loader.load_ucf",
"numpy.random.shuffle",
"os.path.dirname",
"keras_video_classifier.library.convolutional.CnnVideoClassifier.get_weight_file_path",
"numpy.random.seed",
"keras_video_classifier.library.convolutional.CnnVideoClassifier.get_config_file_path",
"keras.backend.set_image_dim_ordering",
"keras_video_classifier.library.convolutional.CnnVideoClassifier"
] |
[((72, 102), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (96, 102), True, 'from keras import backend as K\n'), ((545, 600), 'keras_video_classifier.library.convolutional.CnnVideoClassifier.get_config_file_path', 'CnnVideoClassifier.get_config_file_path', (['model_dir_path'], {}), '(model_dir_path)\n', (584, 600), False, 'from keras_video_classifier.library.convolutional import CnnVideoClassifier\n'), ((624, 679), 'keras_video_classifier.library.convolutional.CnnVideoClassifier.get_weight_file_path', 'CnnVideoClassifier.get_weight_file_path', (['model_dir_path'], {}), '(model_dir_path)\n', (663, 679), False, 'from keras_video_classifier.library.convolutional import CnnVideoClassifier\n'), ((685, 703), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (699, 703), True, 'import numpy as np\n'), ((709, 732), 'keras_video_classifier.library.utility.ucf.UCF101_loader.load_ucf', 'load_ucf', (['data_dir_path'], {}), '(data_dir_path)\n', (717, 732), False, 'from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels\n'), ((750, 770), 'keras_video_classifier.library.convolutional.CnnVideoClassifier', 'CnnVideoClassifier', ([], {}), '()\n', (768, 770), False, 'from keras_video_classifier.library.convolutional import CnnVideoClassifier\n'), ((1029, 1068), 'numpy.random.shuffle', 'np.random.shuffle', (['video_file_path_list'], {}), '(video_file_path_list)\n', (1046, 1068), True, 'import numpy as np\n'), ((151, 176), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n')]
|
'''
-------------------------------------
Assignment 2 - EE2703 (Jan-May 2020)
Done by <NAME> (EE18B122)
Created on 18/01/20
Last Modified on 04/02/20
-------------------------------------
'''
# importing necessary libraries
import sys
import cmath
import numpy as np
import pandas as pd
# To improve readability
CIRCUIT_START = ".circuit"
CIRCUIT_END = ".end"
RESISTOR = "R"
CAPACITOR = "C"
INDUCTOR = "L"
IVS = "V"
ICS = "I"
VCVS = "E"
VCCS = "G"
CCVS = "H"
CCCS = "F"
PI = np.pi
# Classes for each circuit component
class resistor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class inductor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class capacitor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class voltageSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class currentSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class vcvs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class vccs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class ccvs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
class cccs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
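# Editorial note (not part of the original file): each netlist line maps onto one
# of the component classes above, e.g. a line "R1 n1 n2 1k" becomes
# resistor('R1', 'n1', 'n2', '1k') with value 1000.0, and "E1 n1 n2 n3 n4 2"
# becomes vcvs('E1', 'n1', 'n2', 'n3', 'n4', '2'); the component names used here
# are hypothetical examples.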
# Convert a number in engineer's format to math
def enggToMath(enggNumber):
try:
return float(enggNumber)
except:
lenEnggNumber = len(enggNumber)
# Kilo
if enggNumber[lenEnggNumber-1] == 'k':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e3
# Milli
elif enggNumber[lenEnggNumber-1] == 'm':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e-3
# Micro
elif enggNumber[lenEnggNumber-1] == 'u':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e-6
# Nano
elif enggNumber[lenEnggNumber-1] == 'n':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e-9
# Mega
elif enggNumber[lenEnggNumber-1] == 'M':
base = int(enggNumber[0:lenEnggNumber-1])
return base*1e6
else:
sys.exit("Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000).")
if __name__ == "__main__":
# checking number of command line arguments
if len(sys.argv)!=2 :
sys.exit("Invalid number of arguments!")
else:
try:
circuitFile = sys.argv[1]
circuitFreq = 1e-100
circuitComponents = { RESISTOR: [], CAPACITOR: [], INDUCTOR: [], IVS: [], ICS: [], VCVS: [], VCCS: [], CCVS: [], CCCS: [] }
circuitNodes = []
# checking if given netlist file is of correct type
if (not circuitFile.endswith(".netlist")):
print("Wrong file type!")
else:
netlistFileLines = []
with open (circuitFile, "r") as f:
for line in f.readlines():
netlistFileLines.append(line.split('#')[0].split('\n')[0])
# Getting frequency, if any
if(line[:3] == '.ac'):
circuitFreq = float(line.split()[2])
# Setting Angular Frequency w
w = 2*PI*circuitFreq
try:
# Finding the location of the identifiers
identifier1 = netlistFileLines.index(CIRCUIT_START)
identifier2 = netlistFileLines.index(CIRCUIT_END)
circuitBody = netlistFileLines[identifier1+1:identifier2]
for line in circuitBody:
# Extracting the data from the line
lineTokens = line.split()
# Appending new nodes to a list
try:
if lineTokens[1] not in circuitNodes:
circuitNodes.append(lineTokens[1])
if lineTokens[2] not in circuitNodes:
circuitNodes.append(lineTokens[2])
except IndexError:
continue
# Resistor
if lineTokens[0][0] == RESISTOR:
circuitComponents[RESISTOR].append(resistor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Capacitor
elif lineTokens[0][0] == CAPACITOR:
circuitComponents[CAPACITOR].append(capacitor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Inductor
elif lineTokens[0][0] == INDUCTOR:
circuitComponents[INDUCTOR].append(inductor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Voltage Source
elif lineTokens[0][0] == IVS:
if len(lineTokens) == 5: # DC Source
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# Current Source
elif lineTokens[0][0] == ICS:
if len(lineTokens) == 5: # DC Source
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# VCVS
elif lineTokens[0][0] == VCVS:
circuitComponents[VCVS].append(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# VCCS
elif lineTokens[0][0] == VCCS:
circuitComponents[VCCS].append(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# CCVS
elif lineTokens[0][0] == CCVS:
circuitComponents[CCVS].append(ccvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# CCCS
elif lineTokens[0][0] == CCCS:
circuitComponents[CCCS].append(cccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# Erroneous Component Name
else:
sys.exit("Wrong Component Given. ABORT!")
try:
circuitNodes.remove('GND')
circuitNodes = ['GND'] + circuitNodes
except:
sys.exit("No ground node specified in the circuit!!")
# Creating a dictionary with node names and their numbers (to reduce the time taken by later parts of the program)
nodeNumbers = {circuitNodes[i]:i for i in range(len(circuitNodes))}
numNodes = len(circuitNodes)
numVS = len(circuitComponents[IVS])+len(circuitComponents[VCVS])+len(circuitComponents[CCVS])
# Creating Matrices M and b
matrixM = np.zeros((numNodes+numVS, numNodes+numVS), np.complex)
matrixB = np.zeros((numNodes+numVS,), np.complex)
# GND Equation
matrixM[0][0] = 1.0
# Resistor Equations
for r in circuitComponents[RESISTOR]:
if r.node1 != 'GND':
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node1]] += 1/r.value
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node2]] -= 1/r.value
if r.node2 != 'GND':
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node1]] -= 1/r.value
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node2]] += 1/r.value
# Capacitor Equations
for c in circuitComponents[CAPACITOR]:
if c.node1 != 'GND':
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node1]] += complex(0, w*c.value)
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node2]] -= complex(0, w*c.value)
if c.node2 != 'GND':
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node1]] -= complex(0, w*c.value)
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node2]] += complex(0, w*c.value)
# Inductor Equations
for l in circuitComponents[INDUCTOR]:
if l.node1 != 'GND':
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node1]] += complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node2]] -= complex(0, -1.0/(w*l.value))
if l.node2 != 'GND':
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node1]] -= complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node2]] += complex(0, -1.0/(w*l.value))
# Voltage Source Equations
for i in range(len(circuitComponents[IVS])):
# Equation accounting for current through the source
if circuitComponents[IVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node1]][numNodes+i] = 1.0
if circuitComponents[IVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node2]][numNodes+i] = -1.0
# Auxiliary Equations
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node1]] = -1.0
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node2]] = +1.0
matrixB[numNodes+i] = cmath.rect(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase*PI/180)
# Current Source Equations
for i in circuitComponents[ICS]:
if i.node1 != 'GND':
matrixB[nodeNumbers[i.node1]] = -1*i.value
if i.node2 != 'GND':
matrixB[nodeNumbers[i.node2]] = i.value
# VCVS Equations
for i in range(len(circuitComponents[VCVS])):
# Equation accounting for current through the source
if circuitComponents[VCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node1]][numNodes+len(circuitComponents[IVS])+i] = 1.0
if circuitComponents[VCVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+len(circuitComponents[IVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node3]] = -1.0*circuitComponents[VCVS][i].value
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node4]] = 1.0*circuitComponents[VCVS][i].value
# CCVS Equations
for i in range(len(circuitComponents[CCVS])):
# Equation accounting for current through the source
                        if circuitComponents[CCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[CCVS][i].node1]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = 1.0
                        if circuitComponents[CCVS][i].node2 != 'GND':
                            matrixM[nodeNumbers[circuitComponents[CCVS][i].node2]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0*circuitComponents[CCVS][i].value
# VCCS Equations
for vccs in circuitComponents[VCCS]:
if vccs.node1 != 'GND':
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node4]]+=vccs.value
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node3]]-=vccs.value
if vccs.node2 != 'GND':
matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node4]]-=vccs.value
                                matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node3]]+=vccs.value
# CCCS Equations
for cccs in circuitComponents[CCCS]:
def getIndexIVS(vName):
for i in range(len(circuitComponents[IVS])):
if circuitComponents[IVS][i].name == vName:
return i
if cccs.node1 != 'GND':
matrixM[nodeNumbers[cccs.node1]][numNodes+getIndexIVS(cccs.vSource)]-=cccs.value
if cccs.node2 != 'GND':
matrixM[nodeNumbers[cccs.node2]][numNodes+getIndexIVS(cccs.vSource)]+=cccs.value
try:
x = np.linalg.solve(matrixM, matrixB)
circuitCurrents = []
# Formatting Output Data
for v in circuitComponents[IVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[VCVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[CCVS]:
circuitCurrents.append("current in "+v.name)
# Printing output in table format
print(pd.DataFrame(x, circuitNodes+circuitCurrents, columns=['Voltage / Current']))
print("The values given above are AMPLITUDE values and NOT RMS values.")
except np.linalg.LinAlgError:
sys.exit("Singular Matrix Formed! Please check if you have entered the circuit definition correctly!")
except ValueError:
sys.exit("Netlist does not abide to given format!")
except FileNotFoundError:
sys.exit("Given file does not exist!")
|
[
"numpy.linalg.solve",
"cmath.rect",
"numpy.zeros",
"sys.exit",
"pandas.DataFrame"
] |
[((3524, 3564), 'sys.exit', 'sys.exit', (['"""Invalid number of arguments!"""'], {}), "('Invalid number of arguments!')\n", (3532, 3564), False, 'import sys\n'), ((17150, 17188), 'sys.exit', 'sys.exit', (['"""Given file does not exist!"""'], {}), "('Given file does not exist!')\n", (17158, 17188), False, 'import sys\n'), ((9213, 9271), 'numpy.zeros', 'np.zeros', (['(numNodes + numVS, numNodes + numVS)', 'np.complex'], {}), '((numNodes + numVS, numNodes + numVS), np.complex)\n', (9221, 9271), True, 'import numpy as np\n'), ((9298, 9339), 'numpy.zeros', 'np.zeros', (['(numNodes + numVS,)', 'np.complex'], {}), '((numNodes + numVS,), np.complex)\n', (9306, 9339), True, 'import numpy as np\n'), ((12036, 12127), 'cmath.rect', 'cmath.rect', (['circuitComponents[IVS][i].value', '(circuitComponents[IVS][i].phase * PI / 180)'], {}), '(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase *\n PI / 180)\n', (12046, 12127), False, 'import cmath\n'), ((16037, 16070), 'numpy.linalg.solve', 'np.linalg.solve', (['matrixM', 'matrixB'], {}), '(matrixM, matrixB)\n', (16052, 16070), True, 'import numpy as np\n'), ((17052, 17103), 'sys.exit', 'sys.exit', (['"""Netlist does not abide to given format!"""'], {}), "('Netlist does not abide to given format!')\n", (17060, 17103), False, 'import sys\n'), ((8695, 8748), 'sys.exit', 'sys.exit', (['"""No ground node specified in the circuit!!"""'], {}), "('No ground node specified in the circuit!!')\n", (8703, 8748), False, 'import sys\n'), ((16645, 16723), 'pandas.DataFrame', 'pd.DataFrame', (['x', '(circuitNodes + circuitCurrents)'], {'columns': "['Voltage / Current']"}), "(x, circuitNodes + circuitCurrents, columns=['Voltage / Current'])\n", (16657, 16723), True, 'import pandas as pd\n'), ((16894, 17006), 'sys.exit', 'sys.exit', (['"""Singular Matrix Formed! Please check if you have entered the circuit definition correctly!"""'], {}), "(\n 'Singular Matrix Formed! Please check if you have entered the circuit definition correctly!'\n )\n", (16902, 17006), False, 'import sys\n'), ((3250, 3426), 'sys.exit', 'sys.exit', (['"""Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000)."""'], {}), '(\n """Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000)."""\n )\n', (3258, 3426), False, 'import sys\n'), ((6531, 6581), 'sys.exit', 'sys.exit', (['"""Frequency of AC Source not specified!!"""'], {}), "('Frequency of AC Source not specified!!')\n", (6539, 6581), False, 'import sys\n'), ((7208, 7258), 'sys.exit', 'sys.exit', (['"""Frequency of AC Source not specified!!"""'], {}), "('Frequency of AC Source not specified!!')\n", (7216, 7258), False, 'import sys\n'), ((8463, 8504), 'sys.exit', 'sys.exit', (['"""Wrong Component Given. ABORT!"""'], {}), "('Wrong Component Given. ABORT!')\n", (8471, 8504), False, 'import sys\n')]
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class which acts as a wrapper around the PPR algorithm.
This class has the following functionality:
1. Load the KB graph,
2. Given list of seed entities, get topk entities from PPR.
3. Get unique facts between all extracted entities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank
from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor
from fat.fat_bert_nq.ppr.kb_csr_io import CsrData
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'verbose_logging', False,
'If true, all of the warnings related to data processing will be printed. '
'A number of warnings are expected for a normal NQ evaluation.')
class ApproximatePageRank(object):
"""APR main lib which is used to wrap functions around ppr algo."""
def __init__(self):
self.data = CsrData()
self.data.load_csr_data(
full_wiki=FLAGS.full_wiki, files_dir=FLAGS.apr_files_dir)
def get_topk_extracted_ent(self, seeds, alpha, topk):
"""Extract topk entities given seeds.
Args:
seeds: An Ex1 vector with weight on every seed entity
alpha: probability for PPR
topk: max top entities to extract
Returns:
extracted_ents: list of selected entities
extracted_scores: list of scores of selected entities
"""
ppr_scores = csr_personalized_pagerank(seeds, self.data.adj_mat_t_csr,
alpha)
sorted_idx = np.argsort(ppr_scores)[::-1]
extracted_ents = sorted_idx[:topk]
extracted_scores = ppr_scores[sorted_idx[:topk]]
    # Check for really low values:
    # get the index of the first score below 1e-6 and truncate the extracted
    # entities there
zero_idx = np.where(ppr_scores[extracted_ents] < 1e-6)[0]
if zero_idx.shape[0] > 0:
extracted_ents = extracted_ents[:zero_idx[0]]
return extracted_ents, extracted_scores
def get_facts(self, entities, topk, alpha, seed_weighting=True):
"""Get subgraph describing a neighbourhood around given entities.
Args:
entities: A list of Wikidata entities
topk: Max entities to extract from PPR
alpha: Node probability for PPR
      seed_weighting: Boolean for weighting seeds by their frequency in the passage
Returns:
unique_facts: A list of unique facts around the seeds.
"""
if FLAGS.verbose_logging:
tf.logging.info('Getting subgraph')
entity_ids = [
int(self.data.ent2id[x]) for x in entities if x in self.data.ent2id
]
if FLAGS.verbose_logging:
tf.logging.info(
str([self.data.entity_names['e'][str(x)]['name'] for x in entity_ids
]))
freq_dict = {x: entity_ids.count(x) for x in entity_ids}
seed = np.zeros((self.data.adj_mat.shape[0], 1))
if not seed_weighting:
seed[entity_ids] = 1. / len(set(entity_ids))
else:
for x, y in freq_dict.items():
seed[x] = y
seed = seed / seed.sum()
extracted_ents, extracted_scores = self.get_topk_extracted_ent(
seed, alpha, topk)
if FLAGS.verbose_logging:
tf.logging.info('Extracted ents: ')
tf.logging.info(
str([
self.data.entity_names['e'][str(x)]['name']
for x in extracted_ents
]))
facts = csr_topk_fact_extractor(self.data.adj_mat_t_csr, self.data.rel_dict,
freq_dict, self.data.entity_names,
extracted_ents, extracted_scores)
if FLAGS.verbose_logging:
tf.logging.info('Extracted facts: ')
tf.logging.info(str(facts))
# Extract 1 unique fact per pair of entities (fact with highest score)
# Sort by scores
unique_facts = {}
for (sub, obj, rel, score) in facts:
fwd_dir = (sub, obj)
rev_dir = (obj, sub)
if fwd_dir in unique_facts and score > unique_facts[fwd_dir][1]:
unique_facts[fwd_dir] = (rel, score)
elif rev_dir in unique_facts and score > unique_facts[rev_dir][1]:
unique_facts[fwd_dir] = (rel, score)
del unique_facts[rev_dir] # Remove existing entity pair
else:
unique_facts[(sub, obj)] = (rel, score)
unique_facts = list(unique_facts.items())
return unique_facts
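# Example usage (sketch): this assumes the CSR KB graph referenced by
# FLAGS.apr_files_dir / FLAGS.full_wiki has already been built, and that the
# example Wikidata ids below exist in the loaded entity vocabulary.
#
#   apr = ApproximatePageRank()
#   facts = apr.get_facts(entities=['Q76', 'Q30'], topk=200, alpha=0.85)
#   for (subj, obj), (rel, score) in facts:
#     tf.logging.info('%s %s %s %s', subj, rel, obj, score)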
|
[
"fat.fat_bert_nq.ppr.kb_csr_io.CsrData",
"numpy.where",
"tensorflow.logging.info",
"numpy.argsort",
"numpy.zeros",
"fat.fat_bert_nq.ppr.apr_algo.csr_personalized_pagerank",
"fat.fat_bert_nq.ppr.apr_algo.csr_topk_fact_extractor"
] |
[((1590, 1599), 'fat.fat_bert_nq.ppr.kb_csr_io.CsrData', 'CsrData', ([], {}), '()\n', (1597, 1599), False, 'from fat.fat_bert_nq.ppr.kb_csr_io import CsrData\n'), ((2084, 2148), 'fat.fat_bert_nq.ppr.apr_algo.csr_personalized_pagerank', 'csr_personalized_pagerank', (['seeds', 'self.data.adj_mat_t_csr', 'alpha'], {}), '(seeds, self.data.adj_mat_t_csr, alpha)\n', (2109, 2148), False, 'from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank\n'), ((3459, 3500), 'numpy.zeros', 'np.zeros', (['(self.data.adj_mat.shape[0], 1)'], {}), '((self.data.adj_mat.shape[0], 1))\n', (3467, 3500), True, 'import numpy as np\n'), ((4007, 4148), 'fat.fat_bert_nq.ppr.apr_algo.csr_topk_fact_extractor', 'csr_topk_fact_extractor', (['self.data.adj_mat_t_csr', 'self.data.rel_dict', 'freq_dict', 'self.data.entity_names', 'extracted_ents', 'extracted_scores'], {}), '(self.data.adj_mat_t_csr, self.data.rel_dict,\n freq_dict, self.data.entity_names, extracted_ents, extracted_scores)\n', (4030, 4148), False, 'from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor\n'), ((2209, 2231), 'numpy.argsort', 'np.argsort', (['ppr_scores'], {}), '(ppr_scores)\n', (2219, 2231), True, 'import numpy as np\n'), ((2449, 2493), 'numpy.where', 'np.where', (['(ppr_scores[extracted_ents] < 1e-06)'], {}), '(ppr_scores[extracted_ents] < 1e-06)\n', (2457, 2493), True, 'import numpy as np\n'), ((3099, 3134), 'tensorflow.logging.info', 'tf.logging.info', (['"""Getting subgraph"""'], {}), "('Getting subgraph')\n", (3114, 3134), True, 'import tensorflow as tf\n'), ((3809, 3844), 'tensorflow.logging.info', 'tf.logging.info', (['"""Extracted ents: """'], {}), "('Extracted ents: ')\n", (3824, 3844), True, 'import tensorflow as tf\n'), ((4253, 4289), 'tensorflow.logging.info', 'tf.logging.info', (['"""Extracted facts: """'], {}), "('Extracted facts: ')\n", (4268, 4289), True, 'import tensorflow as tf\n')]
|
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
from functools import partial
from inspect import isclass
from typing import Callable, Optional, Dict, Union
import numpy as np
import torch
import tqdm
from torch import Tensor, nn
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
    Generates, for each label, one random target drawn uniformly from the (num_classes - 1)
    classes that differ from the original label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
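# Example (sketch): with 10 classes, every sampled target is guaranteed to
# differ from its original label.
#
#   labels = torch.randint(0, 10, (8,))
#   targets = generate_random_targets(labels, num_classes=10)
#   assert (targets != labels).all()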
def get_all_targets(labels: Tensor, num_classes: int):
"""
Generates all possible targets that are different from the original labels.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
        All possible targets for each label, one per class other than the label. shape: (len(labels), num_classes - 1).
"""
all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long)
all_classes = set(range(num_classes))
for i in range(len(labels)):
this_label = labels[i].item()
other_labels = list(all_classes.difference({this_label}))
all_possible_targets[i] = torch.tensor(other_labels)
return all_possible_targets
def run_attack(model: nn.Module,
inputs: Tensor,
labels: Tensor,
attack: Callable,
targets: Optional[Tensor] = None,
batch_size: Optional[int] = None) -> dict:
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
targeted, adv_labels = False, labels
if targets is not None:
targeted, adv_labels = True, targets
batch_size = batch_size or len(inputs)
# run attack only on non already adversarial samples
already_adv = []
chunks = [tensor.split(batch_size) for tensor in [inputs, adv_labels]]
for (inputs_chunk, label_chunk) in zip(*chunks):
batch_chunk_d, label_chunk_d = [to_device(tensor) for tensor in [inputs_chunk, label_chunk]]
preds = model(batch_chunk_d).argmax(1)
is_adv = (preds == label_chunk_d) if targeted else (preds != label_chunk_d)
already_adv.append(is_adv.cpu())
not_adv = ~torch.cat(already_adv, 0)
start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
forward_counter, backward_counter = ForwardCounter(), BackwardCounter()
model.register_forward_pre_hook(forward_counter)
if LooseVersion(torch.__version__) >= LooseVersion('1.8'):
model.register_full_backward_hook(backward_counter)
else:
model.register_backward_hook(backward_counter)
average_forwards, average_backwards = [], [] # number of forward and backward calls per sample
advs_chunks = []
chunks = [tensor.split(batch_size) for tensor in [inputs[not_adv], adv_labels[not_adv]]]
total_time = 0
for (inputs_chunk, label_chunk) in tqdm.tqdm(zip(*chunks), ncols=80, total=len(chunks[0])):
batch_chunk_d, label_chunk_d = [to_device(tensor.clone()) for tensor in [inputs_chunk, label_chunk]]
start.record()
advs_chunk_d = attack(model, batch_chunk_d, label_chunk_d, targeted=targeted)
# performance monitoring
end.record()
torch.cuda.synchronize()
total_time += (start.elapsed_time(end)) / 1000 # times for cuda Events are in milliseconds
average_forwards.append(forward_counter.num_samples_called / len(batch_chunk_d))
average_backwards.append(backward_counter.num_samples_called / len(batch_chunk_d))
forward_counter.reset(), backward_counter.reset()
advs_chunks.append(advs_chunk_d.cpu())
if isinstance(attack, partial) and (callback := attack.keywords.get('callback')) is not None:
callback.reset_windows()
adv_inputs = inputs.clone()
adv_inputs[not_adv] = torch.cat(advs_chunks, 0)
data = {
'inputs': inputs,
'labels': labels,
'targets': adv_labels if targeted else None,
'adv_inputs': adv_inputs,
'time': total_time,
'num_forwards': sum(average_forwards) / len(chunks[0]),
'num_backwards': sum(average_backwards) / len(chunks[0]),
}
return data
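# Example (sketch): run_attack only ever calls attack(model, inputs, labels,
# targeted=...), so extra hyper-parameters are bound beforehand with
# functools.partial. `my_attack` is a placeholder for an attack with that
# signature; `model`, `images` and `labels` stand for a trained classifier and
# a batch of inputs in [0, 1] with their labels.
#
#   attack = partial(my_attack, steps=100, eps=8 / 255)
#   attack_data = run_attack(model, images, labels, attack=attack, batch_size=128)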
_default_metrics = OrderedDict([
('linf', linf_distances),
('l0', l0_distances),
('l1', l1_distances),
('l2', l2_distances),
])
def compute_attack_metrics(model: nn.Module,
attack_data: Dict[str, Union[Tensor, float]],
batch_size: Optional[int] = None,
metrics: Dict[str, Callable] = _default_metrics) -> Dict[str, Union[Tensor, float]]:
inputs, labels, targets, adv_inputs = map(attack_data.get, ['inputs', 'labels', 'targets', 'adv_inputs'])
if adv_inputs.min() < 0 or adv_inputs.max() > 1:
warnings.warn('Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].')
adv_inputs.clamp_(min=0, max=1)
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
batch_size = batch_size or len(inputs)
chunks = [tensor.split(batch_size) for tensor in [inputs, labels, adv_inputs]]
all_predictions = [[] for _ in range(6)]
distances = {k: [] for k in metrics.keys()}
metrics = {k: v().to(device) if (isclass(v.func) if isinstance(v, partial) else False) else v for k, v in
metrics.items()}
append = lambda list, data: list.append(data.cpu())
for inputs_chunk, labels_chunk, adv_chunk in zip(*chunks):
inputs_chunk, adv_chunk = map(to_device, [inputs_chunk, adv_chunk])
clean_preds, adv_preds = [predict_inputs(model, chunk.to(device)) for chunk in [inputs_chunk, adv_chunk]]
list(map(append, all_predictions, [*clean_preds, *adv_preds]))
for metric, metric_func in metrics.items():
distances[metric].append(metric_func(adv_chunk, inputs_chunk).detach().cpu())
logits, probs, preds, logits_adv, probs_adv, preds_adv = [torch.cat(l) for l in all_predictions]
for metric in metrics.keys():
distances[metric] = torch.cat(distances[metric], 0)
accuracy_orig = (preds == labels).float().mean().item()
if targets is not None:
success = (preds_adv == targets)
labels = targets
else:
success = (preds_adv != labels)
prob_orig = probs.gather(1, labels.unsqueeze(1)).squeeze(1)
prob_adv = probs_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
labels_infhot = torch.zeros_like(logits_adv).scatter_(1, labels.unsqueeze(1), float('inf'))
real = logits_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
other = (logits_adv - labels_infhot).max(1).values
diff_vs_max_adv = (real - other)
nll = F.cross_entropy(logits, labels, reduction='none')
nll_adv = F.cross_entropy(logits_adv, labels, reduction='none')
data = {
'time': attack_data['time'],
'num_forwards': attack_data['num_forwards'],
'num_backwards': attack_data['num_backwards'],
'targeted': targets is not None,
'preds': preds,
'adv_preds': preds_adv,
'accuracy_orig': accuracy_orig,
'success': success,
'probs_orig': prob_orig,
'probs_adv': prob_adv,
'logit_diff_adv': diff_vs_max_adv,
'nll': nll,
'nll_adv': nll_adv,
'distances': distances,
}
return data
def print_metrics(metrics: dict) -> None:
np.set_printoptions(formatter={'float': '{:0.3f}'.format}, threshold=16, edgeitems=3,
linewidth=120) # To print arrays with less precision
print('Original accuracy: {:.2%}'.format(metrics['accuracy_orig']))
print('Attack done in: {:.2f}s with {:.4g} forwards and {:.4g} backwards.'.format(
metrics['time'], metrics['num_forwards'], metrics['num_backwards']))
success = metrics['success'].numpy()
fail = bool(success.mean() != 1)
print('Attack success: {:.2%}'.format(success.mean()) + fail * ' - {}'.format(success))
for distance, values in metrics['distances'].items():
data = values.numpy()
print('{}: {} - Average: {:.3f} - Median: {:.3f}'.format(distance, data, data.mean(), np.median(data)) +
fail * ' | Avg over success: {:.3f}'.format(data[success].mean()))
attack_type = 'targets' if metrics['targeted'] else 'correct'
print('Logit({} class) - max_Logit(other classes): {} - Average: {:.2f}'.format(
attack_type, metrics['logit_diff_adv'].numpy(), metrics['logit_diff_adv'].numpy().mean()))
print('NLL of target/pred class: {:.3f}'.format(metrics['nll_adv'].numpy().mean()))
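# Example pipeline (sketch): run an attack, compute distances and success rates,
# then print a summary. `model`, `images`, `labels` and `attack` are placeholders
# as above; timing uses CUDA events, so a GPU is assumed.
#
#   attack_data = run_attack(model, images, labels, attack=attack, batch_size=256)
#   metrics = compute_attack_metrics(model, attack_data, batch_size=256)
#   print_metrics(metrics)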
|
[
"torch.cuda.Event",
"collections.OrderedDict",
"numpy.median",
"inspect.isclass",
"torch.cuda.synchronize",
"torch.tensor",
"torch.zeros_like",
"adv_lib.utils.ForwardCounter",
"torch.nn.functional.cross_entropy",
"adv_lib.utils.BackwardCounter",
"distutils.version.LooseVersion",
"warnings.warn",
"torch.cat",
"numpy.set_printoptions"
] |
[((5060, 5169), 'collections.OrderedDict', 'OrderedDict', (["[('linf', linf_distances), ('l0', l0_distances), ('l1', l1_distances), (\n 'l2', l2_distances)]"], {}), "([('linf', linf_distances), ('l0', l0_distances), ('l1',\n l1_distances), ('l2', l2_distances)])\n", (5071, 5169), False, 'from collections import OrderedDict\n'), ((4679, 4704), 'torch.cat', 'torch.cat', (['advs_chunks', '(0)'], {}), '(advs_chunks, 0)\n', (4688, 4704), False, 'import torch\n'), ((7563, 7612), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {'reduction': '"""none"""'}), "(logits, labels, reduction='none')\n", (7578, 7612), True, 'from torch.nn import functional as F\n'), ((7627, 7680), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_adv', 'labels'], {'reduction': '"""none"""'}), "(logits_adv, labels, reduction='none')\n", (7642, 7680), True, 'from torch.nn import functional as F\n'), ((8263, 8367), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{:0.3f}'.format}", 'threshold': '(16)', 'edgeitems': '(3)', 'linewidth': '(120)'}), "(formatter={'float': '{:0.3f}'.format}, threshold=16,\n edgeitems=3, linewidth=120)\n", (8282, 8367), True, 'import numpy as np\n'), ((1980, 2006), 'torch.tensor', 'torch.tensor', (['other_labels'], {}), '(other_labels)\n', (1992, 2006), False, 'import torch\n'), ((3023, 3048), 'torch.cat', 'torch.cat', (['already_adv', '(0)'], {}), '(already_adv, 0)\n', (3032, 3048), False, 'import torch\n'), ((3067, 3103), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (3083, 3103), False, 'import torch\n'), ((3105, 3141), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (3121, 3141), False, 'import torch\n'), ((3182, 3198), 'adv_lib.utils.ForwardCounter', 'ForwardCounter', ([], {}), '()\n', (3196, 3198), False, 'from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs\n'), ((3200, 3217), 'adv_lib.utils.BackwardCounter', 'BackwardCounter', ([], {}), '()\n', (3215, 3217), False, 'from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs\n'), ((3278, 3309), 'distutils.version.LooseVersion', 'LooseVersion', (['torch.__version__'], {}), '(torch.__version__)\n', (3290, 3309), False, 'from distutils.version import LooseVersion\n'), ((3313, 3332), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.8"""'], {}), "('1.8')\n", (3325, 3332), False, 'from distutils.version import LooseVersion\n'), ((4070, 4094), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4092, 4094), False, 'import torch\n'), ((5649, 5758), 'warnings.warn', 'warnings.warn', (['"""Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1]."""'], {}), "(\n 'Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].'\n )\n", (5662, 5758), False, 'import warnings\n'), ((6831, 6843), 'torch.cat', 'torch.cat', (['l'], {}), '(l)\n', (6840, 6843), False, 'import torch\n'), ((6932, 6963), 'torch.cat', 'torch.cat', (['distances[metric]', '(0)'], {}), '(distances[metric], 0)\n', (6941, 6963), False, 'import torch\n'), ((7321, 7349), 'torch.zeros_like', 'torch.zeros_like', (['logits_adv'], {}), '(logits_adv)\n', (7337, 7349), False, 'import torch\n'), ((6140, 6155), 'inspect.isclass', 'isclass', (['v.func'], {}), '(v.func)\n', (6147, 6155), False, 'from inspect import isclass\n'), ((9015, 9030), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (9024, 
9030), True, 'import numpy as np\n')]
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for graph representations of crystals.
"""
import copy
import logging
import os.path
import subprocess
import warnings
from collections import defaultdict, namedtuple
from itertools import combinations
from operator import itemgetter
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from monty.json import MSONable
from monty.os.path import which
from networkx.drawing.nx_agraph import write_dot
from networkx.readwrite import json_graph
from scipy.spatial import KDTree
from scipy.stats import describe
from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
try:
import igraph
IGRAPH_AVAILABLE = True
except ImportError:
IGRAPH_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "<NAME>, <NAME>, <NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "August 2017"
ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist")
def _compare(g1, g2, i1, i2):
"""
Helper function called by isomorphic to ensure comparison of node identities.
"""
return g1.vs[i1]["species"] == g2.vs[i2]["species"]
def _igraph_from_nxgraph(graph):
"""
Helper function that converts a networkx graph object into an igraph graph object.
"""
nodes = graph.nodes(data=True)
new_igraph = igraph.Graph()
for node in nodes:
new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"])
new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()])
return new_igraph
def _isomorphic(frag1, frag2):
"""
    Internal function to check if two graph objects are isomorphic, using igraph if
    it is available and networkx if it is not.
"""
f1_nodes = frag1.nodes(data=True)
f2_nodes = frag2.nodes(data=True)
if len(f1_nodes) != len(f2_nodes):
return False
    f1_edges = frag1.edges()
    f2_edges = frag2.edges()
    if len(f1_edges) != len(f2_edges):
        return False
f1_comp_dict = {}
f2_comp_dict = {}
for node in f1_nodes:
if node[1]["specie"] not in f1_comp_dict:
f1_comp_dict[node[1]["specie"]] = 1
else:
f1_comp_dict[node[1]["specie"]] += 1
for node in f2_nodes:
if node[1]["specie"] not in f2_comp_dict:
f2_comp_dict[node[1]["specie"]] = 1
else:
f2_comp_dict[node[1]["specie"]] += 1
if f1_comp_dict != f2_comp_dict:
return False
if IGRAPH_AVAILABLE:
ifrag1 = _igraph_from_nxgraph(frag1)
ifrag2 = _igraph_from_nxgraph(frag2)
return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare)
nm = iso.categorical_node_match("specie", "ERROR")
return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm)
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
For periodic graphs, class stores information on the graph
edges of what lattice image the edge belongs to.
:param structure: a Structure object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(structure, StructureGraph):
# just make a copy from input
graph_data = structure.as_dict()["graphs"]
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
@classmethod
def with_empty_graph(cls, structure, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
@staticmethod
def with_edges(structure, edges):
"""
        Constructor for StructureGraph, using pre-existing or pre-defined edges
        with optional edge parameters.
        :param structure: Structure object
:param edges: dict representing the bonds of the functional
group (format: {(from_index, to_index, from_image, to_image): props},
where props is a dictionary of properties, including weight.
Props should be None if no additional properties are to be
specified.
:return: sg, a StructureGraph
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index," " from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
sg.add_edge(
from_index,
to_index,
from_jimage=from_image,
to_jimage=to_image,
weight=weight,
edge_properties=props,
)
sg.set_node_attributes()
return sg
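    # Example (sketch): explicit bonds for a small bcc Fe cell built inline;
    # images are lattice-vector tuples and props may carry a "weight" or be None.
    #
    #   structure = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
    #                         [[0, 0, 0], [0.5, 0.5, 0.5]])
    #   edges = {(0, 1, (0, 0, 0), (0, 0, 0)): {"weight": 2.48},
    #            (0, 1, (0, 0, 0), (1, 0, 0)): None}
    #   sg = StructureGraph.with_edges(structure, edges)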
@staticmethod
def with_local_env_strategy(structure, strategy, weights=False):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param weights: if True, use weights from local_env class
(consult relevant class for their meaning)
:return:
"""
if not strategy.structures_allowed:
raise ValueError(
"Chosen strategy is not designed for use with structures! " "Please choose another strategy."
)
sg = StructureGraph.with_empty_graph(structure, name="bonds")
for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(
from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"] if weights else None,
warn_duplicates=False,
)
return sg
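    # Example (sketch): build a bonded graph using a near-neighbour strategy from
    # pymatgen.analysis.local_env, e.g. CrystalNN, for an existing Structure.
    #
    #   from pymatgen.analysis.local_env import CrystalNN
    #   sg = StructureGraph.with_local_env_strategy(structure, CrystalNN())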
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
from_jimage=(0, 0, 0),
to_jimage=None,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, " "trying to automatically detect.")
dist, to_jimage = self.structure[from_index].distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(
self.structure[from_index].distance_and_image(self.structure[from_index], jimage=image)[0]
)
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(
self.structure[from_index].coords, dist, dist * 0.01, include_index=True
)
for nnsite in equiv_sites:
to_jimage = np.subtract(nnsite.frac_coords, self.structure[from_index].frac_coords)
to_jimage = np.round(to_jimage).astype(int)
self.add_edge(
from_index=from_index,
from_jimage=(0, 0, 0),
to_jimage=to_jimage,
to_index=nnsite.index,
)
return
# sanitize types
from_jimage, to_jimage = (
tuple(map(int, from_jimage)),
tuple(map(int, to_jimage)),
)
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index, to_index, to_jimage)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, **edge_properties)
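    # Example (sketch): add a bond from site 0 to the periodic image of site 1
    # in the +x direction; from_jimage defaults to (0, 0, 0).
    #
    #   sg.add_edge(0, 1, to_jimage=(1, 0, 0), weight=2.1)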
def insert_node(
self,
i,
species,
coords,
coords_are_cartesian=False,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
        A wrapper around Structure.insert(), which also incorporates the new
        site into the StructureGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param coords_are_cartesian: Whether coordinates are cartesian.
Defaults to False.
        :param validate_proximity: For Structure.insert(); if True (default
            False), distance will be checked to ensure that site can be safely
            added.
        :param site_properties: Site properties for Structure
        :param edges: List of dicts representing edges to be added to the
            StructureGraph. These edges must include the index of the new site i,
            and all indices used for these edges should reflect the
            StructureGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.structure.insert(
i,
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(
self,
from_index,
to_index,
to_jimage=None,
new_weight=None,
new_edge_properties=None,
):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
        A wrapper for Structure.remove_sites().
        :param indices: list of indices in the current Structure (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
substituted will not place atoms to close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.structure) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
# By default, assume that all edges should stay remain
# inside the initial image
to_jimage = (0, 0, 0)
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
to_jimage=to_jimage,
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(
from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"],
warn_duplicates=False,
)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d["to_jimage"]
if dir == "in":
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get("weight", None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
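    # Example (sketch): neighbours of site 0, nearest first; each entry is a
    # ConnectedSite namedtuple with site, jimage, index, weight and dist fields.
    #
    #   for cs in sg.get_connected_sites(0):
    #       print(cs.index, cs.jimage, cs.dist)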
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d["to_jimage"]
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.nodes[u]["fillcolor"]
color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
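    # Example (sketch): render the bonding graph to PDF with GraphViz's neato
    # layout (requires the GraphViz binaries to be on the PATH).
    #
    #   sg.draw_graph_to_file("bonds.pdf", algo="neato", hide_image_edges=True)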
@property
def types_and_weights_of_connections(self):
"""
Extract a dictionary summarizing the types and weights
of edges in the graph.
:return: A dictionary with keys specifying the
species involved in a connection in alphabetical order
(e.g. string 'Fe-O') and values which are a list of
weights for those connections (e.g. bond lengths).
"""
def get_label(u, v):
u_label = self.structure[u].species_string
v_label = self.structure[v].species_string
return "-".join(sorted((u_label, v_label)))
types = defaultdict(list)
for u, v, d in self.graph.edges(data=True):
label = get_label(u, v)
types[label].append(d["weight"])
return dict(types)
@property
def weight_statistics(self):
"""
Extract a statistical summary of edge weights present in
the graph.
        :return: A dict with an 'all_weights' list and 'min',
        'max', 'mean' and 'variance' keys
"""
all_weights = [d.get("weight", None) for u, v, d in self.graph.edges(data=True)]
stats = describe(all_weights, nan_policy="omit")
return {
"all_weights": all_weights,
"min": stats.minmax[0],
"max": stats.minmax[1],
"mean": stats.mean,
"variance": stats.variance,
}
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: "A"}
available_letters = [chr(66 + i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = "A"
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = "{}-{}".format(centre_sp, ",".join(labels))
motifs.add(motif)
return sorted(list(motifs))
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
s = Structure.from_dict(d["structure"])
return cls(s, d["graphs"])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
        # computationally expensive than just keeping track of
        # which new lattice images are present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d["to_jimage"] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
                # new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for everyone one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image % 1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
new_d["to_jimage"] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add)))
# add/delete marked edges
for edges_to_remove in edges_to_remove:
new_g.remove_edge(*edges_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g),
}
sg = StructureGraph.from_dict(d)
return sg
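    # Example (sketch): build a 2x2x2 supercell of the graph; edges that crossed
    # periodic boundaries are re-wired onto the new image sites where possible.
    #
    #   supercell_sg = sg * (2, 2, 2)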
def __rmul__(self, other):
return self.__mul__(other)
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
for edges_to_remove in edges_to_remove:
self.graph.remove_edge(*edges_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
            raise ValueError("Meaningless to compare StructureGraphs if corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
        if getattr(self, "_supercell_sg", None) is None:
            self._supercell_sg = self * (3, 3, 3)
        supercell_sg = self._supercell_sg
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
# discount subgraphs that lie across *supercell* boundaries
        # these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True))
if not intersects_boundary:
molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1["specie"] == n2["specie"]
def edge_match(e1, e2):
if use_weights:
return e1["weight"] == e2["weight"]
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [
nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
class MolGraphSplitError(Exception):
"""
    Raised when a molecule graph fails to split into two disconnected
    subgraphs.
"""
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()["graphs"]
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data)
@staticmethod
def with_edges(molecule, edges):
"""
Constructor for MoleculeGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param molecule: Molecule object
:param edges: dict representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. Props should be None if no
additional properties are to be specified.
:return: mg, a MoleculeGraph
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index)" "tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = mg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
mg.add_edge(from_index, to_index, weight=weight, edge_properties=props)
mg.set_node_attributes()
return mg
@staticmethod
def with_local_env_strategy(molecule, strategy):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return: mg, a MoleculeGraph
"""
if not strategy.molecules_allowed:
raise ValueError(
"Chosen strategy is not designed for use with molecules! " "Please choose another strategy."
)
extend_structure = strategy.extend_structure_molecules
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
structure = molecule.get_boxed_structure(a, b, c, no_cross=True, reorder=False)
else:
structure = None
for n in range(len(molecule)):
if structure is None:
neighbors = strategy.get_nn_info(molecule, n)
else:
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not np.array_equal(neighbor["image"], [0, 0, 0]):
continue
if n > neighbor["site_index"]:
from_index = neighbor["site_index"]
to_index = n
else:
from_index = n
to_index = neighbor["site_index"]
mg.add_edge(
from_index=from_index,
to_index=to_index,
weight=neighbor["weight"],
warn_duplicates=False,
)
duplicates = []
for edge in mg.graph.edges:
if edge[2] != 0:
duplicates.append(edge)
for duplicate in duplicates:
mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])
mg.set_node_attributes()
return mg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
        between sites) doesn't have a direction, from_index
        can be swapped with to_index. Edges are always stored
        so that from_index < to_index.
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, **edge_properties)
def insert_node(
self,
i,
species,
coords,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(
i,
species,
coords,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def get_disconnected_fragments(self):
"""
Determine if the MoleculeGraph is connected. If it is not, separate the
MoleculeGraph into different MoleculeGraphs, where each resulting
MoleculeGraph is a disconnected subgraph of the original.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:return: list of MoleculeGraphs
"""
if nx.is_weakly_connected(self.graph):
return [copy.deepcopy(self)]
original = copy.deepcopy(self)
sub_mols = list()
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {}
for i, n in enumerate(nodes):
mapping[n] = i
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
            for k, v in list(properties.items()):  # copy so keys can be deleted while iterating
if len(v) != len(species):
del properties[k]
new_mol = Molecule(species, coords, charge=charge, site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
This function does not only alter the graph
information, but also changes the underlying
Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
raise MolGraphSplitError(
"Cannot split molecule; \
MoleculeGraph is still connected."
)
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties)
else:
original.alter_edge(u, v, new_edge_properties=alterations[(u, v)])
return original.get_disconnected_fragments()
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
# find all possible fragments, aka connected induced subgraphs
frag_dict = {}
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
mycomp = []
for idx in combination:
mycomp.append(str(self.molecule[idx].specie))
mycomp = "".join(sorted(mycomp))
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
mykey = mycomp + str(len(subgraph.edges()))
if mykey not in frag_dict:
frag_dict[mykey] = [copy.deepcopy(subgraph)]
else:
frag_dict[mykey].append(copy.deepcopy(subgraph))
# narrow to all unique fragments using graph isomorphism
unique_frag_dict = {}
for key in frag_dict:
unique_frags = []
for frag in frag_dict[key]:
found = False
for f in unique_frags:
if _isomorphic(frag, f):
found = True
break
if not found:
unique_frags.append(frag)
unique_frag_dict[key] = copy.deepcopy(unique_frags)
# convert back to molecule graphs
unique_mol_graph_dict = {}
for key in unique_frag_dict:
unique_mol_graph_list = []
for fragment in unique_frag_dict[key]:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graph_list.append(
self.with_edges(
Molecule(species=species, coords=coords, charge=self.molecule.charge),
edges,
)
)
frag_key = (
str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula)
+ " E"
+ str(len(unique_mol_graph_list[0].graph.edges()))
)
unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list)
return unique_mol_graph_dict
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.molecule) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
                    # If the strategy's graph is not 0-indexed, shift indices down by one
u, v = (u - 1), (v - 1)
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
def replace_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
raise RuntimeError(
"Currently functional group replacement" "cannot occur at an atom within a ring" "structure."
)
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
        :return: list of rings, where each ring is given as a list of edges
        (pairs of node indices) forming the cycle. If including is given,
        only rings passing through the specified sites are returned; if no
        such ring exists, an empty list is returned.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i - 1], e))
cycles_edges.append(edges)
return cycles_edges
def get_connected_sites(self, n):
"""
        Returns a list of ConnectedSite named tuples for the neighbors
        of site n: site, jimage, index, weight, dist.
        Index is the index of the corresponding site
        in the original Molecule, weight can be
        None if not defined.
        :param n: index of Site in Molecule
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = list(self.graph.out_edges(n, data=True))
in_edges = list(self.graph.in_edges(n, data=True))
for u, v, d in out_edges + in_edges:
weight = d.get("weight", None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
        number_of_self_loops = sum(1 for u, v in self.graph.edges(n) if u == v)
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
            # magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
if "to_image" in d:
to_image = d["to_jimage"]
else:
to_image = (0, 0, 0)
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
            color_u = g.nodes[u]["fillcolor"]
            color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Molecule` except
        with using `adjacency_data` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Molecule` except
        restoring graphs using `adjacency_graph`
from NetworkX to restore graph information.
"""
m = Molecule.from_dict(d["molecule"])
return cls(m, d["graphs"])
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge in edges_to_remove:
            self.graph.remove_edge(*edge)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return MoleculeGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
        # Site should have a proper __hash__() value,
        # using its coords as a convenient key
try:
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if len(self.molecule) != len(other.molecule):
return False
if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula:
return False
if len(self.graph.edges()) != len(other.graph.edges()):
return False
return _isomorphic(self.graph, other.graph)
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
            raise ValueError("Meaningless to compare MoleculeGraphs if corresponding Molecules are different.")
if strict:
# sort for consistent node indices
            # Site should have a proper __hash__() value,
            # using its coords as a convenient key
            mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
            other_sorted = other.__copy__()
            other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {
(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)
}
else:
edges = {
(str(self.molecule[u].specie), str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
                (str(other.molecule[u].specie), str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
|
[
"logging.getLogger",
"networkx.readwrite.json_graph.adjacency_graph",
"pymatgen.util.coord.lattice_points_in_supercell",
"scipy.spatial.KDTree",
"pymatgen.core.PeriodicSite",
"numpy.array",
"networkx.weakly_connected_components",
"copy.deepcopy",
"operator.itemgetter",
"igraph.Graph",
"networkx.relabel_nodes",
"numpy.multiply",
"networkx.algorithms.isomorphism.categorical_node_match",
"subprocess.Popen",
"networkx.is_connected",
"numpy.subtract",
"networkx.simple_cycles",
"numpy.dot",
"networkx.union",
"warnings.warn",
"networkx.is_weakly_connected",
"pymatgen.core.Molecule",
"numpy.round",
"networkx.readwrite.json_graph.adjacency_data",
"networkx.MultiDiGraph",
"collections.namedtuple",
"numpy.eye",
"numpy.add",
"pymatgen.core.PeriodicSite.from_dict",
"networkx.connected_components",
"pymatgen.core.Molecule.from_dict",
"networkx.subgraph",
"pymatgen.core.Structure.from_dict",
"numpy.around",
"networkx.descendants",
"networkx.is_isomorphic",
"monty.os.path.which",
"scipy.stats.describe",
"networkx.Graph",
"itertools.combinations",
"collections.defaultdict",
"networkx.set_node_attributes",
"numpy.array_equal",
"networkx.get_node_attributes",
"networkx.drawing.nx_agraph.write_dot",
"pymatgen.core.Structure.from_sites"
] |
[((1010, 1037), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1027, 1037), False, 'import logging\n'), ((1243, 1307), 'collections.namedtuple', 'namedtuple', (['"""ConnectedSite"""', '"""site, jimage, index, weight, dist"""'], {}), "('ConnectedSite', 'site, jimage, index, weight, dist')\n", (1253, 1307), False, 'from collections import defaultdict, namedtuple\n'), ((1684, 1698), 'igraph.Graph', 'igraph.Graph', ([], {}), '()\n', (1696, 1698), False, 'import igraph\n'), ((3004, 3049), 'networkx.algorithms.isomorphism.categorical_node_match', 'iso.categorical_node_match', (['"""specie"""', '"""ERROR"""'], {}), "('specie', 'ERROR')\n", (3030, 3049), True, 'import networkx.algorithms.isomorphism as iso\n'), ((4776, 4827), 'networkx.readwrite.json_graph.adjacency_graph', 'nx.readwrite.json_graph.adjacency_graph', (['graph_data'], {}), '(graph_data)\n', (4815, 4827), True, 'import networkx as nx\n'), ((6565, 6668), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {'edge_weight_name': 'edge_weight_name', 'edge_weight_units': 'edge_weight_units', 'name': 'name'}), '(edge_weight_name=edge_weight_name, edge_weight_units=\n edge_weight_units, name=name)\n', (6580, 6668), True, 'import networkx as nx\n'), ((6785, 6817), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['graph'], {}), '(graph)\n', (6810, 6817), False, 'from networkx.readwrite import json_graph\n'), ((17318, 17367), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (17334, 17367), True, 'import networkx as nx\n'), ((18480, 18533), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'species', '"""specie"""'], {}), "(self.graph, species, 'specie')\n", (18502, 18533), True, 'import networkx as nx\n'), ((18542, 18594), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'coords', '"""coords"""'], {}), "(self.graph, coords, 'coords')\n", (18564, 18594), True, 'import networkx as nx\n'), ((18603, 18663), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'properties', '"""properties"""'], {}), "(self.graph, properties, 'properties')\n", (18625, 18663), True, 'import networkx as nx\n'), ((22658, 22707), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (22674, 22707), True, 'import networkx as nx\n'), ((36080, 36111), 'networkx.drawing.nx_agraph.write_dot', 'write_dot', (['g', "(basename + '.dot')"], {}), "(g, basename + '.dot')\n", (36089, 36111), False, 'from networkx.drawing.nx_agraph import write_dot\n'), ((37176, 37193), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (37187, 37193), False, 'from collections import defaultdict, namedtuple\n'), ((37732, 37772), 'scipy.stats.describe', 'describe', (['all_weights'], {'nan_policy': '"""omit"""'}), "(all_weights, nan_policy='omit')\n", (37740, 37772), False, 'from scipy.stats import describe\n'), ((40196, 40231), 'pymatgen.core.Structure.from_dict', 'Structure.from_dict', (["d['structure']"], {}), "(d['structure'])\n", (40215, 40231), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((41833, 41867), 'numpy.array', 'np.array', (['scaling_matrix', 'np.int16'], {}), '(scaling_matrix, np.int16)\n', (41841, 41867), True, 'import numpy as np\n'), ((42242, 42283), 'pymatgen.util.coord.lattice_points_in_supercell', 'lattice_points_in_supercell', 
(['scale_matrix'], {}), '(scale_matrix)\n', (42269, 42283), False, 'from pymatgen.util.coord import lattice_points_in_supercell\n'), ((43056, 43087), 'pymatgen.core.Structure.from_sites', 'Structure.from_sites', (['new_sites'], {}), '(new_sites)\n', (43076, 43087), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((43151, 43168), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (43166, 43168), True, 'import networkx as nx\n'), ((43740, 43773), 'scipy.spatial.KDTree', 'KDTree', (['new_structure.cart_coords'], {}), '(new_structure.cart_coords)\n', (43746, 43773), False, 'from scipy.spatial import KDTree\n'), ((51317, 51365), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(True)'}), '(self.graph, mapping, copy=True)\n', (51333, 51365), True, 'import networkx as nx\n'), ((56983, 57011), 'networkx.Graph', 'nx.Graph', (['supercell_sg.graph'], {}), '(supercell_sg.graph)\n', (56991, 57011), True, 'import networkx as nx\n'), ((60558, 60609), 'networkx.readwrite.json_graph.adjacency_graph', 'nx.readwrite.json_graph.adjacency_graph', (['graph_data'], {}), '(graph_data)\n', (60597, 60609), True, 'import networkx as nx\n'), ((62376, 62479), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {'edge_weight_name': 'edge_weight_name', 'edge_weight_units': 'edge_weight_units', 'name': 'name'}), '(edge_weight_name=edge_weight_name, edge_weight_units=\n edge_weight_units, name=name)\n', (62391, 62479), True, 'import networkx as nx\n'), ((62595, 62627), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['graph'], {}), '(graph)\n', (62620, 62627), False, 'from networkx.readwrite import json_graph\n'), ((70932, 70981), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (70948, 70981), True, 'import networkx as nx\n'), ((71969, 72022), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'species', '"""specie"""'], {}), "(self.graph, species, 'specie')\n", (71991, 72022), True, 'import networkx as nx\n'), ((72031, 72083), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'coords', '"""coords"""'], {}), "(self.graph, coords, 'coords')\n", (72053, 72083), True, 'import networkx as nx\n'), ((72092, 72152), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'properties', '"""properties"""'], {}), "(self.graph, properties, 'properties')\n", (72114, 72152), True, 'import networkx as nx\n'), ((75360, 75409), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (75376, 75409), True, 'import networkx as nx\n'), ((76149, 76183), 'networkx.is_weakly_connected', 'nx.is_weakly_connected', (['self.graph'], {}), '(self.graph)\n', (76171, 76183), True, 'import networkx as nx\n'), ((76246, 76265), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (76259, 76265), False, 'import copy\n'), ((79878, 79897), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (79891, 79897), False, 'import copy\n'), ((80017, 80055), 'networkx.is_weakly_connected', 'nx.is_weakly_connected', (['original.graph'], {}), '(original.graph)\n', (80039, 80055), True, 'import networkx as nx\n'), ((102849, 102880), 'networkx.drawing.nx_agraph.write_dot', 'write_dot', (['g', "(basename + '.dot')"], {}), "(g, basename + '.dot')\n", (102858, 102880), False, 'from networkx.drawing.nx_agraph import 
write_dot\n'), ((103980, 104013), 'pymatgen.core.Molecule.from_dict', 'Molecule.from_dict', (["d['molecule']"], {}), "(d['molecule'])\n", (103998, 104013), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((106378, 106426), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(True)'}), '(self.graph, mapping, copy=True)\n', (106394, 106426), True, 'import networkx as nx\n'), ((12553, 12591), 'numpy.array_equal', 'np.array_equal', (['from_jimage', '(0, 0, 0)'], {}), '(from_jimage, (0, 0, 0))\n', (12567, 12591), True, 'import numpy as np\n'), ((12651, 12682), 'numpy.subtract', 'np.subtract', (['from_jimage', 'shift'], {}), '(from_jimage, shift)\n', (12662, 12682), True, 'import numpy as np\n'), ((12707, 12736), 'numpy.subtract', 'np.subtract', (['to_jimage', 'shift'], {}), '(to_jimage, shift)\n', (12718, 12736), True, 'import numpy as np\n'), ((12993, 13095), 'warnings.warn', 'warnings.warn', (['"""Please specify to_jimage to be unambiguous, trying to automatically detect."""'], {}), "(\n 'Please specify to_jimage to be unambiguous, trying to automatically detect.'\n )\n", (13006, 13095), False, 'import warnings\n'), ((25401, 25424), 'copy.deepcopy', 'copy.deepcopy', (['func_grp'], {}), '(func_grp)\n', (25414, 25424), False, 'import copy\n'), ((28499, 28529), 'pymatgen.core.PeriodicSite.from_dict', 'PeriodicSite.from_dict', (['site_d'], {}), '(site_d)\n', (28521, 28529), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((28612, 28642), 'numpy.subtract', 'np.subtract', (['to_jimage', 'jimage'], {}), '(to_jimage, jimage)\n', (28623, 28642), True, 'import numpy as np\n'), ((31781, 31792), 'monty.os.path.which', 'which', (['algo'], {}), '(algo)\n', (31786, 31792), False, 'from monty.os.path import which\n'), ((36232, 36303), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'f', 'stdin': 'subprocess.PIPE', 'close_fds': '(True)'}), '(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)\n', (36248, 36303), False, 'import subprocess\n'), ((39889, 39926), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['self.graph'], {}), '(self.graph)\n', (39914, 39926), False, 'from networkx.readwrite import json_graph\n'), ((42172, 42223), 'numpy.dot', 'np.dot', (['scale_matrix', 'self.structure.lattice.matrix'], {}), '(scale_matrix, self.structure.lattice.matrix)\n', (42178, 42223), True, 'import numpy as np\n'), ((43226, 43252), 'networkx.union', 'nx.union', (['new_g', 'new_graph'], {}), '(new_g, new_graph)\n', (43234, 43252), True, 'import networkx as nx\n'), ((48799, 48831), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['new_g'], {}), '(new_g)\n', (48824, 48831), False, 'from networkx.readwrite import json_graph\n'), ((58727, 58752), 'pymatgen.core.Molecule', 'Molecule', (['species', 'coords'], {}), '(species, coords)\n', (58735, 58752), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((77167, 77198), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['subg', 'mapping'], {}), '(subg, mapping)\n', (77183, 77198), True, 'import networkx as nx\n'), ((77222, 77265), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['new_graph', '"""specie"""'], {}), "(new_graph, 'specie')\n", (77244, 77265), True, 'import networkx as nx\n'), ((77287, 77330), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['new_graph', '"""coords"""'], {}), "(new_graph, 'coords')\n", (77309, 77330), True, 'import networkx 
as nx\n'), ((77355, 77402), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['new_graph', '"""properties"""'], {}), "(new_graph, 'properties')\n", (77377, 77402), True, 'import networkx as nx\n'), ((78004, 78072), 'pymatgen.core.Molecule', 'Molecule', (['species', 'coords'], {'charge': 'charge', 'site_properties': 'properties'}), '(species, coords, charge=charge, site_properties=properties)\n', (78012, 78072), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((78098, 78134), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['new_graph'], {}), '(new_graph)\n', (78123, 78134), False, 'from networkx.readwrite import json_graph\n'), ((81357, 81386), 'itertools.combinations', 'combinations', (['graph.nodes', 'ii'], {}), '(graph.nodes, ii)\n', (81369, 81386), False, 'from itertools import combinations\n'), ((82444, 82471), 'copy.deepcopy', 'copy.deepcopy', (['unique_frags'], {}), '(unique_frags)\n', (82457, 82471), False, 'import copy\n'), ((83716, 83752), 'copy.deepcopy', 'copy.deepcopy', (['unique_mol_graph_list'], {}), '(unique_mol_graph_list)\n', (83729, 83752), False, 'import copy\n'), ((98463, 98474), 'monty.os.path.which', 'which', (['algo'], {}), '(algo)\n', (98468, 98474), False, 'from monty.os.path import which\n'), ((103001, 103072), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'f', 'stdin': 'subprocess.PIPE', 'close_fds': '(True)'}), '(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)\n', (103017, 103072), False, 'import subprocess\n'), ((103674, 103711), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['self.graph'], {}), '(self.graph)\n', (103699, 103711), False, 'from networkx.readwrite import json_graph\n'), ((13904, 13975), 'numpy.subtract', 'np.subtract', (['nnsite.frac_coords', 'self.structure[from_index].frac_coords'], {}), '(nnsite.frac_coords, self.structure[from_index].frac_coords)\n', (13915, 13975), True, 'import numpy as np\n'), ((25483, 25524), 'copy.deepcopy', 'copy.deepcopy', (['FunctionalGroups[func_grp]'], {}), '(FunctionalGroups[func_grp])\n', (25496, 25524), False, 'import copy\n'), ((28266, 28292), 'numpy.multiply', 'np.multiply', (['(-1)', 'to_jimage'], {}), '(-1, to_jimage)\n', (28277, 28292), True, 'import numpy as np\n'), ((42642, 42778), 'pymatgen.core.PeriodicSite', 'PeriodicSite', (['site.species', '(site.coords + v)', 'new_lattice'], {'properties': 'site.properties', 'coords_are_cartesian': '(True)', 'to_unit_cell': '(False)'}), '(site.species, site.coords + v, new_lattice, properties=site.\n properties, coords_are_cartesian=True, to_unit_cell=False)\n', (42654, 42778), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((42981, 43029), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(True)'}), '(self.graph, mapping, copy=True)\n', (42997, 43029), True, 'import networkx as nx\n'), ((44499, 44549), 'numpy.add', 'np.add', (['self.structure[n_v].frac_coords', 'to_jimage'], {}), '(self.structure[n_v].frac_coords, to_jimage)\n', (44505, 44549), True, 'import numpy as np\n'), ((44963, 44996), 'numpy.subtract', 'np.subtract', (['v_image_cart', 'u_cart'], {}), '(v_image_cart, u_cart)\n', (44974, 44996), True, 'import numpy as np\n'), ((49782, 49798), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (49792, 49798), False, 'from operator import itemgetter\n'), ((57103, 57146), 'networkx.connected_components', 'nx.connected_components', 
(['supercell_sg.graph'], {}), '(supercell_sg.graph)\n', (57126, 57146), True, 'import networkx as nx\n'), ((58215, 58290), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['subgraph', 'g'], {'node_match': 'node_match', 'edge_match': 'edge_match'}), '(subgraph, g, node_match=node_match, edge_match=edge_match)\n', (58231, 58290), True, 'import networkx as nx\n'), ((76205, 76224), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (76218, 76224), False, 'import copy\n'), ((76478, 76524), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['original.graph'], {}), '(original.graph)\n', (76508, 76524), True, 'import networkx as nx\n'), ((81598, 81629), 'networkx.subgraph', 'nx.subgraph', (['graph', 'combination'], {}), '(graph, combination)\n', (81609, 81629), True, 'import networkx as nx\n'), ((81649, 81674), 'networkx.is_connected', 'nx.is_connected', (['subgraph'], {}), '(subgraph)\n', (81664, 81674), True, 'import networkx as nx\n'), ((82783, 82818), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['fragment', 'mapping'], {}), '(fragment, mapping)\n', (82799, 82818), True, 'import networkx as nx\n'), ((82846, 82888), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['remapped', '"""specie"""'], {}), "(remapped, 'specie')\n", (82868, 82888), True, 'import networkx as nx\n'), ((82914, 82956), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['remapped', '"""coords"""'], {}), "(remapped, 'coords')\n", (82936, 82956), True, 'import networkx as nx\n'), ((87142, 87165), 'copy.deepcopy', 'copy.deepcopy', (['func_grp'], {}), '(func_grp)\n', (87155, 87165), False, 'import copy\n'), ((93607, 93633), 'networkx.simple_cycles', 'nx.simple_cycles', (['directed'], {}), '(directed)\n', (93623, 93633), True, 'import networkx as nx\n'), ((104860, 104876), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (104870, 104876), False, 'from operator import itemgetter\n'), ((28333, 28358), 'numpy.add', 'np.add', (['to_jimage', 'jimage'], {}), '(to_jimage, jimage)\n', (28339, 28358), True, 'import numpy as np\n'), ((28438, 28470), 'numpy.add', 'np.add', (["site_d['abc']", 'to_jimage'], {}), "(site_d['abc'], to_jimage)\n", (28444, 28470), True, 'import numpy as np\n'), ((41960, 41969), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (41966, 41969), True, 'import numpy as np\n'), ((46990, 47025), 'numpy.around', 'np.around', (['v_expec_frac'], {'decimals': '(3)'}), '(v_expec_frac, decimals=3)\n', (46999, 47025), True, 'import numpy as np\n'), ((47132, 47172), 'numpy.subtract', 'np.subtract', (['v_expec_frac', 'v_expec_image'], {}), '(v_expec_frac, v_expec_image)\n', (47143, 47172), True, 'import numpy as np\n'), ((57531, 57556), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', (['subgraph'], {}), '(subgraph)\n', (57546, 57556), True, 'import networkx as nx\n'), ((66010, 66054), 'numpy.array_equal', 'np.array_equal', (["neighbor['image']", '[0, 0, 0]'], {}), "(neighbor['image'], [0, 0, 0])\n", (66024, 66054), True, 'import numpy as np\n'), ((87236, 87277), 'copy.deepcopy', 'copy.deepcopy', (['FunctionalGroups[func_grp]'], {}), '(FunctionalGroups[func_grp])\n', (87249, 87277), False, 'import copy\n'), ((92180, 92221), 'networkx.descendants', 'nx.descendants', (['disconnected', 'neighbor[2]'], {}), '(disconnected, neighbor[2])\n', (92194, 92221), True, 'import networkx as nx\n'), ((14004, 14023), 'numpy.round', 'np.round', (['to_jimage'], {}), '(to_jimage)\n', (14012, 14023), True, 'import numpy as np\n'), ((83310, 83379), 'pymatgen.core.Molecule', 
'Molecule', ([], {'species': 'species', 'coords': 'coords', 'charge': 'self.molecule.charge'}), '(species=species, coords=coords, charge=self.molecule.charge)\n', (83318, 83379), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((51646, 51677), 'numpy.multiply', 'np.multiply', (['(-1)', "d['to_jimage']"], {}), "(-1, d['to_jimage'])\n", (51657, 51677), True, 'import numpy as np\n'), ((81831, 81854), 'copy.deepcopy', 'copy.deepcopy', (['subgraph'], {}), '(subgraph)\n', (81844, 81854), False, 'import copy\n'), ((81930, 81953), 'copy.deepcopy', 'copy.deepcopy', (['subgraph'], {}), '(subgraph)\n', (81943, 81953), False, 'import copy\n'), ((47824, 47855), 'numpy.multiply', 'np.multiply', (['(-1)', "d['to_jimage']"], {}), "(-1, d['to_jimage'])\n", (47835, 47855), True, 'import numpy as np\n')]
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the wulff shape,the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
import warnings
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
Agrs:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
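# Example (illustrative, not part of the original module): hkl_tuple_to_str((1, -1, 0))
# returns the string ($1\overline{1}0$), so matplotlib draws the negative index with an overline.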
def get_tri_area(pts):
"""
    Given a list of coords for 3 points,
    compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
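# Example (illustrative, not part of the original module):
#   get_tri_area([(0, 0, 0), (1, 0, 0), (0, 1, 0)]) returns 0.5,
# the area of the unit right triangle, computed as |v1 x v2| / 2.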
class WulffFacet:
"""
Helper container for each Wulff plane.
"""
def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
miller):
"""
:param normal:
:param e_surf:
:param normal_pt:
:param dual_pt:
:param index:
:param m_ind_orig:
:param miller:
"""
self.normal = normal
self.e_surf = e_surf
self.normal_pt = normal_pt
self.dual_pt = dual_pt
self.index = index
self.m_ind_orig = m_ind_orig
self.miller = miller
self.points = []
self.outer_lines = []
class WulffShape:
"""
Generate Wulff Shape from list of miller index and surface energies,
with given conventional unit cell.
surface energy (Jm^2) is the length of normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
        Structure object, input conventional unit cell (with H) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
        hkil converted to hkl, in the same order as input_miller
.. attribute:: e_surf_list
        list of input surface energies, in the same order as input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symm
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
    .. attribute:: on_wulff
        list for all input_miller, True if the facet is on the Wulff shape.
    .. attribute:: color_area
        list for all input_miller, total area on the Wulff shape; off-Wulff facets are 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
"""
Args:
lattice: Lattice object of the conventional unit cell
miller_list ([(hkl), ...]: list of hkl or hkil for hcp
e_surf_list ([float]): list of corresponding surface energies
symprec (float): for recp_operation, default is 1e-5.
"""
        if any(se < 0 for se in e_surf_list):
warnings.warn("Unphysical (negative) surface energy detected.")
self.color_ind = list(range(len(miller_list)))
self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
# store input data
self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
self.miller_list = tuple([tuple(x) for x in miller_list])
self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
self.e_surf_list = tuple(e_surf_list)
self.lattice = lattice
self.symprec = symprec
# 2. get all the data for wulff construction
# get all the surface normal from get_all_miller_e()
self.facets = self._get_all_miller_e()
logger.debug(len(self.facets))
# 3. consider the dual condition
dual_pts = [x.dual_pt for x in self.facets]
dual_convex = ConvexHull(dual_pts)
dual_cv_simp = dual_convex.simplices
# simplices (ndarray of ints, shape (nfacet, ndim))
# list of [i, j, k] , ndim = 3
# i, j, k: ind for normal_e_m
# recalculate the dual of dual, get the wulff shape.
# conner <-> surface
# get cross point from the simplices of the dual convex hull
wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
for dual_simp in dual_cv_simp]
wulff_convex = ConvexHull(wulff_pt_list)
wulff_cv_simp = wulff_convex.simplices
logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))
# store simplices and convex
self.dual_cv_simp = dual_cv_simp
self.wulff_pt_list = wulff_pt_list
self.wulff_cv_simp = wulff_cv_simp
self.wulff_convex = wulff_convex
self.on_wulff, self.color_area = self._get_simpx_plane()
miller_area = []
for m, in_mill_fig in enumerate(self.input_miller_fig):
miller_area.append(
in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
self.miller_area = miller_area
def _get_all_miller_e(self):
"""
from self:
get miller_list(unique_miller), e_surf_list and symmetry
operations(symmops) according to lattice
apply symmops to get all the miller index, then get normal,
get all the facets functions for wulff shape calculation:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
normal[0]x + normal[1]y + normal[2]z = e_surf
return:
[WulffFacet]
"""
all_hkl = []
color_ind = self.color_ind
planes = []
recp = self.structure.lattice.reciprocal_lattice_crystallographic
recp_symmops = self.lattice.get_recp_symmetry_operation(self.symprec)
for i, (hkl, energy) in enumerate(zip(self.hkl_list,
self.e_surf_list)):
for op in recp_symmops:
miller = tuple([int(x) for x in op.operate(hkl)])
if miller not in all_hkl:
all_hkl.append(miller)
normal = recp.get_cartesian_coords(miller)
normal /= sp.linalg.norm(normal)
normal_pt = [x * energy for x in normal]
dual_pt = [x / energy for x in normal]
color_plane = color_ind[divmod(i, len(color_ind))[1]]
planes.append(WulffFacet(normal, energy, normal_pt,
dual_pt, color_plane, i, hkl))
# sort by e_surf
planes.sort(key=lambda x: x.e_surf)
return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
def _get_simpx_plane(self):
"""
        Locate the plane for each simpx on the wulff_cv, by comparing the center of
        the simpx triangle with the plane functions.
"""
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
# check whether the center of the simplices is on one plane
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
# already find the plane, move to the next simplices
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
# if there is only one hkl on wulff, choose the color of the median
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
if tuple(self.miller_list[i]) in custom_colors.keys():
color_list[i] = custom_colors[tuple(self.miller_list[i])]
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff_list
def show(self, *args, **kwargs):
r"""
Show the Wulff plot.
Args:
*args: Passed to get_plot.
**kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_line_in_facet(self, facet):
"""
Returns the sorted pts in a facet used to draw a line
"""
lines = list(facet.outer_lines)
pt = []
prev = None
while len(lines) > 0:
if prev is None:
l = lines.pop(0)
else:
for i, l in enumerate(lines):
if prev in l:
l = lines.pop(i)
if l[1] == prev:
l.reverse()
break
# make sure the lines are connected one by one.
# find the way covering all pts and facets
pt.append(self.wulff_pt_list[l[0]].tolist())
pt.append(self.wulff_pt_list[l[1]].tolist())
prev = l[1]
return pt
def get_plot(self, color_set='PuBu', grid_off=True, axis_off=True,
show_area=False, alpha=1, off_color='red', direction=None,
bar_pos=(0.75, 0.15, 0.05, 0.65), bar_on=False, units_in_JPERM2=True,
legend_on=True, aspect_ratio=(8, 8), custom_colors={}):
"""
Get the Wulff shape plot.
Args:
color_set: default is 'PuBu'
grid_off (bool): default is True
            axis_off (bool): default is True
show_area (bool): default is False
alpha (float): chosen from 0 to 1 (float), default is 1
off_color: Default color for facets not present on the Wulff shape.
            direction: viewing direction; defaults to the Miller index with the
                largest area fraction on the Wulff shape.
bar_pos: default is [0.75, 0.15, 0.05, 0.65]
bar_on (bool): default is False
legend_on (bool): default is True
aspect_ratio: default is (8, 8)
            custom_colors ({(h,k,l): [r,g,b,alpha]}): Customize color of each
                facet with a dictionary. The key is the corresponding Miller
                index and value is the color. Undefined facets will use the
                default color scheme. Note: If you decide to set your own colors, it
                probably won't make any sense to have the color bar on.
Return:
(matplotlib.pyplot)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mpl3
color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff = self._get_colors(
color_set, alpha, off_color, custom_colors=custom_colors)
if not direction:
# If direction is not specified, use the miller indices of
# maximum area.
direction = max(self.area_fraction_dict.items(),
key=lambda x: x[1])[0]
fig = plt.figure()
fig.set_size_inches(aspect_ratio[0], aspect_ratio[1])
azim, elev = self._get_azimuth_elev([direction[0], direction[1],
direction[-1]])
wulff_pt_list = self.wulff_pt_list
ax = mpl3.Axes3D(fig, azim=azim, elev=elev)
for plane in self.facets:
# check whether [pts] is empty
if len(plane.points) < 1:
# empty, plane is not on_wulff.
continue
# assign the color for on_wulff facets according to its
# index and the color_list for on_wulff
plane_color = color_list[plane.index]
pt = self.get_line_in_facet(plane)
# plot from the sorted pts from [simpx]
tri = mpl3.art3d.Poly3DCollection([pt])
tri.set_color(plane_color)
tri.set_edgecolor("#808080")
ax.add_collection3d(tri)
# set ranges of x, y, z
# find the largest distance between on_wulff pts and the origin,
# to ensure complete and consistent display for all directions
r_range = max([np.linalg.norm(x) for x in wulff_pt_list])
ax.set_xlim([-r_range * 1.1, r_range * 1.1])
ax.set_ylim([-r_range * 1.1, r_range * 1.1])
ax.set_zlim([-r_range * 1.1, r_range * 1.1])
# add legend
        if legend_on:
if show_area:
ax.legend(color_proxy, self.miller_area, loc='upper left',
bbox_to_anchor=(0, 1), fancybox=True, shadow=False)
else:
ax.legend(color_proxy_on_wulff, miller_on_wulff,
loc='upper center',
bbox_to_anchor=(0.5, 1), ncol=3, fancybox=True,
shadow=False)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add colorbar
if bar_on:
cmap = plt.get_cmap(color_set)
cmap.set_over('0.25')
cmap.set_under('0.75')
bounds = [round(e, 2) for e in e_surf_on_wulff]
bounds.append(1.2 * bounds[-1])
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# display surface energies
ax1 = fig.add_axes(bar_pos)
cbar = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, boundaries=[0] + bounds + [10],
extend='both', ticks=bounds[:-1], spacing='proportional',
orientation='vertical')
units = "$J/m^2$" if units_in_JPERM2 else r"$eV/\AA^2$"
cbar.set_label('Surface Energies (%s)' % (units), fontsize=100)
if grid_off:
ax.grid('off')
if axis_off:
ax.axis('off')
return plt
def _get_azimuth_elev(self, miller_index):
"""
Args:
miller_index: viewing direction
Returns:
azim, elev for plotting
"""
if miller_index == (0, 0, 1) or miller_index == (0, 0, 0, 1):
return 0, 90
else:
cart = self.lattice.get_cartesian_coords(miller_index)
azim = get_angle([cart[0], cart[1], 0], (1, 0, 0))
v = [cart[0], cart[1], 0]
elev = get_angle(cart, v)
return azim, elev
@property
def volume(self):
"""
Volume of the Wulff shape
"""
return self.wulff_convex.volume
@property
def miller_area_dict(self):
"""
Returns {hkl: area_hkl on wulff}
"""
return dict(zip(self.miller_list, self.color_area))
@property
def miller_energy_dict(self):
"""
Returns {hkl: surface energy_hkl}
"""
return dict(zip(self.miller_list, self.e_surf_list))
@property
def surface_area(self):
"""
Total surface area of Wulff shape.
"""
return sum(self.miller_area_dict.values())
@property
def weighted_surface_energy(self):
"""
Returns:
sum(surface_energy_hkl * area_hkl)/ sum(area_hkl)
"""
return self.total_surface_energy / self.surface_area
@property
def area_fraction_dict(self):
"""
Returns:
(dict): {hkl: area_hkl/total area on wulff}
"""
return {hkl: self.miller_area_dict[hkl] / self.surface_area
for hkl in self.miller_area_dict.keys()}
@property
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy) \
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy
@property
def shape_factor(self):
"""
This is useful for determining the critical nucleus size.
A large shape factor indicates great anisotropy.
See <NAME>., <NAME>. & <NAME>. Kinetics
of Materials. (<NAME>, 2005), p.461
Returns:
(float) Shape factor.
"""
return self.surface_area / (self.volume ** (2 / 3))
@property
def effective_radius(self):
"""
        Radius of the Wulff shape when the
        Wulff shape is approximated as a sphere.
Returns:
(float) radius.
"""
return ((3 / 4) * (self.volume / np.pi)) ** (1 / 3)
@property
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
@property
def tot_corner_sites(self):
"""
Returns the number of vertices in the convex hull.
Useful for identifying catalytically active sites.
"""
return len(self.wulff_convex.vertices)
@property
def tot_edges(self):
"""
Returns the number of edges in the convex hull.
Useful for identifying catalytically active sites.
"""
all_edges = []
for facet in self.facets:
edges = []
pt = self.get_line_in_facet(facet)
lines = []
for i, p in enumerate(pt):
if i == len(pt) / 2:
break
lines.append(tuple(sorted(tuple([tuple(pt[i * 2]), tuple(pt[i * 2 + 1])]))))
for i, p in enumerate(lines):
if p not in all_edges:
edges.append(p)
all_edges.extend(edges)
return len(all_edges)
|
[
"logging.getLogger",
"numpy.sqrt",
"pymatgen.core.structure.Structure",
"matplotlib.colorbar.ColorbarBase",
"numpy.array",
"numpy.linalg.norm",
"scipy.cross",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"numpy.dot",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.Rectangle",
"warnings.warn",
"scipy.spatial.ConvexHull",
"scipy.linalg.inv",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.get_cmap",
"pymatgen.util.coord.get_angle",
"numpy.sum",
"matplotlib.pyplot.figure",
"scipy.linalg.norm",
"matplotlib.colors.BoundaryNorm"
] |
[((1126, 1153), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1143, 1153), False, 'import logging\n'), ((1738, 1749), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1746, 1749), True, 'import numpy as np\n'), ((1752, 1763), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1760, 1763), True, 'import numpy as np\n'), ((1773, 1784), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (1781, 1784), True, 'import numpy as np\n'), ((1787, 1798), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1795, 1798), True, 'import numpy as np\n'), ((4997, 5035), 'pymatgen.core.structure.Structure', 'Structure', (['lattice', "['H']", '[[0, 0, 0]]'], {}), "(lattice, ['H'], [[0, 0, 0]])\n", (5006, 5035), False, 'from pymatgen.core.structure import Structure\n'), ((5601, 5621), 'scipy.spatial.ConvexHull', 'ConvexHull', (['dual_pts'], {}), '(dual_pts)\n', (5611, 5621), False, 'from scipy.spatial import ConvexHull\n'), ((6108, 6133), 'scipy.spatial.ConvexHull', 'ConvexHull', (['wulff_pt_list'], {}), '(wulff_pt_list)\n', (6118, 6133), False, 'from scipy.spatial import ConvexHull\n'), ((11053, 11076), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['color_set'], {}), '(color_set)\n', (11065, 11076), True, 'import matplotlib.pyplot as plt\n'), ((11660, 11705), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'cnorm', 'cmap': 'c_map'}), '(norm=cnorm, cmap=c_map)\n', (11681, 11705), True, 'import matplotlib as mpl\n'), ((15385, 15397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15395, 15397), True, 'import matplotlib.pyplot as plt\n'), ((15652, 15690), 'mpl_toolkits.mplot3d.Axes3D', 'mpl3.Axes3D', (['fig'], {'azim': 'azim', 'elev': 'elev'}), '(fig, azim=azim, elev=elev)\n', (15663, 15690), True, 'import mpl_toolkits.mplot3d as mpl3\n'), ((4749, 4812), 'warnings.warn', 'warnings.warn', (['"""Unphysical (negative) surface energy detected."""'], {}), "('Unphysical (negative) surface energy detected.')\n", (4762, 4812), False, 'import warnings\n'), ((8946, 8973), 'scipy.linalg.inv', 'sp.linalg.inv', (['matrix_surfs'], {}), '(matrix_surfs)\n', (8959, 8973), True, 'import scipy as sp\n'), ((12221, 12267), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(2, 2)', '(1)', '(1)'], {'fc': 'x', 'alpha': 'alpha'}), '((2, 2), 1, 1, fc=x, alpha=alpha)\n', (12234, 12267), True, 'import matplotlib.pyplot as plt\n'), ((16167, 16200), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mpl3.art3d.Poly3DCollection', (['[pt]'], {}), '([pt])\n', (16194, 16200), True, 'import mpl_toolkits.mplot3d as mpl3\n'), ((17366, 17389), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['color_set'], {}), '(color_set)\n', (17378, 17389), True, 'import matplotlib.pyplot as plt\n'), ((17582, 17621), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (17605, 17621), True, 'import matplotlib as mpl\n'), ((17720, 17894), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax1'], {'cmap': 'cmap', 'norm': 'norm', 'boundaries': '([0] + bounds + [10])', 'extend': '"""both"""', 'ticks': 'bounds[:-1]', 'spacing': '"""proportional"""', 'orientation': '"""vertical"""'}), "(ax1, cmap=cmap, norm=norm, boundaries=[0] +\n bounds + [10], extend='both', ticks=bounds[:-1], spacing='proportional',\n orientation='vertical')\n", (17745, 17894), True, 'import matplotlib as mpl\n'), ((18575, 18618), 'pymatgen.util.coord.get_angle', 'get_angle', (['[cart[0], cart[1], 0]', '(1, 0, 0)'], {}), '([cart[0], cart[1], 0], (1, 0, 0))\n', 
(18584, 18618), False, 'from pymatgen.util.coord import get_angle\n'), ((18676, 18694), 'pymatgen.util.coord.get_angle', 'get_angle', (['cart', 'v'], {}), '(cart, v)\n', (18685, 18694), False, 'from pymatgen.util.coord import get_angle\n'), ((20440, 20467), 'numpy.sqrt', 'np.sqrt', (['square_diff_energy'], {}), '(square_diff_energy)\n', (20447, 20467), True, 'import numpy as np\n'), ((1833, 1849), 'scipy.cross', 'sp.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (1841, 1849), True, 'import scipy as sp\n'), ((9430, 9444), 'numpy.sum', 'np.sum', (['pts', '(0)'], {}), '(pts, 0)\n', (9436, 9444), True, 'import numpy as np\n'), ((12015, 12073), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(2, 2)', '(1)', '(1)'], {'fc': 'color_list[i]', 'alpha': 'alpha'}), '((2, 2), 1, 1, fc=color_list[i], alpha=alpha)\n', (12028, 12073), True, 'import matplotlib.pyplot as plt\n'), ((16518, 16535), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (16532, 16535), True, 'import numpy as np\n'), ((7893, 7915), 'scipy.linalg.norm', 'sp.linalg.norm', (['normal'], {}), '(normal)\n', (7907, 7915), True, 'import scipy as sp\n'), ((9592, 9620), 'numpy.dot', 'np.dot', (['plane.normal', 'center'], {}), '(plane.normal, center)\n', (9598, 9620), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
""" Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
            - transpose: boolean indicating whether TF2.0 and PyTorch weights matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
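# Illustrative conversion (not part of the original file), assuming a BERT-like variable name:
#   "tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0"
#   -> ("bert.encoder.layer.0.attention.self.query.weight", transpose=True)
# The "_._" marker becomes the ".0." list index and "kernel" maps to "weight" with transpose=True,
# since Keras Dense kernels are stored transposed relative to torch.nn.Linear weights.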
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
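# Illustrative call (hypothetical path and model class, not from the original file); this mirrors
# how a PyTorch checkpoint saved as pytorch_model.bin can be loaded into a freshly built TF 2.0 model:
#   tf_model = TFBertForSequenceClassification(config)
#   tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, "./pt_model/pytorch_model.bin")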
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
    tf_model_class_name = "TF" + pt_model.__class__.__name__  # Add "TF" at the beginning
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weights (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
# Find associated numpy array in pytorch model state dict
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys)
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
|
[
"logging.getLogger",
"tensorflow.python.keras.backend.batch_set_value",
"torch.load",
"torch.from_numpy",
"numpy.squeeze",
"numpy.expand_dims",
"os.path.abspath",
"re.sub",
"numpy.transpose"
] |
[((802, 829), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (819, 829), False, 'import logging\n'), ((1569, 1614), 're.sub', 're.sub', (['"""/[^/]*___([^/]*)/"""', '"""/\\\\1/"""', 'tf_name'], {}), "('/[^/]*___([^/]*)/', '/\\\\1/', tf_name)\n", (1575, 1614), False, 'import re\n'), ((1901, 1928), 're.sub', 're.sub', (['"""//+"""', '"""/"""', 'tf_name'], {}), "('//+', '/', tf_name)\n", (1907, 1928), False, 'import re\n'), ((3319, 3359), 'os.path.abspath', 'os.path.abspath', (['pytorch_checkpoint_path'], {}), '(pytorch_checkpoint_path)\n', (3334, 3359), False, 'import os\n'), ((3448, 3487), 'torch.load', 'torch.load', (['pt_path'], {'map_location': '"""cpu"""'}), "(pt_path, map_location='cpu')\n", (3458, 3487), False, 'import torch\n'), ((7272, 7310), 'tensorflow.python.keras.backend.batch_set_value', 'K.batch_set_value', (['weight_value_tuples'], {}), '(weight_value_tuples)\n', (7289, 7310), True, 'from tensorflow.python.keras import backend as K\n'), ((12182, 12205), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (12198, 12205), False, 'import torch\n'), ((12265, 12288), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (12281, 12288), False, 'import torch\n'), ((6610, 6632), 'numpy.transpose', 'numpy.transpose', (['array'], {}), '(array)\n', (6625, 6632), False, 'import numpy\n'), ((6712, 6732), 'numpy.squeeze', 'numpy.squeeze', (['array'], {}), '(array)\n', (6725, 6732), False, 'import numpy\n'), ((11646, 11668), 'numpy.transpose', 'numpy.transpose', (['array'], {}), '(array)\n', (11661, 11668), False, 'import numpy\n'), ((11742, 11762), 'numpy.squeeze', 'numpy.squeeze', (['array'], {}), '(array)\n', (11755, 11762), False, 'import numpy\n'), ((6813, 6845), 'numpy.expand_dims', 'numpy.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (6830, 6845), False, 'import numpy\n'), ((11837, 11869), 'numpy.expand_dims', 'numpy.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (11854, 11869), False, 'import numpy\n')]
|
import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import pandas as pd
from tqdm import tqdm
import os
matplotlib.use('Agg')
@attr.s
class ConditionNumberResult(object):
form = attr.ib()
assembled_form = attr.ib()
condition_number = attr.ib()
sparse_operator = attr.ib()
number_of_dofs = attr.ib()
nnz = attr.ib()
is_operator_symmetric = attr.ib()
    bcs = attr.ib(default=attr.Factory(list))
def plot_matrix(assembled_form, **kwargs):
"""Provides a plot of a matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def plot_matrix_mixed(assembled_form, **kwargs):
"""Provides a plot of a mixed matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
f1_size = assembled_form.M[1, 1].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs):
"""Provides a plot of a condensed hybrid-mixed matrix for single scale problems."""
fig, ax = plt.subplots(1, 1)
_A = Tensor(a_form)
A = _A.blocks
idx = trace_index
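    # The condensed operator below is the Schur complement on the trace (multiplier) block:
    # the primal/flux unknowns in blocks [:idx, :idx] are eliminated locally (static condensation).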
S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Below there is the spy alternative
# plot = plt.spy(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray:
"""Utility function to filter real part in a numpy array.
:param array:
Array with real and complex numbers.
:param imag_threshold:
Threshold to cut off imaginary part in complex number.
:return:
Filtered array with only real numbers.
"""
    real_part_array = array.real[abs(array.imag) < imag_threshold]
return real_part_array
def calculate_condition_number(
A,
num_of_factors,
backend: str = "scipy",
use_sparse: bool = False,
zero_tol: float = 1e-5
):
backend = backend.lower()
if backend == "scipy":
size = A.getSize()
Mnp = csr_matrix(A.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
if use_sparse:
singular_values = svds(
A=Mnp,
k=num_of_factors,
which="LM",
maxiter=5000,
return_singular_vectors=False,
solver="lobpcg"
)
else:
M = Mnp.toarray()
singular_values = svd(M, compute_uv=False, check_finite=False)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
elif backend == "slepc":
S = SLEPc.SVD()
S.create()
S.setOperator(A)
S.setType(SLEPc.SVD.Type.LAPACK)
S.setDimensions(nsv=num_of_factors)
S.setTolerances(max_it=5000)
S.setWhichSingularTriplets(SLEPc.SVD.Which.LARGEST)
S.solve()
num_converged_values = S.getConverged()
singular_values_list = list()
if num_converged_values > 0:
for i in range(num_converged_values):
singular_value = S.getValue(i)
singular_values_list.append(singular_value)
else:
raise RuntimeError("SLEPc SVD has not converged.")
singular_values = np.array(singular_values_list)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
else:
raise NotImplementedError("The required method for condition number estimation is currently unavailable.")
return condition_number
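# Illustrative usage (mirrors the solver routines below; a, bcs and V stand for a hypothetical
# bilinear form, boundary conditions and FunctionSpace):
#   A = assemble(a, bcs=bcs, mat_type="aij")
#   kappa = calculate_condition_number(A.M.handle, num_of_factors=V.dim() - 1)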
def solve_poisson_cg(mesh, degree=1, use_quads=False):
# Function space declaration
V = FunctionSpace(mesh, "CG", degree)
# Trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
# Dirichlet BCs
bcs = DirichletBC(V, 0.0, "on_boundary")
# Variational form
a = inner(grad(u), grad(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_ls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Stabilization parameters
delta_1 = Constant(1)
delta_2 = Constant(1)
delta_3 = Constant(1)
# Least-squares terms
a = delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_cgls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# Stabilizing terms
a += -0.5 * inner((u + grad(p)), v + grad(q)) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
a += 0.5 * div(u) * div(v) * dx
a += 0.5 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_vms(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# Stabilizing terms
a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_mixed_RT(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
if use_quads:
hdiv_family = 'RTCF'
pressure_family = 'DQ'
else:
hdiv_family = 'RT'
pressure_family = 'DG'
U = FunctionSpace(mesh, hdiv_family, degree + 1)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dgls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# eta_u = 1
# Nitsche's penalizing term
beta_0 = Constant(1.0)
beta = beta_0 / h
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent terms
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += -0.5 * inner(u + grad(p), v + grad(q)) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# ** Badia-Codina based
a += -eta_u * inner(u + grad(p), v + grad(q)) * dx
a += eta_p * div(u) * div(v) * dx
a += eta_p * inner(curl(u), curl(v)) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
    a += beta * p * q * ds  # may decrease convergence rates
# ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
a += (eta_u / h) * dot(p * n, q * n) * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dvms(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent (original)
# a += jump(u, n) * jump(v, n) * dS # not considered in the original paper
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
# ** Badia-Codina based
a += eta_u * inner(u + grad(p), grad(q) - v) * dx
a += eta_p * div(u) * div(v) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
# ** The terms below are based on ASGS Badia-Codina (2010); this is not a classical Nitsche method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
a += (eta_u / h) * dot(p * n, q * n) * ds # may decrease convergence rates
# ** Classical Nitsche
# a += beta * p * q * ds # may decrease convergence rates (Nitsche)
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sipg(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
V = FunctionSpace(mesh, pressure_family, degree)
# Trial and test functions
p = TrialFunction(V)
q = TestFunction(V)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Edge stabilizing parameter
beta0 = Constant(1e1)
beta = beta0 / h
# Symmetry term. Choose if the method is SIPG (-1) or NIPG (1)
s = Constant(-1)
# Classical volumetric terms
a = inner(grad(p), grad(q)) * dx
L = f * q * dx
# DG edge terms
a += s * dot(jump(p, n), avg(grad(q))) * dS - dot(avg(grad(p)), jump(q, n)) * dS
# Edge stabilizing terms
a += beta("+") * dot(jump(p, n), jump(q, n)) * dS
# Weak boundary conditions
a += s * dot(p * n, grad(q)) * ds - dot(grad(p), q * n) * ds
a += beta * p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
# L0 = 1
# eta_p = L0 * h_avg # method B in the Badia-Codina paper
eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
# eta_u = h_avg / L0 # method B in the Badia-Codina paper
eta_u = 1
# eta_u_bc = h / L0 # method B in the Badia-Codina paper
eta_u_bc = 1
# Least-Squares weights
delta = Constant(1.0)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = 1 / h
delta_4 = 1 / h
# Least-squares terms
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
a += delta_1 * div(u) * div(v) * dx
a += delta_2 * inner(curl(u), curl(v)) * dx
# Edge stabilizing terms
# ** Badia-Codina based (better results) **
a += eta_u * avg(delta_3) * (jump(u, n) * jump(v, n)) * dS
a += eta_p * avg(delta_4) * dot(jump(p, n), jump(q, n)) * dS
a += eta_u_bc * delta_3 * p * q * ds # may decrease convergence rates
a += eta_u_bc * delta_4 * dot(u, n) * dot(v, n) * ds
# ** Mesh independent **
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# a += p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-12)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sdhm(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# BCs
u_projected = sigma_e
p_boundaries = p_exact
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e-18)
# beta = beta_0 / h
beta = beta_0
# Stabilization parameters
delta_0 = Constant(-1)
delta_1 = Constant(-0.5) * h * h
delta_2 = Constant(0.5) * h * h
delta_3 = Constant(0.5) * h * h
# Mixed classical terms
a = (dot(u, v) - div(v) * p + delta_0 * q * div(u)) * dx
L = delta_0 * f * q * dx
# Stabilizing terms
a += delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
L += delta_2 * f * div(v) * dx
# Hybridization terms
a += lambda_h("+") * dot(v, n)("+") * dS + mu_h("+") * dot(u, n)("+") * dS
a += beta("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
# Weakly imposed BC
a += (p_boundaries * dot(v, n) + mu_h * (dot(u, n) - dot(u_projected, n))) * ds
a += beta * (lambda_h - p_boundaries) * mu_h * ds
F = a - L
a_form = lhs(F)
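# Static condensation with Slate: eliminate the (u, p) blocks and keep only the
# trace (multiplier) block via the Schur complement S = A22 - A21 * A11^{-1} * A12.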
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def solve_poisson_hdg(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = (dot(u, v) - div(v) * p) * dx + lambda_h("+") * jump(v, n) * dS
a += -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += lambda_h * dot(v, n) * ds
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_cgh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u = -grad(p)
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_ldgc(
mesh,
degree=1,
is_multiplier_continuous=True
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
primal_family = "DQ" if use_quads else "DG"
V = FunctionSpace(mesh, primal_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
p_boundaries = Constant(0.0)
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
s = Constant(-1.0)
beta = Constant(32.0)
h = CellDiameter(mesh)
h_avg = avg(h)
# Classical term
a = dot(grad(p), grad(q)) * dx
L = f * q * dx
# Hybridization terms
a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# Boundary terms
# a += -dot(vel_projected, n) * v * ds # How to set this bc??
# a += (beta / h) * (p- p_boundaries) * q * ds # is this necessary?
L += s * dot(grad(q), n) * p_boundaries * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_lsh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# BCs
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0)
beta = beta_0 / h
beta_avg = beta_0 / h("+")
# Stabilizing parameter
# delta_0 = Constant(1)
# delta_1 = Constant(1)
# delta_2 = Constant(1)
# delta_3 = Constant(1)
# delta_4 = Constant(1)
# delta_5 = Constant(1)
# LARGE_NUMBER = Constant(1e0)
delta = h * h
# delta = Constant(1)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = delta
delta_4 = delta
# delta_4 = LARGE_NUMBER / h
delta_5 = delta
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
v_hat = v + beta * (q - mu_h) * n
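# u_hat and v_hat are stabilized numerical flux traces; beta penalizes the
# mismatch between the element pressure and the facet multiplier.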
# Flux least-squares
# a = (
# (inner(u, v) - q * div(u) - p * div(v) + inner(grad(p), grad(q)))
# * delta_1
# * dx
# )
# # These terms below are unsymmetric
# a += delta_1 * jump(u_hat, n=n) * q("+") * dS
# a += delta_1("+") * dot(u_hat, n) * q * ds
# # a += delta_1 * dot(u, n) * q * ds
# # L = -delta_1 * dot(u_projected, n) * q * ds
# a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
# a += delta_1 * lambda_h * dot(v, n) * ds
# # L = delta_1 * p_exact * dot(v, n) * ds
# Flux Least-squares as in DG
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
# Classical mixed Darcy eq. first-order terms as stabilizing terms
a += delta_1 * (dot(u, v) - div(v) * p) * dx
a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
a += delta_1 * lambda_h * dot(v, n) * ds
# Mass balance least-square
a += delta_2 * div(u) * div(v) * dx
# L = delta_2 * f * div(v) * dx
# Irrotational least-squares
a += delta_3 * inner(curl(u), curl(v)) * dx
# Hybridization terms
a += mu_h("+") * jump(u_hat, n=n) * dS
a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# a += delta_4 * (p - lambda_h) * (q - mu_h) * ds
# a += delta_5 * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
# a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds
# Weakly imposed BC from hybridization
# a += mu_h * (lambda_h - p_boundaries) * ds
# a += mu_h * lambda_h * ds
# ###
# a += (
# (mu_h - q) * (lambda_h - p_boundaries) * ds
# ) # maybe this is not a good way to impose BC, but it is necessary
_A = Tensor(a)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def hp_refinement_cond_number_calculation(
solver,
min_degree=1,
max_degree=4,
numel_xy=(5, 10, 15, 20, 25),
quadrilateral=True,
name="",
**kwargs
):
results_dict = {
"Element": list(),
"Number of Elements": list(),
"Degree": list(),
"Symmetric": list(),
"nnz": list(),
"dofs": list(),
"h": list(),
"Condition Number": list(),
}
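# Sweep over polynomial degrees (p-refinement) and mesh resolutions (h-refinement),
# recording symmetry, sparsity, and condition-number data for each run.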
element_kind = "Quad" if quadrilateral else "Tri"
pbar = tqdm(range(min_degree, max_degree))
for degree in pbar:
for n in numel_xy:
pbar.set_description(f"Processing {name} - degree = {degree} - N = {n}")
mesh = UnitSquareMesh(n, n, quadrilateral=quadrilateral)
result = solver(mesh, degree=degree)
current_cell_size = mesh.cell_sizes.dat.data_ro.min() if not quadrilateral else 1 / n
results_dict["Element"].append(element_kind)
results_dict["Number of Elements"].append(n * n)
results_dict["Degree"].append(degree)
results_dict["Symmetric"].append(result.is_operator_symmetric)
results_dict["nnz"].append(result.nnz)
results_dict["dofs"].append(result.number_of_dofs)
results_dict["h"].append(current_cell_size)
results_dict["Condition Number"].append(result.condition_number)
os.makedirs("./cond_number_results/results_%s" % name, exist_ok=True)
df_cond_number = pd.DataFrame(data=results_dict)
path_to_save_results = "./cond_number_results/results_%s/cond_numbers.csv" % name
df_cond_number.to_csv(path_to_save_results)
return df_cond_number
# Solver options
solvers_options = {
# "cg": solve_poisson_cg,
# "cgls": solve_poisson_cgls,
# "dgls": solve_poisson_dgls,
# "sdhm": solve_poisson_sdhm,
# "ls": solve_poisson_ls,
# "dls": solve_poisson_dls,
"lsh": solve_poisson_lsh,
# "vms": solve_poisson_vms,
# "dvms": solve_poisson_dvms,
# "mixed_RT": solve_poisson_mixed_RT,
# "hdg": solve_poisson_hdg,
# "cgh": solve_poisson_cgh,
# "ldgc": solve_poisson_ldgc,
# "sipg": solve_poisson_sipg,
}
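# Only the LSH solver is currently enabled; uncomment entries above to include
# the other discretizations in the study.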
degree = 1
last_degree = 1
for current_solver in solvers_options:
# Setting the output file name
name = f"{current_solver}"
# Selecting the solver and its kwargs
solver = solvers_options[current_solver]
# Performing the convergence study
hp_refinement_cond_number_calculation(
solver,
min_degree=degree,
max_degree=degree + last_degree,
quadrilateral=True,
name=name
)
# N = 5
# mesh = UnitSquareMesh(N, N, quadrilateral=True)
# result = solve_poisson_lsh(mesh, degree=1)
# print(f'Is symmetric? {result.is_operator_symmetric}')
# print(f'nnz: {result.nnz}')
# print(f'DoFs: {result.number_of_dofs}')
# print(f'Condition Number: {result.condition_number}')
# # Plotting the resulting matrix
# matplotlib.use('TkAgg')
# import copy
# my_cmap = copy.copy(plt.cm.get_cmap("winter"))
# my_cmap.set_bad(color="lightgray")
# # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmap=my_cmap)
# # plot_matrix(result.assembled_form, cmap=my_cmap)
# # plot_matrix_mixed(result.assembled_form, cmap=my_cmap)
# plt.tight_layout()
# plt.savefig("sparse_pattern.png")
# plt.show()
|
[
"numpy.ma.masked_values",
"slepc4py.SLEPc.SVD",
"os.makedirs",
"matplotlib.use",
"numpy.delete",
"numpy.array",
"scipy.sparse.linalg.svds",
"scipy.linalg.svd",
"pandas.DataFrame",
"numpy.all",
"matplotlib.pyplot.subplots",
"attr.ib"
] |
[((287, 308), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (301, 308), False, 'import matplotlib\n'), ((367, 376), 'attr.ib', 'attr.ib', ([], {}), '()\n', (374, 376), False, 'import attr\n'), ((398, 407), 'attr.ib', 'attr.ib', ([], {}), '()\n', (405, 407), False, 'import attr\n'), ((431, 440), 'attr.ib', 'attr.ib', ([], {}), '()\n', (438, 440), False, 'import attr\n'), ((463, 472), 'attr.ib', 'attr.ib', ([], {}), '()\n', (470, 472), False, 'import attr\n'), ((494, 503), 'attr.ib', 'attr.ib', ([], {}), '()\n', (501, 503), False, 'import attr\n'), ((514, 523), 'attr.ib', 'attr.ib', ([], {}), '()\n', (521, 523), False, 'import attr\n'), ((552, 561), 'attr.ib', 'attr.ib', ([], {}), '()\n', (559, 561), False, 'import attr\n'), ((698, 716), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (710, 716), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1088), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (1070, 1088), True, 'import numpy as np\n'), ((1098, 1137), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (1117, 1137), True, 'import numpy as np\n'), ((1443, 1461), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1455, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1888), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (1870, 1888), True, 'import numpy as np\n'), ((1898, 1937), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (1917, 1937), True, 'import numpy as np\n'), ((2361, 2379), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2373, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2843, 2870), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (2852, 2870), True, 'import numpy as np\n'), ((2880, 2919), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (2899, 2919), True, 'import numpy as np\n'), ((3342, 3360), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3354, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3905), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (3887, 3905), True, 'import numpy as np\n'), ((3915, 3954), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (3934, 3954), True, 'import numpy as np\n'), ((4541, 4559), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4553, 4559), True, 'import matplotlib.pyplot as plt\n'), ((5061, 5088), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (5070, 5088), True, 'import numpy as np\n'), ((5098, 5137), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (5117, 5137), True, 'import numpy as np\n'), ((44728, 44797), 'os.makedirs', 'os.makedirs', (["('./cond_number_results/results_%s' % name)"], {'exist_ok': '(True)'}), "('./cond_number_results/results_%s' % name, exist_ok=True)\n", (44739, 44797), False, 'import os\n'), ((44819, 44850), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'results_dict'}), '(data=results_dict)\n', (44831, 44850), True, 'import pandas as pd\n'), ((1017, 1049), 'numpy.all', 'np.all', 
(['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (1023, 1049), True, 'import numpy as np\n'), ((1817, 1849), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (1823, 1849), True, 'import numpy as np\n'), ((2799, 2831), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (2805, 2831), True, 'import numpy as np\n'), ((3834, 3866), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (3840, 3866), True, 'import numpy as np\n'), ((5017, 5049), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (5023, 5049), True, 'import numpy as np\n'), ((6257, 6364), 'scipy.sparse.linalg.svds', 'svds', ([], {'A': 'Mnp', 'k': 'num_of_factors', 'which': '"""LM"""', 'maxiter': '(5000)', 'return_singular_vectors': '(False)', 'solver': '"""lobpcg"""'}), "(A=Mnp, k=num_of_factors, which='LM', maxiter=5000,\n return_singular_vectors=False, solver='lobpcg')\n", (6261, 6364), False, 'from scipy.sparse.linalg import svds\n'), ((6550, 6594), 'scipy.linalg.svd', 'svd', (['M'], {'compute_uv': '(False)', 'check_finite': '(False)'}), '(M, compute_uv=False, check_finite=False)\n', (6553, 6594), False, 'from scipy.linalg import svd\n'), ((6781, 6792), 'slepc4py.SLEPc.SVD', 'SLEPc.SVD', ([], {}), '()\n', (6790, 6792), False, 'from slepc4py import SLEPc\n'), ((7422, 7452), 'numpy.array', 'np.array', (['singular_values_list'], {}), '(singular_values_list)\n', (7430, 7452), True, 'import numpy as np\n')]
|
import numpy as np
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5py
_convert_mass = ("particle_mass", "mass")
_particle_position_names = {}
class IOHandlerPackedHDF5(BaseIOHandler):
_dataset_type = "enzo_packed_3d"
_base = slice(None)
_field_dtype = "float64"
def _read_field_names(self, grid):
if grid.filename is None:
return []
f = h5py.File(grid.filename, mode="r")
try:
group = f["/Grid%08i" % grid.id]
except KeyError:
group = f
fields = []
dtypes = set()
add_io = "io" in grid.ds.particle_types
add_dm = "DarkMatter" in grid.ds.particle_types
for name, v in group.items():
# NOTE: This won't work with 1D datasets or references.
# For all versions of Enzo I know about, we can assume all floats
# are of the same size. So, let's grab one.
if not hasattr(v, "shape") or v.dtype == "O":
continue
elif len(v.dims) == 1:
if grid.ds.dimensionality == 1:
fields.append(("enzo", str(name)))
elif add_io:
fields.append(("io", str(name)))
elif add_dm:
fields.append(("DarkMatter", str(name)))
else:
fields.append(("enzo", str(name)))
dtypes.add(v.dtype)
if len(dtypes) == 1:
# Now, if everything we saw was the same dtype, we can go ahead and
# set it here. We do this because it is a HUGE savings for 32 bit
# floats, since our numpy copying/casting is way faster than
# h5py's, for some reason I don't understand. This does *not* need
# to be correct -- it will get fixed later -- it just needs to be
# okay for now.
self._field_dtype = list(dtypes)[0]
f.close()
return fields
@property
def _read_exception(self):
return (KeyError,)
def _read_particle_coords(self, chunks, ptf):
yield from self._read_particle_fields(chunks, ptf, None)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
for chunk in chunks: # These should be organized by grid filename
f = None
for g in chunk.objs:
if g.filename is None:
continue
if f is None:
# print("Opening (read) %s" % g.filename)
f = h5py.File(g.filename, mode="r")
nap = sum(g.NumberOfActiveParticles.values())
if g.NumberOfParticles == 0 and nap == 0:
continue
ds = f.get("/Grid%08i" % g.id)
for ptype, field_list in sorted(ptf.items()):
if ptype == "io":
if g.NumberOfParticles == 0:
continue
pds = ds
elif ptype == "DarkMatter":
if g.NumberOfActiveParticles[ptype] == 0:
continue
pds = ds
elif not g.NumberOfActiveParticles[ptype]:
continue
else:
for pname in ["Active Particles", "Particles"]:
pds = ds.get(f"{pname}/{ptype}")
if pds is not None:
break
else:
raise RuntimeError(
"Could not find active particle group in data."
)
pn = _particle_position_names.get(ptype, r"particle_position_%s")
x, y, z = (
np.asarray(pds.get(pn % ax)[()], dtype="=f8") for ax in "xyz"
)
if selector is None:
# This only ever happens if the call is made from
# _read_particle_coords.
yield ptype, (x, y, z)
continue
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in field_list:
data = np.asarray(pds.get(field)[()], "=f8")
if field in _convert_mass:
data *= g.dds.prod(dtype="f8")
yield (ptype, field), data[mask]
if f:
f.close()
def io_iter(self, chunks, fields):
h5_dtype = self._field_dtype
for chunk in chunks:
fid = None
filename = -1
for obj in chunk.objs:
if obj.filename is None:
continue
if obj.filename != filename:
# Note one really important thing here: even if we do
# implement LRU caching in the _read_obj_field function,
# we'll still be doing file opening and whatnot. This is a
# problem, but one we can return to.
if fid is not None:
fid.close()
fid = h5py.h5f.open(
obj.filename.encode("latin-1"), h5py.h5f.ACC_RDONLY
)
filename = obj.filename
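# Reuse the low-level HDF5 file handle for every field of every grid stored
# in the same file, reopening it only when the filename changes.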
for field in fields:
nodal_flag = self.ds.field_info[field].nodal_flag
dims = obj.ActiveDimensions[::-1] + nodal_flag[::-1]
data = np.empty(dims, dtype=h5_dtype)
yield field, obj, self._read_obj_field(obj, field, (fid, data))
if fid is not None:
fid.close()
def _read_obj_field(self, obj, field, fid_data):
if fid_data is None:
fid_data = (None, None)
fid, data = fid_data
if fid is None:
close = True
fid = h5py.h5f.open(obj.filename.encode("latin-1"), h5py.h5f.ACC_RDONLY)
else:
close = False
if data is None:
data = np.empty(obj.ActiveDimensions[::-1], dtype=self._field_dtype)
ftype, fname = field
try:
node = "/Grid%08i/%s" % (obj.id, fname)
dg = h5py.h5d.open(fid, node.encode("latin-1"))
except KeyError:
if fname == "Dark_Matter_Density":
data[:] = 0
return data.T
raise
dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
# I don't know why, but on some installations of h5py this works, but
# on others, nope. Doesn't seem to be a version thing.
# dg.close()
if close:
fid.close()
return data.T
class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
_dataset_type = "enzo_packed_3d_gz"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
NGZ = self.ds.parameters.get("NumberOfGhostZones", 3)
self._base = (slice(NGZ, -NGZ), slice(NGZ, -NGZ), slice(NGZ, -NGZ))
def _read_obj_field(self, *args, **kwargs):
return super()._read_obj_field(*args, **kwargs)[self._base]
class IOHandlerInMemory(BaseIOHandler):
_dataset_type = "enzo_inline"
def __init__(self, ds, ghost_zones=3):
self.ds = ds
import enzo
self.enzo = enzo
self.grids_in_memory = enzo.grid_data
self.old_grids_in_memory = enzo.old_grid_data
self.my_slice = (
slice(ghost_zones, -ghost_zones),
slice(ghost_zones, -ghost_zones),
slice(ghost_zones, -ghost_zones),
)
BaseIOHandler.__init__(self, ds)
def _read_field_names(self, grid):
fields = []
add_io = "io" in grid.ds.particle_types
for name, v in self.grids_in_memory[grid.id].items():
# NOTE: This won't work with 1D datasets or references.
if not hasattr(v, "shape") or v.dtype == "O":
continue
elif v.ndim == 1:
if grid.ds.dimensionality == 1:
fields.append(("enzo", str(name)))
elif add_io:
fields.append(("io", str(name)))
else:
fields.append(("enzo", str(name)))
return fields
def _read_fluid_selection(self, chunks, selector, fields, size):
rv = {}
# Now we have to do something unpleasant
chunks = list(chunks)
if isinstance(selector, GridSelector):
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
g = chunks[0].objs[0]
for ftype, fname in fields:
rv[(ftype, fname)] = self.grids_in_memory[g.id][fname].swapaxes(0, 2)
return rv
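# General path: size the output buffers from the selector count and fill them grid by grid.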
if size is None:
size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)
for field in fields:
ftype, fname = field
fsize = size
rv[field] = np.empty(fsize, dtype="float64")
ng = sum(len(c.objs) for c in chunks)
mylog.debug(
"Reading %s cells of %s fields in %s grids",
size,
[f2 for f1, f2 in fields],
ng,
)
ind = 0
for chunk in chunks:
for g in chunk.objs:
# We want a *hard error* here.
# if g.id not in self.grids_in_memory: continue
for field in fields:
ftype, fname = field
data_view = self.grids_in_memory[g.id][fname][
self.my_slice
].swapaxes(0, 2)
nd = g.select(selector, data_view, rv[field], ind)
ind += nd
assert ind == fsize
return rv
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
for chunk in chunks: # These should be organized by grid filename
for g in chunk.objs:
if g.id not in self.grids_in_memory:
continue
nap = sum(g.NumberOfActiveParticles.values())
if g.NumberOfParticles == 0 and nap == 0:
continue
for ptype in sorted(ptf):
x, y, z = (
self.grids_in_memory[g.id]["particle_position_x"],
self.grids_in_memory[g.id]["particle_position_y"],
self.grids_in_memory[g.id]["particle_position_z"],
)
yield ptype, (x, y, z)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
for chunk in chunks: # These should be organized by grid filename
for g in chunk.objs:
if g.id not in self.grids_in_memory:
continue
nap = sum(g.NumberOfActiveParticles.values())
if g.NumberOfParticles == 0 and nap == 0:
continue
for ptype, field_list in sorted(ptf.items()):
x, y, z = (
self.grids_in_memory[g.id]["particle_position_x"],
self.grids_in_memory[g.id]["particle_position_y"],
self.grids_in_memory[g.id]["particle_position_z"],
)
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in field_list:
data = self.grids_in_memory[g.id][field]
if field in _convert_mass:
data = data * g.dds.prod(dtype="f8")
yield (ptype, field), data[mask]
class IOHandlerPacked2D(IOHandlerPackedHDF5):
_dataset_type = "enzo_packed_2d"
_particle_reader = False
def _read_data_set(self, grid, field):
f = h5py.File(grid.filename, mode="r")
ds = f["/Grid%08i/%s" % (grid.id, field)][:]
f.close()
return ds.transpose()[:, :, None]
def _read_fluid_selection(self, chunks, selector, fields, size):
rv = {}
# Now we have to do something unpleasant
chunks = list(chunks)
if isinstance(selector, GridSelector):
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
g = chunks[0].objs[0]
f = h5py.File(g.filename, mode="r")
gds = f.get("/Grid%08i" % g.id)
for ftype, fname in fields:
rv[(ftype, fname)] = np.atleast_3d(gds.get(fname)[()].transpose())
f.close()
return rv
if size is None:
size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)
for field in fields:
ftype, fname = field
fsize = size
rv[field] = np.empty(fsize, dtype="float64")
ng = sum(len(c.objs) for c in chunks)
mylog.debug(
"Reading %s cells of %s fields in %s grids",
size,
[f2 for f1, f2 in fields],
ng,
)
ind = 0
for chunk in chunks:
f = None
for g in chunk.objs:
if f is None:
# print("Opening (count) %s" % g.filename)
f = h5py.File(g.filename, mode="r")
gds = f.get("/Grid%08i" % g.id)
if gds is None:
gds = f
for field in fields:
ftype, fname = field
ds = np.atleast_3d(gds.get(fname)[()].transpose())
nd = g.select(selector, ds, rv[field], ind) # caches
ind += nd
f.close()
return rv
class IOHandlerPacked1D(IOHandlerPackedHDF5):
_dataset_type = "enzo_packed_1d"
_particle_reader = False
def _read_data_set(self, grid, field):
f = h5py.File(grid.filename, mode="r")
ds = f["/Grid%08i/%s" % (grid.id, field)][:]
f.close()
return ds.transpose()[:, None, None]
|
[
"yt.utilities.on_demand_imports._h5py.File",
"yt.utilities.io_handler.BaseIOHandler.__init__",
"numpy.empty",
"yt.utilities.logger.ytLogger.debug"
] |
[((550, 584), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['grid.filename'], {'mode': '"""r"""'}), "(grid.filename, mode='r')\n", (559, 584), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((7940, 7972), 'yt.utilities.io_handler.BaseIOHandler.__init__', 'BaseIOHandler.__init__', (['self', 'ds'], {}), '(self, ds)\n', (7962, 7972), False, 'from yt.utilities.io_handler import BaseIOHandler\n'), ((9396, 9493), 'yt.utilities.logger.ytLogger.debug', 'mylog.debug', (['"""Reading %s cells of %s fields in %s grids"""', 'size', '[f2 for f1, f2 in fields]', 'ng'], {}), "('Reading %s cells of %s fields in %s grids', size, [f2 for f1,\n f2 in fields], ng)\n", (9407, 9493), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((12245, 12279), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['grid.filename'], {'mode': '"""r"""'}), "(grid.filename, mode='r')\n", (12254, 12279), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((13300, 13397), 'yt.utilities.logger.ytLogger.debug', 'mylog.debug', (['"""Reading %s cells of %s fields in %s grids"""', 'size', '[f2 for f1, f2 in fields]', 'ng'], {}), "('Reading %s cells of %s fields in %s grids', size, [f2 for f1,\n f2 in fields], ng)\n", (13311, 13397), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((14269, 14303), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['grid.filename'], {'mode': '"""r"""'}), "(grid.filename, mode='r')\n", (14278, 14303), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((6390, 6451), 'numpy.empty', 'np.empty', (['obj.ActiveDimensions[::-1]'], {'dtype': 'self._field_dtype'}), '(obj.ActiveDimensions[::-1], dtype=self._field_dtype)\n', (6398, 6451), True, 'import numpy as np\n'), ((9309, 9341), 'numpy.empty', 'np.empty', (['fsize'], {'dtype': '"""float64"""'}), "(fsize, dtype='float64')\n", (9317, 9341), True, 'import numpy as np\n'), ((12752, 12783), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['g.filename'], {'mode': '"""r"""'}), "(g.filename, mode='r')\n", (12761, 12783), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((13213, 13245), 'numpy.empty', 'np.empty', (['fsize'], {'dtype': '"""float64"""'}), "(fsize, dtype='float64')\n", (13221, 13245), True, 'import numpy as np\n'), ((2705, 2736), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['g.filename'], {'mode': '"""r"""'}), "(g.filename, mode='r')\n", (2714, 2736), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((5857, 5887), 'numpy.empty', 'np.empty', (['dims'], {'dtype': 'h5_dtype'}), '(dims, dtype=h5_dtype)\n', (5865, 5887), True, 'import numpy as np\n'), ((13669, 13700), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['g.filename'], {'mode': '"""r"""'}), "(g.filename, mode='r')\n", (13678, 13700), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n')]
|
import matplotlib.pyplot as plt
import random
import pickle
from skimage.transform import rotate
from scipy import ndimage
from skimage.util import img_as_ubyte
from joblib import Parallel, delayed
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble.forest import _generate_sample_indices
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from itertools import product
import keras
from keras import layers
from joblib import Parallel, delayed
from multiprocessing import Pool
import tensorflow as tf
from numba import cuda
import sys
sys.path.append("../../proglearn/")
from progressive_learner import ProgressiveLearner
from deciders import SimpleArgmaxAverage
from transformers import TreeClassificationTransformer, NeuralClassificationTransformer
from voters import TreeClassificationVoter, KNNClassificationVoter
def cross_val_data(data_x, data_y, total_cls=10):
x = data_x.copy()
y = data_y.copy()
idx = [np.where(data_y == u)[0] for u in np.unique(data_y)]
for i in range(total_cls):
indx = idx[i]#np.roll(idx[i],(cv-1)*100)
random.shuffle(indx)
if i==0:
train_x1 = x[indx[0:250],:]
train_x2 = x[indx[250:500],:]
train_y1 = y[indx[0:250]]
train_y2 = y[indx[250:500]]
test_x = x[indx[500:600],:]
test_y = y[indx[500:600]]
else:
train_x1 = np.concatenate((train_x1, x[indx[0:250],:]), axis=0)
train_x2 = np.concatenate((train_x2, x[indx[250:500],:]), axis=0)
train_y1 = np.concatenate((train_y1, y[indx[0:250]]), axis=0)
train_y2 = np.concatenate((train_y2, y[indx[250:500]]), axis=0)
test_x = np.concatenate((test_x, x[indx[500:600],:]), axis=0)
test_y = np.concatenate((test_y, y[indx[500:600]]), axis=0)
return train_x1, train_y1, train_x2, train_y2, test_x, test_y
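# Run one set of replicates of the rotation experiment: task 1 is trained on the
# original images, a second transformer is trained on images rotated by `angle`,
# and the backward-transfer error on task 1 is compared with the single-task error.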
def LF_experiment(data_x, data_y, angle, model, granularity, reps=1, ntrees=29, acorn=None):
if acorn is not None:
np.random.seed(acorn)
errors = np.zeros(2)
for rep in range(reps):
print("Starting Rep {} of Angle {}".format(rep, angle))
train_x1, train_y1, train_x2, train_y2, test_x, test_y = cross_val_data(data_x, data_y, total_cls=10)
#change data angle for second task
tmp_data = train_x2.copy()
_tmp_ = np.zeros((32,32,3), dtype=int)
total_data = tmp_data.shape[0]
for i in range(total_data):
tmp_ = image_aug(tmp_data[i],angle)
tmp_data[i] = tmp_
if model == "uf":
train_x1 = train_x1.reshape((train_x1.shape[0], train_x1.shape[1] * train_x1.shape[2] * train_x1.shape[3]))
tmp_data = tmp_data.reshape((tmp_data.shape[0], tmp_data.shape[1] * tmp_data.shape[2] * tmp_data.shape[3]))
test_x = test_x.reshape((test_x.shape[0], test_x.shape[1] * test_x.shape[2] * test_x.shape[3]))
with tf.device('/gpu:'+str(int(angle // granularity) % 4)):
default_transformer_class = NeuralClassificationTransformer
network = keras.Sequential()
network.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=np.shape(train_x1)[1:]))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=254, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.Flatten())
network.add(layers.BatchNormalization())
network.add(layers.Dense(2000, activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Dense(2000, activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Dense(units=10, activation = 'softmax'))
default_transformer_kwargs = {"network" : network,
"euclidean_layer_idx" : -2,
"num_classes" : 10,
"optimizer" : keras.optimizers.Adam(3e-4)
}
default_voter_class = KNNClassificationVoter
default_voter_kwargs = {"k" : int(np.log2(len(train_x1)))}
default_decider_class = SimpleArgmaxAverage
progressive_learner = ProgressiveLearner(default_transformer_class = default_transformer_class,
default_transformer_kwargs = default_transformer_kwargs,
default_voter_class = default_voter_class,
default_voter_kwargs = default_voter_kwargs,
default_decider_class = default_decider_class)
progressive_learner.add_task(
X = train_x1,
y = train_y1,
transformer_voter_decider_split = [0.67, 0.33, 0],
decider_kwargs = {"classes" : np.unique(train_y1)}
)
progressive_learner.add_transformer(
X = tmp_data,
y = train_y2,
transformer_data_proportion = 1,
backward_task_ids = [0]
)
llf_task1=progressive_learner.predict(test_x, task_id=0)
llf_single_task=progressive_learner.predict(test_x, task_id=0, transformer_ids=[0])
errors[1] = errors[1]+(1 - np.mean(llf_task1 == test_y))
errors[0] = errors[0]+(1 - np.mean(llf_single_task == test_y))
errors = errors/reps
print("Errors For Angle {}: {}".format(angle, errors))
with open('rotation_results/angle_'+str(angle)+'_'+model+'.pickle', 'wb') as f:
pickle.dump(errors, f, protocol = 2)
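# Zoom the image, rotate it by the requested angle, and crop a window around
# the centroid to produce the rotated-task version of a CIFAR image.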
def image_aug(pic, angle, centroid_x=23, centroid_y=23, win=16, scale=1.45):
im_sz = int(np.floor(pic.shape[0]*scale))
pic_ = np.uint8(np.zeros((im_sz,im_sz,3),dtype=int))
pic_[:,:,0] = ndimage.zoom(pic[:,:,0],scale)
pic_[:,:,1] = ndimage.zoom(pic[:,:,1],scale)
pic_[:,:,2] = ndimage.zoom(pic[:,:,2],scale)
image_aug = rotate(pic_, angle, resize=False)
#print(image_aug.shape)
image_aug_ = image_aug[centroid_x-win:centroid_x+win,centroid_y-win:centroid_y+win,:]
return img_as_ubyte(image_aug_)
### MAIN HYPERPARAMS ###
model = "dnn"
granularity = 2
reps = 4
########################
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
data_y = np.concatenate([y_train, y_test])
data_y = data_y[:, 0]
def perform_angle(angle):
LF_experiment(data_x, data_y, angle, model, granularity, reps=reps, ntrees=16, acorn=1)
if model == "dnn":
for angle_adder in range(30, 180, granularity * 4):
angles = angle_adder + np.arange(0, granularity * 4, granularity)
with Pool(4) as p:
p.map(perform_angle, angles)
elif model == "uf":
angles = np.arange(30,180,2)
Parallel(n_jobs=-1)(delayed(LF_experiment)(data_x, data_y, angle, model, granularity, reps=20, ntrees=16, acorn=1) for angle in angles)
|
[
"keras.layers.Conv2D",
"keras.layers.Dense",
"sys.path.append",
"scipy.ndimage.zoom",
"numpy.arange",
"keras.Sequential",
"numpy.mean",
"skimage.transform.rotate",
"numpy.where",
"keras.datasets.cifar100.load_data",
"skimage.util.img_as_ubyte",
"numpy.random.seed",
"numpy.concatenate",
"keras.optimizers.Adam",
"random.shuffle",
"keras.layers.Flatten",
"numpy.floor",
"progressive_learner.ProgressiveLearner",
"keras.layers.BatchNormalization",
"numpy.shape",
"pickle.dump",
"numpy.unique",
"joblib.Parallel",
"numpy.zeros",
"multiprocessing.Pool",
"joblib.delayed"
] |
[((638, 673), 'sys.path.append', 'sys.path.append', (['"""../../proglearn/"""'], {}), "('../../proglearn/')\n", (653, 673), False, 'import sys\n'), ((7056, 7091), 'keras.datasets.cifar100.load_data', 'keras.datasets.cifar100.load_data', ([], {}), '()\n', (7089, 7091), False, 'import keras\n'), ((7101, 7134), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_test]'], {}), '([X_train, X_test])\n', (7115, 7134), True, 'import numpy as np\n'), ((7144, 7177), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_test]'], {}), '([y_train, y_test])\n', (7158, 7177), True, 'import numpy as np\n'), ((2145, 2156), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2153, 2156), True, 'import numpy as np\n'), ((6589, 6622), 'scipy.ndimage.zoom', 'ndimage.zoom', (['pic[:, :, 0]', 'scale'], {}), '(pic[:, :, 0], scale)\n', (6601, 6622), False, 'from scipy import ndimage\n'), ((6639, 6672), 'scipy.ndimage.zoom', 'ndimage.zoom', (['pic[:, :, 1]', 'scale'], {}), '(pic[:, :, 1], scale)\n', (6651, 6672), False, 'from scipy import ndimage\n'), ((6688, 6721), 'scipy.ndimage.zoom', 'ndimage.zoom', (['pic[:, :, 2]', 'scale'], {}), '(pic[:, :, 2], scale)\n', (6700, 6721), False, 'from scipy import ndimage\n'), ((6736, 6769), 'skimage.transform.rotate', 'rotate', (['pic_', 'angle'], {'resize': '(False)'}), '(pic_, angle, resize=False)\n', (6742, 6769), False, 'from skimage.transform import rotate\n'), ((6900, 6924), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['image_aug_'], {}), '(image_aug_)\n', (6912, 6924), False, 'from skimage.util import img_as_ubyte\n'), ((1170, 1190), 'random.shuffle', 'random.shuffle', (['indx'], {}), '(indx)\n', (1184, 1190), False, 'import random\n'), ((2109, 2130), 'numpy.random.seed', 'np.random.seed', (['acorn'], {}), '(acorn)\n', (2123, 2130), True, 'import numpy as np\n'), ((2456, 2488), 'numpy.zeros', 'np.zeros', (['(32, 32, 3)'], {'dtype': 'int'}), '((32, 32, 3), dtype=int)\n', (2464, 2488), True, 'import numpy as np\n'), ((6352, 6386), 'pickle.dump', 'pickle.dump', (['errors', 'f'], {'protocol': '(2)'}), '(errors, f, protocol=2)\n', (6363, 6386), False, 'import pickle\n'), ((6483, 6513), 'numpy.floor', 'np.floor', (['(pic.shape[0] * scale)'], {}), '(pic.shape[0] * scale)\n', (6491, 6513), True, 'import numpy as np\n'), ((6533, 6571), 'numpy.zeros', 'np.zeros', (['(im_sz, im_sz, 3)'], {'dtype': 'int'}), '((im_sz, im_sz, 3), dtype=int)\n', (6541, 6571), True, 'import numpy as np\n'), ((7570, 7591), 'numpy.arange', 'np.arange', (['(30)', '(180)', '(2)'], {}), '(30, 180, 2)\n', (7579, 7591), True, 'import numpy as np\n'), ((1027, 1048), 'numpy.where', 'np.where', (['(data_y == u)'], {}), '(data_y == u)\n', (1035, 1048), True, 'import numpy as np\n'), ((1061, 1078), 'numpy.unique', 'np.unique', (['data_y'], {}), '(data_y)\n', (1070, 1078), True, 'import numpy as np\n'), ((1485, 1538), 'numpy.concatenate', 'np.concatenate', (['(train_x1, x[indx[0:250], :])'], {'axis': '(0)'}), '((train_x1, x[indx[0:250], :]), axis=0)\n', (1499, 1538), True, 'import numpy as np\n'), ((1561, 1616), 'numpy.concatenate', 'np.concatenate', (['(train_x2, x[indx[250:500], :])'], {'axis': '(0)'}), '((train_x2, x[indx[250:500], :]), axis=0)\n', (1575, 1616), True, 'import numpy as np\n'), ((1639, 1689), 'numpy.concatenate', 'np.concatenate', (['(train_y1, y[indx[0:250]])'], {'axis': '(0)'}), '((train_y1, y[indx[0:250]]), axis=0)\n', (1653, 1689), True, 'import numpy as np\n'), ((1713, 1765), 'numpy.concatenate', 'np.concatenate', (['(train_y2, y[indx[250:500]])'], {'axis': '(0)'}), '((train_y2, 
y[indx[250:500]]), axis=0)\n', (1727, 1765), True, 'import numpy as np\n'), ((1788, 1841), 'numpy.concatenate', 'np.concatenate', (['(test_x, x[indx[500:600], :])'], {'axis': '(0)'}), '((test_x, x[indx[500:600], :]), axis=0)\n', (1802, 1841), True, 'import numpy as np\n'), ((1862, 1912), 'numpy.concatenate', 'np.concatenate', (['(test_y, y[indx[500:600]])'], {'axis': '(0)'}), '((test_y, y[indx[500:600]]), axis=0)\n', (1876, 1912), True, 'import numpy as np\n'), ((3182, 3200), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (3198, 3200), False, 'import keras\n'), ((4969, 5237), 'progressive_learner.ProgressiveLearner', 'ProgressiveLearner', ([], {'default_transformer_class': 'default_transformer_class', 'default_transformer_kwargs': 'default_transformer_kwargs', 'default_voter_class': 'default_voter_class', 'default_voter_kwargs': 'default_voter_kwargs', 'default_decider_class': 'default_decider_class'}), '(default_transformer_class=default_transformer_class,\n default_transformer_kwargs=default_transformer_kwargs,\n default_voter_class=default_voter_class, default_voter_kwargs=\n default_voter_kwargs, default_decider_class=default_decider_class)\n', (4987, 5237), False, 'from progressive_learner import ProgressiveLearner\n'), ((7426, 7468), 'numpy.arange', 'np.arange', (['(0)', '(granularity * 4)', 'granularity'], {}), '(0, granularity * 4, granularity)\n', (7435, 7468), True, 'import numpy as np\n'), ((7482, 7489), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7486, 7489), False, 'from multiprocessing import Pool\n'), ((7594, 7613), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (7602, 7613), False, 'from joblib import Parallel, delayed\n'), ((3351, 3378), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3376, 3378), False, 'from keras import layers\n'), ((3404, 3499), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3417, 3499), False, 'from keras import layers\n'), ((3525, 3552), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3550, 3552), False, 'from keras import layers\n'), ((3578, 3673), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3591, 3673), False, 'from keras import layers\n'), ((3699, 3726), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3724, 3726), False, 'from keras import layers\n'), ((3752, 3848), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3765, 3848), False, 'from keras import layers\n'), ((3874, 3901), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3899, 3901), False, 'from keras import layers\n'), ((3927, 4023), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(254)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=254, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3940, 4023), False, 
'from keras import layers\n'), ((4050, 4066), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4064, 4066), False, 'from keras import layers\n'), ((4092, 4119), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4117, 4119), False, 'from keras import layers\n'), ((4145, 4182), 'keras.layers.Dense', 'layers.Dense', (['(2000)'], {'activation': '"""relu"""'}), "(2000, activation='relu')\n", (4157, 4182), False, 'from keras import layers\n'), ((4208, 4235), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4233, 4235), False, 'from keras import layers\n'), ((4261, 4298), 'keras.layers.Dense', 'layers.Dense', (['(2000)'], {'activation': '"""relu"""'}), "(2000, activation='relu')\n", (4273, 4298), False, 'from keras import layers\n'), ((4324, 4351), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4349, 4351), False, 'from keras import layers\n'), ((4377, 4421), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (4389, 4421), False, 'from keras import layers\n'), ((4677, 4706), 'keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.0003)'], {}), '(0.0003)\n', (4698, 4706), False, 'import keras\n'), ((6070, 6098), 'numpy.mean', 'np.mean', (['(llf_task1 == test_y)'], {}), '(llf_task1 == test_y)\n', (6077, 6098), True, 'import numpy as np\n'), ((6139, 6173), 'numpy.mean', 'np.mean', (['(llf_single_task == test_y)'], {}), '(llf_single_task == test_y)\n', (6146, 6173), True, 'import numpy as np\n'), ((7614, 7636), 'joblib.delayed', 'delayed', (['LF_experiment'], {}), '(LF_experiment)\n', (7621, 7636), False, 'from joblib import Parallel, delayed\n'), ((5615, 5634), 'numpy.unique', 'np.unique', (['train_y1'], {}), '(train_y1)\n', (5624, 5634), True, 'import numpy as np\n'), ((3302, 3320), 'numpy.shape', 'np.shape', (['train_x1'], {}), '(train_x1)\n', (3310, 3320), True, 'import numpy as np\n')]
|
# <NAME>
# S = 1/2, I = 1/2
# Spin 1/2 electron coupled to a spin 1/2 nucleus
import numpy as np
from scipy.linalg import expm
from matplotlib.pylab import *
from matplotlib import cm
sigma_x = 0.5*np.r_[[[0, 1],[1, 0]]]
sigma_y = 0.5*np.r_[[[0,-1j],[1j, 0]]]
sigma_z = 0.5*np.r_[[[1, 0],[0, -1]]]
Identity = np.eye(2)
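# Two-spin operators via Kronecker products: S operators act on the electron space,
# I operators on the nuclear space, each padded with the identity of the other space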
Sx = np.kron(sigma_x, Identity)
Sy = np.kron(sigma_y, Identity)
Sz = np.kron(sigma_z, Identity)
Ix = np.kron(Identity, sigma_x)
Iy = np.kron(Identity, sigma_y)
Iz = np.kron(Identity, sigma_z)
SxIx = np.kron(sigma_x,sigma_z)
SxIx2 = np.dot(Sx,Iz)
print(SxIx)
print(SxIx2)
print(np.allclose(SxIx,SxIx2))
omega_S = 1.76e11 # rad / (s * T)
omega_I = 267.522e6 # rad / (s * T)
Aiso = 2*np.pi * 50.e6 # Isotropic Hyperfine coupling rad / s
B0 = 0.35# T
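# Hamiltonian: electron Zeeman + nuclear Zeeman + secular hyperfine coupling (Sz*Iz)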
H = omega_S/(2.*np.pi)*B0*Sz + omega_I/(2.*np.pi)*B0*Iz + Aiso * np.dot(Sz,Iz)
#H = omega_S/(2.*np.pi)*B0*Sz + omega_I/(2.*np.pi)*B0*Iz + Aiso * (np.dot(Sx,Ix) + np.dot(Sy,Iy) + np.dot(Sz,Iz))
print('Hamiltonian:')
print(H)
out = np.linalg.eig(H)
E = out[0]
print(E)
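# Energy differences between the four eigenstates:
# E12 and E34 are the nuclear (NMR) transitions, E13 and E24 the electron (EPR) transitions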
E12 = E[0] - E[1]
E34 = E[2] - E[3]
E13 = E[0] - E[2]
E24 = E[1] - E[3]
print(E12)
print(E34)
print(E13)
print(E24)
print('Nuclear')
print('%0.05f MHz'%(E12 / 1e6))
print('%0.05f MHz'%(E34 / 1e6))
print('Electron')
print('%0.05f GHz'%(E13 / 1e9))
print('%0.05f GHz'%(E24 / 1e9))
matshow(abs(H), cmap = cm.jet)
title('Hamiltonian')
show()
|
[
"numpy.eye",
"numpy.allclose",
"numpy.linalg.eig",
"numpy.kron",
"numpy.dot"
] |
[((308, 317), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (314, 317), True, 'import numpy as np\n'), ((325, 351), 'numpy.kron', 'np.kron', (['sigma_x', 'Identity'], {}), '(sigma_x, Identity)\n', (332, 351), True, 'import numpy as np\n'), ((357, 383), 'numpy.kron', 'np.kron', (['sigma_y', 'Identity'], {}), '(sigma_y, Identity)\n', (364, 383), True, 'import numpy as np\n'), ((389, 415), 'numpy.kron', 'np.kron', (['sigma_z', 'Identity'], {}), '(sigma_z, Identity)\n', (396, 415), True, 'import numpy as np\n'), ((422, 448), 'numpy.kron', 'np.kron', (['Identity', 'sigma_x'], {}), '(Identity, sigma_x)\n', (429, 448), True, 'import numpy as np\n'), ((454, 480), 'numpy.kron', 'np.kron', (['Identity', 'sigma_y'], {}), '(Identity, sigma_y)\n', (461, 480), True, 'import numpy as np\n'), ((486, 512), 'numpy.kron', 'np.kron', (['Identity', 'sigma_z'], {}), '(Identity, sigma_z)\n', (493, 512), True, 'import numpy as np\n'), ((521, 546), 'numpy.kron', 'np.kron', (['sigma_x', 'sigma_z'], {}), '(sigma_x, sigma_z)\n', (528, 546), True, 'import numpy as np\n'), ((555, 569), 'numpy.dot', 'np.dot', (['Sx', 'Iz'], {}), '(Sx, Iz)\n', (561, 569), True, 'import numpy as np\n'), ((1004, 1020), 'numpy.linalg.eig', 'np.linalg.eig', (['H'], {}), '(H)\n', (1017, 1020), True, 'import numpy as np\n'), ((601, 625), 'numpy.allclose', 'np.allclose', (['SxIx', 'SxIx2'], {}), '(SxIx, SxIx2)\n', (612, 625), True, 'import numpy as np\n'), ((838, 852), 'numpy.dot', 'np.dot', (['Sz', 'Iz'], {}), '(Sz, Iz)\n', (844, 852), True, 'import numpy as np\n')]
|
'''
Created on Mar 6, 2018
@author: cef
hp functions for workign with dictionaries
'''
import logging, os, sys, math, copy, inspect
from collections import OrderedDict
from weakref import WeakValueDictionary as wdict
import numpy as np
import hp.basic
mod_logger = logging.getLogger(__name__) #creates a child logger of the root
def dict_2_logr(d, logger= mod_logger): #log each value of the dictionary to file
    logger = logger.getChild('dict_2_logr')
    msg = '\n'
    for key, value in d.iteritems():
msg = msg + ' key: %s\n value: %s \n'%(key, value)
logger.debug(msg)
def key_list(d, #return the intersection of the dict.keys() and the key_list
key_list, logger = mod_logger):
logger = logger.getChild('key_list')
#===========================================================================
# pre check
#===========================================================================
bool_list = hp.basic.bool_list_in_list(d.keys(), key_list)
if not bool_list.any(): raise IOError #check if any are not found
#===========================================================================
# build the found values
#===========================================================================
values_fnd_list = []
for key, value in d.iteritems():
if key in key_list: values_fnd_list.append(value)
return values_fnd_list
def build_nones_dict(key_list, logger=mod_logger): #add 'None' values to the passed keys
    val_list = np.full(len(key_list), None) #1-d array of Nones, one per key
    d = dict(zip(key_list, val_list)) #avoid shadowing the dict builtin
    return d
def merge_two_dicts(x, y):
if x is None: return y
if y is None: return x
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def value_by_ksearch(ksearch_str, d, #get the entry that matches the search str
logger=mod_logger, *search_args):
#===========================================================================
# take a shot at a perfect match
#===========================================================================
try:
return d[ksearch_str]
except:
#find a match for this key
k_fnd = hp.basic.list_search(d.keys(), ksearch_str, *search_args)
if k_fnd is None:
logger = logger.getChild('value_by_ksearch')
logger.debug('could not find \'%s\' in %i dict keys. returning None'%(ksearch_str, len(d)))
return None
else:
return d[k_fnd]
def merge(dl, dr, #intelligent dictionary merging
set_type = 'intersect',
method = 'exact',
          container = dict, #pass the container type; it is called on the result
logger = mod_logger, *search_args):
if set_type == 'union':
if method == 'exact':
            d_merge = merge_two_dicts(dl, dr)
else:
raise IOError #todo
elif set_type == 'intersect':
d_merge = subset(dl, dr.keys(), set_type = set_type,
method=method, container=container, logger=logger, *search_args)
else: raise IOError
logger.debug('got d_merge %i'%len(d_merge))
return container(d_merge)
def subset_pfx(d_big, prefix, logger=mod_logger):
#===========================================================================
# shortcuts
#===========================================================================
if len(d_big) == 0: return dict()
#===========================================================================
# defaults
#===========================================================================
logger = logger.getChild('subset_pfx')
d = copy.copy(d_big)
fnd_d = dict()
for k, v in d.iteritems():
if k.startswith(prefix):
fnd_d[k] = v
logger.debug('found %i entries with prefix \'%s\' \n'%(len(fnd_d), prefix))
return fnd_d
def subset(d_big, l, #get a dictionary subset using standard user inputs
#ordered = False, using containers instead
set_type = 'sub',
method = 'exact',
container = dict,
logger = mod_logger,
*search_args):
"""
#===========================================================================
# INPUTS
#===========================================================================
    l: list of keys (within d_big) on which to return the subset
set_type: how to treat the set
intersect: returna dictionary with only the common keys
sub: raise a flag if not every item in 'l' is found in d_big.keys()
method: what type of key search to perform (re.function)
search: look for a key in the dictionary that contains the list entry.
returned d is keyed by the list
"""
logger = logger.getChild('subset')
#===========================================================================
    # setup
#==========================================================================
d = container()
"""
#dictionary setup
if ordered: d = OrderedDict()
else: d = dict()"""
#input list setup
if isinstance(l, list): pass
elif isinstance(l, basestring): l = [l]
elif l is None: return d
else: raise IOError
nofnd_l = []
#===========================================================================
# determine subset by kwarg
#===========================================================================
for k in l:
try: #attempt teh direct match
d[k] = d_big[k]
except:
#===================================================================
# try again using search functions
#===================================================================
try:
if method == 'search':
#search and return this value
v = value_by_ksearch(k, d_big, logger=logger, *search_args)
if not v is None:
d[k] = v
continue #not sure this is needed
else: raise ValueError
else: raise ValueError
#===================================================================
# nothing found. proceed based on set_type
#===================================================================
except:
logger.debug('unable to find \'%s\' in the dict with method \'%s\''%(k, method))
if set_type == 'sub':
boolar = hp.basic.bool_list_in_list(d_big.keys(), l)
if not np.all(boolar):
logger.error('%i entries in list not found in big_d'%(len(l) - boolar.sum()))
raise IOError
elif set_type == 'intersect': nofnd_l.append(k)
else: raise IOError
#===========================================================================
# wrap up
#===========================================================================
if len(nofnd_l) >0:
logger.debug('%i of %i list entries DO NOT intersect: %s'%(len(nofnd_l), len(l), nofnd_l))
if set_type == 'sub': raise IOError
#===========================================================================
# check
#===========================================================================
if len(d) == 0:
logger.warning('0 common values between d(%i) and l(%i)'%(len(d), len(l)))
logger.debug('returning d with %i entries: %s \n'%(len(d), d.keys()))
return container(d)
#===============================================================================
# def subset(d_big, l, #get a dictionary subset using standard user inputs
# ordered = False, set_type = 'sub', search = 'search',
# logger = mod_logger):
# """
# #===========================================================================
# # INPUTS
# #===========================================================================
# l: list of keys (within d_big) on which to erturn the sutset
#
# set_type: how to treat the set
# intersect: returna dictionary with only the common keys
# sub: raise a flag if not every item in 'l' is found in d_big.keys()
#
# search: what type of key search to perform (re.function)
# """
# logger = logger.getChild('subset')
#
# #===========================================================================
# # setup[]
# #==========================================================================
# #dictionary setup
# if ordered: d = OrderedDict()
# else: d = dict()
#
# #input list setup
# if isinstance(l, list): pass
# elif isinstance(l, basestring): l = [l]
# elif l is None: return None
# else: raise IOError
#
# #===========================================================================
# # determine subset by kwarg
# #===========================================================================
# if set_type == 'sub':
# try:
# for k in l:
# d[k] = d_big[k]
#
# except:
# boolar = hp.basic.bool_list_in_list(d_big.keys(), l)
#
# if not np.all(boolar):
# logger.error('%i entries in list not found in big_d'%(len(l) - boolar.sum()))
#
# raise IOError
#
# if len(d) == 0: raise IOError
#
# elif set_type == 'intersect':
# nofnd_l = []
# for k in l:
# try:
# d[k] = d_big[k]
# except:
# nofnd_l.append(k)
#
# if len(nofnd_l) >0:
# logger.debug('%i of %i list entries DO NOT intersect: %s'%(len(nofnd_l), len(l), nofnd_l))
#
# #===========================================================================
# # check
# #===========================================================================
# if len(d) == 0: logger.warning('0 common values between d(%i) and l(%i)'%
# (len(d), len(l)))
#
# return d
#===============================================================================
class deepcopier():
tries = 0 #keep track of the loop
def __init__(self,obj, logger=mod_logger):
self.logger = logger.getChild('deepcopier')
self.copy_o = obj
def tryit(self, obj=None): #make as deep a copy as possible
if obj is None: obj = self.copy_o
#===========================================================================
# simple try
#===========================================================================
try:
copy_o = copy.deepcopy(obj)
return copy_o
except:
self.logger.debug('failed first attempt')
self.tries += 1
#=======================================================================
# sophisiticated try
#=======================================================================
self.logger.debug('copy attempt %i'%self.tries)
if self.tries > 10: return self.copy_o
#try for each element of the dict
if isinstance(obj, dict):
new_d = dict()
for key, value in obj.iteritems():
try:
new_d[key] = self.tryit(obj = value)
except:
                    new_d[key] = copy.copy(value) #fall back to a shallow copy of the value
self.logger.debug('returning new_d with %i entries: %s'%(len(new_d), new_d.keys()))
else: raise IOError
return new_d
from collections import OrderedDict
class MyOrderedDict(OrderedDict):
"""
as there is no builtin method to add to the head of an ordered dict,
here we add a method
https://stackoverflow.com/questions/16664874/how-can-i-add-an-element-at-the-top-of-an-ordereddict-in-python
"""
def prepend(self, key, value, dict_setitem=dict.__setitem__):
"""add entry to the front of myself"""
root = self._OrderedDict__root
first = root[1]
if key in self:
link = self._OrderedDict__map[key]
link_prev, link_next, _ = link
link_prev[1] = link_next
link_next[0] = link_prev
link[0] = root
link[1] = first
root[1] = first[0] = link
else:
root[1] = first[0] = self._OrderedDict__map[key] = [root, first, key]
dict_setitem(self, key, value)
|
[
"logging.getLogger",
"numpy.all",
"copy.copy",
"copy.deepcopy"
] |
[((291, 318), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (308, 318), False, 'import logging, os, sys, math, copy, inspect\n'), ((4046, 4062), 'copy.copy', 'copy.copy', (['d_big'], {}), '(d_big)\n', (4055, 4062), False, 'import logging, os, sys, math, copy, inspect\n'), ((11902, 11920), 'copy.deepcopy', 'copy.deepcopy', (['obj'], {}), '(obj)\n', (11915, 11920), False, 'import logging, os, sys, math, copy, inspect\n'), ((12697, 12711), 'copy.copy', 'copy.copy', (['obj'], {}), '(obj)\n', (12706, 12711), False, 'import logging, os, sys, math, copy, inspect\n'), ((7354, 7368), 'numpy.all', 'np.all', (['boolar'], {}), '(boolar)\n', (7360, 7368), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras import activations
def get_standard_values():
'''
These are just a set of floats used for testing the activation
functions, and are useful in multiple tests.
'''
return np.array([[0, 0.1, 0.5, 0.9, 1.0]], dtype=K.floatx())
def test_softmax():
'''
Test using a reference implementation of softmax
'''
def softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softmax(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_time_distributed_softmax():
x = K.placeholder(shape=(1, 1, 5))
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
f([test_values])[0]
def test_softplus():
'''
Test using a reference softplus implementation
'''
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softplus(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softplus(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_softsign():
'''
Test using a reference softsign implementation
'''
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softsign(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softsign(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_sigmoid():
'''
Test using a numerically stable reference sigmoid implementation
'''
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_hard_sigmoid():
'''
Test using a reference hard sigmoid implementation
'''
def ref_hard_sigmoid(x):
'''
Reference hard sigmoid with slope and shift values from theano, see
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
'''
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.hard_sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_relu():
'''
Relu implementation doesn't depend on the value being
a theano variable. Testing ints, floats and theano tensors.
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.relu(x)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
def test_elu():
x = K.placeholder(ndim=2)
f = K.function([x], [activations.elu(x, 0.5)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=K.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
assert_allclose(result, true_result)
def test_tanh():
test_values = get_standard_values()
x = K.placeholder(ndim=2)
exp = activations.tanh(x)
f = K.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_linear():
'''
This function does no input validation, it just returns the thing
that was passed in.
'''
xs = [1, 5, True, None, 'foo']
for x in xs:
assert(x == activations.linear(x))
if __name__ == '__main__':
pytest.main([__file__])
|
[
"keras.activations.linear",
"keras.backend.floatx",
"keras.activations.hard_sigmoid",
"numpy.testing.assert_allclose",
"keras.backend.placeholder",
"numpy.tanh",
"pytest.main",
"numpy.max",
"numpy.exp",
"keras.activations.softmax",
"keras.activations.sigmoid",
"numpy.size",
"keras.activations.softsign",
"numpy.vectorize",
"numpy.ones_like",
"keras.activations.relu",
"numpy.absolute",
"keras.activations.softplus",
"keras.activations.tanh",
"numpy.sum",
"keras.activations.elu",
"keras.backend.function"
] |
[((575, 596), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (588, 596), True, 'from keras import backend as K\n'), ((761, 806), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (776, 806), False, 'from numpy.testing import assert_allclose\n'), ((854, 884), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(1, 1, 5)'}), '(shape=(1, 1, 5))\n', (867, 884), True, 'from keras import backend as K\n'), ((1242, 1263), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (1255, 1263), True, 'from keras import backend as K\n'), ((1430, 1475), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (1445, 1475), False, 'from numpy.testing import assert_allclose\n'), ((1658, 1679), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (1671, 1679), True, 'from keras import backend as K\n'), ((1846, 1891), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (1861, 1891), False, 'from numpy.testing import assert_allclose\n'), ((2167, 2192), 'numpy.vectorize', 'np.vectorize', (['ref_sigmoid'], {}), '(ref_sigmoid)\n', (2179, 2192), True, 'import numpy as np\n'), ((2202, 2223), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (2215, 2223), True, 'from keras import backend as K\n'), ((2388, 2433), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (2403, 2433), False, 'from numpy.testing import assert_allclose\n'), ((2859, 2889), 'numpy.vectorize', 'np.vectorize', (['ref_hard_sigmoid'], {}), '(ref_hard_sigmoid)\n', (2871, 2889), True, 'import numpy as np\n'), ((2899, 2920), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (2912, 2920), True, 'from keras import backend as K\n'), ((3095, 3140), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (3110, 3140), False, 'from numpy.testing import assert_allclose\n'), ((3306, 3327), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (3319, 3327), True, 'from keras import backend as K\n'), ((3496, 3544), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'test_values'], {'rtol': '(1e-05)'}), '(result, test_values, rtol=1e-05)\n', (3511, 3544), False, 'from numpy.testing import assert_allclose\n'), ((3571, 3592), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (3584, 3592), True, 'from keras import backend as K\n'), ((3765, 3813), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'test_values'], {'rtol': '(1e-05)'}), '(result, test_values, rtol=1e-05)\n', (3780, 3813), False, 'from numpy.testing import assert_allclose\n'), ((3970, 4006), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'true_result'], {}), '(result, true_result)\n', (3985, 4006), False, 'from numpy.testing import assert_allclose\n'), ((4075, 4096), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (4088, 4096), True, 'from keras import backend as K\n'), ((4107, 4126), 'keras.activations.tanh', 'activations.tanh', (['x'], {}), '(x)\n', (4123, 4126), False, 'from keras import 
activations\n'), ((4135, 4157), 'keras.backend.function', 'K.function', (['[x]', '[exp]'], {}), '([x], [exp])\n', (4145, 4157), True, 'from keras import backend as K\n'), ((4207, 4227), 'numpy.tanh', 'np.tanh', (['test_values'], {}), '(test_values)\n', (4214, 4227), True, 'import numpy as np\n'), ((4232, 4277), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (4247, 4277), False, 'from numpy.testing import assert_allclose\n'), ((4537, 4560), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (4548, 4560), False, 'import pytest\n'), ((491, 505), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (497, 505), True, 'import numpy as np\n'), ((518, 536), 'numpy.exp', 'np.exp', (['(values - m)'], {}), '(values - m)\n', (524, 536), True, 'import numpy as np\n'), ((351, 361), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (359, 361), True, 'from keras import backend as K\n'), ((556, 565), 'numpy.sum', 'np.sum', (['e'], {}), '(e)\n', (562, 565), True, 'import numpy as np\n'), ((622, 644), 'keras.activations.softmax', 'activations.softmax', (['x'], {}), '(x)\n', (641, 644), False, 'from keras import activations\n'), ((910, 932), 'keras.activations.softmax', 'activations.softmax', (['x'], {}), '(x)\n', (929, 932), False, 'from keras import activations\n'), ((1024, 1044), 'numpy.size', 'np.size', (['test_values'], {}), '(test_values)\n', (1031, 1044), True, 'import numpy as np\n'), ((1289, 1312), 'keras.activations.softplus', 'activations.softplus', (['x'], {}), '(x)\n', (1309, 1312), False, 'from keras import activations\n'), ((1705, 1728), 'keras.activations.softsign', 'activations.softsign', (['x'], {}), '(x)\n', (1725, 1728), False, 'from keras import activations\n'), ((2112, 2121), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2118, 2121), True, 'import numpy as np\n'), ((2249, 2271), 'keras.activations.sigmoid', 'activations.sigmoid', (['x'], {}), '(x)\n', (2268, 2271), False, 'from keras import activations\n'), ((2946, 2973), 'keras.activations.hard_sigmoid', 'activations.hard_sigmoid', (['x'], {}), '(x)\n', (2970, 2973), False, 'from keras import activations\n'), ((3353, 3372), 'keras.activations.relu', 'activations.relu', (['x'], {}), '(x)\n', (3369, 3372), False, 'from keras import activations\n'), ((3618, 3641), 'keras.activations.elu', 'activations.elu', (['x', '(0.5)'], {}), '(x, 0.5)\n', (3633, 3641), False, 'from keras import activations\n'), ((3864, 3874), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3872, 3874), True, 'from keras import backend as K\n'), ((3932, 3955), 'numpy.exp', 'np.exp', (['negative_values'], {}), '(negative_values)\n', (3938, 3955), True, 'import numpy as np\n'), ((4481, 4502), 'keras.activations.linear', 'activations.linear', (['x'], {}), '(x)\n', (4499, 4502), False, 'from keras import activations\n'), ((1204, 1219), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1216, 1219), True, 'import numpy as np\n'), ((1222, 1231), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1228, 1231), True, 'import numpy as np\n'), ((1615, 1630), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1627, 1630), True, 'import numpy as np\n'), ((1633, 1647), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (1644, 1647), True, 'import numpy as np\n'), ((2070, 2080), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2076, 2080), True, 'import numpy as np\n')]
|
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
transformations
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
:var DEG: unit of degree
:var RAD: unit of radiant
"""
__author__="<NAME>, <EMAIL>"
import numpy
import math
_TYPE=numpy.float64
DEG=math.pi/180.
RAD=1.
class Transformation(object):
"""
General class to define an affine transformation *x->Ax+b*.
"""
def __init__(self):
"""
Creates a linear transformation.
"""
pass
def __call__(self,x=numpy.zeros((3,))):
"""
Applies transformation to ``x``.
"""
        raise NotImplementedError()
class Translation(Transformation):
"""
Defines a translation *x->x+b*.
"""
def __init__(self,b=numpy.zeros((3,),dtype=_TYPE)):
"""
Creates the linear transformation *x->x+b*.
"""
super(Translation, self).__init__()
self.__b=numpy.array(b,_TYPE)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies translation to ``x``.
"""
return numpy.array(x,_TYPE)+self.__b
class Rotatation(Transformation):
"""
Defines a rotation.
"""
def __init__(self,axis=numpy.ones((3,),dtype=_TYPE),point=numpy.zeros((3,),dtype=_TYPE),angle=0.*RAD):
"""
Creates a rotation using an axis and a point on the axis.
"""
self.__axis=numpy.array(axis,dtype=_TYPE)
self.__point=numpy.array(point,dtype=_TYPE)
lax=numpy.dot(self.__axis,self.__axis)
if not lax>0:
raise ValueError("points must be distinct.")
self.__axis/=math.sqrt(lax)
self.__angle=float(angle)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies the rotation to ``x``.
"""
x=numpy.array(x,_TYPE)
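        # split x - point into a component along the axis (z0) and one
        # perpendicular to it (z_per); only the perpendicular part is rotated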
z=x-self.__point
z0=numpy.dot(z,self.__axis)
z_per=z-z0*self.__axis
lz_per=numpy.dot(z_per,z_per)
if lz_per>0:
axis1=z_per/math.sqrt(lz_per)
axis2=_cross(axis1,self.__axis)
lax2=numpy.dot(axis2,axis2)
if lax2>0:
axis2/=math.sqrt(lax2)
return z0*self.__axis+math.sqrt(lz_per)*(math.cos(self.__angle)*axis1-math.sin(self.__angle)*axis2)+self.__point
else:
return x
else:
return x
def _cross(x, y):
"""
Returns the cross product of ``x`` and ``y``.
"""
return numpy.array([x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y[1] - x[1] * y[0]], _TYPE)
class Dilation(Transformation):
"""
Defines a dilation.
"""
def __init__(self,factor=1.,center=numpy.zeros((3,),dtype=_TYPE)):
"""
Creates a dilation with a center and a given expansion/contraction
factor.
"""
if not abs(factor)>0:
raise ValueError("factor must be non-zero.")
self.__factor=factor
self.__center=numpy.array(center,dtype=_TYPE)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies dilation to ``x``.
"""
x=numpy.array(x,_TYPE)
return self.__factor*(x-self.__center)+self.__center
class Reflection(Transformation):
"""
Defines a reflection on a plane.
"""
def __init__(self,normal=numpy.ones((3,),dtype=_TYPE),offset=0.):
"""
Defines a reflection on a plane defined in normal form.
"""
self.__normal=numpy.array(normal,dtype=_TYPE)
ln=math.sqrt(numpy.dot(self.__normal,self.__normal))
if not ln>0.:
raise ValueError("normal must have positive length.")
self.__normal/=ln
if isinstance(offset,float) or isinstance(offset,int):
self.__offset=offset/ln
else:
self.__offset=numpy.dot(numpy.array(offset,dtype=_TYPE),self.__normal)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies reflection to ``x``.
"""
x=numpy.array(x,_TYPE)
return x - 2*(numpy.dot(x,self.__normal)-self.__offset)*self.__normal
|
[
"numpy.ones",
"math.sqrt",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"math.sin"
] |
[((3470, 3576), 'numpy.array', 'numpy.array', (['[x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y[1] - x[1] *\n y[0]]', '_TYPE'], {}), '([x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y\n [1] - x[1] * y[0]], _TYPE)\n', (3481, 3576), False, 'import numpy\n'), ((1609, 1626), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (1620, 1626), False, 'import numpy\n'), ((1837, 1867), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (1848, 1867), False, 'import numpy\n'), ((2001, 2022), 'numpy.array', 'numpy.array', (['b', '_TYPE'], {}), '(b, _TYPE)\n', (2012, 2022), False, 'import numpy\n'), ((2047, 2064), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (2058, 2064), False, 'import numpy\n'), ((2272, 2301), 'numpy.ones', 'numpy.ones', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (2282, 2301), False, 'import numpy\n'), ((2307, 2337), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (2318, 2337), False, 'import numpy\n'), ((2458, 2488), 'numpy.array', 'numpy.array', (['axis'], {'dtype': '_TYPE'}), '(axis, dtype=_TYPE)\n', (2469, 2488), False, 'import numpy\n'), ((2508, 2539), 'numpy.array', 'numpy.array', (['point'], {'dtype': '_TYPE'}), '(point, dtype=_TYPE)\n', (2519, 2539), False, 'import numpy\n'), ((2550, 2585), 'numpy.dot', 'numpy.dot', (['self.__axis', 'self.__axis'], {}), '(self.__axis, self.__axis)\n', (2559, 2585), False, 'import numpy\n'), ((2681, 2695), 'math.sqrt', 'math.sqrt', (['lax'], {}), '(lax)\n', (2690, 2695), False, 'import math\n'), ((2754, 2771), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (2765, 2771), False, 'import numpy\n'), ((2843, 2864), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (2854, 2864), False, 'import numpy\n'), ((2898, 2923), 'numpy.dot', 'numpy.dot', (['z', 'self.__axis'], {}), '(z, self.__axis)\n', (2907, 2923), False, 'import numpy\n'), ((2967, 2990), 'numpy.dot', 'numpy.dot', (['z_per', 'z_per'], {}), '(z_per, z_per)\n', (2976, 2990), False, 'import numpy\n'), ((3684, 3714), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (3695, 3714), False, 'import numpy\n'), ((3960, 3992), 'numpy.array', 'numpy.array', (['center'], {'dtype': '_TYPE'}), '(center, dtype=_TYPE)\n', (3971, 3992), False, 'import numpy\n'), ((4017, 4034), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (4028, 4034), False, 'import numpy\n'), ((4102, 4123), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (4113, 4123), False, 'import numpy\n'), ((4300, 4329), 'numpy.ones', 'numpy.ones', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (4310, 4329), False, 'import numpy\n'), ((4447, 4479), 'numpy.array', 'numpy.array', (['normal'], {'dtype': '_TYPE'}), '(normal, dtype=_TYPE)\n', (4458, 4479), False, 'import numpy\n'), ((4864, 4881), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (4875, 4881), False, 'import numpy\n'), ((4951, 4972), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (4962, 4972), False, 'import numpy\n'), ((2140, 2161), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (2151, 2161), False, 'import numpy\n'), ((3104, 3127), 'numpy.dot', 'numpy.dot', (['axis2', 'axis2'], {}), '(axis2, axis2)\n', (3113, 3127), False, 'import numpy\n'), ((4499, 4538), 'numpy.dot', 'numpy.dot', (['self.__normal', 'self.__normal'], {}), '(self.__normal, self.__normal)\n', (4508, 4538), False, 'import numpy\n'), 
((3031, 3048), 'math.sqrt', 'math.sqrt', (['lz_per'], {}), '(lz_per)\n', (3040, 3048), False, 'import math\n'), ((3166, 3181), 'math.sqrt', 'math.sqrt', (['lax2'], {}), '(lax2)\n', (3175, 3181), False, 'import math\n'), ((4792, 4824), 'numpy.array', 'numpy.array', (['offset'], {'dtype': '_TYPE'}), '(offset, dtype=_TYPE)\n', (4803, 4824), False, 'import numpy\n'), ((4993, 5020), 'numpy.dot', 'numpy.dot', (['x', 'self.__normal'], {}), '(x, self.__normal)\n', (5002, 5020), False, 'import numpy\n'), ((3216, 3233), 'math.sqrt', 'math.sqrt', (['lz_per'], {}), '(lz_per)\n', (3225, 3233), False, 'import math\n'), ((3235, 3257), 'math.cos', 'math.cos', (['self.__angle'], {}), '(self.__angle)\n', (3243, 3257), False, 'import math\n'), ((3264, 3286), 'math.sin', 'math.sin', (['self.__angle'], {}), '(self.__angle)\n', (3272, 3286), False, 'import math\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs a neural network model on a camera live stream
"""
import argparse
import cv2
import numpy as np
import os
import time
import sys
COMMANDS = {0: "move_forward", 1: "go_down", 2: "rot_10_deg",
3: "go_up", 4: "take_off", 5: "land", 6: "idle"}
def send_command(anafi, command_id):
"""
    Function to send commands to an Anafi drone based on the command id
"""
if command_id not in COMMANDS:
raise f"Command id not in COMMANDS choices: {command_id}"
print("The following command will be sent: ", COMMANDS[command_id])
if COMMANDS[command_id] == "move_forward":
anafi.move_relative(dx=1, dy=0, dz=0, dradians=0)
if COMMANDS[command_id] == "go_down":
anafi.move_relative(dx=0, dy=0, dz=-0.5, dradians=0)
if COMMANDS[command_id] == "rot_10_deg":
anafi.move_relative(dx=0, dy=0, dz=0, dradians=0.785)
if COMMANDS[command_id] == "go_up":
anafi.move_relative(dx=0, dy=0, dz=0.5, dradians=0)
if COMMANDS[command_id] == "take_off":
anafi.safe_takeoff(5)
if COMMANDS[command_id] == "land":
anafi.safe_land(5)
return
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--weight_path",
required=True,
type=str,
help="Path to load weights for the model."
)
parser.add_argument(
"-a",
"--pyparrot_path",
required=True,
type=str,
help="Path to pyparrot module downloaded from amymcgovern on github."
)
parser.add_argument(
"-w",
"--img_width",
required=False,
default=28,
type=int,
help="Image width."
)
parser.add_argument(
"-n",
"--num_classes",
required=False,
default=7,
type=int,
help="Number of classes."
)
parser.add_argument(
"-c",
"--crop",
required=False,
default=None,
type=str,
help="Crop image, format: MinWidth,MaxWidth,MinHeight,MaxHeight.\
Set -1 for the unchanged ones"
)
parser.add_argument(
"-r",
"--resize",
required=False,
default=None,
type=str,
help="Resize shape, format: height,width"
)
parser.add_argument(
"-b",
"--binarize",
required=False,
default=None,
type=str,
help="To binarize images, format for thresholding: min,max"
)
parser.add_argument(
"-g",
"--gray",
required=False,
action="store_true",
help="To save 1-channel images"
)
parser.add_argument(
"-e",
"--erode",
required=False,
default=None,
type=str,
help="Erode option, format: kernel_size,iteration"
)
parser.add_argument(
"-d",
"--dilate",
required=False,
default=None,
type=str,
help="Dilate option, format: kernel_size,iteration"
)
parser.add_argument(
"-m",
"--camid",
required=False,
default=0,
type=int,
help="Camera ID, default is 0"
)
parser.add_argument(
"-t",
"--tensorflow",
required=False,
action="store_true",
help="To specify if Tensorflow model is used."
)
parser.add_argument(
"-z",
"--number_of_confimation",
required=False,
default=3,
type=int,
help="Minimum number of identical commands before sending to drone."
)
args = parser.parse_args()
"""
Drone connection
"""
sys.path.append(args.pyparrot_path)
from pyparrot.Anafi import Anafi
print("Connecting to drone...")
anafi = Anafi(drone_type="Anafi", ip_address="192.168.42.1")
success = anafi.connect(10)
print(success)
print("Sleeping few seconds...")
anafi.smart_sleep(3)
"""
Load model
"""
print("Loading model...")
input_size = args.img_width**2
num_class = args.num_classes
hidden_size = 128
if args.tensorflow:
import tensorflow as tf
model = tf.keras.models.load_model(args.weight_path)
else:
script_path = os.path.realpath(__file__)
sys.path.append(os.path.dirname(script_path) + "/../")
from homemade_framework import framework as NN
model = NN.Sequential([NN.Linear(input_size, hidden_size),
NN.LeakyReLU(), NN.BatchNorm(),
NN.Linear(hidden_size, hidden_size),
NN.LeakyReLU(), NN.BatchNorm(),
NN.Linear(hidden_size, num_class),
NN.Softmax()], NN.LossMSE())
model.load(args.weight_path)
"""
Webcam process
"""
print("Start webcam...")
cam = cv2.VideoCapture(args.camid)
ret, frame = cam.read()
min_height, max_height = 0, frame.shape[0]
min_width, max_width = 0, frame.shape[1]
print("Cam resolution: {}x{}".format(max_width, max_height))
if args.crop is not None:
res = [int(x) for x in args.crop.split(',')]
if res[0] != -1:
min_width = res[0]
if res[1] != -1:
max_width = res[1]
if res[2] != -1:
min_height = res[2]
if res[3] != -1:
max_height = res[3]
print("Image cropped to minWidth:maxWidth, minHeight:maxHeight: {}:{}\
, {},{}".format(min_width, max_width, min_height, max_height))
pause = False
imgs = []
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
if args.crop is not None:
frame = frame[min_height:max_height, min_width:max_width]
cv2.imshow("Original image", frame)
k = cv2.waitKey(1)
if k % 256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k % 256 == ord('p'):
# p pressed
if pause:
pause = False
else:
pause = True
if not pause:
if args.gray:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if args.binarize:
frame = cv2.medianBlur(frame, 5)
min_thresh, max_thresh = [int(x) for x in
args.binarize.split(',')]
ret, frame = cv2.threshold(frame, min_thresh, max_thresh,
cv2.THRESH_BINARY)
if args.erode is not None:
k_size, iteration = [int(x) for x in args.erode.split(',')]
kernel = np.ones((k_size, k_size), np.uint8)
frame = cv2.erode(frame, kernel, iterations=int(iteration))
if args.dilate is not None:
k_size, iteration = [int(x) for x in args.dilate.split(',')]
kernel = np.ones((k_size, k_size), np.uint8)
frame = cv2.dilate(frame, kernel, iterations=int(iteration))
if args.resize:
height, width = [int(size) for size in args.resize.split(',')]
                # note: cv2.resize takes dsize as (width, height)
                frame = cv2.resize(frame, (width, height),
                                   interpolation=cv2.INTER_AREA)
image = np.asarray(frame)/255.
cv2.imshow("Input image for the model", frame)
image = image.reshape([np.prod(image.shape)])
if len(imgs) < args.number_of_confimation:
imgs.append(image)
else:
if args.tensorflow:
results = np.argmax(model(np.asarray(imgs)), axis=1)
else:
results = NN.get_inferences(model, np.asarray(imgs))
print("Model's output on buffer: ", results)
if np.unique(results).size == 1 and\
COMMANDS[results[0]] != "idle":
send_command(anafi, results[0])
imgs = []
imgs = imgs[1:]
imgs.append(image)
time.sleep(0.3)
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
[
"numpy.prod",
"time.sleep",
"cv2.imshow",
"homemade_framework.framework.LossMSE",
"homemade_framework.framework.Softmax",
"cv2.destroyAllWindows",
"tensorflow.keras.models.load_model",
"sys.path.append",
"homemade_framework.framework.LeakyReLU",
"argparse.ArgumentParser",
"cv2.threshold",
"numpy.asarray",
"cv2.medianBlur",
"homemade_framework.framework.Linear",
"cv2.waitKey",
"numpy.ones",
"pyparrot.Anafi.Anafi",
"os.path.dirname",
"cv2.cvtColor",
"cv2.resize",
"numpy.unique",
"homemade_framework.framework.BatchNorm",
"os.path.realpath",
"cv2.VideoCapture"
] |
[((1221, 1246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1244, 1246), False, 'import argparse\n'), ((3712, 3747), 'sys.path.append', 'sys.path.append', (['args.pyparrot_path'], {}), '(args.pyparrot_path)\n', (3727, 3747), False, 'import sys\n'), ((3833, 3885), 'pyparrot.Anafi.Anafi', 'Anafi', ([], {'drone_type': '"""Anafi"""', 'ip_address': '"""192.168.42.1"""'}), "(drone_type='Anafi', ip_address='192.168.42.1')\n", (3838, 3885), False, 'from pyparrot.Anafi import Anafi\n'), ((4945, 4973), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.camid'], {}), '(args.camid)\n', (4961, 4973), False, 'import cv2\n'), ((8269, 8292), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8290, 8292), False, 'import cv2\n'), ((4224, 4268), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['args.weight_path'], {}), '(args.weight_path)\n', (4250, 4268), True, 'import tensorflow as tf\n'), ((4301, 4327), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4317, 4327), False, 'import os\n'), ((5898, 5933), 'cv2.imshow', 'cv2.imshow', (['"""Original image"""', 'frame'], {}), "('Original image', frame)\n", (5908, 5933), False, 'import cv2\n'), ((5947, 5961), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5958, 5961), False, 'import cv2\n'), ((4819, 4831), 'homemade_framework.framework.LossMSE', 'NN.LossMSE', ([], {}), '()\n', (4829, 4831), True, 'from homemade_framework import framework as NN\n'), ((7482, 7528), 'cv2.imshow', 'cv2.imshow', (['"""Input image for the model"""', 'frame'], {}), "('Input image for the model', frame)\n", (7492, 7528), False, 'import cv2\n'), ((8230, 8245), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (8240, 8245), False, 'import time\n'), ((4352, 4380), 'os.path.dirname', 'os.path.dirname', (['script_path'], {}), '(script_path)\n', (4367, 4380), False, 'import os\n'), ((4477, 4511), 'homemade_framework.framework.Linear', 'NN.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (4486, 4511), True, 'from homemade_framework import framework as NN\n'), ((4544, 4558), 'homemade_framework.framework.LeakyReLU', 'NN.LeakyReLU', ([], {}), '()\n', (4556, 4558), True, 'from homemade_framework import framework as NN\n'), ((4560, 4574), 'homemade_framework.framework.BatchNorm', 'NN.BatchNorm', ([], {}), '()\n', (4572, 4574), True, 'from homemade_framework import framework as NN\n'), ((4607, 4642), 'homemade_framework.framework.Linear', 'NN.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (4616, 4642), True, 'from homemade_framework import framework as NN\n'), ((4675, 4689), 'homemade_framework.framework.LeakyReLU', 'NN.LeakyReLU', ([], {}), '()\n', (4687, 4689), True, 'from homemade_framework import framework as NN\n'), ((4691, 4705), 'homemade_framework.framework.BatchNorm', 'NN.BatchNorm', ([], {}), '()\n', (4703, 4705), True, 'from homemade_framework import framework as NN\n'), ((4738, 4771), 'homemade_framework.framework.Linear', 'NN.Linear', (['hidden_size', 'num_class'], {}), '(hidden_size, num_class)\n', (4747, 4771), True, 'from homemade_framework import framework as NN\n'), ((4804, 4816), 'homemade_framework.framework.Softmax', 'NN.Softmax', ([], {}), '()\n', (4814, 4816), True, 'from homemade_framework import framework as NN\n'), ((6306, 6345), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (6318, 6345), False, 'import cv2\n'), ((6400, 6424), 'cv2.medianBlur', 
'cv2.medianBlur', (['frame', '(5)'], {}), '(frame, 5)\n', (6414, 6424), False, 'import cv2\n'), ((6580, 6643), 'cv2.threshold', 'cv2.threshold', (['frame', 'min_thresh', 'max_thresh', 'cv2.THRESH_BINARY'], {}), '(frame, min_thresh, max_thresh, cv2.THRESH_BINARY)\n', (6593, 6643), False, 'import cv2\n'), ((6827, 6862), 'numpy.ones', 'np.ones', (['(k_size, k_size)', 'np.uint8'], {}), '((k_size, k_size), np.uint8)\n', (6834, 6862), True, 'import numpy as np\n'), ((7081, 7116), 'numpy.ones', 'np.ones', (['(k_size, k_size)', 'np.uint8'], {}), '((k_size, k_size), np.uint8)\n', (7088, 7116), True, 'import numpy as np\n'), ((7326, 7390), 'cv2.resize', 'cv2.resize', (['frame', '(height, width)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (height, width), interpolation=cv2.INTER_AREA)\n', (7336, 7390), False, 'import cv2\n'), ((7447, 7464), 'numpy.asarray', 'np.asarray', (['frame'], {}), '(frame)\n', (7457, 7464), True, 'import numpy as np\n'), ((7564, 7584), 'numpy.prod', 'np.prod', (['image.shape'], {}), '(image.shape)\n', (7571, 7584), True, 'import numpy as np\n'), ((7881, 7897), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7891, 7897), True, 'import numpy as np\n'), ((7777, 7793), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7787, 7793), True, 'import numpy as np\n'), ((7979, 7997), 'numpy.unique', 'np.unique', (['results'], {}), '(results)\n', (7988, 7997), True, 'import numpy as np\n')]
|
import numpy as np
from hypernet.src.general import const
from hypernet.src.general import utils
from hypernet.src.thermophysicalModels.reactionThermo.mixture import Basic
class MultiComponent(Basic):
# Initialization
###########################################################################
def __init__(
self,
specieThermos,
*args,
**kwargs
):
super(MultiComponent, self).__init__(specieThermos)
# Methods
###########################################################################
# Mixture properties ------------------------------------------------------
def update(self, XY, var='Y'):
# Update mass/molar fractions
for name, value in XY.items():
value = utils.check_XY(utils.convert_to_array(value))
setattr(self.spTh[name].specie, var, value)
# Update mixture/species properties
self.M = self.M_(var=var)
if var == 'Y':
self.Xi_()
elif var == 'X':
self.Yi_()
self.R = self.R_()
# Mixture properties ------------------------------------------------------
# Mass
def M_(self, var='Y'):
# [kg/mol]
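        # 1/M = sum_i(Y_i/M_i) from mass fractions; M = sum_i(X_i*M_i) from molar fractions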
if var == 'Y':
M = [spTh.specie.Y / spTh.specie.M for spTh in self.spTh.values()]
return 1./np.sum(np.concatenate(M))
elif var == 'X':
M = [spTh.specie.X * spTh.specie.M for spTh in self.spTh.values()]
return np.sum(np.concatenate(M))
# Specific gas constant
def R_(self):
R = [spTh.specie.Y * spTh.specie.R for spTh in self.spTh.values()]
return np.sum(np.concatenate(R))
# Pressure
def p_(self, rho, T):
return rho*self.R*T
# Density
def rho_(self, p, T):
return p/(self.R*T)
# Number density
def n_(self, rho):
self.ni_(rho=rho, var='Y')
n = [spTh.specie.n for spTh in self.spTh.values()]
return np.sum(np.concatenate(n))
# Enthalpy/Energy
def he_(self):
# [J/kg]
he = [spTh.specie.Y * spTh.thermo.he for spTh in self.spTh.values()]
return np.sum(np.concatenate(he))
def cpv_(self):
# [J/(kg K)]
cpv = [spTh.specie.Y * spTh.thermo.cpv for spTh in self.spTh.values()]
return np.sum(np.concatenate(cpv))
def dcpvdT_(self):
# [J/kg]
dcpvdT = [
spTh.specie.Y * spTh.thermo.dcpvdT for spTh in self.spTh.values()
]
return np.sum(np.concatenate(dcpvdT))
def dhedY_(self, dY):
# [J/kg]
dhedY = [
np.sum(dY[name] * spTh.thermo.he) \
for name, spTh in self.spTh.items()
]
return np.sum(dhedY)
# Species properties ------------------------------------------------------
def Yi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.Y = sp.X * sp.M / self.M
def Xi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.X = sp.Y * self.M / sp.M
def ni_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.n = sp.Y * rho / sp.M * const.UNA
elif var == 'X':
sp.n = sp.X * n
def rhoi_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.rho = sp.Y * rho
elif var == 'X':
sp.rho = sp.X * n * sp.M / const.UNA
|
[
"hypernet.src.general.utils.convert_to_array",
"numpy.sum",
"numpy.concatenate"
] |
[((2710, 2723), 'numpy.sum', 'np.sum', (['dhedY'], {}), '(dhedY)\n', (2716, 2723), True, 'import numpy as np\n'), ((1649, 1666), 'numpy.concatenate', 'np.concatenate', (['R'], {}), '(R)\n', (1663, 1666), True, 'import numpy as np\n'), ((1968, 1985), 'numpy.concatenate', 'np.concatenate', (['n'], {}), '(n)\n', (1982, 1985), True, 'import numpy as np\n'), ((2145, 2163), 'numpy.concatenate', 'np.concatenate', (['he'], {}), '(he)\n', (2159, 2163), True, 'import numpy as np\n'), ((2308, 2327), 'numpy.concatenate', 'np.concatenate', (['cpv'], {}), '(cpv)\n', (2322, 2327), True, 'import numpy as np\n'), ((2499, 2521), 'numpy.concatenate', 'np.concatenate', (['dcpvdT'], {}), '(dcpvdT)\n', (2513, 2521), True, 'import numpy as np\n'), ((2597, 2630), 'numpy.sum', 'np.sum', (['(dY[name] * spTh.thermo.he)'], {}), '(dY[name] * spTh.thermo.he)\n', (2603, 2630), True, 'import numpy as np\n'), ((782, 811), 'hypernet.src.general.utils.convert_to_array', 'utils.convert_to_array', (['value'], {}), '(value)\n', (804, 811), False, 'from hypernet.src.general import utils\n'), ((1337, 1354), 'numpy.concatenate', 'np.concatenate', (['M'], {}), '(M)\n', (1351, 1354), True, 'import numpy as np\n'), ((1486, 1503), 'numpy.concatenate', 'np.concatenate', (['M'], {}), '(M)\n', (1500, 1503), True, 'import numpy as np\n')]
|
from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import pandas as pd
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
class group_lasso(object):
def __init__(self,
loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso=True, # should lasso solver be used where applicable - defaults to True
perturb=None):
_check_groups(groups) # make sure groups looks sensible
# log likelihood : quadratic loss
self.loglike = loglike
self.nfeature = self.loglike.shape[0]
# ridge parameter
self.ridge_term = ridge_term
# group lasso penalty (from regreg)
# use regular lasso penalty if all groups are size 1
if use_lasso and groups.size == np.unique(groups).size:
# need to provide weights an an np.array rather than a dictionary
weights_np = np.array([w[1] for w in sorted(weights.items())])
self.penalty = rr.weighted_l1norm(weights=weights_np,
lagrange=1.)
else:
self.penalty = rr.group_lasso(groups,
weights=weights,
lagrange=1.)
# store groups as a class variable since the non-group lasso doesn't
self.groups = groups
self._initial_omega = perturb
# gaussian randomization
self.randomizer = randomizer
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None):
# solve the randomized version of group lasso
(self.initial_soln,
self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
solve_args=solve_args)
# initialize variables
active_groups = [] # active group labels
active_dirs = {} # dictionary: keys are group labels, values are unit-norm coefficients
unpenalized = [] # selected groups with no penalty
overall = np.ones(self.nfeature, np.bool) # mask of active features
ordered_groups = [] # active group labels sorted by label
ordered_opt = [] # gamma's ordered by group labels
ordered_vars = [] # indices "ordered" by sorting group labels
tol = 1.e-20
_, self.randomizer_prec = self.randomizer.cov_prec
# now we are collecting the directions and norms of the active groups
for g in sorted(np.unique(self.groups)): # g is group label
group_mask = self.groups == g
soln = self.initial_soln # do not need to keep setting this
if norm(soln[group_mask]) > tol * norm(soln): # is group g appreciably nonzero
ordered_groups.append(g)
# variables in active group
ordered_vars.extend(np.flatnonzero(group_mask))
if self.penalty.weights[g] == 0:
unpenalized.append(g)
else:
active_groups.append(g)
active_dirs[g] = soln[group_mask] / norm(soln[group_mask])
ordered_opt.append(norm(soln[group_mask]))
else:
overall[group_mask] = False
self.selection_variable = {'directions': active_dirs,
'active_groups': active_groups} # kind of redundant with keys of active_dirs
self._ordered_groups = ordered_groups
# exception if no groups are selected
if len(self.selection_variable['active_groups']) == 0:
return np.sign(soln), soln
# otherwise continue as before
self.observed_opt_state = np.hstack(ordered_opt) # gammas as array
_beta_unpenalized = restricted_estimator(self.loglike, # refit OLS on E
overall,
solve_args=solve_args)
beta_bar = np.zeros(self.nfeature)
beta_bar[overall] = _beta_unpenalized # refit OLS beta with zeros
self._beta_full = beta_bar
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar)) # all 1's for LS
opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])
for i, var in enumerate(ordered_vars):
opt_linearNoU[var, i] += self.ridge_term
opt_offset = self.initial_subgrad
self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
self.observed_score_state[~overall] += self.loglike.smooth_objective(beta_bar, 'grad')[~overall]
active_signs = np.sign(self.initial_soln)
active = np.flatnonzero(active_signs)
self.active = active
def compute_Vg(ug):
pg = ug.size # figure out size of g'th group
if pg > 1:
Z = np.column_stack((ug, np.eye(pg, pg - 1)))
Q, _ = qr(Z)
Vg = Q[:, 1:] # drop the first column
else:
Vg = np.zeros((1, 0)) # if the group is size one, the orthogonal complement is empty
return Vg
def compute_Lg(g):
pg = active_dirs[g].size
Lg = self.penalty.weights[g] * np.eye(pg)
return Lg
sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))
Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
V = block_diag(*Vs) # unpack the list
Ls = [compute_Lg(g) for g in sorted_active_dirs]
L = block_diag(*Ls) # unpack the list
XE = X[:, ordered_vars] # changed to ordered_vars
Q = XE.T.dot(self._W[:, None] * XE)
QI = inv(Q)
C = V.T.dot(QI).dot(L).dot(V)
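        # C = V^T Q^{-1} Lambda V, the matrix entering the Jacobian correction (see jacobian_grad_hess below)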
self.XE = XE
self.Q = Q
self.QI = QI
self.C = C
U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T
self.opt_linear = opt_linearNoU.dot(U)
self.active_dirs = active_dirs
self.opt_offset = opt_offset
self.ordered_vars = ordered_vars
self.linear_part = -np.eye(self.observed_opt_state.shape[0])
self.offset = np.zeros(self.observed_opt_state.shape[0])
return active_signs, soln
def _solve_randomized_problem(self,
perturb=None,
solve_args={'tol': 1.e-15, 'min_its': 100}):
# take a new perturbation if supplied
if perturb is not None:
self._initial_omega = perturb
if self._initial_omega is None:
self._initial_omega = self.randomizer.sample()
quad = rr.identity_quadratic(self.ridge_term,
0,
-self._initial_omega,
0)
problem = rr.simple_problem(self.loglike, self.penalty)
# if all groups are size 1, set up lasso penalty and run usual lasso solver... (see existing code)...
initial_soln = problem.solve(quad, **solve_args)
initial_subgrad = -(self.loglike.smooth_objective(initial_soln,
'grad') +
quad.objective(initial_soln, 'grad'))
return initial_soln, initial_subgrad
@staticmethod
def gaussian(X,
Y,
groups,
weights,
sigma=1.,
quadratic=None,
ridge_term=0.,
perturb=None,
use_lasso=True, # should lasso solver be used when applicable - defaults to True
randomizer_scale=None):
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
return group_lasso(loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso,
perturb)
def _setup_implied_gaussian(self):
_, prec = self.randomizer.cov_prec
if np.asarray(prec).shape in [(), (0,)]:
cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
else:
cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)
cond_mean = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
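        # implied Gaussian law of the optimization variables: N(cond_mean, cond_cov)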
self.cond_mean = cond_mean
self.cond_cov = cond_cov
self.cond_precision = cond_precision
self.logdens_linear = logdens_linear
return cond_mean, cond_cov, cond_precision, logdens_linear
def selective_MLE(self,
solve_args={'tol': 1.e-12},
level=0.9,
useJacobian=True,
dispersion=None):
"""Do selective_MLE for group_lasso
Note: this masks the selective_MLE inherited from query
because that is not adapted for the group_lasso. Also, assumes
you have already run the fit method since this uses results
from that method.
Parameters
----------
observed_target: from selected_targets
target_cov: from selected_targets
        target_score_cov: from selected_targets
init_soln: (opt_state) initial (observed) value of optimization variables
cond_mean: conditional mean of optimization variables (model on _setup_implied_gaussian)
cond_cov: conditional variance of optimization variables (model on _setup_implied_gaussian)
logdens_linear: (model on _setup_implied_gaussian)
linear_part: like A_scaling (from lasso)
offset: like b_scaling (from lasso)
solve_args: passed on to solver
level: level of confidence intervals
        useJacobian: whether to apply the Jacobian correction (uses self.C defined in fitting)
"""
self._setup_implied_gaussian() # Calculate useful quantities
(observed_target, target_cov, target_score_cov, alternatives) = self.selected_targets(dispersion)
init_soln = self.observed_opt_state # just the gammas
cond_mean = self.cond_mean
cond_cov = self.cond_cov
logdens_linear = self.logdens_linear
linear_part = self.linear_part
offset = self.offset
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
observed_target = np.atleast_1d(observed_target)
prec_target = inv(target_cov)
prec_opt = self.cond_precision
score_offset = self.observed_score_state + self.opt_offset
# target_lin determines how the conditional mean of optimization variables
# vary with target
# logdens_linear determines how the argument of the optimization density
# depends on the score, not how the mean depends on score, hence the minus sign
target_linear = target_score_cov.T.dot(prec_target)
target_offset = score_offset - target_linear.dot(observed_target)
target_lin = - logdens_linear.dot(target_linear)
target_off = cond_mean - target_lin.dot(observed_target)
if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
prec_opt).dot(
target_lin)
else:
_P = target_linear.T.dot(self.randomizer_prec).dot(target_offset)
_prec = prec_target + (target_linear.T.dot(self.randomizer_prec).dot(target_linear)) - target_lin.T.dot(
prec_opt).dot(target_lin)
C = target_cov.dot(_P - target_lin.T.dot(prec_opt).dot(target_off))
conjugate_arg = prec_opt.dot(cond_mean)
val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
prec_opt,
init_soln,
linear_part,
offset,
self.C,
self.active_dirs,
useJacobian,
**solve_args)
final_estimator = target_cov.dot(_prec).dot(observed_target) \
+ target_cov.dot(target_lin.T.dot(prec_opt.dot(cond_mean - soln))) + C
unbiased_estimator = target_cov.dot(_prec).dot(observed_target) + target_cov.dot(
_P - target_lin.T.dot(prec_opt).dot(target_off))
L = target_lin.T.dot(prec_opt)
observed_info_natural = _prec + L.dot(target_lin) - L.dot(hess.dot(L.T))
observed_info_mean = target_cov.dot(observed_info_natural.dot(target_cov))
Z_scores = final_estimator / np.sqrt(np.diag(observed_info_mean))
pvalues = ndist.cdf(Z_scores)
pvalues = 2 * np.minimum(pvalues, 1 - pvalues)
alpha = 1 - level
quantile = ndist.ppf(1 - alpha / 2.)
intervals = np.vstack([final_estimator -
quantile * np.sqrt(np.diag(observed_info_mean)),
final_estimator +
quantile * np.sqrt(np.diag(observed_info_mean))]).T
log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.
result = pd.DataFrame({'MLE': final_estimator,
'SE': np.sqrt(np.diag(observed_info_mean)),
'Zvalue': Z_scores,
'pvalue': pvalues,
'lower_confidence': intervals[:, 0],
'upper_confidence': intervals[:, 1],
'unbiased': unbiased_estimator})
return result, observed_info_mean, log_ref
def selected_targets(self,
dispersion=None,
solve_args={'tol': 1.e-12, 'min_its': 50}):
X, y = self.loglike.data
n, p = X.shape
XE = self.XE
Q = self.Q
observed_target = restricted_estimator(self.loglike, self.ordered_vars, solve_args=solve_args)
_score_linear = -XE.T.dot(self._W[:, None] * X).T
alternatives = ['twosided'] * len(self.active)
if dispersion is None: # use Pearson's X^2
dispersion = ((y - self.loglike.saturated_loss.mean_function(
XE.dot(observed_target))) ** 2 / self._W).sum() / (n - XE.shape[1])
cov_target = self.QI * dispersion
crosscov_target_score = _score_linear.dot(self.QI).T * dispersion
return (observed_target,
cov_target,
crosscov_target_score,
alternatives)
class approximate_grid_inference(object):
def __init__(self,
query,
dispersion,
solve_args={'tol': 1.e-12},
useIP=True):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
query : `gaussian_query`
A Gaussian query which has information
to describe implied Gaussian.
observed_target : ndarray
Observed estimate of target.
target_cov : ndarray
            Estimated covariance of target.
target_score_cov : ndarray
Estimated covariance of target and score of randomized query.
solve_args : dict, optional
Arguments passed to solver.
"""
self.solve_args = solve_args
result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]
self.linear_part = query.linear_part
self.offset = query.offset
self.logdens_linear = query.logdens_linear
self.cond_mean = query.cond_mean
self.prec_opt = np.linalg.inv(query.cond_cov)
self.cond_cov = query.cond_cov
self.C = query.C
self.active_dirs = query.active_dirs
(observed_target, target_cov, target_score_cov, alternatives) = query.selected_targets(dispersion)
self.observed_target = observed_target
self.target_score_cov = target_score_cov
self.target_cov = target_cov
self.init_soln = query.observed_opt_state
self.randomizer_prec = query.randomizer_prec
self.score_offset = query.observed_score_state + query.opt_offset
self.ntarget = ntarget = target_cov.shape[0]
_scale = 4 * np.sqrt(np.diag(inverse_info))
if useIP == False:
ngrid = 1000
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
else:
ngrid = 100
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
self.opt_linear = query.opt_linear
self.useIP = useIP
def summary(self,
alternatives=None,
parameter=None,
level=0.9):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
alternatives : [str], optional
Sequence of strings describing the alternatives,
should be values of ['twosided', 'less', 'greater']
parameter : np.array
Hypothesized value for parameter -- defaults to 0.
level : float
Confidence level.
"""
if parameter is not None:
pivots = self._approx_pivots(parameter,
alternatives=alternatives)
else:
pivots = None
pvalues = self._approx_pivots(np.zeros_like(self.observed_target),
alternatives=alternatives)
lower, upper = self._approx_intervals(level=level)
result = pd.DataFrame({'target': self.observed_target,
'pvalue': pvalues,
'lower_confidence': lower,
'upper_confidence': upper})
if not np.all(parameter == 0):
result.insert(4, 'pivot', pivots)
result.insert(5, 'parameter', parameter)
return result
def log_reference(self,
observed_target,
target_cov,
target_score_cov,
grid):
"""
Approximate the log of the reference density on a grid.
"""
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
prec_target = np.linalg.inv(target_cov)
target_lin = - self.logdens_linear.dot(target_score_cov.T.dot(prec_target))
ref_hat = []
for k in range(grid.shape[0]):
# in the usual D = N + Gamma theta.hat,
# target_lin is "something" times Gamma,
# where "something" comes from implied Gaussian
# cond_mean is "something" times D
# Gamma is target_score_cov.T.dot(prec_target)
num_opt = self.prec_opt.shape[0]
num_con = self.linear_part.shape[0]
cond_mean_grid = (target_lin.dot(np.atleast_1d(grid[k] - observed_target)) +
self.cond_mean)
            # direction eta used to decompose the optimization variable o
eta = -self.prec_opt.dot(self.logdens_linear.dot(target_score_cov.T))
implied_mean = np.asscalar(eta.T.dot(cond_mean_grid))
implied_cov = np.asscalar(eta.T.dot(self.cond_cov).dot(eta))
implied_prec = 1./implied_cov
_A = self.cond_cov.dot(eta) * implied_prec
R = np.identity(num_opt) - _A.dot(eta.T)
A = self.linear_part.dot(_A).reshape((-1,))
b = self.offset-self.linear_part.dot(R).dot(self.init_soln)
conjugate_arg = implied_mean * implied_prec
val, soln, _ = solver(np.asarray([conjugate_arg]),
np.reshape(implied_prec, (1,1)),
eta.T.dot(self.init_soln),
A.reshape((A.shape[0],1)),
b,
**self.solve_args)
gamma_ = _A.dot(soln) + R.dot(self.init_soln)
log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs)
ref_hat.append(-val - ((conjugate_arg ** 2) * implied_cov)/ 2. + log_jacob[0])
return np.asarray(ref_hat)
def _construct_families(self):
self._construct_density()
self._families = []
for m in range(self.ntarget):
p = self.target_score_cov.shape[1]
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
var_target = 1. / ((self.precs[m])[0, 0])
log_ref = self.log_reference(observed_target_uni,
target_cov_uni,
target_score_cov_uni,
self.stat_grid[m])
if self.useIP == False:
logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(self.stat_grid[m],
np.exp(logW)))
else:
approx_fn = interp1d(self.stat_grid[m],
log_ref,
kind='quadratic',
bounds_error=False,
fill_value='extrapolate')
grid = np.linspace(self.stat_grid[m].min(), self.stat_grid[m].max(), 1000)
logW = (approx_fn(grid) -
0.5 * (grid - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(grid,
np.exp(logW)))
def _approx_pivots(self,
mean_parameter,
alternatives=None):
if not hasattr(self, "_families"):
self._construct_families()
if alternatives is None:
alternatives = ['twosided'] * self.ntarget
pivot = []
for m in range(self.ntarget):
family = self._families[m]
var_target = 1. / ((self.precs[m])[0, 0])
mean = self.S[m].dot(mean_parameter[m].reshape((1,))) + self.r[m]
_cdf = family.cdf((mean[0] - self.observed_target[m]) / var_target, x=self.observed_target[m])
print("variable completed ", m)
if alternatives[m] == 'twosided':
pivot.append(2 * min(_cdf, 1 - _cdf))
elif alternatives[m] == 'greater':
pivot.append(1 - _cdf)
elif alternatives[m] == 'less':
pivot.append(_cdf)
else:
raise ValueError('alternative should be in ["twosided", "less", "greater"]')
return pivot
def _approx_intervals(self,
level=0.9):
if not hasattr(self, "_families"):
self._construct_families()
lower, upper = [], []
for m in range(self.ntarget):
# construction of intervals from families follows `selectinf.learning.core`
family = self._families[m]
observed_target = self.observed_target[m]
l, u = family.equal_tailed_interval(observed_target,
alpha=1 - level)
var_target = 1. / ((self.precs[m])[0, 0])
lower.append(l * var_target + observed_target)
upper.append(u * var_target + observed_target)
return np.asarray(lower), np.asarray(upper)
### Private method
def _construct_density(self):
precs = {}
S = {}
r = {}
p = self.target_score_cov.shape[1]
for m in range(self.ntarget):
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
prec_target = 1. / target_cov_uni
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
target_linear = target_score_cov_uni.T.dot(prec_target)
target_offset = (self.score_offset - target_linear.dot(observed_target_uni)).reshape(
(target_linear.shape[0],))
target_lin = -self.logdens_linear.dot(target_linear)
target_off = (self.cond_mean - target_lin.dot(observed_target_uni)).reshape((target_lin.shape[0],))
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
self.prec_opt).dot(target_lin)
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_r = (1. / _prec).dot(target_lin.T.dot(self.prec_opt).dot(target_off) - _P)
_S = np.linalg.inv(_prec).dot(prec_target)
S[m] = _S
r[m] = _r
precs[m] = _prec
self.precs = precs
self.S = S
self.r = r
def solve_barrier_affine_jacobian_py(conjugate_arg,
precision,
feasible_point,
con_linear,
con_offset,
C,
active_dirs,
useJacobian=True,
step=1,
nstep=2000,
min_its=500,
tol=1.e-12):
"""
This needs to be updated to actually use the Jacobian information (in self.C)
arguments
    conjugate_arg: \\bar{\\Sigma}^{-1} \\bar{\\mu}
precision: \\bar{\\Sigma}^{-1}
feasible_point: gamma's from fitting
con_linear: linear part of affine constraint used for barrier function
con_offset: offset part of affine constraint used for barrier function
C: V^T Q^{-1} \\Lambda V
active_dirs:
"""
scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
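    # per-constraint scale factors entering the log-barrier terms below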
if feasible_point is None:
feasible_point = 1. / scaling
def objective(gs):
p1 = -gs.T.dot(conjugate_arg)
p2 = gs.T.dot(precision).dot(gs) / 2.
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[0]
else:
p3 = 0
p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).sum()
return p1 + p2 + p3 + p4
def grad(gs):
p1 = -conjugate_arg + precision.dot(gs)
p2 = -con_linear.T.dot(1. / (scaling + con_offset - con_linear.dot(gs)))
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[1]
else:
p3 = 0
p4 = 1. / (con_offset - con_linear.dot(gs))
return p1 + p2 + p3 + p4
def barrier_hessian(gs): # contribution of barrier and jacobian to hessian
p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.)
+ 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear)
if useJacobian:
p2 = - jacobian_grad_hess(gs, C, active_dirs)[2]
else:
p2 = 0
return p1 + p2
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
cur_grad = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * cur_grad
if np.all(con_offset - con_linear.dot(proposal) > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
count += 1
proposal = current - step * cur_grad
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
if count >= 20:
if not (np.isnan(proposed_value) or np.isnan(current_value)):
break
else:
raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= min_its:
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = inv(precision + barrier_hessian(current))
return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
"""Calculate Gamma^minus (as a function of gamma vector, active directions)
"""
to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]
return block_diag(*[i for gp in to_diag for i in gp])
def jacobian_grad_hess(gamma, C, active_dirs):
""" Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and hessian (gamma.size square matrix)
"""
if C.shape == (0, 0): # when all groups are size one, C will be an empty array
return 0, 0, 0
else:
GammaMinus = calc_GammaMinus(gamma, active_dirs)
# eigendecomposition
#evalues, evectors = eig(GammaMinus + C)
# log Jacobian
#J = log(evalues).sum()
J = np.log(np.linalg.det(GammaMinus + C))
# inverse
#GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T))
GpC_inv = np.linalg.inv(GammaMinus + C)
# summing matrix (gamma.size by C.shape[0])
S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
# gradient
grad_J = S.dot(GpC_inv.diagonal())
# hessian
hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
return J, grad_J, hess_J
def _check_groups(groups):
"""Make sure that the user-specific groups are ok
There are a number of assumptions that group_lasso makes about
how groups are specified. Specifically, we assume that
`groups` is a 1-d array_like of integers that are sorted in
increasing order, start at 0, and have no gaps (e.g., if there
is a group 2 and a group 4, there must also be at least one
feature in group 3).
This function checks the user-specified group scheme and
raises an exception if it finds any problems.
Sorting feature groups is potentially tedious for the user and
in future we might do this for them.
"""
# check array_like
agroups = np.array(groups)
# check dimension
if len(agroups.shape) != 1:
raise ValueError("Groups are not a 1D array_like")
# check sorted
    if np.any(agroups[:-1] > agroups[1:]):
raise ValueError("Groups are not sorted")
# check integers
if not np.issubdtype(agroups.dtype, np.integer):
raise TypeError("Groups are not integers")
# check starts with 0
if not np.amin(agroups) == 0:
raise ValueError("First group is not 0")
# check for no skipped groups
if not np.all(np.diff(np.unique(agroups)) == 1):
raise ValueError("Some group is skipped")
|
[
"regreg.api.simple_problem",
"numpy.sqrt",
"numpy.hstack",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linalg.norm",
"scipy.stats.norm.cdf",
"numpy.multiply",
"numpy.linalg.qr",
"numpy.reshape",
"regreg.api.identity_quadratic",
"regreg.api.group_lasso",
"numpy.flatnonzero",
"numpy.asarray",
"regreg.api.weighted_l1norm",
"numpy.exp",
"numpy.issubdtype",
"numpy.dot",
"regreg.api.glm.gaussian",
"numpy.linspace",
"pandas.DataFrame",
"numpy.identity",
"numpy.eye",
"numpy.all",
"numpy.ones",
"numpy.amin",
"scipy.stats.norm.ppf",
"numpy.any",
"numpy.isnan",
"numpy.sign",
"numpy.std",
"numpy.atleast_1d",
"numpy.fabs",
"numpy.unique",
"numpy.minimum",
"numpy.linalg.det",
"numpy.diag",
"numpy.zeros",
"numpy.linalg.inv",
"scipy.linalg.block_diag",
"numpy.zeros_like"
] |
[((31360, 31406), 'scipy.linalg.block_diag', 'block_diag', (['*[i for gp in to_diag for i in gp]'], {}), '(*[i for gp in to_diag for i in gp])\n', (31370, 31406), False, 'from scipy.linalg import block_diag\n'), ((33085, 33101), 'numpy.array', 'np.array', (['groups'], {}), '(groups)\n', (33093, 33101), True, 'import numpy as np\n'), ((2537, 2568), 'numpy.ones', 'np.ones', (['self.nfeature', 'np.bool'], {}), '(self.nfeature, np.bool)\n', (2544, 2568), True, 'import numpy as np\n'), ((4189, 4211), 'numpy.hstack', 'np.hstack', (['ordered_opt'], {}), '(ordered_opt)\n', (4198, 4211), True, 'import numpy as np\n'), ((4463, 4486), 'numpy.zeros', 'np.zeros', (['self.nfeature'], {}), '(self.nfeature)\n', (4471, 4486), True, 'import numpy as np\n'), ((4748, 4798), 'numpy.dot', 'np.dot', (['X.T', '(X[:, ordered_vars] * W[:, np.newaxis])'], {}), '(X.T, X[:, ordered_vars] * W[:, np.newaxis])\n', (4754, 4798), True, 'import numpy as np\n'), ((5147, 5173), 'numpy.sign', 'np.sign', (['self.initial_soln'], {}), '(self.initial_soln)\n', (5154, 5173), True, 'import numpy as np\n'), ((5191, 5219), 'numpy.flatnonzero', 'np.flatnonzero', (['active_signs'], {}), '(active_signs)\n', (5205, 5219), True, 'import numpy as np\n'), ((5952, 5967), 'scipy.linalg.block_diag', 'block_diag', (['*Vs'], {}), '(*Vs)\n', (5962, 5967), False, 'from scipy.linalg import block_diag\n'), ((6056, 6071), 'scipy.linalg.block_diag', 'block_diag', (['*Ls'], {}), '(*Ls)\n', (6066, 6071), False, 'from scipy.linalg import block_diag\n'), ((6207, 6213), 'numpy.linalg.inv', 'inv', (['Q'], {}), '(Q)\n', (6210, 6213), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((6661, 6703), 'numpy.zeros', 'np.zeros', (['self.observed_opt_state.shape[0]'], {}), '(self.observed_opt_state.shape[0])\n', (6669, 6703), True, 'import numpy as np\n'), ((7143, 7209), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['self.ridge_term', '(0)', '(-self._initial_omega)', '(0)'], {}), '(self.ridge_term, 0, -self._initial_omega, 0)\n', (7164, 7209), True, 'import regreg.api as rr\n'), ((7340, 7385), 'regreg.api.simple_problem', 'rr.simple_problem', (['self.loglike', 'self.penalty'], {}), '(self.loglike, self.penalty)\n', (7357, 7385), True, 'import regreg.api as rr\n'), ((8199, 8264), 'regreg.api.glm.gaussian', 'rr.glm.gaussian', (['X', 'Y'], {'coef': '(1.0 / sigma ** 2)', 'quadratic': 'quadratic'}), '(X, Y, coef=1.0 / sigma ** 2, quadratic=quadratic)\n', (8214, 8264), True, 'import regreg.api as rr\n'), ((11580, 11610), 'numpy.atleast_1d', 'np.atleast_1d', (['observed_target'], {}), '(observed_target)\n', (11593, 11610), True, 'import numpy as np\n'), ((11633, 11648), 'numpy.linalg.inv', 'inv', (['target_cov'], {}), '(target_cov)\n', (11636, 11648), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((14248, 14267), 'scipy.stats.norm.cdf', 'ndist.cdf', (['Z_scores'], {}), '(Z_scores)\n', (14257, 14267), True, 'from scipy.stats import norm as ndist\n'), ((14370, 14396), 'scipy.stats.norm.ppf', 'ndist.ppf', (['(1 - alpha / 2.0)'], {}), '(1 - alpha / 2.0)\n', (14379, 14396), True, 'from scipy.stats import norm as ndist\n'), ((17277, 17306), 'numpy.linalg.inv', 'np.linalg.inv', (['query.cond_cov'], {}), '(query.cond_cov)\n', (17290, 17306), True, 'import numpy as np\n'), ((19777, 19900), 'pandas.DataFrame', 'pd.DataFrame', (["{'target': self.observed_target, 'pvalue': pvalues, 'lower_confidence':\n lower, 'upper_confidence': upper}"], {}), "({'target': self.observed_target, 'pvalue': pvalues,\n 'lower_confidence': lower, 'upper_confidence': 
upper})\n", (19789, 19900), True, 'import pandas as pd\n'), ((20548, 20573), 'numpy.linalg.inv', 'np.linalg.inv', (['target_cov'], {}), '(target_cov)\n', (20561, 20573), True, 'import numpy as np\n'), ((22410, 22429), 'numpy.asarray', 'np.asarray', (['ref_hat'], {}), '(ref_hat)\n', (22420, 22429), True, 'import numpy as np\n'), ((32043, 32072), 'numpy.linalg.inv', 'np.linalg.inv', (['(GammaMinus + C)'], {}), '(GammaMinus + C)\n', (32056, 32072), True, 'import numpy as np\n'), ((33243, 33277), 'numpy.any', 'np.any', (['(agroups[:-1] > agroups[1:])'], {}), '(agroups[:-1] > agroups[1:])\n', (33249, 33277), True, 'import numpy as np\n'), ((33366, 33406), 'numpy.issubdtype', 'np.issubdtype', (['agroups.dtype', 'np.integer'], {}), '(agroups.dtype, np.integer)\n', (33379, 33406), True, 'import numpy as np\n'), ((1435, 1487), 'regreg.api.weighted_l1norm', 'rr.weighted_l1norm', ([], {'weights': 'weights_np', 'lagrange': '(1.0)'}), '(weights=weights_np, lagrange=1.0)\n', (1453, 1487), True, 'import regreg.api as rr\n'), ((1574, 1627), 'regreg.api.group_lasso', 'rr.group_lasso', (['groups'], {'weights': 'weights', 'lagrange': '(1.0)'}), '(groups, weights=weights, lagrange=1.0)\n', (1588, 1627), True, 'import regreg.api as rr\n'), ((2979, 3001), 'numpy.unique', 'np.unique', (['self.groups'], {}), '(self.groups)\n', (2988, 3001), True, 'import numpy as np\n'), ((6598, 6638), 'numpy.eye', 'np.eye', (['self.observed_opt_state.shape[0]'], {}), '(self.observed_opt_state.shape[0])\n', (6604, 6638), True, 'import numpy as np\n'), ((9138, 9157), 'numpy.linalg.inv', 'inv', (['cond_precision'], {}), '(cond_precision)\n', (9141, 9157), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((9341, 9360), 'numpy.linalg.inv', 'inv', (['cond_precision'], {}), '(cond_precision)\n', (9344, 9360), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((14291, 14323), 'numpy.minimum', 'np.minimum', (['pvalues', '(1 - pvalues)'], {}), '(pvalues, 1 - pvalues)\n', (14301, 14323), True, 'import numpy as np\n'), ((18024, 18050), 'numpy.zeros', 'np.zeros', (['(ntarget, ngrid)'], {}), '((ntarget, ngrid))\n', (18032, 18050), True, 'import numpy as np\n'), ((18395, 18421), 'numpy.zeros', 'np.zeros', (['(ntarget, ngrid)'], {}), '((ntarget, ngrid))\n', (18403, 18421), True, 'import numpy as np\n'), ((19598, 19633), 'numpy.zeros_like', 'np.zeros_like', (['self.observed_target'], {}), '(self.observed_target)\n', (19611, 19633), True, 'import numpy as np\n'), ((20006, 20028), 'numpy.all', 'np.all', (['(parameter == 0)'], {}), '(parameter == 0)\n', (20012, 20028), True, 'import numpy as np\n'), ((25922, 25939), 'numpy.asarray', 'np.asarray', (['lower'], {}), '(lower)\n', (25932, 25939), True, 'import numpy as np\n'), ((25941, 25958), 'numpy.asarray', 'np.asarray', (['upper'], {}), '(upper)\n', (25951, 25958), True, 'import numpy as np\n'), ((31905, 31934), 'numpy.linalg.det', 'np.linalg.det', (['(GammaMinus + C)'], {}), '(GammaMinus + C)\n', (31918, 31934), True, 'import numpy as np\n'), ((33497, 33513), 'numpy.amin', 'np.amin', (['agroups'], {}), '(agroups)\n', (33504, 33513), True, 'import numpy as np\n'), ((3156, 3178), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3160, 3178), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((4095, 4108), 'numpy.sign', 'np.sign', (['soln'], {}), '(soln)\n', (4102, 4108), True, 'import numpy as np\n'), ((5444, 5449), 'numpy.linalg.qr', 'qr', (['Z'], {}), '(Z)\n', (5446, 5449), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((5544, 5560), 
'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (5552, 5560), True, 'import numpy as np\n'), ((5755, 5765), 'numpy.eye', 'np.eye', (['pg'], {}), '(pg)\n', (5761, 5765), True, 'import numpy as np\n'), ((8422, 8436), 'numpy.sqrt', 'np.sqrt', (['(n - 1)'], {}), '(n - 1)\n', (8429, 8436), True, 'import numpy as np\n'), ((8545, 8567), 'numpy.sqrt', 'np.sqrt', (['(n / (n - 1.0))'], {}), '(n / (n - 1.0))\n', (8552, 8567), True, 'import numpy as np\n'), ((9002, 9018), 'numpy.asarray', 'np.asarray', (['prec'], {}), '(prec)\n', (9012, 9018), True, 'import numpy as np\n'), ((11452, 11479), 'numpy.asarray', 'np.asarray', (['observed_target'], {}), '(observed_target)\n', (11462, 11479), True, 'import numpy as np\n'), ((12307, 12339), 'numpy.asarray', 'np.asarray', (['self.randomizer_prec'], {}), '(self.randomizer_prec)\n', (12317, 12339), True, 'import numpy as np\n'), ((14200, 14227), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14207, 14227), True, 'import numpy as np\n'), ((17919, 17940), 'numpy.diag', 'np.diag', (['inverse_info'], {}), '(inverse_info)\n', (17926, 17940), True, 'import numpy as np\n'), ((18127, 18229), 'numpy.linspace', 'np.linspace', (['(observed_target[j] - 1.5 * _scale[j])', '(observed_target[j] + 1.5 * _scale[j])'], {'num': 'ngrid'}), '(observed_target[j] - 1.5 * _scale[j], observed_target[j] + 1.5 *\n _scale[j], num=ngrid)\n', (18138, 18229), True, 'import numpy as np\n'), ((18498, 18600), 'numpy.linspace', 'np.linspace', (['(observed_target[j] - 1.5 * _scale[j])', '(observed_target[j] + 1.5 * _scale[j])'], {'num': 'ngrid'}), '(observed_target[j] - 1.5 * _scale[j], observed_target[j] + 1.5 *\n _scale[j], num=ngrid)\n', (18509, 18600), True, 'import numpy as np\n'), ((20424, 20451), 'numpy.asarray', 'np.asarray', (['observed_target'], {}), '(observed_target)\n', (20434, 20451), True, 'import numpy as np\n'), ((21600, 21620), 'numpy.identity', 'np.identity', (['num_opt'], {}), '(num_opt)\n', (21611, 21620), True, 'import numpy as np\n'), ((21858, 21885), 'numpy.asarray', 'np.asarray', (['[conjugate_arg]'], {}), '([conjugate_arg])\n', (21868, 21885), True, 'import numpy as np\n'), ((21921, 21953), 'numpy.reshape', 'np.reshape', (['implied_prec', '(1, 1)'], {}), '(implied_prec, (1, 1))\n', (21931, 21953), True, 'import numpy as np\n'), ((23507, 23611), 'scipy.interpolate.interp1d', 'interp1d', (['self.stat_grid[m]', 'log_ref'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(self.stat_grid[m], log_ref, kind='quadratic', bounds_error=False,\n fill_value='extrapolate')\n", (23515, 23611), False, 'from scipy.interpolate import interp1d\n'), ((30706, 30745), 'numpy.fabs', 'np.fabs', (['(current_value - proposed_value)'], {}), '(current_value - proposed_value)\n', (30713, 30745), True, 'import numpy as np\n'), ((1231, 1248), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (1240, 1248), True, 'import numpy as np\n'), ((3187, 3197), 'numpy.linalg.norm', 'norm', (['soln'], {}), '(soln)\n', (3191, 3197), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((3355, 3381), 'numpy.flatnonzero', 'np.flatnonzero', (['group_mask'], {}), '(group_mask)\n', (3369, 3381), True, 'import numpy as np\n'), ((3657, 3679), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3661, 3679), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((8389, 8398), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (8395, 8398), True, 'import numpy as np\n'), ((8401, 8419), 
'numpy.sqrt', 'np.sqrt', (['mean_diag'], {}), '(mean_diag)\n', (8408, 8419), True, 'import numpy as np\n'), ((8533, 8542), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (8539, 8542), True, 'import numpy as np\n'), ((14838, 14865), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14845, 14865), True, 'import numpy as np\n'), ((21131, 21171), 'numpy.atleast_1d', 'np.atleast_1d', (['(grid[k] - observed_target)'], {}), '(grid[k] - observed_target)\n', (21144, 21171), True, 'import numpy as np\n'), ((27155, 27175), 'numpy.linalg.inv', 'np.linalg.inv', (['_prec'], {}), '(_prec)\n', (27168, 27175), True, 'import numpy as np\n'), ((30754, 30776), 'numpy.fabs', 'np.fabs', (['current_value'], {}), '(current_value)\n', (30761, 30776), True, 'import numpy as np\n'), ((32151, 32176), 'numpy.ones', 'np.ones', (['(1, ug.size - 1)'], {}), '((1, ug.size - 1))\n', (32158, 32176), True, 'import numpy as np\n'), ((33630, 33648), 'numpy.unique', 'np.unique', (['agroups'], {}), '(agroups)\n', (33639, 33648), True, 'import numpy as np\n'), ((3598, 3620), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3602, 3620), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((5400, 5418), 'numpy.eye', 'np.eye', (['pg', '(pg - 1)'], {}), '(pg, pg - 1)\n', (5406, 5418), True, 'import numpy as np\n'), ((8506, 8524), 'numpy.sqrt', 'np.sqrt', (['mean_diag'], {}), '(mean_diag)\n', (8513, 8524), True, 'import numpy as np\n'), ((22721, 22745), 'numpy.diag', 'np.diag', (['self.target_cov'], {}), '(self.target_cov)\n', (22728, 22745), True, 'import numpy as np\n'), ((23446, 23458), 'numpy.exp', 'np.exp', (['logW'], {}), '(logW)\n', (23452, 23458), True, 'import numpy as np\n'), ((24122, 24134), 'numpy.exp', 'np.exp', (['logW'], {}), '(logW)\n', (24128, 24134), True, 'import numpy as np\n'), ((26254, 26278), 'numpy.diag', 'np.diag', (['self.target_cov'], {}), '(self.target_cov)\n', (26261, 26278), True, 'import numpy as np\n'), ((30451, 30475), 'numpy.isnan', 'np.isnan', (['proposed_value'], {}), '(proposed_value)\n', (30459, 30475), True, 'import numpy as np\n'), ((30479, 30502), 'numpy.isnan', 'np.isnan', (['current_value'], {}), '(current_value)\n', (30487, 30502), True, 'import numpy as np\n'), ((32316, 32347), 'numpy.multiply', 'np.multiply', (['GpC_inv', 'GpC_inv.T'], {}), '(GpC_inv, GpC_inv.T)\n', (32327, 32347), True, 'import numpy as np\n'), ((14496, 14523), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14503, 14523), True, 'import numpy as np\n'), ((14625, 14652), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14632, 14652), True, 'import numpy as np\n')]
|
import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime
# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']=300
def sigmoid(x):
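    # logistic function written via the identity sigmoid(x) = (1 + tanh(x/2)) / 2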
return .5 * (1 + np.tanh(x / 2.))
# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the number of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests to set the batchsize to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the number of training steps was set by trial and error
nsteps = 300000
# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)
def training_visualization(machine, current_step, total_steps, eta, a, b, w, da, db, dw):
# Every now and then (every 50k steps), let us know that the training
# is still running
if current_step%50000 == 0:
print("{:08d} / {:08d}".format(current_step, total_steps), end=" \r")
# After 'checkpoint_steps', show the suggested plots
checkpoint_steps = 10000
if current_step%checkpoint_steps == 0 or current_step == total_steps-1:
print(f"Showing at step {current_step}.")
# Produce a sample starting from the validation set after 100 steps
v_prime = machine.generate(validation_set, 100, a=a, b=b, w=w)
# print useful plots for training
plot_training(validation_set, v_prime, eta, a, b, w, da, db, dw)
def plot_training(v, v_prime, eta, a, b, w, da, db, dw):
clear_output(wait=True)
# Show how the weights light up for the state v
hMean = sigmoid(np.dot(v, w) + b)
image = Image.fromarray(hMean * 256).show()
# Create the grid for all the other plots we want
plt.rcParams.update({'font.size': 2})
# plot histogram of initial vs generated
n = np.arange(0,10)
generated_quantum_numbers = np.rint(v_prime*10)
plt.hist( generated_quantum_numbers.flatten(), bins=np.arange(0,10), density=True, label="Sampled" )
plt.plot( n, gas.p_n(n), label="Theor." )
plt.xlabel('n')
plt.ylabel('P(n)')
plt.legend()
# plot histogram of visible, hidden, weights
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(ncols=3, nrows=2)
def plotit(axis, values, title):
axis.hist(values)
axis.set_title(f"{title}: mm = {np.mean(np.fabs(values))}")
plotit(fig.add_subplot(gs[0,0]), a, 'a')
plotit(fig.add_subplot(gs[0,1]), w.flatten(), 'w')
plotit(fig.add_subplot(gs[0,2]), b, 'b')
# plot histogram of d_visible, d_hidden, d_weights
plotit(fig.add_subplot(gs[1,0]), eta*da, 'da')
plotit(fig.add_subplot(gs[1,1]), eta*dw.flatten(), 'dw')
plotit(fig.add_subplot(gs[1,2]), eta*db, 'db')
# show free energies of the average of samples
x = lambda vv : b + np.dot(vv, w)
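    # RBM free energy: F(v) = -v.a - sum_j log(1 + exp(b_j + (v.w)_j))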
free_training = -np.dot(v, a) - np.sum( np.log(1 + np.exp(x(v))), axis=1)
free_valdation = -np.dot(v_prime, a) - np.sum( np.log(1 + np.exp(x(v_prime))), axis=1)
print(f"\nF_training={np.average(free_training)} vs F_validation={np.average(free_valdation)}\n")
# Show.
# CAUTION! This will freeze the execution
plt.show()
# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a,b,w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps, do_while_training=None)
# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
|
[
"numpy.fabs",
"QHO.QHOGas",
"PIL.Image.fromarray",
"matplotlib.pyplot.ylabel",
"numpy.average",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.tanh",
"IPython.display.clear_output",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.dot",
"BoltzmannMachine.BoltzmannMachine",
"numpy.rint",
"numpy.savetxt",
"datetime.datetime.now",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((447, 462), 'QHO.QHOGas', 'qho.QHOGas', ([], {'N': 'N'}), '(N=N)\n', (457, 462), True, 'import QHO as qho\n'), ((3637, 3681), 'BoltzmannMachine.BoltzmannMachine', 'bm.BoltzmannMachine', ([], {'num_hidden': 'hidden_units'}), '(num_hidden=hidden_units)\n', (3656, 3681), True, 'import BoltzmannMachine as bm\n'), ((3849, 3896), 'numpy.savetxt', 'np.savetxt', (['f"""a_{run_id}.csv"""', 'a'], {'delimiter': '""","""'}), "(f'a_{run_id}.csv', a, delimiter=',')\n", (3859, 3896), True, 'import numpy as np\n'), ((3897, 3944), 'numpy.savetxt', 'np.savetxt', (['f"""b_{run_id}.csv"""', 'b'], {'delimiter': '""","""'}), "(f'b_{run_id}.csv', b, delimiter=',')\n", (3907, 3944), True, 'import numpy as np\n'), ((3945, 3992), 'numpy.savetxt', 'np.savetxt', (['f"""w_{run_id}.csv"""', 'w'], {'delimiter': '""","""'}), "(f'w_{run_id}.csv', w, delimiter=',')\n", (3955, 3992), True, 'import numpy as np\n'), ((1829, 1852), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (1841, 1852), False, 'from IPython.display import clear_output\n'), ((2050, 2087), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 2}"], {}), "({'font.size': 2})\n", (2069, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2158), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2151, 2158), True, 'import numpy as np\n'), ((2190, 2211), 'numpy.rint', 'np.rint', (['(v_prime * 10)'], {}), '(v_prime * 10)\n', (2197, 2211), True, 'import numpy as np\n'), ((2365, 2380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n"""'], {}), "('n')\n", (2375, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P(n)"""'], {}), "('P(n)')\n", (2395, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2420), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2418, 2420), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2516), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (2491, 2516), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3486, 3488), True, 'import matplotlib.pyplot as plt\n'), ((295, 311), 'numpy.tanh', 'np.tanh', (['(x / 2.0)'], {}), '(x / 2.0)\n', (302, 311), True, 'import numpy as np\n'), ((1925, 1937), 'numpy.dot', 'np.dot', (['v', 'w'], {}), '(v, w)\n', (1931, 1937), True, 'import numpy as np\n'), ((1955, 1983), 'PIL.Image.fromarray', 'Image.fromarray', (['(hMean * 256)'], {}), '(hMean * 256)\n', (1970, 1983), False, 'from PIL import Image\n'), ((2266, 2282), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2275, 2282), True, 'import numpy as np\n'), ((3130, 3143), 'numpy.dot', 'np.dot', (['vv', 'w'], {}), '(vv, w)\n', (3136, 3143), True, 'import numpy as np\n'), ((3166, 3178), 'numpy.dot', 'np.dot', (['v', 'a'], {}), '(v, a)\n', (3172, 3178), True, 'import numpy as np\n'), ((3245, 3263), 'numpy.dot', 'np.dot', (['v_prime', 'a'], {}), '(v_prime, a)\n', (3251, 3263), True, 'import numpy as np\n'), ((3812, 3835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3833, 3835), False, 'import datetime\n'), ((3340, 3365), 'numpy.average', 'np.average', (['free_training'], {}), '(free_training)\n', (3350, 3365), True, 'import numpy as np\n'), ((3384, 3410), 'numpy.average', 'np.average', (['free_valdation'], {}), '(free_valdation)\n', (3394, 3410), True, 'import numpy as np\n'), ((2672, 2687), 'numpy.fabs', 'np.fabs', 
(['values'], {}), '(values)\n', (2679, 2687), True, 'import numpy as np\n')]
|
#! /usr/bin/env python3
# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import sys
from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener
class StopReason2PoseNode(Node):
def __init__(self, options):
super().__init__("stop_reason2pose_node")
self._options = options
self._sub_pose = self.create_subscription(
StopReasonArray, self._options.topic_name, self._on_stop_reasons, 1
)
self._pub_pose_map = {}
self._idx_map = {}
self._pose_map = {}
self._self_pose_listener = SelfPoseListener()
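        # poll the ego pose at 100 Hz so a current self pose is available for nearest-pose lookups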
self.timer = self.create_timer((1.0 / 100), self._self_pose_listener.get_current_pose)
def _on_stop_reasons(self, msg):
for stop_reason in msg.stop_reasons:
snake_case_stop_reason = pascal2snake(stop_reason.reason)
if len(stop_reason.stop_factors) == 0:
self.get_logger().warn("stop_factor is null")
return
for stop_factor in stop_reason.stop_factors:
pose = PoseStamped()
pose.header = msg.header
pose.pose = stop_factor.stop_pose
# Get nearest pose
th_dist = 1.0
nearest_pose_id = self._get_nearest_pose_id(
snake_case_stop_reason, pose.pose, th_dist
)
if nearest_pose_id:
self._update_pose(snake_case_stop_reason, pose.pose, nearest_pose_id)
pose_id = nearest_pose_id
else:
pose_id = self._register_pose(snake_case_stop_reason, pose.pose)
pose_topic_name = "{snake_case_stop_reason}_{pose_id}".format(**locals())
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
if pose_topic_name not in self._pub_pose_map:
self._pub_pose_map[pose_topic_name] = self.create_publisher(
PoseStamped, topic_ns + pose_topic_name, 1
)
self._pub_pose_map[pose_topic_name].publish(pose)
# Publish nearest stop_reason without number
nearest_pose = PoseStamped()
nearest_pose.header = msg.header
nearest_pose.pose = self._get_nearest_pose_in_array(
stop_reason, self._self_pose_listener.self_pose
)
if nearest_pose.pose:
if snake_case_stop_reason not in self._pub_pose_map:
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
self._pub_pose_map[snake_case_stop_reason] = self.create_publisher(
PoseStamped, topic_ns + snake_case_stop_reason, 1
)
self._pub_pose_map[snake_case_stop_reason].publish(nearest_pose)
def _get_nearest_pose_in_array(self, stop_reason, self_pose):
poses = [stop_factor.stop_pose for stop_factor in stop_reason.stop_factors]
if not poses:
return None
        distances = list(map(lambda p: StopReason2PoseNode.calc_distance2d(p, self_pose), poses))
nearest_idx = np.argmin(distances)
return poses[nearest_idx]
def _find_nearest_pose_id(self, name, pose):
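        # lazily create one R-tree index per stop reason and return the id(s) of the nearest stored pose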
if name not in self._idx_map:
self._idx_map[name] = index.Index()
return self._idx_map[name].nearest(StopReason2PoseNode.pose2boundingbox(pose), 1)
def _get_nearest_pose_id(self, name, pose, th_dist):
nearest_pose_ids = list(self._find_nearest_pose_id(name, pose))
if not nearest_pose_ids:
return None
nearest_pose_id = nearest_pose_ids[0]
nearest_pose = self._get_pose(name, nearest_pose_id)
if not nearest_pose:
return None
dist = StopReason2PoseNode.calc_distance2d(pose, nearest_pose)
if dist > th_dist:
return None
return nearest_pose_id
def _get_pose(self, name, pose_id):
if name not in self._pose_map:
return None
return self._pose_map[name][pose_id]
def _update_pose(self, name, pose, pose_id):
        self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
def _register_pose(self, name, pose):
if name not in self._pose_map:
self._pose_map[name] = {}
pose_id = len(self._pose_map[name]) + 1
self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
return pose_id
@staticmethod
def calc_distance2d(pose1, pose2):
p1 = pose1.position
p2 = pose2.position
return math.hypot(p1.x - p2.x, p1.y - p2.y)
@staticmethod
def pose2boundingbox(pose):
return [pose.position.x, pose.position.y, pose.position.x, pose.position.y]
def main(args):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("topic_name", type=str)
ns = parser.parse_args(args)
stop_reason2pose_node = StopReason2PoseNode(ns)
rclpy.spin(stop_reason2pose_node)
stop_reason2pose_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"rclpy.spin",
"case_converter.pascal2snake",
"argparse.ArgumentParser",
"self_pose_listener.SelfPoseListener",
"geometry_msgs.msg.PoseStamped",
"rtree.index.Index",
"math.hypot",
"numpy.argmin",
"rclpy.init",
"rclpy.shutdown"
] |
[((5663, 5675), 'rclpy.init', 'rclpy.init', ([], {}), '()\n', (5673, 5675), False, 'import rclpy\n'), ((5690, 5715), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5713, 5715), False, 'import argparse\n'), ((5854, 5887), 'rclpy.spin', 'rclpy.spin', (['stop_reason2pose_node'], {}), '(stop_reason2pose_node)\n', (5864, 5887), False, 'import rclpy\n'), ((5933, 5949), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (5947, 5949), False, 'import rclpy\n'), ((1325, 1343), 'self_pose_listener.SelfPoseListener', 'SelfPoseListener', ([], {}), '()\n', (1341, 1343), False, 'from self_pose_listener import SelfPoseListener\n'), ((3901, 3921), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (3910, 3921), True, 'import numpy as np\n'), ((5469, 5505), 'math.hypot', 'math.hypot', (['(p1.x - p2.x)', '(p1.y - p2.y)'], {}), '(p1.x - p2.x, p1.y - p2.y)\n', (5479, 5505), False, 'import math\n'), ((1559, 1591), 'case_converter.pascal2snake', 'pascal2snake', (['stop_reason.reason'], {}), '(stop_reason.reason)\n', (1571, 1591), False, 'from case_converter import pascal2snake\n'), ((2945, 2958), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2956, 2958), False, 'from geometry_msgs.msg import PoseStamped\n'), ((4079, 4092), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (4090, 4092), False, 'from rtree import index\n'), ((1810, 1823), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1821, 1823), False, 'from geometry_msgs.msg import PoseStamped\n')]
|
import mtrain
import numpy as np
import pandas as pd
import random
def simulate_games(num_players=4, domino_size=12, num_games=250, collect_data=True,
debug=False, players=["Random", "Greedy", "Probability", "Neural"],
file_name="PlayData/data4_12_250"):
"""
Runs the mexican train game repeatedly with different combinations of players to
generate data to be used in testing and training the neural net.
If collect_data is on, the play data is retrieved and stored into a .xlsx file for later use
The format for the file name for this is as follows:
PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
This spreadsheet is to be used when training the neural net.
This script has no required parameters, and will run the game with the default params if
unchanged.
If collect_data is on, the players are selected randomly each game from:
["Random", "Greedy", "Probability"]
If collect_data is off, the players are selected in order from the parameter players.
When collect_data is off: len(players) must equal num_players
Returns a tuple of lists: (score_averages, win_percentage) corresponding to the players
"""
#Sets column names for building dataframe later on
column_names = ["round_number", "turn_number", "player_number", "play",
"t_num", "hand", "unknown", "potential_plays", "points"]
#Depending on mode of use, sets players and checks validity of player values
modes = []
if collect_data:
modes = ["Random", "Greedy", "Probability"]
else:
if not len(players) == num_players:
raise RuntimeError("len(players) must equal num_players when collect_data is off")
modes = players
#Simulates num_games of games
scores = np.ndarray((num_players, num_games))
wins = np.ndarray((num_players, num_games))
full_data = pd.DataFrame(columns=column_names)
current_index = 0
for game_num in range(0, num_games):
#Randomize players if in collect_data mode
game_modes = []
if collect_data:
for select in range(0, num_players):
game_modes.append(random.choice(modes))
else:
game_modes = modes
#Run game with parameters
results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
modes=game_modes,
data_collection=collect_data,
data_index=current_index, file_name=file_name)
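        # results: (final scores per player, index of the winning player, play DataFrame when collecting data)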
#If collecting data, data is stored into the dataframe
if collect_data:
current_index = results[2].index[-1] + 1
full_data = pd.concat([full_data, results[2]])
#Scores and wins are recorded into their respective arrays
for player_num in range(0, num_players):
scores[player_num, game_num] = results[0][player_num]
if results[1] == player_num:
wins[player_num, game_num] = 1
else:
wins[player_num, game_num] = 0
#Calculates performance of the players
score_averages = np.ndarray((num_players))
win_percentage = np.ndarray((num_players))
for player_num in range(0, num_players):
score_averages[player_num] = np.mean(scores[player_num, :])
win_percentage[player_num] = np.mean(wins[player_num, :])
#If collecting data, prints data to a .xlsx file
if collect_data:
filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) + "_" + str(num_games) + ".xlsx"
writer = pd.ExcelWriter(filename)
full_data.to_excel(writer, "Sheet1")
writer.save()
#Prints results and returns them as well
if debug: print(score_averages)
if debug: print(win_percentage)
return score_averages, win_percentage
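
# Minimal usage sketch (added for illustration; not part of the original script):
# run a short data-collection simulation and report the players' performance.
if __name__ == "__main__":
    averages, win_rates = simulate_games(num_games=10)
    print("average scores:", averages)
    print("win percentages:", win_rates)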
|
[
"numpy.mean",
"random.choice",
"numpy.ndarray",
"pandas.concat",
"pandas.DataFrame",
"pandas.ExcelWriter",
"mtrain.mexicantrain"
] |
[((1844, 1880), 'numpy.ndarray', 'np.ndarray', (['(num_players, num_games)'], {}), '((num_players, num_games))\n', (1854, 1880), True, 'import numpy as np\n'), ((1892, 1928), 'numpy.ndarray', 'np.ndarray', (['(num_players, num_games)'], {}), '((num_players, num_games))\n', (1902, 1928), True, 'import numpy as np\n'), ((1945, 1979), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (1957, 1979), True, 'import pandas as pd\n'), ((3240, 3263), 'numpy.ndarray', 'np.ndarray', (['num_players'], {}), '(num_players)\n', (3250, 3263), True, 'import numpy as np\n'), ((3287, 3310), 'numpy.ndarray', 'np.ndarray', (['num_players'], {}), '(num_players)\n', (3297, 3310), True, 'import numpy as np\n'), ((2355, 2517), 'mtrain.mexicantrain', 'mtrain.mexicantrain', (['num_players', 'domino_size'], {'debug': 'debug', 'modes': 'game_modes', 'data_collection': 'collect_data', 'data_index': 'current_index', 'file_name': 'file_name'}), '(num_players, domino_size, debug=debug, modes=game_modes,\n data_collection=collect_data, data_index=current_index, file_name=file_name\n )\n', (2374, 2517), False, 'import mtrain\n'), ((3395, 3425), 'numpy.mean', 'np.mean', (['scores[player_num, :]'], {}), '(scores[player_num, :])\n', (3402, 3425), True, 'import numpy as np\n'), ((3463, 3491), 'numpy.mean', 'np.mean', (['wins[player_num, :]'], {}), '(wins[player_num, :])\n', (3470, 3491), True, 'import numpy as np\n'), ((3696, 3720), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['filename'], {}), '(filename)\n', (3710, 3720), True, 'import pandas as pd\n'), ((2796, 2830), 'pandas.concat', 'pd.concat', (['[full_data, results[2]]'], {}), '([full_data, results[2]])\n', (2805, 2830), True, 'import pandas as pd\n'), ((2227, 2247), 'random.choice', 'random.choice', (['modes'], {}), '(modes)\n', (2240, 2247), False, 'import random\n')]
|
##############################################################################
#
# Below code is inspired on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------
from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
__all__ = ["register_licenseplates_voc"]
CLASS_NAMES = [
"license_plate",
]
def load_voc_instances(dirname: str, split: str):
"""
Load licenseplates VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "annotations", "images"
split (str): one of "train", "test"
"""
with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
fileids = np.loadtxt(f, dtype=np.str)
dicts = []
for fileid in fileids:
anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")
tree = ET.parse(anno_file)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
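            # keep absolute pixel corner coordinates; bbox_mode below marks them as XYXY_ABS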
instances.append(
{"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_licenseplates_voc(name, dirname, split):
DatasetCatalog.register(name,
lambda: load_voc_instances(dirname, split))
MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
dirname=dirname,
split=split)
if __name__ == "__main__":
import random
import cv2
from detectron2.utils.visualizer import Visualizer
import argparse
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("--split", default="train")
ap.add_argument("--samples", type=int, default=10)
ap.add_argument("--scale", type=float, default=1.0)
args = ap.parse_args()
dataset_name = f"licenseplates_{args.split}"
register_licenseplates_voc(dataset_name, "datasets/licenseplates", args.split)
dataset_dicts = DatasetCatalog.get(dataset_name)
for d in random.sample(dataset_dicts, args.samples):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1],
metadata=MetadataCatalog.get(dataset_name),
scale=args.scale)
vis = visualizer.draw_dataset_dict(d)
cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])
# Exit? Press ESC
if cv2.waitKey(0) & 0xFF == 27:
break
cv2.destroyAllWindows()
|
[
"random.sample",
"xml.etree.ElementTree.parse",
"argparse.ArgumentParser",
"os.path.join",
"cv2.waitKey",
"cv2.destroyAllWindows",
"detectron2.data.MetadataCatalog.get",
"numpy.loadtxt",
"cv2.imread",
"detectron2.data.DatasetCatalog.get"
] |
[((2443, 2468), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2466, 2468), False, 'import argparse\n'), ((2808, 2840), 'detectron2.data.DatasetCatalog.get', 'DatasetCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (2826, 2840), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((2854, 2896), 'random.sample', 'random.sample', (['dataset_dicts', 'args.samples'], {}), '(dataset_dicts, args.samples)\n', (2867, 2896), False, 'import random\n'), ((3312, 3335), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3333, 3335), False, 'import cv2\n'), ((1011, 1038), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'dtype': 'np.str'}), '(f, dtype=np.str)\n', (1021, 1038), True, 'import numpy as np\n'), ((1102, 1155), 'os.path.join', 'os.path.join', (['dirname', '"""annotations"""', "(fileid + '.xml')"], {}), "(dirname, 'annotations', fileid + '.xml')\n", (1114, 1155), False, 'import os\n'), ((1176, 1224), 'os.path.join', 'os.path.join', (['dirname', '"""images"""', "(fileid + '.jpg')"], {}), "(dirname, 'images', fileid + '.jpg')\n", (1188, 1224), False, 'import os\n'), ((1241, 1260), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_file'], {}), '(anno_file)\n', (1249, 1260), True, 'import xml.etree.ElementTree as ET\n'), ((2912, 2938), 'cv2.imread', 'cv2.imread', (["d['file_name']"], {}), "(d['file_name'])\n", (2922, 2938), False, 'import cv2\n'), ((948, 985), 'os.path.join', 'os.path.join', (['dirname', "(split + '.txt')"], {}), "(dirname, split + '.txt')\n", (960, 985), False, 'import os\n'), ((2106, 2131), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['name'], {}), '(name)\n', (2125, 2131), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3029, 3062), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (3048, 3062), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3260, 3274), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3271, 3274), False, 'import cv2\n')]
|
"""
kissim.cli.encode
Encode structures (generate fingerprints) from CLI arguments.
"""
import numpy as np
from kissim.api import encode
from kissim.cli.utils import configure_logger
def encode_from_cli(args):
"""
Encode structures.
Parameters
----------
    args : argparse.Namespace
CLI arguments.
"""
configure_logger(args.output)
structure_klifs_ids = _parse_structure_klifs_ids(args.input)
encode(structure_klifs_ids, args.output, args.local, args.ncores)
def _parse_structure_klifs_ids(args_input):
"""
Parse structure KLIFS IDs.
Parameters
----------
args_input : list of str
Either path to txt file with structure KLIFS ID (one ID per row) or one or more structure
KLIFS IDs.
Returns
-------
list of int
List of structure KLIFS IDs.
"""
if len(args_input) == 1:
try:
structure_klifs_ids = [int(args_input[0])]
except ValueError:
structure_klifs_ids = np.genfromtxt(fname=args_input[0], dtype=int).tolist()
else:
structure_klifs_ids = [int(i) for i in args_input]
return structure_klifs_ids
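# Illustration of the parsing rules above (the IDs and file name are made up):
#   _parse_structure_klifs_ids(["3833"])            -> [3833]
#   _parse_structure_klifs_ids(["3833", "12347"])   -> [3833, 12347]
#   _parse_structure_klifs_ids(["klifs_ids.txt"])   -> IDs read from the file, one per row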
|
[
"kissim.cli.utils.configure_logger",
"kissim.api.encode",
"numpy.genfromtxt"
] |
[((344, 373), 'kissim.cli.utils.configure_logger', 'configure_logger', (['args.output'], {}), '(args.output)\n', (360, 373), False, 'from kissim.cli.utils import configure_logger\n'), ((443, 508), 'kissim.api.encode', 'encode', (['structure_klifs_ids', 'args.output', 'args.local', 'args.ncores'], {}), '(structure_klifs_ids, args.output, args.local, args.ncores)\n', (449, 508), False, 'from kissim.api import encode\n'), ((1016, 1061), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'args_input[0]', 'dtype': 'int'}), '(fname=args_input[0], dtype=int)\n', (1029, 1061), True, 'import numpy as np\n')]
|
import numpy as np
from util import *
def naiveDistanceProfile(tsA, idx, m, tsB = None):
"""Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.
>>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
distanceProfile = []
n = len(tsB)
for i in range(n - m + 1):
distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
if selfJoin:
        # mask the trivial match zone around the query's own position
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile = np.array(distanceProfile)
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
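# The second element of the returned tuple is an index vector (every entry equal to
# idx); together with the distance profile it can be stacked into the 2 x (n - m + 1)
# array shown in the doctest above.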
def stampDistanceProfile(tsA, idx, m, tsB = None):
"""
>>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
n = len(tsB)
distanceProfile = mass(query, tsB)
if selfJoin:
        # mask the trivial match zone around the query's own position
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"numpy.full",
"doctest.testmod"
] |
[((1672, 1689), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1687, 1689), False, 'import doctest\n'), ((887, 923), 'numpy.full', 'np.full', (['(n - m + 1)', 'idx'], {'dtype': 'float'}), '(n - m + 1, idx, dtype=float)\n', (894, 923), True, 'import numpy as np\n'), ((1581, 1617), 'numpy.full', 'np.full', (['(n - m + 1)', 'idx'], {'dtype': 'float'}), '(n - m + 1, idx, dtype=float)\n', (1588, 1617), True, 'import numpy as np\n')]
|
from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers
FDR_CONFIG = {'decoy_sample_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H', '+K', '[M]+'],
analysis_version=1,
)
exp_target_decoy_df = pd.DataFrame(
[
('H2O', '+H', '+He'),
('H2O', '+H', '+Li'),
('H2O', '+K', '+He'),
('H2O', '+K', '+Li'),
('H2O', '', '+He'),
('H2O', '', '+Li'),
],
columns=['formula', 'tm', 'dm'],
)
fdr.decoy_adducts_selection(target_formulas=['H2O'])
assert_frame_equal(
fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
)
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=analysis_version,
)
fdr.fdr_levels = [0.2, 0.8]
fdr.td_df = pd.DataFrame(
[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
['H2O', '+Cu', 0.5],
['H2O', '+Co', 0.5],
['C2H2', '+Ag', 0.75],
['C2H2', '+Ar', 0.0],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
],
columns=['formula', 'modifier', 'msm'],
).assign(fdr=expected_fdrs)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_estimate_fdr_digitize_works():
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=1,
)
fdr.fdr_levels = [0.4, 0.8]
fdr.td_df = pd.DataFrame(
[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['C1', '+H', 1.0],
['C2', '+H', 0.75],
['C3', '+H', 0.5],
['C4', '+H', 0.25],
['C1', '+Cu', 0.75],
['C2', '+Ag', 0.3],
['C3', '+Cl', 0.25],
['C4', '+Co', 0.1],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['C1', '+H', 1.0, 0.4],
['C2', '+H', 0.75, 0.4],
['C3', '+H', 0.5, 0.4],
['C4', '+H', 0.25, 0.8],
],
columns=['formula', 'modifier', 'msm', 'fdr'],
)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_ions():
formulas = ['H2O', 'C5H2OH']
target_adducts = ['+H', '+Na']
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
assert (
len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
< len(ions)
<= len(formulas) * len(target_adducts) * decoy_sample_size
+ len(formulas) * len(target_adducts)
)
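    # With 2 formulas, 2 target adducts and decoy_sample_size = 5 this checks
    # 2*5 + 2*2 = 14 < len(ions) <= 2*2*5 + 2*2 = 24.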
target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_chem_mods_and_neutral_losses():
formulas = ['H2O', 'C5H2OH']
chem_mods = ['-H+C']
neutral_losses = ['-O', '-C']
target_adducts = ['+H', '+Na', '[M]+']
target_modifiers = [
format_modifiers(cm, nl, ta)
for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
]
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=chem_mods,
neutral_losses=neutral_losses,
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
min_count = len(formulas) * len(target_modifiers)
max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
assert min_count < len(ions) <= max_count
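    # Here len(target_modifiers) = 2 * 3 * 3 = 18 (chem mod present/absent, three
    # neutral-loss options, three adducts), so the bounds are 2*18 = 36 < len(ions)
    # <= 2*18*(1 + 5) = 216.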
target_ions = list(product(formulas, target_modifiers))
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_run_fdr_ranking():
target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
expected_fdr = n_decoys / n_targets
expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
expected_fdr_mono = pd.Series(
[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
)
fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)
assert np.isclose(fdr, expected_fdr).all()
assert np.isclose(fdr_ros, expected_fdr_ros).all()
assert np.isclose(fdr_mono, expected_fdr_mono).all()
|
[
"pandas.Series",
"numpy.isclose",
"sm.engine.annotation.fdr.FDR",
"sm.engine.formula_parser.format_modifiers",
"itertools.product",
"sm.engine.annotation.fdr.run_fdr_ranking",
"pytest.mark.parametrize",
"pandas.DataFrame",
"unittest.mock.patch"
] |
[((322, 385), 'unittest.mock.patch', 'patch', (['"""sm.engine.annotation.fdr.DECOY_ADDUCTS"""', "['+He', '+Li']"], {}), "('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])\n", (327, 385), False, 'from unittest.mock import patch\n'), ((1185, 1286), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""analysis_version,expected_fdrs"""', '[(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])]'], {}), "('analysis_version,expected_fdrs', [(1, [0.2, 0.8]),\n (3, [1 / 4, 2 / 3])])\n", (1208, 1286), False, 'import pytest\n'), ((446, 567), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'FDR_CONFIG', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': "['+H', '+K', '[M]+']", 'analysis_version': '(1)'}), "(fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=\n ['+H', '+K', '[M]+'], analysis_version=1)\n", (449, 567), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((637, 820), 'pandas.DataFrame', 'pd.DataFrame', (["[('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O',\n '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li')]"], {'columns': "['formula', 'tm', 'dm']"}), "([('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K',\n '+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li')],\n columns=['formula', 'tm', 'dm'])\n", (649, 820), True, 'import pandas as pd\n'), ((1368, 1490), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'FDR_CONFIG', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': "['+H']", 'analysis_version': 'analysis_version'}), "(fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=\n ['+H'], analysis_version=analysis_version)\n", (1371, 1490), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((1581, 1722), 'pandas.DataFrame', 'pd.DataFrame', (["[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2',\n '+H', '+Ar']]"], {'columns': "['formula', 'tm', 'dm']"}), "([['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H',\n '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'])\n", (1593, 1722), True, 'import pandas as pd\n'), ((1756, 1947), 'pandas.DataFrame', 'pd.DataFrame', (["[['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O',\n '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0]]"], {'columns': "['formula', 'modifier', 'msm']"}), "([['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5],\n ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0]],\n columns=['formula', 'modifier', 'msm'])\n", (1768, 1947), True, 'import pandas as pd\n'), ((2418, 2525), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'fdr_config', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': "['+H']", 'analysis_version': '(1)'}), "(fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=\n ['+H'], analysis_version=1)\n", (2421, 2525), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((2616, 2751), 'pandas.DataFrame', 'pd.DataFrame', (["[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H',\n '+Co']]"], {'columns': "['formula', 'tm', 'dm']"}), "([['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'],\n ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'])\n", (2628, 2751), True, 'import pandas as pd\n'), ((2785, 3008), 'pandas.DataFrame', 'pd.DataFrame', (["[['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H', \n 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25], 
[\n 'C4', '+Co', 0.1]]"], {'columns': "['formula', 'modifier', 'msm']"}), "([['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], [\n 'C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3',\n '+Cl', 0.25], ['C4', '+Co', 0.1]], columns=['formula', 'modifier', 'msm'])\n", (2797, 3008), True, 'import pandas as pd\n'), ((3146, 3313), 'pandas.DataFrame', 'pd.DataFrame', (["[['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4], [\n 'C4', '+H', 0.25, 0.8]]"], {'columns': "['formula', 'modifier', 'msm', 'fdr']"}), "([['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H',\n 0.5, 0.4], ['C4', '+H', 0.25, 0.8]], columns=['formula', 'modifier',\n 'msm', 'fdr'])\n", (3158, 3313), True, 'import pandas as pd\n'), ((3651, 3766), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'fdr_config', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': 'target_adducts', 'analysis_version': '(1)'}), '(fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=\n target_adducts, analysis_version=1)\n', (3654, 3766), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((4890, 5024), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'fdr_config', 'chem_mods': 'chem_mods', 'neutral_losses': 'neutral_losses', 'target_adducts': 'target_adducts', 'analysis_version': '(1)'}), '(fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=\n neutral_losses, target_adducts=target_adducts, analysis_version=1)\n', (4893, 5024), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((5665, 5731), 'pandas.Series', 'pd.Series', (['[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]'], {}), '([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])\n', (5674, 5731), True, 'import pandas as pd\n'), ((5751, 5783), 'pandas.Series', 'pd.Series', (['[0.8, 0.55, 0.2, 0.1]'], {}), '([0.8, 0.55, 0.2, 0.1])\n', (5760, 5783), True, 'import pandas as pd\n'), ((5800, 5846), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n', (5809, 5846), True, 'import pandas as pd\n'), ((5862, 5906), 'pandas.Series', 'pd.Series', (['[0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4]'], {}), '([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])\n', (5871, 5906), True, 'import pandas as pd\n'), ((6027, 6121), 'pandas.Series', 'pd.Series', (['[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]'], {}), '([0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 /\n 11, 4 / 11])\n', (6036, 6121), True, 'import pandas as pd\n'), ((6143, 6204), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['target_scores', 'decoy_scores', '(1)', '(False)', '(False)'], {}), '(target_scores, decoy_scores, 1, False, False)\n', (6158, 6204), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((6219, 6279), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['target_scores', 'decoy_scores', '(1)', '(True)', '(False)'], {}), '(target_scores, decoy_scores, 1, True, False)\n', (6234, 6279), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((6295, 6355), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['target_scores', 'decoy_scores', '(1)', '(False)', '(True)'], {}), '(target_scores, decoy_scores, 1, False, True)\n', (6310, 6355), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((4655, 4683), 'sm.engine.formula_parser.format_modifiers', 'format_modifiers', (['cm', 'nl', 'ta'], {}), '(cm, nl, 
ta)\n', (4671, 4683), False, 'from sm.engine.formula_parser import format_modifiers\n'), ((5518, 5553), 'itertools.product', 'product', (['formulas', 'target_modifiers'], {}), '(formulas, target_modifiers)\n', (5525, 5553), False, 'from itertools import product\n'), ((2062, 2162), 'pandas.DataFrame', 'pd.DataFrame', (["[['H2O', '+H', 0.85], ['C2H2', '+H', 0.5]]"], {'columns': "['formula', 'modifier', 'msm']"}), "([['H2O', '+H', 0.85], ['C2H2', '+H', 0.5]], columns=['formula',\n 'modifier', 'msm'])\n", (2074, 2162), True, 'import pandas as pd\n'), ((4349, 4382), 'itertools.product', 'product', (['formulas', 'target_adducts'], {}), '(formulas, target_adducts)\n', (4356, 4382), False, 'from itertools import product\n'), ((4710, 4774), 'itertools.product', 'product', (["['', *chem_mods]", "['', *neutral_losses]", 'target_adducts'], {}), "(['', *chem_mods], ['', *neutral_losses], target_adducts)\n", (4717, 4774), False, 'from itertools import product\n'), ((6368, 6397), 'numpy.isclose', 'np.isclose', (['fdr', 'expected_fdr'], {}), '(fdr, expected_fdr)\n', (6378, 6397), True, 'import numpy as np\n'), ((6415, 6452), 'numpy.isclose', 'np.isclose', (['fdr_ros', 'expected_fdr_ros'], {}), '(fdr_ros, expected_fdr_ros)\n', (6425, 6452), True, 'import numpy as np\n'), ((6470, 6509), 'numpy.isclose', 'np.isclose', (['fdr_mono', 'expected_fdr_mono'], {}), '(fdr_mono, expected_fdr_mono)\n', (6480, 6509), True, 'import numpy as np\n')]
|
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
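    # hmm1 models a rigged coin (P(H) = 0.8), hmm2 a fair coin, and hmm3 switches
    # between the rigged and fair states with probability 0.5 at every step; the
    # BayesClassifier built at the end discriminates between these three models.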
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
|
[
"numpy.testing.assert_array_almost_equal",
"nose.tools.assert_almost_equal",
"nose.tools.with_setup",
"pomegranate.io.DataGenerator",
"pickle.dumps",
"pandas.DataFrame",
"nose.tools.assert_equal"
] |
[((3258, 3307), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (3268, 3307), False, 'from nose.tools import with_setup\n'), ((3449, 3495), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (3459, 3495), False, 'from nose.tools import with_setup\n'), ((3635, 3684), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (3645, 3684), False, 'from nose.tools import with_setup\n'), ((4230, 4276), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (4240, 4276), False, 'from nose.tools import with_setup\n'), ((4819, 4868), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (4829, 4868), False, 'from nose.tools import with_setup\n'), ((5422, 5468), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (5432, 5468), False, 'from nose.tools import with_setup\n'), ((6019, 6068), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (6029, 6068), False, 'from nose.tools import with_setup\n'), ((6633, 6679), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (6643, 6679), False, 'from nose.tools import with_setup\n'), ((7241, 7290), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (7251, 7290), False, 'from nose.tools import with_setup\n'), ((7828, 7874), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (7838, 7874), False, 'from nose.tools import with_setup\n'), ((8309, 8358), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (8319, 8358), False, 'from nose.tools import with_setup\n'), ((8904, 8950), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (8914, 8950), False, 'from nose.tools import with_setup\n'), ((9493, 9542), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (9503, 9542), False, 'from nose.tools import with_setup\n'), ((10099, 10145), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (10109, 10145), False, 'from nose.tools import with_setup\n'), ((10599, 10648), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (10609, 10648), False, 'from nose.tools import with_setup\n'), ((10797, 10843), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (10807, 10843), False, 'from nose.tools import with_setup\n'), ((10989, 11038), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (10999, 11038), False, 'from nose.tools import 
with_setup\n'), ((11195, 11241), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (11205, 11241), False, 'from nose.tools import with_setup\n'), ((11395, 11444), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (11405, 11444), False, 'from nose.tools import with_setup\n'), ((11612, 11658), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (11622, 11658), False, 'from nose.tools import with_setup\n'), ((11823, 11872), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (11833, 11872), False, 'from nose.tools import with_setup\n'), ((12630, 12676), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (12640, 12676), False, 'from nose.tools import with_setup\n'), ((13402, 13451), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (13412, 13451), False, 'from nose.tools import with_setup\n'), ((14282, 14331), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (14292, 14331), False, 'from nose.tools import with_setup\n'), ((14700, 14746), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (14710, 14746), False, 'from nose.tools import with_setup\n'), ((15113, 15162), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (15123, 15162), False, 'from nose.tools import with_setup\n'), ((15541, 15587), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (15551, 15587), False, 'from nose.tools import with_setup\n'), ((15964, 16013), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (15974, 16013), False, 'from nose.tools import with_setup\n'), ((16385, 16431), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (16395, 16431), False, 'from nose.tools import with_setup\n'), ((16801, 16832), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (16811, 16832), False, 'from nose.tools import with_setup\n'), ((18212, 18243), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (18222, 18243), False, 'from nose.tools import with_setup\n'), ((19217, 19248), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (19227, 19248), False, 'from nose.tools import with_setup\n'), ((20228, 20259), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (20238, 20259), False, 'from nose.tools import with_setup\n'), ((20561, 20610), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (20571, 20610), False, 'from nose.tools import with_setup\n'), ((20901, 20950), 'nose.tools.with_setup', 'with_setup', 
(['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (20911, 20950), False, 'from nose.tools import with_setup\n'), ((21216, 21265), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (21226, 21265), False, 'from nose.tools import with_setup\n'), ((21555, 21604), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (21565, 21604), False, 'from nose.tools import with_setup\n'), ((3361, 3385), 'nose.tools.assert_equal', 'assert_equal', (['model.d', '(3)'], {}), '(model.d, 3)\n', (3373, 3385), False, 'from nose.tools import assert_equal\n'), ((3387, 3411), 'nose.tools.assert_equal', 'assert_equal', (['model.n', '(2)'], {}), '(model.n, 2)\n', (3399, 3411), False, 'from nose.tools import assert_equal\n'), ((3413, 3446), 'nose.tools.assert_equal', 'assert_equal', (['model.is_vl_', '(False)'], {}), '(model.is_vl_, False)\n', (3425, 3446), False, 'from nose.tools import assert_equal\n'), ((3546, 3570), 'nose.tools.assert_equal', 'assert_equal', (['model.d', '(3)'], {}), '(model.d, 3)\n', (3558, 3570), False, 'from nose.tools import assert_equal\n'), ((3572, 3596), 'nose.tools.assert_equal', 'assert_equal', (['model.n', '(2)'], {}), '(model.n, 2)\n', (3584, 3596), False, 'from nose.tools import assert_equal\n'), ((3598, 3631), 'nose.tools.assert_equal', 'assert_equal', (['model.is_vl_', '(False)'], {}), '(model.is_vl_, False)\n', (3610, 3631), False, 'from nose.tools import assert_equal\n'), ((4191, 4226), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (4216, 4226), False, 'from numpy.testing import assert_array_almost_equal\n'), ((4780, 4815), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (4805, 4815), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5383, 5418), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (5408, 5418), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5980, 6015), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (6005, 6015), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6594, 6629), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (6619, 6629), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7202, 7237), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (7227, 7237), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7789, 7824), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (7814, 7824), False, 'from numpy.testing import assert_array_almost_equal\n'), ((8270, 8305), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (8295, 8305), False, 'from numpy.testing import assert_array_almost_equal\n'), ((8865, 8900), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (8890, 8900), False, 'from numpy.testing import assert_array_almost_equal\n'), ((9454, 9489), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], 
{}), '(y, y_hat)\n', (9479, 9489), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10060, 10095), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10085, 10095), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10560, 10595), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10585, 10595), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10758, 10793), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10783, 10793), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10950, 10985), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10975, 10985), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11156, 11191), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11181, 11191), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11356, 11391), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11381, 11391), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11573, 11608), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11598, 11608), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11784, 11819), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11809, 11819), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12468, 12505), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu1', 'mu1_t'], {}), '(mu1, mu1_t)\n', (12493, 12505), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12507, 12546), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov1', 'cov1_t'], {}), '(cov1, cov1_t)\n', (12532, 12546), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12548, 12585), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu2', 'mu2_t'], {}), '(mu2, mu2_t)\n', (12573, 12585), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12587, 12626), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov2', 'cov2_t'], {}), '(cov2, cov2_t)\n', (12612, 12626), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13142, 13179), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu1', 'mu1_t'], {}), '(mu1, mu1_t)\n', (13167, 13179), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13181, 13220), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov1', 'cov1_t'], {}), '(cov1, cov1_t)\n', (13206, 13220), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13222, 13277), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['d21.parameters', '[0.34188034]'], {}), '(d21.parameters, [0.34188034])\n', (13247, 13277), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13279, 13346), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['d22.parameters', '[1.01294275, 0.22658346]'], {}), '(d22.parameters, [1.01294275, 0.22658346])\n', (13304, 13346), False, 'from numpy.testing import 
assert_array_almost_equal\n'), ((13348, 13398), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['d23.parameters', '[2.625]'], {}), '(d23.parameters, [2.625])\n', (13373, 13398), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14120, 14157), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu1', 'mu1_t'], {}), '(mu1, mu1_t)\n', (14145, 14157), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14159, 14198), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov1', 'cov1_t'], {}), '(cov1, cov1_t)\n', (14184, 14198), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14200, 14237), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu2', 'mu2_t'], {}), '(mu2, mu2_t)\n', (14225, 14237), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14239, 14278), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov2', 'cov2_t'], {}), '(cov2, cov2_t)\n', (14264, 14278), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14640, 14696), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (14665, 14696), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15053, 15109), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (15078, 15109), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15481, 15537), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (15506, 15537), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15904, 15960), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (15929, 15960), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16325, 16381), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (16350, 16381), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16741, 16797), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (16766, 16797), False, 'from numpy.testing import assert_array_almost_equal\n'), ((18184, 18208), 'nose.tools.assert_equal', 'assert_equal', (['model.d', '(1)'], {}), '(model.d, 1)\n', (18196, 18208), False, 'from nose.tools import assert_equal\n'), ((18399, 18451), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[0][0]', '(-0.8909729238898652)'], {}), '(logs[0][0], -0.8909729238898652)\n', (18418, 18451), False, 'from nose.tools import assert_almost_equal\n'), ((18454, 18506), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[0][1]', '(-1.3609765531356006)'], {}), '(logs[0][1], -1.3609765531356006)\n', (18473, 18506), False, 'from nose.tools import assert_almost_equal\n'), ((18508, 18560), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[0][2]', '(-1.0986122886681096)'], {}), '(logs[0][2], -1.0986122886681096)\n', (18527, 18560), False, 'from nose.tools import assert_almost_equal\n'), ((18563, 18615), 'nose.tools.assert_almost_equal', 
'assert_almost_equal', (['logs[1][0]', '(-0.9357055312174429)'], {}), '(logs[1][0], -0.9357055312174429)\n', (18582, 18615), False, 'from nose.tools import assert_almost_equal\n'), ((18618, 18669), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[1][1]', '(-1.429425687080494)'], {}), '(logs[1][1], -1.429425687080494)\n', (18637, 18669), False, 'from nose.tools import assert_almost_equal\n'), ((18671, 18723), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[1][2]', '(-0.9990078376167526)'], {}), '(logs[1][2], -0.9990078376167526)\n', (18690, 18723), False, 'from nose.tools import assert_almost_equal\n'), ((18726, 18778), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[2][0]', '(-3.9007882563128864)'], {}), '(logs[2][0], -3.9007882563128864)\n', (18745, 18778), False, 'from nose.tools import assert_almost_equal\n'), ((18780, 18833), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[2][1]', '(-0.23562532881626597)'], {}), '(logs[2][1], -0.23562532881626597)\n', (18799, 18833), False, 'from nose.tools import assert_almost_equal\n'), ((18835, 18887), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[2][2]', '(-1.6623251045711958)'], {}), '(logs[2][2], -1.6623251045711958)\n', (18854, 18887), False, 'from nose.tools import assert_almost_equal\n'), ((18890, 18942), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[3][0]', '(-3.1703366478831185)'], {}), '(logs[3][0], -3.1703366478831185)\n', (18909, 18942), False, 'from nose.tools import assert_almost_equal\n'), ((18944, 18996), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[3][1]', '(-0.4926140321126038)'], {}), '(logs[3][1], -0.4926140321126038)\n', (18963, 18996), False, 'from nose.tools import assert_almost_equal\n'), ((18999, 19050), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[3][2]', '(-1.058478108940049)'], {}), '(logs[3][2], -1.058478108940049)\n', (19018, 19050), False, 'from nose.tools import assert_almost_equal\n'), ((19053, 19105), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[4][0]', '(-1.3058441172130273)'], {}), '(logs[4][0], -1.3058441172130273)\n', (19072, 19105), False, 'from nose.tools import assert_almost_equal\n'), ((19107, 19159), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[4][1]', '(-1.4007102236822906)'], {}), '(logs[4][1], -1.4007102236822906)\n', (19126, 19159), False, 'from nose.tools import assert_almost_equal\n'), ((19161, 19213), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[4][2]', '(-0.7284958836972919)'], {}), '(logs[4][2], -0.7284958836972919)\n', (19180, 19213), False, 'from nose.tools import assert_almost_equal\n'), ((19397, 19450), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[0][0]', '(0.41025641025641024)'], {}), '(probs[0][0], 0.41025641025641024)\n', (19416, 19450), False, 'from nose.tools import assert_almost_equal\n'), ((19452, 19504), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[0][1]', '(0.2564102564102564)'], {}), '(probs[0][1], 0.2564102564102564)\n', (19471, 19504), False, 'from nose.tools import assert_almost_equal\n'), ((19507, 19559), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[0][2]', '(0.3333333333333333)'], {}), '(probs[0][2], 0.3333333333333333)\n', (19526, 19559), False, 'from nose.tools import assert_almost_equal\n'), ((19563, 19614), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[1][0]', '(0.392308981634461)'], {}), 
'(probs[1][0], 0.392308981634461)\n', (19582, 19614), False, 'from nose.tools import assert_almost_equal\n'), ((19618, 19671), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[1][1]', '(0.23944639992337707)'], {}), '(probs[1][1], 0.23944639992337707)\n', (19637, 19671), False, 'from nose.tools import assert_almost_equal\n'), ((19673, 19726), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[1][2]', '(0.36824461844216183)'], {}), '(probs[1][2], 0.36824461844216183)\n', (19692, 19726), False, 'from nose.tools import assert_almost_equal\n'), ((19729, 19783), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[2][0]', '(0.020225961918306088)'], {}), '(probs[2][0], 0.020225961918306088)\n', (19748, 19783), False, 'from nose.tools import assert_almost_equal\n'), ((19785, 19836), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[2][1]', '(0.790076637433831)'], {}), '(probs[2][1], 0.790076637433831)\n', (19804, 19836), False, 'from nose.tools import assert_almost_equal\n'), ((19840, 19893), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[2][2]', '(0.18969740064786292)'], {}), '(probs[2][2], 0.18969740064786292)\n', (19859, 19893), False, 'from nose.tools import assert_almost_equal\n'), ((19896, 19949), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[3][0]', '(0.04198945986103252)'], {}), '(probs[3][0], 0.04198945986103252)\n', (19915, 19949), False, 'from nose.tools import assert_almost_equal\n'), ((19952, 20004), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[3][1]', '(0.6110270603826564)'], {}), '(probs[3][1], 0.6110270603826564)\n', (19971, 20004), False, 'from nose.tools import assert_almost_equal\n'), ((20007, 20058), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[3][2]', '(0.346983479756311)'], {}), '(probs[3][2], 0.346983479756311)\n', (20026, 20058), False, 'from nose.tools import assert_almost_equal\n'), ((20061, 20114), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[4][0]', '(0.27094373022369794)'], {}), '(probs[4][0], 0.27094373022369794)\n', (20080, 20114), False, 'from nose.tools import assert_almost_equal\n'), ((20116, 20169), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[4][1]', '(0.24642188711704707)'], {}), '(probs[4][1], 0.24642188711704707)\n', (20135, 20169), False, 'from nose.tools import assert_almost_equal\n'), ((20171, 20223), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[4][2]', '(0.4826343826592551)'], {}), '(probs[4][2], 0.4826343826592551)\n', (20190, 20223), False, 'from nose.tools import assert_almost_equal\n'), ((20410, 20438), 'nose.tools.assert_equal', 'assert_equal', (['predicts[0]', '(0)'], {}), '(predicts[0], 0)\n', (20422, 20438), False, 'from nose.tools import assert_equal\n'), ((20440, 20468), 'nose.tools.assert_equal', 'assert_equal', (['predicts[1]', '(0)'], {}), '(predicts[1], 0)\n', (20452, 20468), False, 'from nose.tools import assert_equal\n'), ((20470, 20498), 'nose.tools.assert_equal', 'assert_equal', (['predicts[2]', '(1)'], {}), '(predicts[2], 1)\n', (20482, 20498), False, 'from nose.tools import assert_equal\n'), ((20500, 20528), 'nose.tools.assert_equal', 'assert_equal', (['predicts[3]', '(1)'], {}), '(predicts[3], 1)\n', (20512, 20528), False, 'from nose.tools import assert_equal\n'), ((20530, 20558), 'nose.tools.assert_equal', 'assert_equal', (['predicts[4]', '(2)'], {}), '(predicts[4], 2)\n', (20542, 20558), False, 'from nose.tools import 
assert_equal\n'), ((20648, 20664), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (20661, 20664), False, 'from pomegranate.io import DataGenerator\n'), ((20818, 20857), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp2'], {}), '(logp1, logp2)\n', (20843, 20857), False, 'from numpy.testing import assert_array_almost_equal\n'), ((20859, 20898), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp3'], {}), '(logp1, logp3)\n', (20884, 20898), False, 'from numpy.testing import assert_array_almost_equal\n'), ((20980, 20996), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (20993, 20996), False, 'from pomegranate.io import DataGenerator\n'), ((21129, 21170), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat2'], {}), '(y_hat1, y_hat2)\n', (21154, 21170), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21172, 21213), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat3'], {}), '(y_hat1, y_hat3)\n', (21197, 21213), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21301, 21317), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (21314, 21317), False, 'from pomegranate.io import DataGenerator\n'), ((21468, 21509), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat2'], {}), '(y_hat1, y_hat2)\n', (21493, 21509), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21511, 21552), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat3'], {}), '(y_hat1, y_hat3)\n', (21536, 21552), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21644, 21660), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (21657, 21660), False, 'from pomegranate.io import DataGenerator\n'), ((21823, 21864), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat2'], {}), '(y_hat1, y_hat2)\n', (21848, 21864), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21866, 21907), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat3'], {}), '(y_hat1, y_hat3)\n', (21891, 21907), False, 'from numpy.testing import assert_array_almost_equal\n'), ((22069, 22097), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X', 'weights', 'y'], {}), '(X, weights, y)\n', (22082, 22097), False, 'from pomegranate.io import DataGenerator\n'), ((22571, 22610), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp2'], {}), '(logp1, logp2)\n', (22596, 22610), False, 'from numpy.testing import assert_array_almost_equal\n'), ((22781, 22809), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X', 'weights', 'y'], {}), '(X, weights, y)\n', (22794, 22809), False, 'from pomegranate.io import DataGenerator\n'), ((23040, 23079), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp2'], {}), '(logp1, logp2)\n', (23065, 23079), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14399, 14418), 'pickle.dumps', 'pickle.dumps', (['model'], {}), '(model)\n', (14411, 14418), False, 'import pickle\n'), ((14811, 14830), 'pickle.dumps', 'pickle.dumps', (['model'], {}), '(model)\n', (14823, 14830), False, 'import pickle\n'), ((20690, 20709), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (20706, 20709), False, 
'import pandas\n'), ((21022, 21041), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (21038, 21041), False, 'import pandas\n'), ((21343, 21362), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (21359, 21362), False, 'import pandas\n'), ((21686, 21705), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (21702, 21705), False, 'import pandas\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
    % the random walk with restart (RWR) algorithm.
    % A is the input network (adjacency) matrix, with the diagonal set to 0.
    % nSteps: the maximum number of steps to walk.
    % laziness: the restart probability (the probability to go back to the start node).
    % p0: the initial probability matrix. usually the identity matrix (zeros
    % with ones on the diagonal).
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
% Each column represents the propability for each node. each element in the
% column means the probability to go to that node.
% This algorithm will converge. For example, for the above matrix, nSteps =
% 100, 1000 or 10000, will give the same result.
'''
n = len(A)
    if p0 is None:
p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
# test
#dst = distance.euclidean(A)
# correct, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
    # delete the diagonal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
    # make the matrix symmetric (undirected network)
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
    % on the rw_mat matrix, find some nodes to act as the centroids for soft
    % clustering. If we just randomly picked some nodes as centroids, that
    % would not be good for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
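# A hedged worked sketch (hypothetical 3-node matrix, made-up numbers):
# rw_mat = np.array([[0. , 1. , 0. ],
#                    [1. , 0. , 0.2],
#                    [0. , 0.2, 0. ]])
# f_find_centers_rwMat(rw_mat, 2) -> [1, 2]
# the row sums are [1.0, 1.2, 0.2], so the most connected node (1) is the first
# centroid; the node least connected to it (node 2, with rw_mat[2, 1] = 0.2,
# after excluding node 1 itself) is picked as the second centroid.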
def getCutoff(rw_mat, avgNeighborsSize):
tmp = rw_mat.flatten('F')
a = np.flip(np.sort(tmp), 0)
len1 = len(rw_mat)
#cutoffs = []
all_neibs = int( avgNeighborsSize * len1 )
print( all_neibs)
ct = a[all_neibs]
return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_len_of_each_ele(c1):
#% Assume c1 is a 1-dimension cell array, and each element is a 1d double
#% array. This function counts the length of each double array.
lens = np.zeros(len(c1))
for i in range(0, len(c1)):
lens[i] = len(c1[i])
return lens
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
The return variable clus stores the instance indices for each cluster.
    However, with this data structure it is not easy to look up, for a given
    instance, which clusters it belongs to, so we also need to convert clus to
    a true-false matrix.
'''
    if each_clus_sz is None:
# on average, how many clusters does one inst belongs to.
#overlap_factor = 2;
# the estimated size of each cluster. default is half the number of
# instances.
each_clus_sz=len(X)/3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
    # % set the diagonal to 1
np.fill_diagonal(rw_net, True)
clus = []
for i in range(0, k):
tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
clus.append(tmp)
# ---------------------------------------------------------------
# % sort the clusters
lens = f_len_of_each_ele(clus)
ix = np.argsort(lens)[::-1]
clus_ordered = [clus[i] for i in ix]
print(' center inst. index of each cluster: ')
ixs_centers = np.array(ixs_centers)
print(ixs_centers[ix])
print(' size of each cluster: ')
print(lens[ix])
print(' done RWR clustering')
return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass
def f_clus_to_tfs(clus, n_inst):
#% convert the cluster information from cell array to mat. But for each
#% instance, the rank of clusters information will be lost - you won't know
#% what is the top 1/2/3 cluster it belongs to.
#%
#% clus e.g:
#% 1x5 cell
#% 1x195 double 1x193 double 1x169 double 1x161 double 1x62 double
#%
#% tfs e.g:
#% 295x5 double
#% 1 0 0 0 0
#% 1 1 1 1 0
#% 1 1 1 0 0
#% 1 1 0 0 0
#% 1 1 1 1 0
#% ...
#% 1 1 1 1 1
#% 1 0 0 0 0
#% 1 1 1 0 0
tfs = np.zeros((n_inst, len(clus)), dtype=bool)
for i in range(0, len(clus)):
tfs[clus[i], i] = True
return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass
def f_tfs_2_instClus(tfs):
'''
    convert the boolean table representation of the clustering result into,
    for each instance, the list of clusters it belongs to.
'''
inst_clus = []
for i in range(0, len(tfs)):
row = list( np.where(tfs[i, :] ) [0] )
inst_clus.append(row)
return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
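# A small hedged illustration (hypothetical input):
# tfs = np.array([[ True, False,  True],
#                 [False,  True, False]])
# f_tfs_2_instClus(tfs) -> [[0, 2], [1]]
# i.e. instance 0 belongs to clusters 0 and 2, instance 1 to cluster 1.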
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''
#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
# '''
# corresponds to f_weka_bg_svm_tr_te() in Matlab version
# '''
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
def f_tr(X_tr, y_tr, model):
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
return model_inner
def f_te(X_te, model):
y_pred = model.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
return y_pred
def f_tr_te(X_tr, y_tr, X_te, model):
'''
corresponds to f_weka_bg_svm_tr_te() in Matlab version
'''
#bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
y_pred = model_inner.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
#auc = roc_auc_score(y_te.flatten(), y_pred)
return y_pred
def f_k_fo(X, y, model, k_fold=10):
'''
corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
'''
y = y.flatten()
y_pred = np.zeros(y.size)
skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
skf.get_n_splits(X, y)
for train_index, test_index in skf.split(X, y):
#print("TRAIN: ", train_index, " TEST: ", test_index)
X_tr, X_te = X[train_index], X[test_index]
#y_tr, y_te = y[train_index], y[test_index]
y_tr = y[train_index]
if np.unique(y_tr).size == 1:
y_pred_fo = np.zeros( len(test_index) )
#print len(X_te)
#print len(test_index)
#print y_pred_fo
y_pred_fo.fill(np.unique(y_tr)[0] )
#print y_pred_fo
else:
y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
y_pred[test_index] = y_pred_fo
#auc = roc_auc_score(y.flatten(), y_pred)
return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
def f_quantileNorm(templete, target):
'''
    templete is the standard: target is changed to take on the values of
    templete while keeping its own rank order (quantile normalization).
    Target may have a very different range than the templete.
    templete and target should be 1d arrays of length n.
f_my_quantileNorm()
'''
ix_target = np.argsort(target, kind='mergesort')
ix_templete = np.argsort(templete, kind='mergesort')
target[ix_target] = templete[ix_templete]
new = target
return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
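# A hedged toy example (made-up numbers) of what the rank-preserving mapping does:
# templete = np.array([1., 5., 10.])
# target   = np.array([100., 300., 200.])
# f_quantileNorm(templete, target) -> array([ 1., 10.,  5.])
# target keeps its own rank order but takes on the templete's values;
# note that target is modified in place.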
#def f_bg_k_fo_3(X, y, k_fold=10):
# '''
# corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
# corresponds to f_k_fo()
# '''
# y_pred = np.zeros((y.size, 1))
#
# skf = StratifiedKFold(n_splits=k_fold)
# skf.get_n_splits(X, y)
#
# for train_index, test_index in skf.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# X_tr, X_te = X[train_index], X[test_index]
# y_tr, y_te = y[train_index], y[test_index]
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
'''
    % use each cluster's data to predict all instances; the predictions for the
    % cluster's own instances are obtained with k-fold CV.
corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
'''
n_clusters = len(clus)
y_pred_multi = np.zeros((y.size, n_clusters) )
models = []
for j in range(0, n_clusters):
# for each cluster
Xj = X[clus[j].flatten(), :]
yj = y[clus[j].flatten() ]
model_a_clust = copy.deepcopy(model)
print(' Cluster '+str(j)+' started...')
#if len(yj) > 10:
if len(yj) > 15 and np.unique(yj).size != 1:
# ------------------ for self ------------------
#if np.unique(yj).size == 1:
# y_pred = np.zeros(yj.size)
# y_pred.fill(np.unique(yj)[0])
#else:
try:
y_pred = f_k_fo(Xj, yj, model, fo_inner)
# quantileNorm
templete = y_pred_whole[clus[j].flatten()]
target = y_pred
y_pred = f_quantileNorm(templete, target)
# copy the normed prediction to the whole data.
y_pred_multi[clus[j].flatten(), j] = y_pred
print(' c-'+str(j)+' done predicting local instances')
# ------------------ for other -----------------
ix_other = set(range(0, y.size)) - set(clus[j].flatten())
ix_other = list(ix_other)
#print ix_other
X_other = X[ix_other , :]
#y_other = y[ix_other ]
# predict
#y_pred = f_tr_te(Xj, yj, X_other, model)
#if np.unique(yj).size != 1:
model_a_clust.fit(Xj, yj)
y_pred = model_a_clust.predict_proba(X_other)
y_pred = y_pred[:, 1].flatten()
# quantileNorm
templete = y_pred_whole[ix_other]
target = y_pred
y_pred = f_quantileNorm(templete, target)
#else:
# y_pred = np.zeros(X_other.size)
# y_pred.fill(np.unique(yj)[0])
# copy to the whole array
y_pred_multi[ix_other, j] = y_pred
print(' c-'+str(j)+' done predicting remote instances')
except ValueError as e:
print(e)
print(' skip this cluster')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
else:
if len(yj) <= 15:
print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
if np.unique(yj).size == 1:
print (' warning, #unique class label(s) == 1')
y_pred = np.zeros(y.size)
y_pred.fill(np.unique(yj)[0])
y_pred_multi[:, j] = y_pred
model_a_clust = np.unique(yj)[0]
models.append(model_a_clust)
return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
#def f_dec_tab_4_bg_svm(X, y, clus):
# '''
# Calculate the decision table
# % This version changed from the cluster-cluster dec_mat to instance-cluster
# % dec_mat. This solution will avoid the case that if one cluster decision
# % is wrong leading entrie cluster prediction is wrong, which is the reason
# % of instability. However, we cannot use a systematic evaluation criteria
# % such as AUC, I will try using the predicted prob at first.
#
# % This version 3 adds the support for fuzzy clustering - one instance may
# % belongs to more than one cluster.
# % This updated version also outputs the predicted values of y.
# % support more than 3 clusters
# % normalization take place in y_pred_self and y_pred_other, thus do not
# % need normalization when predict y_pred_ICE.
# % ixsp is another cluster form.
#
# corresponds to f_dec_tab_4_bg_svm() in Matlab version
# '''
# #n_clusters = len(clus)
# ## dec_mat stores the prediction error.
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# #
# ## k_fold of inner cross-validation
# #fo_inner = 10
# # --------------------------- WHOLE -------------------------
#
# # --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
'''
    Calculate the prediction-error table (used later to build the decision table).
corresponds to f_dec_tab_4_bg_svm() in Matlab version
'''
n_clusters = len(clus)
# err_mat stores the prediction error.
pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# col 0 to col n_clusters-1 store the predictions by each cluster
# the last col stores the pred by whole data
#models = []
# k_fold of inner cross-validation
fo_inner = 5
# --------------------------- WHOLE -------------------------
# Predict each cluster using the whole data.
model_whole = copy.deepcopy(model)
y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
model_whole.fit(X, y) # fit a model using all data rather than only a fold
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
# predict the whole instances using each cluster data, while self
# prediction using 10-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]
"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
# '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
'''
Convert the err table to decision table.
'''
dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
# dec_ixs: for each instance, which clusters should be used.
dec_ixs = []
inst_clus = f_tfs_2_instClus(tfs)
for i in range(0, len(err_mat)):
# Matlab code:
#dec_row = dec_mat(cur_nb_ix, :);
#dec_row(:, end ) = dec_row(:, end ) - adv_whole;
#dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
row = np.copy( err_mat[i, :] )
#print row
row[-1] = row[-1] - adv_whole
inst_i_clus = inst_clus[i]
if len(inst_i_clus) > 0:
row[inst_i_clus] = row[inst_i_clus] - adv_self
#print row
ix_good_clus = list( np.where( row < row[-1] ) [0] )
#print ix_good_clus
if len(ix_good_clus) > 0:
dec_mat[i, ix_good_clus] = True
dec_ixs.append(ix_good_clus)
else:
dec_ixs.append([])
return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
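# A hedged toy walk-through (made-up numbers) of the decision rule for one instance:
# suppose err_mat[i, :] = [0.2, 0.6, 0.3] (two clusters plus the whole-data model),
# instance i belongs to cluster 0 only, and adv_whole = 0.4, adv_self = 0.5.
# Then row[-1] = 0.3 - 0.4 = -0.1 and row[0] = 0.2 - 0.5 = -0.3, so only
# cluster 0 satisfies row < row[-1], giving dec_mat[i, 0] = True and dec_ixs[i] = [0].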
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
'''
Use the training data to predict the testing data.
Use whole training data to predict
Use each cluster of training data to predict the testing data.
'''
y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
# the first col is the prediction using the whole data
model_whole = models[-1]
y_pred_all[:, 0] = f_te(X_te, model_whole)
#y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
#print 'whole model good '
# start from the second col, the result is by each cluster
for i in range(0, len(clus)):
#Xi = X_tr[clus[i].flatten(), :]
#yi = y_tr[clus[i].flatten() ]
model_i = models[i]
#model_a_clust = copy.deepcopy(model)
try:
y_pred_te = f_te(X_te, model_i)
except :
if model_i == 0:
y_pred_te = np.zeros(len(X_te))
elif model_i == 1:
y_pred_te = np.ones(len(X_te))
else:
y_pred_te = np.zeros(len(X_te))
y_pred_te.fill(np.nan)
#except NotFittedError as e:
# print(repr(e))
# y_pred_te = np.zeros(len(X_te))
# y_pred_te.fill(np.nan)
#print 'model '+str(i)+' good '
#y_pred_te = f_tr_te(Xi, yi, X_te, model)
if doNorm == True:
templete = y_pred_all[:, 0]
target = y_pred_te
y_pred = f_quantileNorm(templete, target)
else:
y_pred = y_pred_te
y_pred_all[:, i+1] = y_pred
return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)
def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-dicision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating decision table')
return [clus, models, dec_ixs]
#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them with the mean.
'''
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
    This version uses the err_mat for re-clustering.
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-dicision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# ******************** re-clustering ********************
n_iter = 2
for i in range(0, n_iter):
clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
tfs = f_clus_to_tfs(clus, len(X_tr))
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# *******************************************************
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating decision table')
return [clus, models, dec_ixs]
def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
'''
    clus and inst_clus contain the same information: clus stores the instance
    ids for each cluster, while inst_clus stores, for each instance, which
    cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster; an entry in dec_ixs generally holds only a subset
    of the clusters, not the whole set.
'''
# the first col is the prediction using the whole data
# start from the second col, the result is by each cluster
y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
y_pred_ICE = np.zeros( len(X_te) )
neighbour_mat = f_eu_dist2(X_tr, X_te)
# ---------- for each testing instance ----------
#n_partials = np.zeros( len(X_te) )
#n_wholes = np.zeros( len(X_te) )
for j in range(0, len(X_te) ):
# for each testing instance
# find the top 10 neighbors for each test instance
neighbour_col = neighbour_mat[:, j].flatten()
ix = np.argsort(neighbour_col )
ix = ix[::-1]
ix_top_neighbors = ix[0:N]
#print 'testing inst ' + str(j)
#print ' ix of top neighbors:'
#print ix_top_neighbors
# ---------- find all neighbors' picks ----------
clus_ids_to_use = []
nei_labels = []
for cur_nb in range(0, N):
# for each neighbour
# find each neighbour's pick
cur_nb_ix = ix_top_neighbors[cur_nb]
clus_id_to_use = list( dec_ixs[cur_nb_ix] )
clus_ids_to_use = clus_ids_to_use + clus_id_to_use
# also find neighbor's label. maybe will be used later as KNN pred
# instead of using whole to pred.
nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
#print ' clus_ids_to_use:'
#print clus_ids_to_use
# cluster id + 1 to make the ix fit the col id in y_pred_all
a = clus_ids_to_use
a = list( np.array(a) + 1 )
clus_ids_to_use = a
# number of partial models used
n_partial = len(clus_ids_to_use)
# number of whole models used, based on parameters alpha, beta and N.
n_whole = int( round( alpha*n_partial + beta*N ) )
clus_ids_to_use = clus_ids_to_use + [0] * n_whole
#print ' clus_ids_to_use:'
#print clus_ids_to_use
#print nei_labels
y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
print ('Done predicting testing instances.')
return y_pred_ICE
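# A hedged sketch (hypothetical numbers) of how votes are combined for one test
# instance j with N = 5 neighbours whose dec_ixs entries are, say,
# [0], [2], [], [0, 1], [2]:
#   clus_ids_to_use = [0, 2, 0, 1, 2] -> shifted by +1 -> [1, 3, 1, 2, 3]
#   n_partial = 5; with alpha = beta = 1, n_whole = round(1*5 + 1*5) = 10,
#   so ten copies of column 0 (the whole-data model) are appended and
#   y_pred_ICE[j] = np.nanmean(y_pred_all[j, [1, 3, 1, 2, 3, 0, 0, ..., 0]])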
# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
try:
X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
model = svm.SVC(kernel='linear', probability = True)
skf = StratifiedKFold(n_splits=k_fold)
skf.get_n_splits(X, y)
y_preds_ICE = np.zeros( y.size )
y_preds_whole = np.zeros( y.size )
fold_i = 1
for train_index, test_index in skf.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_tr, X_te = X[train_index], X[test_index]
y_tr, y_te = y[train_index], y[test_index]
[clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
y_preds_ICE[test_index] = y_pred_ICE
y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
y_preds_whole[test_index] = y_pred_whole
print( j)
print( 'fold ' + str(fold_i) + ' finished')
fold_i = fold_i + 1
auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
print (auc_ICE, auc_whole)
aucs_ICE.append(auc_ICE)
aucs_whole.append(auc_whole)
f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
    except Exception as e:
        print(e)
        continue
f.close()
|
[
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"numpy.argsort",
"numpy.nanmean",
"copy.deepcopy",
"numpy.where",
"numpy.float64",
"numpy.sort",
"numpy.argmin",
"numpy.eye",
"numpy.argmax",
"numpy.fill_diagonal",
"numpy.finfo",
"sklearn.svm.SVC",
"numpy.copy",
"numpy.unique",
"numpy.logical_or",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"scipy.spatial.distance.euclidean"
] |
[((4652, 4676), 'numpy.fill_diagonal', 'np.fill_diagonal', (['sim', '(0)'], {}), '(sim, 0)\n', (4668, 4676), True, 'import numpy as np\n'), ((5076, 5111), 'numpy.logical_or', 'np.logical_or', (['aRankNet', 'aRankNet.T'], {}), '(aRankNet, aRankNet.T)\n', (5089, 5111), True, 'import numpy as np\n'), ((5200, 5233), 'numpy.fill_diagonal', 'np.fill_diagonal', (['aRankNet', '(False)'], {}), '(aRankNet, False)\n', (5216, 5233), True, 'import numpy as np\n'), ((6446, 6468), 'numpy.sum', 'np.sum', (['rw_mat'], {'axis': '(1)'}), '(rw_mat, axis=1)\n', (6452, 6468), True, 'import numpy as np\n'), ((6536, 6548), 'numpy.argmax', 'np.argmax', (['a'], {}), '(a)\n', (6545, 6548), True, 'import numpy as np\n'), ((7966, 7990), 'numpy.fill_diagonal', 'np.fill_diagonal', (['sim', '(0)'], {}), '(sim, 0)\n', (7982, 7990), True, 'import numpy as np\n'), ((9931, 9954), 'numpy.fill_diagonal', 'np.fill_diagonal', (['rw', '(0)'], {}), '(rw, 0)\n', (9947, 9954), True, 'import numpy as np\n'), ((10224, 10254), 'numpy.fill_diagonal', 'np.fill_diagonal', (['rw_net', '(True)'], {}), '(rw_net, True)\n', (10240, 10254), True, 'import numpy as np\n'), ((10679, 10700), 'numpy.array', 'np.array', (['ixs_centers'], {}), '(ixs_centers)\n', (10687, 10700), True, 'import numpy as np\n'), ((13295, 13315), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (13308, 13315), False, 'import copy\n'), ((13802, 13822), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (13815, 13822), False, 'import copy\n'), ((14187, 14203), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (14195, 14203), True, 'import numpy as np\n'), ((14219, 14284), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k_fold', 'random_state': 'None', 'shuffle': '(True)'}), '(n_splits=k_fold, random_state=None, shuffle=True)\n', (14234, 14284), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((16031, 16067), 'numpy.argsort', 'np.argsort', (['target'], {'kind': '"""mergesort"""'}), "(target, kind='mergesort')\n", (16041, 16067), True, 'import numpy as np\n'), ((16086, 16124), 'numpy.argsort', 'np.argsort', (['templete'], {'kind': '"""mergesort"""'}), "(templete, kind='mergesort')\n", (16096, 16124), True, 'import numpy as np\n'), ((17127, 17157), 'numpy.zeros', 'np.zeros', (['(y.size, n_clusters)'], {}), '((y.size, n_clusters))\n', (17135, 17157), True, 'import numpy as np\n'), ((21893, 21927), 'numpy.zeros', 'np.zeros', (['(y.size, n_clusters + 1)'], {}), '((y.size, n_clusters + 1))\n', (21901, 21927), True, 'import numpy as np\n'), ((22300, 22320), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (22313, 22320), False, 'import copy\n'), ((2091, 2100), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2097, 2100), True, 'import numpy as np\n'), ((4734, 4767), 'numpy.argsort', 'np.argsort', (['sim'], {'kind': '"""mergesort"""'}), "(sim, kind='mergesort')\n", (4744, 4767), True, 'import numpy as np\n'), ((6683, 6702), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (6689, 6702), True, 'import numpy as np\n'), ((6784, 6796), 'numpy.argmin', 'np.argmin', (['b'], {}), '(b)\n', (6793, 6796), True, 'import numpy as np\n'), ((6971, 6983), 'numpy.sort', 'np.sort', (['tmp'], {}), '(tmp)\n', (6978, 6983), True, 'import numpy as np\n'), ((10535, 10551), 'numpy.argsort', 'np.argsort', (['lens'], {}), '(lens)\n', (10545, 10551), True, 'import numpy as np\n'), ((17338, 17358), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (17351, 17358), False, 'import 
copy\n'), ((24935, 24957), 'numpy.copy', 'np.copy', (['err_mat[i, :]'], {}), '(err_mat[i, :])\n', (24942, 24957), True, 'import numpy as np\n'), ((30166, 30191), 'numpy.argsort', 'np.argsort', (['neighbour_col'], {}), '(neighbour_col)\n', (30176, 30191), True, 'import numpy as np\n'), ((31645, 31687), 'numpy.nanmean', 'np.nanmean', (['y_pred_all[j, clus_ids_to_use]'], {}), '(y_pred_all[j, clus_ids_to_use])\n', (31655, 31687), True, 'import numpy as np\n'), ((33134, 33176), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'probability': '(True)'}), "(kernel='linear', probability=True)\n", (33141, 33176), False, 'from sklearn import svm\n'), ((33202, 33234), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k_fold'}), '(n_splits=k_fold)\n', (33217, 33234), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((33297, 33313), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (33305, 33313), True, 'import numpy as np\n'), ((33340, 33356), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (33348, 33356), True, 'import numpy as np\n'), ((7860, 7890), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['X[i]', 'X[j]'], {}), '(X[i], X[j])\n', (7878, 7890), False, 'from scipy.spatial import distance\n'), ((8283, 8315), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['X1[i]', 'X2[j]'], {}), '(X1[i], X2[j])\n', (8301, 8315), False, 'from scipy.spatial import distance\n'), ((9365, 9395), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['X[i]', 'X[j]'], {}), '(X[i], X[j])\n', (9383, 9395), False, 'from scipy.spatial import distance\n'), ((3839, 3854), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3847, 3854), True, 'import numpy as np\n'), ((4812, 4843), 'numpy.argsort', 'np.argsort', (['I'], {'kind': '"""mergesort"""'}), "(I, kind='mergesort')\n", (4822, 4843), True, 'import numpy as np\n'), ((10314, 10352), 'numpy.argwhere', 'np.argwhere', (['rw_net[:, ixs_centers[i]]'], {}), '(rw_net[:, ixs_centers[i]])\n', (10325, 10352), True, 'import numpy as np\n'), ((12060, 12079), 'numpy.where', 'np.where', (['tfs[i, :]'], {}), '(tfs[i, :])\n', (12068, 12079), True, 'import numpy as np\n'), ((14585, 14600), 'numpy.unique', 'np.unique', (['y_tr'], {}), '(y_tr)\n', (14594, 14600), True, 'import numpy as np\n'), ((19729, 19745), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (19737, 19745), True, 'import numpy as np\n'), ((19969, 19985), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (19977, 19985), True, 'import numpy as np\n'), ((25210, 25233), 'numpy.where', 'np.where', (['(row < row[-1])'], {}), '(row < row[-1])\n', (25218, 25233), True, 'import numpy as np\n'), ((31185, 31196), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (31193, 31196), True, 'import numpy as np\n'), ((14784, 14799), 'numpy.unique', 'np.unique', (['y_tr'], {}), '(y_tr)\n', (14793, 14799), True, 'import numpy as np\n'), ((17472, 17485), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (17481, 17485), True, 'import numpy as np\n'), ((19468, 19484), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (19476, 19484), True, 'import numpy as np\n'), ((19854, 19867), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (19863, 19867), True, 'import numpy as np\n'), ((20126, 20139), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (20135, 20139), True, 'import numpy as np\n'), ((20014, 20027), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (20023, 20027), True, 'import numpy as np\n'), 
((3443, 3456), 'numpy.float64', 'np.float64', (['A'], {}), '(A)\n', (3453, 3456), True, 'import numpy as np\n')]
|
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - we want to take p and ecc from each population (all, obs, rec) and put them into separate files
# Doing this so we don't have to re-run analyse each time
# Can write a separate script for the p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
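#A hedged note on the formula above: it implements the hard-soft boundary period
#   P_hs = (pi*G/sqrt(2)) * (m1*m2/m3)^(3/2) * (m1+m2)^(-1/2) * sigma^(-3),
#so a typical (hypothetical) call is getPhs(1.*units.km/units.s), which returns
#an astropy Quantity in days for the default solar-type masses.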
#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
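#Hedged usage sketch: the fitted PowerLaw1D model returned above is callable, e.g.
#   fbFit = fitRagfb()
#   fbFit(np.array([0.5, 1.0, 2.0]))  #approximate binary fraction at those primary masses
#(the values are whatever the least-squares fit to the five points above gives).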
def RagNormal(x, cdf = False):
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
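#Hedged usage sketch: RagNormal is the Raghavan log-normal period distribution in
#log10(P/day); integrating it gives the fraction of binaries in a period range, e.g.
#   quad(RagNormal, -20, np.log10(365.*10.))
#for periods below ~10 years, mirroring the normalization in the __main__ block below.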
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
	c2 = '#A62B1F' #Dali Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
	ax1.set_title('M67 - Baseline, No Crowding', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; 3 files for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
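				#Hedged reading of the two steps above: fb is the field binary fraction
				#weighted by this cluster's primary-mass histogram, then multiplied by the
				#fraction of the Raghavan period distribution below Phs (i.e. only hard
				#binaries, P < Phs, are assumed to remain as binaries in the cluster).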
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
for filt in filters:
key = filt+'LSS_PERIOD'
if (filt == 'all'):
key = 'LSM_PERIOD'
fullP = abs(data[key] - data['p'])/data['p']
halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
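					#Hedged illustration (made-up numbers): with Pcut = 0.1, a binary with true
					#p = 10 d whose recovered period is 5.02 d has halfP = |5.02 - 5|/5 = 0.004 < Pcut,
					#so half-period (and likewise double-period) aliases also count as recovered.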
Nrec = len(rec.index)
#I'd like to account for all filters here to have more accurate numbers
recCombined = recCombined.append(rec)
prsaRecCombined = prsaRecCombined.append(prsaRec)
# Going to use prsaRecCombined for ecc-p plots to account for all filters
eccRec.append(prsaRec['e'].values)
pRec.append(prsaRec['p'].values)
if (filt == 'all'):
recCombined.drop_duplicates(inplace=True)
prsaRecCombined.drop_duplicates(inplace=True)
if (Nrec >= Nlim):
m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
ehRec0, eb = np.histogram(rec["e"], bins=ebins)
lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
dhRec0, db = np.histogram(rec["d"], bins=dbins)
maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
m1hRec[filt] += m1hRec0/Nall*Nmult
qhRec[filt] += qhRec0/Nall*Nmult
ehRec[filt] += ehRec0/Nall*Nmult
lphRec[filt] += lphRec0/Nall*Nmult
dhRec[filt] += dhRec0/Nall*Nmult
maghRec[filt] += maghRec0/Nall*Nmult
rhRec[filt] += rhRec0/Nall*Nmult
#for the mollweide
if (filt == 'all'):
Nrec = len(recCombined.index)
rF = Nrec/Nall
rN = Nrec/Nall*Nmult
raN = Nmult
obN = Nobs/Nall*Nmult
fiN = Nall
fioN = Nobs
firN = Nrec
NrecPrsa = len(prsaRecCombined.index)
NrecPrsa = NrecPrsa/Nall*Nmult
NobsPrsa = NobsPrsa/Nall*Nmult
NallPrsa = NallPrsa/Nall*Nmult
recFrac.append(rF)
recN.append(rN)
rawN.append(raN)
obsN.append(obN)
fileN.append(fiN)
fileObsN.append(fioN)
fileRecN.append(firN)
allNPrsa.append(NallPrsa)
obsNPrsa.append(NobsPrsa)
recNPrsa.append(NrecPrsa)
#print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
# Concatenating p and ecc lists
eccAll = np.concatenate(eccAll)
eccObs = np.concatenate(eccObs)
eccRec = np.concatenate(eccRec)
pAll = np.concatenate(pAll)
pObs = np.concatenate(pObs)
pRec = np.concatenate(pRec)
# print('Ecc lists:', eccAll, eccObs, eccRec)
# print('P lists:', pAll, pObs, pRec)
# Appending lists with all the p/ecc values to our dataframes
# All dataframe
peccAll['e'] = eccAll
peccAll['p'] = pAll
# Observable dataframe
peccObs['e'] = eccObs
peccObs['p'] = pObs
# Recovered dataframe
peccRec['e'] = eccRec
peccRec['p'] = pRec
# print('Final Dataframes:', peccAll, peccObs, peccRec)
# print(peccRec.columns)
# 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])
#plot and save the histograms
saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')
#make the mollweide
coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
RAwrap = coords.ra.wrap_at(180.*units.degree).degree
Decwrap = coords.dec.wrap_at(180.*units.degree).degree
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'% recovered')
f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'log10(N) recovered')
f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight')
if (doIndividualPlots):
fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')
fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight')
fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight')
flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')
fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight')
fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight')
frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight')
print("###################")
print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN)))
print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN)))
print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN)))
print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.)
print("###################")
print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN)))
print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN)))
print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN)))
print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.)
print("###################")
print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
|
[
"numpy.log10",
"numpy.sqrt",
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"numpy.histogram",
"astropy.modeling.models.PowerLaw1D",
"os.listdir",
"numpy.diff",
"numpy.concatenate",
"pandas.DataFrame",
"matplotlib.use",
"scipy.integrate.quad",
"astropy.modeling.fitting.LevMarLSQFitter",
"numpy.insert",
"astropy.coordinates.SkyCoord",
"numpy.sum",
"numpy.ma.log10",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((710, 731), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (724, 731), False, 'import matplotlib\n'), ((1380, 1431), 'astropy.modeling.models.PowerLaw1D', 'models.PowerLaw1D', ([], {'amplitude': '(0.5)', 'x_0': '(1)', 'alpha': '(-1.0)'}), '(amplitude=0.5, x_0=1, alpha=-1.0)\n', (1397, 1431), False, 'from astropy.modeling import models, fitting\n'), ((1441, 1466), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (1464, 1466), False, 'from astropy.modeling import models, fitting\n'), ((1895, 1936), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)', 'sharex': '(True)'}), '(figsize=(8, 6), sharex=True)\n', (1907, 1936), True, 'from matplotlib import pyplot as plt\n'), ((1988, 2012), 'numpy.insert', 'np.insert', (['histAll', '(0)', '(0)'], {}), '(histAll, 0, 0)\n', (1997, 2012), True, 'import numpy as np\n'), ((2022, 2046), 'numpy.insert', 'np.insert', (['histObs', '(0)', '(0)'], {}), '(histObs, 0, 0)\n', (2031, 2046), True, 'import numpy as np\n'), ((3834, 3858), 'scipy.integrate.quad', 'quad', (['RagNormal', '(-20)', '(20)'], {}), '(RagNormal, -20, 20)\n', (3838, 3858), False, 'from scipy.integrate import quad\n'), ((4396, 4432), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.1)'], {'dtype': '"""float"""'}), "(0, 10, 0.1, dtype='float')\n", (4405, 4432), True, 'import numpy as np\n'), ((4441, 4476), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {'dtype': '"""float"""'}), "(0, 1, 0.1, dtype='float')\n", (4450, 4476), True, 'import numpy as np\n'), ((4485, 4524), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {'dtype': '"""float"""'}), "(0, 1.05, 0.05, dtype='float')\n", (4494, 4524), True, 'import numpy as np\n'), ((4535, 4572), 'numpy.arange', 'np.arange', (['(-2)', '(10)', '(0.5)'], {'dtype': '"""float"""'}), "(-2, 10, 0.5, dtype='float')\n", (4544, 4572), True, 'import numpy as np\n'), ((4582, 4616), 'numpy.arange', 'np.arange', (['(0)', '(40)', '(1)'], {'dtype': '"""float"""'}), "(0, 40, 1, dtype='float')\n", (4591, 4616), True, 'import numpy as np\n'), ((4628, 4663), 'numpy.arange', 'np.arange', (['(11)', '(25)', '(1)'], {'dtype': '"""float"""'}), "(11, 25, 1, dtype='float')\n", (4637, 4663), True, 'import numpy as np\n'), ((4673, 4710), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(0.2)'], {'dtype': '"""float"""'}), "(0, 100, 0.2, dtype='float')\n", (4682, 4710), True, 'import numpy as np\n'), ((6161, 6193), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['e', 'p']"}), "(columns=['e', 'p'])\n", (6173, 6193), True, 'import pandas as pd\n'), ((6207, 6239), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['e', 'p']"}), "(columns=['e', 'p'])\n", (6219, 6239), True, 'import pandas as pd\n'), ((6253, 6285), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['e', 'p']"}), "(columns=['e', 'p'])\n", (6265, 6285), True, 'import pandas as pd\n'), ((6367, 6380), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (6377, 6380), False, 'import os\n'), ((13000, 13022), 'numpy.concatenate', 'np.concatenate', (['eccAll'], {}), '(eccAll)\n', (13014, 13022), True, 'import numpy as np\n'), ((13033, 13055), 'numpy.concatenate', 'np.concatenate', (['eccObs'], {}), '(eccObs)\n', (13047, 13055), True, 'import numpy as np\n'), ((13066, 13088), 'numpy.concatenate', 'np.concatenate', (['eccRec'], {}), '(eccRec)\n', (13080, 13088), True, 'import numpy as np\n'), ((13098, 13118), 'numpy.concatenate', 'np.concatenate', (['pAll'], {}), '(pAll)\n', (13112, 13118), True, 'import numpy as 
np\n'), ((13127, 13147), 'numpy.concatenate', 'np.concatenate', (['pObs'], {}), '(pObs)\n', (13141, 13147), True, 'import numpy as np\n'), ((13156, 13176), 'numpy.concatenate', 'np.concatenate', (['pRec'], {}), '(pRec)\n', (13170, 13176), True, 'import numpy as np\n'), ((14422, 14488), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['RA', 'Dec'], {'unit': '(units.degree, units.degree)', 'frame': '"""icrs"""'}), "(RA, Dec, unit=(units.degree, units.degree), frame='icrs')\n", (14430, 14488), False, 'from astropy.coordinates import SkyCoord\n'), ((14729, 14797), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'mollweide'}", 'figsize': '(8, 5)'}), "(subplot_kw={'projection': 'mollweide'}, figsize=(8, 5))\n", (14741, 14797), True, 'from matplotlib import pyplot as plt\n'), ((15385, 15453), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'mollweide'}", 'figsize': '(8, 5)'}), "(subplot_kw={'projection': 'mollweide'}, figsize=(8, 5))\n", (15397, 15453), True, 'from matplotlib import pyplot as plt\n'), ((2079, 2106), 'numpy.insert', 'np.insert', (['histRec[f]', '(0)', '(0)'], {}), '(histRec[f], 0, 0)\n', (2088, 2106), True, 'import numpy as np\n'), ((3895, 3915), 'numpy.log10', 'np.log10', (['(365 * 10.0)'], {}), '(365 * 10.0)\n', (3903, 3915), True, 'import numpy as np\n'), ((4137, 4151), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4149, 4151), True, 'from matplotlib import pyplot as plt\n'), ((4170, 4184), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4182, 4184), True, 'from matplotlib import pyplot as plt\n'), ((4201, 4215), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4213, 4215), True, 'from matplotlib import pyplot as plt\n'), ((4234, 4248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4246, 4248), True, 'from matplotlib import pyplot as plt\n'), ((4267, 4281), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4279, 4281), True, 'from matplotlib import pyplot as plt\n'), ((4298, 4312), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4310, 4312), True, 'from matplotlib import pyplot as plt\n'), ((4329, 4343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4341, 4343), True, 'from matplotlib import pyplot as plt\n'), ((4756, 4776), 'numpy.zeros_like', 'np.zeros_like', (['mbins'], {}), '(mbins)\n', (4769, 4776), True, 'import numpy as np\n'), ((4790, 4810), 'numpy.zeros_like', 'np.zeros_like', (['qbins'], {}), '(qbins)\n', (4803, 4810), True, 'import numpy as np\n'), ((4824, 4844), 'numpy.zeros_like', 'np.zeros_like', (['ebins'], {}), '(ebins)\n', (4837, 4844), True, 'import numpy as np\n'), ((4859, 4880), 'numpy.zeros_like', 'np.zeros_like', (['lpbins'], {}), '(lpbins)\n', (4872, 4880), True, 'import numpy as np\n'), ((4894, 4914), 'numpy.zeros_like', 'np.zeros_like', (['dbins'], {}), '(dbins)\n', (4907, 4914), True, 'import numpy as np\n'), ((4930, 4952), 'numpy.zeros_like', 'np.zeros_like', (['magbins'], {}), '(magbins)\n', (4943, 4952), True, 'import numpy as np\n'), ((4966, 4986), 'numpy.zeros_like', 'np.zeros_like', (['rbins'], {}), '(rbins)\n', (4979, 4986), True, 'import numpy as np\n'), ((5014, 5034), 'numpy.zeros_like', 'np.zeros_like', (['mbins'], {}), '(mbins)\n', (5027, 5034), True, 'import numpy as np\n'), ((5048, 5068), 'numpy.zeros_like', 'np.zeros_like', (['qbins'], {}), '(qbins)\n', (5061, 5068), True, 'import numpy as np\n'), ((5082, 5102), 'numpy.zeros_like', 'np.zeros_like', 
(['ebins'], {}), '(ebins)\n', (5095, 5102), True, 'import numpy as np\n'), ((5117, 5138), 'numpy.zeros_like', 'np.zeros_like', (['lpbins'], {}), '(lpbins)\n', (5130, 5138), True, 'import numpy as np\n'), ((5152, 5172), 'numpy.zeros_like', 'np.zeros_like', (['dbins'], {}), '(dbins)\n', (5165, 5172), True, 'import numpy as np\n'), ((5188, 5210), 'numpy.zeros_like', 'np.zeros_like', (['magbins'], {}), '(magbins)\n', (5201, 5210), True, 'import numpy as np\n'), ((5224, 5244), 'numpy.zeros_like', 'np.zeros_like', (['rbins'], {}), '(rbins)\n', (5237, 5244), True, 'import numpy as np\n'), ((16721, 16734), 'numpy.sum', 'np.sum', (['fileN'], {}), '(fileN)\n', (16727, 16734), True, 'import numpy as np\n'), ((16823, 16839), 'numpy.sum', 'np.sum', (['fileObsN'], {}), '(fileObsN)\n', (16829, 16839), True, 'import numpy as np\n'), ((16934, 16950), 'numpy.sum', 'np.sum', (['fileRecN'], {}), '(fileRecN)\n', (16940, 16950), True, 'import numpy as np\n'), ((17134, 17146), 'numpy.sum', 'np.sum', (['rawN'], {}), '(rawN)\n', (17140, 17146), True, 'import numpy as np\n'), ((17210, 17222), 'numpy.sum', 'np.sum', (['obsN'], {}), '(obsN)\n', (17216, 17222), True, 'import numpy as np\n'), ((17285, 17297), 'numpy.sum', 'np.sum', (['recN'], {}), '(recN)\n', (17291, 17297), True, 'import numpy as np\n'), ((17482, 17498), 'numpy.sum', 'np.sum', (['allNPrsa'], {}), '(allNPrsa)\n', (17488, 17498), True, 'import numpy as np\n'), ((17601, 17617), 'numpy.sum', 'np.sum', (['obsNPrsa'], {}), '(obsNPrsa)\n', (17607, 17617), True, 'import numpy as np\n'), ((17719, 17735), 'numpy.sum', 'np.sum', (['recNPrsa'], {}), '(recNPrsa)\n', (17725, 17735), True, 'import numpy as np\n'), ((2141, 2156), 'numpy.sum', 'np.sum', (['histAll'], {}), '(histAll)\n', (2147, 2156), True, 'import numpy as np\n'), ((2197, 2212), 'numpy.sum', 'np.sum', (['histObs'], {}), '(histObs)\n', (2203, 2212), True, 'import numpy as np\n'), ((2315, 2333), 'numpy.sum', 'np.sum', (['histRec[f]'], {}), '(histRec[f])\n', (2321, 2333), True, 'import numpy as np\n'), ((5410, 5430), 'numpy.zeros_like', 'np.zeros_like', (['mbins'], {}), '(mbins)\n', (5423, 5430), True, 'import numpy as np\n'), ((5448, 5468), 'numpy.zeros_like', 'np.zeros_like', (['qbins'], {}), '(qbins)\n', (5461, 5468), True, 'import numpy as np\n'), ((5486, 5506), 'numpy.zeros_like', 'np.zeros_like', (['ebins'], {}), '(ebins)\n', (5499, 5506), True, 'import numpy as np\n'), ((5525, 5546), 'numpy.zeros_like', 'np.zeros_like', (['lpbins'], {}), '(lpbins)\n', (5538, 5546), True, 'import numpy as np\n'), ((5564, 5584), 'numpy.zeros_like', 'np.zeros_like', (['dbins'], {}), '(dbins)\n', (5577, 5584), True, 'import numpy as np\n'), ((5604, 5626), 'numpy.zeros_like', 'np.zeros_like', (['magbins'], {}), '(magbins)\n', (5617, 5626), True, 'import numpy as np\n'), ((5644, 5664), 'numpy.zeros_like', 'np.zeros_like', (['rbins'], {}), '(rbins)\n', (5657, 5664), True, 'import numpy as np\n'), ((6528, 6555), 'pandas.read_csv', 'pd.read_csv', (['(d + f)'], {'nrows': '(1)'}), '(d + f, nrows=1)\n', (6539, 6555), True, 'import pandas as pd\n'), ((16745, 16758), 'numpy.sum', 'np.sum', (['fileN'], {}), '(fileN)\n', (16751, 16758), True, 'import numpy as np\n'), ((16850, 16866), 'numpy.sum', 'np.sum', (['fileObsN'], {}), '(fileObsN)\n', (16856, 16866), True, 'import numpy as np\n'), ((16961, 16977), 'numpy.sum', 'np.sum', (['fileRecN'], {}), '(fileRecN)\n', (16967, 16977), True, 'import numpy as np\n'), ((17157, 17169), 'numpy.sum', 'np.sum', (['rawN'], {}), '(rawN)\n', (17163, 17169), True, 'import numpy as np\n'), ((17233, 
17245), 'numpy.sum', 'np.sum', (['obsN'], {}), '(obsN)\n', (17239, 17245), True, 'import numpy as np\n'), ((17308, 17320), 'numpy.sum', 'np.sum', (['recN'], {}), '(recN)\n', (17314, 17320), True, 'import numpy as np\n'), ((17509, 17525), 'numpy.sum', 'np.sum', (['allNPrsa'], {}), '(allNPrsa)\n', (17515, 17525), True, 'import numpy as np\n'), ((17628, 17644), 'numpy.sum', 'np.sum', (['obsNPrsa'], {}), '(obsNPrsa)\n', (17634, 17644), True, 'import numpy as np\n'), ((17746, 17762), 'numpy.sum', 'np.sum', (['recNPrsa'], {}), '(recNPrsa)\n', (17752, 17762), True, 'import numpy as np\n'), ((7455, 7491), 'numpy.histogram', 'np.histogram', (["data['m1']"], {'bins': 'mbins'}), "(data['m1'], bins=mbins)\n", (7467, 7491), True, 'import numpy as np\n'), ((7509, 7558), 'numpy.histogram', 'np.histogram', (["(data['m2'] / data['m1'])"], {'bins': 'qbins'}), "(data['m2'] / data['m1'], bins=qbins)\n", (7521, 7558), True, 'import numpy as np\n'), ((7574, 7609), 'numpy.histogram', 'np.histogram', (["data['e']"], {'bins': 'ebins'}), "(data['e'], bins=ebins)\n", (7586, 7609), True, 'import numpy as np\n'), ((7716, 7751), 'numpy.histogram', 'np.histogram', (["data['d']"], {'bins': 'dbins'}), "(data['d'], bins=dbins)\n", (7728, 7751), True, 'import numpy as np\n'), ((7773, 7821), 'numpy.histogram', 'np.histogram', (["data['appMagMean_r']"], {'bins': 'magbins'}), "(data['appMagMean_r'], bins=magbins)\n", (7785, 7821), True, 'import numpy as np\n'), ((7839, 7888), 'numpy.histogram', 'np.histogram', (["(data['r2'] / data['r1'])"], {'bins': 'rbins'}), "(data['r2'] / data['r1'], bins=rbins)\n", (7851, 7888), True, 'import numpy as np\n'), ((8528, 8540), 'numpy.diff', 'np.diff', (['m1b'], {}), '(m1b)\n', (8535, 8540), True, 'import numpy as np\n'), ((15176, 15193), 'numpy.array', 'np.array', (['recFrac'], {}), '(recFrac)\n', (15184, 15193), True, 'import numpy as np\n'), ((15833, 15847), 'numpy.array', 'np.array', (['recN'], {}), '(recN)\n', (15841, 15847), True, 'import numpy as np\n'), ((17027, 17043), 'numpy.sum', 'np.sum', (['fileRecN'], {}), '(fileRecN)\n', (17033, 17043), True, 'import numpy as np\n'), ((17044, 17060), 'numpy.sum', 'np.sum', (['fileObsN'], {}), '(fileObsN)\n', (17050, 17060), True, 'import numpy as np\n'), ((17358, 17370), 'numpy.sum', 'np.sum', (['recN'], {}), '(recN)\n', (17364, 17370), True, 'import numpy as np\n'), ((17371, 17383), 'numpy.sum', 'np.sum', (['obsN'], {}), '(obsN)\n', (17377, 17383), True, 'import numpy as np\n'), ((17812, 17828), 'numpy.sum', 'np.sum', (['recNPrsa'], {}), '(recNPrsa)\n', (17818, 17828), True, 'import numpy as np\n'), ((17829, 17845), 'numpy.sum', 'np.sum', (['obsNPrsa'], {}), '(obsNPrsa)\n', (17835, 17845), True, 'import numpy as np\n'), ((1005, 1017), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1012, 1017), True, 'import numpy as np\n'), ((6833, 6861), 'pandas.read_csv', 'pd.read_csv', (['(d + f)'], {'header': '(2)'}), '(d + f, header=2)\n', (6844, 6861), True, 'import pandas as pd\n'), ((8771, 8784), 'numpy.log10', 'np.log10', (['Phs'], {}), '(Phs)\n', (8779, 8784), True, 'import numpy as np\n'), ((9512, 9547), 'numpy.histogram', 'np.histogram', (["obs['m1']"], {'bins': 'mbins'}), "(obs['m1'], bins=mbins)\n", (9524, 9547), True, 'import numpy as np\n'), ((9566, 9613), 'numpy.histogram', 'np.histogram', (["(obs['m2'] / obs['m1'])"], {'bins': 'qbins'}), "(obs['m2'] / obs['m1'], bins=qbins)\n", (9578, 9613), True, 'import numpy as np\n'), ((9630, 9664), 'numpy.histogram', 'np.histogram', (["obs['e']"], {'bins': 'ebins'}), "(obs['e'], bins=ebins)\n", 
(9642, 9664), True, 'import numpy as np\n'), ((9772, 9806), 'numpy.histogram', 'np.histogram', (["obs['d']"], {'bins': 'dbins'}), "(obs['d'], bins=dbins)\n", (9784, 9806), True, 'import numpy as np\n'), ((9829, 9876), 'numpy.histogram', 'np.histogram', (["obs['appMagMean_r']"], {'bins': 'magbins'}), "(obs['appMagMean_r'], bins=magbins)\n", (9841, 9876), True, 'import numpy as np\n'), ((9895, 9942), 'numpy.histogram', 'np.histogram', (["(obs['r2'] / obs['r1'])"], {'bins': 'rbins'}), "(obs['r2'] / obs['r1'], bins=rbins)\n", (9907, 9942), True, 'import numpy as np\n'), ((10203, 10217), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10215, 10217), True, 'import pandas as pd\n'), ((10241, 10255), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10253, 10255), True, 'import pandas as pd\n'), ((15099, 15115), 'numpy.array', 'np.array', (['RAwrap'], {}), '(RAwrap)\n', (15107, 15115), True, 'import numpy as np\n'), ((15136, 15153), 'numpy.array', 'np.array', (['Decwrap'], {}), '(Decwrap)\n', (15144, 15153), True, 'import numpy as np\n'), ((15747, 15763), 'numpy.array', 'np.array', (['RAwrap'], {}), '(RAwrap)\n', (15755, 15763), True, 'import numpy as np\n'), ((15784, 15801), 'numpy.array', 'np.array', (['Decwrap'], {}), '(Decwrap)\n', (15792, 15801), True, 'import numpy as np\n'), ((7642, 7671), 'numpy.ma.log10', 'np.ma.log10', (["data['p'].values"], {}), "(data['p'].values)\n", (7653, 7671), True, 'import numpy as np\n'), ((7952, 7967), 'numpy.sum', 'np.sum', (['m1hAll0'], {}), '(m1hAll0)\n', (7958, 7967), True, 'import numpy as np\n'), ((8029, 8043), 'numpy.sum', 'np.sum', (['qhAll0'], {}), '(qhAll0)\n', (8035, 8043), True, 'import numpy as np\n'), ((8104, 8118), 'numpy.sum', 'np.sum', (['ehAll0'], {}), '(ehAll0)\n', (8110, 8118), True, 'import numpy as np\n'), ((8182, 8197), 'numpy.sum', 'np.sum', (['lphAll0'], {}), '(lphAll0)\n', (8188, 8197), True, 'import numpy as np\n'), ((8259, 8273), 'numpy.sum', 'np.sum', (['dhAll0'], {}), '(dhAll0)\n', (8265, 8273), True, 'import numpy as np\n'), ((8338, 8354), 'numpy.sum', 'np.sum', (['maghAll0'], {}), '(maghAll0)\n', (8344, 8354), True, 'import numpy as np\n'), ((8415, 8429), 'numpy.sum', 'np.sum', (['rhAll0'], {}), '(rhAll0)\n', (8421, 8429), True, 'import numpy as np\n'), ((11393, 11428), 'numpy.histogram', 'np.histogram', (["rec['m1']"], {'bins': 'mbins'}), "(rec['m1'], bins=mbins)\n", (11405, 11428), True, 'import numpy as np\n'), ((11449, 11496), 'numpy.histogram', 'np.histogram', (["(rec['m2'] / rec['m1'])"], {'bins': 'qbins'}), "(rec['m2'] / rec['m1'], bins=qbins)\n", (11461, 11496), True, 'import numpy as np\n'), ((11515, 11549), 'numpy.histogram', 'np.histogram', (["rec['e']"], {'bins': 'ebins'}), "(rec['e'], bins=ebins)\n", (11527, 11549), True, 'import numpy as np\n'), ((11661, 11695), 'numpy.histogram', 'np.histogram', (["rec['d']"], {'bins': 'dbins'}), "(rec['d'], bins=dbins)\n", (11673, 11695), True, 'import numpy as np\n'), ((11720, 11767), 'numpy.histogram', 'np.histogram', (["rec['appMagMean_r']"], {'bins': 'magbins'}), "(rec['appMagMean_r'], bins=magbins)\n", (11732, 11767), True, 'import numpy as np\n'), ((11788, 11835), 'numpy.histogram', 'np.histogram', (["(rec['r2'] / rec['r1'])"], {'bins': 'rbins'}), "(rec['r2'] / rec['r1'], bins=rbins)\n", (11800, 11835), True, 'import numpy as np\n'), ((9698, 9726), 'numpy.ma.log10', 'np.ma.log10', (["obs['p'].values"], {}), "(obs['p'].values)\n", (9709, 9726), True, 'import numpy as np\n'), ((11585, 11613), 'numpy.ma.log10', 'np.ma.log10', (["rec['p'].values"], {}), 
"(rec['p'].values)\n", (11596, 11613), True, 'import numpy as np\n')]
|
import numpy as np
from itertools import product
from typing import List
from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move
def get_all_possible_moves() -> List[Move]:
all_possible_moves = set()
array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8")
for i, j, piece in product(
range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"]
):
array[i][j] = Board.piece_symbol_to_int(piece)
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[i][j] = 0
# underpromotion moves
array[1, :] = Board.piece_symbol_to_int("P")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[0, :] = Board.piece_symbol_to_int("p")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
    # no need to add castling moves: they have already been added with queen moves under UCI notation
return sorted(list(all_possible_moves))
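# The sorted list returned above is typically used to give every move a fixed
# index, e.g. for encoding a policy vector. The helper below is a sketch of
# that usage; its name is illustrative and not part of the src.* packages.
def get_move_index_mapping() -> dict:
    """Map each possible move to its index in the sorted move list."""
    return {move: index for index, move in enumerate(get_all_possible_moves())}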
|
[
"numpy.zeros",
"src.chess.board.Board.piece_symbol_to_int",
"src.chess.board.Board"
] |
[((715, 745), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', (['"""P"""'], {}), "('P')\n", (740, 745), False, 'from src.chess.board import Board\n'), ((885, 915), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', (['"""p"""'], {}), "('p')\n", (910, 915), False, 'from src.chess.board import Board\n'), ((480, 512), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', (['piece'], {}), '(piece)\n', (505, 512), False, 'from src.chess.board import Board\n'), ((264, 322), 'numpy.zeros', 'np.zeros', (['(ConfigChess.board_size, ConfigChess.board_size)'], {}), '((ConfigChess.board_size, ConfigChess.board_size))\n', (272, 322), True, 'import numpy as np\n'), ((828, 846), 'src.chess.board.Board', 'Board', ([], {'array': 'array'}), '(array=array)\n', (833, 846), False, 'from src.chess.board import Board\n'), ((998, 1016), 'src.chess.board.Board', 'Board', ([], {'array': 'array'}), '(array=array)\n', (1003, 1016), False, 'from src.chess.board import Board\n'), ((603, 621), 'src.chess.board.Board', 'Board', ([], {'array': 'array'}), '(array=array)\n', (608, 621), False, 'from src.chess.board import Board\n')]
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
"""
Return a 3D image with a sphere inside. Voxel values will be
1 inside the sphere, and 0 elsewhere.
Args:
radius: radius of sphere (in terms of number of voxels, can be partial)
centre: location of sphere centre.
im_shape: shape of image to create
See also:
:py:meth:`~create_test_image_3d`
"""
# Create image
image = np.zeros(im_shape, dtype=np.int32)
spy, spx, spz = np.ogrid[
-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
]
circle = (spx * spx + spy * spy + spz * spz) <= radius * radius
image[circle] = 1
image[~circle] = 0
return image
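# A quick sanity check for the generator above (added as an illustrative
# sketch, not part of the original tests): the number of foreground voxels
# should approximate the analytic sphere volume 4/3*pi*r^3, which is roughly
# 33510 voxels for radius 20.
def _approximate_sphere_volume_ratio(radius: float = 20.0) -> float:
    """Return the ratio of foreground voxels to the analytic sphere volume."""
    seg = create_spherical_seg_3d(radius=radius)
    return float(seg.sum()) / (4.0 / 3.0 * np.pi * radius ** 3)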
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerSurfaceDistance(unittest.TestCase):
# TODO test multi node Surface Distance
def test_compute(self):
sur_metric = SurfaceDistance(include_background=True)
def _val_func(engine, batch):
pass
engine = Engine(_val_func)
sur_metric.attach(engine, "surface_distance")
y_pred, y = TEST_SAMPLE_1
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4)
y_pred, y = TEST_SAMPLE_2
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4)
y_pred, y = TEST_SAMPLE_3
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
y_pred, y = TEST_SAMPLE_4
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
def test_shape_mismatch(self):
sur_metric = SurfaceDistance(include_background=True)
with self.assertRaises((AssertionError, ValueError)):
y_pred = TEST_SAMPLE_1[0]
y = torch.ones((1, 1, 10, 10, 10))
sur_metric.update([y_pred, y])
if __name__ == "__main__":
unittest.main()
|
[
"monai.handlers.SurfaceDistance",
"ignite.engine.Engine",
"numpy.zeros",
"unittest.main",
"torch.zeros_like",
"torch.ones"
] |
[((1895, 1927), 'torch.zeros_like', 'torch.zeros_like', (['sampler_sphere'], {}), '(sampler_sphere)\n', (1911, 1927), False, 'import torch\n'), ((1285, 1319), 'numpy.zeros', 'np.zeros', (['im_shape'], {'dtype': 'np.int32'}), '(im_shape, dtype=np.int32)\n', (1293, 1319), True, 'import numpy as np\n'), ((3383, 3398), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3396, 3398), False, 'import unittest\n'), ((2304, 2344), 'monai.handlers.SurfaceDistance', 'SurfaceDistance', ([], {'include_background': '(True)'}), '(include_background=True)\n', (2319, 2344), False, 'from monai.handlers import SurfaceDistance\n'), ((2419, 2436), 'ignite.engine.Engine', 'Engine', (['_val_func'], {}), '(_val_func)\n', (2425, 2436), False, 'from ignite.engine import Engine\n'), ((3119, 3159), 'monai.handlers.SurfaceDistance', 'SurfaceDistance', ([], {'include_background': '(True)'}), '(include_background=True)\n', (3134, 3159), False, 'from monai.handlers import SurfaceDistance\n'), ((3276, 3306), 'torch.ones', 'torch.ones', (['(1, 1, 10, 10, 10)'], {}), '((1, 1, 10, 10, 10))\n', (3286, 3306), False, 'import torch\n')]
|
#!/usr/bin/env python3
import sys
import logging
import yaml
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.impute import SimpleImputer
from anoflows.hpo import find_best_flows
from data_loading import load_data
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) == 1:
logging.error("YAML data specification missing from the command line arguments")
exit(1)
spec_file = sys.argv[1]
df, spec = load_data(spec_file)
max_rows = min(len(df), spec.get("max_rows", 40000))
novelty_detection = spec.get("novelty", True)
normal_classes = spec["normal_classes"]
precision = defaultdict(list)
for rounds in range(spec.get("rounds", 1)):
# random sampling
df = df.sample(n=max_rows, replace=False)
label_col = spec["label_column"]
y = df[label_col].values
other = df.drop(label_col, inplace=False, axis=1)
X = other.values
# imputing
X = SimpleImputer(copy=False).fit_transform(X)
# train/test split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, shuffle=False, test_size=0.5)
if novelty_detection:
keep = np.where(np.isin(y_train, normal_classes))[0]
X_train = X_train[keep, :]
y_train = y_train[keep]
# training
#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)
from anoflows.anoflow_bagging import AnoFlowBagging
flows = AnoFlowBagging()
flows.fit(X_train)
iforest = IsolationForest().fit(X_train)
# prediction
pred = {
"anoflows": flows.likelihood(X_test),
"iforest": iforest.decision_function(X_test)
}
# evaluation
y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0]
ref = np.zeros(len(y_test))
ref[y_true] = 1
k = len(y_true)
for name, y_pred in pred.items():
anomaly_indices = y_pred.argsort()[:k]
prec = ref[anomaly_indices].sum() / k
logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test)))
precision[name].append(prec)
logging.info("* SUMMARY %s", spec_file)
for name, prec in precision.items():
prec = 100 * np.array(prec)
mean = np.mean(prec)
std = np.std(prec)
logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
|
[
"logging.getLogger",
"numpy.mean",
"anoflows.anoflow_bagging.AnoFlowBagging",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.IsolationForest",
"logging.info",
"numpy.isin",
"numpy.array",
"collections.defaultdict",
"sklearn.impute.SimpleImputer",
"numpy.std",
"logging.error",
"data_loading.load_data"
] |
[((556, 576), 'data_loading.load_data', 'load_data', (['spec_file'], {}), '(spec_file)\n', (565, 576), False, 'from data_loading import load_data\n'), ((729, 746), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (740, 746), False, 'from collections import defaultdict\n'), ((2144, 2183), 'logging.info', 'logging.info', (['"""* SUMMARY %s"""', 'spec_file'], {}), "('* SUMMARY %s', spec_file)\n", (2156, 2183), False, 'import logging\n'), ((427, 512), 'logging.error', 'logging.error', (['"""YAML data specification missing from the command line arguments"""'], {}), "('YAML data specification missing from the command line arguments'\n )\n", (440, 512), False, 'import logging\n'), ((1141, 1193), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'shuffle': '(False)', 'test_size': '(0.5)'}), '(X, y, shuffle=False, test_size=0.5)\n', (1157, 1193), False, 'from sklearn.model_selection import train_test_split\n'), ((1502, 1518), 'anoflows.anoflow_bagging.AnoFlowBagging', 'AnoFlowBagging', ([], {}), '()\n', (1516, 1518), False, 'from anoflows.anoflow_bagging import AnoFlowBagging\n'), ((2264, 2277), 'numpy.mean', 'np.mean', (['prec'], {}), '(prec)\n', (2271, 2277), True, 'import numpy as np\n'), ((2288, 2300), 'numpy.std', 'np.std', (['prec'], {}), '(prec)\n', (2294, 2300), True, 'import numpy as np\n'), ((2305, 2367), 'logging.info', 'logging.info', (["('%s; mean=%.1f%% std=%.1f%%' % (name, mean, std))"], {}), "('%s; mean=%.1f%% std=%.1f%%' % (name, mean, std))\n", (2317, 2367), False, 'import logging\n'), ((356, 375), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (373, 375), False, 'import logging\n'), ((2238, 2252), 'numpy.array', 'np.array', (['prec'], {}), '(prec)\n', (2246, 2252), True, 'import numpy as np\n'), ((1025, 1050), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'copy': '(False)'}), '(copy=False)\n', (1038, 1050), False, 'from sklearn.impute import SimpleImputer\n'), ((1556, 1573), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {}), '()\n', (1571, 1573), False, 'from sklearn.ensemble import IsolationForest\n'), ((1763, 1803), 'numpy.isin', 'np.isin', (['y_test', "spec['anomaly_classes']"], {}), "(y_test, spec['anomaly_classes'])\n", (1770, 1803), True, 'import numpy as np\n'), ((1244, 1276), 'numpy.isin', 'np.isin', (['y_train', 'normal_classes'], {}), '(y_train, normal_classes)\n', (1251, 1276), True, 'import numpy as np\n')]
|
"""Helper functions to tests."""
import numpy as np
def norm(vs: np.array) -> float:
"""Compute the norm of a vector."""
return np.sqrt(np.dot(vs, vs))
def create_random_matrix(size: int) -> np.array:
"""Create a numpy random matrix."""
return np.random.normal(size=size ** 2).reshape(size, size)
def create_symmetic_matrix(size: int) -> np.array:
"""Create a numpy symmetric matrix."""
xs = create_random_matrix(size)
return xs + xs.T
def check_eigenpairs(
matrix: np.ndarray, eigenvalues: np.ndarray,
eigenvectors: np.ndarray) -> bool:
"""Check that the eigenvalue equation holds."""
for i, value in enumerate(eigenvalues):
residue = np.dot(
matrix, eigenvectors[:, i]) - value * eigenvectors[:, i]
        assert norm(residue) < 1e-8
    return True
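# Example usage of the helpers above (an illustrative sketch, not part of the
# original test suite): diagonalize a random symmetric matrix with numpy and
# verify the resulting eigenpairs.
def example_eigh_check(size: int = 5) -> bool:
    """Diagonalize a random symmetric matrix and verify its eigenpairs."""
    xs = create_symmetic_matrix(size)
    eigenvalues, eigenvectors = np.linalg.eigh(xs)
    return check_eigenpairs(xs, eigenvalues, eigenvectors)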
|
[
"numpy.random.normal",
"numpy.dot"
] |
[((147, 161), 'numpy.dot', 'np.dot', (['vs', 'vs'], {}), '(vs, vs)\n', (153, 161), True, 'import numpy as np\n'), ((265, 297), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(size ** 2)'}), '(size=size ** 2)\n', (281, 297), True, 'import numpy as np\n'), ((705, 739), 'numpy.dot', 'np.dot', (['matrix', 'eigenvectors[:, i]'], {}), '(matrix, eigenvectors[:, i])\n', (711, 739), True, 'import numpy as np\n')]
|
#pylint:disable=no-member
import cv2 as cv
import numpy as np
img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
#
blank = np.zeros(img.shape[:2], dtype='uint8')
cv.imshow('Blank', blank)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
#
blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)
canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)
#
ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)
#
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contour(s) found!')
#
cv.drawContours(blank, contours, -1, (200,120,100), 1)
cv.imshow('Contours Drawn', blank)
cv.waitKey(0)
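# Contours can also be taken from the thresholded image instead of the Canny
# edge map; THRESH_BINARY gives filled regions rather than thin edges, so the
# counts usually differ. This comparison is only an illustrative addition.
contours_thresh, hierarchies_thresh = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours_thresh)} contour(s) found on the thresholded image!')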
|
[
"cv2.drawContours",
"cv2.threshold",
"cv2.Canny",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.findContours",
"cv2.GaussianBlur",
"cv2.imread"
] |
[((70, 188), 'cv2.imread', 'cv.imread', (['"""/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg"""'], {}), "(\n '/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg'\n )\n", (79, 188), True, 'import cv2 as cv\n'), ((179, 201), 'cv2.imshow', 'cv.imshow', (['"""Cats"""', 'img'], {}), "('Cats', img)\n", (188, 201), True, 'import cv2 as cv\n'), ((212, 250), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (220, 250), True, 'import numpy as np\n'), ((251, 276), 'cv2.imshow', 'cv.imshow', (['"""Blank"""', 'blank'], {}), "('Blank', blank)\n", (260, 276), True, 'import cv2 as cv\n'), ((285, 320), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (296, 320), True, 'import cv2 as cv\n'), ((321, 344), 'cv2.imshow', 'cv.imshow', (['"""Gray"""', 'gray'], {}), "('Gray', gray)\n", (330, 344), True, 'import cv2 as cv\n'), ((354, 402), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', 'cv.BORDER_DEFAULT'], {}), '(gray, (5, 5), cv.BORDER_DEFAULT)\n', (369, 402), True, 'import cv2 as cv\n'), ((402, 425), 'cv2.imshow', 'cv.imshow', (['"""Blur"""', 'blur'], {}), "('Blur', blur)\n", (411, 425), True, 'import cv2 as cv\n'), ((435, 459), 'cv2.Canny', 'cv.Canny', (['blur', '(125)', '(175)'], {}), '(blur, 125, 175)\n', (443, 459), True, 'import cv2 as cv\n'), ((460, 491), 'cv2.imshow', 'cv.imshow', (['"""Canny Edges"""', 'canny'], {}), "('Canny Edges', canny)\n", (469, 491), True, 'import cv2 as cv\n'), ((508, 554), 'cv2.threshold', 'cv.threshold', (['gray', '(125)', '(255)', 'cv.THRESH_BINARY'], {}), '(gray, 125, 255, cv.THRESH_BINARY)\n', (520, 554), True, 'import cv2 as cv\n'), ((555, 582), 'cv2.imshow', 'cv.imshow', (['"""Thresh"""', 'thresh'], {}), "('Thresh', thresh)\n", (564, 582), True, 'import cv2 as cv\n'), ((609, 669), 'cv2.findContours', 'cv.findContours', (['canny', 'cv.RETR_LIST', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n', (624, 669), True, 'import cv2 as cv\n'), ((716, 772), 'cv2.drawContours', 'cv.drawContours', (['blank', 'contours', '(-1)', '(200, 120, 100)', '(1)'], {}), '(blank, contours, -1, (200, 120, 100), 1)\n', (731, 772), True, 'import cv2 as cv\n'), ((771, 805), 'cv2.imshow', 'cv.imshow', (['"""Contours Drawn"""', 'blank'], {}), "('Contours Drawn', blank)\n", (780, 805), True, 'import cv2 as cv\n'), ((807, 820), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (817, 820), True, 'import cv2 as cv\n')]
|
"""Read, write, create Brainvoyager VMR file format."""
import struct
import numpy as np
from bvbabel.utils import (read_variable_length_string,
write_variable_length_string)
# =============================================================================
def read_vmr(filename):
"""Read Brainvoyager VMR file.
Parameters
----------
filename : string
Path to file.
Returns
-------
header : dictionary
Pre-data and post-data headers.
data : 3D numpy.array
Image data.
"""
header = dict()
with open(filename, 'rb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets,
# typically containing the whole brain (head) of subjects. The
# intensity values are stored as a series of bytes. See the V16 format
# for a version storing each intensity value with two bytes (short
# integers). The VMR format contains a small header followed by the
# actual data followed by a second, more extensive, header. The current
# version of VMR files is "4", which is only slightly different from
# version 3 (as indicated below). Version 3 added offset values to
# format 2 in order to represent large data sets efficiently, e.g. in
# the context of advanced segmentation processing. Compared to the
# original file version "1", file versions 2 and higher contain
# additional header information after the actual data ("post-data
# header"). This allows to read VMR data sets with minimal header
# checking if the extended information is not needed. The information
# in the post-data header contains position information (if available)
# and stores a series of spatial transformations, which might have been
# performed to the original data set ("history record"). The
# post-header data can be probably ignored for custom routines, but is
# important in BrainVoyager QX for spatial transformation and
# coregistration routines as well as for proper visualization.
# Expected binary data: unsigned short int (2 bytes)
data, = struct.unpack('<H', f.read(2))
header["File version"] = data
data, = struct.unpack('<H', f.read(2))
header["DimX"] = data
data, = struct.unpack('<H', f.read(2))
header["DimY"] = data
data, = struct.unpack('<H', f.read(2))
header["DimZ"] = data
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): Each data element (intensity value) is
# represented in 1 byte. The data is organized in three loops:
# DimZ
# DimY
# DimX
#
# The axes terminology follows the internal BrainVoyager (BV) format.
# The mapping to Talairach axes is as follows:
# BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space
# BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space
# BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space
# Expected binary data: unsigned char (1 byte)
data_img = np.zeros((header["DimZ"] * header["DimY"] * header["DimX"]),
dtype="<B")
for i in range(data_img.size):
data_img[i], = struct.unpack('<B', f.read(1))
data_img = np.reshape(
data_img, (header["DimZ"], header["DimY"], header["DimX"]))
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): The first four entries of the post-data
# header are new since file version "3" and contain offset values for
# each dimension as well as a value indicating the size of a cube with
# iso-dimensions to which the data set will be internally "expanded"
# for certain operations. The axes labels are in terms of
# BrainVoyager's internal format. These four entries are followed by
# scan position information from the original file headers, e.g. from
# DICOM files. The coordinate axes labels in these entries are not in
# terms of BrainVoyager's internal conventions but follow the DICOM
        # standard. This may be followed by a section listing spatial
        # transformations that have been performed to create the
        # current VMR (e.g. ACPC transformation). Finally, additional
        # information further describes the data set, including the assumed
# left-right convention, the reference space (e.g. Talairach after
# normalization) and voxel resolution.
if header["File version"] >= 3:
# NOTE(Developer Guide 2.6): These four entries have been added in
# file version "3" with BrainVoyager QX 1.7. All other entries are
# identical to file version "2".
# Expected binary data: short int (2 bytes)
data, = struct.unpack('<h', f.read(2))
header["OffsetX"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetY"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetZ"] = data
data, = struct.unpack('<h', f.read(2))
header["FramingCubeDim"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PosInfosVerified"] = data
data, = struct.unpack('<i', f.read(4))
header["CoordinateSystem"] = data
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterX"] = data # First slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterY"] = data # First slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterZ"] = data # First slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterX"] = data # Last slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterY"] = data # Last slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterZ"] = data # Last slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["RowDirX"] = data # Slice row direction vector X component
data, = struct.unpack('<f', f.read(4))
header["RowDirY"] = data # Slice row direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["RowDirZ"] = data # Slice row direction vector Z component
data, = struct.unpack('<f', f.read(4))
header["ColDirX"] = data # Slice column direction vector X component
data, = struct.unpack('<f', f.read(4))
header["ColDirY"] = data # Slice column direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["ColDirZ"] = data # Slice column direction vector Z component
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NRows"] = data # Nr of rows of slice image matrix
data, = struct.unpack('<i', f.read(4))
header["NCols"] = data # Nr of columns of slice image matrix
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["FoVRows"] = data # Field of view extent in row direction [mm]
data, = struct.unpack('<f', f.read(4))
header["FoVCols"] = data # Field of view extent in column dir. [mm]
data, = struct.unpack('<f', f.read(4))
header["SliceThickness"] = data # Slice thickness [mm]
data, = struct.unpack('<f', f.read(4))
header["GapThickness"] = data # Gap thickness [mm]
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NrOfPastSpatialTransformations"] = data
if header["NrOfPastSpatialTransformations"] != 0:
# NOTE(Developer Guide 2.6): For each past transformation, the
# information specified in the following table is stored. The
# "type of transformation" is a value determining how many
# subsequent values define the transformation:
# "1": Rigid body+scale (3 translation, 3 rotation, 3 scale)
# "2": Affine transformation (16 values, 4x4 matrix)
# "4": Talairach transformation
# "5": Un-Talairach transformation (1 - 5 -> BV axes)
header["PastTransformation"] = []
for i in range(header["NrOfPastSpatialTransformations"]):
header["PastTransformation"].append(dict())
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["Name"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["Type"] = data
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["SourceFileName"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["NrOfValues"] = data
# Store transformation values as a list
trans_values = []
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
trans_values.append(data)
header["PastTransformation"][i]["Values"] = trans_values
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["LeftRightConvention"] = data # modified in v4
data, = struct.unpack('<B', f.read(1))
header["ReferenceSpaceVMR"] = data # new in v4
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeX"] = data # Voxel resolution along X axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeY"] = data # Voxel resolution along Y axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeZ"] = data # Voxel resolution along Z axis
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionVerified"] = data
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionInTALmm"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MinValue"] = data # 16-bit data min intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MeanValue"] = data # 16-bit data mean intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MaxValue"] = data # 16-bit data max intensity
return header, data_img
# =============================================================================
def write_vmr(filename, header, data_img):
"""Protocol to write Brainvoyager VMR file.
Parameters
----------
filename : string
Output filename.
header : dictionary
Header of VMR file.
data_img : numpy.array, 3D
Image.
"""
with open(filename, 'wb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# Expected binary data: unsigned short int (2 bytes)
data = header["File version"]
f.write(struct.pack('<H', data))
data = header["DimX"]
f.write(struct.pack('<H', data))
data = header["DimY"]
f.write(struct.pack('<H', data))
data = header["DimZ"]
f.write(struct.pack('<H', data))
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# Convert axes from Nifti standard back to BV standard
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
# Expected binary data: unsigned char (1 byte)
data_img = data_img.flatten()
for i in range(data_img.size):
f.write(struct.pack('<B', data_img[i]))
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
if header["File version"] >= 3:
# Expected binary data: short int (2 bytes)
data = header["OffsetX"]
f.write(struct.pack('<h', data))
data = header["OffsetY"]
f.write(struct.pack('<h', data))
data = header["OffsetZ"]
f.write(struct.pack('<h', data))
data = header["FramingCubeDim"]
f.write(struct.pack('<h', data))
# Expected binary data: int (4 bytes)
data = header["PosInfosVerified"]
f.write(struct.pack('<i', data))
data = header["CoordinateSystem"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["Slice1CenterX"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterY"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterZ"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterX"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterY"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterZ"]
f.write(struct.pack('<f', data))
data = header["RowDirX"]
f.write(struct.pack('<f', data))
data = header["RowDirY"]
f.write(struct.pack('<f', data))
data = header["RowDirZ"]
f.write(struct.pack('<f', data))
data = header["ColDirX"]
f.write(struct.pack('<f', data))
data = header["ColDirY"]
f.write(struct.pack('<f', data))
data = header["ColDirZ"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NRows"]
f.write(struct.pack('<i', data))
data = header["NCols"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["FoVRows"]
f.write(struct.pack('<f', data))
data = header["FoVCols"]
f.write(struct.pack('<f', data))
data = header["SliceThickness"]
f.write(struct.pack('<f', data))
data = header["GapThickness"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NrOfPastSpatialTransformations"]
f.write(struct.pack('<i', data))
if header["NrOfPastSpatialTransformations"] != 0:
for i in range(header["NrOfPastSpatialTransformations"]):
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["Name"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["Type"]
f.write(struct.pack('<i', data))
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["SourceFileName"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["NrOfValues"]
f.write(struct.pack('<i', data))
# Transformation values are stored as a list
trans_values = header["PastTransformation"][i]["Values"]
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
f.write(struct.pack('<f', trans_values[j]))
# Expected binary data: char (1 byte)
data = header["LeftRightConvention"]
f.write(struct.pack('<B', data))
data = header["ReferenceSpaceVMR"]
f.write(struct.pack('<B', data))
# Expected binary data: float (4 bytes)
data = header["VoxelSizeX"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeY"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeZ"]
f.write(struct.pack('<f', data))
# Expected binary data: char (1 byte)
data = header["VoxelResolutionVerified"]
f.write(struct.pack('<B', data))
data = header["VoxelResolutionInTALmm"]
f.write(struct.pack('<B', data))
# Expected binary data: int (4 bytes)
data = header["VMROrigV16MinValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MeanValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MaxValue"]
f.write(struct.pack('<i', data))
return print("VMR saved.")
|
[
"bvbabel.utils.write_variable_length_string",
"numpy.reshape",
"struct.pack",
"numpy.zeros",
"numpy.transpose",
"bvbabel.utils.read_variable_length_string"
] |
[((3535, 3605), 'numpy.zeros', 'np.zeros', (["(header['DimZ'] * header['DimY'] * header['DimX'])"], {'dtype': '"""<B"""'}), "(header['DimZ'] * header['DimY'] * header['DimX'], dtype='<B')\n", (3543, 3605), True, 'import numpy as np\n'), ((3752, 3822), 'numpy.reshape', 'np.reshape', (['data_img', "(header['DimZ'], header['DimY'], header['DimX'])"], {}), "(data_img, (header['DimZ'], header['DimY'], header['DimX']))\n", (3762, 3822), True, 'import numpy as np\n'), ((3856, 3889), 'numpy.transpose', 'np.transpose', (['data_img', '(0, 2, 1)'], {}), '(data_img, (0, 2, 1))\n', (3868, 3889), True, 'import numpy as np\n'), ((13096, 13129), 'numpy.transpose', 'np.transpose', (['data_img', '(0, 2, 1)'], {}), '(data_img, (0, 2, 1))\n', (13108, 13129), True, 'import numpy as np\n'), ((12534, 12557), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12545, 12557), False, 'import struct\n'), ((12605, 12628), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12616, 12628), False, 'import struct\n'), ((12676, 12699), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12687, 12699), False, 'import struct\n'), ((12747, 12770), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12758, 12770), False, 'import struct\n'), ((14056, 14079), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (14067, 14079), False, 'import struct\n'), ((14139, 14162), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (14150, 14162), False, 'import struct\n'), ((14268, 14291), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14279, 14291), False, 'import struct\n'), ((14348, 14371), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14359, 14371), False, 'import struct\n'), ((14428, 14451), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14439, 14451), False, 'import struct\n'), ((14508, 14531), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14519, 14531), False, 'import struct\n'), ((14588, 14611), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14599, 14611), False, 'import struct\n'), ((14668, 14691), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14679, 14691), False, 'import struct\n'), ((14742, 14765), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14753, 14765), False, 'import struct\n'), ((14816, 14839), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14827, 14839), False, 'import struct\n'), ((14890, 14913), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14901, 14913), False, 'import struct\n'), ((14964, 14987), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14975, 14987), False, 'import struct\n'), ((15038, 15061), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15049, 15061), False, 'import struct\n'), ((15112, 15135), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15123, 15135), False, 'import struct\n'), ((15231, 15254), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (15242, 15254), False, 'import struct\n'), ((15303, 15326), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (15314, 15326), False, 'import struct\n'), ((15426, 15449), 'struct.pack', 'struct.pack', 
(['"""<f"""', 'data'], {}), "('<f', data)\n", (15437, 15449), False, 'import struct\n'), ((15500, 15523), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15511, 15523), False, 'import struct\n'), ((15581, 15604), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15592, 15604), False, 'import struct\n'), ((15660, 15683), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15671, 15683), False, 'import struct\n'), ((15804, 15827), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (15815, 15827), False, 'import struct\n'), ((17115, 17138), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17126, 17138), False, 'import struct\n'), ((17199, 17222), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17210, 17222), False, 'import struct\n'), ((17325, 17348), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (17336, 17348), False, 'import struct\n'), ((17402, 17425), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (17413, 17425), False, 'import struct\n'), ((17479, 17502), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (17490, 17502), False, 'import struct\n'), ((17616, 17639), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17627, 17639), False, 'import struct\n'), ((17705, 17728), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17716, 17728), False, 'import struct\n'), ((17837, 17860), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (17848, 17860), False, 'import struct\n'), ((17923, 17946), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (17934, 17946), False, 'import struct\n'), ((18008, 18031), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (18019, 18031), False, 'import struct\n'), ((9457, 9487), 'bvbabel.utils.read_variable_length_string', 'read_variable_length_string', (['f'], {}), '(f)\n', (9484, 9487), False, 'from bvbabel.utils import read_variable_length_string, write_variable_length_string\n'), ((9811, 9841), 'bvbabel.utils.read_variable_length_string', 'read_variable_length_string', (['f'], {}), '(f)\n', (9838, 9841), False, 'from bvbabel.utils import read_variable_length_string, write_variable_length_string\n'), ((13296, 13326), 'struct.pack', 'struct.pack', (['"""<B"""', 'data_img[i]'], {}), "('<B', data_img[i])\n", (13307, 13326), False, 'import struct\n'), ((13673, 13696), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13684, 13696), False, 'import struct\n'), ((13755, 13778), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13766, 13778), False, 'import struct\n'), ((13837, 13860), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13848, 13860), False, 'import struct\n'), ((13926, 13949), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13937, 13949), False, 'import struct\n'), ((16100, 16137), 'bvbabel.utils.write_variable_length_string', 'write_variable_length_string', (['f', 'data'], {}), '(f, data)\n', (16128, 16137), False, 'from bvbabel.utils import read_variable_length_string, write_variable_length_string\n'), ((16458, 16495), 'bvbabel.utils.write_variable_length_string', 'write_variable_length_string', (['f', 'data'], {}), '(f, data)\n', (16486, 16495), False, 'from bvbabel.utils 
import read_variable_length_string, write_variable_length_string\n'), ((16280, 16303), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (16291, 16303), False, 'import struct\n'), ((16644, 16667), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (16655, 16667), False, 'import struct\n'), ((16971, 17005), 'struct.pack', 'struct.pack', (['"""<f"""', 'trans_values[j]'], {}), "('<f', trans_values[j])\n", (16982, 17005), False, 'import struct\n')]
|
from dataset.baseset import BaseSet
import random, cv2
import numpy as np
class iNaturalist(BaseSet):
def __init__(self, mode='train', cfg=None, transform=None):
super(iNaturalist, self).__init__(mode, cfg, transform)
random.seed(0)
self.class_dict = self._get_class_dict()
def __getitem__(self, index):
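        # With a weighted sampler enabled, re-draw the sample: first pick a class
        # (uniformly, or according to the square / progressive distribution),
        # then pick a random index belonging to that class.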
if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.mode == 'train':
assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
sample_class = random.randint(0, self.num_classes - 1)
elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
sample_class = np.random.choice(np.arange(self.num_classes), p=self.square_p)
else:
sample_class = np.random.choice(np.arange(self.num_classes), p=self.progress_p)
sample_indexes = self.class_dict[sample_class]
index = random.choice(sample_indexes)
now_info = self.data[index]
img = self._get_image(now_info)
image = self.transform(img)
meta = dict()
image_label = now_info['category_id'] # 0-index
return image, image_label, meta
|
[
"random.choice",
"random.randint",
"random.seed",
"numpy.arange"
] |
[((248, 262), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (259, 262), False, 'import random, cv2\n'), ((1059, 1088), 'random.choice', 'random.choice', (['sample_indexes'], {}), '(sample_indexes)\n', (1072, 1088), False, 'import random, cv2\n'), ((651, 690), 'random.randint', 'random.randint', (['(0)', '(self.num_classes - 1)'], {}), '(0, self.num_classes - 1)\n', (665, 690), False, 'import random, cv2\n'), ((816, 843), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (825, 843), True, 'import numpy as np\n'), ((930, 957), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (939, 957), True, 'import numpy as np\n')]
|
import inspect
from typing import List, Union, Set, Any
import numpy as np
from fruits.cache import Cache, CoquantileCache
from fruits.scope import force_input_shape, FitTransform
from fruits.core.callback import AbstractCallback
from fruits.signature.iss import SignatureCalculator, CachePlan
from fruits.words.word import Word
from fruits.sieving.abstract import FeatureSieve
from fruits.preparation.abstract import DataPreparateur
class Fruit:
"""Feature Extractor using iterated sums.
A Fruit consists of a number of
:class:`~fruits.core.fruit.FruitBranch` objects.
    At the end of the pipeline, each branch returns its own features
and they will be concatenated by this class.
A simple example (using two branches):
.. code-block:: python
fruit = fruits.Fruit("My Fruit")
# optional: add preparateurs for preprocessing
fruit.add(fruits.preparation.INC)
# add words for iterated sums calculation
fruit.add(fruits.words.creation.simplewords_by_weight(4))
# choose sieves
fruit.add(fruits.sieving.PPV(0.5))
fruit.add(fruits.sieving.END)
# add a new branch without INC
fruit.fork()
fruit.add(fruits.words.creation.simplewords_by_weight(4))
fruit.add(fruits.sieving.PPV(0.5))
fruit.add(fruits.sieving.END)
# configure the fruit
fruit.configure(mode="extended")
# fit the fruit on a time series dataset
fruit.fit(X_train)
# transform the dataset
X_train_transformed = fruit.transform(X_train)
        X_test_transformed = fruit.transform(X_test)
# use the transformed results (features) in a classifier
...
The ``fruit`` above will result in ``2*8*2=32`` features per time
series.
"""
def __init__(self, name: str = ""):
self.name: str = name
# list of FruitBranches
self._branches: List[FruitBranch] = []
# pointer for the current branch index
self._cbi: int = 0
self._fitted: bool = False
@property
def name(self) -> str:
"""Simple identifier for the Fruit object."""
return self._name
@name.setter
def name(self, name: str):
self._name = name
def fork(self, branch: "FruitBranch" = None):
"""Adds a new branch to the pipeline. If none is given, an
empty FruitBranch will be created and switched to.
:type branch: FruitBranch, optional
"""
if branch is None:
branch = FruitBranch()
self._branches.append(branch)
self._cbi = len(self._branches) - 1
self._fitted = False
def branch(self, index: int = None):
"""Returns the currently selected branch or the branch with the
given index.
:rtype: FruitBranch
"""
if index is None:
return self._branches[self._cbi]
return self._branches[index]
def branches(self) -> list:
"""Returns all branches of this Fruit object.
:rtype: list
"""
return self._branches
def switch_branch(self, index: int):
"""Switches to the branch with the given index.
:param index: Integer in ``[0, 1, ..., len(self.branches())-1]``
:type index: int
"""
if not (0 <= index < len(self._branches)):
raise IndexError("Index has to be in [0, len(self.branches()))")
self._cbi = index
def add(self, *objects: Union[FitTransform, Word, type]):
"""Adds one or multiple object(s) to the currently selected
branch.
:param objects: One or more objects of the following types:
- :class:`~fruits.preparation.abstract.DataPreparateur`
- :class:`~fruits.words.word.Word`
- :class:`~fruits.sieving.abstract.FeatureSieve`
:type objects: Union[FitTransform, Word]
"""
if len(self._branches) == 0:
self.fork()
self._branches[self._cbi].add(*objects)
self._fitted = False
def nfeatures(self) -> int:
"""Returns the total number of features of all branches
combined.
:rtype: int
"""
return sum([branch.nfeatures() for branch in self._branches])
def configure(self, **kwargs: Any):
"""Makes changes to the default configuration of a all branches
if arguments differ from ``None``.
:param kwargs: For possible options, have a look at
:meth:`fruits.core.fruit.FruitBranch.configure`.
:type kwargs: Any
"""
for branch in self._branches:
branch.configure(**kwargs)
def fit(self, X: np.ndarray):
"""Fits all branches to the given data.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
"""
for branch in self._branches:
branch.fit(X)
self._fitted = True
def transform(self, X: np.ndarray,
callbacks: List[AbstractCallback] = []) -> np.ndarray:
"""Returns a two dimensional array of all features from all
branches this Fruit object contains.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
:param callbacks: List of callbacks. To write your own callback,
override the class
:class:`~fruits.core.callback.AbstractCallback`.,
            defaults to []
:type callbacks: List[AbstractCallback], optional
:rtype: np.ndarray
:raises: RuntimeError if Fruit.fit wasn't called
"""
if not self._fitted:
raise RuntimeError("Missing call of self.fit")
result = np.zeros((X.shape[0], self.nfeatures()))
index = 0
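        # Each branch writes its features into a contiguous block of columns.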
for branch in self._branches:
for callback in callbacks:
callback.on_next_branch()
k = branch.nfeatures()
result[:, index:index+k] = branch.transform(X, callbacks)
index += k
result = np.nan_to_num(result, copy=False, nan=0.0)
return result
def fit_transform(self, X: np.ndarray) -> np.ndarray:
"""Fits all branches to the given dataset and returns the
transformed results of X from all branches.
:param X: (Multidimensional) time series dataset
:type X: np.ndarray
:returns: Two dimensional feature array
:rtype: np.ndarray
"""
self.fit(X)
return self.transform(X)
def summary(self) -> str:
"""Returns a summary of this object. The summary contains a
summary for each FruitBranch in this Fruit object.
:rtype: str
"""
summary = "{:=^80}".format(f"Summary of fruits.Fruit: '{self.name}'")
summary += f"\nBranches: {len(self.branches())}"
summary += f"\nFeatures: {self.nfeatures()}"
for branch in self.branches():
summary += "\n\n" + branch.summary()
summary += "\n{:=^80}".format(f"End of Summary")
return summary
def copy(self) -> "Fruit":
"""Creates a shallow copy of this Fruit object.
This also creates shallow copies of all branches in this object.
:rtype: Fruit
"""
copy_ = Fruit(self.name+" (Copy)")
for branch in self._branches:
copy_.fork(branch.copy())
return copy_
def deepcopy(self) -> "Fruit":
"""Creates a deep copy of this Fruit object.
This also creates deep copies of all branches in this object.
:rtype: Fruit
"""
copy_ = Fruit(self.name+" (Copy)")
for branch in self._branches:
copy_.fork(branch.deepcopy())
return copy_
class FruitBranch:
"""One branch of a Fruit object.
A FruitBranch object extracts values from time series data that are
somehow representative of the input data.
The user can customize any of the following three steps.
- Preparing data:
Apply functions at the start of the extraction procedure.
There are many so called
:class:`~fruits.preparation.abstract.DataPreparateur`
objects in fruits available for preprocessing. The
preparateurs will be applied sequentially to the input data.
- Calculating Iterated Sums:
The preprocessed data is now used to calculate the iterated
sums signature for different
:class:`~fruits.words.word.Word` objects the user can
specify.
- Extracting Features:
Each :class:`~fruits.sieving.abstract.FeatureSieve` added to
the branch will be fitted on the iterated sums from the
previous step. The branch then returns an array of numbers
(the transformed results from those sieves), i.e. the
features for each time series.
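    A minimal usage sketch for a single, standalone branch (illustrative
    only; it assumes the same imports as the example in
    :class:`~fruits.core.fruit.Fruit`):
    .. code-block:: python
        branch = FruitBranch()
        branch.add(fruits.words.creation.simplewords_by_weight(2))
        branch.add(fruits.sieving.PPV(0.5))
        features = branch.fit_transform(X_train)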
"""
def __init__(self):
# lists of used classes for data processing
self._preparateurs: list = []
self._words: list = []
self._sieves: list = []
# calculator options used in the ISS calculation
self._calculator_options: dict = {"batch_size": 1, "mode": "single"}
# list with inner lists containing sieves
# all sieves in one list are trained on one specific output
# of an ISS-result
self._sieves_extended: list = []
# configurations for fitting
self._fitted: bool = False
self._fit_sample_size: Union[float, int] = 1
# cache that is calculated at fitting and also used in the
# transformation process
self._cache: Cache
def configure(self,
mode: str = None,
batch_size: int = None,
fit_sample_size: Union[float, int] = None):
"""Makes changes to the default configuration of a fruit branch
if arguments differ from ``None``.
:param mode: See
:meth:`fruits.signature.iss.SignatureCalculator.transform`,
defaults to None
:type mode: str, optional
:param batch_size: See
            :meth:`~fruits.signature.iss.SignatureCalculator.transform`,
defaults to None
:type batch_size: int, optional
:param fit_sample_size: Size of the random time series sample
that is used for fitting. This is represented as a float
which will be multiplied by ``X.shape[0]`` or ``1`` for one
random time series., defaults to 1
:type fit_sample_size: Union[float, int]
"""
if mode is not None:
self._calculator_options["mode"] = mode
if batch_size is not None:
self._calculator_options["batch_size"] = batch_size
if fit_sample_size is not None:
self._fit_sample_size = fit_sample_size
def add_preparateur(self, preparateur: DataPreparateur):
"""Adds a preparateur to the branch.
:type preparateur: DataPreparateur
"""
if not isinstance(preparateur, DataPreparateur):
raise TypeError
self._preparateurs.append(preparateur)
self._fitted = False
def get_preparateurs(self) -> List[DataPreparateur]:
"""Returns a list of all preparateurs added to the
branch.
:rtype: List[DataPreparateur]
"""
return self._preparateurs
def clear_preparateurs(self):
"""Removes all preparateurs that were added to this branch."""
self._preparateurs = []
self._fitted = False
def add_word(self, word: Word):
"""Adds a word to the branch.
:type word: Word
"""
if not isinstance(word, Word):
raise TypeError
self._words.append(word)
self._fitted = False
def get_words(self) -> List[Word]:
"""Returns a list of all words in the branch.
:rtype: List[Word]
"""
return self._words
def clear_words(self):
"""Removes all words that were added to this branch."""
self._words = []
self._sieves_extended = []
self._fitted = False
def add_sieve(self, sieve: FeatureSieve):
"""Appends a new feature sieve to the FruitBranch.
:type sieve: FeatureSieve
"""
if not isinstance(sieve, FeatureSieve):
raise TypeError
self._sieves.append(sieve)
self._fitted = False
def get_sieves(self) -> List[FeatureSieve]:
"""Returns a list of all feature sieves added to the branch.
:rtype: List[FeatureSieve]
"""
return self._sieves
def clear_sieves(self):
"""Removes all feature sieves that were added to this branch."""
self._sieves = []
self._sieve_prerequisites = None
self._sieves_extended = []
self._fitted = False
def add(self, *objects: Union[FitTransform, Word, type]):
"""Adds one or multiple object(s) to the branch.
:type objects: One or more objects of the following types:
- :class:`~fruits.preparation.abstract.DataPreparateur`
- :class:`~fruits.words.word.Word`
- :class:`~fruits.sieving.abstract.FeatureSieve`
"""
objects_flattened = np.array(objects, dtype=object).flatten()
for obj in objects_flattened:
if inspect.isclass(obj):
obj = obj()
if isinstance(obj, DataPreparateur):
self.add_preparateur(obj)
elif isinstance(obj, Word):
self.add_word(obj)
elif isinstance(obj, FeatureSieve):
self.add_sieve(obj)
else:
                raise TypeError("Cannot add variable of type " + str(type(obj)))
def clear(self):
"""Clears all settings, configurations and calculated results
the branch has.
After the branch is cleared, it has the same settings as a newly
created FruitBranch object.
"""
self.clear_preparateurs()
self.clear_words()
self.clear_sieves()
self._calculator_options = {"batch_size": 1, "mode": "single"}
def nfeatures(self) -> int:
"""Returns the total number of features the current
configuration produces.
:rtype: int
"""
if self._calculator_options["mode"] == "extended":
return (
sum([s.nfeatures() for s in self._sieves])
* CachePlan(self._words).n_iterated_sums(
list(range(len(self._words)))
)
)
else:
return (
sum([s.nfeatures() for s in self._sieves])
* len(self._words)
)
def _compile(self):
# checks if the FruitBranch is configured correctly and ready
# for fitting
if not self._words:
raise RuntimeError("No words specified for ISS calculation")
if not self._sieves:
raise RuntimeError("No FeatureSieve objects specified")
def _collect_cache_keys(self) -> Set[str]:
# collects cache keys of all FitTransformers in the branch
keys: Set[str] = set()
for prep in self._preparateurs:
prep_keys = prep._get_cache_keys()
if 'coquantile' in prep_keys:
keys = keys.union(prep_keys['coquantile'])
for sieve in self._sieves:
sieve_keys = sieve._get_cache_keys()
if 'coquantile' in sieve_keys:
keys = keys.union(sieve_keys['coquantile'])
return keys
def _get_cache(self, X: np.ndarray):
# returns the already processed cache needed in this branch
self._cache = CoquantileCache()
self._cache.process(X, list(self._collect_cache_keys()))
def _select_fit_sample(self, X: np.ndarray) -> np.ndarray:
# returns a sample of the data used for fitting
if (isinstance(self._fit_sample_size, int)
and self._fit_sample_size == 1):
ind = np.random.randint(0, X.shape[0])
return X[ind:ind+1, :, :]
else:
s = int(self._fit_sample_size * X.shape[0])
if s < 1:
s = 1
indices = np.random.choice(X.shape[0], size=s, replace=False)
return X[indices, :, :]
def fit(self, X: np.ndarray):
"""Fits the branch to the given dataset. What this action
explicitly does depends on the FruitBranch configuration.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
"""
self._compile()
self._get_cache(X)
prepared_data = self._select_fit_sample(X)
for prep in self._preparateurs:
prep.fit(prepared_data)
prepared_data = prep.transform(prepared_data, cache=self._cache)
self._sieves_extended = []
iss_calculations = SignatureCalculator().transform(
prepared_data,
words=self._words,
**self._calculator_options
)[0]
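        # Fit a fresh copy of every sieve on each iterated-sums result so that
        # sieves trained on different results stay independent of each other.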
for iterated_data in iss_calculations:
iterated_data = iterated_data.reshape(iterated_data.shape[0]
* iterated_data.shape[1],
iterated_data.shape[2])
sieves_copy = [sieve.copy() for sieve in self._sieves]
for sieve in sieves_copy:
sieve.fit(iterated_data[:, :])
self._sieves_extended.append(sieves_copy)
self._fitted = True
def transform(self, X: np.ndarray,
callbacks: List[AbstractCallback] = []) -> np.ndarray:
"""Transforms the given time series dataset. The results are
the calculated features for the different time series.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
:param callbacks: List of callbacks. To write your own callback,
override the class
:class:`~fruits.core.callback.AbstractCallback`.,
defaults to []
:type callbacks: List[AbstractCallback], optional
:rtype: np.ndarray
:raises: RuntimeError if ``self.fit`` wasn't called
"""
if not self._fitted:
raise RuntimeError("Missing call of self.fit")
self._get_cache(X)
prepared_data = force_input_shape(X)
for prep in self._preparateurs:
prepared_data = prep.transform(prepared_data, cache=self._cache)
for callback in callbacks:
callback.on_preparateur(prepared_data)
for callback in callbacks:
callback.on_preparation_end(prepared_data)
sieved_data = np.zeros((prepared_data.shape[0],
self.nfeatures()))
k = 0
iss_calculations = SignatureCalculator().transform(
prepared_data,
words=self._words,
**self._calculator_options
)[0]
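        # Fill the feature matrix block by block: nf sieve features for every
        # dimension of each iterated-sums result.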
for i, iterated_data in enumerate(iss_calculations):
for callback in callbacks:
callback.on_iterated_sum(iterated_data)
for sieve in self._sieves_extended[i]:
nf = sieve.nfeatures()
new_features = nf * iterated_data.shape[1]
for it in range(iterated_data.shape[1]):
sieved_data[:, k+it*nf:k+(it+1)*nf] = sieve.transform(
iterated_data[:, it, :],
cache=self._cache,
)
for callback in callbacks:
callback.on_sieve(sieved_data[k:k+new_features])
k += new_features
for callback in callbacks:
callback.on_sieving_end(sieved_data)
return sieved_data
def fit_transform(self, X: np.ndarray) -> np.ndarray:
"""This function does the same that calling ``self.fit(X)`` and
``self.transform(X)`` consecutively does.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
            :meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
:returns: Array of features.
:rtype: np.ndarray
"""
self.fit(X)
return self.transform(X)
def summary(self) -> str:
"""Returns a summary of this object. The summary contains all
added preparateurs, words and sieves.
:rtype: str
"""
summary = "{:-^80}".format("fruits.FruitBranch")
summary += f"\nNumber of features: {self.nfeatures()}"
summary += f"\n\nPreparateurs ({len(self._preparateurs)}): "
if len(self._preparateurs) == 0:
summary += "-"
else:
summary += "\n\t+ " + \
"\n\t+ ".join([str(x) for x in self._preparateurs])
summary += f"\nIterators ({len(self._words)}): "
if len(self._words) == 0:
summary += "-"
elif len(self._words) > 10:
summary += "\n\t+ " + \
"\n\t+ ".join([str(x) for x in self._words[:9]])
summary += "\n\t..."
else:
summary += "\n\t+ " + \
"\n\t+ ".join([str(x) for x in self._words])
summary += f"\nSieves ({len(self._sieves)}): "
if len(self._sieves) == 0:
summary += "-"
else:
for x in self._sieves:
lines = x.summary().split("\n")
summary += "\n\t+ " + lines[0]
summary += "\n\t "
summary += "\n\t ".join(lines[1:])
return summary
def copy(self) -> "FruitBranch":
"""Returns a shallow copy of this FruitBranch object.
:returns: Copy of the branch with same settings but all
calculations done erased.
:rtype: FruitBranch
"""
copy_ = FruitBranch()
for preparateur in self._preparateurs:
copy_.add(preparateur)
for iterator in self._words:
copy_.add(iterator)
for sieve in self._sieves:
copy_.add(sieve)
return copy_
def deepcopy(self) -> "FruitBranch":
"""Returns a deep copy of this FruitBranch object.
:returns: Deepcopy of the branch with same settings but all
calculations done erased.
:rtype: FruitBranch
"""
copy_ = FruitBranch()
for preparateur in self._preparateurs:
copy_.add(preparateur.copy())
for iterator in self._words:
copy_.add(iterator.copy())
for sieve in self._sieves:
copy_.add(sieve.copy())
copy_._calculator_options = self._calculator_options.copy()
return copy_
|
[
"fruits.signature.iss.CachePlan",
"numpy.random.choice",
"fruits.signature.iss.SignatureCalculator",
"numpy.array",
"numpy.random.randint",
"fruits.scope.force_input_shape",
"inspect.isclass",
"fruits.cache.CoquantileCache",
"numpy.nan_to_num"
] |
[((6257, 6299), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {'copy': '(False)', 'nan': '(0.0)'}), '(result, copy=False, nan=0.0)\n', (6270, 6299), True, 'import numpy as np\n'), ((15954, 15971), 'fruits.cache.CoquantileCache', 'CoquantileCache', ([], {}), '()\n', (15969, 15971), False, 'from fruits.cache import Cache, CoquantileCache\n'), ((18848, 18868), 'fruits.scope.force_input_shape', 'force_input_shape', (['X'], {}), '(X)\n', (18865, 18868), False, 'from fruits.scope import force_input_shape, FitTransform\n'), ((13590, 13610), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (13605, 13610), False, 'import inspect\n'), ((16275, 16307), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]'], {}), '(0, X.shape[0])\n', (16292, 16307), True, 'import numpy as np\n'), ((16482, 16533), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]'], {'size': 's', 'replace': '(False)'}), '(X.shape[0], size=s, replace=False)\n', (16498, 16533), True, 'import numpy as np\n'), ((13495, 13526), 'numpy.array', 'np.array', (['objects'], {'dtype': 'object'}), '(objects, dtype=object)\n', (13503, 13526), True, 'import numpy as np\n'), ((17267, 17288), 'fruits.signature.iss.SignatureCalculator', 'SignatureCalculator', ([], {}), '()\n', (17286, 17288), False, 'from fruits.signature.iss import SignatureCalculator, CachePlan\n'), ((19319, 19340), 'fruits.signature.iss.SignatureCalculator', 'SignatureCalculator', ([], {}), '()\n', (19338, 19340), False, 'from fruits.signature.iss import SignatureCalculator, CachePlan\n'), ((14699, 14721), 'fruits.signature.iss.CachePlan', 'CachePlan', (['self._words'], {}), '(self._words)\n', (14708, 14721), False, 'from fruits.signature.iss import SignatureCalculator, CachePlan\n')]
|
import numpy as np
from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix
from sklearn.decomposition import PCA
from sklearn import random_projection
from sklearn import svm
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
from keras.layers import Dense, Input, Dropout
from keras.models import Model
from keras import regularizers
from keras.models import Sequential
from keras.optimizers import Adam
from keras.regularizers import l2
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
import xgboost as xgb
def one_class_svm(x_train, x_test, x_attacks, svm_results):
# SVM Hyper-parameters
nus = [0.01]
gammas = ['auto']
dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.35, 0.5, 0.75, 0.9, 1]]
dimensions = list(filter(lambda x: x > 0, dimensions))
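    # Grid over the projected dimensionality, nu and gamma; a fit on the
    # unprojected data is evaluated as a baseline inside the same loop.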
for n in dimensions:
x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA',
attack=x_attacks)
for nu in nus:
for gamma in gammas:
# Fit classifier with PCA reduced data
classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)
classifier.fit(x_reduced_pca)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca,
test_reduced_pca,
attack_reduced_pca)
svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': n, 'TPR_train': tpr_train,
'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area,
'f_beta': fb, 'projection': 'PCA'}, ignore_index=True)
                # Fit classifier on the original (unprojected) data
classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)
classifier.fit(x_train)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train,
x_test, x_attacks)
svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': x_test.shape[1],
'TPR_train': tpr_train,
'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area,
'f_beta': fb, 'projection': 'None'}, ignore_index=True)
return svm_results
def isolation_forest(x_train, x_test, x_attacks, isolation_results):
# Isolation Forest Hyper-parameters
estimators = [200, 100]
contaminations = [0.01]
dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.5, 0.9, 1]]
dimensions = list(filter(lambda x: x > 0, dimensions))
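    # Sweep the projection size (PCA and random projection) together with the
    # number of estimators, the contamination and max_features.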
for n in dimensions:
x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA',
attack=x_attacks)
x_reduced_rp, test_reduced_rp, attack_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP',
attack=x_attacks)
max_features = list(range(1, n + 1, 4))
for estimator in estimators:
for contamination in contaminations:
for max_feature in max_features:
classifier = IsolationForest(n_estimators=estimator,
contamination=contamination,
max_features=max_feature,
n_jobs=7)
classifier.fit(x_reduced_pca)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca,
test_reduced_pca, attack_reduced_pca)
isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination,
'n_components': n, 'max_features': max_feature,
'TPR_train': tpr_train,
'TPR_test': tpr_test,
'TNR': tnr,
'model': 'isolation_forest',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = IsolationForest(n_estimators=estimator,
contamination=contamination,
max_features=max_feature,
n_jobs=7)
classifier.fit(x_reduced_rp)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_rp,
test_reduced_rp, attack_reduced_rp)
isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination,
'n_components': n, 'max_features': max_feature,
'TPR_train': tpr_train,
'TPR_test': tpr_test,
'TNR': tnr,
'model': 'isolation_forest',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
return isolation_results
def autoencoder(x_train, x_test, x_attacks, ae_svm_results):
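    # Train a dense autoencoder on the training data, then fit One-Class SVMs
    # on the latent codes produced by the encoder.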
latent_dim = 3
input_vector = Input(shape=(x_train.shape[1],))
encoded = Dense(latent_dim, activation='relu')(input_vector)
decoded = Dense(x_train.shape[1], activity_regularizer=regularizers.l1(10e-5))(encoded)
autoencoder = Model(input_vector, decoded)
encoder = Model(input_vector, encoded)
autoencoder.compile(optimizer=Adam(lr=0.001), loss='mse')
network_history = autoencoder.fit(x_train, x_train, shuffle=True, batch_size=16, epochs=10,
validation_data=(x_test, x_test), verbose=True)
plot_history(network_history, 'AE history')
print('Mean loss on train: {}'.format(autoencoder.evaluate(x_train, x_train, batch_size=8, verbose=False)))
print('Mean loss on test: {}'.format(autoencoder.evaluate(x_test, x_test, batch_size=8, verbose=False)))
print('Mean loss on attacks: {}'.format(autoencoder.evaluate(x_attacks, x_attacks, batch_size=8, verbose=False)))
x_train_red = encoder.predict(x_train, batch_size=8)
x_test_red = encoder.predict(x_test, batch_size=8)
x_attacks_red = encoder.predict(x_attacks, batch_size=8)
nus = [0.01]
gammas = [x_train_red.shape[1], 2*x_train_red.shape[1], x_train_red.shape[1]/2, 'auto']
for nu in nus:
for gamma in gammas:
classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)
classifier.fit(x_train_red)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train_red,
x_test_red, x_attacks_red)
ae_svm_results = ae_svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': latent_dim,
'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr,
'model': 'ae-svm', 'auc': area, 'f_beta': fb}, ignore_index=True)
return ae_svm_results
def unsupervised_evaluation(classifier, train_set, test_set, attack_set, beta=20):
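    # Convention: +1 = normal sample, -1 = attack/outlier. TPR is measured on
    # the train/test sets, TNR on the attack set; beta=20 favours recall.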
y_pred_train = classifier.predict(train_set)
y_pred_test = classifier.predict(test_set)
y_pred_outliers = classifier.predict(attack_set)
n_accurate_train = y_pred_train[y_pred_train == 1].size
n_accurate_test = y_pred_test[y_pred_test == 1].size
n_accurate_outliers = y_pred_outliers[y_pred_outliers == -1].size
fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]),
np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1)
fb = fbeta_score(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]),
np.concatenate([y_pred_test, y_pred_outliers]), beta=beta, pos_label=1)
tnr = n_accurate_outliers/attack_set.shape[0]
tpr_test = n_accurate_test/test_set.shape[0]
tpr_train = n_accurate_train/train_set.shape[0]
area = auc(fpr, tpr)
return fb, area, tnr, tpr_train, tpr_test
def neural_network(x_train, y_train, x_test, y_test):
model = Sequential()
model.add(Dense(128, input_shape=(x_train.shape[1],), activation='relu', kernel_regularizer=l2(0.01)))
model.add(Dropout(0.1))
model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
model.add(Dropout(0.2))
model.add(Dense(128, kernel_initializer='glorot_uniform', activation='sigmoid'))
model.add(Dropout(0.4))
model.add(Dense(64, kernel_initializer='glorot_uniform', activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(32, kernel_initializer='glorot_uniform', activation='tanh'))
model.add(Dropout(0.4))
model.add(Dense(128, kernel_initializer='glorot_uniform', activation='tanh'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
network_history = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=0,
validation_data=(x_test, y_test))
plot_history_with_acc(network_history)
return model
def random_forest(x_train, y_train, x_test, y_test, random_forest_results):
# Random forest Hyper-parameters
estimators = [150, 200]
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for estimator in estimators:
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
random_forest_results = random_forest_results.append({'estimators': estimator,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'random_forest',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
random_forest_results = random_forest_results.append({'estimators': estimator,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'random_forest',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7)
classifier.fit(x_train, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test)
random_forest_results = random_forest_results.append({'estimators': estimator,
'n_components': x_test.shape[1],
'TPR': tpr,
'TNR': tnr,
'model': 'random_forest',
'auc': area,
'f_beta': fb,
'projection': 'None'}, ignore_index=True)
return random_forest_results
def ada_boost(x_train, y_train, x_test, y_test, ada_boost_results):
# AdaBoost Hyper-parameters
learning_rates = [0.55]
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
for lr in learning_rates:
classifier = AdaBoostClassifier(learning_rate=lr)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
ada_boost_results = ada_boost_results.append({'LR': lr,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'ada_boost',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = AdaBoostClassifier(learning_rate=lr)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
ada_boost_results = ada_boost_results.append({'LR': lr,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'ada_boost',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
return ada_boost_results
def svm_classifier(x_train, y_train, x_test, y_test, svm_results):
# SVC Hyper-parameters
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
classifier = svm.SVC(gamma='auto', cache_size=7000)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
svm_results = svm_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'svm',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = svm.SVC(gamma='auto', cache_size=7000)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
svm_results = svm_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'svm',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
return svm_results
def xg_boost(x_train, y_train, x_test, y_test, xg_boost_results):
# XGBoost Hyper-parameters
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
classifier = xgb.XGBClassifier()
grid = {'max_depth': 10}
classifier.set_params(**grid)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
xg_boost_results = xg_boost_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'xgboost',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = xgb.XGBClassifier()
grid = {'max_depth': 10}
classifier.set_params(**grid)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
xg_boost_results = xg_boost_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'xgboost',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
classifier = xgb.XGBClassifier()
grid = {'max_depth': 10}
classifier.set_params(**grid)
classifier.fit(x_train, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test)
xg_boost_results = xg_boost_results.append({
'n_components': x_test.shape[1],
'TPR': tpr,
'TNR': tnr,
'model': 'xgboost',
'auc': area,
'f_beta': fb,
'projection': 'None'}, ignore_index=True)
return xg_boost_results
def supervised_evaluation(classifier, x_test, y_test, beta=20, nn=False):
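    # Point metrics computed from hard label predictions; beta=20 makes the
    # F-beta score strongly favour recall.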
if not nn:
y_pred = classifier.predict(x_test)
confusion_matrix(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
fb = fbeta_score(y_test, y_pred, beta=beta, pos_label=1)
area = auc(fpr, tpr)
tpr = tpr[1]
tnr = 1 - fpr[1]
return fb, area, tnr, tpr
def plot_roc(classifier, test, attacks, title):
y_pred_test = classifier.predict(test)
y_pred_outliers = classifier.predict(attacks)
fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]),
-1*np.ones(y_pred_outliers.shape[0])]),
np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: {}'.format(title))
plt.legend(loc='lower right')
plt.show()
def plot_roc_supervised(classifier, x_test, y_test, title, nn=False):
y_pred = classifier.predict(x_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
if nn:
y_pred = [round(x[0]) for x in y_pred]
print(confusion_matrix(y_test, y_pred))
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic {}'.format(title))
plt.legend(loc='lower right')
plt.show()
def plot_history(network_history, title):
plt.figure(figsize=(10, 5))
plt.title(title)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.semilogy(network_history.history['loss'])
plt.semilogy(network_history.history['val_loss'])
plt.legend(['Training', 'Validation'])
plt.show()
def plot_history_with_acc(network_history, title='Loss and Accuracy'):
plt.figure(figsize=(15, 10))
plt.subplot(211)
plt.title(title)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.semilogy(network_history.history['loss'])
plt.semilogy(network_history.history['val_loss'])
plt.legend(['Training', 'Validation'])
plt.subplot(212)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.plot(network_history.history['acc'])
plt.plot(network_history.history['val_acc'])
plt.legend(['Training', 'Validation'], loc='lower right')
plt.show()
def reduce_dimensionality(n_components, train, test, method, attack=None):
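    # Fit the projection (PCA or sparse random projection) on the training set
    # only, then apply the same mapping to the test and, optionally, attack sets.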
if method == 'PCA':
matrix = PCA(n_components=n_components)
elif method == 'RP':
matrix = random_projection.SparseRandomProjection(n_components=n_components, random_state=7)
else:
print('unknown projection method, choose either RP or PCA')
return None
train = matrix.fit_transform(train)
test = matrix.transform(test)
if attack is None:
return train, test
attack = matrix.transform(attack)
return train, test, attack
|
[
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.metrics.roc_curve",
"keras.layers.Dense",
"matplotlib.pyplot.semilogy",
"sklearn.decomposition.PCA",
"sklearn.random_projection.SparseRandomProjection",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.models.Model",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"sklearn.svm.OneClassSVM",
"keras.regularizers.l1",
"sklearn.metrics.confusion_matrix",
"keras.optimizers.Adam",
"numpy.ones",
"sklearn.ensemble.RandomForestClassifier",
"keras.models.Sequential",
"sklearn.metrics.fbeta_score",
"keras.regularizers.l2",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"xgboost.XGBClassifier",
"matplotlib.pyplot.show",
"sklearn.svm.SVC",
"sklearn.ensemble.IsolationForest",
"keras.layers.Input",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot"
] |
[((6712, 6744), 'keras.layers.Input', 'Input', ([], {'shape': '(x_train.shape[1],)'}), '(shape=(x_train.shape[1],))\n', (6717, 6744), False, 'from keras.layers import Dense, Input, Dropout\n'), ((6920, 6948), 'keras.models.Model', 'Model', (['input_vector', 'decoded'], {}), '(input_vector, decoded)\n', (6925, 6948), False, 'from keras.models import Model\n'), ((6963, 6991), 'keras.models.Model', 'Model', (['input_vector', 'encoded'], {}), '(input_vector, encoded)\n', (6968, 6991), False, 'from keras.models import Model\n'), ((9640, 9653), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (9643, 9653), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((9768, 9780), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9778, 9780), False, 'from keras.models import Sequential\n'), ((19241, 19260), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (19258, 19260), True, 'import xgboost as xgb\n'), ((20524, 20537), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (20527, 20537), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((20542, 20554), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20552, 20554), True, 'import matplotlib.pyplot as plt\n'), ((20570, 20664), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve (area = %0.2f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % roc_auc)\n", (20578, 20664), True, 'import matplotlib.pyplot as plt\n'), ((20677, 20738), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (20685, 20738), True, 'import matplotlib.pyplot as plt\n'), ((20743, 20763), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (20751, 20763), True, 'import matplotlib.pyplot as plt\n'), ((20768, 20789), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (20776, 20789), True, 'import matplotlib.pyplot as plt\n'), ((20794, 20827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (20804, 20827), True, 'import matplotlib.pyplot as plt\n'), ((20832, 20864), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (20842, 20864), True, 'import matplotlib.pyplot as plt\n'), ((20938, 20967), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (20948, 20967), True, 'import matplotlib.pyplot as plt\n'), ((20972, 20982), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20980, 20982), True, 'import matplotlib.pyplot as plt\n'), ((21114, 21139), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21123, 21139), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((21258, 21271), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (21261, 21271), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((21276, 21288), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21286, 21288), True, 'import matplotlib.pyplot as plt\n'), ((21304, 21398), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve 
(area = %0.2f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % roc_auc)\n", (21312, 21398), True, 'import matplotlib.pyplot as plt\n'), ((21411, 21472), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (21419, 21472), True, 'import matplotlib.pyplot as plt\n'), ((21477, 21497), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (21485, 21497), True, 'import matplotlib.pyplot as plt\n'), ((21502, 21523), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (21510, 21523), True, 'import matplotlib.pyplot as plt\n'), ((21528, 21561), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (21538, 21561), True, 'import matplotlib.pyplot as plt\n'), ((21566, 21598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (21576, 21598), True, 'import matplotlib.pyplot as plt\n'), ((21671, 21700), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (21681, 21700), True, 'import matplotlib.pyplot as plt\n'), ((21705, 21715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21713, 21715), True, 'import matplotlib.pyplot as plt\n'), ((21764, 21791), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (21774, 21791), True, 'import matplotlib.pyplot as plt\n'), ((21796, 21812), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (21805, 21812), True, 'import matplotlib.pyplot as plt\n'), ((21817, 21837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (21827, 21837), True, 'import matplotlib.pyplot as plt\n'), ((21842, 21860), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (21852, 21860), True, 'import matplotlib.pyplot as plt\n'), ((21865, 21910), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['loss']"], {}), "(network_history.history['loss'])\n", (21877, 21910), True, 'import matplotlib.pyplot as plt\n'), ((21915, 21964), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['val_loss']"], {}), "(network_history.history['val_loss'])\n", (21927, 21964), True, 'import matplotlib.pyplot as plt\n'), ((21969, 22007), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training', 'Validation']"], {}), "(['Training', 'Validation'])\n", (21979, 22007), True, 'import matplotlib.pyplot as plt\n'), ((22012, 22022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22020, 22022), True, 'import matplotlib.pyplot as plt\n'), ((22100, 22128), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (22110, 22128), True, 'import matplotlib.pyplot as plt\n'), ((22133, 22149), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (22144, 22149), True, 'import matplotlib.pyplot as plt\n'), ((22154, 22170), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (22163, 22170), True, 'import matplotlib.pyplot as plt\n'), ((22175, 22195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (22185, 22195), True, 'import matplotlib.pyplot as plt\n'), ((22200, 22218), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (22210, 22218), True, 
'import matplotlib.pyplot as plt\n'), ((22223, 22268), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['loss']"], {}), "(network_history.history['loss'])\n", (22235, 22268), True, 'import matplotlib.pyplot as plt\n'), ((22273, 22322), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['val_loss']"], {}), "(network_history.history['val_loss'])\n", (22285, 22322), True, 'import matplotlib.pyplot as plt\n'), ((22327, 22365), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training', 'Validation']"], {}), "(['Training', 'Validation'])\n", (22337, 22365), True, 'import matplotlib.pyplot as plt\n'), ((22371, 22387), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (22382, 22387), True, 'import matplotlib.pyplot as plt\n'), ((22392, 22412), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (22402, 22412), True, 'import matplotlib.pyplot as plt\n'), ((22417, 22439), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (22427, 22439), True, 'import matplotlib.pyplot as plt\n'), ((22444, 22484), 'matplotlib.pyplot.plot', 'plt.plot', (["network_history.history['acc']"], {}), "(network_history.history['acc'])\n", (22452, 22484), True, 'import matplotlib.pyplot as plt\n'), ((22489, 22533), 'matplotlib.pyplot.plot', 'plt.plot', (["network_history.history['val_acc']"], {}), "(network_history.history['val_acc'])\n", (22497, 22533), True, 'import matplotlib.pyplot as plt\n'), ((22538, 22595), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training', 'Validation']"], {'loc': '"""lower right"""'}), "(['Training', 'Validation'], loc='lower right')\n", (22548, 22595), True, 'import matplotlib.pyplot as plt\n'), ((22600, 22610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22608, 22610), True, 'import matplotlib.pyplot as plt\n'), ((6759, 6795), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'activation': '"""relu"""'}), "(latent_dim, activation='relu')\n", (6764, 6795), False, 'from keras.layers import Dense, Input, Dropout\n'), ((9214, 9260), 'numpy.concatenate', 'np.concatenate', (['[y_pred_test, y_pred_outliers]'], {}), '([y_pred_test, y_pred_outliers])\n', (9228, 9260), True, 'import numpy as np\n'), ((9404, 9450), 'numpy.concatenate', 'np.concatenate', (['[y_pred_test, y_pred_outliers]'], {}), '([y_pred_test, y_pred_outliers])\n', (9418, 9450), True, 'import numpy as np\n'), ((9903, 9915), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (9910, 9915), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10005, 10017), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (10012, 10017), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10034, 10103), 'keras.layers.Dense', 'Dense', (['(128)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""sigmoid"""'}), "(128, kernel_initializer='glorot_uniform', activation='sigmoid')\n", (10039, 10103), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10119, 10131), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (10126, 10131), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10148, 10213), 'keras.layers.Dense', 'Dense', (['(64)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""tanh"""'}), "(64, kernel_initializer='glorot_uniform', activation='tanh')\n", (10153, 10213), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10229, 10241), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), 
'(0.5)\n', (10236, 10241), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10258, 10323), 'keras.layers.Dense', 'Dense', (['(32)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""tanh"""'}), "(32, kernel_initializer='glorot_uniform', activation='tanh')\n", (10263, 10323), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10339, 10351), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (10346, 10351), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10368, 10434), 'keras.layers.Dense', 'Dense', (['(128)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""tanh"""'}), "(128, kernel_initializer='glorot_uniform', activation='tanh')\n", (10373, 10434), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10450, 10462), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (10457, 10462), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10479, 10538), 'keras.layers.Dense', 'Dense', (['(1)'], {'kernel_initializer': '"""normal"""', 'activation': '"""sigmoid"""'}), "(1, kernel_initializer='normal', activation='sigmoid')\n", (10484, 10538), False, 'from keras.layers import Dense, Input, Dropout\n'), ((16503, 16541), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '"""auto"""', 'cache_size': '(7000)'}), "(gamma='auto', cache_size=7000)\n", (16510, 16541), False, 'from sklearn import svm\n'), ((17166, 17204), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '"""auto"""', 'cache_size': '(7000)'}), "(gamma='auto', cache_size=7000)\n", (17173, 17204), False, 'from sklearn import svm\n'), ((18210, 18229), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (18227, 18229), True, 'import xgboost as xgb\n'), ((18729, 18748), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (18746, 18748), True, 'import xgboost as xgb\n'), ((19862, 19894), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (19878, 19894), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((19917, 19942), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (19926, 19942), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((19956, 20007), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test', 'y_pred'], {'beta': 'beta', 'pos_label': '(1)'}), '(y_test, y_pred, beta=beta, pos_label=1)\n', (19967, 20007), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((20023, 20036), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (20026, 20036), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((20449, 20495), 'numpy.concatenate', 'np.concatenate', (['[y_pred_test, y_pred_outliers]'], {}), '([y_pred_test, y_pred_outliers])\n', (20463, 20495), True, 'import numpy as np\n'), ((21209, 21241), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21225, 21241), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((22729, 22759), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (22732, 22759), False, 'from sklearn.decomposition import PCA\n'), ((7026, 7040), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (7030, 7040), False, 'from keras.optimizers import Adam\n'), ((7980, 8046), 
'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'nu': 'nu', 'cache_size': '(7000)'}), "(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)\n", (7995, 8046), False, 'from sklearn import svm\n'), ((11317, 11373), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'estimator', 'n_jobs': '(7)'}), '(n_estimators=estimator, n_jobs=7)\n', (11339, 11373), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((12236, 12292), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'estimator', 'n_jobs': '(7)'}), '(n_estimators=estimator, n_jobs=7)\n', (12258, 12292), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13152, 13208), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'estimator', 'n_jobs': '(7)'}), '(n_estimators=estimator, n_jobs=7)\n', (13174, 13208), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14491, 14527), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (14509, 14527), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((15307, 15343), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (15325, 15343), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((22802, 22889), 'sklearn.random_projection.SparseRandomProjection', 'random_projection.SparseRandomProjection', ([], {'n_components': 'n_components', 'random_state': '(7)'}), '(n_components=n_components,\n random_state=7)\n', (22842, 22889), False, 'from sklearn import random_projection\n'), ((1260, 1326), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'nu': 'nu', 'cache_size': '(7000)'}), "(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)\n", (1275, 1326), False, 'from sklearn import svm\n'), ((2091, 2157), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'nu': 'nu', 'cache_size': '(7000)'}), "(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)\n", (2106, 2157), False, 'from sklearn import svm\n'), ((6869, 6892), 'keras.regularizers.l1', 'regularizers.l1', (['(0.0001)'], {}), '(0.0001)\n', (6884, 6892), False, 'from keras import regularizers\n'), ((9115, 9144), 'numpy.ones', 'np.ones', (['y_pred_test.shape[0]'], {}), '(y_pred_test.shape[0])\n', (9122, 9144), True, 'import numpy as np\n'), ((9312, 9341), 'numpy.ones', 'np.ones', (['y_pred_test.shape[0]'], {}), '(y_pred_test.shape[0])\n', (9319, 9341), True, 'import numpy as np\n'), ((9878, 9886), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (9880, 9886), False, 'from keras.regularizers import l2\n'), ((9980, 9988), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (9982, 9988), False, 'from keras.regularizers import l2\n'), ((20306, 20335), 'numpy.ones', 'np.ones', (['y_pred_test.shape[0]'], {}), '(y_pred_test.shape[0])\n', (20313, 20335), True, 'import numpy as np\n'), ((3784, 3892), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'n_estimators': 'estimator', 'contamination': 'contamination', 'max_features': 'max_feature', 'n_jobs': '(7)'}), '(n_estimators=estimator, contamination=contamination,\n max_features=max_feature, n_jobs=7)\n', (3799, 3892), False, 'from sklearn.ensemble import IsolationForest\n'), ((5203, 5311), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], 
{'n_estimators': 'estimator', 'contamination': 'contamination', 'max_features': 'max_feature', 'n_jobs': '(7)'}), '(n_estimators=estimator, contamination=contamination,\n max_features=max_feature, n_jobs=7)\n', (5218, 5311), False, 'from sklearn.ensemble import IsolationForest\n'), ((9149, 9182), 'numpy.ones', 'np.ones', (['y_pred_outliers.shape[0]'], {}), '(y_pred_outliers.shape[0])\n', (9156, 9182), True, 'import numpy as np\n'), ((9346, 9379), 'numpy.ones', 'np.ones', (['y_pred_outliers.shape[0]'], {}), '(y_pred_outliers.shape[0])\n', (9353, 9379), True, 'import numpy as np\n'), ((20384, 20417), 'numpy.ones', 'np.ones', (['y_pred_outliers.shape[0]'], {}), '(y_pred_outliers.shape[0])\n', (20391, 20417), True, 'import numpy as np\n')]
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing Invert op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
from mindspore import log as logger
from util import visualize_list, save_and_check_md5, diff_mse
DATA_DIR = "../data/dataset/testImageNetData/train/"
GENERATE_GOLDEN = False
def test_invert_py(plot=False):
"""
Test Invert python op
"""
logger.info("Test Invert Python op")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
ds_original = data_set.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512)
for idx, (image, _) in enumerate(ds_original):
if idx == 0:
images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
else:
images_original = np.append(images_original,
np.transpose(image.asnumpy(), (0, 2, 3, 1)),
axis=0)
# Color Inverted Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.Invert(),
F.ToTensor()])
ds_invert = data_set.map(operations=transforms_invert, input_columns="image")
ds_invert = ds_invert.batch(512)
for idx, (image, _) in enumerate(ds_invert):
if idx == 0:
images_invert = np.transpose(image.asnumpy(), (0, 2, 3, 1))
else:
images_invert = np.append(images_invert,
np.transpose(image.asnumpy(), (0, 2, 3, 1)),
axis=0)
num_samples = images_original.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = np.mean((images_invert[i] - images_original[i]) ** 2)
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_original, images_invert)
def test_invert_c(plot=False):
"""
Test Invert Cpp op
"""
logger.info("Test Invert cpp op")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize(size=[224, 224])]
ds_original = data_set.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512)
for idx, (image, _) in enumerate(ds_original):
if idx == 0:
images_original = image.asnumpy()
else:
images_original = np.append(images_original,
image.asnumpy(),
axis=0)
# Invert Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transform_invert = [C.Decode(), C.Resize(size=[224, 224]),
C.Invert()]
ds_invert = data_set.map(operations=transform_invert, input_columns="image")
ds_invert = ds_invert.batch(512)
for idx, (image, _) in enumerate(ds_invert):
if idx == 0:
images_invert = image.asnumpy()
else:
images_invert = np.append(images_invert,
image.asnumpy(),
axis=0)
if plot:
visualize_list(images_original, images_invert)
num_samples = images_original.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_invert[i], images_original[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
def test_invert_py_c(plot=False):
"""
Test Invert Cpp op and python op
"""
logger.info("Test Invert cpp and python op")
# Invert Images in cpp
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
ds_c_invert = data_set.map(operations=C.Invert(), input_columns="image")
ds_c_invert = ds_c_invert.batch(512)
for idx, (image, _) in enumerate(ds_c_invert):
if idx == 0:
images_c_invert = image.asnumpy()
else:
images_c_invert = np.append(images_c_invert,
image.asnumpy(),
axis=0)
# invert images in python
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
transforms_p_invert = mindspore.dataset.transforms.py_transforms.Compose([lambda img: img.astype(np.uint8),
F.ToPIL(),
F.Invert(),
np.array])
ds_p_invert = data_set.map(operations=transforms_p_invert, input_columns="image")
ds_p_invert = ds_p_invert.batch(512)
for idx, (image, _) in enumerate(ds_p_invert):
if idx == 0:
images_p_invert = image.asnumpy()
else:
images_p_invert = np.append(images_p_invert,
image.asnumpy(),
axis=0)
num_samples = images_c_invert.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_p_invert[i], images_c_invert[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_c_invert, images_p_invert, visualize_mode=2)
def test_invert_one_channel():
"""
Test Invert cpp op with one channel image
"""
logger.info("Test Invert C Op With One Channel Images")
c_op = C.Invert()
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
data_set.map(operations=c_op, input_columns="image")
except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "The shape" in str(e)
def test_invert_md5_py():
"""
Test Invert python op with md5 check
"""
logger.info("Test Invert python op with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Invert(),
F.ToTensor()])
data = data_set.map(operations=transforms_invert, input_columns="image")
# Compare with expected md5 from images
filename = "invert_01_result_py.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
def test_invert_md5_c():
"""
Test Invert cpp op with md5 check
"""
logger.info("Test Invert cpp op with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = [C.Decode(),
C.Resize(size=[224, 224]),
C.Invert(),
F.ToTensor()]
data = data_set.map(operations=transforms_invert, input_columns="image")
# Compare with expected md5 from images
filename = "invert_01_result_c.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
if __name__ == "__main__":
test_invert_py(plot=False)
test_invert_c(plot=False)
test_invert_py_c(plot=False)
test_invert_one_channel()
test_invert_md5_py()
test_invert_md5_c()
|
[
"mindspore.log.info",
"util.save_and_check_md5",
"numpy.mean",
"util.diff_mse",
"mindspore.dataset.vision.py_transforms.Resize",
"mindspore.dataset.vision.py_transforms.ToPIL",
"util.visualize_list",
"mindspore.dataset.vision.c_transforms.Resize",
"mindspore.dataset.vision.py_transforms.Invert",
"mindspore.dataset.vision.c_transforms.Invert",
"mindspore.dataset.vision.py_transforms.Decode",
"numpy.array",
"numpy.zeros",
"mindspore.dataset.ImageFolderDataset",
"mindspore.dataset.vision.c_transforms.Decode",
"mindspore.dataset.vision.py_transforms.ToTensor"
] |
[((1160, 1196), 'mindspore.log.info', 'logger.info', (['"""Test Invert Python op"""'], {}), "('Test Invert Python op')\n", (1171, 1196), True, 'from mindspore import log as logger\n'), ((1235, 1293), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (1256, 1293), True, 'import mindspore.dataset as ds\n'), ((2102, 2160), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (2123, 2160), True, 'import mindspore.dataset as ds\n'), ((3041, 3062), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (3049, 3062), True, 'import numpy as np\n'), ((3365, 3398), 'mindspore.log.info', 'logger.info', (['"""Test Invert cpp op"""'], {}), "('Test Invert cpp op')\n", (3376, 3398), True, 'from mindspore import log as logger\n'), ((3437, 3495), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (3458, 3495), True, 'import mindspore.dataset as ds\n'), ((4023, 4081), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (4044, 4081), True, 'import mindspore.dataset as ds\n'), ((4707, 4728), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (4715, 4728), True, 'import numpy as np\n'), ((4972, 5016), 'mindspore.log.info', 'logger.info', (['"""Test Invert cpp and python op"""'], {}), "('Test Invert cpp and python op')\n", (4983, 5016), True, 'from mindspore import log as logger\n'), ((5060, 5118), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (5081, 5118), True, 'import mindspore.dataset as ds\n'), ((5680, 5738), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (5701, 5738), True, 'import mindspore.dataset as ds\n'), ((6698, 6719), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (6706, 6719), True, 'import numpy as np\n'), ((7062, 7117), 'mindspore.log.info', 'logger.info', (['"""Test Invert C Op With One Channel Images"""'], {}), "('Test Invert C Op With One Channel Images')\n", (7073, 7117), True, 'from mindspore import log as logger\n'), ((7130, 7140), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (7138, 7140), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((7697, 7748), 'mindspore.log.info', 'logger.info', (['"""Test Invert python op with md5 check"""'], {}), "('Test Invert python op with md5 check')\n", (7708, 7748), True, 'from mindspore import log as logger\n'), ((7788, 7846), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (7809, 7846), True, 'import mindspore.dataset as ds\n'), ((8282, 8349), 'util.save_and_check_md5', 'save_and_check_md5', (['data', 'filename'], {'generate_golden': 'GENERATE_GOLDEN'}), '(data, filename, generate_golden=GENERATE_GOLDEN)\n', (8300, 8349), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((8435, 8483), 'mindspore.log.info', 'logger.info', (['"""Test 
Invert cpp op with md5 check"""'], {}), "('Test Invert cpp op with md5 check')\n", (8446, 8483), True, 'from mindspore import log as logger\n'), ((8523, 8581), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (8544, 8581), True, 'import mindspore.dataset as ds\n'), ((8914, 8981), 'util.save_and_check_md5', 'save_and_check_md5', (['data', 'filename'], {'generate_golden': 'GENERATE_GOLDEN'}), '(data, filename, generate_golden=GENERATE_GOLDEN)\n', (8932, 8981), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((3113, 3166), 'numpy.mean', 'np.mean', (['((images_invert[i] - images_original[i]) ** 2)'], {}), '((images_invert[i] - images_original[i]) ** 2)\n', (3120, 3166), True, 'import numpy as np\n'), ((3242, 3288), 'util.visualize_list', 'visualize_list', (['images_original', 'images_invert'], {}), '(images_original, images_invert)\n', (3256, 3288), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((3524, 3534), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (3532, 3534), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((3536, 3561), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', ([], {'size': '[224, 224]'}), '(size=[224, 224])\n', (3544, 3561), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4107, 4117), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (4115, 4117), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4119, 4144), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', ([], {'size': '[224, 224]'}), '(size=[224, 224])\n', (4127, 4144), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4170, 4180), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (4178, 4180), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4606, 4652), 'util.visualize_list', 'visualize_list', (['images_original', 'images_invert'], {}), '(images_original, images_invert)\n', (4620, 4652), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((4779, 4825), 'util.diff_mse', 'diff_mse', (['images_invert[i]', 'images_original[i]'], {}), '(images_invert[i], images_original[i])\n', (4787, 4825), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((6770, 6818), 'util.diff_mse', 'diff_mse', (['images_p_invert[i]', 'images_c_invert[i]'], {}), '(images_p_invert[i], images_c_invert[i])\n', (6778, 6818), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((6894, 6960), 'util.visualize_list', 'visualize_list', (['images_c_invert', 'images_p_invert'], {'visualize_mode': '(2)'}), '(images_c_invert, images_p_invert, visualize_mode=2)\n', (6908, 6960), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((7170, 7228), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (7191, 7228), True, 'import mindspore.dataset as ds\n'), ((8608, 8618), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (8616, 8618), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((8645, 8670), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', ([], {'size': '[224, 224]'}), '(size=[224, 224])\n', (8653, 8670), True, 'import mindspore.dataset.vision.c_transforms as 
C\n'), ((8697, 8707), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (8705, 8707), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((8734, 8746), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (8744, 8746), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((1373, 1383), 'mindspore.dataset.vision.py_transforms.Decode', 'F.Decode', ([], {}), '()\n', (1381, 1383), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((1463, 1483), 'mindspore.dataset.vision.py_transforms.Resize', 'F.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1471, 1483), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((1563, 1575), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (1573, 1575), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2238, 2248), 'mindspore.dataset.vision.py_transforms.Decode', 'F.Decode', ([], {}), '()\n', (2246, 2248), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2326, 2346), 'mindspore.dataset.vision.py_transforms.Resize', 'F.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2334, 2346), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2424, 2434), 'mindspore.dataset.vision.py_transforms.Invert', 'F.Invert', ([], {}), '()\n', (2432, 2434), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2512, 2524), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (2522, 2524), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((5262, 5272), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (5270, 5272), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((6030, 6039), 'mindspore.dataset.vision.py_transforms.ToPIL', 'F.ToPIL', ([], {}), '()\n', (6037, 6039), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((6119, 6129), 'mindspore.dataset.vision.py_transforms.Invert', 'F.Invert', ([], {}), '()\n', (6127, 6129), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((7924, 7934), 'mindspore.dataset.vision.py_transforms.Decode', 'F.Decode', ([], {}), '()\n', (7932, 7934), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((8012, 8022), 'mindspore.dataset.vision.py_transforms.Invert', 'F.Invert', ([], {}), '()\n', (8020, 8022), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((8100, 8112), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (8110, 8112), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((3204, 3216), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (3211, 3216), True, 'import numpy as np\n'), ((4863, 4875), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (4870, 4875), True, 'import numpy as np\n'), ((5159, 5169), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (5167, 5169), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((5171, 5191), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', (['(224, 224)'], {}), '((224, 224))\n', (5179, 5191), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((5779, 5789), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (5787, 5789), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((5791, 5811), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', (['(224, 224)'], {}), '((224, 224))\n', (5799, 5811), True, 'import 
mindspore.dataset.vision.c_transforms as C\n'), ((6856, 6868), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (6863, 6868), True, 'import numpy as np\n'), ((7273, 7283), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (7281, 7283), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((7285, 7305), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', (['(224, 224)'], {}), '((224, 224))\n', (7293, 7305), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((7363, 7385), 'numpy.array', 'np.array', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (7371, 7385), True, 'import numpy as np\n')]
|
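Not part of the dataset row above — just a minimal numeric sketch of the property those Invert tests rely on: inverting an 8-bit image maps each pixel value v to 255 - v, so the per-image MSE against the original is essentially never zero for a real image.
import numpy as np
img = np.array([[0, 60, 255]], dtype=np.uint8)   # made-up pixel values
inverted = 255 - img                               # what an invert op computes per channel
mse = np.mean((inverted.astype(float) - img.astype(float)) ** 2)
print(mse)  # large unless the image is close to uniform mid-gray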
import numpy
g = open('/home/srallaba/mgc/transposed/arctic_a0404.mgc','w')
x = numpy.loadtxt('/home/srallaba/mgc_spaces/arctic_a0404.mgc')
numpy.savetxt(g, numpy.transpose(x))
g.close()
|
[
"numpy.loadtxt",
"numpy.transpose"
] |
[((82, 141), 'numpy.loadtxt', 'numpy.loadtxt', (['"""/home/srallaba/mgc_spaces/arctic_a0404.mgc"""'], {}), "('/home/srallaba/mgc_spaces/arctic_a0404.mgc')\n", (95, 141), False, 'import numpy\n'), ((159, 177), 'numpy.transpose', 'numpy.transpose', (['x'], {}), '(x)\n', (174, 177), False, 'import numpy\n')]
|
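For orientation (an inference from the rows visible here, not official documentation of this dataset): each extract_api entry appears to record, per call, a pair of character offsets locating the call in the code string, the fully qualified API name (e.g. 'numpy.loadtxt'), the name as written in the source, the parsed positional and keyword arguments, the literal argument text, a second offset pair for the argument span, a boolean flag, and the import statement that brought the name into scope.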
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
# cv2 and random are needed by the crop-based get_random_data2 variant further down
import cv2
import random
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
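# Usage note (illustrative, not from the original source): compose(f, g)(x)
# applies f first and g second, i.e. it returns g(f(x)).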
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
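# Worked example (illustrative): letterboxing a 640x480 photo into a (416, 416)
# canvas gives scale = min(416/640, 416/480) = 0.65, so the photo is resized to
# 416x312 and pasted at offset (0, 52) on the gray background.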
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
if not random:
# resize image
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
if proc_img:
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)/255.
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0,2]] = box[:, [0,2]]*scale + dx
box[:, [1,3]] = box[:, [1,3]]*scale + dy
box_data[:len(box)] = box
return image_data, box_data
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image_data = hsv_to_rgb(x) # numpy array, 0 to 1
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
return image_data, box_data
def get_random_data2(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
w, h = image.size #13 14
dx, dy = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
x_min = w
x_max = 0
y_min = h
y_max = 0
for bbox in box:
x_min = min(x_min, bbox[0])
y_min = min(y_min, bbox[1])
x_max = max(x_max, bbox[2])
y_max = max(y_max, bbox[3])
name = bbox[4]
    # distance from the smallest box containing all the target boxes to each image edge
d_to_left = x_min
d_to_right = w - x_max
d_to_top = y_min
d_to_bottom = h - y_max
    # randomly expand that minimal region
crop_x_min = int(x_min - rand(0, d_to_left))
crop_y_min = int(y_min - rand(0, d_to_top))
crop_x_max = int(x_max + rand(0, d_to_right))
crop_y_max = int(y_max + rand(0, d_to_bottom))
    # make sure the crop stays inside the image
crop_x_min = max(0, crop_x_min)
crop_y_min = max(0, crop_y_min)
crop_x_max = min(w, crop_x_max)
crop_y_max = min(h, crop_y_max)
cropped = image.crop((crop_x_min, crop_y_min, crop_x_max, crop_y_max)) # (left, upper, right, lower)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(cropped, (dx, dy))
image_data = np.array(new_image)/255.
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
        box[:,0] = box[:,0]-crop_x_min
box[:,1] = box[:,1]-crop_y_min
box[:,2] = box[:,2]-crop_x_min
box[:,3] = box[:,3]-crop_y_min
box_data[:len(box)] = box
return image_data, box_data
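# Note: the next definition reuses the name get_random_data2, so the PIL-based
# variant above is shadowed and the cv2-based version below is the one that stays bound.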
def get_random_data2(annotation_line, input_shape, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
line = annotation_line.split()
img = cv2.imread(line[0])
h_img, w_img, _ = img.shape
w, h = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
    max_bbox = np.concatenate([np.min(box[:, 0:2], axis=0), np.max(box[:, 2:4], axis=0)], axis=-1)  # bounding box enclosing all bboxes
    # distance from that enclosing box to each image edge
max_l_trans = max_bbox[0]
max_u_trans = max_bbox[1]
max_r_trans = w_img - max_bbox[2]
max_d_trans = h_img - max_bbox[3]
    # randomly expand the crop region up to those distances
crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)*2))
crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)*2))
crop_xmax = max(w_img, int(max_bbox[2] + random.uniform(0, max_r_trans)*2))
crop_ymax = max(h_img, int(max_bbox[3] + random.uniform(0, max_d_trans)*2))
    img = img[crop_ymin : crop_ymax, crop_xmin : crop_xmax]  # crop the image
    image = Image.fromarray(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))  # the image is a cv2 (BGR) array, so convert to PIL format for pasting
    new_image = Image.new('RGB', (w,h), (128,128,128))  # produce a (416, 416) gray image
    new_image.paste(image, (0, 0))  # paste the PIL image onto the gray canvas
    img2 = cv2.cvtColor(np.asarray(new_image),cv2.COLOR_RGB2BGR)  # convert back to a cv2 (BGR) array
    box_data = np.zeros((max_boxes,5))  # at most max_boxes boxes, i.e. shape -> (20, 5)
    # shift the boxes by the crop offsets so the transformed values stay in range, and drop invalid boxes
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0, 2]] = box[:, [0, 2]] - crop_xmin
box[:, [1, 3]] = box[:, [1, 3]] - crop_ymin
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
    # draw the boxes (debugging aid)
# light_blue = (255,200,100)
# for boxs in box:
# cv2.rectangle(img2,(boxs[0],boxs[1]),(boxs[2],boxs[3]),light_blue,2)
    # writename=os.path.basename(line[0])  # get the file name
# cv2.imshow('My Image', img2)
# cv2.waitKey(0)
return img2, box_data
|
[
"PIL.Image.open",
"numpy.random.rand",
"numpy.logical_and",
"PIL.Image.new",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.zeros",
"matplotlib.colors.hsv_to_rgb",
"numpy.min",
"numpy.random.shuffle"
] |
[((847, 886), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(128, 128, 128)'], {}), "('RGB', size, (128, 128, 128))\n", (856, 886), False, 'from PIL import Image\n'), ((1257, 1276), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (1267, 1276), False, 'from PIL import Image\n'), ((2591, 2632), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (2600, 2632), False, 'from PIL import Image\n'), ((3169, 3182), 'matplotlib.colors.hsv_to_rgb', 'hsv_to_rgb', (['x'], {}), '(x)\n', (3179, 3182), False, 'from matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n'), ((3241, 3265), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (3249, 3265), True, 'import numpy as np\n'), ((4090, 4109), 'PIL.Image.open', 'Image.open', (['line[0]'], {}), '(line[0])\n', (4100, 4109), False, 'from PIL import Image\n'), ((5109, 5150), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (5118, 5150), False, 'from PIL import Image\n'), ((5245, 5269), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (5253, 5269), True, 'import numpy as np\n'), ((6714, 6755), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (6723, 6755), False, 'from PIL import Image\n'), ((6921, 6945), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (6929, 6945), True, 'import numpy as np\n'), ((1882, 1906), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (1890, 1906), True, 'import numpy as np\n'), ((3292, 3314), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (3309, 3314), True, 'import numpy as np\n'), ((5204, 5223), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (5212, 5223), True, 'import numpy as np\n'), ((5296, 5318), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (5313, 5318), True, 'import numpy as np\n'), ((6853, 6874), 'numpy.asarray', 'np.asarray', (['new_image'], {}), '(new_image)\n', (6863, 6874), True, 'import numpy as np\n'), ((7052, 7074), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (7069, 7074), True, 'import numpy as np\n'), ((989, 1005), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1003, 1005), True, 'import numpy as np\n'), ((1704, 1745), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (1713, 1745), False, 'from PIL import Image\n'), ((1941, 1963), 'numpy.random.shuffle', 'np.random.shuffle', (['box'], {}), '(box)\n', (1958, 1963), True, 'import numpy as np\n'), ((2973, 2988), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2981, 2988), True, 'import numpy as np\n'), ((3667, 3703), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (3681, 3703), True, 'import numpy as np\n'), ((5954, 5981), 'numpy.min', 'np.min', (['box[:, 0:2]'], {'axis': '(0)'}), '(box[:, 0:2], axis=0)\n', (5960, 5981), True, 'import numpy as np\n'), ((5983, 6010), 'numpy.max', 'np.max', (['box[:, 2:4]'], {'axis': '(0)'}), '(box[:, 2:4], axis=0)\n', (5989, 6010), True, 'import numpy as np\n'), ((7397, 7433), 'numpy.logical_and', 'np.logical_and', (['(box_w > 1)', '(box_h > 1)'], {}), '(box_w > 1, box_h > 1)\n', (7411, 7433), True, 'import numpy as np\n'), ((1813, 1832), 'numpy.array', 'np.array', 
(['new_image'], {}), '(new_image)\n', (1821, 1832), True, 'import numpy as np\n')]
|
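For context, a short usage sketch of the augmentation helpers above. The image path and box values are hypothetical; only the annotation layout (space-separated boxes, each written as x_min,y_min,x_max,y_max,class_id) is taken from the parsing code.
# hypothetical annotation line
line = 'images/dog.jpg 12,30,200,240,0 150,80,300,260,1'
image_data, box_data = get_random_data(line, input_shape=(416, 416))
# image_data: float array of shape (416, 416, 3) with values in [0, 1]
# box_data:   (20, 5) array whose first rows hold the jittered boxes, the rest zeros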
"""
A class hierarchy relating to fields of all kinds.
"""
from __future__ import print_function, division
import numpy as np
from ciabatta.meta import make_repr_str
from fealty import lattice, field_numerics, walled_field_numerics
class Space(object):
def __init__(self, L, dim):
self.L = L
self.dim = dim
@property
def L_half(self):
return self.L / 2.0
@property
def A(self):
return self.L ** self.dim
def iterate(self, *args, **kwargs):
pass
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim)]
return make_repr_str(self, fs)
class Field(Space):
def __init__(self, L, dim, dx):
Space.__init__(self, L, dim)
self.M = int(round(self.L / dx))
@property
def dx(self):
return self.L / self.M
@property
def A_i(self):
return self.M ** self.dim
@property
def dA(self):
return self.dx ** self.dim
def density_field(self, r):
return density(r, self.L, self.dx)
def r_to_i(self, r):
return lattice.r_to_i(r, self.L, self.dx)
def i_to_r(self, i):
return lattice.i_to_r(i, self.L, self.dx)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx)]
return make_repr_str(self, fs)
class Scalar(Field):
def __init__(self, L, dim, dx, a_0=0.0):
Field.__init__(self, L, dim, dx)
self.a = np.ones(self.dim * (self.M,), dtype=np.float) * a_0
def grad(self):
return _grad(self.a, self.dx)
def grad_i(self, r):
return _grad_i(self.a, self.r_to_i(r), self.dx)
def laplacian(self):
return _laplace(self.a, self.dx)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('a_0', self.a_0)]
return make_repr_str(self, fs)
class Diffusing(Scalar):
def __init__(self, L, dim, dx, D, dt, a_0=0.0):
Scalar.__init__(self, L, dim, dx, a_0=a_0)
self.D = D
self.dt = dt
if self.D > self.dx ** 2 / (2.0 * self.dim * self.dt):
raise Exception('Unstable diffusion constant')
def iterate(self):
self.a += self.D * self.laplacian() * self.dt
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('D', self.D), ('dt', self.dt), ('a_0', self.a_0)]
return make_repr_str(self, fs)
class WalledScalar(Scalar):
def __init__(self, L, dim, dx, walls, a_0=0.0):
Scalar.__init__(self, L, dim, dx, a_0=a_0)
self.walls = walls
# Make field zero-valued where obstructed
self.a *= np.logical_not(self.walls)
def grad(self):
return _walled_grad(self.a, self.dx, self.walls)
def grad_i(self, r):
return _walled_grad_i(self.a, self.r_to_i(r), self.dx,
self.walls)
def laplacian(self):
return _walled_laplace(self.a, self.dx, self.walls)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('walls', self.walls), ('a_0', self.a_0)]
return make_repr_str(self, fs)
# Note, inheritance order matters to get the walled grad & laplacian calls
# (see diamond problem on wikipedia and how python handles it)
class WalledDiffusing(WalledScalar, Diffusing):
def __init__(self, L, dim, dx, walls, D, dt, a_0=0.0):
Diffusing.__init__(self, L, dim, dx, D, dt, a_0=a_0)
WalledScalar.__init__(self, L, dim, dx, walls, a_0=a_0)
def __repr__(self):
fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx),
('walls', self.walls), ('D', self.D), ('dt', self.dt),
('a_0', self.a_0)]
return make_repr_str(self, fs)
def density(r, L, dx):
assert r.ndim == 2
M = int(round(L / dx))
dx = L / M
inds = lattice.r_to_i(r, L, dx)
f = np.zeros(r.shape[1] * (M,), dtype=np.int)
if f.ndim == 1:
field_numerics.density_1d(inds, f)
elif f.ndim == 2:
field_numerics.density_2d(inds, f)
elif f.ndim == 3:
field_numerics.density_3d(inds, f)
else:
raise Exception('Density calc not implemented in this dimension')
return f / dx ** r.shape[1]
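# Illustrative: in 1D with L = 10.0 and dx = 1.0, two positions falling in the same
# cell contribute 2 counts, so that cell of the returned field holds 2.0 / 1.0 = 2.0.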
def _laplace(field, dx):
assert dx > 0.0
laplace = np.empty_like(field)
if field.ndim == 1:
field_numerics.laplace_1d(field, laplace, dx)
elif field.ndim == 2:
field_numerics.laplace_2d(field, laplace, dx)
elif field.ndim == 3:
field_numerics.laplace_3d(field, laplace, dx)
else:
raise Exception('Laplacian not implemented in this dimension')
return laplace
def _grad_i(field, inds, dx):
assert dx > 0.0
assert inds.ndim == 2
assert field.ndim == inds.shape[1]
grad_i = np.empty(inds.shape, dtype=field.dtype)
if field.ndim == 1:
field_numerics.grad_i_1d(field, inds, grad_i, dx)
elif field.ndim == 2:
field_numerics.grad_i_2d(field, inds, grad_i, dx)
elif field.ndim == 3:
field_numerics.grad_i_3d(field, grad_i, dx)
else:
raise Exception("Grad_i not implemented in this dimension")
return grad_i
def _grad(field, dx):
assert dx > 0.0
grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype)
if field.ndim == 1:
field_numerics.grad_1d(field, grad, dx)
elif field.ndim == 2:
field_numerics.grad_2d(field, grad, dx)
elif field.ndim == 3:
field_numerics.grad_3d(field, grad, dx)
else:
raise Exception('Grad not implemented in this dimension')
return grad
def _div(field, dx):
assert dx > 0.0
div = np.empty(field.shape[:-1], dtype=field.dtype)
if field.ndim == 2:
field_numerics.div_1d(field, div, dx)
elif field.ndim == 3:
field_numerics.div_2d(field, div, dx)
elif field.ndim == 4:
field_numerics.div_3d(field, div, dx)
else:
raise Exception('Divergence not implemented in this dimension')
return div
def _walled_grad(field, dx, walls):
assert field.shape == walls.shape
assert dx > 0.0
grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype)
if field.ndim == 1:
walled_field_numerics.grad_1d(field, grad, dx, walls)
elif field.ndim == 2:
walled_field_numerics.grad_2d(field, grad, dx, walls)
elif field.ndim == 3:
walled_field_numerics.grad_3d(field, grad, dx, walls)
else:
raise Exception("Walled grad not implemented in this dimension")
return grad
def _walled_grad_i(field, inds, dx, walls):
assert field.shape == walls.shape
assert dx > 0.0
assert inds.ndim == 2
assert field.ndim == inds.shape[1]
grad_i = np.empty(inds.shape, dtype=field.dtype)
if field.ndim == 1:
walled_field_numerics.grad_i_1d(field, inds, grad_i, dx, walls)
elif field.ndim == 2:
walled_field_numerics.grad_i_2d(field, inds, grad_i, dx, walls)
elif field.ndim == 3:
walled_field_numerics.grad_i_3d(field, inds, grad_i, dx, walls)
else:
raise Exception("Walled Grad_i not implemented in this dimension")
return grad_i
def _walled_laplace(field, dx, walls):
assert field.shape == walls.shape
assert dx > 0.0
laplace = np.empty_like(field)
if field.ndim == 1:
walled_field_numerics.laplace_1d(field, laplace, dx, walls)
elif field.ndim == 2:
walled_field_numerics.laplace_2d(field, laplace, dx, walls)
elif field.ndim == 3:
walled_field_numerics.laplace_3d(field, laplace, dx, walls)
else:
raise Exception('Laplacian not implemented in this dimension')
return laplace
|
[
"numpy.logical_not",
"fealty.field_numerics.density_3d",
"fealty.walled_field_numerics.grad_3d",
"fealty.field_numerics.div_1d",
"fealty.lattice.i_to_r",
"ciabatta.meta.make_repr_str",
"fealty.walled_field_numerics.grad_i_1d",
"fealty.field_numerics.grad_i_1d",
"numpy.empty",
"fealty.walled_field_numerics.laplace_2d",
"fealty.field_numerics.laplace_2d",
"fealty.field_numerics.div_2d",
"fealty.field_numerics.grad_3d",
"numpy.ones",
"fealty.walled_field_numerics.grad_i_2d",
"fealty.field_numerics.laplace_1d",
"fealty.field_numerics.density_2d",
"fealty.field_numerics.div_3d",
"fealty.walled_field_numerics.grad_2d",
"fealty.field_numerics.grad_2d",
"fealty.lattice.r_to_i",
"fealty.walled_field_numerics.grad_1d",
"fealty.field_numerics.grad_i_3d",
"fealty.field_numerics.density_1d",
"fealty.walled_field_numerics.grad_i_3d",
"fealty.walled_field_numerics.laplace_3d",
"fealty.field_numerics.grad_1d",
"fealty.field_numerics.grad_i_2d",
"numpy.zeros",
"numpy.empty_like",
"fealty.field_numerics.laplace_3d",
"fealty.walled_field_numerics.laplace_1d"
] |
[((3872, 3896), 'fealty.lattice.r_to_i', 'lattice.r_to_i', (['r', 'L', 'dx'], {}), '(r, L, dx)\n', (3886, 3896), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((3905, 3946), 'numpy.zeros', 'np.zeros', (['(r.shape[1] * (M,))'], {'dtype': 'np.int'}), '(r.shape[1] * (M,), dtype=np.int)\n', (3913, 3946), True, 'import numpy as np\n'), ((4317, 4337), 'numpy.empty_like', 'np.empty_like', (['field'], {}), '(field)\n', (4330, 4337), True, 'import numpy as np\n'), ((4806, 4845), 'numpy.empty', 'np.empty', (['inds.shape'], {'dtype': 'field.dtype'}), '(inds.shape, dtype=field.dtype)\n', (4814, 4845), True, 'import numpy as np\n'), ((5241, 5297), 'numpy.empty', 'np.empty', (['(field.shape + (field.ndim,))'], {'dtype': 'field.dtype'}), '(field.shape + (field.ndim,), dtype=field.dtype)\n', (5249, 5297), True, 'import numpy as np\n'), ((5663, 5708), 'numpy.empty', 'np.empty', (['field.shape[:-1]'], {'dtype': 'field.dtype'}), '(field.shape[:-1], dtype=field.dtype)\n', (5671, 5708), True, 'import numpy as np\n'), ((6127, 6183), 'numpy.empty', 'np.empty', (['(field.shape + (field.ndim,))'], {'dtype': 'field.dtype'}), '(field.shape + (field.ndim,), dtype=field.dtype)\n', (6135, 6183), True, 'import numpy as np\n'), ((6727, 6766), 'numpy.empty', 'np.empty', (['inds.shape'], {'dtype': 'field.dtype'}), '(inds.shape, dtype=field.dtype)\n', (6735, 6766), True, 'import numpy as np\n'), ((7275, 7295), 'numpy.empty_like', 'np.empty_like', (['field'], {}), '(field)\n', (7288, 7295), True, 'import numpy as np\n'), ((603, 626), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (616, 626), False, 'from ciabatta.meta import make_repr_str\n'), ((1081, 1115), 'fealty.lattice.r_to_i', 'lattice.r_to_i', (['r', 'self.L', 'self.dx'], {}), '(r, self.L, self.dx)\n', (1095, 1115), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((1157, 1191), 'fealty.lattice.i_to_r', 'lattice.i_to_r', (['i', 'self.L', 'self.dx'], {}), '(i, self.L, self.dx)\n', (1171, 1191), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((1297, 1320), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (1310, 1320), False, 'from ciabatta.meta import make_repr_str\n'), ((1846, 1869), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (1859, 1869), False, 'from ciabatta.meta import make_repr_str\n'), ((2412, 2435), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (2425, 2435), False, 'from ciabatta.meta import make_repr_str\n'), ((2665, 2691), 'numpy.logical_not', 'np.logical_not', (['self.walls'], {}), '(self.walls)\n', (2679, 2691), True, 'import numpy as np\n'), ((3148, 3171), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (3161, 3171), False, 'from ciabatta.meta import make_repr_str\n'), ((3747, 3770), 'ciabatta.meta.make_repr_str', 'make_repr_str', (['self', 'fs'], {}), '(self, fs)\n', (3760, 3770), False, 'from ciabatta.meta import make_repr_str\n'), ((3975, 4009), 'fealty.field_numerics.density_1d', 'field_numerics.density_1d', (['inds', 'f'], {}), '(inds, f)\n', (4000, 4009), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4370, 4415), 'fealty.field_numerics.laplace_1d', 'field_numerics.laplace_1d', (['field', 'laplace', 'dx'], {}), '(field, laplace, dx)\n', (4395, 4415), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), 
((4878, 4927), 'fealty.field_numerics.grad_i_1d', 'field_numerics.grad_i_1d', (['field', 'inds', 'grad_i', 'dx'], {}), '(field, inds, grad_i, dx)\n', (4902, 4927), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5330, 5369), 'fealty.field_numerics.grad_1d', 'field_numerics.grad_1d', (['field', 'grad', 'dx'], {}), '(field, grad, dx)\n', (5352, 5369), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5741, 5778), 'fealty.field_numerics.div_1d', 'field_numerics.div_1d', (['field', 'div', 'dx'], {}), '(field, div, dx)\n', (5762, 5778), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6216, 6269), 'fealty.walled_field_numerics.grad_1d', 'walled_field_numerics.grad_1d', (['field', 'grad', 'dx', 'walls'], {}), '(field, grad, dx, walls)\n', (6245, 6269), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6799, 6862), 'fealty.walled_field_numerics.grad_i_1d', 'walled_field_numerics.grad_i_1d', (['field', 'inds', 'grad_i', 'dx', 'walls'], {}), '(field, inds, grad_i, dx, walls)\n', (6830, 6862), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((7328, 7387), 'fealty.walled_field_numerics.laplace_1d', 'walled_field_numerics.laplace_1d', (['field', 'laplace', 'dx', 'walls'], {}), '(field, laplace, dx, walls)\n', (7360, 7387), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((1448, 1493), 'numpy.ones', 'np.ones', (['(self.dim * (self.M,))'], {'dtype': 'np.float'}), '(self.dim * (self.M,), dtype=np.float)\n', (1455, 1493), True, 'import numpy as np\n'), ((4040, 4074), 'fealty.field_numerics.density_2d', 'field_numerics.density_2d', (['inds', 'f'], {}), '(inds, f)\n', (4065, 4074), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4450, 4495), 'fealty.field_numerics.laplace_2d', 'field_numerics.laplace_2d', (['field', 'laplace', 'dx'], {}), '(field, laplace, dx)\n', (4475, 4495), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4962, 5011), 'fealty.field_numerics.grad_i_2d', 'field_numerics.grad_i_2d', (['field', 'inds', 'grad_i', 'dx'], {}), '(field, inds, grad_i, dx)\n', (4986, 5011), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5404, 5443), 'fealty.field_numerics.grad_2d', 'field_numerics.grad_2d', (['field', 'grad', 'dx'], {}), '(field, grad, dx)\n', (5426, 5443), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5813, 5850), 'fealty.field_numerics.div_2d', 'field_numerics.div_2d', (['field', 'div', 'dx'], {}), '(field, div, dx)\n', (5834, 5850), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6304, 6357), 'fealty.walled_field_numerics.grad_2d', 'walled_field_numerics.grad_2d', (['field', 'grad', 'dx', 'walls'], {}), '(field, grad, dx, walls)\n', (6333, 6357), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6897, 6960), 'fealty.walled_field_numerics.grad_i_2d', 'walled_field_numerics.grad_i_2d', (['field', 'inds', 'grad_i', 'dx', 'walls'], {}), '(field, inds, grad_i, dx, walls)\n', (6928, 6960), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((7422, 7481), 'fealty.walled_field_numerics.laplace_2d', 'walled_field_numerics.laplace_2d', (['field', 'laplace', 'dx', 'walls'], {}), '(field, laplace, dx, walls)\n', (7454, 7481), False, 'from fealty import lattice, field_numerics, 
walled_field_numerics\n'), ((4105, 4139), 'fealty.field_numerics.density_3d', 'field_numerics.density_3d', (['inds', 'f'], {}), '(inds, f)\n', (4130, 4139), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((4530, 4575), 'fealty.field_numerics.laplace_3d', 'field_numerics.laplace_3d', (['field', 'laplace', 'dx'], {}), '(field, laplace, dx)\n', (4555, 4575), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5046, 5089), 'fealty.field_numerics.grad_i_3d', 'field_numerics.grad_i_3d', (['field', 'grad_i', 'dx'], {}), '(field, grad_i, dx)\n', (5070, 5089), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5478, 5517), 'fealty.field_numerics.grad_3d', 'field_numerics.grad_3d', (['field', 'grad', 'dx'], {}), '(field, grad, dx)\n', (5500, 5517), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((5885, 5922), 'fealty.field_numerics.div_3d', 'field_numerics.div_3d', (['field', 'div', 'dx'], {}), '(field, div, dx)\n', (5906, 5922), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6392, 6445), 'fealty.walled_field_numerics.grad_3d', 'walled_field_numerics.grad_3d', (['field', 'grad', 'dx', 'walls'], {}), '(field, grad, dx, walls)\n', (6421, 6445), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((6995, 7058), 'fealty.walled_field_numerics.grad_i_3d', 'walled_field_numerics.grad_i_3d', (['field', 'inds', 'grad_i', 'dx', 'walls'], {}), '(field, inds, grad_i, dx, walls)\n', (7026, 7058), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n'), ((7516, 7575), 'fealty.walled_field_numerics.laplace_3d', 'walled_field_numerics.laplace_3d', (['field', 'laplace', 'dx', 'walls'], {}), '(field, laplace, dx, walls)\n', (7548, 7575), False, 'from fealty import lattice, field_numerics, walled_field_numerics\n')]
|
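A usage sketch, not taken from the source: the parameter values and the import path are made up; only the constructor signature and the stability bound checked in Diffusing.__init__ come from the code above.
import numpy as np
# from fields import WalledDiffusing   # assumed module/import path
L, dim, dx = 1.0, 2, 0.05              # 20 x 20 lattice
M = int(round(L / dx))
walls = np.zeros(dim * (M,), dtype=bool)
walls[0, :] = True                     # obstruct one edge of the domain
dt = 0.01
D = 0.9 * dx ** 2 / (2.0 * dim * dt)   # keep D below dx^2 / (2 * dim * dt)
c = WalledDiffusing(L, dim, dx, walls, D, dt, a_0=1.0)
for _ in range(100):
    c.iterate()                        # explicit diffusion step that respects the walls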
# -*- coding: future_fstrings -*-
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys, os, json
import numpy as np
from ctypes import *
from casadi import CasadiMeta, Function, SX
from copy import deepcopy
from .generate_c_code_explicit_ode import generate_c_code_explicit_ode
from .generate_c_code_implicit_ode import generate_c_code_implicit_ode
from .generate_c_code_gnsf import generate_c_code_gnsf
from .generate_c_code_constraint import generate_c_code_constraint
from .generate_c_code_nls_cost import generate_c_code_nls_cost
from .generate_c_code_external_cost import generate_c_code_external_cost
from .acados_ocp import AcadosOcp
from .acados_model import acados_model_strip_casadi_symbolics
from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\
format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\
set_up_imported_gnsf_model
def make_ocp_dims_consistent(acados_ocp):
dims = acados_ocp.dims
cost = acados_ocp.cost
constraints = acados_ocp.constraints
model = acados_ocp.model
opts = acados_ocp.solver_options
# nx
if is_column(model.x):
dims.nx = casadi_length(model.x)
else:
raise Exception('model.x should be column vector!')
# nu
if is_empty(model.u):
dims.nu = 0
else:
dims.nu = casadi_length(model.u)
# nz
if is_empty(model.z):
dims.nz = 0
else:
dims.nz = casadi_length(model.z)
# np
if is_empty(model.p):
dims.np = 0
else:
dims.np = casadi_length(model.p)
if acados_ocp.parameter_values.shape[0] != dims.np:
raise Exception('inconsistent dimension np, regarding model.p and parameter_values.')
## cost
# path
if cost.cost_type == 'LINEAR_LS':
ny = cost.W.shape[0]
if cost.Vx.shape[0] != ny or cost.Vu.shape[0] != ny:
raise Exception('inconsistent dimension ny, regarding W, Vx, Vu.' + \
f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}]\n')
if dims.nz != 0 and cost.Vz.shape[0] != ny:
raise Exception('inconsistent dimension ny, regarding W, Vx, Vu, Vz.' + \
f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}], Vz[{cost.Vz.shape}]\n')
if cost.Vx.shape[1] != dims.nx and ny != 0:
raise Exception('inconsistent dimension: Vx should have nx columns.')
if cost.Vu.shape[1] != dims.nu and ny != 0:
raise Exception('inconsistent dimension: Vu should have nu columns.')
if cost.yref.shape[0] != ny:
raise Exception('inconsistent dimension: regarding W, yref.' + \
f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n')
dims.ny = ny
elif cost.cost_type == 'NONLINEAR_LS':
ny = cost.W.shape[0]
if is_empty(model.cost_y_expr) and ny != 0:
raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.')
elif casadi_length(model.cost_y_expr) != ny:
raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.')
if cost.yref.shape[0] != ny:
raise Exception('inconsistent dimension: regarding W, yref.' + \
f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n')
dims.ny = ny
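    # Illustrative example (not from the acados source): for a stage cost over
    # y = [x; u] with nx = 4 and nu = 1, a consistent LINEAR_LS setup has ny = 5,
    # W of shape (5, 5), Vx of shape (5, 4), Vu of shape (5, 1) and yref of shape
    # (5,), which is exactly what the checks above enforce.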
# terminal
if cost.cost_type_e == 'LINEAR_LS':
ny_e = cost.W_e.shape[0]
if cost.Vx_e.shape[0] != ny_e:
            raise Exception('inconsistent dimension ny_e: regarding W_e, Vx_e.' + \
f'\nGot W_e[{cost.W_e.shape}], Vx_e[{cost.Vx_e.shape}]')
if cost.Vx_e.shape[1] != dims.nx and ny_e != 0:
raise Exception('inconsistent dimension: Vx_e should have nx columns.')
if cost.yref_e.shape[0] != ny_e:
raise Exception('inconsistent dimension: regarding W_e, yref_e.')
dims.ny_e = ny_e
elif cost.cost_type_e == 'NONLINEAR_LS':
ny_e = cost.W_e.shape[0]
if is_empty(model.cost_y_expr_e) and ny_e != 0:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.')
elif casadi_length(model.cost_y_expr_e) != ny_e:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.')
if cost.yref_e.shape[0] != ny_e:
raise Exception('inconsistent dimension: regarding W_e, yref_e.')
dims.ny_e = ny_e
## constraints
# initial
if (constraints.lbx_0 == [] and constraints.ubx_0 == []):
dims.nbx_0 = 0
else:
this_shape = constraints.lbx_0.shape
other_shape = constraints.ubx_0.shape
if not this_shape == other_shape:
raise Exception('lbx_0, ubx_0 have different shapes!')
if not is_column(constraints.lbx_0):
raise Exception('lbx_0, ubx_0 must be column vectors!')
dims.nbx_0 = constraints.lbx_0.size
if all(constraints.lbx_0 == constraints.ubx_0):
dims.nbxe_0 = dims.nbx_0
# path
nbx = constraints.idxbx.shape[0]
if constraints.ubx.shape[0] != nbx or constraints.lbx.shape[0] != nbx:
raise Exception('inconsistent dimension nbx, regarding idxbx, ubx, lbx.')
else:
dims.nbx = nbx
nbu = constraints.idxbu.shape[0]
if constraints.ubu.shape[0] != nbu or constraints.lbu.shape[0] != nbu:
raise Exception('inconsistent dimension nbu, regarding idxbu, ubu, lbu.')
else:
dims.nbu = nbu
ng = constraints.lg.shape[0]
if constraints.ug.shape[0] != ng or constraints.C.shape[0] != ng \
or constraints.D.shape[0] != ng:
raise Exception('inconsistent dimension ng, regarding lg, ug, C, D.')
else:
dims.ng = ng
if not is_empty(model.con_h_expr):
nh = casadi_length(model.con_h_expr)
else:
nh = 0
if constraints.uh.shape[0] != nh or constraints.lh.shape[0] != nh:
raise Exception('inconsistent dimension nh, regarding lh, uh, con_h_expr.')
else:
dims.nh = nh
if is_empty(model.con_phi_expr):
dims.nphi = 0
dims.nr = 0
else:
dims.nphi = casadi_length(model.con_phi_expr)
if is_empty(model.con_r_expr):
raise Exception('convex over nonlinear constraints: con_phi_expr is nonempty, but con_r_expr is empty.')
else:
dims.nr = casadi_length(model.con_r_expr)
# terminal
nbx_e = constraints.idxbx_e.shape[0]
if constraints.ubx_e.shape[0] != nbx_e or constraints.lbx_e.shape[0] != nbx_e:
raise Exception('inconsistent dimension nbx_e, regarding idxbx_e, ubx_e, lbx_e.')
else:
dims.nbx_e = nbx_e
ng_e = constraints.lg_e.shape[0]
if constraints.ug_e.shape[0] != ng_e or constraints.C_e.shape[0] != ng_e:
raise Exception('inconsistent dimension ng_e, regarding lg_e, ug_e, C_e.')
else:
dims.ng_e = ng_e
if not is_empty(model.con_h_expr_e):
nh_e = casadi_length(model.con_h_expr_e)
else:
nh_e = 0
if constraints.uh_e.shape[0] != nh_e or constraints.lh_e.shape[0] != nh_e:
raise Exception('inconsistent dimension nh_e, regarding lh_e, uh_e, con_h_expr_e.')
else:
dims.nh_e = nh_e
if is_empty(model.con_phi_expr_e):
dims.nphi_e = 0
dims.nr_e = 0
else:
dims.nphi_e = casadi_length(model.con_phi_expr_e)
if is_empty(model.con_r_expr_e):
raise Exception('convex over nonlinear constraints: con_phi_expr_e is nonempty, but con_r_expr_e is empty.')
else:
dims.nr_e = casadi_length(model.con_r_expr_e)
# Slack dimensions
nsbx = constraints.idxsbx.shape[0]
if is_empty(constraints.lsbx):
constraints.lsbx = np.zeros((nsbx,))
elif constraints.lsbx.shape[0] != nsbx:
raise Exception('inconsistent dimension nsbx, regarding idxsbx, lsbx.')
if is_empty(constraints.usbx):
constraints.usbx = np.zeros((nsbx,))
elif constraints.usbx.shape[0] != nsbx:
raise Exception('inconsistent dimension nsbx, regarding idxsbx, usbx.')
dims.nsbx = nsbx
nsbu = constraints.idxsbu.shape[0]
if is_empty(constraints.lsbu):
constraints.lsbu = np.zeros((nsbu,))
elif constraints.lsbu.shape[0] != nsbu:
raise Exception('inconsistent dimension nsbu, regarding idxsbu, lsbu.')
if is_empty(constraints.usbu):
constraints.usbu = np.zeros((nsbu,))
elif constraints.usbu.shape[0] != nsbu:
raise Exception('inconsistent dimension nsbu, regarding idxsbu, usbu.')
dims.nsbu = nsbu
nsh = constraints.idxsh.shape[0]
if is_empty(constraints.lsh):
constraints.lsh = np.zeros((nsh,))
elif constraints.lsh.shape[0] != nsh:
raise Exception('inconsistent dimension nsh, regarding idxsh, lsh.')
if is_empty(constraints.ush):
constraints.ush = np.zeros((nsh,))
elif constraints.ush.shape[0] != nsh:
raise Exception('inconsistent dimension nsh, regarding idxsh, ush.')
dims.nsh = nsh
nsphi = constraints.idxsphi.shape[0]
if is_empty(constraints.lsphi):
constraints.lsphi = np.zeros((nsphi,))
elif constraints.lsphi.shape[0] != nsphi:
raise Exception('inconsistent dimension nsphi, regarding idxsphi, lsphi.')
if is_empty(constraints.usphi):
constraints.usphi = np.zeros((nsphi,))
elif constraints.usphi.shape[0] != nsphi:
raise Exception('inconsistent dimension nsphi, regarding idxsphi, usphi.')
dims.nsphi = nsphi
nsg = constraints.idxsg.shape[0]
if is_empty(constraints.lsg):
constraints.lsg = np.zeros((nsg,))
elif constraints.lsg.shape[0] != nsg:
raise Exception('inconsistent dimension nsg, regarding idxsg, lsg.')
if is_empty(constraints.usg):
constraints.usg = np.zeros((nsg,))
elif constraints.usg.shape[0] != nsg:
raise Exception('inconsistent dimension nsg, regarding idxsg, usg.')
dims.nsg = nsg
ns = nsbx + nsbu + nsh + nsg + nsphi
wrong_field = ""
if cost.Zl.shape[0] != ns:
wrong_field = "Zl"
dim = cost.Zl.shape[0]
elif cost.Zu.shape[0] != ns:
wrong_field = "Zu"
dim = cost.Zu.shape[0]
elif cost.zl.shape[0] != ns:
wrong_field = "zl"
dim = cost.zl.shape[0]
elif cost.zu.shape[0] != ns:
wrong_field = "zu"
dim = cost.zu.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns = {ns} = nsbx + nsbu + nsg + nsh + nsphi.\n\t'\
+ f'With nsbx = {nsbx}, nsbu = {nsbu}, nsg = {nsg}, nsh = {nsh}, nsphi = {nsphi}')
dims.ns = ns
nsbx_e = constraints.idxsbx_e.shape[0]
if is_empty(constraints.lsbx_e):
constraints.lsbx_e = np.zeros((nsbx_e,))
elif constraints.lsbx_e.shape[0] != nsbx_e:
raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, lsbx_e.')
if is_empty(constraints.usbx_e):
constraints.usbx_e = np.zeros((nsbx_e,))
elif constraints.usbx_e.shape[0] != nsbx_e:
raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, usbx_e.')
dims.nsbx_e = nsbx_e
nsh_e = constraints.idxsh_e.shape[0]
if is_empty(constraints.lsh_e):
constraints.lsh_e = np.zeros((nsh_e,))
elif constraints.lsh_e.shape[0] != nsh_e:
raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, lsh_e.')
if is_empty(constraints.ush_e):
constraints.ush_e = np.zeros((nsh_e,))
elif constraints.ush_e.shape[0] != nsh_e:
raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, ush_e.')
dims.nsh_e = nsh_e
nsg_e = constraints.idxsg_e.shape[0]
if is_empty(constraints.lsg_e):
constraints.lsg_e = np.zeros((nsg_e,))
elif constraints.lsg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, lsg_e.')
if is_empty(constraints.usg_e):
constraints.usg_e = np.zeros((nsg_e,))
elif constraints.usg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.')
dims.nsg_e = nsg_e
nsphi_e = constraints.idxsphi_e.shape[0]
if is_empty(constraints.lsphi_e):
constraints.lsphi_e = np.zeros((nsphi_e,))
elif constraints.lsphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.')
if is_empty(constraints.usphi_e):
constraints.usphi_e = np.zeros((nsphi_e,))
elif constraints.usphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.')
dims.nsphi_e = nsphi_e
# terminal
ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e
wrong_field = ""
if cost.Zl_e.shape[0] != ns_e:
wrong_field = "Zl_e"
dim = cost.Zl_e.shape[0]
elif cost.Zu_e.shape[0] != ns_e:
wrong_field = "Zu_e"
dim = cost.Zu_e.shape[0]
elif cost.zl_e.shape[0] != ns_e:
wrong_field = "zl_e"
dim = cost.zl_e.shape[0]
elif cost.zu_e.shape[0] != ns_e:
wrong_field = "zu_e"
dim = cost.zu_e.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\
+ f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}')
dims.ns_e = ns_e
# discretization
if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes):
# uniform discretization
opts.time_steps = opts.tf / dims.N * np.ones((dims.N,))
elif not is_empty(opts.shooting_nodes):
if np.shape(opts.shooting_nodes)[0] != dims.N+1:
raise Exception('inconsistent dimension N, regarding shooting_nodes.')
time_steps = np.zeros((dims.N,))
for i in range(dims.N):
time_steps[i] = opts.shooting_nodes[i+1] - opts.shooting_nodes[i]
opts.time_steps = time_steps
elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)):
raise Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization')
tf = np.sum(opts.time_steps)
if (tf - opts.tf) / tf > 1e-15:
raise Exception(f'Inconsistent discretization: {opts.tf}'\
f' = tf != sum(opts.time_steps) = {tf}.')
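# Illustrative sketch (not part of the original source): how the discretization options
# handled above are typically filled in. `ocp` is assumed to be an AcadosOcp instance.
#   # uniform grid: leave time_steps and shooting_nodes empty, tf/N is used
#   ocp.dims.N = 20
#   ocp.solver_options.tf = 1.0
#   # nonuniform grid: provide N+1 shooting nodes; time_steps are derived as their differences
#   ocp.solver_options.shooting_nodes = np.linspace(0.0, 1.0, 21)**2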
def get_ocp_nlp_layout():
current_module = sys.modules[__name__]
acados_path = os.path.dirname(current_module.__file__)
with open(acados_path + '/acados_layout.json', 'r') as f:
ocp_nlp_layout = json.load(f)
return ocp_nlp_layout
def ocp_formulation_json_dump(acados_ocp, json_file='acados_ocp_nlp.json'):
# Load acados_ocp_nlp structure description
ocp_layout = get_ocp_nlp_layout()
# Copy input ocp object dictionary
ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__)
# TODO: maybe make one function with formatting
for acados_struct, v in ocp_layout.items():
# skip non dict attributes
if not isinstance(v, dict): continue
# setattr(ocp_nlp, acados_struct, dict(getattr(acados_ocp, acados_struct).__dict__))
# Copy ocp object attributes dictionaries
ocp_nlp_dict[acados_struct]=dict(getattr(acados_ocp, acados_struct).__dict__)
ocp_nlp_dict = format_class_dict(ocp_nlp_dict)
# strip symbolics
ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model'])
# strip shooting_nodes
ocp_nlp_dict['solver_options'].pop('shooting_nodes', None)
dims_dict = acados_class2dict(acados_ocp.dims)
ocp_check_against_layout(ocp_nlp_dict, dims_dict)
with open(json_file, 'w') as f:
json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True)
def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'):
# Load acados_ocp_nlp structure description
ocp_layout = get_ocp_nlp_layout()
with open(json_file, 'r') as f:
ocp_nlp_json = json.load(f)
ocp_nlp_dict = json2dict(ocp_nlp_json, ocp_nlp_json['dims'])
# Instantiate AcadosOcp object
acados_ocp = AcadosOcp()
# load class dict
acados_ocp.__dict__ = ocp_nlp_dict
# load class attributes dict: dims, constraints, etc.
for acados_struct, v in ocp_layout.items():
# skip non dict attributes
if not isinstance(v, dict): continue
acados_attribute = getattr(acados_ocp, acados_struct)
acados_attribute.__dict__ = ocp_nlp_dict[acados_struct]
setattr(acados_ocp, acados_struct, acados_attribute)
return acados_ocp
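# Illustrative round-trip sketch (assumes `ocp` is a fully populated AcadosOcp object):
#   ocp_formulation_json_dump(ocp, json_file='acados_ocp_nlp.json')
#   ocp_reloaded = ocp_formulation_json_load('acados_ocp_nlp.json')
# Note that CasADi symbolics are stripped on dump, so the reloaded object only carries
# the numerical formulation.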
def ocp_generate_external_functions(acados_ocp, model):
model = make_model_consistent(model)
if acados_ocp.solver_options.integrator_type == 'ERK':
# explicit model -- generate C code
generate_c_code_explicit_ode(model)
elif acados_ocp.solver_options.integrator_type == 'IRK':
# implicit model -- generate C code
opts = dict(generate_hess=1)
generate_c_code_implicit_ode(model, opts)
elif acados_ocp.solver_options.integrator_type == 'GNSF':
generate_c_code_gnsf(model)
else:
raise Exception("ocp_generate_external_functions: unknown integrator type.")
if acados_ocp.solver_options.hessian_approx == 'EXACT':
opts = dict(generate_hess=1)
else:
opts = dict(generate_hess=0)
if acados_ocp.dims.nphi > 0 or acados_ocp.dims.nh > 0:
generate_c_code_constraint(model, model.name, False, opts)
if acados_ocp.dims.nphi_e > 0 or acados_ocp.dims.nh_e > 0:
generate_c_code_constraint(model, model.name, True, opts)
# dummy matrices
if not acados_ocp.cost.cost_type == 'LINEAR_LS':
acados_ocp.cost.Vx = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nx))
acados_ocp.cost.Vu = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nu))
if not acados_ocp.cost.cost_type_e == 'LINEAR_LS':
acados_ocp.cost.Vx_e = np.zeros((acados_ocp.dims.ny_e, acados_ocp.dims.nx))
if acados_ocp.cost.cost_type == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, False)
elif acados_ocp.cost.cost_type == 'EXTERNAL':
generate_c_code_external_cost(model, False)
if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, True)
elif acados_ocp.cost.cost_type_e == 'EXTERNAL':
generate_c_code_external_cost(model, True)
def ocp_render_templates(acados_ocp, json_file):
name = acados_ocp.model.name
# setting up loader and environment
json_path = '{cwd}/{json_file}'.format(
cwd=os.getcwd(),
json_file=json_file)
if not os.path.exists(json_path):
raise Exception('{} not found!'.format(json_path))
template_dir = 'c_generated_code/'
## Render templates
in_file = 'main.in.c'
out_file = 'main_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver.in.c'
out_file = 'acados_solver_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver.in.h'
out_file = 'acados_solver_{}.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'Makefile.in'
out_file = 'Makefile'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver_sfun.in.c'
out_file = 'acados_solver_sfunction_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'make_sfun.in.m'
out_file = 'make_sfun.m'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_sim_solver.in.c'
out_file = 'acados_sim_solver_{}.c'.format(name)
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_sim_solver.in.h'
out_file = 'acados_sim_solver_{}.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
## folder model
template_dir = 'c_generated_code/{}_model/'.format(name)
in_file = 'model.in.h'
out_file = '{}_model.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# constraints on convex over nonlinear function
if acados_ocp.constraints.constr_type == 'BGP' and acados_ocp.dims.nphi > 0:
# constraints on outer function
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'phi_constraint.in.h'
out_file = '{}_phi_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# terminal constraints on convex over nonlinear function
if acados_ocp.constraints.constr_type_e == 'BGP' and acados_ocp.dims.nphi_e > 0:
# terminal constraints on outer function
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'phi_e_constraint.in.h'
out_file = '{}_phi_e_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# nonlinear constraints
if acados_ocp.constraints.constr_type == 'BGH' and acados_ocp.dims.nh > 0:
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'h_constraint.in.h'
out_file = '{}_h_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# terminal nonlinear constraints
if acados_ocp.constraints.constr_type_e == 'BGH' and acados_ocp.dims.nh_e > 0:
template_dir = 'c_generated_code/{}_constraints/'.format(name)
in_file = 'h_e_constraint.in.h'
out_file = '{}_h_e_constraint.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# nonlinear cost function
if acados_ocp.cost.cost_type == 'NONLINEAR_LS':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'cost_y_fun.in.h'
out_file = '{}_cost_y_fun.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# terminal nonlinear cost function
if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'cost_y_e_fun.in.h'
out_file = '{}_cost_y_e_fun.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# external cost
if acados_ocp.cost.cost_type == 'EXTERNAL':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'external_cost.in.h'
out_file = '{}_external_cost.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
# external cost - terminal
if acados_ocp.cost.cost_type_e == 'EXTERNAL':
template_dir = 'c_generated_code/{}_cost/'.format(name)
in_file = 'external_cost_e.in.h'
out_file = '{}_external_cost_e.h'.format(name)
render_template(in_file, out_file, template_dir, json_path)
class AcadosOcpSolver:
"""
class to interact with the acados ocp solver C object
"""
def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json'):
self.solver_created = False
model = acados_ocp.model
# make dims consistent
make_ocp_dims_consistent(acados_ocp)
if acados_ocp.solver_options.integrator_type == 'GNSF':
set_up_imported_gnsf_model(acados_ocp)
# set integrator time automatically
acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0]
# generate external functions
ocp_generate_external_functions(acados_ocp, model)
# dump to json
ocp_formulation_json_dump(acados_ocp, json_file)
# render templates
ocp_render_templates(acados_ocp, json_file)
## Compile solver
os.chdir('c_generated_code')
os.system('make clean_ocp_shared_lib')
os.system('make ocp_shared_lib')
os.chdir('..')
self.shared_lib_name = 'c_generated_code/libacados_ocp_solver_' + model.name + '.so'
# get
self.shared_lib = CDLL(self.shared_lib_name)
self.shared_lib.acados_create()
self.solver_created = True
self.shared_lib.acados_get_nlp_opts.restype = c_void_p
self.nlp_opts = self.shared_lib.acados_get_nlp_opts()
self.shared_lib.acados_get_nlp_dims.restype = c_void_p
self.nlp_dims = self.shared_lib.acados_get_nlp_dims()
self.shared_lib.acados_get_nlp_config.restype = c_void_p
self.nlp_config = self.shared_lib.acados_get_nlp_config()
self.shared_lib.acados_get_nlp_out.restype = c_void_p
self.nlp_out = self.shared_lib.acados_get_nlp_out()
self.shared_lib.acados_get_nlp_in.restype = c_void_p
self.nlp_in = self.shared_lib.acados_get_nlp_in()
self.shared_lib.acados_get_nlp_solver.restype = c_void_p
self.nlp_solver = self.shared_lib.acados_get_nlp_solver()
self.acados_ocp = acados_ocp
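# Typical construction (sketch; assumes `ocp` is a consistent AcadosOcp formulation):
#   solver = AcadosOcpSolver(ocp, json_file='acados_ocp_nlp.json')
# As implemented above, this renders and compiles the C code into ./c_generated_code
# and loads the resulting shared library via ctypes.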
def solve(self):
"""
solve the ocp with current input
"""
status = self.shared_lib.acados_solve()
return status
def get(self, stage_, field_):
"""
get the last solution of the solver:
:param stage: integer corresponding to shooting node
:param field_: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',]
.. note:: regarding lam, t: \n
the inequalities are internally organized in the following order: \n
[ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]
.. note:: pi: multipliers for dynamics equality constraints \n
lam: multipliers for inequalities \n
t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
sl: slack variables of soft lower inequality constraints \n
su: slack variables of soft upper inequality constraints \n
"""
out_fields = ['x', 'u', 'z', 'pi', 'lam', 't']
mem_fields = ['sl', 'su']
field = field_
field = field.encode('utf-8')
if (field_ not in out_fields + mem_fields):
raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\
\n Possible values are {}. Exiting.'.format(field_, out_fields + mem_fields))
self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p]
self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int
dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field)
out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
if (field_ in out_fields):
self.shared_lib.ocp_nlp_out_get.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_out_get(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, out_data)
elif field_ in mem_fields:
self.shared_lib.ocp_nlp_get_at_stage.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \
self.nlp_dims, self.nlp_solver, stage_, field, out_data)
return out
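# Usage sketch for solve()/get() (assumes `solver` is an AcadosOcpSolver instance and
# `ocp` the AcadosOcp used to build it, with N shooting intervals, i.e. N+1 state nodes):
#   status = solver.solve()
#   x_traj = [solver.get(i, 'x') for i in range(ocp.dims.N + 1)]
#   u_traj = [solver.get(i, 'u') for i in range(ocp.dims.N)]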
def print_statistics(self):
stat = self.get_stats("statistics")
if self.acados_ocp.solver_options.nlp_solver_type == 'SQP':
print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter')
if stat.shape[0]>7:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format( \
int(stat[0][jj]), stat[1][jj], stat[2][jj], \
stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj])))
if stat.shape[0]>7:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj]))
print('\n')
elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
print('\niter\tqp_stat\tqp_iter')
if stat.shape[0]>3:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:d}\t{:d}'.format( int(stat[0][jj]), int(stat[1][jj]), int(stat[2][jj])))
if stat.shape[0]>3:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[3][jj], stat[4][jj], stat[5][jj], stat[6][jj]))
print('\n')
return
def get_stats(self, field_):
"""
get the information of the last solver call:
:param field_: string in ['statistics', 'time_tot', 'time_lin', 'time_sim', 'time_sim_ad', 'time_sim_la', 'time_qp', 'time_qp_solver_call', 'time_reg', 'sqp_iter']
"""
fields = ['time_tot', # total cpu time previous call
'time_lin', # cpu time for linearization
'time_sim', # cpu time for integrator
'time_sim_ad', # cpu time for integrator contribution of external function calls
'time_sim_la', # cpu time for integrator contribution of linear algebra
'time_qp', # cpu time qp solution
'time_qp_solver_call', # cpu time inside qp solver (without converting the QP)
'time_qp_xcond',
'time_reg', # cpu time regularization
'sqp_iter', # number of SQP iterations
'statistics', # table with info about last iteration
'stat_m',
'stat_n',
]
field = field_
field = field.encode('utf-8')
if (field_ not in fields):
raise Exception('AcadosOcpSolver.get_stats(): {} is not a valid argument.\
\n Possible values are {}. Exiting.'.format(field_, fields))
if field_ in ['sqp_iter', 'stat_m', 'stat_n']:
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.int64)
out_data = cast(out.ctypes.data, POINTER(c_int64))
elif field_ == 'statistics':
sqp_iter = self.get_stats("sqp_iter")
stat_m = self.get_stats("stat_m")
stat_n = self.get_stats("stat_n")
min_size = min([stat_m, sqp_iter+1])
out = np.ascontiguousarray(
np.zeros( (stat_n[0]+1, min_size[0]) ), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
else:
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
return out
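# Timing/statistics sketch (assumes `solver` is an AcadosOcpSolver instance; field names
# are taken from the list above):
#   t_total = solver.get_stats('time_tot')[0]   # total cpu time of the previous call
#   n_iter = solver.get_stats('sqp_iter')[0]    # number of SQP iterations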
# Note: this function should not be used anymore, better use cost_set, constraints_set
def set(self, stage_, field_, value_):
cost_fields = ['y_ref', 'yref']
constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu']
out_fields = ['x', 'u', 'pi', 'lam', 't']
# cast value_ to avoid conversion issues
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
# treat parameters separately
if field_ == 'p':
self.shared_lib.acados_update_params.argtypes = [c_int, POINTER(c_double)]
self.shared_lib.acados_update_params.restype = c_int
value_data = cast(value_.ctypes.data, POINTER(c_double))
self.shared_lib.acados_update_params(stage, value_data, value_.shape[0])
else:
if field_ not in constraints_fields + cost_fields + out_fields:
raise Exception("AcadosOcpSolver.set(): {} is not a valid argument.\
\nPossible values are {}. Exiting.".format(field_, \
constraints_fields + cost_fields + out_fields + ['p']))
self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p]
self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int
dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field)
if value_.shape[0] != dims:
msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_)
msg += 'with dimension {} (you have {})'.format(dims, value_.shape[0])
raise Exception(msg)
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
if field_ in constraints_fields:
self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
elif field_ in cost_fields:
self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
elif field_ in out_fields:
self.shared_lib.ocp_nlp_out_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_out_set(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage, field, value_data_p)
return
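# Sketch of the common initialization pattern through set() (assumes `solver` is an
# AcadosOcpSolver instance; x0 and p_val are user-provided numpy arrays of matching size):
#   solver.set(0, 'lbx', x0)
#   solver.set(0, 'ubx', x0)          # fix the initial state via its box constraint
#   solver.set(stage, 'p', p_val)     # parameters are routed to acados_update_params above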
def cost_set(self, stage_, field_, value_):
"""
set numerical data in the cost module of the solver:
:param stage_: integer corresponding to shooting node
:param field_: string, e.g. 'yref', 'W', 'ext_cost_num_hess'
:param value_: of appropriate size
"""
# cast value_ to avoid conversion issues
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
value_shape = value_.shape
if len(value_shape) == 1:
value_shape = (value_shape[0], 0)
if value_shape != tuple(dims):
raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension', \
' for field "{}" with dimension {} (you have {})'.format( \
field_, tuple(dims), value_shape))
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
return
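# Sketch: updating the tracking reference at every stage (yref assumed to have size ny):
#   for i in range(ocp.dims.N):
#       solver.cost_set(i, 'yref', yref)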
def constraints_set(self, stage_, field_, value_):
"""
set numerical data in the constraint module of the solver:
Parameters:
:param stage_: integer corresponding to shooting node
:param field_: string, e.g. 'lbx'
:param value_: of appropriate size
"""
# cast value_ to avoid conversion issues
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
value_shape = value_.shape
if len(value_shape) == 1:
value_shape = (value_shape[0], 0)
if value_shape != tuple(dims):
raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension' \
' for field "{}" with dimension {} (you have {})'.format(field_, tuple(dims), value_shape))
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
return
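# Sketch: tightening an input bound online (lbu_new must match the dimension queried above):
#   solver.constraints_set(stage, 'lbu', lbu_new)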
def options_set(self, field_, value_):
"""
set options of the solver:
Parameters:
:param field_: string, e.g. 'print_level', 'rti_phase', 'initialize_t_slacks', 'step_length'
:param value_: of type int, float
"""
int_fields = ['print_level', 'rti_phase', 'initialize_t_slacks']
double_fields = ['step_length']
string_fields = ['globalization']
if field_ in int_fields:
if not isinstance(value_, int):
raise Exception('solver option {} must be of type int. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = c_int(value_)
elif field_ in double_fields:
if not isinstance(value_, float):
raise Exception('solver option {} must be of type float. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = c_double(value_)
elif field_ in string_fields:
if not isinstance(value_, str):
raise Exception('solver option {} must be of type str. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = value_.encode('utf-8')
if field_ == 'rti_phase':
if value_ < 0 or value_ > 2:
raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
'take only values 0, 1, 2 for SQP-RTI-type solvers')
if self.acados_ocp.solver_options.nlp_solver_type != 'SQP_RTI' and value_ > 0:
raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
'take only value 0 for SQP-type solvers')
field = field_
field = field.encode('utf-8')
if field_ in string_fields:
self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
[c_void_p, c_void_p, c_char_p, c_char_p]
self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
self.nlp_opts, field, value_ctypes)
else:
self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
[c_void_p, c_void_p, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
self.nlp_opts, field, byref(value_ctypes))
return
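# Sketch of the real-time iteration split via 'rti_phase' (values 0, 1, 2 are accepted by
# the check above; in acados, 1 and 2 are commonly used for preparation and feedback):
#   solver.options_set('rti_phase', 1); solver.solve()   # preparation phase
#   # ... new state estimate arrives, update the x0 bounds ...
#   solver.options_set('rti_phase', 2); solver.solve()   # feedback phase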
def __del__(self):
if self.solver_created:
self.shared_lib.acados_free()
del self.shared_lib
# NOTE: DLL cannot be easily unloaded!!!
# see https://stackoverflow.com/questions/359498/how-can-i-unload-a-dll-using-ctypes-in-python
# while isLoaded(self.shared_lib_name):
# dlclose(handle)
|
[
"os.path.exists",
"numpy.ones",
"os.getcwd",
"os.chdir",
"os.path.dirname",
"numpy.sum",
"numpy.zeros",
"copy.deepcopy",
"json.load",
"os.system",
"numpy.shape",
"json.dump"
] |
[((15748, 15771), 'numpy.sum', 'np.sum', (['opts.time_steps'], {}), '(opts.time_steps)\n', (15754, 15771), True, 'import numpy as np\n'), ((16019, 16059), 'os.path.dirname', 'os.path.dirname', (['current_module.__file__'], {}), '(current_module.__file__)\n', (16034, 16059), False, 'import sys, os, json\n'), ((9215, 9232), 'numpy.zeros', 'np.zeros', (['(nsbx,)'], {}), '((nsbx,))\n', (9223, 9232), True, 'import numpy as np\n'), ((9419, 9436), 'numpy.zeros', 'np.zeros', (['(nsbx,)'], {}), '((nsbx,))\n', (9427, 9436), True, 'import numpy as np\n'), ((9684, 9701), 'numpy.zeros', 'np.zeros', (['(nsbu,)'], {}), '((nsbu,))\n', (9692, 9701), True, 'import numpy as np\n'), ((9888, 9905), 'numpy.zeros', 'np.zeros', (['(nsbu,)'], {}), '((nsbu,))\n', (9896, 9905), True, 'import numpy as np\n'), ((10149, 10165), 'numpy.zeros', 'np.zeros', (['(nsh,)'], {}), '((nsh,))\n', (10157, 10165), True, 'import numpy as np\n'), ((10345, 10361), 'numpy.zeros', 'np.zeros', (['(nsh,)'], {}), '((nsh,))\n', (10353, 10361), True, 'import numpy as np\n'), ((10606, 10624), 'numpy.zeros', 'np.zeros', (['(nsphi,)'], {}), '((nsphi,))\n', (10614, 10624), True, 'import numpy as np\n'), ((10818, 10836), 'numpy.zeros', 'np.zeros', (['(nsphi,)'], {}), '((nsphi,))\n', (10826, 10836), True, 'import numpy as np\n'), ((11087, 11103), 'numpy.zeros', 'np.zeros', (['(nsg,)'], {}), '((nsg,))\n', (11095, 11103), True, 'import numpy as np\n'), ((11283, 11299), 'numpy.zeros', 'np.zeros', (['(nsg,)'], {}), '((nsg,))\n', (11291, 11299), True, 'import numpy as np\n'), ((12287, 12306), 'numpy.zeros', 'np.zeros', (['(nsbx_e,)'], {}), '((nsbx_e,))\n', (12295, 12306), True, 'import numpy as np\n'), ((12507, 12526), 'numpy.zeros', 'np.zeros', (['(nsbx_e,)'], {}), '((nsbx_e,))\n', (12515, 12526), True, 'import numpy as np\n'), ((12792, 12810), 'numpy.zeros', 'np.zeros', (['(nsh_e,)'], {}), '((nsh_e,))\n', (12800, 12810), True, 'import numpy as np\n'), ((13004, 13022), 'numpy.zeros', 'np.zeros', (['(nsh_e,)'], {}), '((nsh_e,))\n', (13012, 13022), True, 'import numpy as np\n'), ((13281, 13299), 'numpy.zeros', 'np.zeros', (['(nsg_e,)'], {}), '((nsg_e,))\n', (13289, 13299), True, 'import numpy as np\n'), ((13493, 13511), 'numpy.zeros', 'np.zeros', (['(nsg_e,)'], {}), '((nsg_e,))\n', (13501, 13511), True, 'import numpy as np\n'), ((13778, 13798), 'numpy.zeros', 'np.zeros', (['(nsphi_e,)'], {}), '((nsphi_e,))\n', (13786, 13798), True, 'import numpy as np\n'), ((14006, 14026), 'numpy.zeros', 'np.zeros', (['(nsphi_e,)'], {}), '((nsphi_e,))\n', (14014, 14026), True, 'import numpy as np\n'), ((16147, 16159), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16156, 16159), False, 'import sys, os, json\n'), ((17261, 17339), 'json.dump', 'json.dump', (['ocp_nlp_dict', 'f'], {'default': 'np_array_to_list', 'indent': '(4)', 'sort_keys': '(True)'}), '(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True)\n', (17270, 17339), False, 'import sys, os, json\n'), ((17553, 17565), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17562, 17565), False, 'import sys, os, json\n'), ((19295, 19345), 'numpy.zeros', 'np.zeros', (['(acados_ocp.dims.ny, acados_ocp.dims.nx)'], {}), '((acados_ocp.dims.ny, acados_ocp.dims.nx))\n', (19303, 19345), True, 'import numpy as np\n'), ((19375, 19425), 'numpy.zeros', 'np.zeros', (['(acados_ocp.dims.ny, acados_ocp.dims.nu)'], {}), '((acados_ocp.dims.ny, acados_ocp.dims.nu))\n', (19383, 19425), True, 'import numpy as np\n'), ((19512, 19564), 'numpy.zeros', 'np.zeros', (['(acados_ocp.dims.ny_e, acados_ocp.dims.nx)'], {}), 
'((acados_ocp.dims.ny_e, acados_ocp.dims.nx))\n', (19520, 19564), True, 'import numpy as np\n'), ((20232, 20257), 'os.path.exists', 'os.path.exists', (['json_path'], {}), '(json_path)\n', (20246, 20257), False, 'import sys, os, json\n'), ((25354, 25382), 'os.chdir', 'os.chdir', (['"""c_generated_code"""'], {}), "('c_generated_code')\n", (25362, 25382), False, 'import sys, os, json\n'), ((25391, 25429), 'os.system', 'os.system', (['"""make clean_ocp_shared_lib"""'], {}), "('make clean_ocp_shared_lib')\n", (25400, 25429), False, 'import sys, os, json\n'), ((25438, 25470), 'os.system', 'os.system', (['"""make ocp_shared_lib"""'], {}), "('make ocp_shared_lib')\n", (25447, 25470), False, 'import sys, os, json\n'), ((25479, 25493), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (25487, 25493), False, 'import sys, os, json\n'), ((15160, 15178), 'numpy.ones', 'np.ones', (['(dims.N,)'], {}), '((dims.N,))\n', (15167, 15178), True, 'import numpy as np\n'), ((15386, 15405), 'numpy.zeros', 'np.zeros', (['(dims.N,)'], {}), '((dims.N,))\n', (15394, 15405), True, 'import numpy as np\n'), ((16414, 16434), 'copy.deepcopy', 'deepcopy', (['acados_ocp'], {}), '(acados_ocp)\n', (16422, 16434), False, 'from copy import deepcopy\n'), ((20178, 20189), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20187, 20189), False, 'import sys, os, json\n'), ((28362, 28379), 'numpy.zeros', 'np.zeros', (['(dims,)'], {}), '((dims,))\n', (28370, 28379), True, 'import numpy as np\n'), ((36504, 36518), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (36512, 36518), True, 'import numpy as np\n'), ((38275, 38289), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (38283, 38289), True, 'import numpy as np\n'), ((31963, 31977), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (31971, 31977), True, 'import numpy as np\n'), ((15235, 15264), 'numpy.shape', 'np.shape', (['opts.shooting_nodes'], {}), '(opts.shooting_nodes)\n', (15243, 15264), True, 'import numpy as np\n'), ((32353, 32391), 'numpy.zeros', 'np.zeros', (['(stat_n[0] + 1, min_size[0])'], {}), '((stat_n[0] + 1, min_size[0]))\n', (32361, 32391), True, 'import numpy as np\n'), ((32529, 32543), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (32537, 32543), True, 'import numpy as np\n')]
|
"""
Module containing all the spectrogram classes
"""
# 0.2.0
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d, fold
import numpy as np
from time import time
from nnAudio.librosa_functions import *
from nnAudio.utils import *
sz_float = 4 # size of a float
epsilon = 10e-8 # fudge factor for normalization
### --------------------------- Spectrogram Classes ---------------------------###
class STFT(torch.nn.Module):
"""This function is to calculate the short-time Fourier transform (STFT) of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
center : bool
Putting the STFT kernel at the center of the time-step or not. If ``False``, the time
index is the beginning of the STFT kernel, if ``True``, the time index is the center of
the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
inverse : bool
To activate the iSTFT module or not. By default, it is False to save GPU memory.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing.
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable : bool
Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``
output_format : str
Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.
The output_format can also be changed during the ``forward`` method.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
device : str
Choose which device to initialize this layer. Default value is 'cpu'
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.STFT()
>>> specs = spec_layer(x)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,
fmin=50, fmax=6000, sr=22050, trainable=False,
output_format="Complex", verbose=True):
super().__init__()
# Trying to make the default setting same as librosa
if win_length==None: win_length = n_fft
if hop_length==None: hop_length = int(win_length // 4)
self.output_format = output_format
self.trainable = trainable
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.freq_bins = freq_bins
self.trainable = trainable
self.pad_amount = self.n_fft // 2
self.window = window
self.win_length = win_length
self.iSTFT = iSTFT
self.trainable = trainable
start = time()
# Create filter windows for stft
kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=freq_bins,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=verbose)
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)
# In this way, the inverse kernel and the forward kernel do not share the same memory...
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)
kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)
if iSTFT:
self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))
self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))
# Making all these variables nn.Parameter, so that the model can be used with nn.Parallel
# self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable)
# self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable)
# Applying window functions to the Fourier kernels
window_mask = torch.tensor(window_mask)
wsin = kernel_sin * window_mask
wcos = kernel_cos * window_mask
if self.trainable==False:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if self.trainable==True:
wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)
wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
# Prepare the shape of window mask so that it can be used later in inverse
self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
else:
pass
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
output_format : str
Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.
Default value is ``Complex``.
"""
output_format = output_format or self.output_format
self.num_samples = x.shape[-1]
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.pad_amount, 0)
elif self.pad_mode == 'reflect':
if self.num_samples < self.pad_amount:
raise AssertionError("Signal length shorter than reflect padding length (n_fft // 2).")
padding = nn.ReflectionPad1d(self.pad_amount)
x = padding(x)
spec_imag = conv1d(x, self.wsin, stride=self.stride)
spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d
# remove redundant parts
spec_real = spec_real[:, :self.freq_bins, :]
spec_imag = spec_imag[:, :self.freq_bins, :]
if output_format=='Magnitude':
spec = spec_real.pow(2) + spec_imag.pow(2)
if self.trainable==True:
return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0
else:
return torch.sqrt(spec)
elif output_format=='Complex':
return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part
elif output_format=='Phase':
return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error in calculating phase
def inverse(self, X, onesided=True, length=None, refresh_win=True):
"""
This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,
which is to convert spectrograms back to waveforms.
It only works for the complex value spectrograms. If you have the magnitude spectrograms,
please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
Parameters
----------
onesided : bool
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
length : int
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
refresh_win : bool
Recalculating the window sum square. If you have an input with fixed number of timesteps,
you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``
"""
if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):
raise NameError("Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`")
assert X.dim() == 4, "iSTFT only works for complex-valued spectrograms; " \
"make sure your tensor is in the shape of (batch, freq_bins, timesteps, 2)." \
"\nIf you have a magnitude spectrogram, please consider using Griffin-Lim."
if onesided:
X = extend_fbins(X) # extend freq
X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
# broadcast dimensions to support 2D convolution
X_real_bc = X_real.unsqueeze(1)
X_imag_bc = X_imag.unsqueeze(1)
a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1))
b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1))
# compute real and imag part. signal lies in the real part
real = a1 - b2
real = real.squeeze(-2)*self.window_mask
# Normalize the amplitude with n_fft
real /= (self.n_fft)
# Overlap and Add algorithm to connect all the frames
real = overlap_add(real, self.stride)
# Prepare the window sum-square for division
# Only need to create this window once to save time
# Unless the input spectrograms have different time steps
if hasattr(self, 'w_sum')==False or refresh_win==True:
self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
self.nonzero_indices = (self.w_sum>1e-10)
else:
pass
real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
# Remove padding
if length is None:
if self.center:
real = real[:, self.pad_amount:-self.pad_amount]
else:
if self.center:
real = real[:, self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
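# Round-trip sketch (the layer must be built with iSTFT=True for inverse() to work;
# x is assumed to be an input waveform tensor):
#   stft = STFT(n_fft=2048, hop_length=512, iSTFT=True, output_format='Complex')
#   spec = stft(x)                                  # (batch, freq_bins, time_steps, 2)
#   x_hat = stft.inverse(spec, length=x.shape[-1])  # same length as the input waveform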
def extra_repr(self) -> str:
return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(
self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
)
class MelSpectrogram(torch.nn.Module):
"""This function is to calculate the Melspectrogram of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio.
It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_fft : int
The window size for the STFT. Default value is 2048
n_mels : int
The number of Mel filter banks. The filter banks maps the n_fft to mel bins.
Default value is 128.
hop_length : int
The hop (or stride) size. Default value is 512.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
Putting the STFT kernel at the center of the time-step or not. If ``False``,
the time index is the beginning of the STFT kernel, if ``True``, the time index is the
center of the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
htk : bool
When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the
Mel scale is logarithmic. The default value is ``False``.
fmin : int
The starting frequency for the lowest Mel filter bank.
fmax : int
The ending frequency for the highest Mel filter bank.
trainable_mel : bool
Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel
filter banks will also be calculated and the Mel filter banks will be updated during model
training. Default value is ``False``.
trainable_STFT : bool
Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MelSpectrogram()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,
window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,
fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,
verbose=True, **kwargs):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.power = power
self.trainable_mel = trainable_mel
self.trainable_STFT = trainable_STFT
self.verbose = verbose
# Preparing for the stft layer. No need for center
self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,
freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,
output_format="Magnitude", verbose=verbose, **kwargs)
# Create filter windows for stft
start = time()
# Creating kernel for mel spectrogram
start = time()
mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
mel_basis = torch.tensor(mel_basis)
if verbose==True:
print("STFT filter created, time used = {:.4f} seconds".format(time()-start))
print("Mel filter created, time used = {:.4f} seconds".format(time()-start))
else:
pass
if trainable_mel:
# Making everything nn.Parameter, so that this model can support nn.DataParallel
mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)
self.register_parameter('mel_basis', mel_basis)
else:
self.register_buffer('mel_basis', mel_basis)
# if trainable_mel==True:
# self.mel_basis = torch.nn.Parameter(self.mel_basis)
# if trainable_STFT==True:
# self.wsin = torch.nn.Parameter(self.wsin)
# self.wcos = torch.nn.Parameter(self.wcos)
def forward(self, x):
"""
Convert a batch of waveforms to Mel spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
spec = self.stft(x, output_format='Magnitude')**self.power
melspec = torch.matmul(self.mel_basis, spec)
return melspec
def extra_repr(self) -> str:
return 'Mel filter banks size = {}, trainable_mel={}'.format(
(*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT
)
def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None):
"""
Best-attempt inversion of a Mel spectrogram back to a linear-frequency STFT.
A power-spectrogram estimate is optimized with SGD so that ``mel_basis @ stft``
matches the input Mel spectrogram; its square root (a magnitude STFT) is returned.
"""
def loss_fn(pred, target):
pred = pred.unsqueeze(1) if pred.ndim == 3 else pred
target = target.unsqueeze(1) if target.ndim == 3 else target
loss = (pred - target).pow(2).sum(-2).mean()
return loss
verbose = verbose or self.verbose
# SGD arguments
default_sgd_kwargs = dict(lr=1e3, momentum=0.9)
if sgd_kwargs:
default_sgd_kwargs.update(sgd_kwargs)
sgd_kwargs = default_sgd_kwargs
mel_basis = self.mel_basis.detach()
shape = melspec.shape
batch_size, n_mels, time = shape[0], shape[-2], shape[-1]
_, n_freq = mel_basis.shape
melspec = melspec.detach().view(-1, n_mels, time)
if random_start:
pred_stft_shape = (batch_size, n_freq, time)
pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps)
else:
pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps)
pred_stft = nn.Parameter(pred_stft, requires_grad=True)
sgd_kwargs["lr"] = sgd_kwargs["lr"] * batch_size
optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs)
losses = []
for i in range(max_steps):
optimizer.zero_grad()
pred_mel = mel_basis @ pred_stft
loss = loss_fn(pred_mel, melspec)
losses.append(loss.item())
loss.backward()
optimizer.step()
# Check conditions
if not loss.isfinite():
raise OverflowError("Overflow encountered in Mel -> STFT optimization")
if loss_threshold and loss < loss_threshold:
if verbose:
print(f"Target error of {loss_threshold} reached. Stopping optimization.")
break
if grad_threshold and pred_stft.grad.max() < grad_threshold:
if verbose:
print(f"Target max gradient of {grad_threshold} reached. Stopping optimization.")
break
pred_stft = pred_stft.detach().clamp(eps) ** 0.5
pred_stft = pred_stft.view((*shape[:-2], n_freq, time))
if return_extras:
return pred_stft, pred_mel.detach(), losses
return pred_stft
def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None):
default_mel_inversion_params = {}
default_stft_inversion_params = {}
mel_inversion_params = mel_inversion_params or {}
stft_inversion_params = stft_inversion_params or {}
if mel_inversion_params:
mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params}
if stft_inversion_params:
stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params}
recon_stft = self.to_stft(melspec, **mel_inversion_params)
recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params)
return recon_audio
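# Sketch of approximate waveform recovery from a Mel spectrogram (x assumed to be an input
# waveform tensor). iSTFT=True is forwarded to the internal STFT layer via **kwargs so that
# stft.inverse() is available:
#   mel_layer = MelSpectrogram(sr=22050, n_fft=2048, n_mels=128, iSTFT=True)
#   mel = mel_layer(x)
#   x_hat = mel_layer.inverse(mel)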
class MFCC(torch.nn.Module):
"""This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.
This algorithm first extracts Mel spectrograms from the audio clips,
then the discrete cosine transform is calculated to obtain the final MFCCs.
Therefore, the Mel spectrogram part can be made trainable using
``trainable_mel`` and ``trainable_STFT``.
It only support type-II DCT at the moment. Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_mfcc : int
The number of Mel-frequency cepstral coefficients
norm : string
The default value is 'ortho'. Normalization for DCT basis
**kwargs
Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window
Returns
-------
MFCCs : torch.tensor
It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MFCC()
>>> mfcc = spec_layer(x)
"""
def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):
super().__init__()
self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)
self.m_mfcc = n_mfcc
# attributes that will be used for _power_to_db
if amin <= 0:
raise ParameterError('amin must be strictly positive')
amin = torch.tensor([amin])
ref = torch.abs(torch.tensor([ref]))
self.register_buffer('amin', amin)
self.register_buffer('ref', ref)
self.top_db = top_db
self.n_mfcc = n_mfcc
def _power_to_db(self, S):
'''
Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db
for the original implementation.
'''
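# Descriptive note (added): the computation below is
#   log_spec = 10*log10(max(S, amin)) - 10*log10(max(amin, ref)),
# followed by clipping from below at (per-sample max - top_db), mirroring librosa.power_to_db.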
log_spec = 10.0 * torch.log10(torch.max(S, self.amin))
log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))
if self.top_db is not None:
if self.top_db < 0:
raise ParameterError('top_db must be non-negative')
# make the dim same as log_spec so that it can be broadcasted
batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)
log_spec = torch.max(log_spec, batch_wise_max - self.top_db)
return log_spec
def _dct(self, x, norm=None):
'''
Refer to https://github.com/zh217/torch-dct for the original implementation.
'''
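# Descriptive note (added): this evaluates the type-II DCT along the frequency axis via an
# FFT of the even/odd-reordered signal,
#   X_k = 2 * sum_n x_n * cos(pi * k * (2n + 1) / (2N)),
# and the 'ortho' branch below rescales the result, intended to match
# scipy.fftpack.dct(type=2, norm='ortho').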
x = x.permute(0,2,1) # make freq the last axis, since dct applies to the frequency axis
x_shape = x.shape
N = x_shape[-1]
v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)
Vc = torch.rfft(v, 1, onesided=False)
# TODO: Can make the W_r and W_i trainable here
k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, :, 0] * W_r - Vc[:, :, :, 1] * W_i
if norm == 'ortho':
V[:, :, 0] /= np.sqrt(N) * 2
V[:, :, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V
return V.permute(0,2,1) # swapping back the time axis and freq axis
def forward(self, x):
"""
Convert a batch of waveforms to MFCC.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = self.melspec_layer(x)
x = self._power_to_db(x)
x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:]
return x
def extra_repr(self) -> str:
return 'n_mfcc = {}'.format(
(self.n_mfcc)
)
class CQT1992(torch.nn.Module):
"""
This algorithm uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more
computational and memory efficient version.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
trainable_STFT : bool
Determine if the time to frequency domain transformation kernel for the input audio is trainable or not.
Default is ``False``
trainable_CQT : bool
Determine if the frequency domain CQT kernel is trainable or not.
Default is ``False``
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84,
trainable_STFT=False, trainable_CQT=False, bins_per_octave=12,
output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'):
super().__init__()
# norm arg is not functioning
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.norm = norm
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1]
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# creating kernels for stft
# self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa
# self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width,
window='ones',
freq_scale='no')
# Converting kernels from numpy arrays to torch tensors
wsin = torch.tensor(kernel_sin * window)
wcos = torch.tensor(kernel_cos * window)
cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32))
if trainable_STFT:
wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# STFT
fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)
# CQT
CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
(fourier_real, fourier_imag))
CQT = torch.stack((CQT_real,-CQT_imag),-1)
if self.norm:
CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
class CQT2010(torch.nn.Module):
"""
This algorithm uses the resampling method proposed in [1].
Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
spectrum, we make a small CQT kernel covering only the top octave.
Then we keep downsampling the input audio by a factor of 2 and convolving it with the
small CQT kernel. Every time the input audio is downsampled, the CQT relative to the downsampled
input is equivalent to the next lower octave.
The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the code
from the 1992 algorithm [2].
[1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
The early downsampling factor is used to downsample the input audio to reduce the CQT kernel size.
The results with and without early downsampling are more or less the same except in the very low
frequency region where freq < 40Hz.
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,
norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False,
trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True):
super().__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.output_format = output_format
self.earlydownsample = earlydownsample # TODO: activate early downsampling later if possible
# This will be used to calculate filter_cutoff and creating CQT kernels
Q = 1/(2**(1/bins_per_octave)-1)
# Creating lowpass filter and make it a torch tensor
if verbose==True:
print("Creating low pass filter ...", end='\r')
start = time()
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.5,
kernelLength=256,
transitionBandwidth=0.001
)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
if verbose==True:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
# Calculate the number of filters required for the kernel
# n_octaves determines how many resampling steps are required for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
# print("n_octaves = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minimum bins
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
if self.earlydownsample == True: # Do early downsampling if this argument is True
if verbose==True:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
if verbose==True:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
# print("Q = {}, fmin_t = {}, n_filters = {}".format(Q, self.fmin_t, n_filters))
basis, self.n_fft, _ = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# This is for the normalization in the end
freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis=basis
fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time domain to freq domain
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# Preparing kernels for Short-Time Fourier Transform (STFT)
# We set the frequency range in the CQT filter instead of here.
if verbose==True:
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
wsin = kernel_sin * window
wcos = kernel_cos * window
wsin = torch.tensor(wsin)
wcos = torch.tensor(wcos)
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
if trainable_STFT:
wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample==True:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins
if self.norm:
CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
# Normalizing the output with the downsampling factor; 2**(self.n_octaves-1)
# is to make it the same magnitude as the 1992 implementation
CQT = CQT*self.downsample_factor
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
class CQT1992v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
This algorithm uses the method proposed in [1]. I slightly modified it so that it runs faster
than the original 1992 algorithm; that is why I call it version 2.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',
trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
# norm arg is not functioning
self.trainable = trainable
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)
if trainable:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
if output_format=='Magnitude':
if self.trainable==False:
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
else:
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)
return CQT
elif output_format=='Complex':
return torch.stack((CQT_real,CQT_imag),-1)
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def forward_manual(self,x):
"""
Method for debugging
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
return CQT*torch.sqrt(self.lenghts.view(-1,1))
class CQT2010v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
This algorithm uses the resampling method proposed in [1].
Instead of convolving the STFT results with a gigantic CQT kernel covering the full frequency
spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the
input audio by a factor of 2 and convolving it with the small CQT kernel.
Every time the input audio is downsampled, the CQT relative to the downsampled input is equivalent
to the next lower octave.
The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the
code from the 1992 algorithm [2].
[1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
The early downsampling factor is used to downsample the input audio to reduce the CQT kernel size.
The results with and without early downsampling are more or less the same except in the very low
frequency region where freq < 40Hz.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the
argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.
Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : bool
Normalization for the CQT result.
basis_norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``
output_format : str
Determine the return type.
'Magnitude' will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``;
'Complex' will return the STFT result in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``;
'Phase' will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT2010v2()
>>> specs = spec_layer(x)
"""
# To DO:
# need to deal with the filter and other tensors
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect',
earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.earlydownsample = earlydownsample # We will activate early downsampling later if possible
self.trainable = trainable
self.output_format = output_format
# It will be used to calculate filter_cutoff and creating CQT kernels
Q = 1/(2**(1/bins_per_octave)-1)
# Creating lowpass filter and make it a torch tensor
if verbose==True:
print("Creating low pass filter ...", end='\r')
start = time()
# self.lowpass_filter = torch.tensor(
# create_lowpass_filter(
# band_center = 0.50,
# kernelLength=256,
# transitionBandwidth=0.001))
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.50,
kernelLength=256,
transitionBandwidth=0.001)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
if verbose==True:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
# Calculate the number of filters required for the kernel
# n_octaves determines how many resampling steps are required for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
if verbose==True:
print("num_octave = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minimum bins
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
if self.earlydownsample == True: # Do early downsampling if this argument is True
if verbose==True:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
if verbose==True:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
basis, self.n_fft, lenghts = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# For normalization in the end
freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis = basis
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1)
cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1)
if trainable:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x,output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample==True:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins
# print("downsample_factor = ",self.downsample_factor)
# print(CQT.shape)
# print(self.lenghts.view(-1,1).shape)
# Normalizing the output with the downsampling factor; 2**(self.n_octaves-1) is to make it
# the same magnitude as the 1992 implementation
CQT = CQT*self.downsample_factor
# Normalize again to get same result as librosa
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format=='Magnitude':
if self.trainable==False:
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
else:
return torch.sqrt(CQT.pow(2).sum(-1)+1e-8)
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
class CQT(CQT1992v2):
"""An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation"""
pass
# The section below is for developing purpose
# Please don't use the following classes
#
class DFT(torch.nn.Module):
"""
Experimental feature before `torch.fft` was made available.
The inverse function only works for a single frame, i.e. input shape = (batch, n_fft, 1)
"""
def __init__(self, n_fft=2048, freq_bins=None, hop_length=512,
window='hann', freq_scale='no', center=True, pad_mode='reflect',
fmin=50, fmax=6000, sr=22050):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
# Create filter windows for stft
wsin, wcos, self.bins2freq, _, _ = create_fourier_kernels(n_fft=n_fft,
freq_bins=n_fft,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr)
self.wsin = torch.tensor(wsin, dtype=torch.float)
self.wcos = torch.tensor(wcos, dtype=torch.float)
def forward(self,x):
"""
Convert a batch of waveforms to spectrums.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.n_fft//2)
x = padding(x)
imag = conv1d(x, self.wsin, stride=self.stride)
real = conv1d(x, self.wcos, stride=self.stride)
return (real, -imag)
def inverse(self,x_real,x_imag):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x_real : torch tensor
Real part of the signal.
x_imag : torch tensor
Imaginary part of the signal.
"""
x_real = broadcast_dim(x_real)
x_imag = broadcast_dim(x_imag)
x_real.transpose_(1,2) # Prepare the right shape to do inverse
x_imag.transpose_(1,2) # Prepare the right shape to do inverse
# if self.center:
# if self.pad_mode == 'constant':
# padding = nn.ConstantPad1d(self.n_fft//2, 0)
# elif self.pad_mode == 'reflect':
# padding = nn.ReflectionPad1d(self.n_fft//2)
# x_real = padding(x_real)
# x_imag = padding(x_imag)
# Watch out for the positive and negative signs
# ifft = e^(+2\pi*j)*X
# ifft(X_real) = (a1, a2)
# ifft(X_imag)*1j = (b1, b2)*1j
# = (-b2, b1)
a1 = conv1d(x_real, self.wcos, stride=self.stride)
a2 = conv1d(x_real, self.wsin, stride=self.stride)
b1 = conv1d(x_imag, self.wcos, stride=self.stride)
b2 = conv1d(x_imag, self.wsin, stride=self.stride)
imag = a2+b1
real = a1-b2
return (real/self.n_fft, imag/self.n_fft)
class iSTFT(torch.nn.Module):
This class is to convert spectrograms back to waveforms. It only works for complex-valued spectrograms.
If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse.
If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class
to save GPU/RAM memory.
When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please
use with extra care.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
Please make sure the value is the same as the forward STFT.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
Please make sure the value is the same as the forward STFT.
center : bool
Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable_kernels : bool
Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``.
trainable_window : bool
Determine if the window function is trainable or not.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a batch of waveforms.
Examples
--------
>>> spec_layer = Spectrogram.iSTFT()
>>> specs = spec_layer(x)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False,
trainable_window=False, verbose=True, refresh_win=True):
super().__init__()
# Trying to make the default setting same as librosa
if win_length==None: win_length = n_fft
if hop_length==None: hop_length = int(win_length // 4)
self.n_fft = n_fft
self.win_length = win_length
self.stride = hop_length
self.center = center
self.pad_amount = self.n_fft // 2
self.refresh_win = refresh_win
start = time()
# Create the window function and prepare the shape for batch-wise-time-wise multiplication
# Create filter windows for inverse
kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=n_fft,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=False)
window_mask = get_window(window,int(win_length), fftbins=True)
# For inverse, the Fourier kernels do not need to be windowed
window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)
# kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)
# Decide if the Fourier kernels are trainable
if trainable_kernels:
# Making all these variables trainable
kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels)
kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels)
self.register_parameter('kernel_sin', kernel_sin)
self.register_parameter('kernel_cos', kernel_cos)
else:
self.register_buffer('kernel_sin', kernel_sin)
self.register_buffer('kernel_cos', kernel_cos)
# Decide if the window function is trainable
if trainable_window:
window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window)
self.register_parameter('window_mask', window_mask)
else:
self.register_buffer('window_mask', window_mask)
if verbose==True:
print("iSTFT kernels created, time used = {:.4f} seconds".format(time()-start))
else:
pass
def forward(self, X, onesided=False, length=None, refresh_win=None):
"""
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
If your input spectrograms X are of the same length, please use ``refresh_win=None`` to increase
computational speed.
"""
if refresh_win==None:
refresh_win=self.refresh_win
assert X.dim()==4 , "Inverse iSTFT only works for complex numbers," \
"make sure your tensor is in the shape of (batch, freq_bins, timesteps, 2)"
# If the input spectrogram contains only half of the n_fft
# Use extend_fbins function to get back another half
if onesided:
X = extend_fbins(X) # extend freq
X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
# broadcast dimensions to support 2D convolution
X_real_bc = X_real.unsqueeze(1)
X_imag_bc = X_imag.unsqueeze(1)
a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1))
b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1))
# compute real and imag part. signal lies in the real part
real = a1 - b2
real = real.squeeze(-2)*self.window_mask
# Normalize the amplitude with n_fft
real /= (self.n_fft)
# Overlap and Add algorithm to connect all the frames
real = overlap_add(real, self.stride)
# Prepare the window sumsqure for division
# Only need to create this window once to save time
# Unless the input spectrograms have different time steps
if hasattr(self, 'w_sum')==False or refresh_win==True:
self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
self.nonzero_indices = (self.w_sum>1e-10)
else:
pass
real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
# Remove padding
if length is None:
if self.center:
real = real[:, self.pad_amount:-self.pad_amount]
else:
if self.center:
real = real[:, self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
class Griffin_Lim(torch.nn.Module):
"""
Converting Magnitude spectrograms back to waveforms based on the "fast Griffin-Lim"[1].
This Griffin Lim is a direct clone from librosa.griffinlim.
[1] <NAME>., <NAME>., & <NAME>. “A fast Griffin-Lim algorithm,”
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
n_iter : int
The number of iterations for Griffin-Lim. The default value is ``32``
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
center : bool
Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
momentum : float
The momentum for the update rule. The default value is ``0.99``.
device : str
Choose which device to initialize this layer. Default value is 'cpu'
"""
def __init__(self,
n_fft,
n_iter=32,
hop_length=None,
win_length=None,
window='hann',
center=True,
pad_mode='reflect',
momentum=0.99,
device='cpu'):
super().__init__()
self.n_fft = n_fft
self.win_length = win_length
self.n_iter = n_iter
self.center = center
self.pad_mode = pad_mode
self.momentum = momentum
self.device = device
if win_length==None:
self.win_length=n_fft
else:
self.win_length=win_length
if hop_length==None:
self.hop_length = n_fft//4
else:
self.hop_length = hop_length
# Creating window function for stft and istft later
self.w = torch.tensor(get_window(window,
int(self.win_length),
fftbins=True),
device=device).float()
def forward(self, S):
"""
Convert a batch of magnitude spectrograms to waveforms.
Parameters
----------
S : torch tensor
Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)``
"""
assert S.dim()==3 , "Please make sure your input is in the shape of (batch, freq_bins, timesteps)"
# Initializing Random Phase
rand_phase = torch.randn(*S.shape, device=self.device)
angles = torch.empty((*S.shape,2), device=self.device)
angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase)
angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase)
# Initializing the rebuilt magnitude spectrogram
rebuilt = torch.zeros(*angles.shape, device=self.device)
for _ in range(self.n_iter):
tprev = rebuilt # Saving previous rebuilt magnitude spec
# spec2wav conversion
# print(f'win_length={self.win_length}\tw={self.w.shape}')
inverse = torch.istft(S.unsqueeze(-1) * angles,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
center=self.center)
# wav2spec conversion
rebuilt = torch.stft(inverse,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
pad_mode=self.pad_mode)
# Phase update rule
angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:]
# Phase normalization
angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase
# Using the final phase to reconstruct the waveforms
inverse = torch.istft(S.unsqueeze(-1) * angles,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
center=self.center)
return inverse
|
[
"torch.nn.functional.conv2d",
"numpy.sqrt",
"torch.nn.functional.conv1d",
"torch.sin",
"torch.max",
"torch.sqrt",
"torch.cos",
"torch.arange",
"torch.nn.ReflectionPad1d",
"torch.nn.ConstantPad1d",
"torch.rfft",
"torch.matmul",
"torch.pinverse",
"torch.randn",
"torch.optim.SGD",
"numpy.ceil",
"torch.stft",
"torch.empty",
"time.time",
"torch.cat",
"numpy.float",
"torch.stack",
"torch.atan2",
"torch.tensor",
"torch.nn.Parameter",
"torch.zeros"
] |
[((4751, 4757), 'time.time', 'time', ([], {}), '()\n', (4755, 4757), False, 'from time import time\n'), ((5660, 5703), 'torch.tensor', 'torch.tensor', (['kernel_sin'], {'dtype': 'torch.float'}), '(kernel_sin, dtype=torch.float)\n', (5672, 5703), False, 'import torch\n'), ((5725, 5768), 'torch.tensor', 'torch.tensor', (['kernel_cos'], {'dtype': 'torch.float'}), '(kernel_cos, dtype=torch.float)\n', (5737, 5768), False, 'import torch\n'), ((6576, 6601), 'torch.tensor', 'torch.tensor', (['window_mask'], {}), '(window_mask)\n', (6588, 6601), False, 'import torch\n'), ((8628, 8668), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wsin'], {'stride': 'self.stride'}), '(x, self.wsin, stride=self.stride)\n', (8634, 8668), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((8689, 8729), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wcos'], {'stride': 'self.stride'}), '(x, self.wcos, stride=self.stride)\n', (8695, 8729), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((11428, 11481), 'torch.nn.functional.conv2d', 'conv2d', (['X_real_bc', 'self.kernel_cos_inv'], {'stride': '(1, 1)'}), '(X_real_bc, self.kernel_cos_inv, stride=(1, 1))\n', (11434, 11481), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((11494, 11547), 'torch.nn.functional.conv2d', 'conv2d', (['X_imag_bc', 'self.kernel_sin_inv'], {'stride': '(1, 1)'}), '(X_imag_bc, self.kernel_sin_inv, stride=(1, 1))\n', (11500, 11547), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((16903, 16909), 'time.time', 'time', ([], {}), '()\n', (16907, 16909), False, 'from time import time\n'), ((16973, 16979), 'time.time', 'time', ([], {}), '()\n', (16977, 16979), False, 'from time import time\n'), ((17075, 17098), 'torch.tensor', 'torch.tensor', (['mel_basis'], {}), '(mel_basis)\n', (17087, 17098), False, 'import torch\n'), ((18463, 18497), 'torch.matmul', 'torch.matmul', (['self.mel_basis', 'spec'], {}), '(self.mel_basis, spec)\n', (18475, 18497), False, 'import torch\n'), ((19991, 20034), 'torch.nn.Parameter', 'nn.Parameter', (['pred_stft'], {'requires_grad': '(True)'}), '(pred_stft, requires_grad=True)\n', (20003, 20034), True, 'import torch.nn as nn\n'), ((20113, 20155), 'torch.optim.SGD', 'torch.optim.SGD', (['[pred_stft]'], {}), '([pred_stft], **sgd_kwargs)\n', (20128, 20155), False, 'import torch\n'), ((23995, 24015), 'torch.tensor', 'torch.tensor', (['[amin]'], {}), '([amin])\n', (24007, 24015), False, 'import torch\n'), ((25300, 25332), 'torch.rfft', 'torch.rfft', (['v', '(1)'], {'onesided': '(False)'}), '(v, 1, onesided=False)\n', (25310, 25332), False, 'import torch\n'), ((25493, 25505), 'torch.cos', 'torch.cos', (['k'], {}), '(k)\n', (25502, 25505), False, 'import torch\n'), ((25520, 25532), 'torch.sin', 'torch.sin', (['k'], {}), '(k)\n', (25529, 25532), False, 'import torch\n'), ((31204, 31210), 'time.time', 'time', ([], {}), '()\n', (31208, 31210), False, 'from time import time\n'), ((32272, 32278), 'time.time', 'time', ([], {}), '()\n', (32276, 32278), False, 'from time import time\n'), ((32619, 32652), 'torch.tensor', 'torch.tensor', (['(kernel_sin * window)'], {}), '(kernel_sin * window)\n', (32631, 32652), False, 'import torch\n'), ((32668, 32701), 'torch.tensor', 'torch.tensor', (['(kernel_cos * window)'], {}), '(kernel_cos * window)\n', (32680, 32701), False, 'import torch\n'), ((34726, 34770), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wcos'], {'stride': 'self.hop_length'}), '(x, self.wcos, stride=self.hop_length)\n', (34732, 34770), False, 'from 
torch.nn.functional import conv1d, conv2d, fold\n'), ((34794, 34838), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wsin'], {'stride': 'self.hop_length'}), '(x, self.wsin, stride=self.hop_length)\n', (34800, 34838), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((35029, 35067), 'torch.stack', 'torch.stack', (['(CQT_real, -CQT_imag)', '(-1)'], {}), '((CQT_real, -CQT_imag), -1)\n', (35040, 35067), False, 'import torch\n'), ((37939, 37945), 'time.time', 'time', ([], {}), '()\n', (37943, 37945), False, 'from time import time\n'), ((40885, 40891), 'time.time', 'time', ([], {}), '()\n', (40889, 40891), False, 'from time import time\n'), ((41562, 41585), 'numpy.ceil', 'np.ceil', (['(Q * sr / freqs)'], {}), '(Q * sr / freqs)\n', (41569, 41585), True, 'import numpy as np\n'), ((42451, 42457), 'time.time', 'time', ([], {}), '()\n', (42455, 42457), False, 'from time import time\n'), ((42671, 42689), 'torch.tensor', 'torch.tensor', (['wsin'], {}), '(wsin)\n', (42683, 42689), False, 'import torch\n'), ((42705, 42723), 'torch.tensor', 'torch.tensor', (['wcos'], {}), '(wcos)\n', (42717, 42723), False, 'import torch\n'), ((50717, 50723), 'time.time', 'time', ([], {}), '()\n', (50721, 50723), False, 'from time import time\n'), ((54329, 54385), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_real'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_real, stride=self.hop_length)\n', (54335, 54385), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((54405, 54461), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_imag'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_imag, stride=self.hop_length)\n', (54411, 54461), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((60134, 60140), 'time.time', 'time', ([], {}), '()\n', (60138, 60140), False, 'from time import time\n'), ((63381, 63387), 'time.time', 'time', ([], {}), '()\n', (63385, 63387), False, 'from time import time\n'), ((64029, 64052), 'numpy.ceil', 'np.ceil', (['(Q * sr / freqs)'], {}), '(Q * sr / freqs)\n', (64036, 64052), True, 'import numpy as np\n'), ((69081, 69118), 'torch.tensor', 'torch.tensor', (['wsin'], {'dtype': 'torch.float'}), '(wsin, dtype=torch.float)\n', (69093, 69118), False, 'import torch\n'), ((69139, 69176), 'torch.tensor', 'torch.tensor', (['wcos'], {'dtype': 'torch.float'}), '(wcos, dtype=torch.float)\n', (69151, 69176), False, 'import torch\n'), ((69909, 69949), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wsin'], {'stride': 'self.stride'}), '(x, self.wsin, stride=self.stride)\n', (69915, 69949), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((69965, 70005), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.wcos'], {'stride': 'self.stride'}), '(x, self.wcos, stride=self.stride)\n', (69971, 70005), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71093, 71138), 'torch.nn.functional.conv1d', 'conv1d', (['x_real', 'self.wcos'], {'stride': 'self.stride'}), '(x_real, self.wcos, stride=self.stride)\n', (71099, 71138), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71152, 71197), 'torch.nn.functional.conv1d', 'conv1d', (['x_real', 'self.wsin'], {'stride': 'self.stride'}), '(x_real, self.wsin, stride=self.stride)\n', (71158, 71197), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71211, 71256), 'torch.nn.functional.conv1d', 'conv1d', (['x_imag', 'self.wcos'], {'stride': 'self.stride'}), '(x_imag, self.wcos, stride=self.stride)\n', (71217, 71256), 
False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((71270, 71315), 'torch.nn.functional.conv1d', 'conv1d', (['x_imag', 'self.wsin'], {'stride': 'self.stride'}), '(x_imag, self.wsin, stride=self.stride)\n', (71276, 71315), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((75587, 75593), 'time.time', 'time', ([], {}), '()\n', (75591, 75593), False, 'from time import time\n'), ((79208, 79257), 'torch.nn.functional.conv2d', 'conv2d', (['X_real_bc', 'self.kernel_cos'], {'stride': '(1, 1)'}), '(X_real_bc, self.kernel_cos, stride=(1, 1))\n', (79214, 79257), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((79270, 79319), 'torch.nn.functional.conv2d', 'conv2d', (['X_imag_bc', 'self.kernel_sin'], {'stride': '(1, 1)'}), '(X_imag_bc, self.kernel_sin, stride=(1, 1))\n', (79276, 79319), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((83586, 83627), 'torch.randn', 'torch.randn', (['*S.shape'], {'device': 'self.device'}), '(*S.shape, device=self.device)\n', (83597, 83627), False, 'import torch\n'), ((83645, 83691), 'torch.empty', 'torch.empty', (['(*S.shape, 2)'], {'device': 'self.device'}), '((*S.shape, 2), device=self.device)\n', (83656, 83691), False, 'import torch\n'), ((83718, 83751), 'torch.cos', 'torch.cos', (['(2 * np.pi * rand_phase)'], {}), '(2 * np.pi * rand_phase)\n', (83727, 83751), False, 'import torch\n'), ((83778, 83811), 'torch.sin', 'torch.sin', (['(2 * np.pi * rand_phase)'], {}), '(2 * np.pi * rand_phase)\n', (83787, 83811), False, 'import torch\n'), ((83888, 83934), 'torch.zeros', 'torch.zeros', (['*angles.shape'], {'device': 'self.device'}), '(*angles.shape, device=self.device)\n', (83899, 83934), False, 'import torch\n'), ((6864, 6918), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wsin'], {'requires_grad': 'self.trainable'}), '(wsin, requires_grad=self.trainable)\n', (6882, 6918), False, 'import torch\n'), ((6938, 6992), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wcos'], {'requires_grad': 'self.trainable'}), '(wcos, requires_grad=self.trainable)\n', (6956, 6992), False, 'import torch\n'), ((17476, 17534), 'torch.nn.Parameter', 'torch.nn.Parameter', (['mel_basis'], {'requires_grad': 'trainable_mel'}), '(mel_basis, requires_grad=trainable_mel)\n', (17494, 17534), False, 'import torch\n'), ((24040, 24059), 'torch.tensor', 'torch.tensor', (['[ref]'], {}), '([ref])\n', (24052, 24059), False, 'import torch\n'), ((24852, 24901), 'torch.max', 'torch.max', (['log_spec', '(batch_wise_max - self.top_db)'], {}), '(log_spec, batch_wise_max - self.top_db)\n', (24861, 24901), False, 'import torch\n'), ((32904, 32961), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wsin'], {'requires_grad': 'trainable_kernels'}), '(wsin, requires_grad=trainable_kernels)\n', (32922, 32961), False, 'import torch\n'), ((32981, 33038), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wcos'], {'requires_grad': 'trainable_kernels'}), '(wcos, requires_grad=trainable_kernels)\n', (32999, 33038), False, 'import torch\n'), ((33305, 33374), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (33323, 33374), False, 'import torch\n'), ((33406, 33475), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (33424, 33475), False, 'import torch\n'), ((39877, 39883), 'time.time', 'time', ([], {}), '()\n', (39881, 39883), False, 'from time 
import time\n'), ((42889, 42946), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wsin'], {'requires_grad': 'trainable_kernels'}), '(wsin, requires_grad=trainable_kernels)\n', (42907, 42946), False, 'import torch\n'), ((42966, 43023), 'torch.nn.Parameter', 'torch.nn.Parameter', (['wcos'], {'requires_grad': 'trainable_kernels'}), '(wcos, requires_grad=trainable_kernels)\n', (42984, 43023), False, 'import torch\n'), ((43290, 43359), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (43308, 43359), False, 'import torch\n'), ((43391, 43460), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (43409, 43460), False, 'import torch\n'), ((43968, 44004), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.n_fft // 2)', '(0)'], {}), '(self.n_fft // 2, 0)\n', (43984, 44004), True, 'import torch.nn as nn\n'), ((45215, 45240), 'torch.cat', 'torch.cat', (['(CQT1, CQT)', '(1)'], {}), '((CQT1, CQT), 1)\n', (45224, 45240), False, 'import torch\n'), ((51581, 51650), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (51599, 51650), False, 'import torch\n'), ((51682, 51751), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (51700, 51751), False, 'import torch\n'), ((53026, 53082), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_real'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_real, stride=self.hop_length)\n', (53032, 53082), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((62378, 62384), 'time.time', 'time', ([], {}), '()\n', (62382, 62384), False, 'from time import time\n'), ((64462, 64531), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_real'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_real, requires_grad=trainable_kernels)\n', (64480, 64531), False, 'import torch\n'), ((64563, 64632), 'torch.nn.Parameter', 'torch.nn.Parameter', (['cqt_kernels_imag'], {'requires_grad': 'trainable_kernels'}), '(cqt_kernels_imag, requires_grad=trainable_kernels)\n', (64581, 64632), False, 'import torch\n'), ((65322, 65358), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.n_fft // 2)', '(0)'], {}), '(self.n_fft // 2, 0)\n', (65338, 65358), True, 'import torch.nn as nn\n'), ((66616, 66641), 'torch.cat', 'torch.cat', (['(CQT1, CQT)', '(1)'], {}), '((CQT1, CQT), 1)\n', (66625, 66641), False, 'import torch\n'), ((77039, 77102), 'torch.nn.Parameter', 'torch.nn.Parameter', (['kernel_sin'], {'requires_grad': 'trainable_kernels'}), '(kernel_sin, requires_grad=trainable_kernels)\n', (77057, 77102), False, 'import torch\n'), ((77128, 77191), 'torch.nn.Parameter', 'torch.nn.Parameter', (['kernel_cos'], {'requires_grad': 'trainable_kernels'}), '(kernel_cos, requires_grad=trainable_kernels)\n', (77146, 77191), False, 'import torch\n'), ((77558, 77621), 'torch.nn.Parameter', 'torch.nn.Parameter', (['window_mask'], {'requires_grad': 'trainable_window'}), '(window_mask, requires_grad=trainable_window)\n', (77576, 77621), False, 'import torch\n'), ((84526, 84645), 'torch.stft', 'torch.stft', (['inverse', 'self.n_fft', 'self.hop_length'], {'win_length': 'self.win_length', 'window': 'self.w', 'pad_mode': 
'self.pad_mode'}), '(inverse, self.n_fft, self.hop_length, win_length=self.win_length,\n window=self.w, pad_mode=self.pad_mode)\n', (84536, 84645), False, 'import torch\n'), ((8272, 8308), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['self.pad_amount', '(0)'], {}), '(self.pad_amount, 0)\n', (8288, 8308), True, 'import torch.nn as nn\n'), ((9055, 9079), 'torch.sqrt', 'torch.sqrt', (['(spec + 1e-08)'], {}), '(spec + 1e-08)\n', (9065, 9079), False, 'import torch\n'), ((9171, 9187), 'torch.sqrt', 'torch.sqrt', (['spec'], {}), '(spec)\n', (9181, 9187), False, 'import torch\n'), ((9247, 9287), 'torch.stack', 'torch.stack', (['(spec_real, -spec_imag)', '(-1)'], {}), '((spec_real, -spec_imag), -1)\n', (9258, 9287), False, 'import torch\n'), ((24437, 24460), 'torch.max', 'torch.max', (['S', 'self.amin'], {}), '(S, self.amin)\n', (24446, 24460), False, 'import torch\n'), ((24501, 24531), 'torch.max', 'torch.max', (['self.amin', 'self.ref'], {}), '(self.amin, self.ref)\n', (24510, 24531), False, 'import torch\n'), ((25645, 25655), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (25652, 25655), True, 'import numpy as np\n'), ((25687, 25701), 'numpy.sqrt', 'np.sqrt', (['(N / 2)'], {}), '(N / 2)\n', (25694, 25701), True, 'import numpy as np\n'), ((34505, 34548), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.kernel_width // 2)', '(0)'], {}), '(self.kernel_width // 2, 0)\n', (34521, 34548), True, 'import torch.nn as nn\n'), ((41604, 41625), 'torch.tensor', 'torch.tensor', (['lenghts'], {}), '(lenghts)\n', (41616, 41625), False, 'import torch\n'), ((44071, 44106), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.n_fft // 2)'], {}), '(self.n_fft // 2)\n', (44089, 44106), True, 'import torch.nn as nn\n'), ((51412, 51442), 'torch.tensor', 'torch.tensor', (['cqt_kernels.real'], {}), '(cqt_kernels.real)\n', (51424, 51442), False, 'import torch\n'), ((51483, 51513), 'torch.tensor', 'torch.tensor', (['cqt_kernels.imag'], {}), '(cqt_kernels.imag)\n', (51495, 51513), False, 'import torch\n'), ((52810, 52853), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.kernel_width // 2)', '(0)'], {}), '(self.kernel_width // 2, 0)\n', (52826, 52853), True, 'import torch.nn as nn\n'), ((53163, 53219), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.cqt_kernels_imag'], {'stride': 'self.hop_length'}), '(x, self.cqt_kernels_imag, stride=self.hop_length)\n', (53169, 53219), False, 'from torch.nn.functional import conv1d, conv2d, fold\n'), ((53635, 53672), 'torch.stack', 'torch.stack', (['(CQT_real, CQT_imag)', '(-1)'], {}), '((CQT_real, CQT_imag), -1)\n', (53646, 53672), False, 'import torch\n'), ((54113, 54156), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.kernel_width // 2)', '(0)'], {}), '(self.kernel_width // 2, 0)\n', (54129, 54156), True, 'import torch.nn as nn\n'), ((64071, 64092), 'torch.tensor', 'torch.tensor', (['lenghts'], {}), '(lenghts)\n', (64083, 64092), False, 'import torch\n'), ((65425, 65460), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.n_fft // 2)'], {}), '(self.n_fft // 2)\n', (65443, 65460), True, 'import torch.nn as nn\n'), ((69725, 69761), 'torch.nn.ConstantPad1d', 'nn.ConstantPad1d', (['(self.n_fft // 2)', '(0)'], {}), '(self.n_fft // 2, 0)\n', (69741, 69761), True, 'import torch.nn as nn\n'), ((76741, 76784), 'torch.tensor', 'torch.tensor', (['kernel_sin'], {'dtype': 'torch.float'}), '(kernel_sin, dtype=torch.float)\n', (76753, 76784), False, 'import torch\n'), ((76820, 76863), 'torch.tensor', 'torch.tensor', (['kernel_cos'], {'dtype': 
'torch.float'}), '(kernel_cos, dtype=torch.float)\n', (76832, 76863), False, 'import torch\n'), ((8544, 8579), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['self.pad_amount'], {}), '(self.pad_amount)\n', (8562, 8579), True, 'import torch.nn as nn\n'), ((9390, 9430), 'torch.atan2', 'torch.atan2', (['(-spec_imag + 0.0)', 'spec_real'], {}), '(-spec_imag + 0.0, spec_real)\n', (9401, 9430), False, 'import torch\n'), ((31973, 31979), 'time.time', 'time', ([], {}), '()\n', (31977, 31979), False, 'from time import time\n'), ((33853, 33859), 'time.time', 'time', ([], {}), '()\n', (33857, 33859), False, 'from time import time\n'), ((34618, 34660), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.kernel_width // 2)'], {}), '(self.kernel_width // 2)\n', (34636, 34660), True, 'import torch.nn as nn\n'), ((35621, 35662), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (35632, 35662), False, 'import torch\n'), ((41517, 41542), 'numpy.float', 'np.float', (['bins_per_octave'], {}), '(bins_per_octave)\n', (41525, 41542), True, 'import numpy as np\n'), ((46038, 46079), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (46049, 46079), False, 'import torch\n'), ((52923, 52965), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.kernel_width // 2)'], {}), '(self.kernel_width // 2)\n', (52941, 52965), True, 'import torch.nn as nn\n'), ((53862, 53903), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (53873, 53903), False, 'import torch\n'), ((54226, 54268), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.kernel_width // 2)'], {}), '(self.kernel_width // 2)\n', (54244, 54268), True, 'import torch.nn as nn\n'), ((63984, 64009), 'numpy.float', 'np.float', (['bins_per_octave'], {}), '(bins_per_octave)\n', (63992, 64009), True, 'import numpy as np\n'), ((67645, 67686), 'torch.stack', 'torch.stack', (['(phase_real, phase_imag)', '(-1)'], {}), '((phase_real, phase_imag), -1)\n', (67656, 67686), False, 'import torch\n'), ((69831, 69866), 'torch.nn.ReflectionPad1d', 'nn.ReflectionPad1d', (['(self.n_fft // 2)'], {}), '(self.n_fft // 2)\n', (69849, 69866), True, 'import torch.nn as nn\n'), ((7367, 7373), 'time.time', 'time', ([], {}), '()\n', (7371, 7373), False, 'from time import time\n'), ((17201, 17207), 'time.time', 'time', ([], {}), '()\n', (17205, 17207), False, 'from time import time\n'), ((17290, 17296), 'time.time', 'time', ([], {}), '()\n', (17294, 17296), False, 'from time import time\n'), ((19923, 19948), 'torch.pinverse', 'torch.pinverse', (['mel_basis'], {}), '(mel_basis)\n', (19937, 19948), False, 'import torch\n'), ((25404, 25451), 'torch.arange', 'torch.arange', (['N'], {'dtype': 'x.dtype', 'device': 'x.device'}), '(N, dtype=x.dtype, device=x.device)\n', (25416, 25451), False, 'import torch\n'), ((35503, 35534), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (35514, 35534), False, 'import torch\n'), ((35570, 35601), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (35581, 35601), False, 'import torch\n'), ((38595, 38601), 'time.time', 'time', ([], {}), '()\n', (38599, 38601), False, 'from time import time\n'), ((42130, 42136), 'time.time', 'time', ([], {}), '()\n', (42134, 42136), False, 'from time import time\n'), ((42827, 42833), 'time.time', 'time', ([], {}), '()\n', (42831, 42833), False, 'from time import 
time\n'), ((45904, 45949), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (45915, 45949), False, 'import torch\n'), ((45979, 46024), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (45990, 46024), False, 'import torch\n'), ((52158, 52164), 'time.time', 'time', ([], {}), '()\n', (52162, 52164), False, 'from time import time\n'), ((53744, 53775), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (53755, 53775), False, 'import torch\n'), ((53811, 53842), 'torch.atan2', 'torch.atan2', (['CQT_imag', 'CQT_real'], {}), '(CQT_imag, CQT_real)\n', (53822, 53842), False, 'import torch\n'), ((61071, 61077), 'time.time', 'time', ([], {}), '()\n', (61075, 61077), False, 'from time import time\n'), ((65040, 65046), 'time.time', 'time', ([], {}), '()\n', (65044, 65046), False, 'from time import time\n'), ((67511, 67556), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (67522, 67556), False, 'import torch\n'), ((67586, 67631), 'torch.atan2', 'torch.atan2', (['CQT[:, :, :, 1]', 'CQT[:, :, :, 0]'], {}), '(CQT[:, :, :, 1], CQT[:, :, :, 0])\n', (67597, 67631), False, 'import torch\n'), ((76571, 76596), 'torch.tensor', 'torch.tensor', (['window_mask'], {}), '(window_mask)\n', (76583, 76596), False, 'import torch\n'), ((77866, 77872), 'time.time', 'time', ([], {}), '()\n', (77870, 77872), False, 'from time import time\n'), ((19786, 19861), 'torch.zeros', 'torch.zeros', (['*pred_stft_shape'], {'dtype': 'torch.float32', 'device': 'mel_basis.device'}), '(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device)\n', (19797, 19861), False, 'import torch\n'), ((40686, 40692), 'time.time', 'time', ([], {}), '()\n', (40690, 40692), False, 'from time import time\n'), ((63183, 63189), 'time.time', 'time', ([], {}), '()\n', (63187, 63189), False, 'from time import time\n')]
|
""" Represent a triangulated surface using a 3D boolean grid"""
import logging
import numpy as np
from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element
from rpl.tools.geometry import geom_utils
import data_io
class BSP_Grid(object):
def __init__(self, node_array, tris, allocate_step=100000):
"""
Store the triangles with an enumeration so that even when they are subdivided their
identity is not lost.
"""
tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))
minus_ones = -np.ones((len(tris), 6), dtype=np.int32)
self.tris = np.hstack((tris, minus_ones, tri_nums))
self.allocate_step = allocate_step
self.node_array = node_array # Reference to the full list of nodes
self._resize()
self.next_free = len(node_array)
self.split_cache = np.zeros(len(self.tris), dtype=np.int32)
def _resize(self):
"""
Increase node array size by the allocate_step amount.
"""
self.array_size = len(self.node_array) + self.allocate_step
self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3))))
def add_node(self, node):
"""
Adds a new node to the end of the node array (expanding if required). Returns the index of
the newly added node.
"""
if self.next_free == self.array_size:
self._resize()
self.node_array[self.next_free] = node
self.next_free += 1
return self.next_free - 1
def prepare_add(self, num_add_nodes):
"""
Make sure that ``num_add_nodes`` can be added later without needing a resize.
Useful if adding nodes from within cython where resizing is tricky.
"""
if self.next_free + num_add_nodes >= self.array_size:
self._resize()
return self.next_free
def make_grid(veh_surfs, settings):
"""
Make coordinates of voxelated grid based on overall list of vehicle surfaces
"""
## Find overall bounding box
x_min, x_max = 1e30, -1e30
y_min, y_max = 1e30, -1e30
z_min, z_max = 1e30, -1e30
for key, veh_surf in veh_surfs.items():
x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"]))
y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"]))
z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"]))
x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"]
y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"]
z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"]
###########################################
# Create the uniformly spaced grid points
x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"])
y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"])
z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"])
return x_grid, y_grid, z_grid
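# --- Illustrative sketch (added for clarity; not part of the original pipeline, values are made up) ---
# Shows the input shapes make_grid expects: a dict of surfaces with "x"/"y"/"z" coordinate
# arrays and a settings dict carrying "voxel_size".
def _demo_make_grid():
    demo_surfs = {"part_a": {"x": np.array([0.0, 1.0]),
                             "y": np.array([0.0, 2.0]),
                             "z": np.array([0.0, 0.5])}}
    x_grid, y_grid, z_grid = make_grid(demo_surfs, {"voxel_size": 0.25})
    return x_grid, y_grid, z_grid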
def convert_geom(veh_surf, tr_mat):
"""
Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array
"""
veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T
veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3])
veh_surf["x"] = veh_surf['nodes'][:, 0]
veh_surf["y"] = veh_surf['nodes'][:, 1]
veh_surf["z"] = veh_surf['nodes'][:, 2]
return veh_surf
def find_occupied_voxels(surf, surf_mask, voxel_data):
"""
Voxels containing any triangle from ``surf`` are considered occupied and are or'ed with ``surf_mask``.
If ``voxel_data["value"]`` is None a fresh voxel array is created; the updated ``voxel_data`` is returned.
"""
nodes = surf["nodes"]
tris = surf["tris"]
x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")]
vox_size = voxel_data["vox_size"]
## Find the local extents of this part
min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size
min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size
min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size
b_tree = BSP_Grid(nodes, tris)
# Create BSP tree elements; we're not building a full tree, but we reuse some of its splitting functions
b_x_root = BSP_Element(b_tree.tris, b_tree)
size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts)
## Create the occupied voxels if none were supplied
if voxel_data["value"] is None:
voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)
occupied_voxels = voxel_data["value"]
## The [1:] is because to make n voxels in a given direction we need n-1 splits
for i, x_pos in enumerate(x_pts[1:]):
if x_pos < min_x: continue
if x_pos > max_x: break
b_above_x, b_below_x = b_x_root.split_at(0, x_pos)
b_y_root = b_below_x
for j, y_pos in enumerate(y_pts[1:]):
if b_y_root is None:
break
if y_pos < min_y: continue
if y_pos > max_y: break
b_above_y, b_below_y = b_y_root.split_at(1, y_pos)
b_z_root = b_below_y
for k, z_pos in enumerate(z_pts[1:]):
if b_z_root is None:
break
if z_pos < min_z: continue
if z_pos > max_z: break
b_above_z, b_below_z = b_z_root.split_at(2, z_pos)
if not (b_below_z and (len(b_below_z.tris) == 0)):
## At least part of a triangle lies here, so mark the voxel as occupied
occupied_voxels[i, j, k] |= surf_mask
b_z_root = b_above_z
b_y_root = b_above_y
b_x_root = b_above_x
return voxel_data
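# --- Illustrative sketch (added for clarity; not part of the original pipeline) ---
# The voxel values are bitmasks: the default mask 1 marks "any geometry", while the masks
# passed in (e.g. 2 for hulls, 4 for doors in the __main__ block below) set higher bits.
def _demo_decode_voxel(value, mask):
    """Return True if the voxel ``value`` has the bit(s) of ``mask`` set."""
    return bool(value & mask)
# e.g. _demo_decode_voxel(1 | 4, 4) -> True (door voxel); _demo_decode_voxel(1, 4) -> False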
#############
# Main code
def main(vehicle_comp_coords, tr_mat, voxel_masks, settings):
"""
Perform voxelization for all vehicle geometries in a list of parts. Combine on a uniform grid.
"""
for key, veh_surf in vehicle_comp_coords.items():
# Convert coordinates and find overall best bounding box
veh_surf = convert_geom(veh_surf, tr_mat)
x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings)
voxel_data = {"x_grid": x_grid,
"y_grid": y_grid,
"z_grid": z_grid,
"vox_size": settings["voxel_size"],
"csys_trans": tr_mat,
"value": None}
for key, veh_surf in vehicle_comp_coords.items():
# Build up the voxel_data
logging.debug("Sampling component: {}".format(key))
## Default mask is 1 for anything not in an identified set
surf_mask = 1
for mask, geo_set in voxel_masks.items():
if veh_surf['part_class'] in geo_set:
surf_mask |= mask
voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data)
return voxel_data
if __name__ == "__main__":
from rpl.tools.api import test_bench_api as tb_api
SETTINGS = tb_api.load_settings("settings.js")
DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'}
HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'}
HULLS = {"Hull_Assembly_Parametric", 'Hull_Assembly_Example_With_Connector'}
MANIKINS = {"Manikin"}
# Special labels applied to specific types of voxels
VOXEL_LABELS = {2: HULLS,
4: DOORS,
8: HATCHES,
16: MANIKINS}
vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False)
# Modify node coords so the object aligns with the Cartesian axes of the occupancy voxel grid, +z = up
# The vector to rotate around is the cross product of the current z axis and the surface normal
veh_up = np.array([0., 1., 0.])
rot_around = np.cross(veh_up, np.array([0, 0, 1]))
rot_ang = -np.arccos(veh_up[2])
tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang)
# voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
vox_veh_folder = r"voxelated_models/vehicles/{}/{}".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
vox_veh_file = "voxels_{}_vox{}_hacked".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
try:
voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True)
except Exception:  # fall back to regenerating the voxel data if the cached copy cannot be loaded
voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
from mayavi import mlab
xo, yo, zo = np.where(voxel_data["value"] == 1)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.9, 0.9, 0.9),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 2)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1, 1, 1),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=0.05)
xo, yo, zo = np.where(voxel_data["value"] & 4)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1.0, 0.5, 0.5),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 8)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.6, 0.6, 1.0),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
# No manikins included, no need to plot them
# xo, yo, zo = np.where(voxel_data["value"] & 16)
# plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
# voxel_data["y_grid"][yo],
# voxel_data["z_grid"][zo],
# color=(0.5, 1.0, 0.8),
# scale_mode="none", scale_factor=voxel_data["vox_size"],
# mode='cube', opacity=1.0)
mlab.show()
# Save the voxelated model of the vehicle (sans door and other excluded parts)
data_io.save_multi_array(vox_veh_folder, vox_veh_file, voxel_data)
|
[
"mayavi.mlab.points3d",
"numpy.arccos",
"data_io.save_multi_array",
"mayavi.mlab.show",
"rpl.tools.geometry.geom_utils.rotation_about_vector",
"numpy.where",
"numpy.hstack",
"numpy.min",
"numpy.max",
"rpl.tools.ray_tracing.bsp_tree_poly.BSP_Element",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.vstack",
"rpl.tools.api.test_bench_api.load_settings",
"data_io.load_array",
"rpl.tools.api.test_bench_api.get_all_geom_set",
"numpy.arange"
] |
[((2930, 3002), 'numpy.arange', 'np.arange', (['x_min', "(x_max + settings['voxel_size'])", "settings['voxel_size']"], {}), "(x_min, x_max + settings['voxel_size'], settings['voxel_size'])\n", (2939, 3002), True, 'import numpy as np\n'), ((3017, 3089), 'numpy.arange', 'np.arange', (['y_min', "(y_max + settings['voxel_size'])", "settings['voxel_size']"], {}), "(y_min, y_max + settings['voxel_size'], settings['voxel_size'])\n", (3026, 3089), True, 'import numpy as np\n'), ((3104, 3176), 'numpy.arange', 'np.arange', (['z_min', "(z_max + settings['voxel_size'])", "settings['voxel_size']"], {}), "(z_min, z_max + settings['voxel_size'], settings['voxel_size'])\n", (3113, 3176), True, 'import numpy as np\n'), ((3475, 3516), 'numpy.dot', 'np.dot', (["veh_surf['nodes']", 'tr_mat[:3, :3]'], {}), "(veh_surf['nodes'], tr_mat[:3, :3])\n", (3481, 3516), True, 'import numpy as np\n'), ((4565, 4597), 'rpl.tools.ray_tracing.bsp_tree_poly.BSP_Element', 'BSP_Element', (['b_tree.tris', 'b_tree'], {}), '(b_tree.tris, b_tree)\n', (4576, 4597), False, 'from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element\n'), ((7387, 7422), 'rpl.tools.api.test_bench_api.load_settings', 'tb_api.load_settings', (['"""settings.js"""'], {}), "('settings.js')\n", (7407, 7422), True, 'from rpl.tools.api import test_bench_api as tb_api\n'), ((8159, 8184), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (8167, 8184), True, 'import numpy as np\n'), ((8289, 8342), 'rpl.tools.geometry.geom_utils.rotation_about_vector', 'geom_utils.rotation_about_vector', (['rot_around', 'rot_ang'], {}), '(rot_around, rot_ang)\n', (8321, 8342), False, 'from rpl.tools.geometry import geom_utils\n'), ((8967, 9001), 'numpy.where', 'np.where', (["(voxel_data['value'] == 1)"], {}), "(voxel_data['value'] == 1)\n", (8975, 9001), True, 'import numpy as np\n'), ((9022, 9224), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(0.9, 0.9, 0.9)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(1)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n voxel_data['z_grid'][zo], color=(0.9, 0.9, 0.9), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=1)\n", (9035, 9224), False, 'from mayavi import mlab\n'), ((9407, 9440), 'numpy.where', 'np.where', (["(voxel_data['value'] & 2)"], {}), "(voxel_data['value'] & 2)\n", (9415, 9440), True, 'import numpy as np\n'), ((9461, 9660), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(1, 1, 1)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(0.05)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n voxel_data['z_grid'][zo], color=(1, 1, 1), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=0.05)\n", (9474, 9660), False, 'from mayavi import mlab\n'), ((9843, 9876), 'numpy.where', 'np.where', (["(voxel_data['value'] & 4)"], {}), "(voxel_data['value'] & 4)\n", (9851, 9876), True, 'import numpy as np\n'), ((9897, 10099), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(1.0, 0.5, 0.5)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(1)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n 
voxel_data['z_grid'][zo], color=(1.0, 0.5, 0.5), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=1)\n", (9910, 10099), False, 'from mayavi import mlab\n'), ((10282, 10315), 'numpy.where', 'np.where', (["(voxel_data['value'] & 8)"], {}), "(voxel_data['value'] & 8)\n", (10290, 10315), True, 'import numpy as np\n'), ((10336, 10538), 'mayavi.mlab.points3d', 'mlab.points3d', (["voxel_data['x_grid'][xo]", "voxel_data['y_grid'][yo]", "voxel_data['z_grid'][zo]"], {'color': '(0.6, 0.6, 1.0)', 'scale_mode': '"""none"""', 'scale_factor': "voxel_data['vox_size']", 'mode': '"""cube"""', 'opacity': '(1)'}), "(voxel_data['x_grid'][xo], voxel_data['y_grid'][yo],\n voxel_data['z_grid'][zo], color=(0.6, 0.6, 1.0), scale_mode='none',\n scale_factor=voxel_data['vox_size'], mode='cube', opacity=1)\n", (10349, 10538), False, 'from mayavi import mlab\n'), ((11212, 11223), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (11221, 11223), False, 'from mayavi import mlab\n'), ((11315, 11381), 'data_io.save_multi_array', 'data_io.save_multi_array', (['vox_veh_folder', 'vox_veh_file', 'voxel_data'], {}), '(vox_veh_folder, vox_veh_file, voxel_data)\n', (11339, 11381), False, 'import data_io\n'), ((635, 674), 'numpy.hstack', 'np.hstack', (['(tris, minus_ones, tri_nums)'], {}), '((tris, minus_ones, tri_nums))\n', (644, 674), True, 'import numpy as np\n'), ((3391, 3447), 'numpy.vstack', 'np.vstack', (["(veh_surf['x'], veh_surf['y'], veh_surf['z'])"], {}), "((veh_surf['x'], veh_surf['y'], veh_surf['z']))\n", (3400, 3447), True, 'import numpy as np\n'), ((4792, 4855), 'numpy.zeros', 'np.zeros', (['(size_i - 1, size_j - 1, size_k - 1)'], {'dtype': 'np.uint32'}), '((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)\n', (4800, 4855), True, 'import numpy as np\n'), ((8217, 8236), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (8225, 8236), True, 'import numpy as np\n'), ((8254, 8274), 'numpy.arccos', 'np.arccos', (['veh_up[2]'], {}), '(veh_up[2])\n', (8263, 8274), True, 'import numpy as np\n'), ((8772, 8826), 'data_io.load_array', 'data_io.load_array', (['vox_veh_folder', 'vox_veh_file', '(True)'], {}), '(vox_veh_folder, vox_veh_file, True)\n', (8790, 8826), False, 'import data_io\n'), ((4192, 4209), 'numpy.min', 'np.min', (["surf['x']"], {}), "(surf['x'])\n", (4198, 4209), True, 'import numpy as np\n'), ((4222, 4239), 'numpy.max', 'np.max', (["surf['x']"], {}), "(surf['x'])\n", (4228, 4239), True, 'import numpy as np\n'), ((4271, 4288), 'numpy.min', 'np.min', (["surf['y']"], {}), "(surf['y'])\n", (4277, 4288), True, 'import numpy as np\n'), ((4301, 4318), 'numpy.max', 'np.max', (["surf['y']"], {}), "(surf['y'])\n", (4307, 4318), True, 'import numpy as np\n'), ((4350, 4367), 'numpy.min', 'np.min', (["surf['z']"], {}), "(surf['z'])\n", (4356, 4367), True, 'import numpy as np\n'), ((4380, 4397), 'numpy.max', 'np.max', (["surf['z']"], {}), "(surf['z'])\n", (4386, 4397), True, 'import numpy as np\n'), ((7917, 7942), 'rpl.tools.api.test_bench_api.get_all_geom_set', 'tb_api.get_all_geom_set', ([], {}), '()\n', (7940, 7942), True, 'from rpl.tools.api import test_bench_api as tb_api\n'), ((1187, 1220), 'numpy.zeros', 'np.zeros', (['(self.allocate_step, 3)'], {}), '((self.allocate_step, 3))\n', (1195, 1220), True, 'import numpy as np\n'), ((2320, 2341), 'numpy.min', 'np.min', (["veh_surf['x']"], {}), "(veh_surf['x'])\n", (2326, 2341), True, 'import numpy as np\n'), ((2355, 2376), 'numpy.max', 'np.max', (["veh_surf['x']"], {}), "(veh_surf['x'])\n", (2361, 2376), True, 'import numpy 
as np\n'), ((2413, 2434), 'numpy.min', 'np.min', (["veh_surf['y']"], {}), "(veh_surf['y'])\n", (2419, 2434), True, 'import numpy as np\n'), ((2448, 2469), 'numpy.max', 'np.max', (["veh_surf['y']"], {}), "(veh_surf['y'])\n", (2454, 2469), True, 'import numpy as np\n'), ((2506, 2527), 'numpy.min', 'np.min', (["veh_surf['z']"], {}), "(veh_surf['z'])\n", (2512, 2527), True, 'import numpy as np\n'), ((2541, 2562), 'numpy.max', 'np.max', (["veh_surf['z']"], {}), "(veh_surf['z'])\n", (2547, 2562), True, 'import numpy as np\n')]
|
from colicoords.synthetic_data import add_readout_noise, draw_poisson
from colicoords import load
import numpy as np
import mahotas as mh
from tqdm import tqdm
import os
import tifffile
def chunk_list(l, sizes):
    """Yield consecutive slices of ``l`` whose lengths are given by ``sizes``."""
prev = 0
for s in sizes:
result = l[prev:prev+s]
prev += s
yield result
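# --- Illustrative example (added for clarity; not part of the original script) ---
def _demo_chunk_list():
    # Slice [1..6] into consecutive pieces of length 2 and 3; trailing items are simply dropped.
    chunks = list(chunk_list([1, 2, 3, 4, 5, 6], [2, 3]))
    assert chunks == [[1, 2], [3, 4, 5]]
    return chunks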
def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):
    """Generate ``num_images`` synthetic images of the given ``shape``; the number of cells per
    image is drawn from a normal distribution (mean ``cell_per_img``, std ``cell_per_img_std``)."""
nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)
nums = nums[nums > 0]
assert sum(nums) < len(cell_list), 'Not enough cells'
chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]
dicts = [generate_image(cells, shape) for cells in tqdm(chunked)]
out_dict = {}
for i, d in enumerate(dicts):
for k, v in d.items():
if 'storm' in k:
v['frame'] = i + 1
if k in out_dict:
out_dict[k] = np.append(out_dict[k], v)
else:
out_dict[k] = v
else:
if k in out_dict:
out_dict[k][i] = v
else:
out_dict[k] = np.zeros((num_images, *shape))
out_dict[k][i] = v
return out_dict
def generate_image(cells, shape, max_dist=5):
    """Place the given cells, each randomly rotated, at random non-overlapping positions
    (minimum separation ``max_dist`` pixels) in a single image of the given ``shape``."""
thetas = 360 * np.random.rand(len(cells))
data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements'
out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}
for i, data in enumerate(data_list):
valid_position = False
while not valid_position:
pos_x = int(np.round(shape[1] * np.random.rand()))
pos_y = int(np.round(shape[0] * np.random.rand()))
min1 = pos_y - int(np.floor(data.shape[0]))
max1 = min1 + data.shape[0]
min2 = pos_x - int(np.floor(data.shape[1]))
max2 = min2 + data.shape[1]
# Crop the data for when the cell is on the border of the image
d_min1 = np.max([0 - min1, 0])
d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])
d_min2 = np.max([0 - min2, 0])
d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]])
data_cropped = data[d_min1:d_max1, d_min2:d_max2]
# Limit image position to the edges of the image
min1 = np.max([min1, 0])
max1 = np.min([max1, shape[0]])
min2 = np.max([min2, 0])
max2 = np.min([max2, shape[1]])
temp_binary = np.zeros(shape)
temp_binary[min1:max1, min2:max2] = data_cropped.binary_img
out_binary = (out_dict['binary'] > 0).astype(int)
distance_map = mh.distance(1 - out_binary, metric='euclidean')
if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
continue
valid_position = True
for name in data.names:
data_elem = data_cropped.data_dict[name]
if data_elem.dclass == 'storm':
data_elem['x'] += min2
data_elem['y'] += min1
xmax, ymax = shape[1], shape[0]
bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax)
data_out = data_elem[~bools].copy()
if name in out_dict:
out_dict[name] = np.append(out_dict[name], data_out)
else:
out_dict[name] = data_out
continue
elif data_elem.dclass == 'binary':
out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem)
else:
out_dict[name][min1:max1, min2:max2] += data_elem
return out_dict
def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3):
    """Render an image of the given ``shape`` from a STORM localisation table by summing an
    isotropic 2D Gaussian (width ``sigma``, optionally jittered per spot by ``sigma_std``)
    weighted by each localisation's intensity."""
xmax = shape[1]
ymax = shape[0]
step = 1
xi = np.arange(step / 2, xmax, step)
yi = np.arange(step / 2, ymax, step)
x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T
y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi))
x, y = storm_table['x'], storm_table['y']
img = np.zeros_like(x_coords)
intensities = storm_table['intensity']
sigma = sigma * np.ones_like(x) if not sigma_std else np.random.normal(sigma, sigma_std, size=len(x))
for _sigma, _int, _x, _y in zip(sigma, intensities, x, y):
img += _int * np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)
return img
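# --- Illustrative sketch (added for clarity; not part of the original script, values are made up) ---
# Renders a tiny 32x32 image from a hypothetical localisation table with two emitters.
def _demo_gen_image_from_storm():
    storm_table = {'x': np.array([10.0, 20.0]),
                   'y': np.array([16.0, 8.0]),
                   'intensity': np.array([500.0, 800.0])}
    return gen_image_from_storm(storm_table, shape=(32, 32))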
def gen_im(data_dir):
"""Generate microscopy images from a list of cell objects by placing them randomly oriented in the image."""
cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary'])
np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])
np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner'])
np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer'])
np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])
np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer'])
tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary'])
tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])
np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner'])
np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_inner'])
def noise_bf(data_dir):
"""add poissonian and readout noise to brightfield images"""
noise = 20
img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
for photons in [10000, 1000, 500]:
ratio = 1.0453 # ratio between 'background' (no cells) and cell wall
img = (photons*(ratio-1))*img_stack + photons
img = draw_poisson(img)
img = add_readout_noise(img, noise)
tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)
if __name__ == '__main__':
np.random.seed(42)
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
gen_im(data_dir)
noise_bf(data_dir)
|
[
"numpy.random.normal",
"numpy.ones_like",
"mahotas.distance",
"colicoords.synthetic_data.draw_poisson",
"numpy.random.rand",
"tqdm.tqdm",
"os.path.join",
"numpy.floor",
"numpy.max",
"numpy.exp",
"numpy.append",
"numpy.zeros",
"colicoords.synthetic_data.add_readout_noise",
"numpy.random.seed",
"numpy.min",
"numpy.zeros_like",
"numpy.arange"
] |
[((4100, 4131), 'numpy.arange', 'np.arange', (['(step / 2)', 'xmax', 'step'], {}), '(step / 2, xmax, step)\n', (4109, 4131), True, 'import numpy as np\n'), ((4141, 4172), 'numpy.arange', 'np.arange', (['(step / 2)', 'ymax', 'step'], {}), '(step / 2, ymax, step)\n', (4150, 4172), True, 'import numpy as np\n'), ((4361, 4384), 'numpy.zeros_like', 'np.zeros_like', (['x_coords'], {}), '(x_coords)\n', (4374, 4384), True, 'import numpy as np\n'), ((6879, 6897), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6893, 6897), True, 'import numpy as np\n'), ((1573, 1588), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1581, 1588), True, 'import numpy as np\n'), ((4874, 4937), 'os.path.join', 'os.path.join', (['data_dir', '"""cell_obj"""', '"""cells_final_selected.hdf5"""'], {}), "(data_dir, 'cell_obj', 'cells_final_selected.hdf5')\n", (4886, 4937), False, 'import os\n'), ((5133, 5179), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""binary.npy"""'], {}), "(data_dir, 'images', 'binary.npy')\n", (5145, 5179), False, 'import os\n'), ((5213, 5264), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""brightfield.npy"""'], {}), "(data_dir, 'images', 'brightfield.npy')\n", (5225, 5264), False, 'import os\n'), ((5303, 5353), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_inner.npy"""'], {}), "(data_dir, 'images', 'foci_inner.npy')\n", (5315, 5353), False, 'import os\n'), ((5391, 5441), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_outer.npy"""'], {}), "(data_dir, 'images', 'foci_outer.npy')\n", (5403, 5441), False, 'import os\n'), ((5479, 5530), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_inner.npy"""'], {}), "(data_dir, 'images', 'storm_inner.npy')\n", (5491, 5530), False, 'import os\n'), ((5569, 5620), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_outer.npy"""'], {}), "(data_dir, 'images', 'storm_outer.npy')\n", (5581, 5620), False, 'import os\n'), ((5668, 5714), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""binary.tif"""'], {}), "(data_dir, 'images', 'binary.tif')\n", (5680, 5714), False, 'import os\n'), ((5756, 5807), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""brightfield.tif"""'], {}), "(data_dir, 'images', 'brightfield.tif')\n", (5768, 5807), False, 'import os\n'), ((5854, 5904), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_inner.tif"""'], {}), "(data_dir, 'images', 'foci_inner.tif')\n", (5866, 5904), False, 'import os\n'), ((5950, 6000), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""foci_outer.tif"""'], {}), "(data_dir, 'images', 'foci_outer.tif')\n", (5962, 6000), False, 'import os\n'), ((6041, 6092), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_inner.txt"""'], {}), "(data_dir, 'images', 'storm_inner.txt')\n", (6053, 6092), False, 'import os\n'), ((6134, 6185), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""storm_outer.txt"""'], {}), "(data_dir, 'images', 'storm_outer.txt')\n", (6146, 6185), False, 'import os\n'), ((6342, 6393), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""', '"""brightfield.npy"""'], {}), "(data_dir, 'images', 'brightfield.npy')\n", (6354, 6393), False, 'import os\n'), ((6580, 6597), 'colicoords.synthetic_data.draw_poisson', 'draw_poisson', (['img'], {}), '(img)\n', (6592, 6597), False, 'from colicoords.synthetic_data import add_readout_noise, draw_poisson\n'), ((6612, 6641), 
'colicoords.synthetic_data.add_readout_noise', 'add_readout_noise', (['img', 'noise'], {}), '(img, noise)\n', (6629, 6641), False, 'from colicoords.synthetic_data import add_readout_noise, draw_poisson\n'), ((706, 719), 'tqdm.tqdm', 'tqdm', (['chunked'], {}), '(chunked)\n', (710, 719), False, 'from tqdm import tqdm\n'), ((2202, 2223), 'numpy.max', 'np.max', (['[0 - min1, 0]'], {}), '([0 - min1, 0])\n', (2208, 2223), True, 'import numpy as np\n'), ((2245, 2304), 'numpy.min', 'np.min', (['[data.shape[0] + (shape[0] - pos_y), data.shape[0]]'], {}), '([data.shape[0] + (shape[0] - pos_y), data.shape[0]])\n', (2251, 2304), True, 'import numpy as np\n'), ((2327, 2348), 'numpy.max', 'np.max', (['[0 - min2, 0]'], {}), '([0 - min2, 0])\n', (2333, 2348), True, 'import numpy as np\n'), ((2370, 2429), 'numpy.min', 'np.min', (['[data.shape[1] + (shape[1] - pos_x), data.shape[1]]'], {}), '([data.shape[1] + (shape[1] - pos_x), data.shape[1]])\n', (2376, 2429), True, 'import numpy as np\n'), ((2574, 2591), 'numpy.max', 'np.max', (['[min1, 0]'], {}), '([min1, 0])\n', (2580, 2591), True, 'import numpy as np\n'), ((2611, 2635), 'numpy.min', 'np.min', (['[max1, shape[0]]'], {}), '([max1, shape[0]])\n', (2617, 2635), True, 'import numpy as np\n'), ((2655, 2672), 'numpy.max', 'np.max', (['[min2, 0]'], {}), '([min2, 0])\n', (2661, 2672), True, 'import numpy as np\n'), ((2692, 2716), 'numpy.min', 'np.min', (['[max2, shape[1]]'], {}), '([max2, shape[1]])\n', (2698, 2716), True, 'import numpy as np\n'), ((2744, 2759), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2752, 2759), True, 'import numpy as np\n'), ((2921, 2968), 'mahotas.distance', 'mh.distance', (['(1 - out_binary)'], {'metric': '"""euclidean"""'}), "(1 - out_binary, metric='euclidean')\n", (2932, 2968), True, 'import mahotas as mh\n'), ((4448, 4463), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (4460, 4463), True, 'import numpy as np\n'), ((4619, 4704), 'numpy.exp', 'np.exp', (['(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)'], {}), '(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2\n )\n', (4625, 4704), True, 'import numpy as np\n'), ((5034, 5066), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (5046, 5066), False, 'import os\n'), ((5086, 5118), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (5098, 5118), False, 'import os\n'), ((6944, 6976), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (6956, 6976), False, 'import os\n'), ((6996, 7028), 'os.path.join', 'os.path.join', (['data_dir', '"""images"""'], {}), "(data_dir, 'images')\n", (7008, 7028), False, 'import os\n'), ((423, 483), 'numpy.random.normal', 'np.random.normal', (['cell_per_img', 'cell_per_img_std', 'num_images'], {}), '(cell_per_img, cell_per_img_std, num_images)\n', (439, 483), True, 'import numpy as np\n'), ((936, 961), 'numpy.append', 'np.append', (['out_dict[k]', 'v'], {}), '(out_dict[k], v)\n', (945, 961), True, 'import numpy as np\n'), ((1167, 1197), 'numpy.zeros', 'np.zeros', (['(num_images, *shape)'], {}), '((num_images, *shape))\n', (1175, 1197), True, 'import numpy as np\n'), ((1942, 1965), 'numpy.floor', 'np.floor', (['data.shape[0]'], {}), '(data.shape[0])\n', (1950, 1965), True, 'import numpy as np\n'), ((2039, 2062), 'numpy.floor', 'np.floor', (['data.shape[1]'], {}), '(data.shape[1])\n', (2047, 2062), True, 'import numpy as np\n'), ((3607, 3642), 'numpy.append', 
'np.append', (['out_dict[name]', 'data_out'], {}), '(out_dict[name], data_out)\n', (3616, 3642), True, 'import numpy as np\n'), ((1828, 1844), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1842, 1844), True, 'import numpy as np\n'), ((1891, 1907), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1905, 1907), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def flow_to_img(flow, normalize=True):
"""Convert flow to viewable image, using color hue to encode flow vector orientation, and color saturation to
encode vector length. This is similar to the OpenCV tutorial on dense optical flow, except that they map vector
length to the value plane of the HSV color model, instead of the saturation plane, as we do here.
Args:
flow: optical flow
normalize: Normalize flow to 0..255
Returns:
img: viewable representation of the dense optical flow in RGB format
Ref:
https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py
"""
hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32))
# A couple times, we've gotten NaNs out of the above...
nans = np.isnan(flow_magnitude)
if np.any(nans):
nans = np.where(nans)
flow_magnitude[nans] = 0.
# Normalize
hsv[..., 0] = flow_angle * 180 / np.pi / 2
if normalize is True:
hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)
else:
hsv[..., 1] = flow_magnitude
hsv[..., 2] = 255
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return img
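# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Converts a random dense flow field to its RGB visualisation; in practice ``flow`` would come
# from an optical-flow estimator such as cv2.calcOpticalFlowFarneback.
def _demo_flow_to_img():
    flow = np.random.randn(64, 64, 2).astype(np.float32)
    return flow_to_img(flow, normalize=True)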
|
[
"cv2.normalize",
"numpy.where",
"numpy.any",
"numpy.zeros",
"numpy.isnan",
"cv2.cvtColor"
] |
[((764, 823), 'numpy.zeros', 'np.zeros', (['(flow.shape[0], flow.shape[1], 3)'], {'dtype': 'np.uint8'}), '((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)\n', (772, 823), True, 'import numpy as np\n'), ((1011, 1035), 'numpy.isnan', 'np.isnan', (['flow_magnitude'], {}), '(flow_magnitude)\n', (1019, 1035), True, 'import numpy as np\n'), ((1043, 1055), 'numpy.any', 'np.any', (['nans'], {}), '(nans)\n', (1049, 1055), True, 'import numpy as np\n'), ((1373, 1409), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (1385, 1409), False, 'import cv2\n'), ((1072, 1086), 'numpy.where', 'np.where', (['nans'], {}), '(nans)\n', (1080, 1086), True, 'import numpy as np\n'), ((1233, 1293), 'cv2.normalize', 'cv2.normalize', (['flow_magnitude', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)\n', (1246, 1293), False, 'import cv2\n')]
|
#!/usr/bin/env python
import chainer
from algs import trpo
from env_makers import EnvMaker
from models import GaussianMLPPolicy, MLPBaseline
from utils import SnapshotSaver
import numpy as np
import os
import logger
log_dir = "data/local/trpo-pendulum"
np.random.seed(42)
# Clean up existing logs
os.system("rm -rf {}".format(log_dir))
with logger.session(log_dir):
env_maker = EnvMaker('Pendulum-v0')
env = env_maker.make()
policy = GaussianMLPPolicy(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
baseline = MLPBaseline(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
trpo(
env=env,
env_maker=env_maker,
n_envs=16,
policy=policy,
baseline=baseline,
batch_size=10000,
n_iters=100,
snapshot_saver=SnapshotSaver(log_dir),
)
|
[
"models.GaussianMLPPolicy",
"env_makers.EnvMaker",
"models.MLPBaseline",
"numpy.random.seed",
"logger.session",
"utils.SnapshotSaver"
] |
[((256, 274), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (270, 274), True, 'import numpy as np\n'), ((346, 369), 'logger.session', 'logger.session', (['log_dir'], {}), '(log_dir)\n', (360, 369), False, 'import logger\n'), ((387, 410), 'env_makers.EnvMaker', 'EnvMaker', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (395, 410), False, 'from env_makers import EnvMaker\n'), ((451, 635), 'models.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'observation_space': 'env.observation_space', 'action_space': 'env.action_space', 'env_spec': 'env.spec', 'hidden_sizes': '(64, 64)', 'hidden_nonlinearity': 'chainer.functions.tanh'}), '(observation_space=env.observation_space, action_space=env\n .action_space, env_spec=env.spec, hidden_sizes=(64, 64),\n hidden_nonlinearity=chainer.functions.tanh)\n', (468, 635), False, 'from models import GaussianMLPPolicy, MLPBaseline\n'), ((689, 867), 'models.MLPBaseline', 'MLPBaseline', ([], {'observation_space': 'env.observation_space', 'action_space': 'env.action_space', 'env_spec': 'env.spec', 'hidden_sizes': '(64, 64)', 'hidden_nonlinearity': 'chainer.functions.tanh'}), '(observation_space=env.observation_space, action_space=env.\n action_space, env_spec=env.spec, hidden_sizes=(64, 64),\n hidden_nonlinearity=chainer.functions.tanh)\n', (700, 867), False, 'from models import GaussianMLPPolicy, MLPBaseline\n'), ((1101, 1123), 'utils.SnapshotSaver', 'SnapshotSaver', (['log_dir'], {}), '(log_dir)\n', (1114, 1123), False, 'from utils import SnapshotSaver\n')]
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from matplotlib.ticker import MaxNLocator
plt.style.use('seaborn-darkgrid')
class BaseTraj:
def __init__(self, model, X):
self.model = model
assert len(X.shape) == 2, f"X should be 2-d, instead got shape {X.shape}"
self.X = X
self.means = self.model.means_.copy()
self.states = self.model.predict(X)
self.n_states = len(np.unique(self.states))
self.trans = self.model.transmat_.copy()
def rho_dt_bins(self, rho, theta, dt, bins=12):
"""
Bin rho values and dwell time on polar coordinates.
:param rho: radial distances (one per state or transition)
:param theta: angles in radians, in the range [-pi, pi]
:param dt: dwell times associated with each rho/theta pair
:param bins: number of angular bins
:return: bin centres, mean radial distance per bin, summed dwell time per bin
"""
bins = np.linspace(-np.pi, np.pi, bins+1)
bin_means = (bins[:-1] + bins[1:]) / 2
bin_ix = np.digitize(theta, bins)
bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean()
if len(rho[(bin_ix == i) & (rho > 0)]) > 0 else
0 for i in range(1, len(bins))]
bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum()
if len(dt[(bin_ix == i) & (dt > 0)]) > 0 else
0 for i in range(1, len(bins))]
return bin_means, bin_rd, bin_dt
def transition_vectors(self):
"""
Transition vectors between states on polar coordinates.
:return: (rho, theta) of the vectors between state means; rho is zeroed for self-transitions and transitions below the probability threshold
"""
mu_x, mu_y = self.means[:, 0], self.means[:, 1]
mu_x_dist = mu_x - mu_x[:, np.newaxis]
mu_y_dist = mu_y - mu_y[:, np.newaxis]
dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten()))
trans_rho, trans_theta = self.cart2pol(dist_vect)
trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten()
return trans_rho, trans_theta
def design_transition(self, thresh=0.1):
design_trans = self.trans
diag_ix = np.diag_indices(len(design_trans))
design_trans[diag_ix] = 0
design_trans[design_trans < thresh] = 0
design_trans[design_trans >= thresh] = 1
return design_trans
def norm_trans_time(self):
"""
Normalized transition time.
:return: per-transition weights (state occupancy counts masked by the transition design matrix), normalized to sum to 1
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
# normalize by transition probability
dt = (counts * self.design_transition()).flatten()
return dt / dt.sum()
def norm_state_time(self):
"""
Normalized state time.
:return: fraction of time points spent in each state
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
return counts / counts.sum()
@staticmethod
def cart2pol(arr):
"""
Cartesian space to polar space.
Args:
arr (numpy.array): Array of shape [n_state x dims]
"""
x, y = arr[:, 0], arr[:, 1]
rho = np.sqrt(x ** 2 + y ** 2)
theta = np.arctan2(y, x)
return rho, theta
class PhenoSign(BaseTraj):
"""Phenotypic Signature class."""
def __init__(self, model, X):
super(PhenoSign, self).__init__(model, X)
self.bin_means, self.signature = self.get_signature()
def get_signature(self):
"""
Calculate phenotypic signature for a given model.
:return: bin_means, array of shape [4 x n_bins] with
1. state radial distances
2. state dwell times
3. transition distances
4. transition dwell times
"""
# states
mu_rho, mu_theta = self.cart2pol(self.means)
state_dt = self.norm_state_time()
bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt)
# transitions
trans_rho, trans_theta = self.transition_vectors()
trans_dt = self.norm_trans_time()
bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt)
assert (bin_means_1 == bin_means_2).all(), "state and transition vectors are binned differently and can" \
"not be concatenated."
return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))
class Saphire(PhenoSign):
"""Implementation of the SAPHIRE algorithm for plotting Hidden Markov Models.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Time series modeling of live-cell shape dynamics for
image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90.
"""
def __init__(self, model, X):
super(Saphire, self).__init__(model, X)
def plot_traj(self, projection='cartesian', ymax=None):
"""
Plot cell trajectory.
Args:
projection (str): cartesian or polar.
ymax (int): optional upper limit for the radial (polar) or y (cartesian) axis.
"""
avail_proj = ['cartesian', 'polar']
projection = projection.lower()
assert projection in avail_proj, f"projection unknown: {projection}"
if projection == 'cartesian':
projection = None
cmap = plt.get_cmap('binary')
cmap = truncate_colormap(cmap, minval=0.2)
if projection == 'polar':
y, x = self.cart2pol(self.X)
y_mu, x_mu = self.cart2pol(self.means)
else:
x, y = self.X[:, 0], self.X[:, 1]
x_mu, y_mu = self.means[:, 0], self.means[:, 1]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection})
ax.scatter(x, y,
c=self.states, cmap='Set1', zorder=2)
traj = ax.scatter(x_mu, y_mu,
c=np.unique(self.states), cmap='Set1',
s=200, zorder=2, edgecolor='black', alpha=0.6)
legend = ax.legend(*traj.legend_elements(),
loc="upper right", bbox_to_anchor=(1.2, 0.94),
title="States")
ax.add_artist(legend)
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
colorline(x, y, cmap=cmap, zorder=1)
norm = mpl.colors.Normalize(vmin=0, vmax=48)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Time')
plt.show()
return fig, ax
def plot_states(self, ymax=None):
"""
Plot cell states.
"""
bin_rd, bin_dt = self.signature[0, :], self.signature[1, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Oranges")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing state dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def plot_transition(self, ymax=None):
"""
Plot transition between cell states.
"""
bin_rd, bin_dt = self.signature[2, :], self.signature[3, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Blues")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing transition dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0, zorder=1):
"""
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha, zorder=zorder)
ax = plt.gca()
ax.add_collection(lc)
return lc
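# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Draws a spiral whose colour encodes progression along the trajectory, the same way colorline
# encodes time in Saphire.plot_traj above.
def _demo_colorline():
    t = np.linspace(0, 4 * np.pi, 200)
    x, y = t * np.cos(t), t * np.sin(t)
    fig, ax = plt.subplots()
    colorline(x, y, cmap=plt.get_cmap('viridis'))
    ax.set_xlim(x.min() - 1, x.max() + 1)
    ax.set_ylim(y.min() - 1, y.max() + 1)
    return fig, ax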
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
https://stackoverflow.com/a/18926541
'''
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
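# --- Illustrative example (added for clarity; not part of the original module) ---
def _demo_truncate_colormap():
    # Keep only the 20%-80% range of 'viridis', e.g. to avoid a colormap's extreme ends.
    return truncate_colormap('viridis', minval=0.2, maxval=0.8)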
|
[
"numpy.sqrt",
"matplotlib.collections.LineCollection",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"numpy.arctan2",
"matplotlib.pyplot.Normalize",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"numpy.vstack",
"numpy.concatenate",
"numpy.digitize",
"matplotlib.pyplot.gca",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.unique",
"matplotlib.pyplot.subplots"
] |
[((158, 191), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (171, 191), True, 'import matplotlib.pyplot as plt\n'), ((8329, 8351), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""copper"""'], {}), "('copper')\n", (8341, 8351), True, 'import matplotlib.pyplot as plt\n'), ((8358, 8381), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (8371, 8381), True, 'import matplotlib.pyplot as plt\n'), ((8871, 8884), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (8881, 8884), True, 'import numpy as np\n'), ((8930, 9045), 'matplotlib.collections.LineCollection', 'mcoll.LineCollection', (['segments'], {'array': 'z', 'cmap': 'cmap', 'norm': 'norm', 'linewidth': 'linewidth', 'alpha': 'alpha', 'zorder': 'zorder'}), '(segments, array=z, cmap=cmap, norm=norm, linewidth=\n linewidth, alpha=alpha, zorder=zorder)\n', (8950, 9045), True, 'import matplotlib.collections as mcoll\n'), ((9081, 9090), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9088, 9090), True, 'import matplotlib.pyplot as plt\n'), ((9420, 9469), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (9434, 9469), True, 'import numpy as np\n'), ((815, 851), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(bins + 1)'], {}), '(-np.pi, np.pi, bins + 1)\n', (826, 851), True, 'import numpy as np\n'), ((914, 938), 'numpy.digitize', 'np.digitize', (['theta', 'bins'], {}), '(theta, bins)\n', (925, 938), True, 'import numpy as np\n'), ((2323, 2365), 'numpy.unique', 'np.unique', (['self.states'], {'return_counts': '(True)'}), '(self.states, return_counts=True)\n', (2332, 2365), True, 'import numpy as np\n'), ((2699, 2741), 'numpy.unique', 'np.unique', (['self.states'], {'return_counts': '(True)'}), '(self.states, return_counts=True)\n', (2708, 2741), True, 'import numpy as np\n'), ((3077, 3101), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (3084, 3101), True, 'import numpy as np\n'), ((3118, 3134), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (3128, 3134), True, 'import numpy as np\n'), ((5254, 5276), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""binary"""'], {}), "('binary')\n", (5266, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5594, 5661), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'subplot_kw': "{'projection': projection}"}), "(figsize=(5, 5), subplot_kw={'projection': projection})\n", (5606, 5661), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6340), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(48)'}), '(vmin=0, vmax=48)\n', (6323, 6340), True, 'import matplotlib as mpl\n'), ((6535, 6545), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6543, 6545), True, 'import matplotlib.pyplot as plt\n'), ((6747, 6811), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'subplot_kw': "{'projection': 'polar'}"}), "(figsize=(5, 5), subplot_kw={'projection': 'polar'})\n", (6759, 6811), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6850), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Oranges"""'], {}), "('Oranges')\n", (6839, 6850), True, 'import matplotlib.pyplot as plt\n'), ((7109, 7145), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (7129, 7145), True, 'import matplotlib as mpl\n'), ((7619, 7683), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'subplot_kw': "{'projection': 'polar'}"}), "(figsize=(5, 5), subplot_kw={'projection': 'polar'})\n", (7631, 7683), True, 'import matplotlib.pyplot as plt\n'), ((7699, 7720), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (7711, 7720), True, 'import matplotlib.pyplot as plt\n'), ((7979, 8015), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (7999, 8015), True, 'import matplotlib as mpl\n'), ((8848, 8861), 'numpy.array', 'np.array', (['[z]'], {}), '([z])\n', (8856, 8861), True, 'import numpy as np\n'), ((9654, 9672), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (9666, 9672), True, 'import matplotlib.pyplot as plt\n'), ((490, 512), 'numpy.unique', 'np.unique', (['self.states'], {}), '(self.states)\n', (499, 512), True, 'import numpy as np\n'), ((4349, 4420), 'numpy.vstack', 'np.vstack', (['(state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)'], {}), '((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))\n', (4358, 4420), True, 'import numpy as np\n'), ((6216, 6241), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (6227, 6241), False, 'from matplotlib.ticker import MaxNLocator\n'), ((6414, 6457), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (6435, 6457), True, 'import matplotlib as mpl\n'), ((7067, 7092), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (7078, 7092), False, 'from matplotlib.ticker import MaxNLocator\n'), ((7219, 7262), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (7240, 7262), True, 'import matplotlib as mpl\n'), ((7937, 7962), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (7948, 7962), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8089, 8132), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (8110, 8132), True, 'import matplotlib as mpl\n'), ((9825, 9855), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', 'n'], {}), '(minval, maxval, n)\n', (9836, 9855), True, 'import numpy as np\n'), ((5810, 5832), 'numpy.unique', 'np.unique', (['self.states'], {}), '(self.states)\n', (5819, 5832), True, 'import numpy as np\n'), ((9368, 9384), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (9376, 9384), True, 'import numpy as np\n')]
|
import cv2
from cv2 import *
import numpy as np
from matplotlib import pyplot as plt
###############################SIFT MATCH Function#################################
def SIFTMATCH(img1,img2):
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
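    # FLANN matcher with a KD-tree index; 'checks' bounds how many candidate leaves are
    # visited per query, trading search accuracy for speed.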
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
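        # RANSAC fits the homography while rejecting outlier correspondences; 5.0 is the
        # maximum reprojection error (in pixels) for a match to be counted as an inlier.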
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
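        # dst holds the query-image corners mapped into the scene; the polyline below
        # outlines the detected object.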
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
print("Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT))
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = None,
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
    cv2.moveWindow('output', 150,150) # Move the window to (150, 150)
cv2.imshow('output',img3)
    cv2.waitKey(0) # 0 means wait indefinitely for a keyboard event
cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we created
###################################################################################################
#################################Function#########################
def CercleDetection(img1):
# Read Image
raw_image = cv2.imread(img1)
    # Bilateral filtering is a non-linear filter that reduces noise while preserving edges.
    # Parameters: the image, the diameter of the pixel neighbourhood, sigmaColor (filter sigma
    # in the colour space) and sigmaSpace (filter sigma in the coordinate space).
bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175)
    # Canny edge detector. It takes 3 parameters: the image, the lower threshold and the upper threshold.
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)
# Find Contours
_, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour_list = []
for contour in contours:
approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
area = cv2.contourArea(contour)
if ((len(approx) > 8) & (len(approx) < 23) & (area > 50000) ):
contour_list.append(contour)
print("area %.3f"%(area))
M = cv2.moments(contour)
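            # Image moments: the contour centroid is (m10/m00, m01/m00); guard against a
            # zero area (m00 == 0) before dividing.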
# calculate x,y coordinate of center
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1)
cv2.putText(raw_image, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# Draw Contours of circles
cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3)
# Display Images
cv2.imshow("Objects Detected",raw_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return cX,cY
############################################################
###########################MAIN#############################
MIN_MATCH_COUNT = 10
e1 = cv2.getTickCount()
# # initialize the camera
# cam = VideoCapture(0) # 0 -> index of camera
# s, img1 = cam.read()
# ret = cam.set(3,1920);
# ret = cam.set(4,1080);
# if s: # frame captured without any errors
# cv2.namedWindow("output", cv2.WINDOW_NORMAL)
# cv2.imshow("cam-test",img1)
# waitKey(0)
# destroyWindow("cam-test")
# imwrite("Scene.jpg",img1) #save image
# del(cam)
# Scene image in Grayscale
# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
imgray = cv2.imread('Scene.jpg', 0) # queryImage
# Reference Piece Image
img1 = cv2.imread('img3.jpg',0) # queryImage
# SIFT Algorithm fore Object Detection
SIFTMATCH(img1, imgray)
# image de reference
cX, cY = CercleDetection('img3.jpg')
print('cX = %.3f , cY =%.3f' % (cX, cY))
# Image from the webcam (saved earlier as 'Scene.jpg')
cX2, cY2 = CercleDetection('Scene.jpg')
print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))
deltaX = (cX2-cX)
deltaY = -(cY2 - cY)
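# Pixel offset of the detected circle between the reference and the webcam scene;
# image rows grow downward, which is presumably why the y-difference is negated.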
# Write X and Y values to File
file = open("values.txt", "w")
file.write("%.3f \n" % deltaX)
file.write("%.3f \n" % deltaY)
file.close()
#Calculate time of execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
print('time needed to execute')
print(time)
|
[
"numpy.int32",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.xfeatures2d.SIFT_create",
"cv2.moveWindow",
"cv2.arcLength",
"cv2.contourArea",
"cv2.perspectiveTransform",
"cv2.waitKey",
"cv2.getTickFrequency",
"cv2.drawContours",
"cv2.findHomography",
"cv2.putText",
"cv2.circle",
"cv2.moments",
"cv2.Canny",
"cv2.imread",
"cv2.bilateralFilter",
"cv2.getTickCount",
"cv2.FlannBasedMatcher",
"cv2.findContours",
"cv2.drawMatches",
"numpy.float32"
] |
[((4277, 4295), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (4293, 4295), False, 'import cv2\n'), ((4792, 4818), 'cv2.imread', 'cv2.imread', (['"""Scene.jpg"""', '(0)'], {}), "('Scene.jpg', 0)\n", (4802, 4818), False, 'import cv2\n'), ((4869, 4894), 'cv2.imread', 'cv2.imread', (['"""img3.jpg"""', '(0)'], {}), "('img3.jpg', 0)\n", (4879, 4894), False, 'import cv2\n'), ((5432, 5450), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5448, 5450), False, 'import cv2\n'), ((252, 281), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (279, 281), False, 'import cv2\n'), ((588, 638), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (609, 638), False, 'import cv2\n'), ((1808, 1872), 'cv2.drawMatches', 'cv2.drawMatches', (['img1', 'kp1', 'img2', 'kp2', 'good', 'None'], {}), '(img1, kp1, img2, kp2, good, None, **draw_params)\n', (1823, 1872), False, 'import cv2\n'), ((1876, 1910), 'cv2.moveWindow', 'cv2.moveWindow', (['"""output"""', '(150)', '(150)'], {}), "('output', 150, 150)\n", (1890, 1910), False, 'import cv2\n'), ((1937, 1963), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'img3'], {}), "('output', img3)\n", (1947, 1963), False, 'import cv2\n'), ((1968, 1982), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1979, 1982), False, 'import cv2\n'), ((2062, 2085), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2083, 2085), False, 'import cv2\n'), ((2394, 2410), 'cv2.imread', 'cv2.imread', (['img1'], {}), '(img1)\n', (2404, 2410), False, 'import cv2\n'), ((2691, 2734), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['raw_image', '(5)', '(175)', '(175)'], {}), '(raw_image, 5, 175, 175)\n', (2710, 2734), False, 'import cv2\n'), ((2887, 2931), 'cv2.Canny', 'cv2.Canny', (['bilateral_filtered_image', '(75)', '(200)'], {}), '(bilateral_filtered_image, 75, 200)\n', (2896, 2931), False, 'import cv2\n'), ((2987, 3064), 'cv2.findContours', 'cv2.findContours', (['edge_detected_image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3003, 3064), False, 'import cv2\n'), ((3906, 3967), 'cv2.drawContours', 'cv2.drawContours', (['raw_image', 'contour_list', '(-1)', '(0, 255, 0)', '(3)'], {}), '(raw_image, contour_list, -1, (0, 255, 0), 3)\n', (3922, 3967), False, 'import cv2\n'), ((4000, 4041), 'cv2.imshow', 'cv2.imshow', (['"""Objects Detected"""', 'raw_image'], {}), "('Objects Detected', raw_image)\n", (4010, 4041), False, 'import cv2\n'), ((4046, 4060), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4057, 4060), False, 'import cv2\n'), ((4066, 4089), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4087, 4089), False, 'import cv2\n'), ((5470, 5492), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (5490, 5492), False, 'import cv2\n'), ((1085, 1138), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (1103, 1138), False, 'import cv2\n'), ((1305, 1337), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts', 'M'], {}), '(pts, M)\n', (1329, 1337), False, 'import cv2\n'), ((3218, 3242), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (3233, 3242), False, 'import cv2\n'), ((3413, 3433), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (3424, 3433), False, 'import cv2\n'), ((3663, 3718), 'cv2.circle', 'cv2.circle', 
(['raw_image', '(cX, cY)', '(5)', '(255, 255, 255)', '(-1)'], {}), '(raw_image, (cX, cY), 5, (255, 255, 255), -1)\n', (3673, 3718), False, 'import cv2\n'), ((3732, 3842), 'cv2.putText', 'cv2.putText', (['raw_image', '"""centroid"""', '(cX - 25, cY - 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 255, 255)', '(2)'], {}), "(raw_image, 'centroid', (cX - 25, cY - 25), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n", (3743, 3842), False, 'import cv2\n'), ((915, 961), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in good]'], {}), '([kp1[m.queryIdx].pt for m in good])\n', (925, 961), True, 'import numpy as np\n'), ((999, 1045), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in good]'], {}), '([kp2[m.trainIdx].pt for m in good])\n', (1009, 1045), True, 'import numpy as np\n'), ((1226, 1286), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]'], {}), '([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])\n', (1236, 1286), True, 'import numpy as np\n'), ((1375, 1388), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (1383, 1388), True, 'import numpy as np\n'), ((3168, 3196), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (3181, 3196), False, 'import cv2\n')]
|
import dash
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import dash_table
import plotly.express as ex
import plotly.graph_objects as go
import pandas as pd
import numpy as np
data = pd.read_csv("./data/Phone_dataset_new.csv", header=0)
details = pd.read_csv("./data/Phone_details.csv", header=0)
names = details.loc[0]
data = data.rename(columns=names)
details = details.rename(columns=names)
maxi = details.loc[1].astype(int)
details_on_card = details.loc[2].astype(int)
details_on_card = details.columns[details_on_card == 1]
fitness_columns = {
"Memory": -1,
"RAM": -1,
"Camera (MP)": -1,
"Price (Euros)": 1,
}
fitness_data = data[fitness_columns] * maxi[fitness_columns].values
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.LITERA],
eager_loading=True,
suppress_callback_exceptions=True,
)
app.layout = html.Div(
children=[
# .container class is fixed, .container.scalable is scalable
dbc.Row(
[
dbc.Col(
html.H1(
children="What is your optimal phone?",
className="text-center mt-4",
)
)
]
),
dbc.Row(
[
dbc.Col(
children=[
# Top card with details(?)
dbc.Card(
children=[
dbc.CardBody(
[
html.H4(
"Researcher's Night Event",
className="card-title text-center",
),
html.P(
(
"This app uses decision support tools to "
"quickly and easily find phones which reflect "
"the user's desires. Input your preferences "
"below. The box on top right shows the phone "
"which matches the preferences the best. "
"The box on bottom right provides some "
"close alternatives."
),
className="card-text",
),
]
)
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.Form(
[
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired operating system",
html_for="os-choice",
),
dbc.RadioItems(
options=[
{
"label": "Android",
"value": "Android",
},
{"label": "iOS", "value": "IOS"},
{
"label": "No preference",
"value": "both",
},
],
id="os-choice",
value="both",
inline=True,
# className="text-center mt-4",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired Memory capacity (GB)",
html_for="memory-choice",
),
dcc.Slider(
id="memory-choice",
min=16,
max=256,
step=None,
included=False,
value=256,
marks={
16: "16",
32: "32",
64: "64",
128: "128",
256: "256",
},
# className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired RAM capacity (GB)",
html_for="ram-choice",
),
dcc.Slider(
id="ram-choice",
min=2,
max=12,
step=1,
value=12,
included=False,
marks={
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "10",
11: "11",
12: "12",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired camera resolution (MP)",
html_for="cam-choice",
),
dcc.Slider(
id="cam-choice",
min=0,
max=130,
step=1,
included=False,
value=70,
marks={
0: "0",
10: "10",
30: "30",
50: "50",
70: "70",
90: "90",
110: "110",
130: "130",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired budget (Euros)",
html_for="cost-choice",
),
dcc.Slider(
id="cost-choice",
min=0,
max=1400,
step=1,
included=False,
value=100,
marks={
0: "0",
200: "200",
400: "400",
600: "600",
800: "800",
1000: "1000",
1200: "1200",
1400: "1400",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
],
style={"maxHeight": "560px", "overflow": "auto"},
),
],
width={"size": 5, "offset": 1},
),
dbc.Col(
children=[
dbc.Card(
children=[
dbc.CardHeader("The best phone for you is:"),
dbc.CardBody(id="results"),
],
className="mb-4",
),
dbc.Card(
children=[
dbc.CardHeader("Other great phones:"),
dbc.CardBody(
id="other-results",
children=(
[
html.P(
html.Span(
f"{i}. ",
id=f"other-results-list-{i}",
)
)
for i in range(2, 6)
]
+ [
dbc.Tooltip(
id=f"other-results-tooltip-{i}",
target=f"other-results-list-{i}",
placement="right",
style={
"maxWidth": 700,
"background-color": "white",
"color": "white",
"border-style": "solid",
"border-color": "black",
},
)
for i in range(2, 6)
]
),
),
],
className="mt-4",
),
html.Div(id="tooltips"),
],
width={"size": 5, "offset": 0},
className="mb-2 mt-2",
),
]
),
dbc.Row([html.Div(id="callback-dump")]),
],
)
@app.callback(
[
Output("results", "children"),
*[Output(f"other-results-list-{i}", "children") for i in range(2, 6)],
*[Output(f"other-results-tooltip-{i}", "children") for i in range(2, 6)],
],
[
Input(f"{attr}-choice", "value")
for attr in ["os", "memory", "ram", "cam", "cost"]
],
)
def results(*choices):
if choices[0] == "both":
choice_data = data
elif choices[0] == "IOS":
choice_data = data[[True if "IOS" in st else False for st in data["OS"]]]
    elif choices[0] == "Android":
        choice_data = data[[True if "Android" in st else False for st in data["OS"]]]
relevant_data = choice_data[
["Memory", "RAM", "Camera (MP)", "Price (Euros)",]
].reset_index(drop=True)
card_data = choice_data[details_on_card].reset_index(drop=True)
maxi = np.asarray([-1, -1, -1, 1])
relevant_data = relevant_data * maxi
ideal = relevant_data.min().values
nadir = relevant_data.max().values
aspirations = choices[1:] * maxi
distance = (aspirations - relevant_data) / (ideal - nadir)
distance = distance.max(axis=1)
distance_order = np.argsort(distance)
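    # Reference-point ranking (a Chebyshev-type achievement scalarizing function): every
    # attribute is first turned into "smaller is better" via `maxi`, each deviation from the
    # user's aspirations is normalised by the ideal-nadir range, and a phone is scored by its
    # worst (largest) normalised deviation. Sorting ascending puts the best match first.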
best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:])
total_number = len(distance_order)
if total_number >= 4:
others, tooltips = other_options(card_data.loc[distance_order.values[1:5]])
else:
others, tooltips = other_options(
card_data.loc[distance_order.values[1:total_number]]
)
others = others + [f"{i}. -" for i in range(len(others) + 2, 6)]
tooltips = tooltips + [None for i in range(len(tooltips) + 2, 6)]
return (best, *others, *tooltips)
"""@app.callback(Output("tooltips", "children"), [Input("callback-dump", "children")])
def tooltips(tooldict):
num = len(tooldict["ids"])
content = []
for i in range(num):
content.append(dbc.Tooltip(tooldict["tables"][i], target=tooldict["ids"][i]))
return content"""
def table_from_data(data, choices):
# print(choices)
to_compare = ["Memory", "RAM", "Camera (MP)", "Price (Euros)"]
# print(data[to_compare].values)
diff = (data[to_compare].values - choices) * [1, 1, 1, -1]
colors = [None, None, None] + ["green" if x >= 0 else "red" for x in diff]
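    # diff >= 0 means the phone meets or beats the aspiration (the price column is flipped
    # since lower is better); those attributes get a green marker, the others red. The
    # leading None entries are for card columns that are not compared.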
# print(np.sign(diff))
return dbc.Table(
[
html.Tbody(
[
html.Tr(
[
html.Th(col),
html.Td([str(data[col]),],),
html.Td([html.Span(" ▉", style={"color": c,},)],),
]
)
for (col, c) in zip(data.index, colors)
]
)
]
)
def table_from_data_horizontal(data):
header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))]
body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])]
return dbc.Table(header + body)
def other_options(data):
contents = []
tables = []
ids = []
i = 2
for index, row in data.iterrows():
contents.append(f"{i}. {row['Model']}")
tables.append(table_from_data_horizontal(row))
i = i + 1
return contents, tables
if __name__ == "__main__":
app.run_server(debug=False)
|
[
"pandas.read_csv",
"dash.dependencies.Input",
"numpy.argsort",
"dash_html_components.Td",
"dash_bootstrap_components.Label",
"dash_html_components.Div",
"dash.Dash",
"dash_bootstrap_components.Tooltip",
"dash.dependencies.Output",
"dash_html_components.Span",
"numpy.asarray",
"dash_bootstrap_components.Table",
"dash_html_components.Th",
"dash_bootstrap_components.RadioItems",
"dash_html_components.H1",
"dash_core_components.Slider",
"dash_html_components.P",
"dash_html_components.H4",
"dash_bootstrap_components.CardHeader",
"dash_bootstrap_components.CardBody"
] |
[((345, 398), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Phone_dataset_new.csv"""'], {'header': '(0)'}), "('./data/Phone_dataset_new.csv', header=0)\n", (356, 398), True, 'import pandas as pd\n'), ((409, 458), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Phone_details.csv"""'], {'header': '(0)'}), "('./data/Phone_details.csv', header=0)\n", (420, 458), True, 'import pandas as pd\n'), ((947, 1068), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': '[dbc.themes.LITERA]', 'eager_loading': '(True)', 'suppress_callback_exceptions': '(True)'}), '(__name__, external_stylesheets=[dbc.themes.LITERA], eager_loading\n =True, suppress_callback_exceptions=True)\n', (956, 1068), False, 'import dash\n'), ((14486, 14513), 'numpy.asarray', 'np.asarray', (['[-1, -1, -1, 1]'], {}), '([-1, -1, -1, 1])\n', (14496, 14513), True, 'import numpy as np\n'), ((14790, 14810), 'numpy.argsort', 'np.argsort', (['distance'], {}), '(distance)\n', (14800, 14810), True, 'import numpy as np\n'), ((16645, 16669), 'dash_bootstrap_components.Table', 'dbc.Table', (['(header + body)'], {}), '(header + body)\n', (16654, 16669), True, 'import dash_bootstrap_components as dbc\n'), ((13663, 13692), 'dash.dependencies.Output', 'Output', (['"""results"""', '"""children"""'], {}), "('results', 'children')\n", (13669, 13692), False, 'from dash.dependencies import Input, Output, State\n'), ((13876, 13908), 'dash.dependencies.Input', 'Input', (['f"""{attr}-choice"""', '"""value"""'], {}), "(f'{attr}-choice', 'value')\n", (13881, 13908), False, 'from dash.dependencies import Input, Output, State\n'), ((13704, 13749), 'dash.dependencies.Output', 'Output', (['f"""other-results-list-{i}"""', '"""children"""'], {}), "(f'other-results-list-{i}', 'children')\n", (13710, 13749), False, 'from dash.dependencies import Input, Output, State\n'), ((13783, 13831), 'dash.dependencies.Output', 'Output', (['f"""other-results-tooltip-{i}"""', '"""children"""'], {}), "(f'other-results-tooltip-{i}', 'children')\n", (13789, 13831), False, 'from dash.dependencies import Input, Output, State\n'), ((13591, 13619), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""callback-dump"""'}), "(id='callback-dump')\n", (13599, 13619), True, 'import dash_html_components as html\n'), ((16516, 16528), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (16523, 16528), True, 'import dash_html_components as html\n'), ((1268, 1345), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""What is your optimal phone?"""', 'className': '"""text-center mt-4"""'}), "(children='What is your optimal phone?', className='text-center mt-4')\n", (1275, 1345), True, 'import dash_html_components as html\n'), ((16588, 16606), 'dash_html_components.Td', 'html.Td', (['data[col]'], {}), '(data[col])\n', (16595, 16606), True, 'import dash_html_components as html\n'), ((16136, 16148), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (16143, 16148), True, 'import dash_html_components as html\n'), ((13387, 13410), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""tooltips"""'}), "(id='tooltips')\n", (13395, 13410), True, 'import dash_html_components as html\n'), ((16244, 16279), 'dash_html_components.Span', 'html.Span', (['""" ▉"""'], {'style': "{'color': c}"}), "(' ▉', style={'color': c})\n", (16253, 16279), True, 'import dash_html_components as html\n'), ((11217, 11261), 'dash_bootstrap_components.CardHeader', 'dbc.CardHeader', (['"""The best phone for you is:"""'], {}), "('The best phone for you is:')\n", (11231, 11261), True, 
'import dash_bootstrap_components as dbc\n'), ((11295, 11321), 'dash_bootstrap_components.CardBody', 'dbc.CardBody', ([], {'id': '"""results"""'}), "(id='results')\n", (11307, 11321), True, 'import dash_bootstrap_components as dbc\n'), ((11532, 11569), 'dash_bootstrap_components.CardHeader', 'dbc.CardHeader', (['"""Other great phones:"""'], {}), "('Other great phones:')\n", (11546, 11569), True, 'import dash_bootstrap_components as dbc\n'), ((1795, 1866), 'dash_html_components.H4', 'html.H4', (['"""Researcher\'s Night Event"""'], {'className': '"""card-title text-center"""'}), '("Researcher\'s Night Event", className=\'card-title text-center\')\n', (1802, 1866), True, 'import dash_html_components as html\n'), ((2039, 2351), 'dash_html_components.P', 'html.P', (['"""This app uses decision support tools to quickly and easily find phones which reflect the user\'s desires. Input your preferences below. The box on top right shows the phone which matches the preferences the best. The box on bottom right provides some close alternatives."""'], {'className': '"""card-text"""'}), '(\n "This app uses decision support tools to quickly and easily find phones which reflect the user\'s desires. Input your preferences below. The box on top right shows the phone which matches the preferences the best. The box on bottom right provides some close alternatives."\n , className=\'card-text\')\n', (2045, 2351), True, 'import dash_html_components as html\n'), ((3265, 3331), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired operating system"""'], {'html_for': '"""os-choice"""'}), "('Choose desired operating system', html_for='os-choice')\n", (3274, 3331), True, 'import dash_bootstrap_components as dbc\n'), ((3504, 3700), 'dash_bootstrap_components.RadioItems', 'dbc.RadioItems', ([], {'options': "[{'label': 'Android', 'value': 'Android'}, {'label': 'iOS', 'value': 'IOS'},\n {'label': 'No preference', 'value': 'both'}]", 'id': '"""os-choice"""', 'value': '"""both"""', 'inline': '(True)'}), "(options=[{'label': 'Android', 'value': 'Android'}, {'label':\n 'iOS', 'value': 'IOS'}, {'label': 'No preference', 'value': 'both'}],\n id='os-choice', value='both', inline=True)\n", (3518, 3700), True, 'import dash_bootstrap_components as dbc\n'), ((4767, 4841), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired Memory capacity (GB)"""'], {'html_for': '"""memory-choice"""'}), "('Choose desired Memory capacity (GB)', html_for='memory-choice')\n", (4776, 4841), True, 'import dash_bootstrap_components as dbc\n'), ((5014, 5180), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""memory-choice"""', 'min': '(16)', 'max': '(256)', 'step': 'None', 'included': '(False)', 'value': '(256)', 'marks': "{(16): '16', (32): '32', (64): '64', (128): '128', (256): '256'}"}), "(id='memory-choice', min=16, max=256, step=None, included=False,\n value=256, marks={(16): '16', (32): '32', (64): '64', (128): '128', (\n 256): '256'})\n", (5024, 5180), True, 'import dash_core_components as dcc\n'), ((6154, 6222), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired RAM capacity (GB)"""'], {'html_for': '"""ram-choice"""'}), "('Choose desired RAM capacity (GB)', html_for='ram-choice')\n", (6163, 6222), True, 'import dash_bootstrap_components as dbc\n'), ((6395, 6638), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""ram-choice"""', 'min': '(2)', 'max': '(12)', 'step': '(1)', 'value': '(12)', 'included': '(False)', 'marks': "{(2): '2', (3): '3', (4): '4', (5): '5', (6): '6', (7): 
'7', (8): '8', (9):\n '9', (10): '10', (11): '11', (12): '12'}", 'className': '"""text-center mt-5"""'}), "(id='ram-choice', min=2, max=12, step=1, value=12, included=False,\n marks={(2): '2', (3): '3', (4): '4', (5): '5', (6): '6', (7): '7', (8):\n '8', (9): '9', (10): '10', (11): '11', (12): '12'}, className=\n 'text-center mt-5')\n", (6405, 6638), True, 'import dash_core_components as dcc\n'), ((7852, 7925), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired camera resolution (MP)"""'], {'html_for': '"""cam-choice"""'}), "('Choose desired camera resolution (MP)', html_for='cam-choice')\n", (7861, 7925), True, 'import dash_bootstrap_components as dbc\n'), ((8098, 8321), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""cam-choice"""', 'min': '(0)', 'max': '(130)', 'step': '(1)', 'included': '(False)', 'value': '(70)', 'marks': "{(0): '0', (10): '10', (30): '30', (50): '50', (70): '70', (90): '90', (110\n ): '110', (130): '130'}", 'className': '"""text-center mt-5"""'}), "(id='cam-choice', min=0, max=130, step=1, included=False, value=\n 70, marks={(0): '0', (10): '10', (30): '30', (50): '50', (70): '70', (\n 90): '90', (110): '110', (130): '130'}, className='text-center mt-5')\n", (8108, 8321), True, 'import dash_core_components as dcc\n'), ((9400, 9466), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired budget (Euros)"""'], {'html_for': '"""cost-choice"""'}), "('Choose desired budget (Euros)', html_for='cost-choice')\n", (9409, 9466), True, 'import dash_bootstrap_components as dbc\n'), ((9639, 9885), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""cost-choice"""', 'min': '(0)', 'max': '(1400)', 'step': '(1)', 'included': '(False)', 'value': '(100)', 'marks': "{(0): '0', (200): '200', (400): '400', (600): '600', (800): '800', (1000):\n '1000', (1200): '1200', (1400): '1400'}", 'className': '"""text-center mt-5"""'}), "(id='cost-choice', min=0, max=1400, step=1, included=False, value\n =100, marks={(0): '0', (200): '200', (400): '400', (600): '600', (800):\n '800', (1000): '1000', (1200): '1200', (1400): '1400'}, className=\n 'text-center mt-5')\n", (9649, 9885), True, 'import dash_core_components as dcc\n'), ((12308, 12540), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', ([], {'id': 'f"""other-results-tooltip-{i}"""', 'target': 'f"""other-results-list-{i}"""', 'placement': '"""right"""', 'style': "{'maxWidth': 700, 'background-color': 'white', 'color': 'white',\n 'border-style': 'solid', 'border-color': 'black'}"}), "(id=f'other-results-tooltip-{i}', target=\n f'other-results-list-{i}', placement='right', style={'maxWidth': 700,\n 'background-color': 'white', 'color': 'white', 'border-style': 'solid',\n 'border-color': 'black'})\n", (12319, 12540), True, 'import dash_bootstrap_components as dbc\n'), ((11862, 11911), 'dash_html_components.Span', 'html.Span', (['f"""{i}. """'], {'id': 'f"""other-results-list-{i}"""'}), "(f'{i}. ', id=f'other-results-list-{i}')\n", (11871, 11911), True, 'import dash_html_components as html\n')]
|
# %% [markdown]
# # Testing python-som with audio dataset
# %% [markdown]
# # Imports
# %%
import matplotlib.pyplot as plt
# import librosa as lr
# import librosa.display as lrdisp
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
import sklearn.preprocessing
from python_som import SOM
FILE_PREFIX = 'som64_u_grupo1'
# %% [markdown]
# # Loading dataset
# %%
df = pd.read_csv('features_means.csv', index_col=0, verbose=True)
df.index = pd.to_datetime(df.index)
df['rac'] = False
df.loc['2020-09-22':, 'rac'] = True # type: ignore
df.sort_index(inplace=True)
# %% [markdown]
# ## Checking for and dropping duplicates
# %%
# Resetting index for duplicate analysis
df.reset_index(inplace=True)
print("Duplicates by filename:",
df.duplicated(subset=['file_name']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['file_name'], inplace=True)
print("Duplicates by (datetime, ala, grupo):",
df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)
# Rebuilding dataframe index
df.set_index('datetime', inplace=True)
# %%
# Filtering dataset by 'group'
df = df[df['grupo'] == 1]
# %%
# Dropping tail of dataset for class balancing
# tail_size = abs(
# len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0]))
# df.drop(df.tail(tail_size).index, inplace=True)
# %% [markdown]
# ## Visualizing distribution of sample dates
# %%
df_tmp = pd.DataFrame(df['file_name'].resample('1D').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])
plt.draw()
df_tmp = pd.DataFrame(df['file_name'].resample('1H').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
df_tmp = df_tmp.reset_index()
df_tmp['hour'] = df_tmp['datetime'].dt.hour
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')
plt.draw()
# %%
df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine')
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine')
for p in ax.patches:
ax.annotate(f'\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()),
ha='center',
va='top',
color='white',
size=18)
plt.draw()
# %%
# using sklearn's MinMaxScaler
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))
df_train = df.iloc[:, 3:-1].copy()
df_train = scaler.fit_transform(df_train)
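# Features are rescaled to [0, 1] so no single feature dominates the distance
# computations during SOM training.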
# %%
# Defining first element of SOM shape
# Second element will be assigned based on the ratio between the
# first two principal components of the train dataset
som_x: int = 64
try:
with open(f'./{FILE_PREFIX}.obj', 'rb') as f:
som = pickle.load(f)
except FileNotFoundError:
som = SOM(x=som_x,
y=None,
input_len=df_train.shape[1],
learning_rate=0.5,
neighborhood_radius=1.0,
neighborhood_function='gaussian',
cyclic_x=True,
cyclic_y=True,
data=df_train)
# Training SOM
som.weight_initialization(mode='linear', data=df_train)
som.train(data=df_train, mode='random', verbose=True)
with open(f'./{FILE_PREFIX}.obj', 'wb') as f:
pickle.dump(som, f)
# %%
som_x, som_y = som.get_shape()
print('SOM shape:', (som_x, som_y))
# %%
# Visualizing distance matrix and activation matrix
umatrix = som.distance_matrix()
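# The U-matrix stores, for each node, the distance to its neighbouring nodes;
# high values mark boundaries between clusters of similar nodes.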
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True)
sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
ax=ax2,
robust=True)
ax1.invert_yaxis()
ax2.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',
bbox_inches='tight',
transparent=True)
plt.draw()
# %%
# Visualizing the distance matrix and activation matrix separately
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',
bbox_inches='tight',
transparent=True)
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',
bbox_inches='tight',
transparent=True)
# %% [markdown]
# ## Visualizing distribution of features
# %%
for column in df.iloc[:, 3:-1].columns:
hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, robust=True, cmap='BrBG')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.close(fig=fig)
# %% [markdown]
# ## Visualizing distribution of audio samples by metadata (day, hour, ...)
# Each node is colorized according to its most frequent label
# %%
df['days'] = df.index.date
df['days'] = (df['days'] - df['days'][0])
df['days'] = df['days'].apply(lambda x: x.days)
df['hour'] = df.index.hour
# %%
# Visualizing 'rac' distribution
class_assignments = som.label_map(np.array(df_train), np.array(df['rac']))
hmap = np.zeros((som_x, som_y))
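# Colour each node by the most frequent 'rac' label among the samples mapped to it;
# the +1 shift keeps empty nodes (0) distinguishable from the False class.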
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1
except Exception:
continue
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'grupo'
print(df.groupby('grupo')['rac'].count())
column = 'grupo'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = 0
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'days'
print(df.groupby('days')['rac'].count())
column = 'days'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, cmap='viridis')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'hour'
print(df.groupby('hour')['rac'].count())
column = 'hour'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.diverging_palette(150,
250,
s=100,
l=20,
sep=1,
n=26,
center='light'),
center=12)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
|
[
"pickle.dump",
"pandas.read_csv",
"seaborn.color_palette",
"seaborn.diverging_palette",
"pickle.load",
"seaborn.heatmap",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"python_som.SOM",
"seaborn.barplot",
"seaborn.countplot",
"matplotlib.pyplot.draw",
"pandas.melt",
"matplotlib.pyplot.subplots",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] |
[((395, 455), 'pandas.read_csv', 'pd.read_csv', (['"""features_means.csv"""'], {'index_col': '(0)', 'verbose': '(True)'}), "('features_means.csv', index_col=0, verbose=True)\n", (406, 455), True, 'import pandas as pd\n'), ((467, 491), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (481, 491), True, 'import pandas as pd\n'), ((1699, 1727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1709, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1887), 'seaborn.barplot', 'sns.barplot', ([], {'y': 'df_tmp.index', 'x': "df_tmp['count']", 'hue': "df_tmp['rac']"}), "(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])\n", (1833, 1887), True, 'import seaborn as sns\n'), ((1888, 1898), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1896, 1898), True, 'import matplotlib.pyplot as plt\n'), ((2177, 2205), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2187, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2379), 'seaborn.barplot', 'sns.barplot', ([], {'y': "df_tmp['hour']", 'x': "df_tmp['count']", 'hue': "df_tmp['rac']", 'orient': '"""h"""'}), "(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')\n", (2311, 2379), True, 'import seaborn as sns\n'), ((2380, 2390), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2388, 2390), True, 'import matplotlib.pyplot as plt\n'), ((2407, 2464), 'pandas.melt', 'pd.melt', (['df'], {'value_vars': "['rac']", 'value_name': '"""ractopamine"""'}), "(df, value_vars=['rac'], value_name='ractopamine')\n", (2414, 2464), True, 'import pandas as pd\n'), ((2465, 2493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2475, 2493), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2656), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df_melt', 'x': '"""ractopamine"""', 'hue': '"""ractopamine"""'}), "(data=df_melt, x='ractopamine', hue='ractopamine')\n", (2606, 2656), True, 'import seaborn as sns\n'), ((2865, 2875), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2873, 2875), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4072), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 9)'}), '(1, 2, figsize=(16, 9))\n', (4049, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4131), 'seaborn.heatmap', 'sns.heatmap', (['umatrix.T'], {'cmap': '"""bone_r"""', 'ax': 'ax1', 'robust': '(True)'}), "(umatrix.T, cmap='bone_r', ax=ax1, robust=True)\n", (4084, 4131), True, 'import seaborn as sns\n'), ((4431, 4441), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4439, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4544), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4527, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4550, 4600), 'seaborn.heatmap', 'sns.heatmap', (['umatrix.T'], {'cmap': '"""bone_r"""', 'robust': '(True)'}), "(umatrix.T, cmap='bone_r', robust=True)\n", (4561, 4600), True, 'import seaborn as sns\n'), ((4754, 4781), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4764, 4781), True, 'import matplotlib.pyplot as plt\n'), ((5935, 5959), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (5943, 5959), True, 'import numpy as np\n'), ((6145, 6172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6155, 6172), 
True, 'import matplotlib.pyplot as plt\n'), ((6518, 6528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6526, 6528), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6726), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (6710, 6726), True, 'import numpy as np\n'), ((6914, 6941), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6924, 6941), True, 'import matplotlib.pyplot as plt\n'), ((7292, 7302), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7300, 7302), True, 'import matplotlib.pyplot as plt\n'), ((7473, 7497), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (7481, 7497), True, 'import numpy as np\n'), ((7686, 7713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (7696, 7713), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7752), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'cmap': '"""viridis"""'}), "(hmap, cmap='viridis')\n", (7730, 7752), True, 'import seaborn as sns\n'), ((7900, 7910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7908, 7910), True, 'import matplotlib.pyplot as plt\n'), ((8081, 8105), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (8089, 8105), True, 'import numpy as np\n'), ((8294, 8321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (8304, 8321), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8889, 8891), True, 'import matplotlib.pyplot as plt\n'), ((5249, 5276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (5259, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5286, 5329), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'robust': '(True)', 'cmap': '"""BrBG"""'}), "(hmap, robust=True, cmap='BrBG')\n", (5297, 5329), True, 'import seaborn as sns\n'), ((5497, 5515), 'matplotlib.pyplot.close', 'plt.close', ([], {'fig': 'fig'}), '(fig=fig)\n', (5506, 5515), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5905), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (5895, 5905), True, 'import numpy as np\n'), ((5907, 5926), 'numpy.array', 'np.array', (["df['rac']"], {}), "(df['rac'])\n", (5915, 5926), True, 'import numpy as np\n'), ((6653, 6671), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (6661, 6671), True, 'import numpy as np\n'), ((6673, 6693), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (6681, 6693), True, 'import numpy as np\n'), ((7424, 7442), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (7432, 7442), True, 'import numpy as np\n'), ((7444, 7464), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (7452, 7464), True, 'import numpy as np\n'), ((8032, 8050), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (8040, 8050), True, 'import numpy as np\n'), ((8052, 8072), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (8060, 8072), True, 'import numpy as np\n'), ((1771, 1820), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (1788, 1820), True, 'import seaborn as sns\n'), ((2249, 2298), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (2266, 2298), True, 'import seaborn as sns\n'), 
((2537, 2586), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (2554, 2586), True, 'import seaborn as sns\n'), ((3305, 3319), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3316, 3319), False, 'import pickle\n'), ((3356, 3537), 'python_som.SOM', 'SOM', ([], {'x': 'som_x', 'y': 'None', 'input_len': 'df_train.shape[1]', 'learning_rate': '(0.5)', 'neighborhood_radius': '(1.0)', 'neighborhood_function': '"""gaussian"""', 'cyclic_x': '(True)', 'cyclic_y': '(True)', 'data': 'df_train'}), "(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5,\n neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=\n True, cyclic_y=True, data=df_train)\n", (3359, 3537), False, 'from python_som import SOM\n'), ((6218, 6286), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': "['#000000', 'blue', 'orange']", 'n_colors': '(3)'}), "(palette=['#000000', 'blue', 'orange'], n_colors=3)\n", (6235, 6286), True, 'import seaborn as sns\n'), ((6987, 7055), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': "['#000000', 'blue', 'orange']", 'n_colors': '(3)'}), "(palette=['#000000', 'blue', 'orange'], n_colors=3)\n", (7004, 7055), True, 'import seaborn as sns\n'), ((8367, 8440), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(150)', '(250)'], {'s': '(100)', 'l': '(20)', 'sep': '(1)', 'n': '(26)', 'center': '"""light"""'}), "(150, 250, s=100, l=20, sep=1, n=26, center='light')\n", (8388, 8440), True, 'import seaborn as sns\n'), ((3836, 3855), 'pickle.dump', 'pickle.dump', (['som', 'f'], {}), '(som, f)\n', (3847, 3855), False, 'import pickle\n')]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
Description :
Author : cmy
date: 2020/1/2
-------------------------------------------------
"""
import datetime
import heapq
import numpy as np
import tensorflow as tf
import time
from metrics import ndcg_at_k
from train import get_user_record
from DMF import DMF
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5 # allocate at most 50% of GPU memory
config.gpu_options.allow_growth = True # allocate GPU memory dynamically
def train(args, data, show_loss, show_topk, log_dir):
n_user, n_item = data[0], data[1]
train_data, eval_data, test_data = data[2], data[3], data[4]
model = DMF(args, n_user, n_item)
user_num = 100
k_list = [1, 2, 5, 10, 20, 50, 100]
train_record = get_user_record(train_data, True)
test_record = get_user_record(test_data, False)
user_list = list(set(train_record.keys()) & set(test_record.keys()))
if len(user_list) > user_num:
user_list = np.random.choice(user_list, size=user_num, replace=False)
item_set = set(list(range(n_item)))
with tf.Session(config=config) as sess,\
open(log_dir + 'result_' + str(args.epochs) + '_' + str(args.lr) + '_' + str(int(time.time())) + '.txt', 'w') as f_result:
sess.run(tf.global_variables_initializer())
for step in range(args.epochs):
f_result.write('**************************epoch_i:' + str(step) + '********************' + '\n')
# RS training
np.random.shuffle(train_data)
start = 0
batch_i = 0
while start < train_data.shape[0]:
_, loss = model.train_dmf(sess, get_feed_dict_for_dmf(model, train_data, start, start + args.batch_size, 0.5))
start += args.batch_size
if show_loss:
if (step * (len(train_data) // args.batch_size) + batch_i) % 20 == 0:
time_str = datetime.datetime.now().isoformat()
print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
time_str,
step,
batch_i,
(len(train_data) // args.batch_size),
loss))
# print(loss)
batch_i += 1
# CTR evaluation
# train_auc, train_acc = model.eval(sess, get_feed_dict_for_dmf(model, train_data, 0, train_data.shape[0]))
eval_auc, eval_acc = model.eval(sess, get_feed_dict_for_dmf(model, eval_data, 0, eval_data.shape[0]))
test_auc, test_acc = model.eval(sess, get_feed_dict_for_dmf(model, test_data, 0, test_data.shape[0]))
# eval_str = 'epoch %d train auc: %.4f acc: %.4f eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
# % (step, train_auc, train_acc, eval_auc, eval_acc, test_auc, test_acc)
eval_str = 'epoch %d eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
% (step, eval_auc, eval_acc, test_auc, test_acc)
print(eval_str)
f_result.write(eval_str + '\n')
# top-K evaluation
if show_topk:
topk_str = ''
precision, recall, f1, hr, ndcg = topk_eval(
sess, model, user_list, train_record, test_record, item_set, k_list)
print('precision: ', end='')
topk_str += 'precision: '
for i in precision:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('recall: ', end='')
topk_str += '\n' + 'recall: '
for i in recall:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('f1: ', end='')
topk_str += '\n' + 'f1: '
for i in f1:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('hr: ', end='')
topk_str += '\n' + 'hr: '
for i in hr:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('ndcg: ', end='')
topk_str += '\n' + 'ndcg: '
for i in ndcg:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
f_result.write(topk_str + '\n')
def get_feed_dict_for_dmf(model, data, start, end, keep_drop=0.0):
feed_dict = {model.user_indices: data[start:end, 0],
model.item_indices: data[start:end, 1],
model.labels: data[start:end, 2],
model.keep_drop: keep_drop}
return feed_dict
def topk_eval(sess, model, user_list, train_record, test_record, item_set, k_list):
precision_list = {k: [] for k in k_list}
recall_list = {k: [] for k in k_list}
hr_list = {k: [] for k in k_list}
ndcg_list = {k: [] for k in k_list}
total_test = 0
for user in user_list:
test_item_list = list(item_set - train_record[user])
item_score_map = dict()
items, scores = model.get_scores(sess, {model.user_indices: [user] * len(test_item_list),
model.item_indices: test_item_list, model.keep_drop: 0.0})
for item, score in zip(items, scores):
item_score_map[item] = score
item_score_pair_sorted = sorted(item_score_map.items(), key=lambda x: x[1], reverse=True)
item_sorted = [i[0] for i in item_score_pair_sorted]
K_max_item_score = heapq.nlargest(k_list[-1], item_score_map, key=item_score_map.get)
r = []
for i in K_max_item_score:
if i in test_record[user]:
r.append(1)
else:
r.append(0)
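        # r is the binary relevance list of the top-K recommended items
        # (1 = the item appears in the user's test interactions); ndcg_at_k consumes it below.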
for k in k_list:
hit_num = len(set(item_sorted[:k]) & test_record[user])
precision_list[k].append(hit_num / k)
recall_list[k].append(hit_num / len(test_record[user]))
hr_list[k].append(hit_num)
ndcg_list[k].append(ndcg_at_k(r, k))
total_test += len(test_record[user])
precision = [np.mean(precision_list[k]) for k in k_list]
recall = [np.mean(recall_list[k]) for k in k_list]
f1 = [2 / (1 / precision[i] + 1 / recall[i]) for i in range(len(k_list))]
hr = [np.sum(hr_list[k]) / total_test for k in k_list]
ndcg = [np.mean(ndcg_list[k]) for k in k_list]
return precision, recall, f1, hr, ndcg
|
[
"numpy.mean",
"train.get_user_record",
"numpy.random.choice",
"metrics.ndcg_at_k",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"heapq.nlargest",
"numpy.sum",
"datetime.datetime.now",
"DMF.DMF",
"tensorflow.ConfigProto",
"time.time",
"numpy.random.shuffle"
] |
[((429, 445), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (443, 445), True, 'import tensorflow as tf\n'), ((766, 791), 'DMF.DMF', 'DMF', (['args', 'n_user', 'n_item'], {}), '(args, n_user, n_item)\n', (769, 791), False, 'from DMF import DMF\n'), ((870, 903), 'train.get_user_record', 'get_user_record', (['train_data', '(True)'], {}), '(train_data, True)\n', (885, 903), False, 'from train import get_user_record\n'), ((922, 955), 'train.get_user_record', 'get_user_record', (['test_data', '(False)'], {}), '(test_data, False)\n', (937, 955), False, 'from train import get_user_record\n'), ((1083, 1140), 'numpy.random.choice', 'np.random.choice', (['user_list'], {'size': 'user_num', 'replace': '(False)'}), '(user_list, size=user_num, replace=False)\n', (1099, 1140), True, 'import numpy as np\n'), ((1191, 1216), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1201, 1216), True, 'import tensorflow as tf\n'), ((5904, 5970), 'heapq.nlargest', 'heapq.nlargest', (['k_list[-1]', 'item_score_map'], {'key': 'item_score_map.get'}), '(k_list[-1], item_score_map, key=item_score_map.get)\n', (5918, 5970), False, 'import heapq\n'), ((6499, 6525), 'numpy.mean', 'np.mean', (['precision_list[k]'], {}), '(precision_list[k])\n', (6506, 6525), True, 'import numpy as np\n'), ((6557, 6580), 'numpy.mean', 'np.mean', (['recall_list[k]'], {}), '(recall_list[k])\n', (6564, 6580), True, 'import numpy as np\n'), ((6747, 6768), 'numpy.mean', 'np.mean', (['ndcg_list[k]'], {}), '(ndcg_list[k])\n', (6754, 6768), True, 'import numpy as np\n'), ((1375, 1408), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1406, 1408), True, 'import tensorflow as tf\n'), ((1597, 1626), 'numpy.random.shuffle', 'np.random.shuffle', (['train_data'], {}), '(train_data)\n', (1614, 1626), True, 'import numpy as np\n'), ((6686, 6704), 'numpy.sum', 'np.sum', (['hr_list[k]'], {}), '(hr_list[k])\n', (6692, 6704), True, 'import numpy as np\n'), ((6418, 6433), 'metrics.ndcg_at_k', 'ndcg_at_k', (['r', 'k'], {}), '(r, k)\n', (6427, 6433), False, 'from metrics import ndcg_at_k\n'), ((1316, 1327), 'time.time', 'time.time', ([], {}), '()\n', (1325, 1327), False, 'import time\n'), ((2043, 2066), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2064, 2066), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
Sony Colourspaces
=================
Defines the *Sony* colourspaces:
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT`.
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3`.
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3_CINE`.
- :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3`.
- :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE`.
Notes
-----
- The *Venice S-Gamut3* and *Venice S-Gamut3.Cine* primaries and whitepoint
were derived with the following `Google Colab Notebook \
<https://colab.research.google.com/drive/1ZGTij7jT8eZRMPUkyWlv_x5ix5Q5twMB>`__.
References
----------
- :cite:`Gaggioni` : <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., & <NAME>. (n.d.). S-Log: A new LUT for digital production
mastering and interchange applications (Vol. 709, pp. 1-13).
http://pro.sony.com/bbsccms/assets/files/mkt/cinema/solutions/slog_manual.pdf
- :cite:`SonyCorporation` : Sony Corporation. (n.d.). S-Log Whitepaper (pp.
1-17). http://www.theodoropoulos.info/attachments/076_on%20S-Log.pdf
- :cite:`SonyCorporationd` : Sony Corporation. (n.d.). Technical Summary
for S-Gamut3.Cine/S-Log3 and S-Gamut3/S-Log3 (pp. 1-7).
http://community.sony.com/sony/attachments/sony/\
large-sensor-camera-F5-F55/12359/2/\
TechnicalSummary_for_S-Gamut3Cine_S-Gamut3_S-Log3_V1_00.pdf
- :cite:`SonyCorporatione` : Sony Corporation. (n.d.).
S-Gamut3_S-Gamut3Cine_Matrix.xlsx.
https://community.sony.com/sony/attachments/sony/\
large-sensor-camera-F5-F55/12359/3/S-Gamut3_S-Gamut3Cine_Matrix.xlsx
- :cite:`SonyElectronicsCorporation2020` : Sony Electronics Corporation.
(2020). IDT.Sony.Venice_SLog3_SGamut3.ctl. https://github.com/ampas/\
aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\
vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3.ctl
- :cite:`SonyElectronicsCorporation2020a` : Sony Electronics Corporation.
(2020). IDT.Sony.Venice_SLog3_SGamut3Cine.ctl. https://github.com/ampas/\
aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\
vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3Cine.ctl
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, log_encoding_SLog2,
log_decoding_SLog2, log_encoding_SLog3,
log_decoding_SLog3, normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'PRIMARIES_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT',
'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT',
'RGB_COLOURSPACE_S_GAMUT', 'PRIMARIES_S_GAMUT3',
'WHITEPOINT_NAME_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3',
'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3',
'RGB_COLOURSPACE_S_GAMUT3', 'PRIMARIES_S_GAMUT3_CINE',
'WHITEPOINT_NAME_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE',
'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE',
'RGB_COLOURSPACE_S_GAMUT3_CINE', 'PRIMARIES_VENICE_S_GAMUT3',
'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3',
'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3',
'RGB_COLOURSPACE_VENICE_S_GAMUT3', 'PRIMARIES_VENICE_S_GAMUT3_CINE',
'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE',
'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE',
'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE',
'RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE'
]
PRIMARIES_S_GAMUT = np.array([
[0.7300, 0.2800],
[0.1400, 0.8550],
[0.1000, -0.0500],
])
"""
*S-Gamut* colourspace primaries.
PRIMARIES_S_GAMUT : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT = 'D65'
"""
*S-Gamut* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT : unicode
"""
CCS_WHITEPOINT_S_GAMUT = (CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer'][WHITEPOINT_NAME_S_GAMUT])
"""
*S-Gamut* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT : ndarray
"""
MATRIX_S_GAMUT_TO_XYZ = np.array([
[0.7064827132, 0.1288010498, 0.1151721641],
[0.2709796708, 0.7866064112, -0.0575860820],
[-0.0096778454, 0.0046000375, 1.0941355587],
])
"""
*S-Gamut* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT = np.array([
[1.5073998991, -0.2458221374, -0.1716116808],
[-0.5181517271, 1.3553912409, 0.1258786682],
[0.0155116982, -0.0078727714, 0.9119163656],
])
"""
*CIE XYZ* tristimulus values to *S-Gamut* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT = RGB_Colourspace(
'S-Gamut',
PRIMARIES_S_GAMUT,
CCS_WHITEPOINT_S_GAMUT,
WHITEPOINT_NAME_S_GAMUT,
MATRIX_S_GAMUT_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT,
log_encoding_SLog2,
log_decoding_SLog2,
)
RGB_COLOURSPACE_S_GAMUT.__doc__ = """
*S-Gamut* colourspace.
References
----------
:cite:`Gaggioni`, :cite:`SonyCorporation`
RGB_COLOURSPACE_S_GAMUT : RGB_Colourspace
"""
PRIMARIES_S_GAMUT3 = PRIMARIES_S_GAMUT
"""
*S-Gamut3* colourspace primaries.
PRIMARIES_S_GAMUT3 : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT
"""
*S-Gamut3* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT3 : unicode
"""
CCS_WHITEPOINT_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT
"""
*S-Gamut3* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT3 : ndarray
"""
MATRIX_S_GAMUT3_TO_XYZ = MATRIX_S_GAMUT_TO_XYZ
"""
*S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT3_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT3 = MATRIX_XYZ_TO_S_GAMUT
"""
*CIE XYZ* tristimulus values to *S-Gamut3* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT3 : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT3 = RGB_Colourspace(
'S-Gamut3',
PRIMARIES_S_GAMUT3,
CCS_WHITEPOINT_S_GAMUT3,
WHITEPOINT_NAME_S_GAMUT3,
MATRIX_S_GAMUT3_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT3,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_S_GAMUT3.__doc__ = """
*S-Gamut3* colourspace.
References
----------
:cite:`SonyCorporationd`
RGB_COLOURSPACE_S_GAMUT3 : RGB_Colourspace
"""
PRIMARIES_S_GAMUT3_CINE = np.array([
[0.76600, 0.27500],
[0.22500, 0.80000],
[0.08900, -0.08700],
])
"""
*S-Gamut3.Cine* colourspace primaries.
PRIMARIES_S_GAMUT3_CINE : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT
"""
*S-Gamut3.Cine* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT3_CINE : unicode
"""
CCS_WHITEPOINT_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT
"""
*S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT3_CINE : ndarray
"""
MATRIX_S_GAMUT3_CINE_TO_XYZ = np.array([
[0.5990839208, 0.2489255161, 0.1024464902],
[0.2150758201, 0.8850685017, -0.1001443219],
[-0.0320658495, -0.0276583907, 1.1487819910],
])
"""
*S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT3_CINE = np.array([
[1.8467789693, -0.5259861230, -0.2105452114],
[-0.4441532629, 1.2594429028, 0.1493999729],
[0.0408554212, 0.0156408893, 0.8682072487],
])
"""
*CIE XYZ* tristimulus values to *S-Gamut3.Cine* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT3_CINE : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT3_CINE = RGB_Colourspace(
'S-Gamut3.Cine',
PRIMARIES_S_GAMUT3_CINE,
CCS_WHITEPOINT_S_GAMUT3_CINE,
WHITEPOINT_NAME_S_GAMUT3_CINE,
MATRIX_S_GAMUT3_CINE_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT3_CINE,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_S_GAMUT3_CINE.__doc__ = """
*S-Gamut3.Cine* colourspace.
References
----------
:cite:`SonyCorporatione`
RGB_COLOURSPACE_S_GAMUT3_CINE : RGB_Colourspace
"""
PRIMARIES_VENICE_S_GAMUT3 = np.array([
[0.740464264304292, 0.279364374750660],
[0.089241145423286, 0.893809528608105],
[0.110488236673827, -0.052579333080476],
])
"""
*Venice S-Gamut3* colourspace primaries.
PRIMARIES_VENICE_S_GAMUT3 : ndarray, (3, 2)
"""
WHITEPOINT_NAME_VENICE_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT
"""
*Venice S-Gamut3* colourspace whitepoint name.
WHITEPOINT_NAME_VENICE_S_GAMUT3 : unicode
"""
CCS_WHITEPOINT_VENICE_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT
"""
*Venice S-Gamut3* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_VENICE_S_GAMUT3 : ndarray
"""
MATRIX_VENICE_S_GAMUT3_TO_XYZ = normalised_primary_matrix(
PRIMARIES_VENICE_S_GAMUT3, CCS_WHITEPOINT_VENICE_S_GAMUT3)
"""
*Venice S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_VENICE_S_GAMUT3_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_VENICE_S_GAMUT3 = np.linalg.inv(MATRIX_VENICE_S_GAMUT3_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *Venice S-Gamut3* colourspace matrix.
MATRIX_XYZ_TO_VENICE_S_GAMUT3 : array_like, (3, 3)
"""
RGB_COLOURSPACE_VENICE_S_GAMUT3 = RGB_Colourspace(
'Venice S-Gamut3',
PRIMARIES_VENICE_S_GAMUT3,
CCS_WHITEPOINT_VENICE_S_GAMUT3,
WHITEPOINT_NAME_VENICE_S_GAMUT3,
MATRIX_VENICE_S_GAMUT3_TO_XYZ,
MATRIX_XYZ_TO_VENICE_S_GAMUT3,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_VENICE_S_GAMUT3.__doc__ = """
*Venice S-Gamut3* colourspace.
References
----------
:cite:`SonyElectronicsCorporation2020`
RGB_COLOURSPACE_VENICE_S_GAMUT3 : RGB_Colourspace
"""
PRIMARIES_VENICE_S_GAMUT3_CINE = np.array([
[0.775901871567345, 0.274502392854799],
[0.188682902773355, 0.828684937020288],
[0.101337382499301, -0.089187517306263],
])
"""
*Venice S-Gamut3.Cine* colourspace primaries.
PRIMARIES_VENICE_S_GAMUT3_CINE : ndarray, (3, 2)
"""
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT
"""
*Venice S-Gamut3.Cine* colourspace whitepoint name.
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE : unicode
"""
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT
"""
*Venice S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE : ndarray
"""
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ = normalised_primary_matrix(
PRIMARIES_VENICE_S_GAMUT3_CINE, CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE)
"""
*Venice S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE = np.linalg.inv(
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *Venice S-Gamut3.Cine* colourspace matrix.
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE : array_like, (3, 3)
"""
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE = RGB_Colourspace(
'Venice S-Gamut3.Cine',
PRIMARIES_VENICE_S_GAMUT3_CINE,
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE,
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE,
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ,
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE.__doc__ = """
*Venice S-Gamut3.Cine* colourspace.
References
----------
:cite:`SonyElectronicsCorporation2020a`
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE : RGB_Colourspace
"""
|
[
"numpy.array",
"colour.models.rgb.normalised_primary_matrix",
"colour.models.rgb.RGB_Colourspace",
"numpy.linalg.inv"
] |
[((3760, 3813), 'numpy.array', 'np.array', (['[[0.73, 0.28], [0.14, 0.855], [0.1, -0.05]]'], {}), '([[0.73, 0.28], [0.14, 0.855], [0.1, -0.05]])\n', (3768, 3813), True, 'import numpy as np\n'), ((4273, 4421), 'numpy.array', 'np.array', (['[[0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, 0.7866064112, -\n 0.057586082], [-0.0096778454, 0.0046000375, 1.0941355587]]'], {}), '([[0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, \n 0.7866064112, -0.057586082], [-0.0096778454, 0.0046000375, 1.0941355587]])\n', (4281, 4421), True, 'import numpy as np\n'), ((4572, 4723), 'numpy.array', 'np.array', (['[[1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, 1.3553912409,\n 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656]]'], {}), '([[1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, \n 1.3553912409, 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656]])\n', (4580, 4723), True, 'import numpy as np\n'), ((4875, 5063), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut"""', 'PRIMARIES_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT', 'log_encoding_SLog2', 'log_decoding_SLog2'], {}), "('S-Gamut', PRIMARIES_S_GAMUT, CCS_WHITEPOINT_S_GAMUT,\n WHITEPOINT_NAME_S_GAMUT, MATRIX_S_GAMUT_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT,\n log_encoding_SLog2, log_decoding_SLog2)\n", (4890, 5063), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((6029, 6223), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut3"""', 'PRIMARIES_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3', 'WHITEPOINT_NAME_S_GAMUT3', 'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('S-Gamut3', PRIMARIES_S_GAMUT3, CCS_WHITEPOINT_S_GAMUT3,\n WHITEPOINT_NAME_S_GAMUT3, MATRIX_S_GAMUT3_TO_XYZ,\n MATRIX_XYZ_TO_S_GAMUT3, log_encoding_SLog3, log_decoding_SLog3)\n", (6044, 6223), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((6437, 6494), 'numpy.array', 'np.array', (['[[0.766, 0.275], [0.225, 0.8], [0.089, -0.087]]'], {}), '([[0.766, 0.275], [0.225, 0.8], [0.089, -0.087]])\n', (6445, 6494), True, 'import numpy as np\n'), ((6964, 7113), 'numpy.array', 'np.array', (['[[0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, 0.8850685017, -\n 0.1001443219], [-0.0320658495, -0.0276583907, 1.148781991]]'], {}), '([[0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, \n 0.8850685017, -0.1001443219], [-0.0320658495, -0.0276583907, 1.148781991]])\n', (6972, 7113), True, 'import numpy as np\n'), ((7282, 7431), 'numpy.array', 'np.array', (['[[1.8467789693, -0.525986123, -0.2105452114], [-0.4441532629, 1.2594429028,\n 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487]]'], {}), '([[1.8467789693, -0.525986123, -0.2105452114], [-0.4441532629, \n 1.2594429028, 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487]])\n', (7290, 7431), True, 'import numpy as np\n'), ((7602, 7830), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut3.Cine"""', 'PRIMARIES_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE', 'WHITEPOINT_NAME_S_GAMUT3_CINE', 'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('S-Gamut3.Cine', PRIMARIES_S_GAMUT3_CINE,\n CCS_WHITEPOINT_S_GAMUT3_CINE, 
WHITEPOINT_NAME_S_GAMUT3_CINE,\n MATRIX_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT3_CINE,\n log_encoding_SLog3, log_decoding_SLog3)\n", (7617, 7830), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((8057, 8192), 'numpy.array', 'np.array', (['[[0.740464264304292, 0.27936437475066], [0.089241145423286, \n 0.893809528608105], [0.110488236673827, -0.052579333080476]]'], {}), '([[0.740464264304292, 0.27936437475066], [0.089241145423286, \n 0.893809528608105], [0.110488236673827, -0.052579333080476]])\n', (8065, 8192), True, 'import numpy as np\n'), ((8662, 8750), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3'], {}), '(PRIMARIES_VENICE_S_GAMUT3,\n CCS_WHITEPOINT_VENICE_S_GAMUT3)\n', (8687, 8750), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((8915, 8959), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_VENICE_S_GAMUT3_TO_XYZ'], {}), '(MATRIX_VENICE_S_GAMUT3_TO_XYZ)\n', (8928, 8959), True, 'import numpy as np\n'), ((9125, 9365), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""Venice S-Gamut3"""', 'PRIMARIES_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3', 'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('Venice S-Gamut3', PRIMARIES_VENICE_S_GAMUT3,\n CCS_WHITEPOINT_VENICE_S_GAMUT3, WHITEPOINT_NAME_VENICE_S_GAMUT3,\n MATRIX_VENICE_S_GAMUT3_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3,\n log_encoding_SLog3, log_decoding_SLog3)\n", (9140, 9365), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((9617, 9753), 'numpy.array', 'np.array', (['[[0.775901871567345, 0.274502392854799], [0.188682902773355, \n 0.828684937020288], [0.101337382499301, -0.089187517306263]]'], {}), '([[0.775901871567345, 0.274502392854799], [0.188682902773355, \n 0.828684937020288], [0.101337382499301, -0.089187517306263]])\n', (9625, 9753), True, 'import numpy as np\n'), ((10267, 10365), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE'], {}), '(PRIMARIES_VENICE_S_GAMUT3_CINE,\n CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE)\n', (10292, 10365), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((10545, 10594), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ'], {}), '(MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ)\n', (10558, 10594), True, 'import numpy as np\n'), ((10780, 11054), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""Venice S-Gamut3.Cine"""', 'PRIMARIES_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE', 'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE', 'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('Venice S-Gamut3.Cine', PRIMARIES_VENICE_S_GAMUT3_CINE,\n CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE,\n WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE,\n MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE,\n log_encoding_SLog3, log_decoding_SLog3)\n", 
(10795, 11054), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import pprint
import unittest
import numpy as np
# pyre-fixme[21]: Could not find module `pytest`.
import pytest
import torch
from parameterized import parameterized
from reagent.core.types import RewardOptions
from reagent.gym.agents.agent import Agent
from reagent.gym.agents.post_step import train_with_replay_buffer_post_step
from reagent.gym.envs.union import Env__Union
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode
from reagent.gym.utils import build_normalizer, fill_replay_buffer
from reagent.model_managers.model_manager import ModelManager
from reagent.model_managers.union import ModelManager__Union
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.tensorboardX import summary_writer_context
from reagent.test.base.horizon_test_base import HorizonTestBase
from torch.utils.tensorboard import SummaryWriter
try:
    # Use the internal batch runner if available; otherwise fall back to the OSS runner.
from reagent.runners.fb.fb_batch_runner import FbBatchRunner as BatchRunner
except ImportError:
from reagent.runners.oss_batch_runner import OssBatchRunner as BatchRunner
# for seeding the environment
SEED = 0
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
Put on-policy gym tests here in the format (test name, path to yaml config).
Config paths should follow the format "configs/<env_name>/<model_name>_<env_name>_online.yaml".
NOTE: These tests should ideally finish quickly (within 10 minutes), since they are
unit tests that are run many times.
"""
GYM_TESTS = [
("Discrete DQN Cartpole", "configs/cartpole/discrete_dqn_cartpole_online.yaml"),
("Discrete C51 Cartpole", "configs/cartpole/discrete_c51_cartpole_online.yaml"),
("Discrete QR Cartpole", "configs/cartpole/discrete_qr_cartpole_online.yaml"),
(
"Discrete DQN Open Gridworld",
"configs/open_gridworld/discrete_dqn_open_gridworld.yaml",
),
("SAC Pendulum", "configs/pendulum/sac_pendulum_online.yaml"),
("TD3 Pendulum", "configs/pendulum/td3_pendulum_online.yaml"),
("Parametric DQN Cartpole", "configs/cartpole/parametric_dqn_cartpole_online.yaml"),
(
"Parametric SARSA Cartpole",
"configs/cartpole/parametric_sarsa_cartpole_online.yaml",
),
(
"Sparse DQN Changing Arms",
"configs/sparse/discrete_dqn_changing_arms_online.yaml",
),
("SlateQ RecSim", "configs/recsim/slate_q_recsim_online.yaml"),
("PossibleActionsMask DQN", "configs/functionality/dqn_possible_actions_mask.yaml"),
]
curr_dir = os.path.dirname(__file__)
class TestGym(HorizonTestBase):
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(GYM_TESTS)
def test_gym_cpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on CPU")
self.run_from_config(
run_test=run_test,
config_path=os.path.join(curr_dir, config_path),
use_gpu=False,
)
logger.info(f"{name} passes!")
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(GYM_TESTS)
@pytest.mark.serial
# pyre-fixme[56]: Argument `not torch.cuda.is_available()` to decorator factory
# `unittest.skipIf` could not be resolved in a global scope.
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_gym_gpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on GPU")
self.run_from_config(
run_test=run_test,
config_path=os.path.join(curr_dir, config_path),
use_gpu=True,
)
logger.info(f"{name} passes!")
def run_test(
env: Env__Union,
model: ModelManager__Union,
replay_memory_size: int,
train_every_ts: int,
train_after_ts: int,
num_train_episodes: int,
passing_score_bar: float,
num_eval_episodes: int,
use_gpu: bool,
):
env = env.value
env.seed(SEED)
env.action_space.seed(SEED)
normalization = build_normalizer(env)
logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
manager: ModelManager = model.value
runner = BatchRunner(use_gpu, manager, RewardOptions(), normalization)
trainer = runner.initialize_trainer()
reporter = manager.get_reporter()
trainer.reporter = reporter
training_policy = manager.create_policy(trainer)
replay_buffer = ReplayBuffer(
replay_capacity=replay_memory_size, batch_size=trainer.minibatch_size
)
device = torch.device("cuda") if use_gpu else torch.device("cpu")
    # First fill the replay buffer up to the burn-in size.
train_after_ts = max(train_after_ts, trainer.minibatch_size)
fill_replay_buffer(
env=env, replay_buffer=replay_buffer, desired_size=train_after_ts
)
post_step = train_with_replay_buffer_post_step(
replay_buffer=replay_buffer,
env=env,
trainer=trainer,
training_freq=train_every_ts,
batch_size=trainer.minibatch_size,
device=device,
)
agent = Agent.create_for_env(
env, policy=training_policy, post_transition_callback=post_step, device=device
)
writer = SummaryWriter()
with summary_writer_context(writer):
train_rewards = []
for i in range(num_train_episodes):
trajectory = run_episode(
env=env, agent=agent, mdp_id=i, max_steps=env.max_steps
)
ep_reward = trajectory.calculate_cumulative_reward()
train_rewards.append(ep_reward)
logger.info(
f"Finished training episode {i} (len {len(trajectory)})"
f" with reward {ep_reward}."
)
logger.info("============Train rewards=============")
logger.info(train_rewards)
logger.info(f"average: {np.mean(train_rewards)};\tmax: {np.max(train_rewards)}")
    # Check whether the max score passed the score bar; since we explore during
    # training, the returns can be poor (leading to flakiness in C51 and QRDQN).
assert np.max(train_rewards) >= passing_score_bar, (
f"max reward ({np.max(train_rewards)})after training for "
f"{len(train_rewards)} episodes is less than < {passing_score_bar}.\n"
)
serving_policy = manager.create_serving_policy(normalization, trainer)
agent = Agent.create_for_env_with_serving_policy(env, serving_policy)
eval_rewards = evaluate_for_n_episodes(
n=num_eval_episodes, env=env, agent=agent, max_steps=env.max_steps
).squeeze(1)
logger.info("============Eval rewards==============")
logger.info(eval_rewards)
mean_eval = np.mean(eval_rewards)
logger.info(f"average: {mean_eval};\tmax: {np.max(eval_rewards)}")
assert (
mean_eval >= passing_score_bar
), f"Eval reward is {mean_eval}, less than < {passing_score_bar}.\n"
if __name__ == "__main__":
unittest.main()
|
[
"logging.getLogger",
"reagent.gym.utils.build_normalizer",
"reagent.gym.utils.fill_replay_buffer",
"torch.cuda.is_available",
"reagent.replay_memory.circular_replay_buffer.ReplayBuffer",
"unittest.main",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"reagent.tensorboardX.summary_writer_context",
"parameterized.parameterized.expand",
"reagent.core.types.RewardOptions",
"numpy.max",
"reagent.gym.agents.agent.Agent.create_for_env",
"reagent.gym.agents.agent.Agent.create_for_env_with_serving_policy",
"pprint.pformat",
"os.path.dirname",
"reagent.gym.agents.post_step.train_with_replay_buffer_post_step",
"torch.device",
"reagent.gym.runners.gymrunner.evaluate_for_n_episodes",
"reagent.gym.runners.gymrunner.run_episode",
"os.path.join"
] |
[((1286, 1313), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1303, 1313), False, 'import logging\n'), ((2639, 2664), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2654, 2664), False, 'import os\n'), ((2776, 2807), 'parameterized.parameterized.expand', 'parameterized.expand', (['GYM_TESTS'], {}), '(GYM_TESTS)\n', (2796, 2807), False, 'from parameterized import parameterized\n'), ((3188, 3219), 'parameterized.parameterized.expand', 'parameterized.expand', (['GYM_TESTS'], {}), '(GYM_TESTS)\n', (3208, 3219), False, 'from parameterized import parameterized\n'), ((4117, 4138), 'reagent.gym.utils.build_normalizer', 'build_normalizer', (['env'], {}), '(env)\n', (4133, 4138), False, 'from reagent.gym.utils import build_normalizer, fill_replay_buffer\n'), ((4513, 4601), 'reagent.replay_memory.circular_replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'replay_capacity': 'replay_memory_size', 'batch_size': 'trainer.minibatch_size'}), '(replay_capacity=replay_memory_size, batch_size=trainer.\n minibatch_size)\n', (4525, 4601), False, 'from reagent.replay_memory.circular_replay_buffer import ReplayBuffer\n'), ((4797, 4887), 'reagent.gym.utils.fill_replay_buffer', 'fill_replay_buffer', ([], {'env': 'env', 'replay_buffer': 'replay_buffer', 'desired_size': 'train_after_ts'}), '(env=env, replay_buffer=replay_buffer, desired_size=\n train_after_ts)\n', (4815, 4887), False, 'from reagent.gym.utils import build_normalizer, fill_replay_buffer\n'), ((4914, 5092), 'reagent.gym.agents.post_step.train_with_replay_buffer_post_step', 'train_with_replay_buffer_post_step', ([], {'replay_buffer': 'replay_buffer', 'env': 'env', 'trainer': 'trainer', 'training_freq': 'train_every_ts', 'batch_size': 'trainer.minibatch_size', 'device': 'device'}), '(replay_buffer=replay_buffer, env=env,\n trainer=trainer, training_freq=train_every_ts, batch_size=trainer.\n minibatch_size, device=device)\n', (4948, 5092), False, 'from reagent.gym.agents.post_step import train_with_replay_buffer_post_step\n'), ((5152, 5257), 'reagent.gym.agents.agent.Agent.create_for_env', 'Agent.create_for_env', (['env'], {'policy': 'training_policy', 'post_transition_callback': 'post_step', 'device': 'device'}), '(env, policy=training_policy, post_transition_callback=\n post_step, device=device)\n', (5172, 5257), False, 'from reagent.gym.agents.agent import Agent\n'), ((5281, 5296), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5294, 5296), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6426, 6487), 'reagent.gym.agents.agent.Agent.create_for_env_with_serving_policy', 'Agent.create_for_env_with_serving_policy', (['env', 'serving_policy'], {}), '(env, serving_policy)\n', (6466, 6487), False, 'from reagent.gym.agents.agent import Agent\n'), ((6730, 6751), 'numpy.mean', 'np.mean', (['eval_rewards'], {}), '(eval_rewards)\n', (6737, 6751), True, 'import numpy as np\n'), ((6981, 6996), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6994, 6996), False, 'import unittest\n'), ((4295, 4310), 'reagent.core.types.RewardOptions', 'RewardOptions', ([], {}), '()\n', (4308, 4310), False, 'from reagent.core.types import RewardOptions\n'), ((4625, 4645), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4637, 4645), False, 'import torch\n'), ((4662, 4681), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4674, 4681), False, 'import torch\n'), ((5306, 5336), 'reagent.tensorboardX.summary_writer_context', 
'summary_writer_context', (['writer'], {}), '(writer)\n', (5328, 5336), False, 'from reagent.tensorboardX import summary_writer_context\n'), ((6140, 6161), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (6146, 6161), True, 'import numpy as np\n'), ((3419, 3444), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3442, 3444), False, 'import torch\n'), ((5434, 5502), 'reagent.gym.runners.gymrunner.run_episode', 'run_episode', ([], {'env': 'env', 'agent': 'agent', 'mdp_id': 'i', 'max_steps': 'env.max_steps'}), '(env=env, agent=agent, mdp_id=i, max_steps=env.max_steps)\n', (5445, 5502), False, 'from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode\n'), ((6209, 6230), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (6215, 6230), True, 'import numpy as np\n'), ((6508, 6603), 'reagent.gym.runners.gymrunner.evaluate_for_n_episodes', 'evaluate_for_n_episodes', ([], {'n': 'num_eval_episodes', 'env': 'env', 'agent': 'agent', 'max_steps': 'env.max_steps'}), '(n=num_eval_episodes, env=env, agent=agent,\n max_steps=env.max_steps)\n', (6531, 6603), False, 'from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode\n'), ((2997, 3032), 'os.path.join', 'os.path.join', (['curr_dir', 'config_path'], {}), '(curr_dir, config_path)\n', (3009, 3032), False, 'import os\n'), ((3657, 3692), 'os.path.join', 'os.path.join', (['curr_dir', 'config_path'], {}), '(curr_dir, config_path)\n', (3669, 3692), False, 'import os\n'), ((4178, 4207), 'pprint.pformat', 'pprint.pformat', (['normalization'], {}), '(normalization)\n', (4192, 4207), False, 'import pprint\n'), ((5917, 5939), 'numpy.mean', 'np.mean', (['train_rewards'], {}), '(train_rewards)\n', (5924, 5939), True, 'import numpy as np\n'), ((5949, 5970), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (5955, 5970), True, 'import numpy as np\n'), ((6799, 6819), 'numpy.max', 'np.max', (['eval_rewards'], {}), '(eval_rewards)\n', (6805, 6819), True, 'import numpy as np\n')]
|